query stringlengths 9–9.05k | document stringlengths 10–222k | metadata dict | negatives listlengths 30 | negative_scores listlengths 30 | document_score stringlengths 4–10 | document_rank stringclasses 2 values
---|---|---|---|---|---|---
Check that the template was rendered
|
def rendered(template):
    def was_rendered(client, response, testcase):
        testcase.assertTemplateUsed(response, template)
    return was_rendered
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"async def _validate_template(self, template):\n try:\n templater.Template(template, self.hass).async_render()\n return True\n except Exception as exception: # pylint: disable=broad-except\n _LOGGER.error(exception)\n pass\n return False",
"def test_register_page_is_rendered(self):\n url = \"/regiter/\"\n response = self.client.get('/register/')\n self.assertEqual(response.status_code, 200)\n self.assertTemplateUsed(response, \"register_user.html\")",
"def test_for_template(self):\n self.assertTemplateUsed(self.response, 'my_info_template.html')",
"def should_render(\n self,\n *,\n context: Context,\n ) -> bool:\n from reviewboard.urls import diffviewer_url_names\n match = context['request'].resolver_match\n\n # If we're on a diff viewer page, then this should be initially\n # rendered, but might be hidden.\n if match.url_name in diffviewer_url_names:\n return True\n\n review_request = context.get('review_request')\n\n return (super().should_render(context=context) and\n review_request is not None and\n review_request.has_diffsets)",
"def test_template(self):\n\t\tself.assertTemplateUsed(self.resp, 'inicio.html')",
"def test_template(self):\n self.assertTemplateUsed(self.response, 'formularios.html')",
"def test_render_template(self):\n template = self.block.meta.template\n self.assertEqual(template, 'common/blocks/google_calendar.html', 'The templates are not the same')",
"def should_render(\n self,\n *,\n context: Context,\n ) -> bool:\n request = context['request']\n user = request.user\n\n return (super().should_render(context=context) and\n user.is_authenticated and\n not is_site_read_only_for(user) and\n not unified_banner_feature.is_enabled(request=request))",
"def should_render(\n self,\n *,\n context: Context,\n ) -> bool:\n request = context['request']\n user = request.user\n\n return (super().should_render(context=context) and\n user.is_authenticated and\n not is_site_read_only_for(user) and\n not unified_banner_feature.is_enabled(request=request))",
"def render_response(self, _template, **context):\n if self.logged_in():\n ctx = {'user': self.user}\n else:\n ctx = {}\n\n ctx.update(context)\n\n if 'model_edited' not in ctx:\n model_edited = self.get_session_property('model_edited')\n if model_edited is not None:\n ctx.update({'model_edited': model_edited})\n\n if 'is_model_saved' not in ctx:\n is_model_saved = self.get_session_property('is_model_saved')\n print is_model_saved\n if is_model_saved is not None:\n ctx.update({'is_model_saved': is_model_saved})\n\n template = jinja_environment.get_template(_template)\n self.response.out.write(template.render({'active_upload': True}, **ctx))",
"def should_render(\n self,\n context: Context,\n ) -> bool:\n request = context['request']\n user = request.user\n\n return (super().should_render(context=context) and\n user.is_authenticated and\n not is_site_read_only_for(user) and\n unified_banner_feature.is_enabled(request=request))",
"def test_should_render_for_owner(self) -> None:\n self.assertTrue(self.action.should_render(\n context=self._create_request_context()))",
"def test_template(self):\n\t\tself.assertTemplateUsed(self.resp, 'cadastro.html')",
"def test_view_uses_correct_template(self):\n response = self.client.get(reverse('home'))\n self.assertEqual(response.status_code, 200)\n self.assertTemplateUsed(response, 'home.html')",
"def test_render_template(self):\n template = self.block.meta.template\n self.assertEqual(template, 'common/blocks/journals_tab_block.html', 'Templates were not the same')",
"def should_render(\n self,\n *,\n context: Context,\n ) -> bool:\n request = context['request']\n review_request = context.get('review_request')\n user = request.user\n\n return (user.is_authenticated and\n review_request is not None and\n review_request.public and\n not is_site_read_only_for(user) and\n super().should_render(context=context))",
"def should_render(\n self,\n *,\n context: Context,\n ) -> bool:\n request = context['request']\n review_request = context.get('review_request')\n user = request.user\n\n return (user.is_authenticated and\n review_request is not None and\n review_request.public and\n not is_site_read_only_for(user) and\n super().should_render(context=context))",
"def exists(self):\n try:\n select_template(self.get_paths())\n return True\n except TemplateDoesNotExist:\n return False",
"def test_template_home(self):\n self.assertTemplateUsed(self.response, 'index.html')",
"def test_content_is_correct(self):\n home_page = self.client.get(\"/\")\n self.assertTemplateUsed(home_page, \"home/home.html\")\n home_page_template_output = render_to_response(\"home/home.html\", {\"active\": \"home\"}).content\n self.assertEqual(home_page.content, home_page_template_output)",
"def should_render(\n self,\n *,\n context: Context,\n ) -> bool:\n request = context['request']\n user = request.user\n\n return (super().should_render(context=context) and\n user.is_authenticated and\n not is_site_read_only_for(user) and\n general_comments_feature.is_enabled(request=request) and\n not unified_banner_feature.is_enabled(request=request))",
"def test_empty_template(self):\n with TemplateRenderThread('yaml_file_empty.t', 'yaml_file_empty.tmp.out') as renderer:\n def check_render_got_exception():\n return renderer.raised_exception\n\n self.assertTrue(legion.utils.ensure_function_succeed(check_render_got_exception, 5, 3))\n\n self.assertIsNotNone(renderer.raised_exception)\n self.assertIsInstance(renderer.raised_exception, Exception)\n self.assertEqual(renderer.raised_exception.args[0], 'Template doesnt use any plugin')",
"def should_render(\n self,\n *,\n context: Context,\n ) -> bool:\n request = context['request']\n review_request = context.get('review_request')\n perms = context.get('perms')\n user = request.user\n\n return (super().should_render(context=context) and\n review_request is not None and\n review_request.status == ReviewRequest.PENDING_REVIEW and\n not is_site_read_only_for(user) and\n (request.user.pk == review_request.submitter_id or\n (bool(perms) and\n perms['reviews']['can_change_status'] and\n review_request.public)))",
"def should_render(\n self,\n *,\n context: Context,\n ) -> bool:\n review_request = context.get('review_request')\n\n return (super().should_render(context=context) and\n review_request is not None and\n review_request.public and\n not is_site_read_only_for(context['request'].user))",
"def test_home_view_template(self):\n response = self.client.get(self.url)\n self.assertEqual(response.status_code, 200)\n self.assertTemplateUsed(response, 'user_data.html')",
"def should_render(\n self,\n *,\n context: Context,\n ) -> bool:\n request = context['request']\n review_request = context.get('review_request')\n perms = context.get('perms')\n user = request.user\n\n return (super().should_render(context=context) and\n review_request is not None and\n review_request.status == ReviewRequest.PENDING_REVIEW and\n not is_site_read_only_for(user) and\n (user.pk == review_request.submitter_id or\n (bool(perms) and perms['reviews']['can_edit_reviewrequest'])))",
"def should_render(\n self,\n *,\n context: Context,\n ) -> bool:\n request = context['request']\n review_request = context.get('review_request')\n perms = context.get('perms')\n user = request.user\n\n return (super().should_render(context=context) and\n review_request is not None and\n review_request.status == ReviewRequest.PENDING_REVIEW and\n not is_site_read_only_for(user) and\n (user.pk == review_request.submitter_id or\n (bool(perms) and\n perms['reviews']['can_edit_reviewrequest'])))",
"def test_render_call_without_request(self, mock_render):\n context = MagicMock()\n render('template_name.html', context)\n mock_render.assert_called_with('template_name.html', context)",
"def _ask_for_template_use(self):\n use_template = self._view_callback_provider.ask_for_template_use()\n\n # explicit True and False, because _view_callback_provider can also return \"cancel\"\n if use_template == True:\n self._check_unsaved_changes()\n elif not use_template:\n self._save_current_as_new()",
"def requires_safe_render(self) -> bool:\n return True\n # return any(is_reserved(child.name) for child in self.children)"
] |
[
"0.72408986",
"0.65604",
"0.647206",
"0.63943523",
"0.6388394",
"0.6358663",
"0.63448054",
"0.6297637",
"0.6297637",
"0.6272517",
"0.6264347",
"0.6259718",
"0.6252725",
"0.6245319",
"0.62295973",
"0.6227864",
"0.6227864",
"0.6158289",
"0.61369985",
"0.6132051",
"0.6128815",
"0.6092733",
"0.6062353",
"0.60584754",
"0.60502577",
"0.60313934",
"0.6012751",
"0.6011897",
"0.60092974",
"0.600804"
] |
0.7561209
|
0
|
Check that the email.outbox contains n items
|
def outbox_len(count):
    def outbox_len_is(client, response, testcase):
        testcase.assertEqual(
            len(mail.outbox),
            count
        )
    return outbox_len_is
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def check_not_empty(self, wait_time):\n for i in range(3):\n self.refresh_emailbox(wait_time)\n if not self._device(resourceId='com.tct.email:id/empty_view').exists:\n self._logger.debug('The box is not empty')\n return True\n self._logger.debug('The box is empty')\n return False\n\n # def test(self, address):\n # self._device(resourceId='com.android.email:id/forward').click()\n # self._device.delay(2)\n # if self._device(resourceId='android:id/button1').exists:\n # self._device(resourceId='android:id/button1').click()\n # self._device.delay(3)\n # self._device(className='android.widget.MultiAutoCompleteTextView',description='To').set_text(address)\n # self._device.delay(2)\n # self._device(description='Send').click()\n # self._device.delay(2)\n # self._logger.debug('email sending...')\n # if self._device(resourceId='com.android.email:id/forward').exists:\n # self._device.delay(10)",
"def test_send_subscribe_email(self):\n #Verifica se foi enviado 1 e-mail, o este não envia e-mail\n self.assertEqual(1, len(mail.outbox))",
"def test_bob_unread(self):\n messages = list(self.bob_inbox.unread)\n self.assertEqual(2, len(messages))\n self.assertIn(self.bob_message_to_group1, messages)\n self.assertNotIn(self.read_message, messages)\n self.assertIn(self.alice_message_to_bob, messages)\n self.assertNotIn(self.archived_message, messages)",
"def test_bob_unread(self):\n messages = list(self.bob_storage.unread)\n self.assertEqual(2, len(messages))\n self.assertIn(self.bob_message_to_group1, messages)\n self.assertNotIn(self.read_message, messages)\n self.assertIn(self.alice_message_to_bob, messages)\n self.assertNotIn(self.archived_message, messages)",
"def test_bob_all(self):\n messages = list(self.bob_inbox.all)\n self.assertEqual(3, len(messages))\n self.assertIn(self.bob_message_to_group1, messages)\n self.assertIn(self.read_message, messages)\n self.assertIn(self.alice_message_to_bob, messages)\n self.assertNotIn(self.archived_message, messages)",
"def check_empty(self, wait_time_refrese, wait_time_empty=0):\n for i in range(3):\n self.refresh_emailbox(wait_time_refrese)\n if self._device(resourceId='com.tct.email:id/empty_view').exists:\n self._logger.debug('The box is empty')\n return True\n if wait_time_empty <> 0:\n self._device.delay(wait_time_empty)\n self._logger.debug('The box is not empty')\n return False",
"def test_alice_all(self):\n messages = list(self.alice_inbox.all)\n self.assertEqual(2, len(messages))\n self.assertIn(self.bob_message_to_group1, messages)\n self.assertIn(self.read_message, messages)\n self.assertNotIn(self.alice_message_to_bob, messages)\n self.assertNotIn(self.archived_message, messages)",
"def valid_multiple_in_response(self):\n return self._repeatable[1] is True",
"def test_alice_unread(self):\n messages = list(self.alice_inbox.unread)\n self.assertEqual(1, len(messages))\n self.assertIn(self.bob_message_to_group1, messages)\n self.assertNotIn(self.read_message, messages)\n self.assertNotIn(self.alice_message_to_bob, messages)\n self.assertNotIn(self.archived_message, messages)",
"def test_chunked_queries_send_numerous_emails(self, email_mock):\r\n mock_factory = MockCourseEmailResult()\r\n email_mock.side_effect = mock_factory.get_mock_update_subtask_status()\r\n added_users = []\r\n for _ in xrange(LARGE_NUM_EMAILS):\r\n user = UserFactory()\r\n added_users.append(user)\r\n CourseEnrollmentFactory.create(user=user, course_id=self.course.id)\r\n\r\n optouts = []\r\n for i in [1, 3, 9, 10, 18]: # 5 random optouts\r\n user = added_users[i]\r\n optouts.append(user)\r\n optout = Optout(user=user, course_id=self.course.id)\r\n optout.save()\r\n\r\n test_email = {\r\n 'action': 'Send email',\r\n 'send_to': 'all',\r\n 'subject': 'test subject for all',\r\n 'message': 'test message for all'\r\n }\r\n # Post the email to the instructor dashboard API\r\n response = self.client.post(self.send_mail_url, test_email)\r\n self.assertEquals(json.loads(response.content), self.success_content)\r\n\r\n self.assertEquals(mock_factory.emails_sent,\r\n 1 + len(self.staff) + len(self.students) + LARGE_NUM_EMAILS - len(optouts))\r\n outbox_contents = [e.to[0] for e in mail.outbox]\r\n should_send_contents = ([self.instructor.email] +\r\n [s.email for s in self.staff] +\r\n [s.email for s in self.students] +\r\n [s.email for s in added_users if s not in optouts])\r\n self.assertItemsEqual(outbox_contents, should_send_contents)",
"def test_alice_unread(self):\n messages = list(self.alice_storage.unread)\n self.assertEqual(1, len(messages))\n self.assertIn(self.bob_message_to_group1, messages)\n self.assertNotIn(self.read_message, messages)\n self.assertNotIn(self.alice_message_to_bob, messages)\n self.assertNotIn(self.archived_message, messages)",
"def test_bob_sent(self):\n messages = list(self.bob_storage.sent)\n self.assertEqual(1, len(messages))\n self.assertIn(self.bob_message_to_group1, messages)",
"def check_item(self, item: PoItem):\n for line in item.msgstr:\n if len(line) > MAX_LINE_LENGTH - 2: # 2 is for \"\"\n item.add_error(\n self.name,\n f\"Line too long ({len(line) + 2} > \"\n f\"{MAX_LINE_LENGTH}): {line}\",\n )",
"def email_sent_with_subject(subject):\n return [email.subject == subject for email in mail.outbox]",
"def get_new_mails(self):\n\t\tif cint(self.settings.use_imap):\n\t\t\tself.imap.select(\"Inbox\")\n\t\t\tif self.settings.no_remaining == '0' and self.settings.uidnext:\n\t\t\t\tif self.settings.uidnext == self.settings.newuidnext:\n\t\t\t\t\treturn False\n\t\t\t\telse:\n\t\t\t\t\t#request all messages between last uidnext and new\n\t\t\t\t\treturn True\n\t\t\telse:\n\t\t\t\tresponse, message = self.imap.uid('search', None, \"ALL\")\n\t\t\temail_list = message[0].split()\n\t\telse:\n\t\t\temail_list = self.pop.list()[1]\n\n\t\treturn email_list",
"def test_send_notification(self):\n management.call_command('send_first_report_notification', [], {})\n eq_(len(mail.outbox), 4)",
"def test_invitation_email(self):\n queryset = models.Invitation.objects.filter(id=self.invitation.id)\n self.admin_instance.send_new_activation_email(self.some_request, queryset)\n # check whether there is a mail in the outbox\n self.assertEqual(len(mail.outbox), 1)\n # check subject\n self.assertEqual(\n mail.outbox[0].subject,\n \"Er is een account voor u aangemaakt op sso.lizard.net\",\n )\n self.assertEqual(mail.outbox[0].to, [\"[email protected]\"])\n # check mail starts with 'Hallo Reinout,'\n self.assertTrue(mail.outbox[0].body.startswith(\"Hallo Reinout,\"))",
"def filter_unread(check_what, criteria, return_what):\n imap = imaplib.IMAP4_SSL(config[\"email\"][\"server\"])\n imap.login(config[\"email\"][\"user\"], config[\"email\"][\"pass\"])\n status, messages = imap.select(\"INBOX\")\n \n status, response = imap.search(None, '(UNSEEN)')\n unread_msg_nums = response[0].split()\n\n ret = [] \n for i in unread_msg_nums:\n parse_return = parse(imap, i, check_what, criteria, return_what)\n if parse_return is not None:\n ret.append(parse_return)\n set_unseen(imap, i)\n imap.close()\n imap.logout()\n\n return ret",
"def _check_items_limit(self):\n if self.items_limit and self.items_limit == self.get_metadata('items_count'):\n raise ItemsLimitReached('Finishing job after items_limit reached:'\n ' {} items written.'.format(self.get_metadata('items_count')))",
"def objectsReady(self, n):\n return len(self.files) >= n",
"def _assert_escalation_email_available(self, available):\n func = self.assertIn if available else self.assertNotIn\n response = self.client.get(self.url)\n func('escalation-email-container', response.content.decode('utf-8'))",
"def delete_mail(self, box):\n self._logger.debug('delete the mail of %s', box)\n\n if not self.enter_mailbox(box):\n return False\n maxtime = 0\n while self.check_not_empty(100):\n if self.refresh_emailbox(60) <> 2:\n return False\n if box == 'Trash':\n if self._device(resourceId='com.tct.email:id/empty_trash').exists:\n self._device(resourceId='com.tct.email:id/empty_trash').click()\n self._device.delay(2)\n if self._device(resourceId='android:id/button1', text='DELETE').exists:\n self._device(resourceId='android:id/button1', text='DELETE').click()\n self._device.delay(3)\n else:\n index = 1\n if self._device(resourceId='com.tct.email:id/conversation_list_view').child(\n className='android.widget.FrameLayout', instance=index).exists:\n if self._device(resourceId='com.tct.email:id/conversation_list_view').child(\n resourceId='com.tct.email:id/outbox').exists:\n if self._device(resourceId='com.tct.email:id/conversation_list_view').getChildCount() == 1:\n return True\n else:\n self._device(resourceId='com.tct.email:id/conversation_list_view').child(\n className='android.widget.FrameLayout', instance=index).long_click()\n self._device.delay(2)\n else:\n self._device(resourceId='com.tct.email:id/conversation_list_view').child(\n className='android.widget.FrameLayout', instance=index).long_click()\n self._device.delay(2)\n if self._device(description='Select all').exists:\n self._device(description='Select all').click()\n self._device.delay(2)\n if self._device(description='Delete').exists:\n self._device(description='Delete').click()\n self._device.delay(2)\n if self._device(description='Discard failed').exists:\n self._device(description='Discard failed').click()\n self._device.delay(2)\n if self._device(resourceId='android:id/button1', text='OK').exists:\n self._device(resourceId='android:id/button1', text='OK').click()\n self._device.delay(2)\n if self._device(resourceId='com.tct.email:id/empty_text').exists:\n break\n if maxtime > 30:\n break\n maxtime += 1\n if self._device(resourceId='com.tct.email:id/empty_text').exists:\n self._logger.debug('mail of the %s has delete complete', box)\n return True\n else:\n return False",
"def check_delivered_messages(results):\n assert results[\"metrics\"][\"Delivered messages\"] == 20",
"def outbox():\n with mail.record_messages() as messages:\n yield messages",
"def num_messages_with_attachments(self):\n\n limit = self.limit if self.limit > 0 else False\n gm_ids = self.inbox.search(self.search_string, gm_ids=True, limit=limit)\n return len(gm_ids)",
"def not_empty(entry):\n gt_boxes = entry['boxes']\n return gt_boxes.shape[0] > 0",
"def test_bob_all(self):\n messages = list(self.bob_storage.all)\n self.assertEqual(3, len(messages))\n self.assertIn(self.bob_message_to_group1, messages)\n self.assertIn(self.read_message, messages)\n self.assertIn(self.alice_message_to_bob, messages)\n self.assertNotIn(self.archived_message, messages)",
"def test_alice_all(self):\n messages = list(self.alice_storage.all)\n self.assertEqual(2, len(messages))\n self.assertIn(self.bob_message_to_group1, messages)\n self.assertIn(self.read_message, messages)\n self.assertNotIn(self.alice_message_to_bob, messages)\n self.assertNotIn(self.archived_message, messages)",
"def display_available_items(self):\n count = 0\n print(\"Available Items:\")\n for item in self.item_list.values():\n if item.check_availability():\n count += 1\n print(item)\n if count == 0:\n print(\"No items are available\")",
"def test_item_count(self):\n self.assertEqual(len(self.items), 2)"
] |
[
"0.6196226",
"0.61041015",
"0.57169133",
"0.5665311",
"0.5621528",
"0.55776346",
"0.55671704",
"0.5560721",
"0.55464005",
"0.5522656",
"0.5504371",
"0.546202",
"0.54347897",
"0.54145616",
"0.53828126",
"0.5365601",
"0.53643",
"0.53439236",
"0.5324718",
"0.5317421",
"0.531184",
"0.52810025",
"0.5255632",
"0.5249457",
"0.52482116",
"0.52450734",
"0.52082807",
"0.519712",
"0.5195308",
"0.5194892"
] |
0.662837
|
0
|
Set the pool of servers used by this client.
|
def set_servers(self, servers):
    self.servers = []
    self.buckets = []
    for server_desc in servers:
        if type(server_desc) == tuple:
            server_addr, weight = server_desc
        else:
            server_addr, weight = server_desc, 1
        server = _ServerConnection(server_addr, weight, self._debuglog)
        self.servers.append(server)
        for _index in range(weight):
            self.buckets.append(server)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def server_pool(self, server_pool):\n\n self._server_pool = server_pool",
"def set_servers(self, servers):\r\n self.servers = [_Host(s, self.debug, dead_retry=self.dead_retry,\r\n socket_timeout=self.socket_timeout)\r\n for s in servers]\r\n self._init_buckets()",
"def set_servers(self, servers):\n kwargs = dict(io_loop = self.io_loop)\n #if self.connect_timeout:\n # kwargs['connect_timeout'] = self.connect_timeout \n if self.dead_retry:\n kwargs['dead_retry'] = self.dead_retry \n self.servers = [_Host(s, self.debuglog, **kwargs) for s in servers]\n self._init_buckets()",
"def set_servers(self, servers):\n if isinstance(servers, six.string_types):\n servers = [servers]\n\n assert servers, \"No memcached servers supplied\"\n self._servers = [Protocol(\n server=server,\n username=self.username,\n password=self.password,\n compression=self.compression,\n socket_timeout=self.socket_timeout,\n pickle_protocol=self.pickle_protocol,\n pickler=self.pickler,\n unpickler=self.unpickler,\n tls_context=self.tls_context,\n ) for server in servers]",
"def set_pool_size(self, pool_size):\n self._aspp.set_pool_size(pool_size)",
"def update_connection_pool(maxsize=1):\n get_pool().connection_pool_kw.update(maxsize=maxsize)",
"def set_ports_pool(self, being: int, end: int):\n self.ports_pool = (being, end)\n return self",
"def set_clients(self, pps, pfs):\n\n self._clients = {\n 'pps': pps,\n 'pfs': pfs\n }",
"def set_servers(self, server_infos):\n self.remove_servers_channels()\n for server_info in server_infos:\n server_section = server_info['server']\n server_name = server_section.name\n self._init_section_id(server_section)\n self._sections[self._server_hash(server_name)] = server_section\n\n for channel_section in server_info['channels']:\n if channel_section is None:\n continue\n channel_section.name = server_name\n self._init_section_id(channel_section)\n self._sections[self._channel_hash(server_name)] = channel_section",
"def set_pool_size(self, pool_size):\n self._semantic_decoder.set_pool_size(pool_size)\n if self._instance_decoder is not None:\n self._instance_decoder.set_pool_size(pool_size)",
"def setNumThreads(self, num):\n # implement ThreadPool interface\n assert not self.prepared, \"You can't change number of threads for working server\"\n self.threads = num",
"def setNumThreads(self, num):\r\n # implement ThreadPool interface\r\n assert not self.prepared, \"You can't change number of threads for working server\"\r\n self.threads = num",
"def reset_servers(self):\n\n servers = []\n for _, g in self.groups.items():\n g.get_servers(servers)\n\n for s in servers:\n self.servers[s.vid] = s",
"def set_connections(self, connections: dict):\n self._connections = connections",
"def __init__(self,server_list):\n self.workers=[]\n self.worker_by_name={}\n worker_id = 1\n for host,port in server_list:\n # Add the uid here can help with port conflicts, but only works\n # on Unix clusters. We really need to work out a daemon service\n # model that makes the port mess transparent.\n port = port #+ os.getuid()\n new_worker = sync_cluster.standard_sync_client(host,port,worker_id)\n self.workers.append(new_worker)\n self.worker_by_name[host] = new_worker\n worker_id = worker_id + 1",
"def add_servers(self, servers: List[Server]):\n pass",
"def set_workers(self, nworkers):\n\n self.max_workers = nworkers",
"def pool_size(self, pool_size: ConfigNodePropertyInteger):\n\n self._pool_size = pool_size",
"def init_poolmanager(self, connections, maxsize, block=False):\n ctx = ssl.create_default_context()\n ctx.set_ciphers(\"DEFAULT@SECLEVEL=1\")\n\n # FIX #33770129\n # https://stackoverflow.com/questions/33770129/how-do-i-disable-the-ssl-check-in-python-3-x\n ctx.check_hostname = False\n ctx.verify_mode = ssl.CERT_NONE\n\n self.poolmanager = poolmanager.PoolManager(\n num_pools=connections,\n maxsize=maxsize,\n block=block,\n ssl_version=ssl.PROTOCOL_TLS,\n ssl_context=ctx,\n )",
"def mset(self, mapping):\n servers = {}\n for key, value in mapping.items():\n server_name = self.get_server_name(key)\n servers.setdefault(server_name, [])\n servers[server_name].append((key, value))\n for name, items in servers.items():\n self.connections[name].mset(dict(items))\n return True",
"def limit_num_clients(self, limit_num_clients):\n\n self._limit_num_clients = limit_num_clients",
"def __init__(self, pool_size: float = 10):\n self.pool_size = pool_size",
"def _set_processes(self, processes: int = 1):\n self.__processes = processes",
"def setNameservers(self, nameserver):\n # type: (tp.Any)->None\n\n self.validateOne('nameservers', self._valid['nameservers'], nameserver)\n self._ifAttributes['nameservers'] = nameserver",
"def set_pooling_options(opts, pooling_options=None):\n if not (isinstance(pooling_options, dict)):\n raise Exception(\"`pooling_options` must be a dictionary\")\n\n if (pooling_options is not None):\n for (option_name, value) in pooling_options.items():\n opt = opts.pooling_options.add()\n opt.option = option_name\n opt.value = value\n\n return opts",
"def create_ldap_server_pool_obj(self,\n ldap_servers: typing.List[str] = None) -> ldap3.ServerPool:\n server_pool = ldap3.ServerPool(\n ldap_servers,\n pool_strategy=self.server_pool_strategy.upper(),\n active=self.server_pool_active,\n exhaust=self.server_pool_exhaust\n )\n return server_pool",
"def pool_size(self):\n if self.options.pool_size == 0:\n return 2 * len(self._server_list)\n return self.options.pool_size",
"def set_cpus(self, num_cpus: int) -> None:\n if self.batch:\n if self.launcher in [\"pbs\", \"cobalt\"]:\n if hasattr(self, \"batch_settings\") and self.batch_settings:\n if hasattr(self.batch_settings, \"set_ncpus\"):\n self.batch_settings.set_ncpus(num_cpus)\n if self.launcher == \"slurm\":\n if hasattr(self, \"batch_settings\") and self.batch_settings:\n if hasattr(self.batch_settings, \"set_cpus_per_task\"):\n self.batch_settings.set_cpus_per_task(num_cpus)\n\n for db in self.dbnodes:\n db.run_settings.set_cpus_per_task(num_cpus)\n if db.is_mpmd and hasattr(db.run_settings, \"mpmd\"):\n for mpmd in db.run_settings.mpmd:\n mpmd.set_cpus_per_task(num_cpus)",
"def set_max_clients(self, clients: int = 50_000) -> None:\n self.set_db_conf(\"maxclients\", str(clients))",
"def _set_requestor(self, pool_options):\n # We had been importing this at the top of the module, but that seemed\n # to break some CI environments\n import requests\n\n if not pool_options['enable']:\n self._requestor = requests\n return\n\n session = requests.Session()\n adapter = requests.adapters.HTTPAdapter(\n pool_block=pool_options['block'],\n pool_connections=pool_options['number'],\n pool_maxsize=pool_options['maxsize'],\n )\n logger.info(\n 'Created connection pool (block={}, number={}, maxsize={})'.format(\n pool_options['block'],\n pool_options['number'],\n pool_options['maxsize']))\n\n prefix = _get_protocol_prefix(self.api_root)\n if prefix:\n session.mount(prefix, adapter)\n logger.info('Mounted connection pool for \"{}\"'.format(prefix))\n else:\n session.mount('http://', adapter)\n session.mount('https://', adapter)\n logger.info(\n 'Could not find protocol prefix in API root, mounted '\n 'connection pool on both http and https.')\n\n self._requestor = session"
] |
[
"0.7319679",
"0.680685",
"0.6783699",
"0.6467647",
"0.6271275",
"0.615868",
"0.60961",
"0.60128826",
"0.5960502",
"0.5913157",
"0.58820456",
"0.5873742",
"0.5798574",
"0.5772111",
"0.5718808",
"0.5644137",
"0.55850923",
"0.55629134",
"0.5554513",
"0.55439126",
"0.55362344",
"0.5525065",
"0.5522727",
"0.55188817",
"0.5486031",
"0.5478364",
"0.54553723",
"0.5425725",
"0.5423926",
"0.5411814"
] |
0.7087884
|
1
|
Get statistics from each of the servers.
|
def get_stats(self):
    data = []
    for server in self.servers:
        stats = yield server.get_stats()
        data.append(stats)
    raise StopIteration(data)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def getServerStats():\n return _xmlUrlToDict(serverString + \"/rest/stats\", int)",
"def get_stats(self):\n\n\t\tserver_data = {}\n\n\t\tyield self.sendall(\"stats\\r\\n\")\n\n\t\twhile True:\n\t\t\tline = yield self.read_line()\n\n\t\t\tif not line or line.strip() == \"END\":\n\t\t\t\tbreak\n\n\t\t\t_stat, name, value = line.split(' ', 2)\n\t\t\tserver_data[name] = value\n\n\t\traise StopIteration(server_data)",
"def get_all_stats(self) -> Dict[str, Any]:\n return self.http.get(self.config.paths.stat)",
"def get_host_stats(self):\n status, data, errors, messages = self._make_get_request(CraftyAPIRoutes.SERVER_STATS)\n \n if status == 200:\n return data\n elif status == 500:\n self._check_errors(errors, messages)",
"def server_agent_statistics(ctx):\n data = ctx.obj.get_agent_statistics()\n output_json_data(data)",
"def get_servers():\n all_servers = []\n start = 0\n size = 100\n\n while True:\n params = {\n 'start': start,\n 'size': size,\n 'names': 1,\n 'cdata': 1\n }\n\n xml_content = _call(\n servers_base_url + 'get_server_list.php',\n parser='xml',\n params=params\n )\n\n servers = [Server.load(server_node) for server_node in xml_content.xpath('/result/server')]\n\n if not servers:\n break\n\n all_servers.extend(servers)\n\n if servers[-1].is_last:\n break\n\n start += size\n\n _set_servers_location(all_servers)\n _set_server_event(all_servers)\n\n all_servers.sort(\n key=lambda s: s.players.current,\n reverse=True\n )\n\n return all_servers",
"def get_all_servers(self) -> List[Server]:\n pass",
"def stats(self, **kwargs):\n return stats.stats(self._host, self._session, **kwargs)",
"def server_stats():\n out = subprocess.check_output(cmd_preamble + [\"admin\", \"stats\"])\n return out.decode()",
"def get_servers(self):\n\t\treturn self.__servers",
"def stats(self):\n url = client.build_url('stats')\n _, res_json = client.get(url, headers=self.headers)\n\n return res_json",
"def get_host_stats(self, refresh=False):",
"def describe_servers(ServerName=None, NextToken=None, MaxResults=None):\n pass",
"def check_servers (self):\n results = []\n\n for server_name in self.servers_sumtimes.keys():\n server = self._get_server(server_name)\n\n time = float(self.servers_sumtimes[server_name]) / self.servers_counts[server_name]\n server_time, created = models.HBase_ServerTime.objects.get_or_create(server=server)\n server_avg_state = server_time.averager_state\n avg_time = server_avg_state.value()\n\n if avg_time is not None and self.is_anomaly(avg_time, time):\n msg=\"\"\"\nAverage server's probe time exceeded average + threshold (%.2f%%). Values:\n- server: %s\n- history response time %.2f ms\n- probe time %.2f ms\n\"\"\" % (anomaly_threshold * 100.0, server_name, avg_time, time)\n result = ProcessAnomaly(is_region=False, object_name=server_name,\n text=\"Request time %.2f ms (avg is %.2f)\" % (time, avg_time),\n description=msg)\n results.append(result)\n else:\n # Normal value, update state\n self.averager.update(time, server_avg_state)\n server_time.averager_state = server_avg_state\n server_time.save()\n return results",
"def get_cache_stats():\n hostnames = get_memcached_hosts()\n\n if not hostnames:\n return None\n\n all_stats = []\n\n for hostname in hostnames:\n try:\n host, port = hostname.split(':')\n except ValueError:\n # Assume this is a hostname without a port.\n socket_af = socket.AF_INET\n host = hostname\n port = 11211\n\n if host == 'unix':\n socket_af = socket.AF_UNIX\n connect_param = port\n else:\n socket_af = socket.AF_INET\n connect_param = (host, int(port))\n\n s = socket.socket(socket_af, socket.SOCK_STREAM)\n\n try:\n s.connect(connect_param)\n except socket.error:\n logger.error('Unable to connect to \"%s\"' % hostname)\n s.close()\n continue\n\n s.send(b'stats\\r\\n')\n data = s.recv(2048).decode('ascii')\n s.close()\n\n stats = {}\n\n for line in data.splitlines():\n info = line.split(' ')\n\n if info[0] == 'STAT' and len(info) == 3:\n try:\n value = int(info[2])\n except ValueError:\n value = info[2]\n\n stats[info[1]] = value\n\n if stats['cmd_get'] == 0:\n stats['hit_rate'] = 0\n stats['miss_rate'] = 0\n else:\n stats['hit_rate'] = 100 * stats['get_hits'] / stats['cmd_get']\n stats['miss_rate'] = 100 * stats['get_misses'] / stats['cmd_get']\n\n all_stats.append((hostname, stats))\n\n return all_stats",
"def get_all(self):\n\n servers = self._scoped_servers()\n servers = [{u'id': x.id, u'name': x.name} for x in servers]\n return self.format_collection(servers)",
"def run(self):\n self._list_servers()",
"def get_servers_info(self):\n return self.mrr_obj.get('/info/servers')",
"def servers(self):\n return self._servers",
"def get_servers(self) -> dict:\n uri = f\"{self.uri}/servers\"\n\n response = self.request(uri=uri)\n return response.json()",
"def servers(self, details=True, **query):\n srv = _server.ServerDetail if details else _server.Server\n return list(self._list(srv, paginated=True, **query))",
"def get_all_stats():\n\n return get_component(CachingPackage.COMPONENT_NAME).get_all_stats()",
"def get_stats(self, service, bigips, stat_keys):\n collected_stats = {}\n for stat_key in stat_keys:\n collected_stats[stat_key] = 0\n\n virtual = self.service_adapter.get_virtual(service)\n part = virtual[\"partition\"]\n for bigip in bigips:\n try:\n vs_stats = self.vs_helper.get_stats(\n bigip,\n name=virtual[\"name\"],\n partition=part,\n stat_keys=stat_keys)\n for stat_key in stat_keys:\n if stat_key in vs_stats:\n collected_stats[stat_key] += vs_stats[stat_key]\n\n except Exception as e:\n # log error but continue on\n LOG.error(\"Error getting virtual server stats: %s\", e.message)\n\n return collected_stats",
"def get_stats(self):\n return self.manager.get_stats(self)",
"def _get_stats(self):\n self.stats = set()\n self._bstats = set()\n self._h_bstats = set()\n self._tstats = set()\n self._ftstats = set()\n for cl in self.data_classes:\n for stat in cl._bstats:\n self.stats.add(stat)\n self._bstats.add(stat)\n for stat in cl._hbstats:\n self.stats.add(stat)\n self._h_bstats.add(stat)\n for stat in cl._tstats:\n self._tstats.add(stat)\n self.stats.add(stat)\n try:\n trips = cl.triples\n f_stats = cl.read_tfstats(trips,eq=False,lande=False)\n for trip in f_stats:\n for stat in f_stats[trip]:\n self._ftstats.add(stat)\n self.stats.add(stat)\n except:\n AttributeError",
"def stats(self, **kwargs):\n return self.client.api.stats(self.id, **kwargs)",
"def get_all(self):\r\n ret = []\r\n for cache_name, stat in self.stats_per_cache.items():\r\n ret.append({\r\n 'cache_name': cache_name,\r\n 'num_hits': len(stat.hit_targets),\r\n 'num_misses': len(stat.miss_targets),\r\n 'hits': stat.hit_targets,\r\n 'misses': stat.miss_targets\r\n })\r\n return ret",
"def update_server_stats(self):\n try:\n aio.run(self.client.execute, 'ANALYZE')\n except Exception:\n pass # swallow; CrateDB 4.1.0+ is required to run ANALYZE",
"def get_statistics(self):\n return self.results",
"def get_stats(self):\n # pool.map needs an arg for each function that will be run\n dmx_mean = [self.dmx.mean()] * len(self.genome_paths)\n with ProcessingPool() as pool:\n results = pool.map(genome.mp_stats, self.genome_paths, dmx_mean)\n self.stats = pd.concat(results)\n self.stats.to_csv(self.stats_path)"
] |
[
"0.7074891",
"0.699148",
"0.69702584",
"0.67387414",
"0.67188823",
"0.6682422",
"0.6670441",
"0.6586258",
"0.6582014",
"0.65445226",
"0.6522328",
"0.65184695",
"0.65160424",
"0.6503475",
"0.64788616",
"0.64288133",
"0.6426006",
"0.6406493",
"0.64061654",
"0.6395011",
"0.63823503",
"0.63767165",
"0.6375878",
"0.6344752",
"0.6337555",
"0.6332784",
"0.630148",
"0.62788826",
"0.6278066",
"0.6257436"
] |
0.7845391
|
0
|
Sends a command to the server to atomically increment the value for ``key`` by ``delta``, or by 1 if ``delta`` is unspecified. Returns None if ``key`` doesn't exist on server, otherwise it returns the new value after incrementing. Note that the value for ``key`` must already exist in the memcache, and it must be the string representation of an integer.
>>> mc.set("counter", "20")  # returns True, indicating success
1
>>> mc.incr("counter")
21
>>> mc.incr("counter")
22
Overflow on server is not checked. Be aware of values approaching 2**32. See ``decr``.
|
def incr(self, key, delta=1):
    return self._incrdecr("incr", key, delta)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def incr(self, key, delta=1, callback=None):\n self._incrdecr(\"incr\", key, delta, callback=callback)",
"def incr(self, key, delta=1):\r\n if delta < 0:\r\n return self._incrdecr(\"decr\", key, -delta)\r\n else:\r\n return self._incrdecr(\"incr\", key, delta)",
"def incr(self, key, delta=1):\n try:\n key = self.prepare_key(key)\n return super(CacheClass, self).incr(key, delta)\n except Exception as err:\n return self.warn_or_error(err, delta)",
"def incr(self, key, delta=1, version=None, client=None):\r\n return self._incr(key=key, delta=delta, version=version, client=client)",
"def incr_version(self, key, delta=1, version=None, client=None):\r\n\r\n if client is None:\r\n client = self.get_client(write=True)\r\n\r\n if version is None:\r\n version = self._backend.version\r\n\r\n old_key = self.make_key(key, version)\r\n value = self.get(old_key, version=version, client=client)\r\n\r\n try:\r\n ttl = client.ttl(old_key)\r\n except ConnectionError:\r\n raise ConnectionInterrupted(connection=client)\r\n\r\n if value is None:\r\n raise ValueError(\"Key '%s' not found\" % key)\r\n\r\n if isinstance(key, CacheKey):\r\n new_key = self.make_key(key.original_key(), version=version + delta)\r\n else:\r\n new_key = self.make_key(key, version=version + delta)\r\n\r\n self.set(new_key, value, timeout=ttl, client=client)\r\n self.delete(old_key, client=client)\r\n return version + delta",
"def inc(self, key, delta=1):\n if self.has(key):\n _filter = {'_id': key}\n document = {'$inc': {'value': delta}}\n try:\n self.collection.update(_filter, document)\n except PyMongoError:\n return None\n else:\n self.add(key, delta)\n return self.get(key)",
"def incr(self, key, value=1):\n try:\n self[key] += value\n except TypeError:\n raise TypeError('Tried to increment non-numeric key {!r} ({!r}) by {}'.format(\n key, self[key], value\n ))\n except KeyError:\n self[key] = value\n\n return u''",
"def _incrdecr(self, cmd, key, delta):\n\t\tcheck_key(key)\n\t\tserver, key = yield self._get_server_for(key)\n\t\tif not server:\n\t\t\treturn\n\n\t\tcmd = \"%s %s %d\\r\\n\" % (cmd, key, delta)\n\n\t\ttry:\n\t\t\tyield server.sendall(cmd)\n\t\t\tline = yield server.read_line()\n\t\t\traise StopIteration(int(line))\n\t\texcept tcp.ConnectionClosedException:\n\t\t\tserver.mark_dead()",
"def incrdecr(con,command,key,value=1):\n # yy=atpic.log.setname(xx,'incrdecr')\n thecommand=\"{command} {key} {value}\\r\\n\".format(command=command,key=key,value=value)\n con.send(thecommand.encode('utf-8'))\n line=get_line(con)\n # atpic.log.debug(yy,line)\n if line==b'NOT_FOUND':\n return None\n else:\n return int(line.strip())",
"def add(name, key, delta):\n num_shards = CounterShardConfig.get_num_shards(name)\n def txn():\n index = random.randint(0, num_shards - 1)\n shard_key = '%s-%s-%s' % (name, key, str(index))\n counter = CounterShard.get_by_key_name(shard_key)\n if counter is None:\n counter = CounterShard(key_name=shard_key, name=name, reference_key=key)\n counter.count += delta\n counter.put()\n db.run_in_transaction(txn)\n \n cache_key = make_key('counter', name, key)\n cached = memcache.get(cache_key)\n if cached != None:\n memcache.set(cache_key, cached + delta)",
"def decr(self, key, delta=1):\r\n if delta < 0:\r\n return self._incrdecr(\"incr\", key, -delta)\r\n else:\r\n return self._incrdecr(\"decr\", key, delta)",
"def inc(self, key):\n if key in self.key_dict:\n self.increase(key)\n return\n self.key_dict[key] = key_node = KeyNode(key, 1)\n value_node = self.value_dict.get(1)\n if value_node is None:\n self.value_dict[1] = value_node = ValueNode(1, None, self.head)\n if self.head:\n self.head.prev = value_node\n self.head = value_node\n if self.last is None:\n self.last = value_node\n self.insert_key_node(key_node)",
"def testIncrementDecrement(self):\n\n memcache.incr('unknown_key')\n assert memcache.get('unknown_key') == None\n memcache.set('counter', 0)\n assert memcache.get('counter') == 0\n memcache.incr('counter')\n assert memcache.get('counter') == 1\n memcache.incr('counter', delta=2)\n assert memcache.get('counter') == 3\n memcache.decr('counter')\n assert memcache.get('counter') == 2\n memcache.decr('counter', 2)\n assert memcache.get('counter') == 0\n memcache.incr('second_counter', initial_value=10)\n assert memcache.get('second_counter') == 11\n memcache.decr('third_counter', initial_value=10)\n assert memcache.get('third_counter') == 9\n\n # This should cause an error message, because zero deltas are not\n # allowed.\n memcache.incr('counter', delta=0)\n\n memcache.set('lcounter', long(20))\n assert memcache.get('lcounter') == long(20)\n memcache.incr('lcounter')\n assert memcache.get('lcounter') == long(21)",
"def handle_incr(self, api, command):\n key = self._sandboxed_key(api.sandbox_id, command.get('key'))\n if not (yield self.check_keys(api, key)):\n returnValue(self._too_many_keys(command))\n amount = command.get('amount', 1)\n try:\n value = yield self.redis.incr(key, amount=amount)\n except Exception, e:\n returnValue(self.reply(command, success=False, reason=unicode(e)))\n returnValue(self.reply(command, value=int(value), success=True))",
"def decr(self, key, delta=1, version=None, client=None):\r\n return self._incr(key=key, delta=-delta, version=version,\r\n client=client)",
"def incrby(self, key, value, timeBucket=None,\n retentionSecs=None, labels={}):\n params = [key, value]\n self.appendTimeBucket(params, timeBucket)\n self.appendRetention(params, retentionSecs)\n self.appendLabels(params, labels)\n\n return self.execute_command(self.INCRBY_CMD, *params)",
"def inc(self, key: str) -> None:\n if key in self.keyCnt:\n self.changeKey(key, 1)\n else:\n self.keyCnt[key] = 1\n # 说明没有计数为1的节点,在self.head后面加入\n if self.head.next.cnt != 1:\n self.addNodeAfter(Node(1), self.head)\n self.head.next.keySet.add(key)\n self.cntKey[1] = self.head.next",
"async def incr(req):\n key, ttl, err = validate_params(req)\n if err is not None:\n return err\n\n counter = incr_with_ttl(key, ttl)\n return web.json_response(data={'status': 'success', 'counter': counter})",
"def decr(self, key, delta=1):\n\t\treturn self._incrdecr(\"decr\", key, delta)",
"def decr(self, key, delta=1, callback=None):\n self._incrdecr(\"decr\", key, delta, callback=callback)",
"def test_pos_operate_increment_nonexistent_key(self):\n key = (\"test\", \"demo\", \"non_existentkey\")\n llist = [{\"op\": aerospike.OPERATOR_INCR, \"bin\": \"age\", \"val\": 5}]\n\n self.as_connection.operate(key, llist)\n\n (key, _, bins) = self.as_connection.get(key)\n\n assert bins == {\"age\": 5}\n\n self.as_connection.remove(key)",
"def increase(self, key:str) -> None:\n\n hash_key = self.hash_key(key)\n head = self.array[hash_key] \n \n while head.next: \n if head.next.key == key:\n head.next.value +=1\n head = head.next",
"def dec(self, key, delta=1):\n return self.inc(key, -delta)",
"def decr(self, key, delta=1):\n try:\n key = self.prepare_key(key)\n return super(CacheClass, self).decr(key, delta)\n except Exception as err:\n return self.warn_or_error(err, delta)",
"def incr_underhanded(self, key, value):\n for plugin in self.server.plugins:\n if isinstance(plugin,MemcachedPlugin) and not plugin is self:\n v = plugin._incr_data(key,value)\n if v is False:\n return False\n return v",
"def inc(self, key: str) -> None:\n if key not in self.bucket_of_keys:\n self.bucket_of_keys[key] = self.buckets.insert(self.buckets.begin(), Node(0, {key}))\n bucket, next_bucket = self.bucket_of_keys[key], self.bucket_of_keys[key].next\n if next_bucket is self.buckets.end() or next_bucket.value > bucket.value + 1:\n next_bucket = self.buckets.insert(next_bucket, Node(bucket.value + 1, set()))\n next_bucket.keys.add(key)\n self.bucket_of_keys[key] = next_bucket\n\n bucket.keys.remove(key)\n if not bucket.keys:\n self.buckets.erase(bucket)",
"def inc(self, key):\n if key in self.keyCountMap:\n self._updateCount(key, 1)\n else:\n self.keyCountMap[key] = 1\n if self.head.next.count != 1:\n self._addBucketAfter(Bucket(1), self.head)\n self.head.next.keySet.add(key)\n self.countBucketMap[1] = self.head.next",
"def increment_metric_counter(metric_name, redis_db):\n if TEST_MODE:\n print 'Simulate redis incremet, key is %s' % metric_name\n return\n if redis_db:\n try:\n redis_db.incr(metric_name)\n except Exception as e:\n logger.warning(\"Failed to increment redis metric '%s' \"\n \"with exception '%s'\", metric_name, e)",
"def _increment_count(self, key):\n\n if not self._count.has_key(key):\n self._count[key] = 0\n\n self._count[key] += 1",
"def _increment_count(self, key):\n\n if not self._count.has_key(key):\n self._count[key] = 0\n\n self._count[key] += 1"
] |
[
"0.77754945",
"0.77449256",
"0.76950294",
"0.7513234",
"0.6945156",
"0.6858105",
"0.6754959",
"0.66525286",
"0.63007706",
"0.6083981",
"0.599613",
"0.59376466",
"0.59372324",
"0.5911901",
"0.5840948",
"0.5809797",
"0.57680327",
"0.5759166",
"0.56261635",
"0.5539868",
"0.54871196",
"0.547032",
"0.5455945",
"0.5408776",
"0.5366712",
"0.53515065",
"0.53492385",
"0.526243",
"0.52577853",
"0.52577853"
] |
0.77985793
|
0
|
For each key in data, determine which server that key should be mapped to. Returns a dict. Keys are `_ServerConnection` instances; for each server, the value is a list of (prefixed_key, original_key) tuples for all values which belong on that server.
|
def _map_keys_to_servers(self, key_iterable, key_prefix):
    # Only check the prefix once
    key_extra_len = len(key_prefix)
    if key_prefix:
        check_key(key_prefix)
    # server -> list of (prefixed_key, value)
    server_keys = {}
    deprefix = {}
    # build up a list for each server of all the keys we want.
    for orig_key in key_iterable:
        if type(orig_key) is tuple:
            # Tuple of hashvalue, key ala _get_server_for(). The caller is essentially
            # telling us what server to stuff this on.
            str_orig_key = str(orig_key[1])
            # Ensure call to _get_server_for gets a Tuple as well.
            # Gotta pre-mangle key before hashing to a server. Returns the mangled key.
            server, key = yield self._get_server_for((orig_key[0], key_prefix + str_orig_key))
        else:
            str_orig_key = str(orig_key)  # set_multi supports int / long keys.
            server, key = yield self._get_server_for(key_prefix + str_orig_key)
        # Now check to make sure key length is proper ...
        check_key(str_orig_key, key_extra_len=key_extra_len)
        if not server:
            continue
        if server not in server_keys:
            server_keys[server] = []
        server_keys[server].append((key, orig_key))
        deprefix[key] = orig_key
    raise StopIteration((server_keys, deprefix))
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def _map_and_prefix_keys(self, key_iterable, key_prefix):\r\n # Check it just once ...\r\n key_extra_len=len(key_prefix)\r\n #changed by steve\r\n #if key_prefix:\r\n #self.check_key(key_prefix)\r\n\r\n # server (_Host) -> list of unprefixed server keys in mapping\r\n server_keys = {}\r\n\r\n prefixed_to_orig_key = {}\r\n # build up a list for each server of all the keys we want.\r\n for orig_key in key_iterable:\r\n if isinstance(orig_key, tuple):\r\n # Tuple of hashvalue, key ala _get_server(). Caller is essentially telling us what server to stuff this on.\r\n # Ensure call to _get_server gets a Tuple as well.\r\n str_orig_key = str(orig_key[1])\r\n server, key = self._get_server((orig_key[0], key_prefix + str_orig_key)) # Gotta pre-mangle key before hashing to a server. Returns the mangled key.\r\n else:\r\n str_orig_key = str(orig_key) # set_multi supports int / long keys.\r\n server, key = self._get_server(key_prefix + str_orig_key)\r\n\r\n # Now check to make sure key length is proper ...\r\n #changed by steve\r\n #self.check_key(str_orig_key, key_extra_len=key_extra_len)\r\n key = self.check_key(key_prefix + str_orig_key)\r\n\r\n if not server:\r\n continue\r\n\r\n if server not in server_keys:\r\n server_keys[server] = []\r\n server_keys[server].append(key)\r\n prefixed_to_orig_key[key] = orig_key\r\n\r\n return (server_keys, prefixed_to_orig_key)",
"def get_multi(self, keys, key_prefix=''):\n\n\t\tserver_keys, deprefix = yield self._map_keys_to_servers(keys, key_prefix)\n\n\t\t# send out all requests on each server before reading anything\n\t\tdead_servers = []\n\n\t\tfor server in server_keys.iterkeys():\n\t\t\ttry:\n\t\t\t\tserver.sendall(\"get %s\\r\\n\" % \" \".join(\n\t\t\t\t\tprefixed_key for prefixed_key, _original_key in server_keys[server]\n\t\t\t\t))\n\t\t\texcept tcp.ConnectionClosedException:\n\t\t\t\tserver.mark_dead()\n\t\t\t\tdead_servers.append(server)\n\n\t\t# if any servers died on the way, don't expect them to respond.\n\t\tfor server in dead_servers:\n\t\t\tdel server_keys[server]\n\n\t\tretvals = {}\n\t\tfor server in server_keys.iterkeys():\n\t\t\ttry:\n\t\t\t\tline = yield server.read_line()\n\t\t\t\twhile line and line != 'END':\n\t\t\t\t\tif line[:5] == \"VALUE\":\n\t\t\t\t\t\t_resp, rkey, flags, data_len = line.split()\n\t\t\t\t\t\tvalue = self._parse_value(\n\t\t\t\t\t\t\t(yield server.read_exactly(int(data_len) + 2))[:-2],\n\t\t\t\t\t\t\tint(flags)\n\t\t\t\t\t\t)\n\n\t\t\t\t\t\tretvals[deprefix[rkey]] = value\n\n\t\t\t\t\tline = yield server.read_line()\n\n\t\t\texcept tcp.ConnectionClosedException:\n\t\t\t\tserver.mark_dead()\n\n\t\traise StopIteration(retvals)",
"def mget(self, keys, *args):\n args = list_or_args(keys, args)\n server_keys = {}\n ret_dict = {}\n for key in args:\n server_name = self.get_server_name(key)\n server_keys[server_name] = server_keys.get(server_name, [])\n server_keys[server_name].append(key)\n for server_name, sub_keys in iteritems(server_keys):\n values = self.connections[server_name].mget(sub_keys)\n ret_dict.update(dict(zip(sub_keys, values)))\n result = []\n for key in args:\n result.append(ret_dict.get(key, None))\n return result",
"def create_servers_from_data(self, data, orgs):\n item_dict = {}\n for (item_name, data) in data.items():\n item_dict[item_name] = DSServer(\n ip_addr=data['ip_addr'],\n mac_addr=data['mac_addr'],\n connected_at=data['connected_at'],\n last_echo_at=data['last_echo_at'],\n organization=orgs.get(data['organization']))\n item_dict[item_name].save()\n return item_dict",
"def get_irc_servers(self):\n servers = {}\n\n def get_server_info(server_name):\n server_name = canonicalize_server_name(server_name)\n\n try:\n server_info = servers[server_name]\n except KeyError:\n servers[server_name] = server_info = {\n 'channels': []\n }\n\n return server_info\n\n for key, value in self._sections.iteritems():\n section = self._sections[key]\n if re.match(ur'^ server ', key):\n server_info = get_server_info(section.name)\n server_info['server'] = section\n elif re.match(ur'^ channel ', key):\n server_info = get_server_info(section.name)\n server_info['channels'].append(section)\n\n rv = []\n for key, value in servers.iteritems():\n server_info = servers[key]\n if 'server' in server_info:\n rv.append(server_info)\n return rv",
"def set_multi(self, mapping, expiry_time=0, key_prefix='', min_compress_len=0):\n\n\t\tserver_keys, deprefix = yield self._map_keys_to_servers(mapping.iterkeys(), key_prefix)\n\n\t\t# send out all requests on each server before reading anything\n\t\tdead_servers = []\n\n\t\tfor server in server_keys.iterkeys():\n\t\t\tcommands = []\n\t\t\tfor prefixed_key, original_key in server_keys[server]:\n\t\t\t\tstored_info = self._value_to_stored(mapping[original_key], min_compress_len)\n\t\t\t\tif stored_info is None:\n\t\t\t\t\t# If it's not storable due to length, just ignore it\n\t\t\t\t\tcontinue\n\n\t\t\t\tflags, stored = stored_info\n\t\t\t\tcommands.append(\"set %s %d %d %d\\r\\n%s\\r\\n\" % (\n\t\t\t\t\tprefixed_key,\n\t\t\t\t\tflags,\n\t\t\t\t\texpiry_time,\n\t\t\t\t\tlen(stored),\n\t\t\t\t\tstored\n\t\t\t\t))\n\n\t\t\ttry:\n\t\t\t\tserver.send_cmds(''.join(commands))\n\t\t\texcept tcp.ConnectionClosedException:\n\t\t\t\tserver.mark_dead()\n\t\t\t\tdead_servers.append(server)\n\n\t\t# if any servers died on the way, don't expect them to respond.\n\t\tfor server in dead_servers:\n\t\t\tdel server_keys[server]\n\n\t\tnotstored = [] # original keys.\n\t\tfor server, keys in server_keys.iteritems():\n\t\t\ttry:\n\t\t\t\tfor key in keys:\n\t\t\t\t\tline = server.read_line()\n\t\t\t\t\tif line == 'STORED':\n\t\t\t\t\t\tcontinue\n\t\t\t\t\telse:\n\t\t\t\t\t\tnotstored.append(deprefix[key]) #un-mangle.\n\t\t\texcept tcp.ConnectionClosedException:\n\t\t\t\tserver.mark_dead()\n\n\t\traise StopIteration(notstored)",
"def mset(self, mapping):\n servers = {}\n for key, value in mapping.items():\n server_name = self.get_server_name(key)\n servers.setdefault(server_name, [])\n servers[server_name].append((key, value))\n for name, items in servers.items():\n self.connections[name].mset(dict(items))\n return True",
"def get_server_descriptions(self) -> Dict[Union[Tuple[str, int], Any], ServerDescription]:\n return {address: ServerDescription(address) for address in self.seeds}",
"def Servers(self, server=None):\n if server:\n self.current = server\n return \"successful\"\n\n servers = []\n for x in XbmcServers.select():\n servers.append({'name': x.name, 'id': x.id})\n if len(servers) < 1:\n return\n return {'current': self.current, 'servers': servers}",
"def get_servers(self) -> dict:\n uri = f\"{self.uri}/servers\"\n\n response = self.request(uri=uri)\n return response.json()",
"def _get_connections(self) -> _ConnectionsMap:\n seen: Dict[int, Any] = {}\n for parent in self.target.ancestors:\n if not isinstance(parent, NodeInstance):\n continue\n if parent is self.target.root:\n break\n if self.operation_host:\n self._get_connection(self.operation_host, parent, seen)\n self._get_connection(self.target.root, parent, seen)\n # get the rest of the default connections\n self._get_connection(self.target.root, None, seen)\n\n # reverse so nearest relationships replace less specific ones that have matching names\n connections = _ConnectionsMap( # the list() is for Python 3.7\n (rel.name, rel) for rel in reversed(list(seen.values()))\n )\n return connections",
"def _get_all_servers(self, key):\n hints = {}\n hosts = []\n for vm in NovaScheduler.vms:\n if vm['state'] == 'active':\n hosts.append(vm['id'])\n if len(hosts) > 0:\n hints[key] = hosts\n LOG.info(\"%s:%s() %s: %s\", self.__class__.__name__,\n sys._getframe().f_code.co_name, key, hints)\n return hints",
"def grab_server_data(self):\n\n for server, channels in self.servers.items():\n for channel in channels:\n cutoff = self.get_last_scrape_date(server, channel)\n print('grabbing data for {} : {} back to {} ...'.format(server, channel, cutoff.isoformat()))\n message_data = self.grab_channel_data(server, channel, cutoff)\n self.merge_and_save(message_data, server, channel)",
"def group_servers(\n servers: Iterable[Server]) -> Dict[ServerGroup, List[Server]]:\n groups: Dict[ServerGroup, List[Server]] = {\n ServerGroup.INSTITUTE_ACCESS: [],\n ServerGroup.SECURE_INTERNET: [],\n ServerGroup.OTHER: [],\n }\n for server in servers:\n if isinstance(server, InstituteAccessServer):\n groups[ServerGroup.INSTITUTE_ACCESS].append(server)\n elif isinstance(server, (OrganisationServer, SecureInternetLocation)):\n groups[ServerGroup.SECURE_INTERNET].append(server)\n elif isinstance(server, CustomServer):\n groups[ServerGroup.OTHER].append(server)\n else:\n raise TypeError(server)\n return groups",
"def _get_server_for(self, key):\n\n\t\tif type(key) == tuple:\n\t\t\tserverhash, key = key\n\t\telse:\n\t\t\tserverhash = crc32(key)\n\n\t\tfor i in range(Client._SERVER_RETRIES):\n\t\t\tserver = self.buckets[serverhash % len(self.buckets)]\n\n\t\t\tif (yield server.check()):\n\t\t\t\traise StopIteration((server, key))\n\n\t\t\tserverhash = crc32(str(serverhash) + str(i))\n\n\t\traise StopIteration((None, None))",
"def mmo_shard_servers(self, mmo_connection):\n shard_servers = []\n c = mmo_connection[\"config\"].shards.find({})\n for doc in c:\n shard = doc[\"_id\"]\n for host in doc[\"host\"].split(shard + \"/\", 1)[1].split(\",\"):\n hostname, port = host.split(\":\")\n shard_servers.append({ \"shard\": shard, \"hostname\": hostname, \"port\": int(port) })\n return shard_servers",
"def tunnel_bindings(self):\n return dict((_server.remote_address, _server.local_address) for\n _server in self._server_list if\n self.tunnel_is_up[_server.local_address])",
"def normalize_server_list_json(server_list):\n myservers = dict()\n global most_fields\n #most_fields = dict()\n #most_fields = {'none': 0} # too lazy to make complex condition\n\n for server in server_list:\n \"\"\"\n Iterate over servers and cherry pick wanted variables/data\n \"\"\"\n myservers[server['name']] = {\n \"name\": server['name'],\n \"flavor_id\": server['flavor']['id'],\n \"flavor_name\": str(server['flavor']['name']),\n \"image_id\": server['image']['id'],\n \"region_name\": server['location']['region_name'],\n \"project_id\": server['location']['project']['id'],\n \"access_ip4\": server['accessIPv4'],\n \"access_ip6\": server['accessIPv6'],\n \"interface_ip4\": server['interface_ip'],\n \"created_at\": server['created_at'],\n \"updated_at\": server['updated'],\n \"terminated_at\": server['terminated_at'],\n \"status\": server['status'],\n \"power_state\": server['power_state'],\n \"provider_ip_zone\": server['RAX-PUBLIC-IP-ZONE-ID:publicIPZoneId'],\n \"host_id\": server['host_id'],\n \"id\": server['id'],\n \"tenant_id\": server['tenant_id']\n }\n\n # @TODO: move this to function add checks when some fields are missing\n if len(server['volumes']) > 0:\n i = 0\n for vol in server['volumes']:\n myservers[server['name']].update({\n \"vol\" + str(i) + '_id': vol['id'],\n \"vol\" + str(i) + '_name': vol['name'],\n \"vol\" + str(i) + '_status': vol['status'],\n \"vol\" + str(i) + '_size': vol['size'],\n \"vol\" + str(i) + '_created_at': vol['created_at'],\n \"vol\" + str(i) + '_updated_at': vol['updated_at'],\n \"vol\" + str(i) + '_type': vol['volume_type'],\n \"vol\" + str(i) + '_device': vol['device'],\n \"vol\" + str(i) + '_storage_node': vol['metadata']['storage-node'],\n #\"vol\" + str(i) + '_storage_mode': vol['metadata']['attached_mode'],\n \"vol\" + str(i) + '_server_id': vol['attachments'][0]['server_id'],\n \"vol\" + str(i) + '_attachment_id': vol['attachments'][0]['attachment_id'],\n \"vol\" + str(i) + '_host_name': vol['attachments'][0]['host_name'],\n \"vol\" + str(i) + '_volume_id': vol['attachments'][0]['volume_id'],\n \"vol\" + str(i) + '_az': vol['availability_zone']\n })\n i = i + 1\n\n else:\n myservers[server['name']].update({\n \"additional_storage\": 0\n })\n\n if int(len(myservers[server['name']])) > int(list(most_fields.values())[-1]):\n most_fields = dict()\n most_fields[server['name']] = int(len(myservers[server['name']]))\n\n # @TODO: add iteration via server['metadata'] when len > 0\n # @TODO: add iteration via server['properties'] when len > 0\n # @TODO: add iteration via server['addresses'] and dynamically add 'networks - Galaxy, public, private ..'\n\n return myservers",
"def kitero():\n return dict(hostname=hostname)",
"def _get_rekey_ddi_data(ddi_data):\n for enum, item in enumerate(ddi_data):\n ddi_data[enum] = dict((d['network'],\n dict(d, index=index))\n for (index, d) in enumerate(item))\n return ddi_data",
"def get_all_servers_maps():\n servers = get_servers()\n maps = {}\n\n for server in servers:\n if not server.map.id:\n continue\n\n if server.type.startswith('vanilla') or server.type == 'pvp':\n server_type = 'vanilla'\n else:\n server_type = server.type\n\n if server_type not in maps:\n maps[server_type] = {\n 'name': server.type_name,\n 'maps': {}\n }\n\n if server.map.id not in maps[server_type]['maps']:\n maps[server_type]['maps'][server.map.id] = server.map.name_display\n\n ret = []\n\n for game_type in maps.values():\n group = {\n 'type': 'group',\n 'label': game_type['name'],\n 'entries': []\n }\n\n for map_id, map_name in game_type['maps'].items():\n group['entries'].append({\n 'value': map_id,\n 'label': map_name\n })\n\n group['entries'] = sorted(group['entries'], key=lambda k: k['label'])\n\n ret.append(group)\n\n return sorted(ret, key=lambda k: k['label'])",
"def mmo_config_servers(self, mmo_connection):\n config_servers = []\n c = mmo_connection[\"admin\"].command(\"getCmdLineOpts\")[\"parsed\"][\"sharding\"][\"configDB\"]\n for item in c.split(\",\"):\n hostname, port = item.split(\":\")\n if \"/\" in hostname: # cfg Replset server\n hostname = hostname.partition(\"/\")[2]\n config_servers.append( { \"hostname\": hostname, \"port\": int(port) } )\n return config_servers",
"def _connection_keys(self):\n return (\n \"server\",\n \"database\",\n \"schema\",\n \"user\",\n \"authentication\",\n )",
"def __getLocalAndRemoteMachineNames(self):\n hostNameMapping = {}\n ## collect the qualified hostnames for each remote node\n for nodeId in list(set(self.runInfoDict['Nodes'])):\n hostNameMapping[nodeId.strip()] = socket.gethostbyname(nodeId.strip())\n self.raiseADebug('Host \"'+nodeId.strip()+'\" identified with IP: ', hostNameMapping[nodeId.strip()])\n\n return hostNameMapping",
"def get_all_servers_locations():\n servers = get_servers()\n locations = {}\n\n for server in servers:\n if not server.location.country_code:\n continue\n\n if server.location.continent_code not in locations:\n locations[server.location.continent_code] = {\n 'name': server.location.continent_name,\n 'countries': {}\n }\n\n if server.location.country_code not in locations[server.location.continent_code]['countries']:\n locations[server.location.continent_code]['countries'][server.location.country_code] = server.location.country_name\n\n ret = []\n\n for continent_code, continent in locations.items():\n group = {\n 'type': 'group',\n 'value': 'continent:' + continent_code,\n 'label': continent['name'],\n 'entries': []\n }\n\n for country_code, country_name in continent['countries'].items():\n group['entries'].append({\n 'value': 'country:' + country_code,\n 'label': country_name\n })\n\n group['entries'] = sorted(group['entries'], key=lambda k: k['label'])\n\n ret.append(group)\n\n ret = sorted(ret, key=lambda k: k['label'])\n\n # Extra location filters\n ret.append({\n 'value': 'continent:eu+continent:na',\n 'label': 'Europe + North America'\n })\n\n return ret",
"def getConnections():\n\n c = psutil.net_connections()\n connects = {}\n\n count = 0\n for connection in c:\n conn = {}\n status = connection.status\n if status == 'ESTABLISHED' or connection.status == 'CLOSE_WAIT':\n conn['status'] = status\n conn['local'] = connection.laddr[0] + ':' + str(connection.laddr[1])\n conn['remote'] = connection.raddr[0] + ':' + str(connection.raddr[1])\n connects[count] = conn\n count += 1\n elif status == 'LISTEN':\n conn['status'] = status\n conn['local'] = connection.laddr[0] + ':' + str(connection.laddr[1])\n connects[count] = conn\n count += 1\n else:\n pass\n\n return connects",
"def get_preprocessed_connections(connections):\n fast_dico = {}\n\n # Collect all potential subflows\n for conn_id, conn in connections.iteritems():\n if conn.attr.get(co.START, None):\n for flow_id, flow in conn.flows.iteritems():\n if (flow.attr[co.SADDR], flow.attr[co.DADDR], flow.attr[co.SPORT], flow.attr[co.DPORT]) not in fast_dico:\n fast_dico[(flow.attr[co.SADDR], flow.attr[co.DADDR], flow.attr[co.SPORT], flow.attr[co.DPORT])] = []\n\n fast_dico[(flow.attr[co.SADDR], flow.attr[co.DADDR], flow.attr[co.SPORT], flow.attr[co.DPORT])] += [(conn.attr[co.START],\n float(conn.attr[co.DURATION]),\n conn_id, flow_id)]\n\n # Sort them for faster processing\n for quadruplet in fast_dico.keys():\n fast_dico[quadruplet] = sorted(fast_dico[quadruplet], key=lambda x: x[0])\n\n return fast_dico",
"def mmo_mongos_servers(self, mmo_connection):\n mongos_servers = []\n c = mmo_connection[\"config\"].mongos.find({}, { \"_id\": 1 } )\n for doc in c:\n hostname, port = doc[\"_id\"].split(\":\")\n mongos_servers.append({ \"hostname\": hostname, \"port\": int(port) })\n return mongos_servers",
"def get_server_info(p_id_guilda, p_id_server):\r\n server_list = select_data.get_guild_servers(p_id_guilda)\r\n \r\n for server in server_list:\r\n if server['id_server_sk'] == p_id_server:\r\n return_data = server\r\n break\r\n return return_data",
"def _get_one_server(self, key):\n hints = {}\n for vm in NovaScheduler.vms:\n if vm['state'] == 'active':\n hints[key] = vm['id']\n break\n LOG.info(\"%s:%s() %s: %s\", self.__class__.__name__,\n sys._getframe().f_code.co_name, key, hints)\n return hints"
] |
[
"0.671543",
"0.57453936",
"0.5713001",
"0.57047987",
"0.5527355",
"0.53337836",
"0.5226764",
"0.5178444",
"0.517593",
"0.50799996",
"0.50652796",
"0.50172985",
"0.4971343",
"0.49088967",
"0.49088296",
"0.48994195",
"0.48865232",
"0.4877507",
"0.48635",
"0.48313108",
"0.48205438",
"0.47835904",
"0.47765264",
"0.47676054",
"0.47322157",
"0.47308666",
"0.47216043",
"0.47040245",
"0.46995306",
"0.46875688"
] |
0.69429505
|
0
|
Transform value to a storable representation, returning a tuple of the flags and the new value.
|
def _value_to_stored(value, min_compress_len):
    flags = 0
    if isinstance(value, str):
        pass
    elif isinstance(value, int):
        flags |= Client._FLAG_INTEGER
        value = "%d" % value
        # Don't try to compress it
        min_compress_len = 0
    elif isinstance(value, long):
        flags |= Client._FLAG_LONG
        value = "%d" % value
        # Don't try to compress it
        min_compress_len = 0
    else:
        flags |= Client._FLAG_PICKLE
        value = pickle.dumps(value, 0)
    # Silently do not store if value length exceeds maximum
    if len(value) >= SERVER_MAX_VALUE_LENGTH:
        return None
    if min_compress_len and _SUPPORTS_COMPRESS and len(value) > min_compress_len:
        # Try compressing
        compressed_value = compress(value)
        # Only retain the result if the compression result is smaller than the original.
        if len(compressed_value) < len(value):
            flags |= Client._FLAG_COMPRESSED
            value = compressed_value
    return flags, value
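A minimal round-trip sketch of the flag scheme above (illustrative only; it assumes the Client._FLAG_* constants and SERVER_MAX_VALUE_LENGTH from the surrounding module, and calls the helper as the plain function defined here):

# Hypothetical usage: integers are flagged and serialized as decimal strings.
stored_info = _value_to_stored(12345, min_compress_len=0)
if stored_info is not None:
    flags, stored = stored_info
    assert flags & Client._FLAG_INTEGER
    assert stored == "12345"
# Oversized values are silently dropped and signalled with None.
assert _value_to_stored("x" * SERVER_MAX_VALUE_LENGTH, 0) is None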
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def _val_to_store_info(self, val, min_compress_len):\r\n flags = 0\r\n if isinstance(val, str):\r\n pass\r\n elif isinstance(val, int):\r\n flags |= Client._FLAG_INTEGER\r\n val = \"%d\" % val\r\n # force no attempt to compress this silly string.\r\n min_compress_len = 0\r\n elif isinstance(val, long):\r\n flags |= Client._FLAG_LONG\r\n val = \"%d\" % val\r\n # force no attempt to compress this silly string.\r\n min_compress_len = 0\r\n else:\r\n flags |= Client._FLAG_PICKLE\r\n file = StringIO()\r\n if self.picklerIsKeyword:\r\n pickler = self.pickler(file, protocol = self.pickleProtocol)\r\n else:\r\n pickler = self.pickler(file, self.pickleProtocol)\r\n if self.persistent_id:\r\n pickler.persistent_id = self.persistent_id\r\n pickler.dump(val)\r\n val = file.getvalue()\r\n\r\n lv = len(val)\r\n # We should try to compress if min_compress_len > 0 and we could\r\n # import zlib and this string is longer than our min threshold.\r\n if min_compress_len and _supports_compress and lv > min_compress_len:\r\n comp_val = compress(val)\r\n # Only retain the result if the compression result is smaller\r\n # than the original.\r\n if len(comp_val) < lv:\r\n flags |= Client._FLAG_COMPRESSED\r\n val = comp_val\r\n\r\n # silently do not store if value length exceeds maximum\r\n if self.server_max_value_length != 0 and \\\r\n len(val) > self.server_max_value_length: return(0)\r\n\r\n return (flags, len(val), val)",
"def IntToValue(self, v, flags):\n if (self.GetValue() != v):\n return (True, v)\n return False",
"def _to_packed(self, value):\n raise NotImplementedError",
"def pickle(self, value):\r\n\r\n if isinstance(value, bool) or not isinstance(value, integer_types):\r\n return pickle.dumps(value, self._pickle_version)\r\n\r\n return value",
"def from_value(value):\n return pickle.dumps(value)",
"def to_python(self, value):\n if value is None:\n return value\n value = super(BitOptionsField, self).to_python(value)\n return BitOptions(self.options.flags, value)",
"def pack_val(self,val,stream=''):\n try:\n return stream + struct.pack(\">\"+self.fmt,val)\n except:\n raise ValueError",
"def value(self):\n return self.flags.value",
"def compress(value):\n pickled = pickle_util.dump(value)\n return zlib.compress(pickled)",
"def TransformFlags(self) -> _n_2_t_0[bool]:",
"def StringToValue(self, s, flags):\n v = PyObjectPropertyValue(s)\n return (True, v)",
"def compress(value):\n\t# type: (Any, ) -> Any\n\n\t# sets are not processed because they cannot contain lists or bytearrays anyway.\n\n\tif isinstance(value, (tuple, list)): # tuple *can* contain mutables\n\t\treturn tuple(compress(x) for x in value)\n\telif isinstance(value, bytearray):\n\t\treturn bytes(value) # bytearray can only be bytes or List[int] right?\n\telif isinstance(value, dict):\n\t\treturn {k: compress(v) for k, v in value.items()}\n\telse:\n\t\treturn value",
"def serialize_value(self, value):\n\n return value",
"def _store_bitfield(self, value, access):\n ir_typ = self._get_bitfield_ir_typ(access, False)\n full_bitsize = ir_typ.bits\n assert access.bitshift + access.bitsize <= full_bitsize\n mask = ((1 << access.bitsize) - 1) << access.bitshift\n full_mask = (1 << full_bitsize) - 1\n inv_mask = full_mask ^ mask\n\n assert value.ty.is_integer\n\n # Optionally cast value:\n if value.ty is not ir_typ:\n value = self.builder.emit_cast(value, ir_typ)\n\n # Load memory value:\n # TODO: volatile used to enforce struct in memory.\n # Should not be required?\n loaded = self.builder.emit_load(access.address, ir_typ, volatile=True)\n\n # Shift value:\n if access.bitshift:\n value = self.builder.emit_binop(\n value, \"<<\", access.bitshift, ir_typ\n )\n\n # Clip value:\n value = self.builder.emit_binop(value, \"&\", mask, ir_typ)\n\n # Clear bits for bitfield:\n loaded = self.builder.emit_binop(loaded, \"&\", inv_mask, ir_typ)\n\n # Or with value\n value = self.builder.emit_binop(loaded, \"|\", value, ir_typ)\n\n # Store modified value back:\n self.emit(ir.Store(value, access.address))",
"def marshal_value(self, value):\n\n return value",
"def _encode_value(self, value):\n return pickle.dumps(value)",
"def as_tuple(self):\n return (self.oid, self.type, self.value)",
"def _parse_value(self, data, flags):\n\n\t\tif flags & Client._FLAG_COMPRESSED:\n\t\t\tdata = decompress(data)\n\n\t\tif flags == 0 or flags == Client._FLAG_COMPRESSED:\n\t\t\t# Either a bare string or a compressed string now decompressed...\n\t\t\tvalue = data\n\t\telif flags & Client._FLAG_INTEGER:\n\t\t\tvalue = int(data)\n\t\telif flags & Client._FLAG_LONG:\n\t\t\tvalue = long(data)\n\t\telif flags & Client._FLAG_PICKLE:\n\t\t\ttry:\n\t\t\t\tvalue = pickle.loads(data)\n\t\t\texcept Exception:\n\t\t\t\tself._debuglog('Pickle error...\\n')\n\t\t\t\tvalue = None\n\t\telse:\n\t\t\tself._debuglog(\"unknown flags on get: %x\\n\" % flags)\n\n\t\treturn value",
"def _decode_value(self, value):\n return pickle.loads(value.value) if value else value",
"def serialize(self, value):\n return value",
"def to_python(self, value):\r\n return value",
"def to_python(self, value):\r\n return value",
"def _from_packed(self, value):\n raise NotImplementedError",
"def StoreBits32(self, val):\n tmp_val = struct.pack(\">L\", val)\n self.StoreBits( (StrToList(tmp_val), 32))",
"def to_python(self, value):\n return force_bool(value)",
"def get_prep_value(self,value):\n matrix = cPickle.dumps(value)\n return matrix",
"def to_python(self, value):\n return value",
"def to_python(self, value):\n return value",
"def dump_object(self, value):\n return pickle.dumps(value)",
"def _to_serialize(value):\n return value.serialize() if value is not None else None"
] |
[
"0.59164286",
"0.5815009",
"0.57775044",
"0.5704896",
"0.554912",
"0.5429599",
"0.5235166",
"0.52273405",
"0.5196163",
"0.5105681",
"0.508569",
"0.5038232",
"0.50112695",
"0.5006383",
"0.49907935",
"0.49732548",
"0.49670067",
"0.4964211",
"0.49320275",
"0.4927446",
"0.490499",
"0.490499",
"0.4898061",
"0.4869935",
"0.48578587",
"0.48499343",
"0.48468596",
"0.48468596",
"0.48368135",
"0.48338464"
] |
0.6581007
|
0
|
Test incr and decr functions
|
def test_incrdecr(self):
    yield self.conn.set("an_integer", 42)
    self.assertEqual((yield self.conn.incr("an_integer", 1)), 43)
    self.assertEqual((yield self.conn.decr("an_integer", 1)), 42)
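For context, a sketch of the memcached text-protocol exchange this test exercises (inferred from the command format used by the client's own _incrdecr helper, shown among the candidates below):

# Hypothetical wire exchange behind the assertions above:
#   client sends "incr an_integer 1\r\n"  -> server replies "43\r\n"
#   client sends "decr an_integer 1\r\n"  -> server replies "42\r\n"
# A "NOT_FOUND" reply would mean the key was never stored.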
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def testIncrementDecrement(self):\n\n memcache.incr('unknown_key')\n assert memcache.get('unknown_key') == None\n memcache.set('counter', 0)\n assert memcache.get('counter') == 0\n memcache.incr('counter')\n assert memcache.get('counter') == 1\n memcache.incr('counter', delta=2)\n assert memcache.get('counter') == 3\n memcache.decr('counter')\n assert memcache.get('counter') == 2\n memcache.decr('counter', 2)\n assert memcache.get('counter') == 0\n memcache.incr('second_counter', initial_value=10)\n assert memcache.get('second_counter') == 11\n memcache.decr('third_counter', initial_value=10)\n assert memcache.get('third_counter') == 9\n\n # This should cause an error message, because zero deltas are not\n # allowed.\n memcache.incr('counter', delta=0)\n\n memcache.set('lcounter', long(20))\n assert memcache.get('lcounter') == long(20)\n memcache.incr('lcounter')\n assert memcache.get('lcounter') == long(21)",
"def test_decrement_of_player():\n player_1 = player.Player(True, True)\n\n player_1.start_tokens -= 1\n player_1.board_tokens += 1\n\n assert player_1.start_tokens == 8\n assert player_1.board_tokens == 1",
"def testCounter():\n c = Counter()\n print(\"Expect 0: \", c)\n for i in range(5):\n c.increment()\n print(\"Expect 5: \", c)\n c.reset()\n print(\"Expect 0: \", c)",
"def testComapareAndSet(self):\n\n client = memcache.Client()\n client.set('mycounter', 0)\n\n def bump_counter(key):\n retries = 0\n while retries < 10: # Retry loop\n counter = client.gets(key)\n assert counter is not None, 'Uninitialized counter'\n if client.cas(key, counter+1):\n break\n retries += 1\n\n bump_counter('mycounter')\n assert client.get('mycounter') == 1",
"def testUnsuccessfulIncrement(self):\n\n cache = self.stub._cache\n self.stub._cache = {}\n\n memcache.incr('somekey')\n\n self.stub._cache = cache",
"def test_balance_value(comp):\n assert comp.balance() == 0\n comp.delete(7)\n comp.delete(9)\n comp.delete(15)\n assert comp.balance() == -1\n comp.delete(4)\n assert comp.balance() == 0\n comp.delete(12)\n assert comp.balance() == 0\n comp.delete(14)\n assert comp.balance() == -1\n comp.delete(6)\n comp.delete(10)\n assert comp.balance() == 0\n comp.delete(8)\n assert comp.balance() == 1\n comp.delete(13)\n assert comp.balance() == 0\n comp.delete(11)\n assert comp.balance() == 0",
"def incrdecr(con,command,key,value=1):\n # yy=atpic.log.setname(xx,'incrdecr')\n thecommand=\"{command} {key} {value}\\r\\n\".format(command=command,key=key,value=value)\n con.send(thecommand.encode('utf-8'))\n line=get_line(con)\n # atpic.log.debug(yy,line)\n if line==b'NOT_FOUND':\n return None\n else:\n return int(line.strip())",
"def decr(self, key, delta=1):\r\n if delta < 0:\r\n return self._incrdecr(\"incr\", key, -delta)\r\n else:\r\n return self._incrdecr(\"decr\", key, delta)",
"def test_inc_rolls(self):\n computer1 = computer.Computer(1)\n computer1.inc_rolls()\n res = computer1.rolls\n exp = 1\n self.assertEqual(res, exp)",
"def _incrdecr(self, cmd, key, delta):\n\t\tcheck_key(key)\n\t\tserver, key = yield self._get_server_for(key)\n\t\tif not server:\n\t\t\treturn\n\n\t\tcmd = \"%s %s %d\\r\\n\" % (cmd, key, delta)\n\n\t\ttry:\n\t\t\tyield server.sendall(cmd)\n\t\t\tline = yield server.read_line()\n\t\t\traise StopIteration(int(line))\n\t\texcept tcp.ConnectionClosedException:\n\t\t\tserver.mark_dead()",
"def test_op_repeat(self) -> None:\n op_base = OpIncrForTest()\n kwargs_per_step_to_add = [\n dict(key_in=\"data.val.a\", key_out=\"data.val.b\"),\n dict(key_in=\"data.val.b\", key_out=\"data.val.c\"),\n dict(key_in=\"data.val.b\", key_out=\"data.val.d\"),\n dict(key_in=\"data.val.d\", key_out=\"data.val.d\"),\n ]\n op_repeat = OpRepeat(op_base, kwargs_per_step_to_add)\n sample_dict = NDict({})\n sample_dict[\"data.val.a\"] = 5\n sample_dict = op_repeat(sample_dict, \"_.test_repeat\", incr_value=3)\n self.assertEqual(sample_dict[\"data.val.a\"], 5)\n self.assertEqual(sample_dict[\"data.val.b\"], 8)\n self.assertEqual(sample_dict[\"data.val.c\"], 11)\n self.assertEqual(sample_dict[\"data.val.d\"], 14)\n\n op_repeat.reverse(\n sample_dict,\n key_to_follow=\"data.val.d\",\n key_to_reverse=\"data.val.d\",\n op_id=\"_.test_repeat\",\n )\n self.assertEqual(sample_dict[\"data.val.a\"], 5)\n self.assertEqual(sample_dict[\"data.val.b\"], 8)\n self.assertEqual(sample_dict[\"data.val.c\"], 11)\n self.assertEqual(sample_dict[\"data.val.d\"], 8)\n\n sample_dict[\"data.val.e\"] = 48\n op_repeat.reverse(\n sample_dict,\n key_to_follow=\"data.val.d\",\n key_to_reverse=\"data.val.e\",\n op_id=\"_.test_repeat\",\n )\n self.assertEqual(sample_dict[\"data.val.a\"], 5)\n self.assertEqual(sample_dict[\"data.val.b\"], 8)\n self.assertEqual(sample_dict[\"data.val.c\"], 11)\n self.assertEqual(sample_dict[\"data.val.d\"], 8)\n self.assertEqual(sample_dict[\"data.val.e\"], 42)",
"def decr(self, key, delta=1):\n\t\treturn self._incrdecr(\"decr\", key, delta)",
"def incr_operand(self):\n pass",
"def increment_counter(self) -> None:",
"def incr(self, key, delta=1):\n\t\treturn self._incrdecr(\"incr\", key, delta)",
"def incr(self, key, delta=1, callback=None):\n self._incrdecr(\"incr\", key, delta, callback=callback)",
"def test_increment(self):\n x0 = 0\n y0 = increment(x0) # y0 should be 1\n self.assertEqual(y0, 1)\n\n x1 = 100\n y1 = increment(x1) # y1 should be 101\n self.assertTrue(y1, 101)\n\n x2 = -1\n y2 = increment(x2) # y2 should be 0\n self.assertEqual(y2, 0)\n\n x3 = -1.5\n y3 = increment(x3) # y3 should be -0.5\n self.assertEqual(y3, -0.5)",
"def incr(self, key, delta=1):\r\n if delta < 0:\r\n return self._incrdecr(\"decr\", key, -delta)\r\n else:\r\n return self._incrdecr(\"incr\", key, delta)",
"def test_concurrent_updates(self):\r\n instance = TestCounterModel.create()\r\n new1 = TestCounterModel.get(partition=instance.partition)\r\n new2 = TestCounterModel.get(partition=instance.partition)\r\n\r\n new1.counter += 5\r\n new1.save()\r\n new2.counter += 5\r\n new2.save()\r\n\r\n actual = TestCounterModel.get(partition=instance.partition)\r\n assert actual.counter == 10",
"def increment(self):\r\n return self.add(1)",
"def decrement(self):\r\n return self.add(-1)",
"def test_increment(self):\r\n self.p + 1\r\n self.assertEqual(str(self.p), '1% [....................]')\r\n self.p + 4\r\n self.assertEqual(str(self.p), '5% [#...................]')",
"def testspecincrement(self):\n global idct\n a = Base()\n idct += 1\n self.assertEqual(a.id, idct)\n b = Base(19)\n self.assertEqual(b.id, 19)\n c = Base()\n idct += 1\n self.assertEqual(c.id, idct)\n d = Base()\n idct += 1\n self.assertEqual(d.id, idct)",
"def is_incr(self, idx):\n return False",
"def is_incr(self, idx):\n return False",
"def is_incr(self, idx):\n return False",
"def test_refcount():\n genesis = resource.genesis\n terminus = resource.terminus\n called = {'genesis': False, 'terminus': False}\n def decorated_genesis():\n genesis()\n called['genesis'] = True\n def decorated_terminus():\n terminus()\n called['terminus'] = True\n resource.genesis = decorated_genesis\n resource.terminus = decorated_terminus\n assert not called['genesis']\n assert not called['terminus']\n assert resource.reference_count == 0\n resource.increment_refcount()\n assert called['genesis']\n assert not called['terminus']\n assert resource.reference_count == 1\n resource.increment_refcount()\n assert not called['terminus']\n assert resource.reference_count == 2\n resource.decrement_refcount()\n assert not called['terminus']\n assert resource.reference_count == 1\n resource.decrement_refcount()\n assert called['terminus']\n assert resource.reference_count == 0",
"def decr(self, key, delta=1, callback=None):\n self._incrdecr(\"decr\", key, delta, callback=callback)",
"def test_add_integer(self):\n assert cr.add(3, 2) == 3 + 2",
"def test_updates(self):\r\n instance = TestCounterModel.create()\r\n instance.counter += 5\r\n instance.save()\r\n\r\n actual = TestCounterModel.get(partition=instance.partition)\r\n assert actual.counter == 5"
] |
[
"0.7287375",
"0.6329485",
"0.63262177",
"0.62672204",
"0.6183841",
"0.6032318",
"0.6031581",
"0.5853862",
"0.5818598",
"0.5796555",
"0.57911915",
"0.57771933",
"0.57524484",
"0.56881315",
"0.56778234",
"0.56689155",
"0.566017",
"0.5618242",
"0.5608999",
"0.55975217",
"0.5581912",
"0.55769426",
"0.5564893",
"0.55410963",
"0.55410963",
"0.55410963",
"0.549193",
"0.5475804",
"0.54706067",
"0.5432352"
] |
0.7240594
|
1
|
Check that invalid keys raise the appropriate exception
|
def test_invalid_keys(self):
    try:
        yield self.conn.set("this has spaces", 1)
    except ValueError:
        pass
    else:
        self.fail("key with spaces did not raise ValueError")
    try:
        yield self.conn.set("\x10control\x02characters\x11", 1)
    except ValueError:
        pass
    else:
        self.fail("key with control characters did not raise ValueError")
    try:
        yield self.conn.set("a" * (SERVER_MAX_KEY_LENGTH + 1), 1)
    except ValueError:
        pass
    else:
        self.fail("long key did not raise ValueError")
    try:
        yield self.conn.set(u"unicode\u4f1a", 1)
    except TypeError:
        pass
    else:
        self.fail("unicode key did not raise TypeError")
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def test_keyerror(self):\n try:\n self.db['foo']\n except KeyError, e:\n assert \"no key 'foo' in database <SequenceFileDB\" in str(e), str(e)",
"def _check_key(self, key):\n raise NotImplementedError",
"def validate_key_throw(*args):\n validation_result = validate_key(*args)\n if not validation_result:\n raise ValueError(str(validation_result))\n return validation_result",
"def test_handle_key_error():\n\n @handle_key_error\n def get_item(key):\n data = {\"A\": 1, \"B\": 2}\n return data[key]\n\n value = get_item(\"A\")\n assert value == 1\n\n with pytest.raises(InvalidParameter) as exc:\n get_item(\"C\")\n\n assert \"C\" in str(exc.value)",
"def test_keys_failure(self):\n storage = Storage()\n storage._keys_dict = {'1': 'one',\n 'abc': '1'}\n self.assertRaises(StoragePatternError, storage.keys, 'ab[cd')",
"def __check_key_validity(self, key):\n if not isinstance(key, tuple):\n raise TypeError(\"key must be a tuple\")\n if len(key) != 2:\n raise ValueError(\"key must be of length two\")\n if not (isinstance(key[0], int) and isinstance(key[1], int)):\n raise TypeError(\"elements of key must be integers\")\n if not ((0 <= key[0] < self.m) and (0 <= key[1] < self.n)):\n raise exc.OutOfBoundsError(\"key is out of bounds\")",
"def testBadKeys(self):\n # Ignore access to protected members\n # pylint: disable=W0212\n self.assertRaises(DOLAPI._DOLAPIError,\n self.badauth.table,\n self.dataset,\n self.table)",
"def test_invalid_key(self):\n with pytest.raises(yaenv.EnvError) as err:\n _ = yaenv.core.EnvVar('221b=\"starts with number\"')\n assert 'Invalid key' in str(err.value)\n with pytest.raises(yaenv.EnvError) as err:\n _ = yaenv.core.EnvVar('_=\"not assignable\"')\n assert 'Invalid key' in str(err.value)\n with pytest.raises(yaenv.EnvError) as err:\n _ = yaenv.core.EnvVar('o-o=\"invalid character\"')\n assert 'Invalid key' in str(err.value)",
"def check_keys(self):",
"def test_neg_exists_key_invalid_data(self, key, ex_code, ex_msg):\n with pytest.raises(e.ParamError):\n key, _ = self.as_connection.exists(key)",
"def isValidKey(key):\n return True",
"def test_getObjectByKey_raises_KeyError(self):\n try:\n self.tile_bucket.getObjectByKey('foo_key')\n except KeyError:\n return\n assert(False)",
"def test_wrong_key(self):\n with pytest.raises(FinderInsideProException) as ex:\n FinderInsidePro(\"\")\n assert FinderInsideProException.EXCEPTION_TEXT_KEY_NOT_SET in str(ex)\n assert ex.value.extype == FinderInsideProException.TYPE_KEY_IS_WRONG\n\n with pytest.raises(FinderInsideProException) as ex:\n FinderInsidePro('aaa')\n assert FinderInsideProException.EXCEPTION_TEXT_WRONG_KEY in str(ex)\n assert ex.value.extype == FinderInsideProException.TYPE_KEY_IS_WRONG",
"def testMissingKeys(self):\n self.assertRaises(ValueError,\n self.unauth.table,\n self.dataset,\n self.table)",
"def validate_key(self, key: keyType) -> bool:\n if isinstance(key, (dict,bool)):\n raise Exception\n if key is None:\n raise Exception\n # Numerical key object has no len(),\n # so explicitly specify which types are not allowed to use empty value as keys\n if isinstance(key, (str, tuple, set, list)) and (len(key) == 0):\n raise Exception\n return True",
"def test_get_invalid_key(self):\n pairs = {'library': '~/home/documents/dms',\n 'key': 'value',\n }\n exceptionKeys = ['Hello', 'spam']\n try:\n tempconfig = tempfile.NamedTemporaryFile(\n suffix=\".yaml\", delete=False)\n tempconfig.write('ham: eggs'.encode('UTF-8'))\n tempconfig.close()\n config = easydms.config.Config(tempconfig.name)\n\n for key, value in pairs.items():\n self.assertEqual(config.getKey(key, value), value)\n\n for key in exceptionKeys:\n with self.assertRaises(easydms.config.ErrorConfigKeyNotFound):\n config.getRequiredKey(key)\n finally:\n os.remove(tempconfig.name)",
"def test_toomanykeys(self):\n self.assertRaises(recordparser.KeyListMismatchError,\n recordparser.getfields, \"1234567890\", \"10s\", (\"key1\", \"key2\"))",
"def test_get_invalid_key(test_file):\n md = OSXMetaData(test_file.name)\n with pytest.raises(KeyError):\n md[\"invalid_key\"]",
"def _check_keys(setting_dict):\n for key in SettingContainer.key_list:\n if not key in setting_dict:\n raise Exception(\n f\"No value for {key} found in language-settings\")",
"def _check_key(key): # type: (str) -> None\n if not key:\n raise ValueError('Key must not be empty.')\n if '.' in key:\n raise ValueError('Key must not contain dots.')",
"def test_is_valid_label_key_invalid_input():\n # test length violations\n assert not is_valid_label_key(key=None) # Too short\n assert not is_valid_label_key(key=\"\") # Too short\n assert not is_valid_label_key(key=f\"{'p' * 254}/n\") # prefix too long\n assert not is_valid_label_key(key=\"/n\") # prefix too short\n assert not is_valid_label_key(key=\"p/\") # name too short\n assert not is_valid_label_key(key=\"a\" * 254) # name too long\n assert not is_valid_label_key(key=f\"d/{'b'*64}\") # name too long\n # test first character violations (not alphanum)\n assert not is_valid_label_key(key=\"-a\")\n assert not is_valid_label_key(key=\".b\")\n assert not is_valid_label_key(key=\" c\")\n # test last character violations (not alphanum)\n assert not is_valid_label_key(key=\"a-\")\n assert not is_valid_label_key(key=\"b.\")\n assert not is_valid_label_key(key=\"c \")\n assert not is_valid_label_key(key=\"sw33T#\")\n # test middle characters violations\n assert not is_valid_label_key(key=\"a$$a\")\n assert not is_valid_label_key(key=\"b b\")",
"def validate_instruction_keys(instruction: TransactionInstruction, expected: int) -> None:\n if len(instruction.keys) < expected:\n raise ValueError(f\"invalid instruction: found {len(instruction.keys)} keys, expected at least {expected}\")",
"def test_map_missing_key_encountered():\n with pytest.raises(KeyError):\n Map().read_key(10, b\"\")",
"def test_is_valid_annotation_key_invalid_input():\n # test length violations\n assert not is_valid_annotation_key(key=None) # Too short\n assert not is_valid_annotation_key(key=\"\") # Too short\n assert not is_valid_annotation_key(key=f\"{'p' * 254}/n\") # prefix too long\n assert not is_valid_annotation_key(key=\"/n\") # prefix too short\n assert not is_valid_annotation_key(key=\"p/\") # name too short\n assert not is_valid_annotation_key(key=\"a\" * 254) # name too long\n assert not is_valid_annotation_key(key=f\"d/{'b'*64}\") # name too long\n # test first character violations (not alphanum)\n assert not is_valid_annotation_key(key=\"-a\")\n assert not is_valid_annotation_key(key=\".b\")\n assert not is_valid_annotation_key(key=\" c\")\n # test last character violations (not alphanum)\n assert not is_valid_annotation_key(key=\"a-\")\n assert not is_valid_annotation_key(key=\"b.\")\n assert not is_valid_annotation_key(key=\"c \")\n assert not is_valid_annotation_key(key=\"sw33T#\")\n # test middle characters violations\n assert not is_valid_annotation_key(key=\"a$$a\")\n assert not is_valid_annotation_key(key=\"b b\")",
"def test_missing_entry_raises_KeyError(self):\n with self.assertRaises(KeyError):\n self.phonebook.lookup(\"missing\")",
"def testKeyInfoTooShort(self):\n key = 5\n self.assertRaises(ValueError, dataToToken, key, data='x', keyInfo='xx')",
"def test_config_key_error():\n c = core.Config()\n\n with pytest.raises(KeyError):\n c['doesNotExist']",
"def test_set_invalid_key(test_file):\n md = OSXMetaData(test_file.name)\n with pytest.raises(KeyError):\n md[\"invalid_key\"] = \"value\"",
"def testBadKeyToToken(self):\n key = 5\n self.assertRaises(ValueError, dataToToken, key, data='hey')",
"def test_invalid_chars_ssck(self):\r\n valid_base = SlashSeparatedCourseKey(u'org.dept-1%2', u'course.sub-2%3', u'run.faster-4%5')\r\n for key in SlashSeparatedCourseKey.KEY_FIELDS:\r\n with self.assertRaises(InvalidKeyError):\r\n # this ends up calling the constructor where the legality check should occur\r\n valid_base.replace(**{key: u'funny thing'})"
] |
[
"0.74442935",
"0.74044627",
"0.73613185",
"0.72661304",
"0.7248212",
"0.72250146",
"0.7207309",
"0.72047716",
"0.7202045",
"0.71926993",
"0.71520114",
"0.7141901",
"0.7126506",
"0.7093022",
"0.7073858",
"0.7037861",
"0.70023704",
"0.6989978",
"0.6964261",
"0.69501865",
"0.692735",
"0.6911471",
"0.68498003",
"0.6825918",
"0.6773882",
"0.67637515",
"0.6758699",
"0.6726444",
"0.67175686",
"0.6714448"
] |
0.7550915
|
0
|
Check that get_multi works as expected
|
def test_get_multi(self):
    yield self.conn.set("an_integer", 42)
    yield self.conn.set("a_string", "hello")
    res = yield self.conn.get_multi(["a_string", "an_integer"])
    self.assertEquals(res, {"a_string": "hello", "an_integer": 42})
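A complementary sketch pairing the bulk write and bulk read paths, written in the same generator/test style (set_multi appears earlier on this page and hands back the keys that failed to store):

# Hypothetical bulk round trip:
notstored = yield self.conn.set_multi({"a_string": "hello", "an_integer": 42})
assert notstored == []  # every key was accepted by its server
res = yield self.conn.get_multi(["a_string", "an_integer"])
assert res == {"a_string": "hello", "an_integer": 42}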
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def test_multi(self):\n self.assertEqual(6, foo.multi(2, 3))",
"def test_multi(self):\n self.assertEqual(6, multi(2, 3))",
"def test_get_multi_argument(self):\n models.storage.close()\n models.storage = models.engine.db_storage.DBStorage()\n models.storage.reload()\n obj = self.populate()\n with self.assertRaises(TypeError):\n models.storage.get(type(obj[0]), obj[0].id, obj[1].id)",
"def test_listtem_using_get(self):\n pass",
"def test_get2(self):\n pass",
"def test_get1(self):\n pass",
"def test_gettem_using_get(self):\n pass",
"def testMulti(self):\n\n memcache.set_multi({'map_key_one': 1, 'map_key_two': u'some value'})\n values = memcache.get_multi(['map_key_one', 'map_key_two'])\n assert {'map_key_one': 1, 'map_key_two': u'some value'} == values\n\n memcache.add_multi(\n {'map_key_one': 'one', 'map_key_two': 2, 'three': u'trois'})\n values = memcache.get_multi(['map_key_two', 'three'])\n assert {'map_key_two': u'some value', 'three': u'trois'} == values",
"def get_multiple1(self):\n pass",
"def test_multiple(self):\n\n with self.assertRaises(MultipleObjectsReturned):\n RST_FBO().get()",
"def test_populator_only_fetches_needy():\n o1, o2 = MediaBag(id=1), MediaBag(media=2)\n with build_multi_get(1) as multi_get:\n media.build_populator('id', multi_get)([o1, o2])",
"def test_get_list(self):\n pass",
"def test_multiple_results(self):\n obj_list = [self.factory.create(name='hello') for i in range(2)]\n response = self._get(get_kwargs={'search': 'ello'})\n self.assertEquals(response.status_code, 200)\n self.assertTemplateUsed(response, self.template_name)\n self.assertEquals(response.context['object_list'].count(), 2)\n for obj in obj_list:\n self.assertTrue(obj in response.context['object_list'])",
"def test_populator_aborts_early():\n o1, o2 = MediaBag(media=1), MediaBag(media=2)\n\n def multi_get(*keys):\n raise AssertionError('tried calling multi_get')\n\n results = media.build_populator('id', multi_get)([o1, o2])\n assert results == [o1, o2]",
"def test_populator():\n o1, o2 = MediaBag(id=1), MediaBag(id=2)\n with build_multi_get(2) as multi_get:\n media.build_populator('id', multi_get)([o1, o2])\n assert (o1.media, o2.media) == (1, 2)",
"def test_products_ref_groups_get(self):\n pass",
"def test_get(self):\n pass",
"def test_get_token_supply_all_using_get(self):\n pass",
"def testGetMultiplePetitionsById():\n\tapi = c.Api()\n\toutput = api.getMultiplePetitionsById([2297756, 1756395])\n\tif type(output) is list:\n\t\tassert True",
"def test_get_multipleobjects_exception(self):\r\n with self.assertRaises(TestModel.MultipleObjectsReturned):\r\n TestModel.objects.get(test_id=1)",
"def test_intercommunalitys_get(self):\n pass",
"def test_get_collection(self):\n pass",
"def multiget(self, keys):\n return self.sp.multiget(keys)",
"def test_query_multiple(test_store, andy, pandy):\n items = list(test_store.get_by(age=12))\n assert len(items) == 2\n assert andy in items\n assert pandy in items",
"def test_get(self):\n simple_fields = {\n \"verbose\": False,\n \"min_core_neighbors\": self.min_core_neighbors,\n \"num_features\": 1,\n \"num_unpacked_features\": 2,\n \"num_distance_components\": 1,\n \"radius\": self.radius,\n \"num_examples\": 30,\n }\n\n for field, ans in simple_fields.items():\n self.assertEqual(self.model._get(field), ans, \"{} failed\".format(field))\n\n _list_fields = {\n \"distance\": self.distance,\n \"unpacked_features\": [\"X1[0]\", \"X1[1]\"],\n \"features\": [\"X1\"],\n }\n\n for field, ans in _list_fields.items():\n self.assertItemsEqual(\n self.model._get(field), ans, \"{} failed\".format(field)\n )\n self.assertGreaterEqual(self.model.training_time, 0)\n self.assertGreaterEqual(self.model.num_clusters, 0)\n self.assertEqual(self.model.cluster_id.num_rows(), 30)",
"def test_get_multiple(multiple_bucket): # pylint: disable=redefined-outer-name\n for idx in range(2):\n element_number = idx + 1\n assert multiple_bucket.get(f\"key {element_number}\") == f\"value {element_number}\"",
"def testMultiSet2(self):\n # Specify a per element timestamp\n data_store.DB.MultiSet(self.test_row,\n {\"aff4:size\": [(1, 100)],\n \"aff4:stored\": [(\"2\", 200)]},\n token=self.token)\n\n (stored, ts) = data_store.DB.Resolve(self.test_row, \"aff4:size\",\n token=self.token)\n self.assertEqual(stored, 1)\n self.assertEqual(ts, 100)\n\n (stored, ts) = data_store.DB.Resolve(self.test_row, \"aff4:stored\",\n token=self.token)\n self.assertEqual(stored, \"2\")\n self.assertEqual(ts, 200)",
"def test_get_multipleobjects_exception(self):\r\n with self.assertRaises(self.table.MultipleObjectsReturned):\r\n self.table.objects.get(test_id=1)",
"def test_get_list8(self):\n pass",
"def test_groups_get(self):\n pass"
] |
[
"0.7208365",
"0.7107856",
"0.66856176",
"0.64896005",
"0.6420742",
"0.6312199",
"0.6270394",
"0.6264926",
"0.62275136",
"0.621508",
"0.62059",
"0.6088066",
"0.60627234",
"0.60491973",
"0.6030645",
"0.5929987",
"0.5928162",
"0.59056866",
"0.58726704",
"0.5856411",
"0.5839454",
"0.57695013",
"0.5756537",
"0.5755437",
"0.57457316",
"0.57234734",
"0.5718706",
"0.57025445",
"0.5660897",
"0.5641091"
] |
0.7426123
|
0
|
Inserts the interval into the tree.
|
def insert(self, interval):
    if self.root == None:
        self.root = Node(interval)
        return self.root
    (start, end) = interval
    node = self.root
    while True:
        if node.key <= start:
            path = 'right'
        else:
            path = 'left'
        # Maintain the "high" invariant: each node stores the largest
        # interval endpoint found anywhere in its subtree.
        if node.high < end:
            node.high = end
        # Add a new leaf node.
        if getattr(node, path, None) is None:
            setattr(node, path, Node(interval))
            getattr(node, path).parent = node
            break
        else:
            node = getattr(node, path)
    return getattr(node, path)
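The insert above leans on a small node object; a minimal sketch of that assumed structure (the attribute names are inferred from what insert() touches: key, high, left, right, parent):

class Node(object):
    # Hypothetical node layout matching the attributes insert() relies on.
    def __init__(self, interval):
        (start, end) = interval
        self.key = start    # sort key: the interval's start
        self.high = end     # largest endpoint seen in this subtree
        self.left = None
        self.right = None
        self.parent = None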
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def insert(self, interval):\n\t\tif not self.head and not self.tail:\n\t\t\tself.head = interval\n\t\t\tself.tail = interval\n\t\telif interval.start <= (self.tail.end + 1):\n\t\t\tself.tail.end = interval.end\n\t\telse:\n\t\t\tself.tail.next = interval\n\t\t\tself.tail = interval",
"def insert(self, item):\n insert_location = self.__find(item)\n if insert_location is None: #No root\n self.root = Node(item, None)\n elif item < insert_location.item:\n insert_location.left_child = Node(item, insert_location)\n else: # it should be that item >= insert_location.item\n insert_location.right_child = Node(item, insert_location)",
"def insert(self, val):\n\n\t\tif not self.root:\n\t\t\tself.root = BinaryTreeNode(val)\n\n\t\telse:\n\t\t\tQ = [self.root]\n\t\t\twhile Q:\n\t\t\t\tnode = Q.pop(0)\n\t\t\t\tif not node.left:\n\t\t\t\t\tnode.left = BinaryTreeNode(val)\n\t\t\t\t\tbreak\n\t\t\t\telse:\n\t\t\t\t\tQ.append(node.left)\n\n\t\t\t\tif not node.right:\n\t\t\t\t\tnode.right = BinaryTreeNode(val)\n\t\t\t\t\tbreak\n\t\t\t\telse:\n\t\t\t\t\tQ.append(node.right)\n\n\t\tself.numNodes += 1",
"def insert(self, val):\n if self.val is None:\n self.__init__(val)\n elif self.val > val:\n self.left.insert(val)\n elif self.val < val:\n self.right.insert(val)",
"def insert(self, value):\n\t\tif value > self.value:\n\t\t\tif self.right == None:\n\t\t\t\tself.right = BSTreeNode(value, parent=self)\n\t\t\telse:\n\t\t\t\tself.right.insert(value)\n\t\telif value < self.value:\n\t\t\tif self.left == None:\n\t\t\t\tself.left = BSTreeNode(value, parent=self)\n\t\t\telse:\n\t\t\t\tself.left.insert(value)\n\t\tself.check_balance()",
"def insert(self, value):\n\t\tif value > self.value:\n\t\t\tif self.right == None:\n\t\t\t\tself.right = BSTreeNode(value)\n\t\t\telse:\n\t\t\t\tself.right.insert(value)\n\t\telif value < self.value:\n\t\t\tif self.left == None:\n\t\t\t\tself.left = BSTreeNode(value)\n\t\t\telse:\n\t\t\t\tself.left.insert(value)",
"def insert(self, value):\n if value < self.value:\n if self.left:\n self.left.insert(value)\n else:\n self.left = BSTNode(value)\n else:\n if self.right:\n self.right.insert(value)\n else:\n self.right = BSTNode(value)",
"def insert(self, val):\n if not self.root:\n self.root = Node(val)\n self.size_number += 1\n else:\n self._sink(val, self.root)\n # check parent from node, until unbalanced.",
"def insert(self, value):\n new_node = Node(value)\n if self.root is None:\n self.root = new_node\n else:\n node = self.root\n while(node!=None):\n if(value <= node.data):\n if node.left is None:\n node.left = new_node\n node = node.left\n node = node.left\n elif(value > node.data):\n if node.right is None:\n node.right = new_node\n node = node.right\n node = node.right",
"def insertion(self, i):\n n = self._size\n if not 0 < i <= n + 1:\n raise ValueError(\"integer to be inserted not \"\n \"in the appropriate interval\")\n\n def add1(u):\n if u >= i:\n return u + 1\n return u\n rels = [(add1(a), add1(b))\n for (a, b) in self.decreasing_cover_relations()]\n rels += [(add1(a), add1(b))\n for (a, b) in self.increasing_cover_relations()]\n rels += [(k, k - 1) for k in [i] if i > 1]\n rels += [(k, k + 1) for k in [i] if i <= n]\n return TamariIntervalPoset(n + 1, rels)",
"def insert(self, val):\n if type(val) not in [int, float]:\n raise TypeError('This tree accepts numbers only.')\n if self.contains(val):\n raise ValueError('Node already in tree.')\n new_node = Node(val)\n if self._size == 0:\n self._root = new_node\n self._max_depth = 1\n self._rbal = 1\n self._lbal = 1\n else:\n current_depth = 1\n current_node = self._root\n while val is not current_node._data:\n current_depth += 1\n if val < current_node._data:\n if current_node._lkid:\n current_node = current_node._lkid\n else:\n current_node._lkid = new_node\n new_node._parent = current_node\n self._get_new_max()\n elif val > current_node._data:\n if current_node._rkid:\n current_node = current_node._rkid\n else:\n current_node._rkid = new_node\n new_node._parent = current_node\n self._get_new_max()\n self._size += 1",
"def insert(self, val):\n node = Node(val)\n current = self.root\n\n if self.root is None:\n self.root = node\n return node\n\n while current:\n if val >= current.val:\n if current.right is not None:\n current = current.right\n else:\n current.right = node\n break\n\n elif val < current.val:\n if current.left is not None:\n current = current.left\n else:\n current.left = node\n break\n\n return node",
"def insert(self, value):\n\n node = self\n parent = None # Keeps track of parent node, is initialized as None for root node\n\n while node is not None: # Base case, for when the leaf node is reached\n\n if value < node.value:\n left = True # Indicates that the value to be inserted should go on the left of the current node\n parent = node\n node = node.left\n\n elif value >= node.value:\n left = False # Indicates that the value to be inserted should go on the right of the current node\n parent = node\n node = node.right\n\n if left:\n parent.left = BST(value)\n else:\n parent.right = BST(value)\n\n return self",
"def _insert(self, value, cur_node):\n if value < cur_node.value:\n if cur_node.left_child == None:\n cur_node.left_child = Node(value)\n else: \n self._insert(value, cur_node.left_child)\n elif value > cur_node.value: #creating elif in case the value is same as the current node \n if cur_node.right_child == None:\n cur_node.right_child = Node(value)\n else:\n self._insert(value, cur_node.right_child)\n else:\n print(\"Value already in the tree\")",
"def insert(self, index: int, tree: 'Tree') -> None:\n ...",
"def _insert(self, current, new_val):\r\n if new_val <= current.value:\r\n if current.left:\r\n self._insert(current.left, new_val)\r\n else:\r\n current.left = RBTreeNode(new_val, parent=current) # new nodes are red by default\r\n self._fix_rb_prop(current.left)\r\n else:\r\n if current.right:\r\n self._insert(current.right, new_val)\r\n else:\r\n current.right = RBTreeNode(new_val, parent=current) # new nodes are red by default\r\n self._fix_rb_prop(current.right)",
"def insert(self, value):\n insertion_point = self._find(value)\n n = SplayNode(value)\n\n # value already in the tree; add at leftmost position in right subtreepa\n if value == insertion_point.value:\n if insertion_point.right is None:\n insertion_point.right = n\n n.parent = insertion_point\n else:\n insertion_point = insertion_point.right\n while insertion_point.left is not None:\n insertion_point = insertion_point.left\n insertion_point.left = n\n n.parent = insertion_point\n\n # value belongs to the left\n elif value < insertion_point.value:\n insertion_point.left = n\n n.parent = insertion_point\n\n # value belongs to the right\n else:\n insertion_point.right = n\n n.parent = insertion_point\n\n n._splay()\n return n # return new root",
"def __insert_tree(self, t):\n\t\tif not t:\n\t\t\treturn\n\t\tif t.value > self.value:\n\t\t\tif self.right == None:\n\t\t\t\tself.right = t\n\t\t\telse:\n\t\t\t\tself.right.__insert_tree(t)\n\t\telif t.value < self.value:\n\t\t\tif self.left == None:\n\t\t\t\tself.left = t\n\t\t\telse:\n\t\t\t\tself.left.__insert_tree(t)",
"def insert(self, val, balanced=True, render=False):\n if self.val is not None:\n if val == self.val:\n return None\n if val < self.val:\n if self.left is None:\n self.left = Node(val, self)\n if balanced:\n self.left._self_balance()\n else:\n self.left.insert(val, balanced, render)\n elif val > self.val:\n if self.right is None:\n self.right = Node(val, self)\n if balanced:\n self.right._self_balance()\n else:\n self.right.insert(val, balanced, render)\n else:\n self.val = val\n if render and self.parent is None:\n self.save_render()",
"def bounded_insert(self, time, tailnumber):\n if self.root is None: \n node = self.insert(time, tailnumber)\n return node\n\n if self.simple is False: \n conflict = self.find_conflict(time)\n if conflict is not None: \n new_time = conflict.key + self.wait_time\n self.bounded_insert(new_time, tailnumber)\n else: \n node = self.insert(time, tailnumber)\n return node \n else: \n conflict = self.find_conflict(time)\n if conflict is None: \n node = self.insert(time, tailnumber)",
"def insert(self, key, val=None):\n self.root = self._insert(self.root, key, val) # Returns root of resulting tree after insertion - update it\n self.n += 1",
"def insert(self, data):\n if data < self.data:\n if self.left is None:\n self.left = Node(data, self)\n else:\n self.left.insert(data)\n elif data > self.data:\n if self.right is None:\n self.right = Node(data, self)\n else:\n self.right.insert(data)",
"def insert(self, data):\n if self.data:\n if data < self.data:\n if self.left is None:\n self.left = Node(data)\n else:\n self.left.insert(data)\n elif data > self.data:\n if self.right is None:\n self.right = Node(data)\n else:\n self.right.insert(data)\n else:\n self.data = data",
"def insert(self, data):\n \n def _find_parent(current, node):\n \"\"\"Recursively descend through the tree to find the node that\n should be the parent of the new node. Do not allow for duplicates.\n \"\"\"\n \n if node == current:\n raise ValueError(str(node.data) + \" is already in the tree.\")\n if node < current: # Travel left\n if current.left:\n return _find_parent(current.left,node)\n else:\n return current\n else: # Travel right\n if current.right:\n return _find_parent(current.right,node)\n else:\n return current\n \n n = KDTNode(data) # Make a new node\n if len(data) != self.k:\n raise ValueError(\"data must be of length \" + str(self.k))\n if not self.root:\n self.root = n # Case 1: empty tree\n n.axis = 0\n else: # Case 2: use _find_parent\n parent = _find_parent(self.root, n) # Get the parent\n if n < parent: parent.left = n # Insert the node\n else: parent.right = n\n n.prev = parent # Double link\n n.axis = (n.prev.axis + 1) % self.k\n return n",
"def _insert(self, key):\n if self.min > key:\n self.min = key\n if self.max < key:\n self.max = key\n if key == self.key:\n return self\n self.size += 1\n if key < self.key:\n if self.left is None:\n self.left = self._create_new(key)\n self.left.parent = self\n return self\n self.left = self.left._insert(key)\n else:\n if self.right is None:\n self.right = self._create_new(key)\n self.right.parent = self\n return self\n self.right = self.right._insert(key)\n return self",
"def _insert(self, data, cur_node):\n if data < cur_node.data:\n if cur_node.left_child == None:\n cur_node.left_child = AVLNode(data)\n cur_node.left_child.parent=cur_node # set parent\n self._check_balance(cur_node.left_child)\n else:\n self._insert(data, cur_node.left_child)\n elif data > cur_node.data:\n if cur_node.right_child == None:\n cur_node.right_child = AVLNode(data)\n cur_node.right_child.parent = cur_node # set parent\n self._check_balance(cur_node.right_child)\n else:\n self._insert(data,cur_node.right_child)\n # else:\n # print(\"data already in tree!\")",
"def insert(self, value):\n\n\n if value < self.data:\n if self.left:\n self.left.insert(value)\n else:\n self.left = BinaryNode(value)\n\n elif value > self.data:\n if self.right:\n self.right.insert(value)\n else:\n self.right = BinaryNode(value)\n\n else:\n self.data = self.data",
"def test_insert(self):\n\n # test expected behavior for correctly formatted inputs\n int1 = interval('[1,2)')\n int2 = interval('(1,2]')\n int12 = interval('[1,2]')\n inserted12 = insert([int1], int2)\n self.assertEqual([int12], inserted12)\n int3 = interval('[3,3]')\n int13 = interval('[1,3]')\n self.assertEqual([int13], insert([int12], int3))\n int4 = interval('(3,4]')\n int58 = interval('[5,8]')\n inserted4 = insert([],int4)\n self.assertEqual([int4], inserted4)\n self.assertEqual([int13, int58], insert([int12, int3], int58))\n self.assertEqual([int13, int58], insert([int58], int13))\n self.assertEqual([int13], insert([int2, int3], int1))\n self.assertEqual([int13], insert([int1, int2, int2, int3], int12))\n self.assertEqual([int1], insert([int1], int1))\n\n # test expected behavior for incorrectly formatted inputs\n with self.assertRaises(ValueError):\n int1 = insert([int1], 4)\n with self.assertRaises(ValueError):\n int1 = insert([3], int1)\n with self.assertRaises(ValueError):\n int1 = insert([3], \"not an interval\")\n with self.assertRaises(ValueError):\n int1 = insert([3], \"[1,3]\")\n with self.assertRaises(ValueError):\n int1 = insert([[]], \"\")\n with self.assertRaises(ValueError):\n int1 = insert([[12, \"hi\"]], \"interval\")\n with self.assertRaises(ValueError):\n int1 = insert([int1], \"\")\n with self.assertRaises(ValueError):\n int1 = insert([[]], int2)\n print(\"insert test complete\")",
"def insert(self, value):\n i = 0\n n = len(self._tree)\n while i < n:\n cur = self._tree[i]\n self._counts[i] += 1\n if value < cur:\n i = 2 * i + 1\n elif value > cur:\n i = 2 * i + 2\n else:\n return\n raise ValueError(\"Value %s not contained in tree.\" \"Also, the counts are now messed up.\" % value)",
"def insert(self, value):\n i = 0\n n = len(self._tree)\n while i < n:\n cur = self._tree[i]\n self._counts[i] += 1\n if value < cur:\n i = 2 * i + 1\n elif value > cur:\n i = 2 * i + 2\n else:\n return\n raise ValueError(\"Value %s not contained in tree.\"\n \"Also, the counts are now messed up.\" % value)"
] |
[
"0.7717948",
"0.6542942",
"0.6509033",
"0.6505067",
"0.64955026",
"0.64484197",
"0.64471847",
"0.644557",
"0.6391438",
"0.6373396",
"0.6361766",
"0.63453394",
"0.6314026",
"0.62809587",
"0.62677604",
"0.62472606",
"0.624186",
"0.62068784",
"0.61892736",
"0.61792856",
"0.61694473",
"0.6146116",
"0.61325604",
"0.6129288",
"0.6128366",
"0.6124542",
"0.6111672",
"0.6104296",
"0.6081551",
"0.6077446"
] |
0.7850783
|
0
|
Raises the given exception type in the context of this thread.
|
def raise_exc(self, exctype):
    _async_raise(self._get_my_tid(), exctype)
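A brief usage sketch (the class name KillableThread and the target long_running_job are placeholders; _async_raise is the ctypes helper shown among the candidates below):

# Hypothetical usage: ask a running worker thread to unwind with SystemExit.
worker = KillableThread(target=long_running_job)
worker.start()
worker.raise_exc(SystemExit)  # injects the exception into the worker's thread state
worker.join()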
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def raise_exc(self, exctype):\n _async_raise(self._get_my_tid(), exctype)",
"def raise_exc(self, exctype):\n _async_raise(self._get_my_tid(), exctype)",
"def exception(self, *args, **kwargs):",
"def throw(self, type, value=None, traceback=None):\n pass",
"def exception(self, e):\n pass",
"def handle_exception(exc_type, exception, traceback):\n report(UNKNOWN, \"unhandled exception: %s\" % (exception,))",
"def unexpectedException(self):",
"def raises(self, exception_type, function, *args, **kwargs):\n try:\n result = function(*args, **kwargs)\n self.log_error(\"{} did not throw exception {}\".format(\n function.__name__,\n exception_type.__name__\n ), None)\n return result\n except Exception as e:\n if type(e) != exception_type:\n self.log_error(\"{} did raise {}: {}\".format(\n function.__name__,\n type(e).__name__, e\n ), None)",
"def unexpected_error(self, exception):",
"def _async_raise(tid, exctype):\n\tif not inspect.isclass(exctype):\n\t\traise TypeError(\"Only types can be raised (not instances)\")\n\tres = ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, ctypes.py_object(exctype))\n\tif res == 0:\n\t\traise ValueError(\"invalid thread id\")\n\telif res != 1:\n\t\t# \"\"\"if it returns a number greater than one, you're in trouble, \n\t\t# and you should call it again with exc=NULL to revert the effect\"\"\"\n\t\tctypes.pythonapi.PyThreadState_SetAsyncExc(tid, 0)\n\t\traise SystemError(\"PyThreadState_SetAsyncExc failed\")",
"def _async_raise(tid, exctype):\r\n if not inspect.isclass(exctype):\r\n raise TypeError(\"Only types can be raised (not instances)\")\r\n res = ctypes.pythonapi.PyThreadState_SetAsyncExc(ctypes.c_long(tid), ctypes.py_object(exctype))\r\n if res == 0:\r\n raise ValueError(\"invalid thread id\")\r\n elif res != 1:\r\n # \"\"\"if it returns a number greater than one, you're in trouble,\r\n # and you should call it again with exc=NULL to revert the effect\"\"\"\r\n ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, None)\r\n raise SystemError(\"PyThreadState_SetAsyncExc failed\")",
"def _async_raise(tid, exctype):\n if not inspect.isclass(exctype):\n raise TypeError(\"Only types can be raised (not instances)\")\n res = ctypes.pythonapi.PyThreadState_SetAsyncExc(ctypes.c_long(tid),\n ctypes.py_object(exctype))\n if res == 0:\n raise ValueError(\"invalid thread id\")\n elif res != 1:\n # \"if it returns a number greater than one, you're in trouble,\n # and you should call it again with exc=NULL to revert the effect\"\n ctypes.pythonapi.PyThreadState_SetAsyncExc(ctypes.c_long(tid), None)\n raise SystemError(\"PyThreadState_SetAsyncExc failed\")",
"def exception_handler(self, exception):\n pass",
"def _Raise(self, t):\n self.RaiseError(t, \"Exception raising not supported\")",
"def _exception_dispatcher(self, e):\n # TODO Currently not doing anything\n raise e",
"def raise_exception(self):\n thread_id = self.get_id()\n res = ctypes.pythonapi.PyThreadState_SetAsyncExc(\n thread_id, ctypes.py_object(SystemExit)\n )\n if res > 1:\n ctypes.pythonapi.PyThreadState_SetAsyncExc(thread_id, 0)\n print(\"Exception raise failure\")",
"def raise_exception(self):\n thread_id = self.get_id()\n res = ctypes.pythonapi.PyThreadState_SetAsyncExc(\n thread_id, ctypes.py_object(SystemExit)\n )\n if res > 1:\n ctypes.pythonapi.PyThreadState_SetAsyncExc(thread_id, 0)\n print(\"Exception raise failure\")",
"def report_unexpected_exception(self, *args, **kwargs):\n pass",
"def _async_raise(t, exctype):\n tid = ctypes.c_long(t)\n if not inspect.isclass(exctype):\n exctype = type(exctype)\n res = ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, ctypes.py_object(exctype))\n if res == 0:\n raise ValueError(\"invalid thread id\")\n elif res != 1:\n # \"\"\"if it returns a number greater than one, you're in trouble,\n # and you should call it again with exc=NULL to revert the effect\"\"\"\n ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, None)",
"def _async_raise(self,tid, exctype): \n tid = c_long(tid) \n if not inspect.isclass(exctype): \n exctype = type(exctype) \n res = pythonapi.PyThreadState_SetAsyncExc(tid, py_object(exctype)) \n if res == 0: \n raise ValueError(\"invalid thread id\") \n elif res != 1: \n # \"\"\"if it returns a number greater than one, you're in trouble, \n # and you should call it again with exc=NULL to revert the effect\"\"\" \n pythonapi.PyThreadState_SetAsyncExc(tid, None) \n raise SystemError(\"PyThreadState_SetAsyncExc failed\")",
"def _async_raise(self, tid, exctype):\n tid = ctypes.c_long(tid)\n if not inspect.isclass(exctype):\n exctype = type(exctype)\n res = ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, ctypes.py_object(exctype))\n if res == 0:\n raise ValueError(\"invalid thread id\")\n elif res != 1:\n # \"\"\"if it returns a number greater than one, you're in trouble,\n # and you should call it again with exc=NULL to revert the effect\"\"\"\n ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, None)\n raise SystemError(\"PyThreadState_SetAsyncExc failed\")",
"def _async_raise(self,tid, exctype):\n tid = ctypes.c_long(tid)\n if not inspect.isclass(exctype):\n exctype = type(exctype)\n res = ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, ctypes.py_object(exctype))\n if res == 0:\n raise ValueError(\"invalid thread id\")\n elif res != 1:\n # \"\"\"if it returns a number greater than one, you're in trouble,\n # and you should call it again with exc=NULL to revert the effect\"\"\"\n ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, None)\n raise SystemError(\"PyThreadState_SetAsyncExc failed\")",
"def _async_raise(self,tid, exctype):\r\n tid = ctypes.c_long(tid)\r\n if not inspect.isclass(exctype):\r\n exctype = type(exctype)\r\n res = ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, ctypes.py_object(exctype))\r\n if res == 0:\r\n raise ValueError(\"invalid thread id\")\r\n elif res != 1:\r\n # \"\"\"if it returns a number greater than one, you're in trouble,\r\n # and you should call it again with exc=NULL to revert the effect\"\"\"\r\n ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, None)\r\n raise SystemError(\"PyThreadState_SetAsyncExc failed\")",
"def exception(self):\n raise Exception(\"Exception test\")",
"def _async_raise(tid, exctype):\r\n # tid = ctypes.c_long(tid)\r\n if not inspect.isclass(exctype):\r\n exctype = type(exctype)\r\n ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, ctypes.py_object(exctype))\r\n # res = ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, ctypes.py_object(exctype))\r\n # if res == 0:\r\n # raise ValueError(\"invalid thread id\")\r\n # elif res != 1:\r\n # ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, None)\r\n # raise SystemError(\"PyThreadState_SetAsyncExc failed !\")\r",
"def _async_raise(self, tid, exctype):\r\n tid = ctypes.c_long(tid)\r\n if not inspect.isclass(exctype):\r\n exctype = type(exctype)\r\n res = ctypes.pythonapi.PyThreadState_SetAsyncExc(tid,\r\n ctypes.py_object(\r\n exctype))\r\n if res == 0:\r\n raise ValueError(\"invalid thread id\")\r\n elif res != 1:\r\n ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, None)\r\n raise SystemError(\"PyThreadState_SetAsyncExc failed\")",
"def _async_raise(self, tid, exctype):\n tid = ctypes.c_long(tid)\n\n if not inspect.isclass(exctype):\n exctype = type(exctype)\n\n res = ctypes.pythonapi.PyThreadState_SetAsyncExc(\n tid, ctypes.py_object(exctype))\n\n if res == 0:\n raise ValueError(\"invalid thread id\")\n\n elif res != 1:\n # \"\"\"if it returns a number greater than one, you're in trouble,\n # and you should call it again with exc=NULL to revert the effect\"\"\"\n ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, None)\n raise SystemError(\"PyThreadState_SetAsyncExc failed\")",
"def _async_raise(self, tid, exctype): \n tid = ctypes.c_long(tid) \n if not inspect.isclass(exctype): \n exctype = type(exctype) \n res = ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, ctypes.py_object(exctype)) \n if res == 0: \n raise ValueError(\"invalid thread id\") \n elif res != 1: \n # \"\"\"if it returns a number greater than one, you're in trouble, \n # and you should call it again with exc=NULL to revert the effect\"\"\" \n ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, None) \n raise SystemError(\"PyThreadState_SetAsyncExc failed\")",
"def test_exception_execution(self):\r\n a_thread = workerthread.WorkerThread(exception_queue=self.exception_queue,\r\n return_queue=self.message_queue,\r\n target=self.sample_exception_function, args=(1, 2))\r\n a_thread.start()\r\n a_thread.join()\r\n exc_type, exc = self.exception_queue.get()\r\n self.assertTrue(isinstance(exc, Exception))",
"def __raise_clean_exception(exc_type, exc_value, exc_traceback):\n if exc_type.__name__ not in dir(napalm.exceptions) and \\\n exc_type.__name__ not in __builtins__.keys():\n epilog = (\"NAPALM didn't catch this exception. Please, fill a bugfix on \"\n \"https://github.com/napalm-automation/napalm/issues\\n\"\n \"Don't forget to include this traceback.\")\n print(epilog)\n raise exc_type, exc_value, exc_traceback"
] |
[
"0.754653",
"0.754653",
"0.73001415",
"0.72129756",
"0.71473205",
"0.7090101",
"0.70805275",
"0.7043147",
"0.6917533",
"0.6837347",
"0.6776692",
"0.67522043",
"0.6658733",
"0.66576314",
"0.6646157",
"0.6635256",
"0.6635256",
"0.6628891",
"0.6615626",
"0.6608355",
"0.66057897",
"0.66001034",
"0.65978247",
"0.65926456",
"0.6587174",
"0.6581194",
"0.6567932",
"0.65604687",
"0.6514528",
"0.64990413"
] |
0.76775926
|
0
|
Add VM to db Return True if this is new.
|
def add_cloudyvent_vm(self, runname, iaasid, nodeid, hostname, service_type, parent, runlogdir, vmlogdir):
cyvm = self.get_by_iaasid(iaasid)
if not cyvm:
cyvm = _CYVM(runname, iaasid, nodeid, hostname, service_type, parent, runlogdir, vmlogdir)
self.session.add(cyvm)
return True
else:
cyvm.hostname = hostname
cyvm.service_type = service_type
cyvm.nodeid = nodeid
cyvm.parent = parent
return False
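A minimal usage sketch, assuming the method lives on a store object that also owns the SQLAlchemy-style session and the get_by_iaasid lookup used above; all argument values are illustrative:

is_new = store.add_cloudyvent_vm(
    runname="run-2024-01",              # illustrative values only
    iaasid="i-0abc123",
    nodeid="node-7",
    hostname="vm7.example.org",
    service_type="worker",
    parent=None,
    runlogdir="/var/log/runs/run-2024-01",
    vmlogdir="/var/log/vms/i-0abc123",
)
store.session.commit()                  # the method only adds/updates; it does not commit
if is_new:
    print("registered a new VM")
else:
    print("updated an existing VM record")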
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def add(self, database):\n id = database.session.query(Workout.id) \\\n .filter(Workout.external_id == self.external_id) \\\n .filter(Workout.source == self.source) \\\n .first()\n if id:\n # don't add if this workout has already been added\n return False\n else:\n try:\n database.session.add(self)\n database.session.flush()\n except exc.SQLAlchemyError as e:\n logger.error(\"Database error: {}\".format(e.args))\n return False\n logger.info(\"Added new workout {}\".format(self))\n self.handle_duplicates(database)\n return True",
"def save(self):\n try:\n db.session.add(self)\n db.session.commit()\n return True\n except SQLAlchemyError as error_message:\n app_logger.error(error_message)\n return False",
"def save(self):\n try:\n db.session.add(self)\n db.session.commit()\n return True\n except SQLAlchemyError as e:\n db.session.rollback()\n logger.error(\"database operation error: \", e)\n return False",
"def insert(self, row):\n if not self.loaded:\n print(\"Database is not loaded\")\n return False\n\n self.rows.append(row)\n return True",
"def insert_vm_migration(vm, hostname):\n IMPL.insert_vm_migration(vm, hostname)",
"def _create_vm(self):\n self._create_instance_in_the_db()\n self.type_data = db.instance_type_get_by_name(None, 'm1.large')\n self.conn.spawn(self.context, self.instance, self.network_info)\n self._check_vm_record()",
"def add(self):\n\n db.session.add(self)\n db.session.commit()",
"def add(self):\n\n db.session.add(self)\n db.session.commit()",
"def add(self, string=str) -> bool:\n try:\n if not self.exists(string):\n self.table[string] = len(self.table)\n return(True)\n else:\n return(False)\n except Exception as error:\n print(f\"Error: self.add({string}) -> {error}\")",
"def add(self, credential: Credential) -> bool:\n try:\n result = self._coll.insert_one(credential.to_dict())\n except DuplicateKeyError:\n logger.warning(f'A credential with credential_id {credential.credential_id} already exists in the db')\n return False\n _success = result.inserted_id == credential.obj_id\n logger.debug(f'Added credential {credential} to the db: {_success}')\n return _success",
"def insert(self):\n ret = True\n\n schema = self.schema\n fields = self.depopulate(False)\n\n q = self.query\n q.set_fields(fields)\n pk = q.insert()\n if pk:\n fields = q.fields\n fields[schema.pk.name] = pk\n self._populate(fields)\n\n else:\n ret = False\n\n return ret",
"def add(self, database):\n if not database.session:\n logger.error(\"no database session\")\n return False\n\n id = database.session.query(Sport.id).filter(\n Sport.name == self.name).first()\n if id:\n # this sport already exists\n self.id = id[0]\n return False\n else:\n # create a new one and flush it immediately in order to update the id\n try:\n database.session.add(self)\n database.session.flush()\n except exc.SQLAlchemyError as e:\n logger.error(\"Database error: {}\".format(e.args))\n return False\n logger.info(\"Added new sport '{}' id {}\".format(self.name, self.id))\n return True",
"def insert_and_check(self, item) -> bool:\n with Monitor.acquire(self):\n if item in self:\n return False\n self.add(item)\n return True",
"def insert(self, val):\n if val in self.record:\n return False\n \n self.record[val] = len(self.data)\n self.data.append(val)\n return True",
"def insert(self):\n self.getDbRecord().insert()\n\n return",
"def insert(self, val: int) -> bool:\n if self.store_dict.get(val) != None:\n return False\n self.store_list.append(val)\n self.store_dict[val] = len(self.store_list) - 1\n return True",
"def has_add_permission(self, request, instance=None):\n return False",
"def insert(self, val):\n if val not in self.table.keys():\n self.table[val] = len(self.ls)\n self.ls.append(val)\n return True\n return False",
"def add(self, server):\n if server not in self.servers:\n self.servers.append(server)\n self.sync()\n return True\n return False",
"def save(self):\n ret = False\n\n # we will only use the primary key if it hasn't been modified\n pk = None\n if self.schema.pk.name not in self.modified_fields:\n pk = self.pk\n\n if pk:\n ret = self.update()\n else:\n ret = self.insert()\n\n return ret",
"def AddTransaction(self, tx):\n if BC.Default() is None:\n return False\n\n if tx.Hash.ToBytes() in self.MemPool.keys():\n return False\n\n if BC.Default().ContainsTransaction(tx.Hash):\n return False\n\n if not tx.Verify(self.MemPool.values()):\n logger.error(\"Verifying tx result... failed\")\n return False\n\n self.MemPool[tx.Hash.ToBytes()] = tx\n\n return True",
"def add_custom_command(cmd: CustomCommand) -> bool:\n\n if get_custom_command(cmd.channel, cmd.name) is not None:\n return False\n\n session = get_database_session()\n session.add(cmd)\n session.commit()\n return True",
"def add_object(self, object_to_be_added):\n new_mapping = Map.add_object(self.id, object_to_be_added)\n if new_mapping:\n object_to_be_added.save()\n new_mapping.ref_id = object_to_be_added.id\n return True\n else:\n return False",
"def insert(self, val):\n new_item = False\n if val not in self.ds:\n self.ds.add(val)\n self.keys.append(val)\n new_item = True\n return new_item",
"def add_record(self):\n if not self.record_exists(self.args.date):\n record = self.create_record()\n self.records.append(record)\n self.write_json_file(self.records_file, self.records)\n return True\n return False",
"def check_db_for_vid(self):\n with db.cursor() as cursor:\n if self.videoId in db.\n pass",
"def inserted(self):\n return True",
"def insert(self, val):\n if val in self.dic:\n return False\n else:\n self.data.append(val)\n self.dic[val]=len(self.data)-1\n return True",
"def add_store_to_db(self, connexion):\r\n # initiate a cursor\r\n cursor = connexion.cursor()\r\n # check if the store already exists in database\r\n cursor.execute(\"\"\"SELECT name FROM Store\r\n WHERE name = %s\"\"\", (self.name, ))\r\n rows = cursor.fetchall()\r\n if not rows:\r\n # insert data\r\n cursor.execute(\"\"\"INSERT INTO Store (name)\r\n VALUES (%(name)s)\"\"\", self.__dict__)\r\n # commit the changes\r\n connexion.commit()",
"def add(self, workout, database):\n if not database.session:\n logger.error(\"no database session\")\n return False\n\n self.cleanup_sportstype(workout)\n self.associate_sport(database)\n id = database.session.query(SportsType.id).filter(\n SportsType.name == self.name).first()\n if id:\n self.id = id[0]\n return False\n else:\n try:\n database.session.add(self)\n database.session.flush()\n except exc.SQLAlchemyError as e:\n logger.error(\"Database error: {}\".format(e.args))\n return False\n logger.info(\"Adding new sportstype '{}' id {} of sport {}\".format(\n self.name, self.id, self.sport_id))\n return True"
] |
[
"0.6002193",
"0.58125263",
"0.57646483",
"0.5651389",
"0.54877454",
"0.5483257",
"0.5449598",
"0.5449598",
"0.5433799",
"0.5397175",
"0.53970116",
"0.53811187",
"0.53527033",
"0.5349176",
"0.53205526",
"0.53083843",
"0.52993494",
"0.52819586",
"0.52705383",
"0.52662474",
"0.52647305",
"0.52445614",
"0.52406245",
"0.5235892",
"0.5225556",
"0.5212656",
"0.519865",
"0.51780415",
"0.5171348",
"0.51668364"
] |
0.628304
|
0
|
Parse ingredients The ingredients come in from the form as normal key value pairs. This needs to be changed so that the ingredients can be stored as the following data structure. [ {
|
def _parse_ingredients(recipe):
ingredients = []
group_counter = 1
counter = 0
filtered_dict = {k: v for k, v in recipe.items() if "ingredient" in k}
ingredient = {}
for key, value in filtered_dict.items():
if not value:
continue
elif key == f"ingredient{group_counter}":
ingredient["name"] = value
elif key == f"ingredientQuantity{group_counter}":
ingredient["quantity"] = value
elif key == f"ingredientMeasurement{group_counter}":
ingredient["measurement"] = value
counter += 1
if counter % 3 == 0:
ingredients.append(ingredient)
ingredient = {}
group_counter += 1
return ingredients
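A worked example of the transformation, assuming the ingredientN / ingredientQuantityN / ingredientMeasurementN field-naming convention that the loop above matches; the sample values are hypothetical:

form_data = {
    "recipeName": "Chicken stir fry",   # keys without "ingredient" in them are filtered out
    "ingredient1": "chicken breast",
    "ingredientQuantity1": "2",
    "ingredientMeasurement1": "lbs",
    "ingredient2": "soy sauce",
    "ingredientQuantity2": "3",
    "ingredientMeasurement2": "tbsp",
}

assert _parse_ingredients(form_data) == [
    {"name": "chicken breast", "quantity": "2", "measurement": "lbs"},
    {"name": "soy sauce", "quantity": "3", "measurement": "tbsp"},
]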
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def recipe_parser(form_data, user):\n\n # The way the ingredients and steps data is structured in the form data is not the\n # structure required for the database so additional processing is required. The\n # `ingredients` and `steps` lists will be created and added to the recipe\n recipe[\"ingredients\"] = _parse_ingredients(recipe)\n recipe[\"steps\"] = _parse_steps(recipe)\n\n # Additional fields are created to store additional information about the recipe\n recipe[\"created_by\"] = user\n recipe[\"created_at\"] = datetime.now()\n recipe[\"views\"] = 0\n recipe[\"likes\"] = 0\n\n # As the structure of the ingredients have changed, the initial data contained\n # in the form needs to be removed\n recipe = _strip_excess_data(recipe)\n\n return recipe",
"def parse_ingredients(inp):\n # Standardize to uppercase\n # inp = inp.upper()\n parsed, _ = _parse(inp)\n return parsed",
"def test_parse_ingredients(self):\n pass",
"def get_ingredients_to_add(cls, new_count, requestform):\n\n ingredients_to_add = {}\n\n for i in range(1, (new_count+1)):\n #the range refers to the range of integers that appear in the ingredient names\n ingredients_to_add[i] = []\n for r in requestform:\n\n # looks for entries that end with an integer\n if r[0:3] == 'ite' or r[0:3] == 'pre' or r[0:3] == 'mea' or r[0:3] == 'qty':\n\n # checks if the last character(s) of an entry equals the integer we're using\n # if yes, appends key value pair in our ingredients dictionary\n # sorts the value so we know how to index the list later\n\n try:\n int(r[-2:])\n if int(r[-2:]) == i:\n ingredients_to_add[i].append([r, requestform[r]])\n ingredients_to_add[i].sort()\n except Exception:\n if int(r[-1]) == i:\n ingredients_to_add[i].append([r, requestform[r]])\n ingredients_to_add[i].sort()\n\n # creates a new list of ingredients\n # takes out the ingredient heading and unnecessary nested lists\n # (this is because we just want the actual text)\n # appends cleaned up ingredient info to a new list\n # sets new list as the new value in the corresponding dict key\n new_ingredient_list = []\n for x in ingredients_to_add[i]:\n\n del x[0]\n for y in x:\n x = y\n new_ingredient_list.append(x)\n ingredients_to_add[i] = new_ingredient_list\n return ingredients_to_add",
"def parseIngredientList(ingredients):\n\n try: \n # Flour kludge\n for i, item in enumerate(ingredients):\n ingredients[i] = re.sub('all purpose','all-purpose',item)\n\n # 1/3 amount kludge (weird NYT bug)\n firstParse = P.parseIngredients(ingredients)\n one_thirds = []\n for i,item in enumerate(firstParse):\n if item.amount==1/3.0:\n one_thirds.append(i)\n \n # Write the list of ingredients to a file\n ingredientFile = \"./NYT/ingredients.txt\"\n with open(ingredientFile,'w') as outfile:\n for item in ingredients:\n # Unicode kludge\n item = replaceFractions(item)\n line = str(item.encode(\"utf-8\", errors='ignore').decode(\"utf-8\") + \"\\n\")\n outfile.writelines(line)\n\n # Use the trained model to predict tags for the list of ingredients\n result = os.system(\"python ./NYT/bin/parse-ingredients.py ./NYT/ingredients.txt > ./NYT/results.txt\")\n if result != 0:\n print('System error. Error code: {0}'.format(result))\n \n # Convert result to json format\n result = os.system(\"python ./NYT/bin/convert-to-json.py ./NYT/results.txt > ./NYT/results.json\")\n if result != 0:\n print('System error. Error code: {0}'.format(result))\n \n # Return the json format\n json_obj = json.load(open('./NYT/results.json'))\n\n # Kludge to fix 1/3 in NYT\n for i, item in enumerate(json_obj):\n if i in one_thirds:\n json_obj[i]['qty'] = '1/3'\n except:\n print((sys.exc_info()[0], sys.exc_info()[1]))\n json_obj = []\n\n return json_obj",
"def get_ingredients(self):\n try:\n ingredients = self.soup.find_all(class_=[\"recipe-table\", \"table-list-header\"])\n ingredients_list = []\n for elem in ingredients:\n if elem.name == \"h4\" and elem.text.strip() != \"\":\n ingredients_list.append(\"\\n\\n\" + elem.text.strip() + \"\\n\\n\")\n elif elem.name == \"table\":\n rows = text_maker.handle(str(elem)).split(\"\\n\")\n rows = \"\\n\".join(\"* \" + r for r in rows if r.strip())\n ingredients_list.append(rows)\n self.ingredients = \"\".join(ingredients_list).strip()\n except Exception:\n current_app.logger.error(f\"Could not extract ingredients: {traceback.format_exc()}\")\n self.ingredients = \"\"",
"def get_ingredients(self):\n try:\n ingredients = self.soup.find_all(class_=\"ingredient-group\")\n out = []\n for i in ingredients:\n # Convert h3 into div\n for x in i.find_all(\"h3\"):\n x.name = \"div\"\n x.string = x.text + \":\"\n out.append(text_maker.handle(str(i)).strip())\n self.ingredients = \"\\n\\n\".join(i for i in out)\n except Exception:\n current_app.logger.error(f\"Could not extract ingredients: {traceback.format_exc()}\")\n self.ingredients = \"\"",
"def ingredients(id):\n if id == \"\":\n return \"None\"\n else:\n try:\n return BreweryDb.beer(id + \"/ingredients\")['data']\n except Exception:\n return [{\"category\": \"\", \"name\": \"\"}]",
"def format_ingredients(self, ingredients):\n\t\tto_replace = {'cointreau': 'triple sec', '&': 'and'}\n\t\tupdated_ingredients = []\n\t\tfor i in ingredients:\n\t\t\ti = i.lower().strip()\n\t\t\tif i in to_replace:\n\t\t\t\tingredient = to_replace[i]\n\t\t\telse:\n\t\t\t\tingredient = i\n\t\t\tupdated_ingredients.append(ingredient)\n\t\treturn updated_ingredients",
"def clean_ingredients(self):\n ingredients = self.cleaned_data['ingredients']\n if len(ingredients) < 1:\n v_err('no_ing')\n return ingredients",
"def _create_ingredient(self, data):\n return Ingredient(**data)",
"def get_recipe_ingredients(soup_recipe):\n ingredients_list = soup_recipe.find_all(\"li\", {\"itemprop\": \"ingredients\"})\n ingredients = []\n for ing in ingredients_list:\n ingredients.append(ing.get_text().split('\\n')[0])\n return ingredients",
"def getIngredients():\n ingredients = ['Whiskey', 'Tequila', 'Vodka', 'Blue Curacao', 'Orange Juice',\n 'Pineapple Juice', 'Cranberry Juice', 'Sour Mix']\n return ingredients",
"def _format_recipe(recipe):\n # Some fields are not consistently returned from the API, make sure they exist first.\n if \"analyzedInstructions\" in recipe.keys() and len(\n recipe[\"analyzedInstructions\"]\n ):\n step_instructions = recipe[\"analyzedInstructions\"][0][\"steps\"]\n else:\n step_instructions = None\n\n return {\n \"spoonacular_id\": recipe[\"id\"],\n \"dish_name\": recipe[\"title\"],\n \"servings\": recipe.get(\"servings\", None),\n \"image\": recipe.get(\"image\", None),\n \"is_vegetarian\": recipe[\"vegetarian\"],\n \"is_vegan\": recipe[\"vegan\"],\n \"is_gluten_free\": recipe[\"glutenFree\"],\n \"is_dairy_free\": recipe[\"dairyFree\"],\n \"cook_time_min\": recipe.get(\"cookingMinutes\", None),\n \"prep_time_min\": recipe.get(\"preparationMinutes\", None),\n \"spoonacular_score\": recipe[\"spoonacularScore\"],\n \"ingredients\": [\n ingredient[\"originalName\"]\n for ingredient in recipe[\"extendedIngredients\"]\n ],\n \"instructions\": recipe.get(\"instructions\", None),\n \"step_instructions\": step_instructions,\n }",
"def get_recipe_ingredients(soup_recipe):\n ingredients_list = soup_recipe.find_all(\"li\", {\"itemprop\": \"ingredients\"})\n ingredients = []\n for ing in ingredients_list:\n ingredients.append(ing.get_text())\n return ingredients",
"def convert_ingredients(total_ingredients):\n final_ingredients = []\n\n for key, value in total_ingredients.items():\n unit = value[1].pop()\n if unit == 'N/A':\n final_ingredients.append([key, value[0], None])\n else:\n final_ingredients.append([key, round((value[0]/gram_conversions[unit]), 2), unit])\n\n return sorted(final_ingredients)",
"def clean_ingredients_file(dirty):\n clean_file = 'clean_ingredients.json'\n all_ingredients = []\n with open(dirty) as infile:\n line = infile.readline()\n while line:\n split_line = line.split(',')\n for ing in split_line:\n ing = ing.strip(' \\n.')\n all_ingredients.append(ing)\n line = infile.readline()\n with open(clean_file, 'w') as outfile:\n json.dump(all_ingredients, outfile)\n return clean_file",
"def consumeData(self, data):\n ret = []\n\n soup = BeautifulSoup(StringIO(data))\n ingredientses = soup.find_all(None, itemprop='ingredients')\n for ing in ingredientses:\n separateByClass(soup, ing, \"ingredient\")\n separateByTag(soup, ing, ['br', 'tr', 'li'])\n instructionses = soup.find_all(None, itemprop=\"recipeInstructions\")\n for ins in instructionses:\n separateByClass(soup, ins, \"instruction\")\n separateByTag(soup, ins, ['br', 'tr', 'li'])\n workingDocument = StringIO(soup.encode('utf-8'))\n\n items = microdata.get_items(workingDocument)\n for i in items:\n for typ in i.itemtype:\n if typ.string == MICROFORMAT_RECIPE:\n ret.append(i.json())\n break\n return map(json.loads, ret)",
"def parseRemainingVariables(json_response, recipe):\n ready_in_minutes = json_response.get(\"readyInMinutes\")\n recipe.ready_in_minutes = int(ready_in_minutes)\n\n servings = json_response.get(\"servings\")\n recipe.servings = int(servings)\n\n vegetarian = json_response.get(\"vegetarian\")\n recipe.vegetarian = bool(vegetarian)\n\n source_url = json_response.get(\"sourceUrl\")\n recipe.source_url = str(source_url)\n\n aggregate_likes = json_response.get(\"aggregateLikes\")\n recipe.aggregate_likes = int(aggregate_likes)\n\n health_score = json_response.get(\"healthScore\")\n recipe.health_score = int(health_score)\n\n ingredients = []\n\n #parse all the ingredients and add them to the list of ingredients as a ingredient object\n if json_response.get(\"extendedIngredients\") is not None:\n for ingr in json_response.get(\"extendedIngredients\"):\n ingredient_name = ingr.get(\"name\")\n ingredient_id = ingr.get(\"id\")\n amount = ingr.get(\"amount\")\n unit = ingr.get(\"unit\")\n\n ingredient = Ingredient(ingredient_name, ingredient_id, amount, unit)\n ingredients.append(ingredient)\n\n recipe.ingredients = ingredients\n\n instructions = []\n\n #parse all the instructions and add them as instruction objects to the instructions list\n if json_response.get(\"analyzedInstructions\") is not None:\n\n if len(json_response.get(\"analyzedInstructions\")) != 0:\n\n if json_response.get(\"analyzedInstructions\")[0].get(\"steps\") is not None:\n\n for instr in json_response.get(\"analyzedInstructions\")[0].get(\"steps\"):\n instruction_number = instr.get(\"number\")\n step = instr.get(\"step\")\n\n ingred = []\n\n for json_ingred in instr.get(\"ingredients\"):\n for saved_ingred in ingredients:\n if saved_ingred.ingredient_id == json_ingred.get(\"id\"):\n ingred.append(saved_ingred)\n\n equipments = []\n\n for equip in instr.get(\"equipment\"):\n equipment_name = equip.get(\"name\")\n equipment_id = equip.get(\"id\")\n\n equipment = Equipment(equipment_name, equipment_id)\n\n equipments.append(equipment)\n\n instruction = Instruction(instruction_number, step, ingred, equipments)\n\n instructions.append(instruction)\n\n recipe.instructions = instructions\n\n return recipe",
"def create_ingredient(client, ing):\n return client.post('/ingredients/create', data=dict(\n id=ing['id'], name=ing['name'], portion_size = ing['portion_size'],\n portion_size_unit = ing['portion_size_unit'], protein = ing['protein'],fat = ing['fat'], carbs = ing['carbs'],\n calories = ing['calories'], price = ing['price'], price_size = ing['price_size'],\n price_size_unit = ing['price_size_unit'], tag = ing['tag'], notes = ing['notes']\n ), follow_redirects=True)",
"def recalculate_ingredients(self, ingredients, factor):\n result = dict()\n for ing, val in ingredients.items():\n result[ing] = val / factor\n\n return result",
"def get_beer_ingredients(beer):\n beer_ingredients = []\n for ing in beer['ingredients']:\n for item in beer['ingredients'][ing]:\n if 'name' in item:\n if item['name'] not in beer_ingredients:\n beer_ingredients.append(item['name'])\n\n return beer_ingredients",
"def _strip_excess_data(recipe):\n for key in list(recipe.keys()):\n if key == \"ingredients\" or key == \"steps\":\n continue\n elif \"ingredient\" in key or \"step\" in key:\n del recipe[key]\n\n return recipe",
"def from_dict(cls, dikt) -> 'IngredientObjectItems':\n return util.deserialize_model(dikt, cls)",
"def ingredient_db():\n # type: () -> List[Text]\n return [\"abricot\",\n \"banane\",\n \"cassis\",\n \"cerise\",\n \"citron\",\n \"clémentine\",\n \"coing\",\n \"fraise\",\n \"framboise\",\n \"groseille\",\n \"mirabelle\",\n \"mûre\",\n \"myrtille\",\n \"nectarine\",\n \"orange\",\n \"pamplemousse\",\n \"pomelo\",\n \"pêche\",\n \"poire\",\n \"pomme\",\n \"prune\",\n \"pruneau\",\n \"raisin\",\n \"rhubarbe\",\n \"ananas\",\n \"figue\",\n \"fruit de la passion\",\n \"goyave\",\n \"grenade\",\n \"kaki\",\n \"kiwi\",\n \"kumquat\",\n \"litchi\",\n \"mangue\",\n \"melon\",\n \"papaye\",\n \"pastèque\",\n \"vanille\",\n \"amande\",\n \"datte\",\n \"noisette\",\n \"artichaut\",\n \"aubergine\",\n \"asperge\",\n \"avocat\",\n \"betterave\",\n \"blette\",\n \"brocoli\",\n \"banane plantain\",\n \"carotte\",\n \"cardon\",\n \"céleri rave\",\n \"céleri branche\",\n \"champignon\",\n \"champignon de paris\",\n \"chou blanc\",\n \"chou rouge\",\n \"chou de bruxelles\",\n \"chou-fleur\",\n \"citrouille\",\n \"concombre\",\n \"courge\",\n \"courgette\",\n \"crosne\",\n \"echalote\",\n \"epinard\",\n \"endive\",\n \"fenouil\",\n \"haricot vert\",\n \"haricot\",\n \"navet\",\n \"oignon\",\n \"oseille\",\n \"panais\",\n \"pâtisson\",\n \"petit pois\",\n \"poireau\",\n \"poivron\",\n \"potiron\",\n \"radis rouge\",\n \"rutabaga\",\n \"navet\",\n \"salade \",\n \"salsifis\",\n \"tomate\",\n \"topinambour\",\n \"maïs\"]",
"def from_model(ingredient):\n\n # Only include values that are actually set, otherwise\n # wtforms will not be happy with None values\n return {\n k: v\n for k, v in [\n ('amount', ingredient.amount),\n ('measure', ingredient.measure),\n ('item', ingredient.item)]\n if v\n }",
"def add_ingredient_to_recipe(cls, new_count, ingredients_dict, recipe_id):\n\n for i in range(1, (new_count+1)):\n item = ingredients_dict[i][0]\n measure = ingredients_dict[i][1]\n prepnotes = ingredients_dict[i][2]\n qty = ingredients_dict[i][3]\n\n new_ingredient = Ingredient(recipe_id=recipe_id, item=item, quantity=qty,\n measure=measure, prep_notes=prepnotes)\n\n db.session.add(new_ingredient)\n db.session.commit()\n print \"You successfully added ingredients!\"",
"def process(raw):\n entry = { }\n cooked = [ ]\n\n for line in raw:\n line = line.strip()\n if len(line) == 0 or line[0]==\"#\" :\n continue\n parts = line.split(';')\n if len(parts) == 3:\n entry[\"description\"] = parts[0].strip() #adding key and values to the dict\n entry[\"long\"] = parts[1].strip()\n entry[\"lat\"] = parts[2].strip()\n cooked.append(entry) #add this dict entry into the array\n entry = { }\n continue\n else:\n raise ValueError(\"Trouble wiht line: '{}'\\n\".format(line))\n \n return cooked #returning an array of dicts",
"def from_model(ingredient_group):\n\n return {\n 'title': ingredient_group.title,\n 'ingredients': [\n IngredientForm.from_model(i)\n for i in ingredient_group.ingredients],\n }",
"def slot_mappings(self):\n # type: () -> Dict[Text: Union[Dict, List[Dict]]]\n\n return {\"ingredient\": self.from_entity(entity=\"ingredient\",\n not_intent=\"greet\")}"
] |
[
"0.70173126",
"0.69355285",
"0.65697306",
"0.62525654",
"0.6195944",
"0.6152212",
"0.6131279",
"0.598573",
"0.59830916",
"0.5981633",
"0.5865807",
"0.58411986",
"0.58324665",
"0.580999",
"0.56558895",
"0.5591535",
"0.5525932",
"0.5497969",
"0.54900944",
"0.5484163",
"0.54178256",
"0.5416643",
"0.541058",
"0.5382363",
"0.53603345",
"0.5329191",
"0.5302655",
"0.5289617",
"0.52413034",
"0.5237037"
] |
0.79610616
|
0
|
Parse steps The steps come in from the form as normal key value pairs. This needs to be changed so that the ingredients can be stored as the following data structure. [ "Dice chicken", "Cook chicken until brown" ]
|
def _parse_steps(recipe):
steps = []
filtered_dict = {k: v for k, v in recipe.items() if "step" in k}
for key, value in filtered_dict.items():
if value:
steps.append(value)
return steps
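A worked example, again assuming the form fields arrive as step1, step2, ... with possibly empty trailing fields; the sample values are hypothetical:

form_data = {
    "step1": "Dice chicken",
    "step2": "Cook chicken until brown",
    "step3": "",                        # empty fields are skipped
    "recipeName": "Chicken stir fry",   # keys without "step" in them are filtered out
}

assert _parse_steps(form_data) == ["Dice chicken", "Cook chicken until brown"]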
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def _parse_ingredients(recipe):\n ingredients = []\n group_counter = 1\n counter = 0\n\n filtered_dict = {k: v for k, v in recipe.items() if \"ingredient\" in k}\n ingredient = {}\n\n for key, value in filtered_dict.items():\n if not value:\n continue\n\n elif key == f\"ingredient{group_counter}\":\n ingredient[\"name\"] = value\n\n elif key == f\"ingredientQuantity{group_counter}\":\n ingredient[\"quantity\"] = value\n\n elif key == f\"ingredientMeasurement{group_counter}\":\n ingredient[\"measurement\"] = value\n\n counter += 1\n if counter % 3 == 0:\n ingredients.append(ingredient)\n ingredient = {}\n group_counter += 1\n\n return ingredients",
"def recipe_parser(form_data, user):\n\n # The way the ingredients and steps data is structured in the form data is not the\n # structure required for the database so additional processing is required. The\n # `ingredients` and `steps` lists will be created and added to the recipe\n recipe[\"ingredients\"] = _parse_ingredients(recipe)\n recipe[\"steps\"] = _parse_steps(recipe)\n\n # Additional fields are created to store additional information about the recipe\n recipe[\"created_by\"] = user\n recipe[\"created_at\"] = datetime.now()\n recipe[\"views\"] = 0\n recipe[\"likes\"] = 0\n\n # As the structure of the ingredients have changed, the initial data contained\n # in the form needs to be removed\n recipe = _strip_excess_data(recipe)\n\n return recipe",
"def test_parse_ingredients(self):\n pass",
"def parse_parliament_steps(self, response):\n inquiry_item = response.meta['inquiry_item']\n\n phases = INQUIRY.PHASES.xt(response)\n\n for phase in phases:\n # Create phase if we don't have it yet\n phase_item, created = Phase.objects.get_or_create(\n title=phase['title'])\n if created:\n log.msg(u\"Created Phase {}\".format(\n green(u'[{}]'.format(phase_item.title))),level=log.DEBUG)\n\n # Create steps\n for step in phase['steps']:\n step_item, created = Step.objects.update_or_create(\n title=step['title']['text'],\n sortkey=step['sortkey'],\n date=step['date'],\n protocol_url=step['protocol_url'],\n law=inquiry_item,\n phase=phase_item,\n source_link=response.url\n )\n step_item.save()\n if created:\n log.msg(u\"Created Step {}\".format(\n green(u'[{}]'.format(step_item.title))),level=log.DEBUG)\n\n # Save statements for this step, if applicable\n if 'statements' in step['title']:\n for stmnt in step['title']['statements']:\n # Find the person\n pq = Person.objects.filter(\n source_link__endswith=stmnt['person_source_link'])\n if pq.exists() and pq.count() == 1:\n person_item = pq.first()\n st_data = {\n 'speech_type': stmnt['statement_type'],\n 'protocol_url': stmnt['protocol_link']\n }\n st_item, st_created = Statement.objects.update_or_create(\n index=stmnt['index'],\n person=person_item,\n step=step_item,\n defaults=st_data)\n if st_created:\n log.msg(u\"Created Statement by {} on {}\".format(\n green(\n u'[{}]'.format(person_item.full_name)),\n step_item.date),level=log.DEBUG)\n else:\n log.msg(u\"Updated Statement by {} on {}\".format(\n green(\n u'[{}]'.format(person_item.full_name)),\n step_item.date),level=log.DEBUG)\n else:\n # We can't save statements if we can't find the\n # Person\n self.logger.warning(\n red(u\"Skipping Statement by {}: Person with source_link {} does{} exist{}\").format(\n green(\n u'[{}]'.format(stmnt['person_name'])),\n blue(\n \"[{}]\".format(stmnt['person_source_link'])),\n red(\"{}\").format(\n \"\" if pq.exists() else \" not\"),\n \"\" if pq.count() > 1 else \", but {} persons matching found!\".format(\n pq.count())\n ))\n continue",
"def parse_ingredients(inp):\n # Standardize to uppercase\n # inp = inp.upper()\n parsed, _ = _parse(inp)\n return parsed",
"def test__parse_steps():\n verification_screen_step_0 = VerificationScreenStep(\n required = True,\n title = 'Yukari',\n step_type = VerificationScreenStepType.text_input,\n values = ['kisaki'],\n )\n \n verification_screen_step_1 = VerificationScreenStep(\n required = False,\n title = 'Yurica',\n step_type = VerificationScreenStepType.rules,\n values = None,\n )\n \n for input_data, expected_output in (\n ({}, None),\n ({'form_fields': None}, None),\n ({'form_fields': []}, None),\n (\n {'form_fields': [verification_screen_step_0.to_data(defaults = True)]},\n (verification_screen_step_0, ),\n ),\n (\n {\n 'form_fields': [\n verification_screen_step_0.to_data(defaults = True),\n verification_screen_step_1.to_data(defaults = True),\n ],\n },\n (verification_screen_step_0, verification_screen_step_1),\n ),\n ):\n output = parse_steps(input_data)\n vampytest.assert_eq(output, expected_output)",
"def parse_steps(self, response):\n response_link = []\n inquiry_item = response.meta['inquiry_item']\n\n # Get or created a default-phase for inquiries, because there are no phases in\n # simple inquiries.\n phase_item, created = Phase.objects.get_or_create(\n title='default_inqu')\n if created:\n log.msg(u\"Created Phase {}\".format(\n green(u'[{}]'.format(phase_item.title))),level=log.DEBUG)\n\n steps = INQUIRY.STEPS.xt(response)\n\n for step in steps:\n if \"Schriftliche Beantwortung\" in step[\"title\"]:\n response_link = INQUIRY.RESPONSE_LINK.xt(response)\n\n for step in steps:\n step_item, created = Step.objects.update_or_create(\n title=step['title'],\n sortkey=step['sortkey'],\n date=step['date'],\n protocol_url=step['protocol_url'],\n law=inquiry_item,\n phase=phase_item,\n source_link=response.url\n )\n step_item.save()\n if response_link:\n return response_link\n else:\n return",
"async def next_step(\n self, step_context: WaterfallStepContext\n ) -> DialogTurnResult:\n step_context.values[\"input\"] = step_context.result\n user_input = step_context.values[\"input\"]\n\n # TODO: remove this notification, it is for demo purposes only.\n await step_context.context.send_activity(\n MessageFactory.text(f\"[In this step, we will use Recognizers-Text to learn the user intention.]\")\n )\n # -------------------------------------------------------------\n results = parse_all(user_input, DEFAULT_CULTURE)\n # Flatten results\n results = [item for sublist in results for item in sublist]\n\n # ------------\n # parse results to find the data we need:\n has_time_stamp = False\n has_price = False\n has_quantity = False\n amount = None\n\n # temporary lists\n list_number = []\n list_currency = []\n list_datetime = []\n value_key = \"value\"\n\n for i in results:\n # in each pass, according to type_name, append to a list, or several.\n type_name = i.type_name\n if type_name == Constants.currency_type_name:\n has_price = True\n list_currency.append(i.resolution.get(value_key))\n if type_name == Constants.datetime_type_name or type_name == Constants.date_type_name:\n has_time_stamp = True\n list_datetime.append(i.resolution.get(\"values\", \"\")[0][value_key])\n if type_name == Constants.number_type_name:\n if i.resolution.get(value_key):\n has_quantity = True\n value = i.resolution.get(value_key)\n else:\n value = i.text\n has_quantity = False\n\n list_number.append(value)\n\n # this contains the whole collection of stocks of the user.\n # in the init method, it should populate the holdings using the data text file\n self.portfolio = Portfolio()\n\n # this represents a position taken with an investment instrument.\n # usually, there are many open at the same time.\n holding = Holding()\n\n # represents the intermediary broker\n self.broker = Broker()\n\n # for current operation (buy, sell)\n self.operation = Operation()\n\n self.operation.buy = True if ('buy' in user_input or 'Buy' in user_input) else False\n self.operation.sell = True if ('sell' in user_input or 'Sell' in user_input) else False\n\n if self.operation.buy:\n self.operation = BuyOperation()\n self.operation.buy = True\n self.operation.sell = False\n self.operation.type = 'buy'\n\n if self.operation.sell:\n self.operation = SellOperation()\n self.operation.buy = False\n self.operation.sell = True\n self.operation.type = 'sell'\n\n # TODO: we should have a dict or similar with [ticker, company_name]\n # refactor this for other companies\n holding.stock.ticker = 'MSFT' if (\n 'MSFT' in user_input.upper() or 'microsoft' in user_input.lower()) else 'x'\n\n if holding.stock.ticker == 'MSFT':\n holding.stock.company = \"Microsoft\"\n\n if has_time_stamp:\n self.operation.time_stamp = list_datetime[0]\n\n if len(Sets.intersection(list_currency, list_number)) == 1:\n self.operation.price = Sets.intersection(list_currency, list_number)[0]\n holding.quantity = Sets.diff(list_number, list_currency)[0]\n\n if has_quantity and has_price:\n print(\"Quantity: \" + str(holding.quantity))\n amount = int(holding.quantity) * float(self.operation.price)\n self.operation.amount = round(amount, Constants.max_decimals)\n\n print(\"Stock: \" + holding.to_string())\n print(\"Price: $ \" + str(self.operation.price))\n\n if has_time_stamp:\n print(\"TimeStamp: \" + str(self.operation.time_stamp))\n\n if has_quantity and amount:\n print(Constants.separator)\n print(\"OPERATION DETAILS\")\n print(Constants.separator)\n print(\"Operation type: \" + 
self.operation.type)\n print(\"Amount: $ \" + str(amount))\n self.operation.commission = round(amount * self.broker.commission, Constants.max_decimals)\n # tax, over the commission is 0.01 (10%)\n self.operation.tax = round(self.operation.commission * Constants.tax, Constants.max_decimals)\n print(\"Commission: $ \" + str(self.operation.commission))\n print(\"TAX: $ \" + str(self.operation.tax))\n print(Constants.separator)\n print(\"Total: $ \" + str(amount + self.operation.commission + self.operation.tax))\n print(Constants.separator)\n self.operation.quantity = holding.quantity\n self.operation.stock.ticker = holding.stock.ticker\n self.operation.stock.company = holding.stock.company\n self.operation.stock.market = holding.stock.market\n\n str_quantity = str(holding.quantity)\n str_price = \"$ \" + str(self.operation.price)\n str_time_stamp = \" on \" + str(self.operation.time_stamp) if has_time_stamp else \"\"\n\n # TODO: Check if the ticker is in use.\n find_result = any(elem.stock.ticker == holding.stock.ticker for elem in self.portfolio.stocks_owned)\n\n if find_result:\n updated_holding = next((i for i in self.portfolio.stocks_owned if i.stock.ticker == holding.stock.ticker), None)\n a = int(updated_holding.quantity)\n b = int(holding.quantity)\n # TODO: Check if is a buy or sell, the arithmetic logic\n if self.operation.type == 'buy':\n updated_holding.quantity = str(a + b)\n # cash should be decreased by the total cost of the operation\n elif self.operation.type == 'sell':\n # in fact, this should alter the compromised quantity, until the order is executed. Its ok for now.\n updated_holding.quantity = str(a - b)\n # also, the cash should be incremented when selling\n # self.portfolio.cash =\n else:\n self.portfolio.stocks_owned.append(holding)\n # -------------------------------------------------------------\n\n # TODO: Test write the portfolio with new values\n self.portfolio.write_json_data_to_file()\n\n operation_details = \"\"\n if has_quantity and amount:\n commission = round(amount * self.broker.commission, Constants.max_decimals)\n tax = round(commission * Constants.tax, Constants.max_decimals)\n\n operation_details += Constants.separator + \"\\n\"\n operation_details += \"OPERATION DETAILS\" + \"\\n\"\n operation_details += Constants.separator + \"\\n\"\n operation_details += \"Operation type: \" + self.operation.type + \"\\n\"\n operation_details += \"Amount: $ \" + str(amount) + \"\\n\"\n operation_details += \"Commission: $ \" + str(commission) + \"\\n\"\n operation_details += \"TAX: $ \" + str(tax) + \"\\n\"\n operation_details += Constants.separator + \"\\n\"\n operation_details += \"Total: $ \" + str(amount + commission + tax) + \"\\n\"\n operation_details += Constants.separator + \"\\n\"\n\n await step_context.context.send_activity(\n MessageFactory.text(operation_details)\n )\n\n # TODO: Here, we can show how much profit comes from the sale operation.\n query = \"Do you wish to \" + self.operation.type + \" \" + str_quantity + \" \" + holding.stock.ticker + \" stocks at \" + str_price + str_time_stamp + \"?\"\n return await step_context.prompt(\n ConfirmPrompt.__name__,\n PromptOptions(\n prompt=MessageFactory.text(query)\n ),\n )\n\n # if we don't ask for confirmation, we terminate it:\n # return await step_context.end_dialog()",
"def parse_step_info(self, step: str) -> Tuple[str]:\n step_info = re.findall(r'\\[[^\\[\\]]+\\]', step)[0][1:-1].split('/')\n return step_info[0], step_info[1]",
"def do_steps(self):\n steps = self.get_step_conf()\n all_step_config = dict()\n for k, v in steps.items():\n tmp_list = list()\n all_step_config[k] = tmp_list\n start = v[\"Start Value\"]\n end = v[\"End Value\"]\n # special handling of edge length\n if(k == \"Edge Length\"):\n start = self.convert_to_tuple(start)\n end = self.convert_to_tuple(end)\n tmp_list.append(str(start))\n while(start != end):\n start = self.add_edge_length(\n start, self.convert_to_tuple(v[\"Step\"]))\n tmp_list.append(str(start))\n print start\n else:\n tmp_list.append(float(start))\n while float(start) < float(end):\n start = float(start) + float(v[\"Step\"])\n tmp_list.append(start)\n return all_step_config",
"def parse_input(parts):\n\n \"\"\"\n Begin in state A.\n Perform a diagnostic checksum after 6 steps.\n \"\"\"\n turing = {}\n metadata_part = parts[0].split('\\n')\n start_state = metadata_part[0][-2]\n checksum_after = 12302209\n\n metadata = (start_state, checksum_after)\n\n for part in parts[1:]:\n lines = part.split('\\n')\n state = lines[0][-2]\n state_num = int(lines[1][-2])\n # print(\"PART N: \", state, state_num)\n # - Write the value X.\n write_val = int(lines[2][-2])\n move = '>' if lines[3][-6:-1] == 'right' else '<'\n next_state = lines[4][-2]\n turing[(state, state_num)] = (write_val, move, next_state)\n\n state_num = int(lines[5][-2])\n # print(\"PART N: \", state, state_num)\n write_val = int(lines[6][-2])\n move = '>' if lines[7][-6:-1] == 'right' else '<'\n next_state = lines[8][-2]\n turing[(state, state_num)] = (write_val, move, next_state)\n\n # print(turing)\n\n return turing, metadata",
"def getSteps():",
"def convert_input_text(text):\n steps = defaultdict(list)\n predecessors = set()\n for line in text:\n regex = search(r\"Step (.) must be finished before step (.) can begin.\", line)\n # steps[step] = [list of predecessors]\n steps[regex.group(2)].append(regex.group(1))\n predecessors.add(regex.group(1))\n for key in predecessors - set(steps):\n steps[key] = []\n return steps",
"def process_step(self, request, step, form):\n pass",
"def iterate_steps(steps):\n pop = None\n while steps:\n for step, depends in steps.items():\n if depends == []:\n pop = step\n if not pop:\n return\n pop_step(pop, steps)\n yield pop",
"def _format_recipe(recipe):\n # Some fields are not consistently returned from the API, make sure they exist first.\n if \"analyzedInstructions\" in recipe.keys() and len(\n recipe[\"analyzedInstructions\"]\n ):\n step_instructions = recipe[\"analyzedInstructions\"][0][\"steps\"]\n else:\n step_instructions = None\n\n return {\n \"spoonacular_id\": recipe[\"id\"],\n \"dish_name\": recipe[\"title\"],\n \"servings\": recipe.get(\"servings\", None),\n \"image\": recipe.get(\"image\", None),\n \"is_vegetarian\": recipe[\"vegetarian\"],\n \"is_vegan\": recipe[\"vegan\"],\n \"is_gluten_free\": recipe[\"glutenFree\"],\n \"is_dairy_free\": recipe[\"dairyFree\"],\n \"cook_time_min\": recipe.get(\"cookingMinutes\", None),\n \"prep_time_min\": recipe.get(\"preparationMinutes\", None),\n \"spoonacular_score\": recipe[\"spoonacularScore\"],\n \"ingredients\": [\n ingredient[\"originalName\"]\n for ingredient in recipe[\"extendedIngredients\"]\n ],\n \"instructions\": recipe.get(\"instructions\", None),\n \"step_instructions\": step_instructions,\n }",
"def _expand_inputs(step, steps=None):\n if steps is None:\n steps = set()\n\n if 'inputs' in step._kwargs.keys():\n for i in step._kwargs['inputs']:\n steps.update(_expand_inputs(i))\n\n steps.add(step)\n return steps",
"def parse_step(self, transition, prob=None):\n if transition == 0: # Left-Arc\n d=(self.stack[-1],self.stack[-2])\n self.dependencies.append(d)\n self.stack.pop(-2)\n elif transition == 1: # Right-Arc\n d=(self.stack[-2],self.stack[-1])\n self.dependencies.append(d)\n self.stack.pop(-1)\n elif transition == 2: # Shift\n self.stack.append(self.buffer.pop(0))\n self.transitions.append(transition)\n self.probs.append(prob)\n if len(self.buffer) == 0 and len(self.stack) == 1:\n self.finish = True\n self.convert_dep()",
"def step(\n self, actions: ActionDict\n ) -> tuple[\n ObsDict, dict[str, float], dict[str, bool], dict[str, bool], dict[str, dict]\n ]:\n raise NotImplementedError",
"def parseIngredientList(ingredients):\n\n try: \n # Flour kludge\n for i, item in enumerate(ingredients):\n ingredients[i] = re.sub('all purpose','all-purpose',item)\n\n # 1/3 amount kludge (weird NYT bug)\n firstParse = P.parseIngredients(ingredients)\n one_thirds = []\n for i,item in enumerate(firstParse):\n if item.amount==1/3.0:\n one_thirds.append(i)\n \n # Write the list of ingredients to a file\n ingredientFile = \"./NYT/ingredients.txt\"\n with open(ingredientFile,'w') as outfile:\n for item in ingredients:\n # Unicode kludge\n item = replaceFractions(item)\n line = str(item.encode(\"utf-8\", errors='ignore').decode(\"utf-8\") + \"\\n\")\n outfile.writelines(line)\n\n # Use the trained model to predict tags for the list of ingredients\n result = os.system(\"python ./NYT/bin/parse-ingredients.py ./NYT/ingredients.txt > ./NYT/results.txt\")\n if result != 0:\n print('System error. Error code: {0}'.format(result))\n \n # Convert result to json format\n result = os.system(\"python ./NYT/bin/convert-to-json.py ./NYT/results.txt > ./NYT/results.json\")\n if result != 0:\n print('System error. Error code: {0}'.format(result))\n \n # Return the json format\n json_obj = json.load(open('./NYT/results.json'))\n\n # Kludge to fix 1/3 in NYT\n for i, item in enumerate(json_obj):\n if i in one_thirds:\n json_obj[i]['qty'] = '1/3'\n except:\n print((sys.exc_info()[0], sys.exc_info()[1]))\n json_obj = []\n\n return json_obj",
"def _get_install_steps(self):\n\n content = self._get_yaml_content()\n\n return self._fix_install_steps(content['install'])",
"def wizard_process_received_form(form):\n lines = {key.split('_')[1]: value.split('_')[1] for key, value in form.items() if key.startswith(\"line\")}\n # print(lines)\n times = {key.split('_')[1]: value for key, value in form.items() if key.startswith(\"time\")}\n # print(times)\n return {int(value): times[key] for key, value in lines.items()}",
"def generate_steplist(my_factory):\n steps = []\n stepnames = {}\n\n for factory, cmdargs in my_factory.steps:\n cmdargs = cmdargs.copy()\n try:\n step = factory(**cmdargs)\n except:\n print >>sys.stderr, ('error while creating step, factory=%s, args=%s'\n % (factory, cmdargs))\n raise\n name = step.name\n if name in stepnames:\n count = stepnames[name]\n count += 1\n stepnames[name] = count\n name = step.name + ('_%d' % count)\n else:\n stepnames[name] = 0\n step.name = name\n\n #TODO: is this a bug in FileUpload?\n if not hasattr(step, 'description') or not step.description:\n step.description = [step.name]\n if not hasattr(step, 'descriptionDone') or not step.descriptionDone:\n step.descriptionDone = [step.name]\n\n step.locks = []\n steps.append(step)\n\n return steps",
"def parseRemainingVariables(json_response, recipe):\n ready_in_minutes = json_response.get(\"readyInMinutes\")\n recipe.ready_in_minutes = int(ready_in_minutes)\n\n servings = json_response.get(\"servings\")\n recipe.servings = int(servings)\n\n vegetarian = json_response.get(\"vegetarian\")\n recipe.vegetarian = bool(vegetarian)\n\n source_url = json_response.get(\"sourceUrl\")\n recipe.source_url = str(source_url)\n\n aggregate_likes = json_response.get(\"aggregateLikes\")\n recipe.aggregate_likes = int(aggregate_likes)\n\n health_score = json_response.get(\"healthScore\")\n recipe.health_score = int(health_score)\n\n ingredients = []\n\n #parse all the ingredients and add them to the list of ingredients as a ingredient object\n if json_response.get(\"extendedIngredients\") is not None:\n for ingr in json_response.get(\"extendedIngredients\"):\n ingredient_name = ingr.get(\"name\")\n ingredient_id = ingr.get(\"id\")\n amount = ingr.get(\"amount\")\n unit = ingr.get(\"unit\")\n\n ingredient = Ingredient(ingredient_name, ingredient_id, amount, unit)\n ingredients.append(ingredient)\n\n recipe.ingredients = ingredients\n\n instructions = []\n\n #parse all the instructions and add them as instruction objects to the instructions list\n if json_response.get(\"analyzedInstructions\") is not None:\n\n if len(json_response.get(\"analyzedInstructions\")) != 0:\n\n if json_response.get(\"analyzedInstructions\")[0].get(\"steps\") is not None:\n\n for instr in json_response.get(\"analyzedInstructions\")[0].get(\"steps\"):\n instruction_number = instr.get(\"number\")\n step = instr.get(\"step\")\n\n ingred = []\n\n for json_ingred in instr.get(\"ingredients\"):\n for saved_ingred in ingredients:\n if saved_ingred.ingredient_id == json_ingred.get(\"id\"):\n ingred.append(saved_ingred)\n\n equipments = []\n\n for equip in instr.get(\"equipment\"):\n equipment_name = equip.get(\"name\")\n equipment_id = equip.get(\"id\")\n\n equipment = Equipment(equipment_name, equipment_id)\n\n equipments.append(equipment)\n\n instruction = Instruction(instruction_number, step, ingred, equipments)\n\n instructions.append(instruction)\n\n recipe.instructions = instructions\n\n return recipe",
"def parse_arguments():\n global parser\n parser = argparse.ArgumentParser(\n description='Certainly this isn\\'t how Food Network does it',\n formatter_class=argparse.RawDescriptionHelpFormatter,\n epilog=textwrap.dedent('''\n Recipe List must appear as follows. **\n =======\n recipe_name\n serveing_size\n ingredient 0\n ingredient 1\n ingredient 2\n ...\n ...\n ...\n ingredient n\n '''))\n parser.add_argument('input_file',\n help=\"An input text file to read in recipes from. \"\n \"Must adhere certain structure.**\")\n parser.add_argument('out_file', help=\"File to write json recipe data to.\")\n parser.add_argument('-s', '--serving-size', type=str,\n help='The number of servings you\\'d like to make.',\n dest='serving_size', default=4)\n parser.add_argument('-f', '--filter-items', type=split_cmdline_filter_items,\n dest='filter_items',\n help='A comma delimited string of ingredients to filter recipes by. '\n 'Multi-word ingredients must be quoted.')\n global args\n args = parser.parse_args()\n\n global serving_size_override\n serving_size_override = args.serving_size\n global filter_ingredients\n filter_ingredients = args.filter_items",
"def _collect_kwargs(step):\n dicts = {}\n for s in _expand_inputs(step):\n name = s.name if s.name is not None else s.__class__.__name__\n if name in dicts.keys():\n raise ValueError(\"Duplicate step names: %s\" % name)\n\n d = dict(s._kwargs)\n d.pop('inputs', None)\n dicts[name] = d\n\n return dicts",
"def add_step_args(cls, parser):",
"def get_recipe_preperation(soup_recipe):\n prep_steps = soup_recipe.find_all(\"li\", {\"itemprop\": \"recipeInstructions\"})\n prep = []\n for step in prep_steps:\n prep.append(step.get_text().strip())\n return prep",
"def process_step(self, form):\n #print(form.data)\n\n #print(form.data)\n #print(self)\n \n institution = {}\n inst_list = []\n if self.steps.current == '1':\n \n institution['institution'] = form.data['1-0-institution']\n institution['date_from'] = form.data['1-0-date_from']\n institution['date_to'] = form.data['1-0-date_to']\n inst_list.append(institution)\n inst_keys = dict(form.data.lists())\n \n #Create dictionary dynamically for the other institutions incase more than two institutions are entered\n if inst_keys.get('1-NaN-institution') and type(inst_keys.get('1-NaN-institution')) is list:\n inst_list2 = []\n #Add institutions \n for i,insti in enumerate(inst_keys.get('1-NaN-institution')):\n inst_i = {}\n #print(i)\n date_from = inst_keys['1-NaN-date_from'][i]\n date_to = inst_keys['1-NaN-date_to'][i]\n course_duration = inst_keys['1-NaN-course_duration'][i]\n inst_i['institution'] = insti\n inst_i['date_from'] = date_from\n inst_i['date_to'] = date_to\n \n inst_list2.append(inst_i)\n #print(inst_list2)\n inst_list.extend(inst_list2)\n #Create dictionary dynamically for the other institutions incase more than two institutions are entered\n if inst_keys.get('1-NaN-institution') and type(inst_keys.get('1-NaN-institution')) is not list:\n inst_0 = {}\n inst_0['institution'] = form.data['1-NaN-institution']\n inst_0['date_from'] = form.data['1-NaN-date_from']\n inst_0['date_to'] = form.data['1-NaN-date_to']\n inst_0['course_duration'] = form.data['1-NaN-course_duration']\n #inst_0['achievements'] = ''\n inst_list.append(inst_0)\n \n #Add the entered information to a session object\n self.request.session['institution'] = inst_list",
"def get_recipe_preperation(soup_recipe):\n prep_steps = soup_recipe.find(\"div\",\n {\"itemprop\": \"recipeInstructions\"}).find_all(\"li\")\n prep = []\n for step in prep_steps:\n prep.append(step.get_text().strip())\n return prep"
] |
[
"0.61660594",
"0.61499614",
"0.5814299",
"0.5728871",
"0.5694139",
"0.56098646",
"0.556041",
"0.5349966",
"0.5326637",
"0.5303917",
"0.52398884",
"0.52076477",
"0.5195352",
"0.51346636",
"0.5120102",
"0.5115197",
"0.50925595",
"0.501864",
"0.50049645",
"0.4997988",
"0.49881917",
"0.49825257",
"0.4968874",
"0.4965109",
"0.49540827",
"0.49493563",
"0.49211758",
"0.49088284",
"0.49058685",
"0.48978665"
] |
0.71416456
|
0
|
Strip excess data: Remove any references to the old data structure from the initial form data. These will be the old `ingredients` and `steps` keys
|
def _strip_excess_data(recipe):
for key in list(recipe.keys()):
if key == "ingredients" or key == "steps":
continue
elif "ingredient" in key or "step" in key:
del recipe[key]
return recipe
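
A minimal usage sketch of the function above, using a hypothetical form-derived recipe dictionary; the key names here are illustrative only, not taken from the original project.

# Hypothetical recipe dict: the per-field keys such as `ingredient1` and
# `step1` are removed, while `ingredients`, `steps`, and unrelated keys survive.
recipe = {
    "name": "Pancakes",
    "ingredient1": "flour",
    "ingredientQuantity1": "200",
    "step1": "Mix the dry ingredients.",
    "ingredients": [{"name": "flour", "quantity": "200", "measurement": "g"}],
    "steps": ["Mix the dry ingredients."],
}
cleaned = _strip_excess_data(recipe)
print(sorted(cleaned.keys()))  # ['ingredients', 'name', 'steps']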
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def cleanStep(idict):\n for step in ['input', 'output']:\n data = idict.get(step, {})\n for key, values in data.items():\n for elem in values:\n for skip in ['pfn', 'InputPFN', 'OutputPFN', 'inputpfns']:\n if skip in elem:\n del elem[skip]\n data[key] = values\n return idict",
"def clean_ingredients_file(dirty):\n clean_file = 'clean_ingredients.json'\n all_ingredients = []\n with open(dirty) as infile:\n line = infile.readline()\n while line:\n split_line = line.split(',')\n for ing in split_line:\n ing = ing.strip(' \\n.')\n all_ingredients.append(ing)\n line = infile.readline()\n with open(clean_file, 'w') as outfile:\n json.dump(all_ingredients, outfile)\n return clean_file",
"def cleanup(self, data):\n tmp = copy.copy(data)\n for field in ('log_entries', 'instances',\n 'picked_instances', 'saved_instances',\n 'terminated_instances', 'skipped_instances'):\n if field in tmp:\n del tmp[field]\n return tmp",
"def task_2_remove_dict_fields(data: DT, redundant_keys: List[str]) -> DT:\n dict2 = copy.deepcopy(data)\n for item in dict2:\n for key in redundant_keys:\n item.pop(key)\n return dict2",
"def cleanData(self, recipes, url):\n ret = []\n for recipe in recipes:\n props = recipe['properties']\n for k, vals in props.items():\n new = []\n for v in vals:\n if type(v) is dict:\n if v.has_key('properties'):\n vv = ''\n for prop in v['properties'].values():\n vv += prop[0]\n v = vv\n else:\n continue\n lines = v.splitlines()\n vv = ' '.join([line.strip() for line in lines]).strip()\n new.append(vv)\n props[k] = new\n props['importedFromURL'] = url\n ret.append(recipe)\n return ret",
"def cleaning (data):",
"def pop_non_relevant_vuln_fields(data: Dict):\n keys_to_keep = [\n \"title\",\n \"description\",\n \"content_type\",\n \"published_at\",\n \"references\",\n \"severity\",\n \"solutions\",\n \"alternate_ids\",\n ]\n for key in list(data):\n if key not in keys_to_keep:\n data.pop(key)",
"def remove_prep_from_obj(objt):\n objt['initial_value'] = \" \".join(objt['initial_value'].split(\" \")[1:])\n objt['replacement_value'] = \" \".join(objt['replacement_value'].split(\" \")[1:])\n return objt",
"def strip_unwanted_fields(self, data, many, **kwargs):\n unwanted_fields = [\"resource_type\"]\n for field in unwanted_fields:\n if field in data:\n data.pop(field)\n return data",
"def _cleanse_dict(original):\n return {k: v for k, v in original.items() if \"_pass\" not in k}",
"def _remove_empty(self, data, many):\n if not many:\n for key in list(data):\n if key == 'versions':\n data.pop(key)\n\n return {\n key: value for key, value in data.items()\n if value is not None\n }\n for item in data:\n for key in list(item):\n if (key == 'versions') or (item[key] is None):\n item.pop(key)\n\n return data",
"def _remove_none(self, data):\r\n for key, value in data.items():\r\n if value is None or isinstance(value, forms.Field):\r\n del data[key]\r\n if isinstance(value, dict):\r\n self._remove_none(data[key])",
"def templatize(self):\n self.sanitize_ids()\n del self.steps[1:]\n self.current_step = None",
"def get_cleaned_data(form, keys_to_remove=[], values_to_remove=[]):\n if not values_to_remove:\n values_to_remove = get_ignorable_form_values()\n\n cleaned_data = copy.copy(form.cleaned_data)\n cleaned_data = clean_dict(\n cleaned_data,\n keys=list(set(cleaned_data.keys()) - set(keys_to_remove)),\n values=values_to_remove\n )\n\n ordered_cleaned_data = OrderedDict()\n for key in form.fields.keys():\n if key in cleaned_data:\n ordered_cleaned_data[key] = cleaned_data[key]\n\n return ordered_cleaned_data",
"def _revert(self):\n self.kwargs[\"collect\"].reset_scan_form_data()",
"def trim_data(flattened_beer_json, trim_level):\n\n attributes_of_interest = {\n 0: ['style_category_name', 'style_ibuMin', 'style_ibuMax', 'style_abvMin', 'style_abvMax'],\n 1: ['abv', 'ibu'],\n 2: ['style_fgMax'],\n 3: ['style_fgMax', 'style_ogMin'],\n 4: ['style_fgMax', 'style_fgMin', 'style_ogMin'], # don't use style_ogMax, no beer has the attribute!\n 5: ['servingTemperature', 'glass_name', 'available_name', 'abv', 'ibu'],\n 6: ['ingredients'],\n 7: ['ingredients', 'abv', 'ibu']\n }\n\n trimmed = {'data': [], 'labels': []}\n\n # Get all ingredients\n possible_ingredients = get_all_ingredients()\n # Get top 50 highest occurring ingredients\n possible_ingredients = dict(sorted(possible_ingredients.items(), key=operator.itemgetter(1), reverse=True)[:50])\n\n for index, beer in enumerate(flattened_beer_json['data']):\n # if all attributes of interest are present\n if all(attribute in beer for attribute in attributes_of_interest[trim_level]):\n for attribute in list(beer):\n if attribute == 'ingredients':\n for ing in possible_ingredients.keys():\n if ing in get_beer_ingredients(beer):\n beer[ing] = True\n else:\n beer[ing] = False\n del(beer[attribute])\n elif attribute not in attributes_of_interest[trim_level]:\n del (beer[attribute])\n trimmed['data'].append(beer)\n trimmed['labels'].append(flattened_beer_json['labels'][index])\n\n return trimmed",
"def _get_normalized_form_data(self, form, key):\n data = {} if form.data else form.initial\n prefix = 'gc{}-'.format(key)\n\n for field_name in form.data:\n normalized_field_name = field_name[len(prefix):]\n\n if field_name in form.data and field_name.startswith(prefix) and form.data[field_name]:\n data[normalized_field_name] = form.data[field_name]\n\n for field_name in data:\n if field_name == 'quantity':\n data[field_name] = str(data[field_name])\n\n return data",
"def _cleanse_dict(original):\n return dict((k, v) for k, v in original.items() if \"_pass\" not in k)",
"def task_2_remove_dict_fields(data: DT, redundant_keys: List[str]) -> DT:\n return [{key: value for key, value in dic.items() if key not in redundant_keys} for dic in data]",
"def clean_up_data(self):\n pass",
"def cleanse_body(self):\n if self.request.method.upper() == 'POST':\n if isinstance(self.request.data, dict):\n for key in self.request.data.keys():\n if key not in self.post_body_fields:\n self.request.data.pop(key)\n\n if isinstance(self.request.data, list):\n for dic in self.request.data:\n for key in dic.keys():\n if key not in self.post_body_fields:\n dic.pop(key)\n\n elif self.request.method.upper() == 'PUT':\n for key in self.request.data.keys():\n if key not in self.put_body_fields:\n self.request.data.pop(key)",
"def strip(self):\n self.document_type = self.document_type.strip()\n self.document_reg_id = self.document_reg_id.strip()\n self.owner_cross_reference = self.owner_cross_reference.strip()\n self.routing_slip_number = self.routing_slip_number.strip()\n self.bcol_account = self.bcol_account.strip()\n self.dat_number = self.dat_number.strip()\n self.examiner_id = self.examiner_id.strip()\n self.update_id = self.update_id.strip()\n self.phone_number = self.phone_number.strip()\n self.attention_reference = self.attention_reference.strip()\n self.name = self.name.strip()\n self.legacy_address = self.legacy_address.strip()\n self.consideration_value = self.consideration_value.strip()\n self.affirm_by_name = self.affirm_by_name.strip()\n self.liens_with_consent = self.liens_with_consent.strip()\n self.client_reference_id = self.client_reference_id.strip()\n self.own_land = self.own_land.strip()",
"def _clean(self):\n map(self.__delitem__, self.keys())\n self._original = []\n self._columns = {}\n self._modified, self._deleted = {}, {}",
"def complete_form_data():\n\n missing_fields = {\n 'link' : 'http://bvsalud.org',\n 'originator' : 'BIREME',\n 'source_type': 1,\n 'source_language': 1,\n 'originator_location' : 1,\n\n 'main-descriptor-content_type-object_id-TOTAL_FORMS' : '1',\n\n 'main-descriptor-content_type-object_id-0-id' : '',\n 'main-descriptor-content_type-object_id-0-text' : 'malaria',\n 'main-descriptor-content_type-object_id-0-code' : '^d8462',\n 'main-descriptor-content_type-object_id-0-status' : '0',\n\n 'main-resourcethematic-content_type-object_id-TOTAL_FORMS' : '1',\n 'main-resourcethematic-content_type-object_id-0-thematic_area' : '1',\n 'main-resourcethematic-content_type-object_id-0-status' : '0',\n }\n\n complete_form_data = minimal_form_data()\n complete_form_data.update(missing_fields)\n\n return complete_form_data",
"def recipe_parser(form_data, user):\n\n # The way the ingredients and steps data is structured in the form data is not the\n # structure required for the database so additional processing is required. The\n # `ingredients` and `steps` lists will be created and added to the recipe\n recipe[\"ingredients\"] = _parse_ingredients(recipe)\n recipe[\"steps\"] = _parse_steps(recipe)\n\n # Additional fields are created to store additional information about the recipe\n recipe[\"created_by\"] = user\n recipe[\"created_at\"] = datetime.now()\n recipe[\"views\"] = 0\n recipe[\"likes\"] = 0\n\n # As the structure of the ingredients have changed, the initial data contained\n # in the form needs to be removed\n recipe = _strip_excess_data(recipe)\n\n return recipe",
"def _prepare_multipart_form_data(data):\n output = dict()\n for key in data:\n output[key] = (None, data[key])\n return output",
"def clean_data(data):\n if type(data) == types.DictType:\n return data\n cleaned_data = {}\n keys = data.keys()\n for key in keys:\n this_item = data.getlist(key)\n if len(this_item) == 1:\n cleaned_data[key] = this_item[0]\n else:\n cleaned_data[key] = this_item\n return cleaned_data",
"def clean(self):\n cleaned_data = super().clean()\n cleaned_data = {key: field for key, field in cleaned_data.items()\n if field is not None}\n return cleaned_data",
"def _remove_data(things, lst_remove=None):\n\n for data in things:\n data.pop(\"_sa_instance_state\", None)\n data.pop(\"user_id\", None)\n\n if lst_remove is not None:\n for str_remove in lst_remove:\n if str_remove in data:\n data.pop(str_remove, None)\n\n return things",
"def _handle_dump_unknown(self, data, original):\n for key, val in original.items():\n if key not in self.fields:\n data[key] = val\n return data"
] |
[
"0.6569909",
"0.61799103",
"0.6013431",
"0.59730214",
"0.59530836",
"0.5908101",
"0.5787217",
"0.5767013",
"0.5740882",
"0.57120657",
"0.5707393",
"0.56738675",
"0.5667914",
"0.5654368",
"0.55943626",
"0.55742645",
"0.5562125",
"0.5559802",
"0.55534947",
"0.555338",
"0.5553001",
"0.5546724",
"0.55410683",
"0.5529639",
"0.5514621",
"0.5508203",
"0.55066574",
"0.5489808",
"0.54850173",
"0.54685766"
] |
0.8121244
|
0
|
Recipe parser: Bundles up the information retrieved from the request, parses it, and strips away the excess information.
|
def recipe_parser(form_data, user):
    # Build a mutable recipe dictionary from the submitted form data
    recipe = dict(form_data)

    # The way the ingredients and steps data is structured in the form data is not the
    # structure required for the database so additional processing is required. The
    # `ingredients` and `steps` lists will be created and added to the recipe
    recipe["ingredients"] = _parse_ingredients(recipe)
    recipe["steps"] = _parse_steps(recipe)

    # Additional fields are created to store additional information about the recipe
    recipe["created_by"] = user
    recipe["created_at"] = datetime.now()
    recipe["views"] = 0
    recipe["likes"] = 0

    # As the structure of the ingredients has changed, the initial data contained
    # in the form needs to be removed
    recipe = _strip_excess_data(recipe)

    return recipe
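
A usage sketch, assuming `_parse_ingredients`, `_parse_steps`, and `_strip_excess_data` are defined as in the surrounding snippets; the form field names below are hypothetical.

form_data = {
    "recipeName": "Pancakes",
    "ingredient1": "flour",
    "ingredientQuantity1": "200",
    "ingredientMeasurement1": "g",
    "step1": "Mix the dry ingredients.",
}
recipe = recipe_parser(form_data, user="alice")
# `recipe` now carries the parsed `ingredients` and `steps` lists plus the
# bookkeeping fields (`created_by`, `created_at`, `views`, `likes`); the raw
# `ingredient*` and `step*` form keys have been stripped out.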
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def parse_request(self, request):\n request.process_inputs()",
"def parseRemainingVariables(json_response, recipe):\n ready_in_minutes = json_response.get(\"readyInMinutes\")\n recipe.ready_in_minutes = int(ready_in_minutes)\n\n servings = json_response.get(\"servings\")\n recipe.servings = int(servings)\n\n vegetarian = json_response.get(\"vegetarian\")\n recipe.vegetarian = bool(vegetarian)\n\n source_url = json_response.get(\"sourceUrl\")\n recipe.source_url = str(source_url)\n\n aggregate_likes = json_response.get(\"aggregateLikes\")\n recipe.aggregate_likes = int(aggregate_likes)\n\n health_score = json_response.get(\"healthScore\")\n recipe.health_score = int(health_score)\n\n ingredients = []\n\n #parse all the ingredients and add them to the list of ingredients as a ingredient object\n if json_response.get(\"extendedIngredients\") is not None:\n for ingr in json_response.get(\"extendedIngredients\"):\n ingredient_name = ingr.get(\"name\")\n ingredient_id = ingr.get(\"id\")\n amount = ingr.get(\"amount\")\n unit = ingr.get(\"unit\")\n\n ingredient = Ingredient(ingredient_name, ingredient_id, amount, unit)\n ingredients.append(ingredient)\n\n recipe.ingredients = ingredients\n\n instructions = []\n\n #parse all the instructions and add them as instruction objects to the instructions list\n if json_response.get(\"analyzedInstructions\") is not None:\n\n if len(json_response.get(\"analyzedInstructions\")) != 0:\n\n if json_response.get(\"analyzedInstructions\")[0].get(\"steps\") is not None:\n\n for instr in json_response.get(\"analyzedInstructions\")[0].get(\"steps\"):\n instruction_number = instr.get(\"number\")\n step = instr.get(\"step\")\n\n ingred = []\n\n for json_ingred in instr.get(\"ingredients\"):\n for saved_ingred in ingredients:\n if saved_ingred.ingredient_id == json_ingred.get(\"id\"):\n ingred.append(saved_ingred)\n\n equipments = []\n\n for equip in instr.get(\"equipment\"):\n equipment_name = equip.get(\"name\")\n equipment_id = equip.get(\"id\")\n\n equipment = Equipment(equipment_name, equipment_id)\n\n equipments.append(equipment)\n\n instruction = Instruction(instruction_number, step, ingred, equipments)\n\n instructions.append(instruction)\n\n recipe.instructions = instructions\n\n return recipe",
"def parse (self, request):\n\n data = {}\n body_start = request.find('\\r\\n\\r\\n')\n if body_start == -1:\n data['body'] = None\n else:\n data['body'] = request[body_start+4:]\n parts = request.split(' ', 2)\n data['method'] = parts[0]\n data['resource'] = parts[1]\n return (data)",
"def _parse_request(self):\n if len(self.request.body) > 0:\n try:\n return tornado.escape.json_decode(self.request.body)\n except Exception:\n #Not Json, Using Form data\n return self.request.arguments\n else:\n return self.request.arguments",
"def _parse_ingredients(recipe):\n ingredients = []\n group_counter = 1\n counter = 0\n\n filtered_dict = {k: v for k, v in recipe.items() if \"ingredient\" in k}\n ingredient = {}\n\n for key, value in filtered_dict.items():\n if not value:\n continue\n\n elif key == f\"ingredient{group_counter}\":\n ingredient[\"name\"] = value\n\n elif key == f\"ingredientQuantity{group_counter}\":\n ingredient[\"quantity\"] = value\n\n elif key == f\"ingredientMeasurement{group_counter}\":\n ingredient[\"measurement\"] = value\n\n counter += 1\n if counter % 3 == 0:\n ingredients.append(ingredient)\n ingredient = {}\n group_counter += 1\n\n return ingredients",
"def _strip_excess_data(recipe):\n for key in list(recipe.keys()):\n if key == \"ingredients\" or key == \"steps\":\n continue\n elif \"ingredient\" in key or \"step\" in key:\n del recipe[key]\n\n return recipe",
"def parse_detail(self, response):\r\n # get item\r\n item = response.meta['item']\r\n\r\n # process ingredients\r\n ing_list = []\r\n ing_li = response.xpath('//*[@id=\"__layout\"]//ul[@class=\"recipe-ingredients__list\"]/li')\r\n for li in ing_li:\r\n ing = li.xpath('.//a/text()').extract_first()\r\n if ing is not None:\r\n ing_list.append(ing.strip())\r\n item['ingredients'] = ', '.join(ing_list)\r\n\r\n # process steps\r\n step_list = []\r\n step_li = response.xpath('//*[@id=\"__layout\"]//li[@class=\"recipe-directions__step\"]')\r\n for li in step_li:\r\n step = li.xpath('.//text()').extract_first()\r\n if step is not None:\r\n step_list.append(step.strip())\r\n item['steps'] = '\\n'.join(step_list)\r\n\r\n yield item",
"def parse(self, response):\r\n recipes = json.loads(response.text)['response']['results']\r\n # test json data\r\n # fp = open(\"./food.json\", \"w\", encoding=\"utf-8\")\r\n # json.dump(recipes, fp=fp, ensure_ascii=False)\r\n for recipe in recipes:\r\n if recipe['record_type'] == 'Recipe':\r\n item = RecipespidersItem()\r\n\r\n self.recipe_count += 1\r\n item['id'] = self.recipe_count\r\n item['name'] = recipe['main_title']\r\n item['description'] = recipe['main_description']\r\n\r\n item['rating_num'] = int(recipe['main_num_ratings'])\r\n item['rating_star'] = int(recipe['main_rating_mapping'])\r\n item['rating_score'] = float(recipe['main_rating'])\r\n\r\n item['total_time'] = int(recipe['recipe_totaltime'])\r\n\r\n if recipe.get('recipe_photo_url') is None:\r\n continue\r\n else:\r\n item['photo_url'] = recipe['recipe_photo_url']\r\n\r\n item['record_url'] = recipe['record_url']\r\n\r\n yield scrapy.Request(url=recipe['record_url'], callback=self.parse_detail, meta={'item': item})\r\n\r\n # process remaining pages\r\n if self.page_num <= 21000:\r\n print(self.page_num)\r\n new_url = format(self.base_url % self.page_num)\r\n self.page_num += 1\r\n\r\n yield scrapy.Request(url=new_url, callback=self.parse)",
"def cleanData(self, recipes, url):\n ret = []\n for recipe in recipes:\n props = recipe['properties']\n for k, vals in props.items():\n new = []\n for v in vals:\n if type(v) is dict:\n if v.has_key('properties'):\n vv = ''\n for prop in v['properties'].values():\n vv += prop[0]\n v = vv\n else:\n continue\n lines = v.splitlines()\n vv = ' '.join([line.strip() for line in lines]).strip()\n new.append(vv)\n props[k] = new\n props['importedFromURL'] = url\n ret.append(recipe)\n return ret",
"def _format_recipe(recipe):\n # Some fields are not consistently returned from the API, make sure they exist first.\n if \"analyzedInstructions\" in recipe.keys() and len(\n recipe[\"analyzedInstructions\"]\n ):\n step_instructions = recipe[\"analyzedInstructions\"][0][\"steps\"]\n else:\n step_instructions = None\n\n return {\n \"spoonacular_id\": recipe[\"id\"],\n \"dish_name\": recipe[\"title\"],\n \"servings\": recipe.get(\"servings\", None),\n \"image\": recipe.get(\"image\", None),\n \"is_vegetarian\": recipe[\"vegetarian\"],\n \"is_vegan\": recipe[\"vegan\"],\n \"is_gluten_free\": recipe[\"glutenFree\"],\n \"is_dairy_free\": recipe[\"dairyFree\"],\n \"cook_time_min\": recipe.get(\"cookingMinutes\", None),\n \"prep_time_min\": recipe.get(\"preparationMinutes\", None),\n \"spoonacular_score\": recipe[\"spoonacularScore\"],\n \"ingredients\": [\n ingredient[\"originalName\"]\n for ingredient in recipe[\"extendedIngredients\"]\n ],\n \"instructions\": recipe.get(\"instructions\", None),\n \"step_instructions\": step_instructions,\n }",
"def _parser(self, request, *args, **kwargs):\n\n self.request = request\n\n # parse header\n self.header = {k[5:]: v for k, v in request.META.items() if k.startswith('HTTP_')}\n self.header['CONTENT_TYPE'] = request.META.get('CONTENT_TYPE')\n\n # parse boby\n if request.method not in ['GET', 'HEAD']:\n\n # TODO: serve other body format\n if 'multipart/form-data' in self.header['CONTENT_TYPE']:\n self.body = request.POST.dict()\n\n else:\n # default: application/json\n if self.request.body:\n try:\n self.body = json.loads(self.request.body)\n except Exception as e:\n raise Exception('parse json body error')\n \n # parse query\n self.query = request.GET.dict()\n\n # parse cookie\n self.cookie = {k: v for k, v in request.COOKIES.items()}",
"def parse_arguments():\n global parser\n parser = argparse.ArgumentParser(\n description='Certainly this isn\\'t how Food Network does it',\n formatter_class=argparse.RawDescriptionHelpFormatter,\n epilog=textwrap.dedent('''\n Recipe List must appear as follows. **\n =======\n recipe_name\n serveing_size\n ingredient 0\n ingredient 1\n ingredient 2\n ...\n ...\n ...\n ingredient n\n '''))\n parser.add_argument('input_file',\n help=\"An input text file to read in recipes from. \"\n \"Must adhere certain structure.**\")\n parser.add_argument('out_file', help=\"File to write json recipe data to.\")\n parser.add_argument('-s', '--serving-size', type=str,\n help='The number of servings you\\'d like to make.',\n dest='serving_size', default=4)\n parser.add_argument('-f', '--filter-items', type=split_cmdline_filter_items,\n dest='filter_items',\n help='A comma delimited string of ingredients to filter recipes by. '\n 'Multi-word ingredients must be quoted.')\n global args\n args = parser.parse_args()\n\n global serving_size_override\n serving_size_override = args.serving_size\n global filter_ingredients\n filter_ingredients = args.filter_items",
"def _scrape_dict(self, req, instruction):\n\n while 'extends' in instruction:\n extends = instruction.pop('extends')\n if isinstance(extends, basestring):\n loaded_instruction, target_uri = self._load_uri(req.uri, extends)\n self._extend_instruction(instruction, loaded_instruction)\n elif isinstance(extends, dict):\n self._extend_instruction(instruction, extends)\n elif isinstance(extends, list):\n for ex in extends:\n if isinstance(ex, basestring):\n loaded_instruction, target_uri = self._load_uri(req.uri, ex)\n self._extend_instruction(instruction, loaded_instruction)\n elif isinstance(ex, dict):\n self._extend_instruction(instruction, ex)\n else:\n raise InvalidInstructionError(\"element of `extends` list must be a dict or str\")\n else:\n raise TypeError()\n\n # Imperfect solution, but updating tags in request directly\n # should be safe at this point.\n tags = instruction.get('tags', {})\n req.tags.update(tags)\n\n then = instruction.get('then', [])\n description = instruction.get('description', None)\n\n if 'find' in instruction:\n return self._scrape_find(req, instruction, description, then)\n elif 'load' in instruction:\n return self._scrape_load(req, instruction, description, then)\n else:\n raise InvalidInstructionError(\"Could not find `find` or `load` key.\")",
"def parse_request(req):\n # Parsing out the request body\n data = req.get_json()\n if (data is None or\n 'action' not in data or\n 'task_id' not in data or\n 'release_id' not in data):\n abort(400)\n \n action = data['action']\n task = data['task_id']\n release = data['release_id']\n return action, task, release",
"def test_parse_ingredients(self):\n pass",
"def consumeData(self, data):\n ret = []\n\n soup = BeautifulSoup(StringIO(data))\n ingredientses = soup.find_all(None, itemprop='ingredients')\n for ing in ingredientses:\n separateByClass(soup, ing, \"ingredient\")\n separateByTag(soup, ing, ['br', 'tr', 'li'])\n instructionses = soup.find_all(None, itemprop=\"recipeInstructions\")\n for ins in instructionses:\n separateByClass(soup, ins, \"instruction\")\n separateByTag(soup, ins, ['br', 'tr', 'li'])\n workingDocument = StringIO(soup.encode('utf-8'))\n\n items = microdata.get_items(workingDocument)\n for i in items:\n for typ in i.itemtype:\n if typ.string == MICROFORMAT_RECIPE:\n ret.append(i.json())\n break\n return map(json.loads, ret)",
"def parseRecipe (self,filename):\r\n return RecipeFileObject(os.path.join(main.settings.RECIPE_DIR,filename)).getParsed(self.actualTopology, self.anlage)",
"def _parse_in_request(self, request):\n error = None\n self.logger.debug(\"Http method: %s\" % request.method)\n if request.method == 'GET':\n self._params = request.args.to_dict()\n self.logger.debug(\"Request params: %s\" % self._params)\n \n elif request.method == 'POST':\n self._params = request.form.to_dict()\n self.logger.debug(\"Request params: %s\" % self._params)",
"def scrub_request(self, data):\n return self.__request_scrubber(data)",
"def _process_request(self, request, response):\n ...",
"def get_recipe_preperation(soup_recipe):\n prep_steps = soup_recipe.find_all(\"li\", {\"itemprop\": \"recipeInstructions\"})\n prep = []\n for step in prep_steps:\n prep.append(step.get_text().strip())\n return prep",
"def _recipe_details_generator(self, converted_content, overview_recipe):\n def get_cooking_shop_strings(lines):\n ret = []\n buf = None\n is_recipe_step_area = False\n for l in lines:\n if re.search(\"軒目\", l.strip()) or re.match(r\"^[①-⑳*].*『.*』\", l.strip()) or re.match(r\"^[①-⑳*].*「.*」\", l.strip()):\n if buf:\n ret.append(buf)\n buf = l.strip()\n continue\n\n if re.search(\"^(料理|万能調味料)\", l.strip()):\n is_recipe_step_area = False\n\n if re.search(\"^材料\", l.strip()):\n title, materials = re.search(\"(材料)(.*)\", l.strip()).groups()\n # buf += \"\\n\" + \"\\n\".join(l.strip().split(None, 1))\n buf += \"\\n\" + title + \"\\n\" + materials.strip()\n continue\n\n if re.search(\"^作り方\", l.strip()):\n is_recipe_step_area = True\n title, recipe_steps = re.search(\"(作り方)(.*)\", l.strip()).groups()\n # buf += \"\\n\" + \"\\n\".join(l.strip().split(None, 1))\n buf += \"\\n\" + title + \"\\n\" + recipe_steps.strip()\n continue\n \n if buf:\n if is_recipe_step_area:\n if re.match(r\"^[①-⑳*]\", l.strip()):\n buf += \"\\n\" + l.strip()\n else:\n buf += l.strip()\n else:\n buf += \"\\n\" + l.strip()\n if buf:\n ret.append(buf)\n\n return ret\n \n \n for ii, l in enumerate(converted_content.splitlines()):\n if ii == 1:\n overview_recipe.cooking_name_sub = l.strip()\n continue\n \n if -1 < l.find(\"初回放送\"):\n overview_recipe.program_date = dateutil.parser.parse(\"/\".join(re.search(r\"(\\d+)\\D+(\\d+)\\D+(\\d+)\\D+\", l).groups()))\n break\n\n cooking_shop_strings = get_cooking_shop_strings(converted_content.splitlines())\n\n logger.debug(\"-\" * 20)\n logger.debug(cooking_shop_strings)\n for shop_string in cooking_shop_strings:\n recipe_shop = None\n recipe = None\n is_material_area = False\n is_recipe_step_area = False\n for l in shop_string.splitlines():\n if len(l.strip()) == 0:\n continue\n \n if is_material_area == False and is_recipe_step_area == False:\n if re.search(\"軒目\", l.strip()) or re.match(r\"^[①-⑳*].*『.*』\", l.strip()) or re.match(r\"^[①-⑳*].*「.*」\", l.strip()):\n recipe_shop = copy.deepcopy(overview_recipe)\n recipe = None\n \n m = re.search(r\"「(.*)」\", l)\n if m:\n recipe_shop.cooking_name_sub += \"/\" + m.group(1)\n else:\n m2 = re.search(r\"『(.*)』\", l)\n if m2:\n recipe_shop.cooking_name_sub += \"/\" + m2.group(1)\n \n continue\n \n if re.search(\"^(料理|万能調味料)\", l.strip()):\n is_material_area = False\n is_recipe_step_area = False\n if recipe:\n yield recipe\n\n if recipe_shop:\n recipe = copy.deepcopy(recipe_shop)\n else:\n recipe = copy.deepcopy(overview_recipe)\n \n if -1 < l.find(\":\"):\n recipe.cooking_name = l.split(\":\")[1].strip()\n elif -1 < l.find(\":\"):\n recipe.cooking_name = l.split(\":\")[1].strip()\n elif re.search(r\"^(料理|万能調味料)[①-⑳]\", l.strip()):\n # https://www.nhk.or.jp/program/manpuku/recipe/dg0_200115.pdf\n # 料理①カルパッチョ\n recipe.cooking_name = l.strip()[3:].strip()\n else:\n recipe.cooking_name = l.split(None, 1)[1].strip()\n continue\n \n if re.search(\"^材料\", l.strip()):\n is_material_area = True\n is_recipe_step_area = False\n if l.strip() == \"材料\":\n continue\n \n if re.search(\"^作り方\", l.strip()):\n is_material_area = False\n is_recipe_step_area = True\n if l.strip() == \"作り方\":\n pass\n else:\n l = l.replace(\"作り方\", \"\", 1)\n # recipeがNoneの場合はエラーとして検出したい\n recipe.recipe_steps.append(RecipeText(l.strip()))\n continue\n \n \n if is_material_area:\n for material in l.strip().split(\"、\"):\n material = material.strip()\n if len(material):\n if material.startswith(\"(\"):\n recipe.materials.append(RecipeText(material))\n else:\n 
recipe.materials.append(RecipeText(material.replace(\"(\", \": \").replace(\")\", \"\")))\n \n if is_recipe_step_area:\n recipe.recipe_steps.append(RecipeText(l.strip()))\n if recipe:\n yield recipe",
"def parse_product(self, resp):\n loader = ItemLoader(item=EstateProperty(), response=resp)\n loader.add_value(\"url\", resp.request.url)\n\n # for the standard fields, extraction is straight forward\n for field, xpath in list(self.standard_fields.items()):\n loader.add_xpath(field, xpath)\n\n # exclude items where price is blank\n # may correspond to rentals\n price = resp.xpath(self.standard_fields['price']).extract_first()\n if price is None or price.strip()==\"\":\n # mark the item as dirty\n # to avoid sending it\n loader.add_value('is_dirty', True)\n\n # some items' titles are stored in a legacy path\n title = resp.xpath(self.standard_fields['title']).extract_first()\n if title is None or title.strip()==\"\":\n # try another way\n title = resp.xpath(self.special_fields['title_legacy']).extract_first()\n if title is None or title.strip()==\"\":\n # mark it dirty\n loader.add_value('is_dirty', True)\n else:\n loader.add_value('title', title)\n\n # sku is preprended by dirty text\n sku_dirty = resp.xpath(self.special_fields['sku']).extract_first()\n try:\n m = re.search(r'\\s{0,}\\S{3}\\s{1,}(?P<ref>.+)\\s{0,}', sku_dirty)\n loader.add_value('sku', m.group('ref'))\n except Exception as e:\n self.logger.error(e)\n loader.add_value('is_dirty', True)\n\n area_dirty = resp.xpath(self.special_fields['area']).extract_first()\n try:\n m = re.search(r'(?P<area>\\d+)\\sm.+', area_dirty)\n float_area = float(m.group('area'))\n loader.add_value('area', float_area)\n except Exception as e:\n self.logger.error(e)\n # parsing error on area is not a cause of dirty item\n\n yield loader.load_item()",
"def parse_request_body(self):\n try:\n request_arguments = self.request.arguments\n if request_arguments:\n new_request_arguments = {\n k: common.my_str(v[0].decode('utf8'))\n for k, v in request_arguments.items()\n }\n return new_request_arguments\n else:\n request_body = self.request.body\n request_data = request_body.decode('utf-8')\n request_data_dict = json.loads(request_data)\n self.request.arguments = {\n k: [str(v)]\n for k, v in request_data_dict.items()\n }\n new_request_arguments = {\n k: common.my_str(v)\n for k, v in request_data_dict.items()\n }\n return new_request_arguments\n except Exception as e:\n raise tornado.web.HTTPError(\n status_code=400, log_message='bad_request: {}'.format(str(e)))",
"def prod_parser(self, response):\n products = []\n pk = Product.objects.last()\n if pk:\n i = pk.id\n else:\n i = 0\n for product in response:\n # crawling product for name, desc, API_url, image_url, nutriscore, nutient_100g\n if 'ingredients_text_fr' not in product:\n desc = \"\"\n else:\n desc = product['ingredients_text_fr']\n nutrigrade = \"\".join(product[\"nutrition_grades_tags\"])\n if nutrigrade in (\"a\", \"b\", \"c\", \"d\", \"e\") \\\n and 'fat_100g' in product['nutriments'] \\\n and 'image_url' in product \\\n and 'product_name_fr' in product:\n i += 1\n product[i] = {\n \"id\": i,\n \"name\": product['product_name_fr'],\n \"desc\": desc,\n \"categories\": product[\"categories\"].split(\", \"),\n \"API_link\": product['url'],\n \"photo\": product['image_url'],\n \"nutriscore\": nutrigrade,\n \"nutrient_100g\":\n \"saturated_fat_100g:{}:{}, \".format(\n product['nutriments']['saturated-fat_100g'],\n product['nutrient_levels']['saturated-fat']) +\n \"fat_100g:{}:{}, \".format(\n product['nutriments']['fat_100g'], product['nutrient_levels']['fat']) +\n \"salt_100g:{}:{}, \".format(\n product['nutriments']['salt_100g'], product['nutrient_levels']['salt']) +\n \"sugars_100g:{}:{} \".format(\n product['nutriments']['sugars_100g'], product['nutrient_levels']['sugars'])\n }\n products.append(product[i])\n else:\n pass\n return products",
"def get_recipe_preperation(soup_recipe):\n prep_steps = soup_recipe.find(\"div\",\n {\"itemprop\": \"recipeInstructions\"}).find_all(\"li\")\n prep = []\n for step in prep_steps:\n prep.append(step.get_text().strip())\n return prep",
"def process_request(self, request):\n if request.form:\n data = request.form\n self.request['email'], self.request['name'], self.request['feedback'] = [data.get('email', ''), data.get('name', ''), data.get('feedback', '')]\n if not self.request['email'] or not self.request['name'] or not self.request['feedback']:\n self.errors.append('Not all required fields were found.')\n\n try:\n parse_email(self.request['email'])\n except Exception:\n self.errors.append('|%s| is not a valid email address.' % self.request['email'])\n\n self._send_feedback()\n return self._output()",
"def parse_ingredients(inp):\n # Standardize to uppercase\n # inp = inp.upper()\n parsed, _ = _parse(inp)\n return parsed",
"def test_get_recipe_information(self):\n pass",
"def parse_request(self, request):\n response=''\n http_code = 200\n\n request_line = request.splitlines()[0]\n request_method, path, request_version = request_line.split()\n\n #Try to split path into it's components: the operation requested and the keyvalue\n try:\n request_op, request_keyvalue = path.split('?')\n request_op = request_op[1:]\n\n #If request is a get we split in a different order than if it's a set\n if request_op == 'get':\n request_value, request_key = request_keyvalue.split('=')\n response, http_code = self.get_value(request_key)\n elif request_op == 'set':\n request_key, request_value = request_keyvalue.split('=')\n response, http_code = self.set_value(request_key, request_value)\n else:\n response = 'Unknown operation in URL. Must be either GET or SET.'\n http_code = 400\n\n except ValueError: #Catch any paths that don't match the form we're interested in\n response = dedent(\"\"\"Incorrect path (%s)\n Requested URL must take the form http://%s:%s/[operation]?[value]\"\"\" % (path, self.server_address, self.server_port))\n http_code = 400\n return response, http_code\n\n return response, http_code"
] |
[
"0.6257471",
"0.62344533",
"0.5686614",
"0.56802547",
"0.5585636",
"0.5573089",
"0.5454585",
"0.53896827",
"0.5322964",
"0.530237",
"0.5281328",
"0.5248121",
"0.5238918",
"0.52310014",
"0.52302814",
"0.51896566",
"0.5145601",
"0.511923",
"0.51176846",
"0.51045305",
"0.50358987",
"0.50313795",
"0.50255245",
"0.5016689",
"0.49726966",
"0.49660167",
"0.4958515",
"0.49536183",
"0.4932894",
"0.4926821"
] |
0.64149237
|
0
|
This function will return a boolean indicating whether a user with a specific username exists in the database; it will support later assertion-based tests of users. The function will search for a user id that has a matching username from the arguments
|
def userExists(self, username):
data = db.session.query(User.id).filter_by(username = username).first()
if data is None:
return False
else:
return True
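
As the description notes, this helper backs test assertions; a minimal sketch, assuming a unittest.TestCase context with the same `db` session and `User` model as above.

# After registering a user named "alice" in the test setup:
self.assertTrue(self.userExists("alice"))
self.assertFalse(self.userExists("no_such_user"))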
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def user_exists(username):\n sql = \"SELECT username \" \\\n \"FROM users \" \\\n \"WHERE username=:username\"\n result = db.session.execute(sql, {\"username\": username})\n user = result.fetchone()\n if user is None:\n return False\n else:\n return True",
"def is_user_present(self, username): # WORKS\n done = self.cur.execute(\"SELECT username FROM users WHERE username = \\\"{}\\\"\".format(username))\n if done == 1:\n return True\n else:\n return False",
"def check():\n username = request.args.get(\"user_name\")\n users = db.execute(\"SELECT * FROM users WHERE username = :username\",\n {\"username\": username}).fetchone()\n if users is None:\n return jsonify(True)\n # Username is taken\n return jsonify(False)",
"def test_get_user_by_username(self):\n\t\tusername_in_db = server.get_user_by_username('Natasha')\n\t\tself.assertTrue(username_in_db, 'Query did not fetch user object.')\n\t\tusername_not_in_db = server.get_user_by_username('xyz')\n\t\tself.assertFalse(username_not_in_db, 'Query fetched user that did not exist (xyz).')",
"def check_user(self, username):\n self.dbcursor.execute(self.SQL_CHECK_USER, [username])\n row = self.dbcursor.fetchone()\n if row:\n return True \n return False",
"def check_user(self, username):\n self.dbcursor.execute(self.SQL_CHECK_USER, [username])\n row = self.dbcursor.fetchone()\n if row:\n return True \n return False",
"def check():\n # Sets variable username to username inputed by user\n username = request.args.get(\"username\")\n # Selects userid from username inputed by user (if there is one)\n userinfo = db.execute(\"SELECT * FROM users WHERE username = :username\", username=username)\n # If there is no info on the username inputed, that means username is not taken, and user can take the username\n if not userinfo:\n # Return true for the username is not taken\n return jsonify(True)\n # Return false if there is info on the username (meaning it was taken)\n return jsonify(False)",
"def _user_exists(self, username):\n return self.db.query(User).filter_by(name=username).first() is not None",
"def __contains__(self, user_name):\n tuples = self._execute(\n \"SELECT name FROM users WHERE name == ?\",\n (user_name,)\n )\n return len(tuples) == 1",
"def _checkUserExists(username,self):\r\n \r\n exists = False\r\n \r\n if _findUser(username) is not None:\r\n exists = True\r\n \r\n return exists",
"def exists(username):\n if Users.query.filter_by(username=username).first():\n return True\n return False",
"def check():\n username = request.args.get(\"username\")\n if len(username) < 1:\n print(\"false len\")\n return jsonify(\"false\")\n name = db.execute(f\"SELECT * FROM users WHERE username = '{username}'\")\n if name:\n print(\"false\")\n return \"false\"\n else:\n print(\"true\")\n return \"true\"",
"def check():\n enteredUsername = request.args.get('username')\n if not enteredUsername:\n return jsonify(False)\n currentUsernames = db.execute(\"SELECT username FROM users\")\n for username in currentUsernames:\n if enteredUsername == username[\"username\"]:\n return jsonify(False)\n\n return jsonify(True)",
"def username_exist(username):\n return User.objects.filter(username=username).first()",
"def exists_user(self, tenant_name, username):\n base = basedn.people_dn(username, tenant_name)\n return self.exists_entry(base)",
"def UserExist(self, username):\n return self.com.CheckUserexists(username)",
"def verify_user(self, username):\n try:\n self.c.execute('SELECT name FROM profiles WHERE name=(?)' (username,))\n user = self.c.fetchone()[0]\n return user == username\n\n except TypeError:\n return False",
"def test_find_user_by_username(self):\n self.new_user.save_user()\n test_user = User(\"Test\", \"user\", \"test\", \"walIas15\")\n test_user.save_user()\n user_exists = User.user_exist(\"test\")\n self.assertTrue(user_exists)",
"def exists_in_db(self) -> bool:\n query = \"\"\"SELECT * \n FROM Users \n WHERE Username=?;\"\"\"\n return len(self.db.fetchall(query, values=(self.username,))) > 0",
"def has_user(self, username):\n return username in self.user_table",
"def check_username(search_username):\n for find_username in USERS_LIST:\n if find_username[\"username\"] == search_username:\n return True\n return False",
"def check():\n username = request.args.get(\"username\")\n names = db.execute(\"SELECT * FROM users WHERE username = :username\", username=username)\n if names and username:\n return jsonify(False)\n elif not names and username:\n return jsonify(True)\n else:\n return jsonify(False)",
"def check_username():\n\n users = app.users\n username = request.data.decode('utf-8')\n\n if username in users:\n return Response('true')\n else:\n return Response('false')",
"def check_user(user):\n result_user = search_column_with_constraint(choose_database(\"auth\"), \"users\", \"id\", \"id\", user)\n # result_user = search_single_entry(choose_database(\"auth\"), \"users\", \"id\", user)\n\n if len(result_user) == 0:\n return 0\n else:\n return 1",
"def check():\n username = request.args.get(\"username\")\n rows = db.execute(\"SELECT * FROM users WHERE username = :username\", username=username)\n\n # If inputed username does not have at least 1 character and is not taken\n if len(username) > 1 and not rows:\n return jsonify(True)\n\n else:\n return jsonify(False)",
"def check():\n\n users_rows = db.execute('SELECT username FROM users')\n\n users = [user['username'] for user in users_rows]\n\n if len(str(request.args.get('username'))) > 1 and request.args.get('username') not in users:\n return jsonify(True)\n else:\n return jsonify(False)",
"def checkUserExists(self, email, username):\n query = \"SELECT * FROM User WHERE Email='\"+email+\"' OR UserName = '\"+username+\"';\"\n self.cur.execute(query)\n\n data = self.cur.fetchall()\n if len(data):\n return True\n else:\n return False",
"def userCheck(name):\r\n \r\n from logger.gamelogger import logger\r\n \r\n sql = \"\"\"SELECT count(*) FROM players where name = '{0}' COLLATE NOCASE;\"\"\".format(name)\r\n \r\n try:\r\n conn = sqlite3.connect(os.path.join(\"data\", \"players.db\"))\r\n cursor = conn.cursor()\r\n cursor.execute(sql)\r\n \r\n results = cursor.fetchall()\r\n\r\n except sqlite3.Error, e:\r\n logger.log.critical(\"Error using utils.gameutils.userCheck(): {0}\".format(e.args[0]))\r\n return False\r\n \r\n for row in results:\r\n if row[0] is 1:\r\n return True\r\n elif row[0] > 1:\r\n logger.log.warn(\"Duplicate username exists in player database: {0}\".format(name))\r\n \r\n return False",
"def is_username_taken(username):\n if User.objects.filter(username=username).exists():\n return True\n return False",
"def username_exists(self, username):\n user = [user for user in ALL_USERS if user['username'] == username]\n if user:\n return True\n return False"
] |
[
"0.7592645",
"0.75204927",
"0.7509843",
"0.74982107",
"0.7488085",
"0.7488085",
"0.74790424",
"0.74468815",
"0.7440057",
"0.7429507",
"0.74283344",
"0.7402009",
"0.73424625",
"0.7338444",
"0.73368734",
"0.7325905",
"0.7322203",
"0.73187786",
"0.7275225",
"0.7264799",
"0.7249369",
"0.72492176",
"0.723599",
"0.72338367",
"0.72280437",
"0.72107404",
"0.71644527",
"0.71297556",
"0.7099289",
"0.70947313"
] |
0.7744443
|
0
|
This function will search for the booking of a specific user id and car id, then tell whether it exists or not
|
def bookingExists(self, user_id, car_id):
data = db.session.query(Booking).filter_by(user_id = user_id, car_id = car_id).first()
if data is None:
return False
else:
return True
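
A matching assertion sketch, assuming a booking row for the given user and car ids has been committed in the test setup.

# After committing a Booking with user_id=12 and car_id=6:
self.assertTrue(self.bookingExists("12", "6"))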
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def validate_bookings(bookingid, username, car_id):\n\n # get booking object for bookingid\n booking = Booking.query.get(bookingid)\n print(\"Booking:::\")\n print(booking)\n\n user = booking.customer\n print(\"User:::\")\n print(user)\n \n print(\"Params:::\"+bookingid+\"--\"+username+\" --\"+car_id+\"--=>\"+str(booking.carid==car_id))\n\n isValidBooking = False\n if booking and user.email==username and booking.carid==int(car_id):\n isValidBooking = True\n\n print(str(isValidBooking)+\"----------------------------------------\")\n return jsonify({\"isValidBooking\": isValidBooking})",
"def check_book(book_info, user_id):\n book = session.query(Book).filter(or_(Book.id == book_info,\n Book.book_name == book_info)).filter(Book.user_id == user_id).first()\n if book:\n return book",
"def find_car_owner(database, user):\n try:\n # check if user is an officer\n c = database.cursor()\n c.execute('SELECT utype FROM users WHERE uid = ?', (user, ))\n user_type = c.fetchone()[0]\n\n # If user is an officer \n if user_type == 'o':\n print(pm.car_own)\n c = database.cursor()\n\n make = str(input(\"Make: \"))\n model = str(input(\"Model: \"))\n year = int(input(\"Year: \"))\n color = str(input(\"Color: \"))\n plate = str(input(\"Plate: \"))\n\n c.execute(\"\"\"SELECT DISTINCT p.fname, p.lname FROM persons p JOIN registrations r ON (r.fname, r.lname) = \n (p.fname, p.lname) JOIN vehicles v ON r.vin = v.vin WHERE v.make = ? OR v.model = ? OR v.year = ? OR v.color = ?\n OR r.plate = ?\"\"\", (make, model, year, color, plate))\n result = c.fetchall()\n\n if len(result) > 4:\n c.execute(\"\"\"SELECT DISTINCT r.fname, r.lname, v.make, v.model, v.year, v.color, r.plate FROM persons p JOIN registrations r\n ON (r.fname, r.lname) = (p.fname, p.lname) JOIN vehicles v ON r.vin = v.vin WHERE v.make = ? OR v.model = ? OR\n v.year = ? OR v.color = ? OR r.plate = ?\"\"\", (make, model, year, color, plate))\n result = c.fetchall()\n for values in result:\n print(\"\\n-----------------------------------------\")\n print(f\"Full Name: {values[0]} {values[1]}\")\n print(\"------------------------------------------\")\n print(f\"Make: {values[2]}\")\n print(f\"Model: {values[3]}\")\n print(f\"Year: {values[4]}\")\n print(f\"Color: {values[5]}\")\n print(f\"Plate: {values[6]}\")\n elif len(result) <= 4:\n c.execute(\"\"\"SELECT DISTINCT r.fname, r.lname, v.make, v.model, v.year, v.color, r.plate, r.regdate, r.expiry FROM persons p JOIN registrations r\n ON (r.fname, r.lname) = (p.fname, p.lname) JOIN vehicles v ON r.vin = v.vin WHERE v.make = ? OR v.model = ? OR\n v.year = ? OR v.color = ? OR r.plate = ?\"\"\", (make, model, year, color, plate))\n result = c.fetchall()\n for values in result:\n print(\"\\n-----------------------------------------\")\n print(f\"Full Name: {values[0]} {values[1]}\")\n print(\"------------------------------------------\")\n print(f\"Make: {values[2]}\")\n print(f\"Model: {values[3]}\")\n print(f\"Year: {values[4]}\")\n print(f\"Color: {values[5]}\")\n print(f\"Plate: {values[6]}\")\n print(f'Registration Date: {values[7]}')\n print(f\"Expiry: {values[8]}\")\n \n\n print(pm.all_done)\n else:\n print(pm.for_officers_only)\n sys.exit()\n except:\n print(pm.something_went_wrong)\n sys.exit()",
"def check_reservation(self, gs_id, vehicle_id, user_id, res_type):\n\n # perform the query\n status = reservation_exists(self.settings, user_id, vehicle_id, gs_id, res_type) \n\n # return status\n if status:\n return True\n else:\n return False",
"def check_car_availability():\n session_id = request.args.get('session-id', None)\n user_id = request.args.get('user-id', None)\n today = datetime.date.today()\n if request.method == 'POST':\n car_id = request.form['car-id']\n car = get_car_identified_by_id(car_id)\n date_from = request.form['date-from']\n date_to = request.form['date-to']\n if not are_dates_valid(date_from, date_to):\n if check_authentication(session_id, user_id):\n return render_template('car_details.html', car=car, error=\"Please insert a valid date interval!\", user=user_id, session_id=session_id, today=today)\n else:\n return render_template('car_details.html', car=car, error=\"Please insert a valid date interval!\", today=today)\n if is_car_available_in_the_selected_period(date_from, date_to, car_id):\n if check_authentication(session_id, user_id):\n return render_template('car_details.html', car=car, is_available=True,\n total_price=calc_total_price(car.price, date_from, date_to), show_confirm_div=True,\n date_from=date_from, date_to=date_to, user=user_id, session_id=session_id,\n today=today)\n else:\n return render_template('car_details.html', car=car, is_available=True,\n total_price=calc_total_price(car.price, date_from, date_to),\n show_confirm_div=True,\n date_from=date_from, date_to=date_to, today=today)\n else:\n if check_authentication(session_id, user_id):\n return render_template('car_details.html', car=car, is_available=False, show_confirm_div=True,\n date_from=date_from, date_to=date_to, user=user_id, session_id=session_id, today=today)\n else:\n return render_template('car_details.html', car=car, is_available=False, show_confirm_div=True,\n date_from=date_from, date_to=date_to, today=today)\n else:\n if check_authentication(session_id, user_id):\n return render_template('home.html', cars_list=get_cars_preview(), news_list=get_news_list(), user=user_id,\n session_id=session_id, authjs=False, preview_length=get_cars_preview().__len__())\n else:\n return render_template('home.html', cars_list=get_cars_preview(), news_list=get_news_list(), authjs=False,\n preview_length=get_cars_preview().__len__(), del_session_cookie=True)",
"def carbooking():\n if 'loggedin' in session:\n if request.method == 'POST':\n\n isactive = True\n userid = session['userid']\n fromdate = request.form['fromdate']\n todate = request.form['todate']\n carid = request.form['carid']\n response = requests.post(\n \"http://localhost:8080/api/add_booking\", {\n \"carid\": carid, \"userid\": userid, \"fromdate\": fromdate, \"todate\": todate})\n acc = json.loads(response.text)\n return redirect(url_for('site.bookings'))",
"def show_emp_bookings(self):\n try:\n emp_id = int(input(\"Enter Employee Id: \"))\n bookings = self.admin_repository.show_emp_bookings(emp_id)\n if bookings:\n for booking in bookings:\n print(\"Booking Id : {}\".format(booking[5]))\n print(\"Date : {}\".format(booking[0]))\n print(\"Pick up time : {}\".format(booking[1]))\n print(\"Cab_Number : {}\".format(booking[2]))\n print(\"Pick up location: {}\".format(booking[3]))\n print(\"Destination : {}\".format(booking[4]))\n print(\"----------------------------\")\n return True\n else:\n print(\"No records found.\")\n return False\n\n except Exception as e:\n print(\"Some Error occurred.\")\n return False",
"def carExists(self, carmake):\n data = db.session.query(Car.id).filter_by(make = carmake).first()\n if data is None:\n return False\n else:\n return True",
"def historyExists(self, user_id, car_id):\n data = db.session.query(History).filter_by(user_id = user_id, car_id = car_id).first()\n if data is None:\n return False\n else:\n return True",
"def check_if_exists(self, bookID):\n query = f\"\"\"SELECT * from {TABLE} WHERE bookID = '{bookID}';\"\"\"\n res = self.cursor.execute(query)\n\n if self.cursor.fetchall():\n return True\n else:\n return False",
"def available_book(rentalList, idBook):\n for rent in reversed(rentalList):\n if idBook == rent.get_idBook():\n if rent.get_flag() == \"1\":\n raise RepositoryExceptionRent (\"\\n The book is already rented. \\n\".upper())\n else:\n break",
"def add_booking():\n try:\n \n carid = request.form[\"carid\"]\n userid = request.form[\"userid\"]\n fromdate = request.form[\"fromdate\"].strip()\n todate = request.form[\"todate\"].strip()\n\n print(fromdate, \"|\", todate)\n\n car = Car.query.get(carid)\n car.isavailable = False\n\n user = User.query.get(userid)\n user_email = user.email\n\n fromdate_obj = datetime.datetime.strptime(fromdate, '%Y-%m-%d')\n todate_obj = datetime.datetime.strptime(todate, '%Y-%m-%d')\n \n summary = \"Car Booking. Car id: \" + carid\n\n cal = CalendarUtil()\n resp = cal.addToCalendar(user_email, fromdate_obj, todate_obj, summary)\n cal_event_id = resp['id']\n booking = Booking(carid=carid, userid=userid, fromdate=fromdate, todate=todate, caleventid= cal_event_id, isactive=True)\n\n test = db.session.add(booking)\n db.session.commit()\n return bookingSchema.jsonify(booking)\n except Exception as ex:\n print(\"Failed to add event to calender. Exception: \", str(ex))\n return jsonify(None)",
"def confirm_car_reservation():\n session_id = request.args.get('session-id', None)\n user_id = request.args.get('user-id', None)\n today = datetime.date.today()\n if request.method == 'POST':\n car_id = request.form['hidden-car-id']\n car = get_car_identified_by_id(car_id)\n date_from = request.form['hidden-date-from']\n date_to = request.form['hidden-date-to']\n if not are_dates_valid(date_from, date_to):\n if check_authentication(session_id, user_id):\n return render_template('car_details.html', car=car, error=\"Please insert a valid date interval!\",\n user=user_id, session_id=session_id, today=today)\n else:\n return render_template('car_details.html', car=car, error=\"Please insert a valid date interval!\", today=today)\n if is_car_available_in_the_selected_period(date_from, date_to, car_id):\n if check_authentication(session_id, user_id):\n if has_user_age_requirement(user_id, car_id):\n reservation_id = save_car_reservation(car_id, user_id, date_from, date_to)\n return render_template('car_reservation_details.html', user=user_id, session_id=session_id,\n reservation_id=reservation_id, car=car, date_from=date_from, date_to=date_to,\n total_price=calc_total_price(car.price, date_from, date_to),\n reservation_just_completed=True)\n else:\n error_msg = \"The reservation has failed because you are not at least \" + str(car.min_age) +\\\n \" years old!\"\n return render_template('car_details.html', user=user_id, session_id=session_id,\n error=error_msg, car=car, today=today)\n else:\n return render_template('car_details.html', car=car,\n error=\"You need to be authenticated in order to complete this action!\", today=today)\n else:\n if check_authentication(session_id, user_id):\n return render_template('car_details.html', car=car, is_available=False, show_confirm_div=True,\n date_from=date_from, date_to=date_to, user=user_id, session_id=session_id, today=today)\n else:\n return render_template('car_details.html', car=car, is_available=False, show_confirm_div=True,\n date_from=date_from, date_to=date_to, today=today)\n else:\n if check_authentication(session_id, user_id):\n return render_template('home.html', cars_list=get_cars_preview(), news_list=get_news_list(), user=user_id,\n session_id=session_id, authjs=False, preview_length=get_cars_preview().__len__())\n else:\n return render_template('home.html', cars_list=get_cars_preview(), news_list=get_news_list(), authjs=False,\n preview_length=get_cars_preview().__len__(), del_session_cookie=True)",
"def available_customer(rentalList, idCustomer):\n for rent in reversed(rentalList):\n if idCustomer == rent.get_idCustomer():\n if rent.get_flag()==\"1\":\n raise RepositoryExceptionRent (\"\\n The customer has already a rented book. \\n\".upper())\n else:\n break",
"def test_searchCar(self):\n make = \"Toyota\"\n body_type = \"Seden\"\n colour = \"Black\"\n seats = \"5\"\n cost_per_hour = \"10.5\"\n booked = True\n cars = db.session.query(Car).filter(or_(Car.make == make, \n Car.body_type == body_type,\n Car.colour == colour,\n Car.seats == seats,\n Car.cost_per_hour == cost_per_hour,\n Car.booked == booked)).all()\n self.assertTrue((cars is not None))",
"def is_car_available_in_the_selected_period(date_from, date_to, car_id):\n session = start_session()\n queryset = session.query(CarReservation).filter(CarReservation.id_car.__eq__(car_id))\n reservations_list = queryset2list(queryset)\n try:\n date_from = datetime.strptime(date_from, '%Y-%m-%d')\n date_to = datetime.strptime(date_to, '%Y-%m-%d')\n is_available = True\n for reservation in reservations_list:\n if dates_intervals_are_overlapped(reservation.date_from, reservation.date_to, date_from.date(), date_to.date()):\n is_available = False\n return is_available\n except ValueError:\n return False",
"def already_booked(slots, attendees, user_name):\n already_joined = False\n for i in attendees:\n if i[\"email\"] == user_name+'@student.wethinkcode.co.za':\n already_joined = True\n\n if already_joined == True:\n return False\n else:\n return True",
"def can_reserve(train_id,segment_id):\n cursor.execute(\"\"\"select freeseat from seats_free where train_id= %s and segment_id= %s\"\"\", [train_id,segment_id]) # query\n available_seats = cursor.fetchone() # fetch all reservations related to that passenger\n print(available_seats)\n if available_seats[0] == 448:\n return False;\n return True;",
"def car_details():\n car_id = request.args.get('car-id', None)\n session_id = request.args.get('session-id', None)\n user_id = request.args.get('user-id', None)\n date_from = request.args.get('rent-from', None)\n date_to = request.args.get('rent-to', None)\n car = get_car_identified_by_id(car_id)\n today = datetime.date.today()\n if check_authentication(session_id, user_id):\n if is_car_available_in_the_selected_period(str(date_from), str(date_to), car_id):\n return render_template('car_details.html', car=car, user=user_id, session_id=session_id, today=today,\n is_available=True, show_confirm_div=True, date_from=date_from, date_to=date_to,\n total_price=calc_total_price(car.price, date_from, date_to))\n else:\n return render_template('car_details.html', car=car, user=user_id, session_id=session_id, today=today)\n else:\n return render_template('car_details.html', car=car, today=today)",
"def test_bookCar(self):\n user_id = \"12\"\n car_id = \"6\"\n begin_date = \"2020-05-21\" \n begin_time = \"12:00:00\"\n return_date = \"2020-05-23\"\n return_time = \"12:00:00\"\n\n begin_datetime = \"{} {}\".format(begin_date, begin_time) \n return_datetime = \"{} {}\".format(return_date, return_time)\n\n newBooking = Booking( user_id = user_id,\n car_id = car_id,\n begin_time = begin_datetime,\n return_time = return_datetime,\n ongoing = False)\n # Add new row to the database\n db.session.add(newBooking)\n\n # Update car's availability \n car = Car.query.get(car_id) \n car.booked = True\n\n # Commit changes\n db.session.commit()\n self.assertTrue(self.bookingExists(user_id, car_id))",
"def check_availability(car):\n plate_num = int(car.plate[-1]) # Get the last number of the plate\n date = car.date # Get the date \n weekday = (date.weekday() + 1)*2 # Get the number of the week day\n time = date.time() # Get the time \n restricted = [(weekday-1) , weekday % 10] # Create an interval of restrictions\n check_time = (time <= morning_end.time() and time >= morning_in.time()) or \\\n (time <= afternoon_end.time() and time >= afternoon_in.time())\n # Boolean that verify the time \n if check_time and plate_num in restricted:\n car.availability = False\n else:\n car.availability = True",
"def userObjExists(self, user : bbUser.bbUser) -> bool:\n return self.userIDExists(user.id)",
"def search_user_by_id(self,id, cursor):\n sql = \"SELECT * FROM users WHERE userid = %s\"\n cursor.execute(sql, (id,))\n return cursor",
"def _checkTruckRec(self, tNode, Uid):\n if type(Uid) == int:\n result = self.searchTree(tNode, Uid)\n if result:\n if result[1] == -1:\n print(f'Vehicle id {result[0]} did not come to the warehouse today')\n else:\n if result[1] == 0:\n print(f'Vehicle id {result[0]} just reached the warehouse')\n if result[1] % 2 == 0:\n print(f'Vehicle id {result[0]} entered {result[2]} times into the system. '\n f'It just completed an order')\n if result[1] % 2 != 0:\n print(f'Vehicle id {result[0]} entered {result[2]} times into the system. '\n f'It is currently fulfilling an open order')\n print('------------------------------------')",
"def isCarAvailable(self, car, start, end):\n rentals = self.filterRentals(None, car)\n for rent in rentals:\n if start > rent.end or end < rent.start:\n continue\n return False\n return True",
"def booking_query_info(\n creator: str or int,\n room: str,\n start: str\n ) -> bool:\n url = ROOMBOOKING_URL + 'booking-query-info'\n data = {\n 'creator': creator,\n 'room': room,\n 'start': start\n }\n r = requests.post(url, json=data).json()\n\n # this means that the account is not in RBBot's friendlist\n if 'data' not in r:\n raise PermissionError\n\n return len(r['data']) > 0",
"def search_for_adaptation():\n\n book_id = 0\n # variables for status results; 0 for no error, 1 for no book found, 2 for no movie found,\n # 3 for no tv show found, 4 for no tv show and movie found\n status_msg = \"\"\n status_num = 0\n\n # if the Random Book button is chosen, then select a random book from the list\n # try to match the book with a movie or tv show until one is found\n if request.args.get('random') == \"1\":\n search_term = data_functions.get_random_book()\n else:\n # if search input is used, then get the search term\n search_term = request.form['search'] # get search term from input box\n\n # Goodreads API functions\n gr_result = API_functions.request_book(search_term) # use function in API_functions.py\n\n # if no book is found, generate status code\n if gr_result[\"total\"] == 0:\n status_msg = \"No matching book found for {0}. Try another.\".format(search_term)\n status_num = 1\n\n # TheMovieDB functions\n movie_result = {} # empty dictionary\n tv_result = {} # empty dictionary\n if status_num == 0: # only continue if there is a book found\n # search for movie\n # use function in API_functions.py\n movie_result = API_functions.request_movie(gr_result[\"name_split\"], gr_result[\"author_name_clean\"], 0)\n\n if movie_result[\"total_results\"] != 0: # if a movie is found, save some of its data\n movie_id = movie_result[\"id\"] # save movie ID\n\n else: # if no movie is found, generate status message\n status_msg = \"No movie found. Try another.\"\n status_num = 2\n\n # search for TV show\n # use function in API_functions.py\n tv_result = API_functions.request_tv_show(gr_result[\"name_split\"], gr_result[\"author_name_clean\"], 0)\n\n if tv_result[\"total_results\"] != 0: # if a tv show is found, save some of its data\n tv_id = tv_result[\"id\"] # save tv ID\n\n else: # if no tv show is found, generate status message\n status_msg = \"No TV Show found. Try another.\"\n status_num = 3\n\n if movie_result[\"total_results\"] == 0 and tv_result[\"total_results\"] == 0:\n # if no movie and tv show found, generate status message.\n # in the case they are found, but not based on the book, generate the same message\n status_msg = \"No adaptation found for {0}. Try another.\".format(search_term)\n status_num = 4\n\n if previous_searches.count(\n gr_result[\"name_split\"]) == 0 and status_num != 4: # only add if book name is not in deque\n if len(previous_searches) == 5: # keep the deque at only five most recent searches\n previous_searches.pop() # remove one if there is already five\n previous_searches.appendleft(gr_result[\"name_split\"]) # add recent search to beginning of deque\n # render the page again with updated information, pass all data to render_template method\n return render_template(\"index.html\", book_id=book_id, book_data=gr_result, movie_data=movie_result,\n tv_data=tv_result, app_name=app_name, search=search_term, status_msg=status_msg,\n status_num=status_num, previous_searches=previous_searches)",
"def consultation_booking_query(self, cid, sid, time, date):\n if not self.check_course_exist(cid):\n return ConsultationError.INVALID_COURSE.value\n is_weekday, feedback = self.check_weekday(date)\n time = self.round_time(time)\n if is_weekday:\n try:\n avail_list = self.get_avail_time_slots(cid.upper(), date) # return available time slot list\n logger.debug(avail_list)\n if time in avail_list:\n self.add_consultation(cid, sid, time, date) # add into database\n self.emailer.send_confirm_booking(cid=cid, time=time, date=date, receiver='[email protected]')\n return \"{}\".format(feedback)\n else:\n if not avail_list:\n return \"Sorry, there is no available time slot on date\"\n result = \"Sorry this time slot has been booked, \" \\\n \"please choose another one from following time slots on {}\".format(date)\n return '{}: {}'.format(result, ', '.join(avail_list))\n except ValueError:\n logger.error(\"Invalid Input\")\n return\n else:\n logger.debug(feedback)\n return feedback",
"def researchbybrasserie():\n if request.method == 'GET':\n user1 = request.args.get('brass')\n data1 = {}\n data1 = Beers.find({\"Brasserie\":user1}, {\"_id\":0})\n return fct.returning(data1)",
"def is_book_available(self, book):\n request_url = \"%s?q=%s\" % (self.API_URL, book)\n json_data = self.make_request(request_url)\n if json_data and len(json_data['docs']) >= 1:\n return True\n return False"
] |
[
"0.6837736",
"0.6354792",
"0.61274904",
"0.6110173",
"0.58761245",
"0.58010334",
"0.5644561",
"0.5604423",
"0.5591727",
"0.5572659",
"0.5558989",
"0.5531499",
"0.54869264",
"0.5457973",
"0.53667784",
"0.5342457",
"0.53367656",
"0.5316068",
"0.52929175",
"0.5258301",
"0.5199735",
"0.5184972",
"0.5164148",
"0.51582813",
"0.51463836",
"0.51363605",
"0.51290905",
"0.51093817",
"0.5104812",
"0.5103576"
] |
0.79800326
|
0
|
This function will search the history for a specific user id and car id, then tell whether it exists or not
|
def historyExists(self, user_id, car_id):
data = db.session.query(History).filter_by(user_id = user_id, car_id = car_id).first()
if data is None:
return False
else:
return True
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def has_history(self, user):\n\n header = connect(self.__path)\n curs = header.cursor()\n encrypted_id = md5((str(user.id) + \"typicaluser\").encode()).hexdigest()\n curs.execute(\"SELECT * FROM users WHERE id = (?)\", (encrypted_id,))\n data = curs.fetchall()\n return len(data) >= 1",
"def search_history(cls, keyword, user_id):\n query = \"\"\"SELECT keyword FROM search_history WHERE user_id='{user_id}' AND keyword LIKE '{keyword}'\"\"\"\n cls.db.query(query, {'user_id': user_id, 'keyword': '{}{}{}'.format('%', keyword, '%')})\n search_results = cls.db.cursor.fetchall()\n\n return search_results",
"def insert_in_history(cls, keyword, user_id):\n\n search_query = \"\"\"SELECT keyword FROM search_history WHERE user_id='{user_id}' AND keyword='{keyword}'\"\"\"\n cls.db.query(search_query, {'user_id': user_id, 'keyword': keyword})\n search_results = cls.db.cursor.fetchall()\n if not search_results:\n query = \"\"\"INSERT INTO search_history (keyword, user_id) VALUES('{keyword}', '{user_id}')\"\"\"\n cls.db.query(query, {'user_id': user_id, 'keyword': keyword})\n cls.db.commit() # do this in processor on exception handling and rollback",
"def bookingExists(self, user_id, car_id):\n data = db.session.query(Booking).filter_by(user_id = user_id, car_id = car_id).first()\n if data is None:\n return False\n else:\n return True",
"def user_history(self):\n self.query_1 = \"SELECT * FROM orders WHERE user_id=%s\"\n self.input_1 = (self.user_id,) \n self.event = \"user_history\"\n self.message = \"Order history fetched successfully.\"\n self.error = \"Unable to fetch order history.\"",
"def show_history(user_id):\n return History.where('user_id', user_id).get()",
"def checkuserhistory():\n try:\n auth, permitted = validate_user()\n user = helpers.get_user()\n except AttributeError:\n raise eh.KarpGeneralError('No user name provided', 'checkuserhistory')\n try:\n size = helpers.get_size(default=10, settings={'allowed': permitted})\n from src.dbhandler.dbhandler import dbselect\n updates = []\n for lexicon in permitted:\n # add updates from lexicons that are kept in sql\n if configM.get_lexicon_sql(lexicon):\n updates.extend(dbselect(lexicon, user=user, max_hits=size))\n\n return jsonify({'updates': updates})\n except Exception as e:\n logging.exception(e)\n raise eh.KarpGeneralError(str(e))",
"def get_user_borrowing_history(user_id):\n borrowed_books = BorrowBook.get_all_borrowed_books()\n user_books = [\n book for book in borrowed_books if book.user_id == user_id]\n borrowing_history = []\n book_details = {}\n for book in user_books:\n try:\n singleBook = Book.get_book_by_id(book.book_id)\n book_details[\"id\"] = singleBook.id\n book_details[\"title\"] = singleBook.title\n book_details[\"author\"] = singleBook.author\n book_details[\"isbn\"] = singleBook.isbn\n book_details[\"borrowDate\"] = book.date_borrowed\n if book.returned:\n book_details[\"returnDate\"] = book.date_returned\n else:\n book_details[\"dueDate\"] = book.date_due\n except Exception as e:\n print(e)\n finally:\n borrowing_history.append(book_details)\n book_details = {}\n return borrowing_history",
"def checkhistory(lexicon, lid):\n from src.dbhandler.dbhandler import dbselect\n auth, permitted = validate_user(mode=\"read\")\n settings = {'allowed': permitted}\n size = helpers.get_size(default=10, settings=settings)\n return jsonify({'updates': dbselect(lexicon, _id=lid, max_hits=size)})",
"def history(self, update, context):\n\n message = update.message.text.lower().split(\" \")\n user = self.User(update)\n output = \"\"\n if message[1] == \"show\":\n if not self.data_base.has_history(user):\n output = \"you don't have any history\"\n self.data_base.log(user, update.message.text, output)\n else:\n output = self.data_base.show_history(user)\n if len(output) > 4096:\n output = output[-4096::]\n self.data_base.log(user, update.message.text, \"Successfully showed history\")\n\n elif message[1] == \"clear\":\n if not self.data_base.has_history(user):\n output = \"your history is already clean\"\n else:\n self.data_base.clear_history(user)\n output = \"Clean\"\n self.data_base.log(user, update.message.text, output)\n else:\n output = \"Looks like you have a little mistake\\n\" \\\n \"the correct way of using the /history command is:\\n\" \\\n \"/history show\\n\" \\\n \"/history clear\"\n self.data_base.log(user, update.message.text, output)\n user.send_message(output)",
"def show_history(self, user: TelegramController.User):\n\n header = connect(self.__path)\n curs = header.cursor()\n encrypted_id = md5((str(user.id) + \"typicaluser\").encode()).hexdigest()\n curs.execute(\"SELECT * FROM users WHERE id = (?)\", (encrypted_id,))\n data = curs.fetchall()[0][1]\n return data",
"def fetch_history(username, keyword):\n user_id = fetch_user_id(username)\n if not user_id:\n user_id = create_user(username)\n queryset = run_query(\n f\"SELECT keyword from history where user_id='{user_id}' and keyword LIKE '%{keyword}%' ORDER BY updated_on DESC\"\n )\n output = queryset.fetchall()\n return ', '.join((ele[0] for ele in output))",
"def check_book(book_info, user_id):\n book = session.query(Book).filter(or_(Book.id == book_info,\n Book.book_name == book_info)).filter(Book.user_id == user_id).first()\n if book:\n return book",
"def _checkTruckRec(self, tNode, Uid):\n if type(Uid) == int:\n result = self.searchTree(tNode, Uid)\n if result:\n if result[1] == -1:\n print(f'Vehicle id {result[0]} did not come to the warehouse today')\n else:\n if result[1] == 0:\n print(f'Vehicle id {result[0]} just reached the warehouse')\n if result[1] % 2 == 0:\n print(f'Vehicle id {result[0]} entered {result[2]} times into the system. '\n f'It just completed an order')\n if result[1] % 2 != 0:\n print(f'Vehicle id {result[0]} entered {result[2]} times into the system. '\n f'It is currently fulfilling an open order')\n print('------------------------------------')",
"def added_by(self, user):\n return ChefsHasRecipes.objects.filter(recipe=self, chef=user).exists()",
"def carExists(self, carmake):\n data = db.session.query(Car.id).filter_by(make = carmake).first()\n if data is None:\n return False\n else:\n return True",
"def prehistory_recept(self, userdialog):\n\n return False, None",
"def prehistory_recept(self, userdialog):\n # import ipdb; ipdb.set_trace()\n\n return False, None",
"def get_summoner_match_history(name):\n\n try:\n # Try to retrieve the provided username\n summoner = cass.get_summoner(name=name)\n\n # Try to retrieve the summoner's match history, then return it \n match_history = summoner.match_history\n return match_history\n \n except cass.datastores.riotapi.common.APIRequestError:\n # Catch the API Request Error, which occurs when the API key is invalid\n print(\"The Riot API Key is not valid. Please enter a valid API key.\")\n os._exit(1) # Exit the program\n \n except:\n # Any other exception will occur when the summoner cannot be found\n error_message = \"The summoner '\" + name + \"' could not be found.\" + \\\n \"Please enter a valid username.\"\n print(error_message)\n os._exit(1) #Exit the program",
"async def history(self, ctx, user_id: str):\n\n session = self.bot.helpers.get_db_session()\n try:\n self.bot.log.info(\n f\"CMD {ctx.command} called by {ctx.message.author} ({ctx.message.author.id})\"\n )\n guild = ctx.message.guild\n user = await self.bot.helpers.get_member_or_user(user_id, guild)\n if not user:\n return await ctx.send(\n f\"Unable to find the requested user. Please make sure the user ID or @ mention is valid.\"\n )\n\n (\n embed_result_entries,\n footer_text,\n ) = await self.bot.helpers.get_action_history(session, user, guild)\n\n p = FieldPages(ctx, per_page=8, entries=embed_result_entries,)\n p.embed.color = 0xFF8C00\n p.embed.set_author(\n name=f\"Member: {user} ({user.id})\", icon_url=user.avatar_url\n )\n p.embed.set_footer(text=footer_text)\n await p.paginate()\n except discord.HTTPException as err:\n self.bot.log.exception(\n f\"Discord HTTP Error responding to {ctx.command} request via Msg ID {ctx.message.id}. {sys.exc_info()[0].__name__}: {err}\"\n )\n await ctx.send(\n f\"Error processing {ctx.command}. Error has already been reported to my developers.\"\n )\n except DBAPIError as err:\n self.bot.log.exception(\n f\"Error logging note to database. {sys.exc_info()[0].__name__}: {err}\"\n )\n await ctx.send(\n f\"Error processing {ctx.command}. Error has already been reported to my developers.\"\n )\n session.rollback()\n except Exception as err:\n self.bot.log.exception(\n f\"Error responding to {ctx.command} via Msg ID {ctx.message.id}. {sys.exc_info()[0].__name__}: {err}\"\n )\n await ctx.send(\n f\"Error processing {ctx.command}. Error has already been reported to my developers.\"\n )\n finally:\n session.close()",
"def test_userHistories(self):\n user_id = 12\n histories = History.query.filter_by(user_id = user_id).all()\n self.assertTrue((histories is not None))",
"def UserRecords(self, username):\n return not self.com.CheckUid(username) is None",
"def album_detection(user):\n seen_tracks = user.seen_tracks\n\n list_of_albums = {}\n album_number_of_tracks = {}\n for track in seen_tracks:\n if \"album\" not in track:\n continue\n\n if track[\"name\"] == \"sanjake\":\n continue\n\n album_name = track[\"album\"][\"name\"]\n if album_name not in list_of_albums:\n list_of_albums[album_name] = 0\n album_number_of_tracks[album_name] = track[\"album\"][\"total_tracks\"]\n list_of_albums[album_name] += 1\n\n if list_of_albums[album_name] > 1 and list_of_albums[album_name] == album_number_of_tracks[album_name]:\n print(f\"Album search detected: {album_name}, number of tracks: {album_number_of_tracks[album_name]}\")\n print(f\"User: {user.email_address}\")",
"def findHistory(objList, historyName):\r\n if objList:\r\n foundHistoryList = []\r\n for objName in objList:\r\n # find historyName in the object's history:\r\n histList = cmds.listHistory(objName)\r\n for hist in histList:\r\n histType = cmds.objectType(hist)\r\n if histType == historyName:\r\n foundHistoryList.append(hist)\r\n return foundHistoryList",
"def history():\n user_history=db.execute(\"SELECT * FROM history WHERE user_id=:u_i\",u_i=session[\"user_id\"])\n return render_template(\"history.html\",s=user_history)",
"def reported_by(self, user):\n return Report.objects.filter(recipe=self, chef=user).exists()",
"def record_exists(user):\n cnx = create_connection()\n cursor = cnx.cursor()\n\n query = \"SELECT * FROM \" + USAGE_TABLE['name'] + \" WHERE \" + USAGE_TABLE['relational_column'] + \" = '\" + user + \"'\"\n\n try:\n cursor.execute(query)\n except mysql.connector.Error as e:\n cursor.close()\n cnx.close()\n if e.errno == errorcode.ER_BAD_TABLE_ERROR:\n print(\"Table doesn't exist!\")\n else:\n print(e)\n return\n\n rows = cursor.fetchall()\n cnx.close()\n cursor.close()\n\n if len(rows):\n return True\n else:\n return False",
"def onRegisterHistory(self):\n pass",
"def history():\n transactions = db.execute(\"SELECT * FROM history WHERE user_id = ?\", session[\"user_id\"])\n user_name = db.execute(\"SELECT username, cash FROM users WHERE id = ?\", session[\"user_id\"])\n \n return render_template(\"history.html\", transactions=transactions, user_name=user_name[0][\"username\"])",
"def searching(self) -> None:\n heading = self.chassis.get_heading()\n self.turret.scan(heading)\n\n if self.vision.get_vision_data()[2] is not None:\n # means no data is available\n # print(f\"searching -> tracking {self.vision.get_vision_data()}\")\n # self.next_state(\"tracking\")\n self.state = self.tracking"
] |
[
"0.6492603",
"0.59272593",
"0.58328664",
"0.5809773",
"0.5701043",
"0.5635138",
"0.5616981",
"0.53315604",
"0.5285922",
"0.5251379",
"0.5230105",
"0.5202547",
"0.5191416",
"0.51679724",
"0.51483643",
"0.51421005",
"0.5132743",
"0.51297987",
"0.5099777",
"0.5097889",
"0.509201",
"0.5088658",
"0.50572884",
"0.5048873",
"0.5045236",
"0.5028469",
"0.50061023",
"0.5002496",
"0.49884367",
"0.49794808"
] |
0.7956357
|
0
|
This test will test the login credentials by querying the database to see if that username and password exist
|
def test_login(self):
username = "user1"
password = "pw1"
userID = 1
data = db.session.query(User.id).filter_by(username = username, password = password).first()
self.assertTrue(data is not None)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def check_db(args):\n # Open connection to the registered users db\n base_path = \"pypackage\"\n users_db = \"openaq_users.db\"\n conn = sqlite3.connect(os.path.join(base_path, users_db))\n cursor = conn.cursor()\n\n # Check for username\n row = cursor.execute(\"SELECT * FROM user_database WHERE username = ?\",\n (args.username,))\n results = row.fetchall()\n conn.commit()\n\n if results:\n # Add salt\n salt = str(results[0][2])\n digest = salt + args.password\n\n # Compute the hash\n for i in range(1000):\n digest = hashlib.sha256(digest.encode('utf-8')).hexdigest()\n\n # Check for password\n if digest == results[0][1]:\n print('Successful log-in. Welcome {}!'.format(args.username))\n return True\n\n else:\n print(\n \"Password is invalid for user {}.\".format(args.username)\n )\n return False\n else:\n print(\"Username not present.\")\n return False",
"def check_credentials(username, password):\n\n return db.auth_user(username, password)",
"def test_can_login(self):\n user = authenticate(username='jack', password='secret')\n self.assertTrue(user is not None)\n self.assertTrue(user.is_authenticated)",
"def test_get_user_by_username(self):\n\t\tusername_in_db = server.get_user_by_username('Natasha')\n\t\tself.assertTrue(username_in_db, 'Query did not fetch user object.')\n\t\tusername_not_in_db = server.get_user_by_username('xyz')\n\t\tself.assertFalse(username_not_in_db, 'Query fetched user that did not exist (xyz).')",
"def test_login_with_valid_user(self, test_client, init_database, insert_user_db):\n url = '/api/v1/auth/login'\n data = {\n 'email': insert_user_db.email,\n 'password': 'password'\n }\n response = test_client.post(url, json=data)\n\n assert response.status_code == 201\n assert response.json['name'] == insert_user_db.name\n assert response.json['email'] == insert_user_db.email\n assert response.json['access'] is not None\n assert response.json['refresh'] is not None",
"def test_user_can_login(self):\n user = authenticate(username='Marry', password='secret')\n self.assertFalse(user is None)\n self.assertTrue(user.is_authenticated)",
"def testLoginPassword(self):\n self.assertEquals(models.SUCCESS, self.users.add(\"userI\", \"password\"))\n self.assertEquals(models.ERR_BAD_CREDENTIALS, self.users.login(\"userI\", \"passw0rd\"))",
"def login_doctor(args):\n con, cursor = db.connect_db()\n\n cursor.execute(\"SELECT * FROM Doctor WHERE SSN = ? AND Password = ?;\",\n (args[\"ssn\"], args[\"password\"]))\n\n # No user in database with those credentials.\n res = cursor.fetchone()\n if not res:\n con.close()\n return False\n\n # User has correct credentials.\n con.close()\n return True",
"def authorise_login(self, username, password):\n con = self.connect()\n cursor = con.cursor()\n cursor.execute(\"SELECT username,password \\\n FROM users WHERE password = %s\", (password,))\n credentials = cursor.fetchone()\n cursor.close()\n con.commit()\n con.close()\n if credentials is None:\n return False\n if username != credentials[0]:\n return False\n if password != credentials[1]:\n return False\n return True",
"def login(uname, password, db, session):\n\tquery = db((db.User.username == uname) & (db.User.password == password))\n\tif query.count() == 1:\n\t\tsession.auth = query.select().first().id\n\t\treturn True\n\telse:\n\t\treturn False",
"def test_successful_login(self):\n pass",
"def test_basic_login(self):\n c = Client()\n c.login(username='a', password='123456')",
"def test_login(self):\n res = self.client.get(\"/login\")\n data = res.data.decode(\"utf-8\")\n assert res.status == \"200 OK\"\n assert \"Already a member!\" in data",
"def test_login_login_inexisting_user_false(self):\n logins = {\n \"Email\": \"[email protected]\",\n \"Password\": \"pass1234\"\n }\n resp = self.client().post('/api/v1/auth/login', data=logins)\n self.assertEqual(resp.status_code, 400)\n resp = resp.get_json()\n self.assertEqual(resp['error'],\n 'User not found in our database')",
"def test_login(self):\n self.user_api()\n self.base.metadata.create_all(self.engine)\n people = self.provision_users()\n p = {'__action': 'login', 'id': people[0].id, 'password': \"testing\"}\n self.post('user', 200, params=p)",
"def test_getUserNonexistentDatabase(self):\n self.db = checkers.FilePasswordDB(\"test_thisbetternoteverexist.db\")\n\n self.assertRaises(error.UnauthorizedLogin, self.db.getUser, \"user\")",
"def check_auth(username, password, expected_user, expected_pw):\n return username == expected_user and password == expected_pw",
"def check_auth(username, password):\n session.pop('username', None)\n session.pop('password', None)\n session['username'] = username\n session['password'] = password\n # Test if we can connect to a region\n connect_to_region()\n return True",
"def check_user(self,username, password):\n safe_input = (username, password)\n vals = self.cur.execute(\"SELECT Username, Password FROM Users WHERE Username=? AND Password=?\",safe_input).fetchone()\n if vals:\n logging.info('%s was authenticated', username)\n return True\n else:\n logging.info('Failed login for %s', username)\n return False",
"def testLoginUsername(self):\n self.assertEquals(models.SUCCESS, self.users.add(\"userH\", \"password\"))\n self.assertEquals(models.ERR_BAD_CREDENTIALS, self.users.login(\"userHX\", \"password\"))",
"def login_clerk(args):\n con, cursor = db.connect_db()\n\n cursor.execute(\"SELECT * FROM Clerk WHERE SSN = ? AND Password = ?;\",\n (args[\"ssn\"], args[\"password\"]))\n\n # No user in database with those credentials.\n res = cursor.fetchone()\n if not res:\n con.close()\n return False\n\n # User has correct credentials.\n con.close()\n return True",
"def test_login(self):\n user = UUIDUser.objects.create_user(username=\"uuid\", password=\"test\")\n self.assertTrue(self.client.login(username=\"uuid\", password=\"test\"))\n self.assertEqual(\n UUIDUser.objects.get(pk=self.client.session[SESSION_KEY]), user\n )",
"def test_logged_user_data(self, test_client, init_database, insert_user_db):\n login_url = '/api/v1/auth/login'\n login_data = {\n 'email': insert_user_db.email,\n 'password': 'password'\n }\n login_response = test_client.post(login_url, json=login_data)\n\n url = '/api/v1/auth/me'\n headers = {\n 'Authorization': 'Bearer {}'.format(login_response.json['access'])\n }\n response = test_client.get(url, headers=headers)\n\n assert response.status_code == 200\n assert response.json['id'] is not None\n assert response.json['name'] == insert_user_db.name\n assert response.json['email'] == insert_user_db.email",
"def test_incorrect_username(db_session):\n assert (login(\"unknown\", \"unknown\") == \"login.failed\")",
"def login_patient(args):\n\n con, cursor = db.connect_db()\n\n cursor.execute(\"SELECT * FROM Patient WHERE P_SSN = ? AND Password = ?;\",\n (args[\"ssn\"], args[\"password\"]))\n\n # No user in database with those credentials.\n res = cursor.fetchone()\n if not res:\n con.close()\n return False\n\n # User has correct credentials.\n con.close()\n return True",
"def test_sucessful_login(self):\n self.user.list_of_accounts = [{'username': 'dalton',\n 'pwd': 'chromelegend',\n 'email': '[email protected]'}]\n msg = self.user.login(\"[email protected]\", \"chromelegend\")\n self.assertEqual(msg, \"Success!\")",
"def check_auth(username, password):\n user = User.query.filter(User.username == username).first()\n\n if user:\n return user.password == password\n else:\n return False",
"def test_database(self):\n subprocess.check_call(['reading', '-c', self.tmpfile, 'create_tables'])\n subprocess.check_call(['reading', '-c', self.tmpfile, 'add_user', '[email protected]'])\n\n conn = sqlite3.connect(self.tmp_config.database.host)\n curs = conn.cursor()\n curs.execute('SELECT email, administrator FROM user')\n users = curs.fetchall()\n\n self.assertEqual(len(users), 1)\n user = users[0]\n self.assertEqual(user[0], '[email protected]')\n self.assertEqual(user[1], False)\n\n subprocess.check_call(['reading', '-c', self.tmpfile,\n 'add_user', '--administrator', '[email protected]'])\n\n conn = sqlite3.connect(self.tmp_config.database.host)\n curs = conn.cursor()\n curs.execute('SELECT email, administrator FROM user')\n users = curs.fetchall()\n\n self.assertEqual(len(users), 2)\n users.sort()\n users = tuple((u[0], bool(u[1])) for u in users)\n self.assertEqual(users, (('[email protected]', False), ('[email protected]', True)))\n\n subprocess.check_call(['reading', '-c', self.tmpfile, 'create_tables'])\n conn = sqlite3.connect(self.tmp_config.database.host)\n curs = conn.cursor()\n curs.execute('SELECT COUNT(*) FROM user')\n self.assertEqual(0, curs.fetchall()[0][0])",
"def test_user_authenticate(self):\n\n user = User.authenticate(\"test1\", \"password\")\n\n self.assertEqual(user.username, \"test1\")\n self.assertIn(\"$2b$\", user.password)",
"def test_authentication_success():\n d = Dexcom(USERNAME, PASSWORD)\n d._validate_account()\n d._validate_session_id()"
] |
[
"0.7468209",
"0.7456986",
"0.7269804",
"0.72061574",
"0.7067546",
"0.70660067",
"0.70542073",
"0.7041604",
"0.69754773",
"0.6974427",
"0.6931147",
"0.69266355",
"0.6925161",
"0.6922705",
"0.69079673",
"0.6888839",
"0.68805784",
"0.68792796",
"0.68628114",
"0.68610346",
"0.6846957",
"0.6846792",
"0.6844504",
"0.68380433",
"0.6835816",
"0.6832528",
"0.6824883",
"0.6819913",
"0.6818155",
"0.6797124"
] |
0.759397
|
0
|
This function will be the getter for the dummy user id we created in the test above
|
def get_test_id(self):
test_username = "testusername"
test_user_id = db.session.query(User.id).filter_by(username = test_username).first()
print(test_user_id)
return test_user_id
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def test_user_id_get(self):\n pass",
"def get_user_id(self):\n raise NotImplementedError",
"def test_get_user_id(self):\n print('(' + self.test_get_user_id.__name__+')',\n self.test_get_user_id.__doc__)\n # for patient\n self.assertEqual(\n PATIENT_ID, self.connection.get_user_id(PATIENT_USERNAME))\n # for doctor\n self.assertEqual(\n DOCTOR_ID, self.connection.get_user_id(DOCTOR_USERNAME))",
"def get_id(self): \n\t\treturn (self.user_id)",
"def get_user(id):\n pass",
"def get_userid():\n return _userid()",
"def mock_get_real_user(_anon_id):\r\n return self.user",
"def __int__(self):\r\n return self.userid",
"def test_get_id(self):\r\n # the migration adds an initial admin user to the system\r\n user = UserMgr.get(user_id=1)\r\n self.assertEqual(\r\n user.id,\r\n 1,\r\n \"Should have a user id of 1: \" + str(user.id))\r\n self.assertEqual(\r\n user.username,\r\n 'admin',\r\n \"Should have a username of admin: \" + user.username)",
"def get_id(self):\r\n return self.username",
"def test_get_user_by_uuiduser_uuid_get(self):\n pass",
"def get_one_user():",
"def get_user_id(self):\n return self.id_user",
"def get_id(self):\n return self.username",
"def get_id(self):\n return self.username",
"def get_id(self):\n return self.username",
"def user_id(self):\n # type: () -> string_types\n return self._user_id",
"def test_user_id(self):\n new_user = self.app\n self.assertTrue(new_user.user_id, 0)\n new_user.create_user()\n self.assertTrue(new_user.user_id, 1)\n for key in new_user.users:\n self.assertEqual(new_user.user_id, key)",
"def custom_user_id(self):\n # type: () -> string_types\n return self._custom_user_id",
"def get_accessible_user_id(self):\n ### DATABASE CODE GOES HERE\n return 1",
"def get_id(self):\n return self.user_id",
"def test_get_user_id_unknown_user(self):\n print('(' + self.test_get_user_id.__name__+')',\n self.test_get_user_id.__doc__)\n self.assertIsNone(self.connection.get_user_id(\n NON_EXIST_PATIENT_USERNAME))",
"def get_user():\n\treturn '1', 200",
"def getUserID(self):\n\t\treturn self.UserID",
"def generate_user(self):\n token = str(uuid.uuid4())\n return self.generate_subid(token=token, return_user=True)",
"def test_get_user_id(self, mixin, mdecoded_jwt):\n mdecoded_jwt.return_value = {'id': sentinel.user_id}\n\n assert mixin.get_user_id() == sentinel.user_id",
"def test_get_user_id(self, mixin, mdecoded_jwt):\n mdecoded_jwt.return_value = {'id': sentinel.user_id}\n\n assert mixin.get_user_id() == sentinel.user_id",
"def _get_unknown_userid(self):\n cursor = self.conn.cursor()\n unknown_user_str = dbtypes.User.null\n cursor.execute(\"select id from users where uniqueid='%s'\" % unknown_user_str)\n return cursor.fetchone()[0]",
"def get_id(self) -> int:\n return self.user_id",
"def user_id(self):\n return json_loads(self.user_json).get('id')"
] |
[
"0.82884496",
"0.7636512",
"0.7501366",
"0.74051195",
"0.7372923",
"0.7365709",
"0.73336416",
"0.7273932",
"0.722477",
"0.72180974",
"0.72082794",
"0.7208228",
"0.717936",
"0.70351905",
"0.70351905",
"0.70351905",
"0.7012597",
"0.700154",
"0.6981743",
"0.6961412",
"0.6958782",
"0.6939805",
"0.6924385",
"0.69238615",
"0.6921096",
"0.6906165",
"0.6906165",
"0.6890145",
"0.68558556",
"0.68473953"
] |
0.76873684
|
1
|
This test will test user history by querying the database to see if any row with a matching user id (in this case 12) exists
|
def test_userHistories(self):
user_id = 12
histories = History.query.filter_by(user_id = user_id).all()
self.assertTrue((histories is not None))
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def has_history(self, user):\n\n header = connect(self.__path)\n curs = header.cursor()\n encrypted_id = md5((str(user.id) + \"typicaluser\").encode()).hexdigest()\n curs.execute(\"SELECT * FROM users WHERE id = (?)\", (encrypted_id,))\n data = curs.fetchall()\n return len(data) >= 1",
"def historyExists(self, user_id, car_id):\n data = db.session.query(History).filter_by(user_id = user_id, car_id = car_id).first()\n if data is None:\n return False\n else:\n return True",
"def test_existence(self):\n self.assertTrue(User.objects.filter(username='rcm').exists())",
"def isonce(self, user, id, period=None):\n if period is None:\n period_from = datetime.datetime.min\n else:\n period_from = datetime.datetime.utcnow() - period\n return (History.objects(user=user).filter(\n scenario__attributes__id=id,\n created_at__gte=period_from).count() == 0)",
"def record_exists(user):\n cnx = create_connection()\n cursor = cnx.cursor()\n\n query = \"SELECT * FROM \" + USAGE_TABLE['name'] + \" WHERE \" + USAGE_TABLE['relational_column'] + \" = '\" + user + \"'\"\n\n try:\n cursor.execute(query)\n except mysql.connector.Error as e:\n cursor.close()\n cnx.close()\n if e.errno == errorcode.ER_BAD_TABLE_ERROR:\n print(\"Table doesn't exist!\")\n else:\n print(e)\n return\n\n rows = cursor.fetchall()\n cnx.close()\n cursor.close()\n\n if len(rows):\n return True\n else:\n return False",
"def user_history(self):\n self.query_1 = \"SELECT * FROM orders WHERE user_id=%s\"\n self.input_1 = (self.user_id,) \n self.event = \"user_history\"\n self.message = \"Order history fetched successfully.\"\n self.error = \"Unable to fetch order history.\"",
"def UserRecords(self, username):\n return not self.com.CheckUid(username) is None",
"def test_exist_users(db, session): # pylint: disable=unused-argument\n # add a notification for user\n user_id = 'notf-user'\n request_id = 223\n request_type = 'registration'\n request_status = 3\n message = 'this is a test notification'\n notification = Notification(user_id=user_id, request_id=request_id, request_type=request_type,\n request_status=request_status, message=message)\n notification.add()\n user_exists_bool = Notification.exist_users(user_id)\n\n # check if it really exists in db\n res = exists_user_notifications(session, user_id)\n assert res is user_exists_bool\n\n # remove the above notification and then check again\n assert delete_user_notifications(session, user_id)\n user_exists_bool = Notification.exist_users(user_id)\n res = exists_user_notifications(session, user_id)\n assert res is user_exists_bool",
"def test_func(self):\n return (Student.objects.filter(user=self.request.user).exists())",
"def checkuserhistory():\n try:\n auth, permitted = validate_user()\n user = helpers.get_user()\n except AttributeError:\n raise eh.KarpGeneralError('No user name provided', 'checkuserhistory')\n try:\n size = helpers.get_size(default=10, settings={'allowed': permitted})\n from src.dbhandler.dbhandler import dbselect\n updates = []\n for lexicon in permitted:\n # add updates from lexicons that are kept in sql\n if configM.get_lexicon_sql(lexicon):\n updates.extend(dbselect(lexicon, user=user, max_hits=size))\n\n return jsonify({'updates': updates})\n except Exception as e:\n logging.exception(e)\n raise eh.KarpGeneralError(str(e))",
"def exists_in_db(self) -> bool:\n query = \"\"\"SELECT * \n FROM Users \n WHERE Username=?;\"\"\"\n return len(self.db.fetchall(query, values=(self.username,))) > 0",
"def check_user(user):\n result_user = search_column_with_constraint(choose_database(\"auth\"), \"users\", \"id\", \"id\", user)\n # result_user = search_single_entry(choose_database(\"auth\"), \"users\", \"id\", user)\n\n if len(result_user) == 0:\n return 0\n else:\n return 1",
"def userIDExists(self, id : int) -> bool:\n return id in self.users.keys()",
"def test_user_id(self):\n new_user = self.app\n self.assertTrue(new_user.user_id, 0)\n new_user.create_user()\n self.assertTrue(new_user.user_id, 1)\n for key in new_user.users:\n self.assertEqual(new_user.user_id, key)",
"def _has_data(cls):\n return User.objects.count() > 0",
"def user_already_exist(list_json: dict) -> bool:\n users = User.query.all()\n for user in users:\n login = user.user_login == list_json[\"user_login\"]\n email = user.email == list_json[\"email\"]\n row = User.query.filter(login and email or login or email).first()\n if row:\n return True\n return False",
"def test_user_not_in_db_before_addition(self):\n\n username = 'testuser'\n user = User.query.filter_by(username=username).first()\n self.assertTrue(user.__str__(), None)",
"def user_exists(conn, account):\n cur = conn.cursor()\n cur.execute(f\"SELECT * FROM users WHERE account = '{account}'\")\n\n rows = cur.fetchall()\n\n if len(rows) > 0:\n return True\n else:\n return False",
"def check_if_row_exists(self, session, data):\n\n row_exists = None\n user_id = 0\n\n try:\n\n user_row = self.get_user_by_id(session, data)\n\n if user_row is not None:\n user_id = user_row.user_id\n else:\n user_id = 0\n\n logger.info('User Row object in DB: %s', str(user_row))\n\n row_exists = session.query(UsersAuthModel).filter(UsersAuthModel.user_id == user_id). \\\n filter(UsersAuthModel.is_active == \"true\").scalar()\n\n logger.info('Row to data: {}, Exists: %s'.format(data), str(row_exists))\n\n except SQLAlchemyError as exc:\n row_exists = None\n logger.exception('An exception was occurred while execute transactions: %s', str(str(exc.args) + ':' +\n str(exc.code)))\n raise mvc_exc.IntegrityError(\n 'Row not stored in \"{}\". IntegrityError: {}'.format(data.get('username'),\n str(str(exc.args) + ':' + str(exc.code)))\n )\n finally:\n session.close()\n\n return row_exists",
"def show_history(user_id):\n return History.where('user_id', user_id).get()",
"def test_get_user_exists(self):\n # First make the user\n resp = self.app.post('/users', data=json.dumps(self.test_user1_data))\n assert resp.status_code == 200\n\n # Now get the user data and verify it is correct\n resp = self.app.get('/users/{}'.format(self.test_user1_userid))\n assert resp.status_code == 200\n data = json.loads(resp.data)\n for key in ['first_name', 'last_name', 'userid', 'groups']:\n assert key in data\n assert data['first_name'] == self.test_user1_first\n assert data['last_name'] == self.test_user1_last\n assert data['userid'] == self.test_user1_userid\n for groupid in self.test_user1_groups:\n assert groupid in data['groups']",
"def user_exists(self,unique_ID):\n\t\ttry:\n\t\t\tself.data[unique_ID]\n\t\texcept KeyError:\n\t\t\treturn False\n\t\telse:\n\t\t\treturn True",
"def test_user_creation(self):\n self.assertTrue(User.objects.exists())",
"def test_detail_user_does_not_exists(self, client, users):\n user = users[0]\n url = reverse('users:detail', args=(user.pk + 100,))\n response = client.get(url)\n assert response.status_code == 404",
"def test_user_not_in_users_table(self) -> None:\n user1 = self.register_user(\"user1\", \"pass\")\n token1 = self.login(user1, \"pass\")\n room = self.helper.create_room_as(user1, is_public=True, tok=token1)\n\n # Inject a join event for a user who doesn't exist\n self.get_success(inject_member_event(self.hs, room, \"@not-a-user:test\", \"join\"))\n\n # Another new user registers and joins the room\n user2 = self.register_user(\"user2\", \"pass\")\n token2 = self.login(user2, \"pass\")\n self.helper.join(room, user2, tok=token2)\n\n # The dodgy event should not have stopped us from processing user2's join.\n in_public = self.get_success(self.user_dir_helper.get_users_in_public_rooms())\n self.assertEqual(set(in_public), {(user1, room), (user2, room)})",
"def has_account(user_name):\n session = Session()\n num_meds = session.query(Med).filter_by(account_id=user_name).count()\n session.close()\n return num_meds > 0",
"def assert_user_exists(self, user_id):\n result = self.con.execute(\n 'SELECT id FROM registered_user WHERE id = ? AND active = 1',\n (user_id,)\n ).fetchone()\n if result is None:\n raise err.UnknownUserError(user_id)",
"def exists_in_db(self) -> bool:\n query = '''SELECT * \n FROM ESLReceipts \n WHERE Transaction_Number=? AND Date=? AND Description=? \n AND Memo=? AND Amount_Debit=? \n AND Amount_Credit=? AND Balance=? \n AND Check_Number=? AND Fees=? \n AND Card_Type=? AND Is_Payment=? \n AND Is_Transaction=? AND User_id=?;'''\n return len(self.db.fetchall(query, values=self.to_tuple())) > 0",
"def test_get_user_if_exists(self):\n user = User.objects.create(username=self.username)\n actual = get_user_if_exists(None, self.details, user=user)\n self.assertDictEqual(actual, {'is_new': False})",
"def test_user_not_in_postgres(self):\n pg_db = auth.postgresDB()\n\n # Make sure user and results are not yet in Postgres\n sql_query = (\n f\"DELETE FROM results \"\n f\"WHERE user_id = '{self.project_id}' \"\n f\"AND project_id = '{self.project_id}'\"\n )\n pg_db.query(sql_query)\n sql_query = \"DELETE FROM users WHERE user_id = '{0}'\".format(self.project_id)\n pg_db.query(sql_query)\n\n transfer_results.transfer_results()\n\n sql_query = \"SELECT * FROM users WHERE user_id = '{0}'\".format(self.project_id)\n result = pg_db.retr_query(sql_query)\n self.assertIsNotNone(result)\n\n sql_query = (\n f\"SELECT * \"\n f\"FROM results \"\n f\"WHERE project_id = '{self.project_id}' \"\n f\"AND user_id = '{self.project_id}'\"\n )\n result = pg_db.retr_query(sql_query)\n self.assertIsNotNone(result)"
] |
[
"0.7834494",
"0.6747436",
"0.6676087",
"0.6339552",
"0.6306387",
"0.6265124",
"0.62559104",
"0.6212702",
"0.6148387",
"0.6148346",
"0.6137069",
"0.6118635",
"0.61039686",
"0.60899067",
"0.60179096",
"0.5970495",
"0.59647316",
"0.5952292",
"0.5942389",
"0.59386134",
"0.59352106",
"0.587301",
"0.5855153",
"0.5851995",
"0.58457536",
"0.58214813",
"0.58029544",
"0.5801458",
"0.5796348",
"0.577561"
] |
0.76068485
|
1
|
This test will test the car search by querying the database to see if any car with a specific make, body type, etc. exists
|
def test_searchCar(self):
make = "Toyota"
body_type = "Seden"
colour = "Black"
seats = "5"
cost_per_hour = "10.5"
booked = True
cars = db.session.query(Car).filter(or_(Car.make == make,
Car.body_type == body_type,
Car.colour == colour,
Car.seats == seats,
Car.cost_per_hour == cost_per_hour,
Car.booked == booked)).all()
self.assertTrue((cars is not None))
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def test_create_a_car_make(car_make):\n car_makes = models.CarMake.objects.all()\n\n assert car_make\n assert car_make.name == \"Volkswagen\"\n assert len(car_makes) == 1",
"def test_filter_make_and_model(self):\n request = self.factory.get('/api/v1/cars', {'make': 'BMW',\n 'model': '3 Series',\n 'distance': 100000})\n response = CarAdViewSet.as_view({'get': 'list'})(request)\n car_other_make = None\n for car in response.data['results']:\n if car['make'] != 'BMW' or car['model'] != '3 Series':\n car_other_make = car\n break\n self.assertEqual(response.status_code, HTTPStatus.OK._value_)\n self.assertIs(car_other_make, None)",
"def test_create_a_car_model(car_model):\n car_models = models.CarMake.objects.all()\n\n assert car_model\n assert car_model.name == \"Golf\"\n assert car_model.car_make.name == \"Volkswagen\"\n assert len(car_models) == 1",
"def test_get_models_by_make(self):\n request = self.factory.get('/api/v1/cars', {'make': 'BMW',\n 'distance': 100000})\n response = CarAdViewSet.as_view({'get': 'list'})(request)\n self.assertEqual(response.status_code, HTTPStatus.OK._value_)\n self.assertNotEqual(response.data['models'], [])\n self.assertIs(type(response.data['models'][0]['model']), str)\n self.assertIs(type(response.data['models'][0]['count']), int)",
"def test_analyze_a_recipe_search_query(self):\n pass",
"def carExists(self, carmake):\n data = db.session.query(Car.id).filter_by(make = carmake).first()\n if data is None:\n return False\n else:\n return True",
"def test_get_cars():\n response = client.get(\"/\")\n assert response.status_code == STATUS_OK\n\n cars = response.json()\n assert len(cars) == 1000\n # first id should be 1\n assert cars[0][\"id\"] == 1\n # last id should be 1000\n assert cars[-1][\"id\"] == 1000\n\n expected_first_car = {\n \"id\": 1,\n \"make\": \"BMW\",\n \"model\": \"3 Series\",\n \"year\": 1998,\n \"vin\": \"JH4CU2F60AC794232\",\n }\n assert cars[0] == expected_first_car",
"def test_get_foods_search(self):\n pass",
"def test_foodtrucks_searchByKeyword(self):\n\n\t\tprint 'API Test: retrieving foodtruck record by keyword'\n\t\turl = reverse('foodtruck_list')\n\t\tdata0 = {'objectid': 0, 'applicant':'Pizza', 'x': 0.0, 'y': 0.5}\n\t\tdata1 = {'objectid': 1, 'applicant':'Chicago', 'fooditems': 'deep dish pizza and sandwiches', 'x': 1.0, 'y': 1.0}\n\t\tdata2 = {'objectid': 2, 'applicant':'hotdog house', 'x': 1.5, 'y': 1.5}\n\t\tresponse = self.client.post(url, data0, format='json')\n\t\tself.assertEqual(response.status_code, status.HTTP_201_CREATED)\n\t\tresponse = self.client.post(url, data1, format='json')\n\t\tself.assertEqual(response.status_code, status.HTTP_201_CREATED)\n\t\tresponse = self.client.post(url, data2, format='json')\n\t\tself.assertEqual(response.status_code, status.HTTP_201_CREATED)\n\t\t#print response.status_code\n\t\tresponse = self.client.get('/foodtrucks/bykeyword?keyword=pizza', format='json')\n\t\tself.assertEqual(response.status_code, status.HTTP_200_OK)\n\t\tself.assertEqual(len(response.data), 2)\n\t\tself.assertEqual(response.data[0]['objectid'], 0)\n\t\tself.assertEqual(response.data[1]['objectid'], 1)\n\t\tprint 'pass'",
"def test_search_recipes(self):\n pass",
"def test_create_vehicle(self):\n payload = {\n 'user': self.user,\n 'type': 'Ambulance',\n 'license_plate': 'AA-123-AA'\n }\n\n self.client.post(VEHICLE_URL, payload)\n\n exists = Vehicle.objects.filter(\n user=self.user,\n type=payload['type'],\n license_plate=payload['license_plate']\n )\n\n self.assertTrue(exists)",
"def test_search(self):\n from importCsv.models import City, Hotel\n path = reverse(\"search\")\n user = mixer.blend(User, is_staff=True, is_superuser=True)\n city = mixer.blend(City, abbrev=\"tes\", name=\"test\")\n mixer.blend(Hotel, city=city, data=\"testData\", name=\"test hotel\")\n client = Client()\n client.force_login(user)\n r = client.post(path, {\"tes\": \"on\"})\n assert r.status_code == 200\n assert r.content.find(b'test hotel')",
"def test_search_recipes_by_nutrients(self):\n pass",
"def test_search_recipes_by_ingredients(self):\n pass",
"def test_model_object_index(self):\n car = Car.objects.first()\n car.index_to_appsearch()\n self.assertEqual(self.client_index.call_count, 1)",
"def test_compound_match(self):\n query_string = f'{self.beer.name[:10]}+{self.beer.manufacturer.name[:5]}'\n tap = TapFactory(beer=self.beer)\n response = self.client.get(\n f'{self.url}?search={query_string.upper()}',\n )\n eq_(response.status_code, 200)\n eq_(len(response.data['results']), 1, response.data)\n eq_(response.data['results'][0]['name'], self.beer.name, response.data)\n eq_(\n response.data['results'][0]['venues'][0]['id'],\n tap.venue.id,\n response.data,\n )\n eq_(len(response.data['results'][0]['venues']), 1, response.data)",
"def test_create_car_valid_data():\n initial_number_of_cars = len(client.get(\"/\").json())\n\n car_data = {\n \"make\": \"BMW\",\n \"model\": \"3 Series New\",\n \"year\": 2019,\n \"vin\": \"abc\",\n }\n response = client.post(\"/\", data=car_data)\n assert response.status_code == STATUS_CREATED\n\n assert len(client.get(\"/\").json()) == initial_number_of_cars + 1",
"def test_get_car_valid_id(car_id, expected_car):\n response = client.get(\"/{0}\".format(car_id))\n assert response.status_code == STATUS_OK\n assert response.json() == expected_car",
"def test_retrieve_vehicle_list(self):\n Vehicle.objects.create(\n user=self.user,\n type='Ambulance',\n license_plate='AA-123-AA'\n )\n Vehicle.objects.create(\n user=self.user,\n type='VSL',\n license_plate='BB-123-BB'\n )\n\n res = self.client.get(VEHICLE_URL)\n\n vehicles = Vehicle.objects.all()\n serializer = VehicleSerializer(vehicles, many=True)\n\n self.assertEqual(res.status_code, status.HTTP_200_OK)\n self.assertEqual(res.data, serializer.data)",
"def test_valid_search_query_and_category_return_results(self):\n article = ArticleFactory()\n article.publish()\n request = RequestFactory().get(\"\", {\"q\": article.title[:10]})\n response = Search.as_view()(request, category=\"articles\")\n self.assertIn(\"results\", response.context_data)\n results = response.context_data[\"results\"]\n self.assertEqual(len(results), 1)\n self.assertIn(article, results)",
"def test_find_by_availability(self):\n Pet(0, \"fido\", \"dog\", False).save()\n Pet(0, \"kitty\", \"cat\", True).save()\n pets = Pet.find_by_availability(True)\n self.assertEqual(len(pets), 1)\n self.assertEqual(pets[0].name, \"kitty\")",
"def test_search(self):\n pass",
"def test_search(self):\n pass",
"def test_search(self):\n pass",
"def test_cartography_details(self):\n\t\tcreate_cartography()\n\t\tcartography = Document.objects.get(id=1)\n\t\tc = Client()\n\t\tresponse = c.get(\"/cartography/%s\" % str(cartography.id))\n\t\tself.assertEquals(response.status_code, 200)",
"def test_movements_search(api_client):\n\n MovementFactory(\n description=\"Best Food Ever!\", category=\"Lunch\", sub_category=\"Lunch\"\n )\n MovementFactory(description=\"Booze Monday\", category=\"Food\", sub_category=\"Beer\")\n MovementFactory(\n description=\"Kindle book\", category=\"E-Commerce\", sub_category=\"Book\"\n )\n MovementFactory(\n description=\"That cool restaurant\", category=\"Restaurant\", sub_category=\"Food\"\n )\n MovementFactory(\n description=\"Changed tires\", category=\"Transport\", sub_category=\"Car Repair\"\n )\n\n response = api_client.get(reverse(\"api:movements-list\"), {\"search\": \"Food\"})\n\n assert response.status_code == 200\n assert len(response.data) == 3\n assert response.data[0][\"description\"] == \"Best Food Ever!\"\n assert response.data[0][\"category\"] == \"Lunch\"\n assert response.data[0][\"sub_category\"] == \"Lunch\"\n assert response.data[1][\"description\"] == \"Booze Monday\"\n assert response.data[1][\"category\"] == \"Food\"\n assert response.data[1][\"sub_category\"] == \"Beer\"\n assert response.data[2][\"description\"] == \"That cool restaurant\"\n assert response.data[2][\"category\"] == \"Restaurant\"\n assert response.data[2][\"sub_category\"] == \"Food\"",
"def test_search_by_ingredients(self):\n recipe_id = self.request_mgr.search_by_ingredients(['butter', 'sugar', 'eggs'])\n self.assertGreater(recipe_id, 0)",
"def test_search_show(self):\n self.assertEquals(\n len(self.t['life on mars'].search('the', key='episodename')),\n 10\n )",
"def test_hotel_search(self):\n test_params = {\n 'DEBUG': False,\n 'TESTING': True\n }\n\n app = create_app(settings_override=test_params).test_client\n params = {\n \"city\": \"Las Vegas\",\n \"checkin\": \"2018-05-27\",\n \"checkout\": \"2018-05-28\"\n }\n\n with app() as c:\n response = c.post('/search', json=params)\n\n assert response.status_code == 200\n assert isinstance(response.json, list)\n assert len(response.json) > 0",
"def test_search(self):\n response = Tmdb.search('ozark')\n self.assertTrue(int(response.status_code) == 200)\n data = response.json()\n self.assertTrue(isinstance(data['results'], list))\n # TODO check if all the shows are in the good format (can be from_dict/to_dict)"
] |
[
"0.7030627",
"0.6953",
"0.6661251",
"0.64787984",
"0.6447302",
"0.6376898",
"0.63212156",
"0.62511206",
"0.6214951",
"0.6190375",
"0.6076356",
"0.60682404",
"0.5993362",
"0.5983236",
"0.5946412",
"0.590402",
"0.59029984",
"0.5900849",
"0.5812821",
"0.58114743",
"0.5802515",
"0.58000666",
"0.58000666",
"0.58000666",
"0.5788658",
"0.5747383",
"0.57411045",
"0.57187515",
"0.5713496",
"0.5680308"
] |
0.8014443
|
0
|
This test will create a booking in the database
|
def test_bookCar(self):
user_id = "12"
car_id = "6"
begin_date = "2020-05-21"
begin_time = "12:00:00"
return_date = "2020-05-23"
return_time = "12:00:00"
begin_datetime = "{} {}".format(begin_date, begin_time)
return_datetime = "{} {}".format(return_date, return_time)
newBooking = Booking( user_id = user_id,
car_id = car_id,
begin_time = begin_datetime,
return_time = return_datetime,
ongoing = False)
# Add new row to the database
db.session.add(newBooking)
# Update car's availability
car = Car.query.get(car_id)
car.booked = True
# Commit changes
db.session.commit()
self.assertTrue(self.bookingExists(user_id, car_id))
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def test_create_bookings(self):\n baker.make_recipe('booking.user', _quantity=3)\n baker.make_recipe('booking.future_EV', _quantity=2)\n self.assertEqual(Booking.objects.all().count(), 0)\n management.call_command('create_bookings')\n self.assertEqual(Booking.objects.all().count(), 6)",
"def test_make_a_booking(self):\n date = datetime(2030, 3, 1, 11)\n\n response = self.client.post(reverse('bookings', kwargs={'facility': 'g'}), {\n 'book': str(date.timestamp())})\n\n context = response.context\n bookings = context[\"bookings\"]\n\n self.assertEqual(response.status_code, 200)\n self.assertEqual(context[\"quota\"], settings.BOOKINGS_QUOTA - 1)\n self.assertEqual(type(context[\"info\"]), BookingSuccessfulAlert)\n\n for week in bookings:\n for row in week.rows:\n for block in row.blocks:\n if block.date == date:\n self.assertEqual(type(block), BlockReserved)\n else:\n self.assertEqual(type(block), BlockAvailable)",
"def test_api_can_create_a_book(self):\n book = {\n 'bookid': '23',\n 'title': 'Neues Buch',\n 'description': 'Unsinn',\n 'seite50_sentence': 'Ein neuer Satz ohne Verb.',\n 'published_date': '1980-01-01',\n }\n res = self.client.post(\n reverse('create_book'), book, format=\"json\")\n self.assertEqual(res.status_code, status.HTTP_201_CREATED)",
"def test_create_book(self):\n url = reverse('book-list')\n data = {'isbn':'96712116-1',\n 'title':'New Star',\n 'author_last_name':'Khaled',\n 'author_first_name':'Roshdy',\n 'page_count':250,\n 'description':'the book description'}\n response = self.client.post(url, data, format='json')\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)",
"def test_book_table(self):\n url = \"/book_table\"\n data = {\n \"guest_details\": {\n \"first_name\": \"Prasad\",\n \"last_name\": \"Dalavi\",\n \"email\": \"[email protected]\",\n \"phone_number\": \"8983050327\",\n \"registration\": False\n },\n \"time\": \"23:00\",\n \"date\": \"2020-03-16\",\n \"guest_count\": 2,\n \"table\": 2,\n \"selected_menu\": [4]\n }\n\n response = app.test_client().post(url,\n json=data,\n content_type='application/json')\n assert response.status_code == 200, logging.error(\n \"Booking Table Failed!\")\n logging.info(\"Booking Table Tested!\")",
"def test_meeting_create(self):\n pass",
"def post(self, request): # FIRST EXAMPLE\n model = self._create_booking(\n request=request) # when _create_booking is invoked, historio Client will log model\n print('save me')",
"def test_get_booking(self):\n url = \"/get_bookings\"\n data = {\n \"user\": 2\n }\n response = app.test_client().post(url,\n json=data,\n content_type='application/json')\n assert response.status_code == 200, logging.error(\n \"GET Bookings Failed!\")\n logging.info(\"GET Bookings Tested!\")",
"def test_create(self):\n self.assertEqual(Routine.objects.count(), 2)\n payload = {\n 'name': 'Monday routine',\n }\n self.client.post('/routines/', data=payload)\n self.assertEqual(Routine.objects.count(), 3)",
"def test_create_bookings_without_events(self):\n self.assertEqual(Booking.objects.all().count(), 0)\n\n management.call_command('create_bookings')\n # confirm no errors, and no booking are created\n self.assertEqual(Booking.objects.all().count(), 0)",
"def create(self, validated_data):\n print(validated_data)\n return Booking.objects.create(**validated_data)",
"def test_create_boat(self):\n pass",
"def add_booking(user_id, rest_id, number_of_people, booking_datetime, table_id, entrance_datetime=None):\r\n try:\r\n booking = Booking()\r\n booking.restaurant_id = rest_id\r\n booking.user_id = user_id\r\n booking.booking_datetime = booking_datetime\r\n booking.entrance_datetime = entrance_datetime\r\n booking.number_of_people = number_of_people\r\n booking.table_id = table_id\r\n booking.datetime = datetime.datetime.now()\r\n db.session.add(booking)\r\n db.session.commit()\r\n return booking.id\r\n except:\r\n db.session.rollback()\r\n return None",
"def test_view_booking(client):\n response = client.post(\n BOOKING_API_URL + '/view',\n data=dict(\n booking_id=1\n ),\n content_type='multipart/form-data'\n )\n\n assert b'Booking ID: 1' in response.data\n assert b'Car ID: </strong> 1' in response.data\n assert b'User ID: </strong> 1' in response.data\n assert b'Pickup time: </strong> ' + \\\n str.encode(PICKUP_DATE.strftime(\n DEFAULT_DATETIME_FORMAT)) in response.data\n assert b'Return time: </strong> ' + \\\n str.encode(RETURN_DATE.strftime(\n DEFAULT_DATETIME_FORMAT)) in response.data\n assert b'Canceled' in response.data # previous test canceled this booking.",
"def test_create(self):\n pass",
"def create_book():\n Book.objects.create(book_id=\"test_id\",\n title=\"test_title\",\n authors=\"test_author\",\n published_date=\"2021\",\n categories=[\"test_category\"],\n average_rating=5,\n ratings_count=5,\n thumbnail=\"http://books.google.com/books/test\"\n )",
"def test_user_can_create_a_book(self):\n\n\t\twith self.client:\n\t\t\tadd_book = {\n\t\t\t\t'title': 'Hello Books',\n\t\t\t\t'isbn': '5698745124'\n\t\t\t}\n\t\t\tlogin_data = self.login_test_user()\n\t\t\ttoken = login_data['auth_token']\n\t\t\tres = self.client.post(\n\t\t\t\tf'{URL_BOOKS}',\n\t\t\t\theaders=dict(Authorization=f'Bearer {token}'),\n\t\t\t\tcontent_type='application/json',\n\t\t\t\tdata=json.dumps(add_book)\n\t\t\t)\n\t\t\tres2 = json.loads(res.data.decode())\n\t\t\tself.assertIn('success', str(res2))",
"def test_create(self):\n retreat = Retreat.objects.create(\n name=\"random_retreat\",\n details=\"This is a description of the retreat.\",\n seats=40,\n address_line1=\"123 random street\",\n postal_code=\"123 456\",\n state_province=\"Random state\",\n country=\"Random country\",\n timezone=\"America/Montreal\",\n price=3,\n start_time=LOCAL_TIMEZONE.localize(datetime(2130, 1, 15, 8)),\n end_time=LOCAL_TIMEZONE.localize(datetime(2130, 1, 17, 12)),\n min_day_refund=7,\n min_day_exchange=7,\n refund_rate=100,\n is_active=True,\n accessibility=True,\n form_url=\"example.com\",\n carpool_url='example2.com',\n review_url='example3.com',\n has_shared_rooms=True,\n room_type=Retreat.DOUBLE_OCCUPATION,\n toilet_gendered=True,\n )\n\n self.assertEqual(retreat.__str__(), \"random_retreat\")",
"def test_create_customer_rental(self):\n create_rental_url = reverse(\n \"customer_rental_list\", kwargs={\"pk\": self.user1.pk}\n )\n\n data = {\"book\": self.book1.pk}\n response = self.client.post(create_rental_url, data=data, format=\"json\")\n\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)",
"def create_booking(self, request):\n model = AssignmentModelFromDynamo('assignment', 'params')\n\n model.save()\n\n return model",
"def test_create(self):\n self.assertTrue(WayPoint.objects.exists())",
"def test_foodtrucks_create(self):\n\t\tprint 'API Test: create a new foodtruck'\n\t\turl = reverse('foodtruck_list')\n\t\tdata = {\"status\" : \"APPROVED\",\\\n\t\t \"expirationdate\" : \"2015-03-15T00:00:00\",\\\n\t\t \"permit\" : \"14MFF-0107\",\\\n\t\t \"block\" : \"3794\",\\\n\t\t \"received\" : \"Jun 24 2014 1:49PM\",\\\n\t\t \"facilitytype\" : \"Truck\",\\\n\t\t \"blocklot\" : \"3794002A\",\\\n\t\t \"locationdescription\" : \"02ND ST: TOWNSEND ST to KING ST (700 - 799)\",\\\n\t\t \"cnn\" : 148000,\\\n\t\t \"priorpermit\" : 1,\\\n\t\t \"approved\" : \"2014-06-24T13:55:30\",\\\n\t\t \"noisent\" : \"2013-07-25T00:00:00\",\\\n\t\t \"schedule\" : \"http://bsm.sfdpw.org/PermitsTracker/reports/report.aspx?title=schedule&report=rptSchedule¶ms=permit=14MFF-0107&ExportPDF=1&Filename=14MFF-0107_schedule.pdf\",\\\n\t\t \"address\" : \"750 02ND ST\",\\\n\t\t \"applicant\" : \"Steve's Mobile Deli\",\\\n\t\t \"lot\" : \"002A\",\\\n\t\t \"fooditems\" : \"Cold Truck: Pre-packaged sandwiches: Burgers: Hot Dogs: Muffin Sandwiches: Enchiladas: Bagels: Burritos: Salads: Snacks: Beverages\",\\\n\t\t \"longitude\" : -122.402978526686,\\\n\t\t \"latitude\" : 37.7302216813049, \\\n\t\t \"y\" : 2093947.369,\\\n\t\t \"x\" : 6011371.493,\\\n\t\t \"objectid\" : 554527}\n\t\t\n\t\tresponse = self.client.post(url, data, format='json')\n\t\tself.assertEqual(response.status_code, status.HTTP_201_CREATED)\n\t\t\n\t\tquant = '1.000000'\n\t\tfor k, v in data.iteritems():\n\t\t\tif v is not None and (k is \"y\" or k is \"x\" or k is \"latitude\" or k is \"longitude\"):\n\t\t\t\tself.assertEqual(response.data[k].quantize(Decimal(quant)), Decimal(v).quantize(Decimal(quant)))\n\t\t\telif v is not None and (k is \"approved\" or k is \"received\" or k is \"expirationdate\" or k is \"noisent\"):\n\t\t\t\tself.assertEqual(response.data[k], parse(v))\n\t\t\telse:\n\t\t\t\tself.assertEqual(response.data[k], v)\n\t\t\n\t\tresponse = self.client.get(url, format='json')\n\t\tself.assertEqual(response.status_code, status.HTTP_200_OK)\n\n\t\tfor k, v in data.iteritems():\n\t\t\tif v is not None and (k is \"y\" or k is \"x\" or k is \"latitude\" or k is \"longitude\"):\n\t\t\t\tself.assertEqual(response.data[0][k].quantize(Decimal(quant)), Decimal(v).quantize(Decimal(quant)))\n\t\t\telif v is not None and (k is \"approved\" or k is \"received\" or k is \"expirationdate\" or k is \"noisent\"):\n\t\t\t\tself.assertEqual(response.data[0][k], parse(v))\n\t\t\telse:\n\t\t\t\tself.assertEqual(response.data[0][k], v)\n\t\tprint 'pass'",
"def test_create_bookings_without_users(self):\n baker.make_recipe('booking.future_EV')\n self.assertEqual(Booking.objects.all().count(), 0)\n self.assertEqual(User.objects.all().count(), 0)\n management.call_command('create_bookings')\n self.assertEqual(Booking.objects.all().count(), 3)\n self.assertEqual(User.objects.all().count(), 6)",
"def test_create_rental_with_unavailable_book(self):\n create_rental_url = reverse(\n \"customer_rental_list\", kwargs={\"pk\": self.user1.pk}\n )\n\n data = {\"book\": self.book3.pk}\n response = self.client.post(create_rental_url, data=data, format=\"json\")\n\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)",
"def test_add_book(self):\n\n first_book_list = BookList()\n first_book = Book()\n\n first_book.create_book({\n \"title\": \"First Man\",\n \"author\": \"James R. Hansen\",\n \"year\": 2005,\n \"publisher_name\": \"Simon & Schuster\",\n \"publication_date\": \"01/01/2018\",\n \"num_copies\": 1\n })\n\n assert first_book_list.add_book(first_book)\n assert first_book_list.find_book(\"First Man\")\n assert first_book_list.num_books() == 1",
"def test_view_car_booking(client):\n response = client.post(\n BOOKING_API_URL + '/book',\n data=dict(\n pickup_datetime=PICKUP_DATE,\n return_datetime=RETURN_DATE,\n car_id=1,\n ),\n content_type='multipart/form-data'\n )\n\n assert b'Available!' in response.data",
"def test_office_creation(self):\n url = '/api/v1/consultorios/'\n data = {\n \"hospital\": \"Angeles Roma\",\n \"office\": \"306\"\n }\n request = self.client.post(url, data)\n\n self.assertEqual(request.status_code, status.HTTP_201_CREATED)",
"def test_new_flight_succeeds(self, init_db, new_flight):\n assert new_flight == new_flight.save()",
"def add_booking():\n try:\n \n carid = request.form[\"carid\"]\n userid = request.form[\"userid\"]\n fromdate = request.form[\"fromdate\"].strip()\n todate = request.form[\"todate\"].strip()\n\n print(fromdate, \"|\", todate)\n\n car = Car.query.get(carid)\n car.isavailable = False\n\n user = User.query.get(userid)\n user_email = user.email\n\n fromdate_obj = datetime.datetime.strptime(fromdate, '%Y-%m-%d')\n todate_obj = datetime.datetime.strptime(todate, '%Y-%m-%d')\n \n summary = \"Car Booking. Car id: \" + carid\n\n cal = CalendarUtil()\n resp = cal.addToCalendar(user_email, fromdate_obj, todate_obj, summary)\n cal_event_id = resp['id']\n booking = Booking(carid=carid, userid=userid, fromdate=fromdate, todate=todate, caleventid= cal_event_id, isactive=True)\n\n test = db.session.add(booking)\n db.session.commit()\n return bookingSchema.jsonify(booking)\n except Exception as ex:\n print(\"Failed to add event to calender. Exception: \", str(ex))\n return jsonify(None)",
"def test_create_record(self):\n pass"
] |
[
"0.75097334",
"0.73627186",
"0.7205623",
"0.7142108",
"0.7126723",
"0.7043033",
"0.6995864",
"0.69878817",
"0.69494665",
"0.6936681",
"0.6909777",
"0.68665344",
"0.6819778",
"0.6799046",
"0.6789994",
"0.6734838",
"0.6725396",
"0.66904336",
"0.6687076",
"0.66357887",
"0.66139317",
"0.6613828",
"0.6559078",
"0.6555775",
"0.65362054",
"0.64978683",
"0.64914256",
"0.6490178",
"0.64901006",
"0.6463509"
] |
0.7655829
|
0
|
This test will delete a booking in the database
|
def test_cancelBooking(self):
user_id = "12"
car_id = "6"
begin_date = "2020-05-21"
begin_time = "12:00:00"
begin_datetime = "{} {}".format(begin_date, begin_time)
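        # Find the existing booking for this user, car, and start time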
booking = db.session.query(Booking).filter( Booking.user_id == user_id,
Booking.car_id == car_id,
Booking.begin_time == begin_datetime).first()
# Delete row from the database
db.session.delete(booking)
# Update car's availability
car = Car.query.get(car_id)
car.booked = False
# Commit changes
db.session.commit()
self.assertFalse(self.bookingExists(user_id, car_id))
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def test_meeting_delete(self):\n pass",
"def test_delete_book(self):\n response = self.client.delete(self.book.get_absolute_url()) \n self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)\n self.assertEqual(Book.objects.count(), 0)",
"def booking_delete(id):\n booking = Booking.query.get(id)\n payment = Payment.query.filter_by(booking_id=id).first()\n if not booking:\n return \"DELETED\"\n db.session.delete(booking)\n db.session.delete(payment)\n db.session.commit()\n return redirect(url_for('bookings.booking_index'))",
"def test_delete_boat(self):\n pass",
"def test_delete_book(self):\n\n\t\t# create book\n\t\tadd_book = {\n\t\t\t'title': 'Hello Books',\n\t\t\t'isbn': '5698745124'\n\t\t}\n\n\t\tlogin_data = self.login_test_user()\n\t\ttoken = login_data['auth_token']\n\t\tres = self.client.post(\n\t\t\tf'{URL_BOOKS}',\n\t\t\theaders=dict(Authorization=f'Bearer {token}'),\n\t\t\tcontent_type='application/json',\n\t\t\tdata=json.dumps(add_book)\n\t\t)\n\n\t\t# delete book\n\t\tdel_book = self.client.delete(\n\t\t\tf'{URL_BOOKS}/1',\n\t\t\theaders=dict(Authorization=f'Bearer {token}')\n\t\t)\n\n\t\tres3 = json.loads(del_book.data.decode())\n\t\tself.assertTrue(res3['message'] == 'book with id 1 has been deleted')",
"def test_delete_case(self):\n pass",
"def test_delete(self):\n\n test_user = get_user_model().objects.create_user(username='tester',password='pass')\n test_user.save()\n\n test_book = Book.objects.create(\n publisher = test_user,\n name = 'Title of Blog',\n description = 'Words about the blog'\n )\n\n test_book.save()\n\n book = Book.objects.get()\n\n url = reverse('book_detail', kwargs={'pk': book.id})\n\n\n response = self.client.delete(url)\n\n self.assertEquals(response.status_code, status.HTTP_204_NO_CONTENT, url)",
"def test_delete(self):\n pass",
"async def test_cancel_booking(client):\n headers = { \n 'Authorization': 'Bearer special-key',\n }\n response = await client.request(\n method='DELETE',\n path='/vms/api/v1/bookings/{booking_id}'.format(booking_id='booking_id_example'),\n headers=headers,\n )\n assert response.status == 200, 'Response body is : ' + (await response.read()).decode('utf-8')",
"def test_delete_book(self):\n\n delete_books()\n\n book = create_book(\"title one\")[\"book\"]\n\n self.assertEqual(\n read_book(book[\"id\"]),\n {\n \"status\": \"success\",\n \"book\": book\n }\n )\n\n with test_client.delete(\"/book/{}/\".format(book[\"id\"])) as response:\n\n self.assertEqual(\n json.loads(\n response.get_data(as_text=True)\n ),\n {\n \"status\": \"success\",\n \"book\": book\n }\n )\n\n self.assertEqual(\n read_book(book[\"id\"]),\n {\n \"status\": \"error\"\n }\n )\n\n with test_client.delete(\"/book/{}/\".format(book[\"id\"])) as response:\n self.assertEqual(\n json.loads(\n response.get_data(as_text=True)\n ),\n {\n \"status\": \"error\"\n }\n )\n\n \"\"\"\n clear the table, create several books and list them, remove one and list them again, remove another one \n and list them again\n \"\"\"\n\n delete_books()\n\n book_one = create_book(\"title one\")[\"book\"]\n book_two = create_book(\"title two\")[\"book\"]\n\n self.assertEqual(\n list_books(),\n {\n \"status\": \"success\",\n \"books\": [\n book_one,\n book_two\n ]\n }\n )\n\n with test_client.delete(\"/book/{}/\".format(book_two[\"id\"])) as response:\n\n self.assertEqual(\n json.loads(\n response.get_data(as_text=True)\n ),\n {\n \"status\": \"success\",\n \"book\": book_two\n }\n )\n\n self.assertEqual(\n list_books(),\n {\n \"status\": \"success\",\n \"books\": [\n book_one\n ]\n }\n )\n\n with test_client.delete(\"/book/{}/\".format(book_one[\"id\"])) as response:\n\n self.assertEqual(\n json.loads(\n response.get_data(as_text=True)\n ),\n {\n \"status\": \"success\",\n \"book\": book_one\n }\n )\n\n self.assertEqual(\n list_books(),\n {\n \"status\": \"success\",\n \"books\": []\n }\n )",
"def test_delete_record(self):\n pass",
"def test_delete(self):\n # add a task\n self.add(title=\"Sample task doing\", description=\"for sample\", state=\"doing\")\n task = Task.query.filter_by(title='Sample task doing').first()\n\n # delete\n self.delete(id=task.id)\n task = Task.query.filter_by(title='Sample task doing').first()\n self.assertIsNone(task)",
"def test_delete_restaurant(self):\n from espresso import db\n from espresso import Restaurant\n\n name = 'Restaurant Greco'\n db.session.add(Restaurant(name=name, creator='[email protected]'))\n db.session.commit()\n\n # Since this is a freshly created table, the first id should be 1\n resp = self.test_client.delete(self.API_BASE + '/1', headers=auth_header_all_permissions)\n self.assertEqual(resp.status_code, 200)\n resp_dict = json.loads(resp.data)\n self.assertEqual(resp_dict['success'], True)\n\n resp = self.test_client.get(self.API_BASE + '/1', headers=auth_header_all_permissions)\n self.assertEqual(resp.status_code, 404)\n resp_dict = json.loads(resp.data)\n self.assertEqual(resp_dict['success'], False)",
"def delete_bookings(bookingid):\n # get booking object for bookingid\n booking = Booking.query.get(bookingid)\n\n # update cavaibility of car to available\n car = booking.car\n car.isavailable = True\n \n cal_eventid = booking.caleventid\n\n # delete booking\n db.session.delete(booking)\n db.session.commit()\n\n #remove google calender events\n cal = CalendarUtil()\n resp = cal.deleteFromCalendar(cal_eventid)\n\n if resp == False:\n print(\"Failed to delete event from calender.\")\n\n return bookingSchema.jsonify(booking)",
"def test_delete_lecture(lecture_class, course, valid_datetime):\n id = lecture_class.create_lecture(course, valid_datetime)\n assert id != None\n assert lecture_class.delete_lecture()",
"def test_meeting_poll_delete(self):\n pass",
"def test_delete_records(self):\n pass",
"def test_delete(self):\n person = Person('test_person_b')\n person.delete()\n with database() as db:\n results = db.query(\"SELECT * FROM persons WHERE person_name = 'test_person_b'\")\n self.assertEqual(results, [])",
"def test_delete_run(self):\n pass",
"def test_delete1(self):\n pass",
"def test_recipe_deletion(self):\n recipe = sample_recipe()\n recipe.ingredients.create(name='Eggs')\n\n url = recipe_detail_url(recipe.id)\n res = self.client.delete(url)\n\n self.assertEqual(res.status_code, status.HTTP_204_NO_CONTENT)\n self.assertEqual(Recipe.objects.count(), 0)\n self.assertEqual(Ingredient.objects.count(), 0)",
"def test_deleteEvent(self):\n event_a = Event.objects.create(title=\"christmas party\",\n start=datetime.strptime(\"2020-12-03 12:00\", \"%Y-%m-%d %H:%M\"),\n end=datetime.strptime(\"2020-12-03 16:00\", \"%Y-%m-%d %H:%M\"),\n event_owner=self.person_a,\n duration=timedelta(hours=4),\n recurrence_interval=0, description=\"happy christmas party\", website_publish=True)\n event_a.invites.add(self.comms_grp)\n event_a.save()\n client = APIClient()\n resp = client.delete('/api/events/christmas-party',\n { \"search\": {\"title\": \"christmas party\"}}, format='json')\n self.assertEqual(resp.status_code, 204)",
"def test_delete_patient(self):\n response = self.client.delete(\n reverse('patient:patient-detail', kwargs={'pk': Patient.objects.get().id}))\n self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)\n self.assertEqual(Patient.objects.count(), 0)",
"def test_delete_activity(self):\n pass",
"def test_delete7(self):\n pass",
"def test_delete_question(self):\n\n question = {\n \"title\" : \"Blue\",\n \"question\": \"How do I refactor tests with database?\"\n }\n\n self.app.post('/api/v1/questions',\n data=json.dumps(question),\n content_type='application/json'\n )\n question_id = id_generator(\"Blue\")\n res = self.app.delete('/api/v1/questions/'+str(question_id))\n self.assertEqual(res.status_code, 200)",
"def test_duo_application_delete(self):\n pass",
"def test_delete_card(self):\n CardFactory()\n self.session.commit()\n resp = self.app.get('cards/1')\n\n assert resp.status_code == 200\n\n resp = self.app.delete('cards/1')\n\n assert resp.status_code == 200\n\n resp = self.app.get('cards/1')\n\n assert resp.status_code == 404",
"def delete(self, book_info, destroy):\n self.connect()\n bid = book_info[0].get()\n\n delete_sql = f\"delete from {self.book_table} where bid = '{bid}'\"\n delete_issue = f\"delete from {self.issued_table} where bid = '{bid}'\"\n try:\n self.cur.execute(delete_sql)\n self.con.commit()\n self.cur.execute(delete_issue)\n self.con.commit()\n messagebox.showinfo('Success', \"Book Record Deleted Successfully\")\n book_info[0].delete(0, END)\n except MySQLError as err:\n messagebox.showinfo(\"Please check Book ID\")\n print(err)\n destroy()",
"def test_delete_ingredient(self):\n ingredient = Ingredient.objects.create(user=self.user, name='Lettuce')\n url = detail_url(ingredient.id)\n res = self.client.delete(url)\n\n self.assertEqual(res.status_code, status.HTTP_204_NO_CONTENT)\n ingredients = Ingredient.objects.filter(user=self.user)\n self.assertFalse(ingredients.exists())"
] |
[
"0.7421814",
"0.7413696",
"0.7400403",
"0.7325648",
"0.7278549",
"0.71134543",
"0.7106417",
"0.70954233",
"0.7064444",
"0.6992723",
"0.69602615",
"0.6913653",
"0.6905339",
"0.6885712",
"0.68804866",
"0.68193513",
"0.6801034",
"0.6797192",
"0.6784873",
"0.6738316",
"0.673257",
"0.67308587",
"0.67150915",
"0.6672547",
"0.66599566",
"0.66453874",
"0.6633422",
"0.6628876",
"0.66271085",
"0.66160893"
] |
0.7573865
|
0
|
This test will create a booking in the database, then run the unlockCar function
|
def test_unlockCar(self):
user_id = "12"
car_id = "7"
begin_date = "2020-05-21"
begin_time = "12:00:00"
return_date = "2020-05-23"
return_time = "12:00:00"
begin_datetime = "{} {}".format(begin_date, begin_time)
return_datetime = "{} {}".format(return_date, return_time)
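        # Create a new (not yet ongoing) booking record for this user and car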
newBooking = Booking( user_id = user_id,
car_id = car_id,
begin_time = begin_datetime,
return_time = return_datetime,
ongoing = False)
# Add new row to the database
db.session.add(newBooking)
# Update car's availability
car = Car.query.get(car_id)
car.booked = True
# Commit changes
db.session.commit()
# Check if this is the right user with the right booked car and time
booking = db.session.query(Booking).filter_by( user_id = user_id,
car_id = car_id,
begin_time = begin_datetime).first()
# Activate booking
booking.ongoing = True
# Commit changes
db.session.commit()
self.assertTrue(self.bookingExists(user_id, car_id))
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def test_bookCar(self):\n user_id = \"12\"\n car_id = \"6\"\n begin_date = \"2020-05-21\" \n begin_time = \"12:00:00\"\n return_date = \"2020-05-23\"\n return_time = \"12:00:00\"\n\n begin_datetime = \"{} {}\".format(begin_date, begin_time) \n return_datetime = \"{} {}\".format(return_date, return_time)\n\n newBooking = Booking( user_id = user_id,\n car_id = car_id,\n begin_time = begin_datetime,\n return_time = return_datetime,\n ongoing = False)\n # Add new row to the database\n db.session.add(newBooking)\n\n # Update car's availability \n car = Car.query.get(car_id) \n car.booked = True\n\n # Commit changes\n db.session.commit()\n self.assertTrue(self.bookingExists(user_id, car_id))",
"def test_lockCar(self):\n user_id = \"12\"\n car_id = \"7\"\n\n # Find the booking\n booking = db.session.query(Booking).filter_by( user_id = user_id,\n car_id = car_id,\n ongoing = 1).first()\n\n # Remove record from Booking table\n # db.session.delete(booking)\n\n begin_date = \"2020-05-21\" \n begin_time = \"12:00:00\"\n return_date = \"2020-05-23\"\n return_time = \"12:00:00\"\n\n begin_datetime = \"{} {}\".format(begin_date, begin_time) \n return_datetime = \"{} {}\".format(return_date, return_time)\n\n # Record finished booking to History table\n newHistory = History( user_id = user_id,\n car_id = car_id,\n begin_time = begin_datetime,\n return_time = return_datetime)\n db.session.add(newHistory)\n\n\n # Update car's availability\n car = Car.query.get(car_id) \n car.booked = False\n\n # Commit changes\n db.session.commit()\n self.assertTrue(self.historyExists(user_id, car_id))",
"def test_cancelBooking(self):\n user_id = \"12\"\n car_id = \"6\"\n begin_date = \"2020-05-21\" \n begin_time = \"12:00:00\"\n\n begin_datetime = \"{} {}\".format(begin_date, begin_time) \n\n booking = db.session.query(Booking).filter( Booking.user_id == user_id,\n Booking.car_id == car_id,\n Booking.begin_time == begin_datetime).first()\n \n # Delete row from the database\n db.session.delete(booking)\n\n # Update car's availability \n car = Car.query.get(car_id)\n car.booked = False\n\n # Commit changes\n db.session.commit()\n self.assertFalse(self.bookingExists(user_id, car_id))",
"def test_create_bookings(self):\n baker.make_recipe('booking.user', _quantity=3)\n baker.make_recipe('booking.future_EV', _quantity=2)\n self.assertEqual(Booking.objects.all().count(), 0)\n management.call_command('create_bookings')\n self.assertEqual(Booking.objects.all().count(), 6)",
"def test_make_a_booking(self):\n date = datetime(2030, 3, 1, 11)\n\n response = self.client.post(reverse('bookings', kwargs={'facility': 'g'}), {\n 'book': str(date.timestamp())})\n\n context = response.context\n bookings = context[\"bookings\"]\n\n self.assertEqual(response.status_code, 200)\n self.assertEqual(context[\"quota\"], settings.BOOKINGS_QUOTA - 1)\n self.assertEqual(type(context[\"info\"]), BookingSuccessfulAlert)\n\n for week in bookings:\n for row in week.rows:\n for block in row.blocks:\n if block.date == date:\n self.assertEqual(type(block), BlockReserved)\n else:\n self.assertEqual(type(block), BlockAvailable)",
"def test_ran_out_book_for_borrow(self):\n book = Book.objects.get(pk=1)\n self.assertEqual(book.copies, 1)\n client1 = APIClient()\n client1.login(username=self.students[0].username, password=\"salam*123\")\n response = client1.post(\"/borrows/\", data={\"book\": book.id})\n self.assertEqual(response.status_code, 201)\n client2 = APIClient()\n client2.login(username=self.students[1].username, password=\"salam*123\")\n response = client2.post(\"/borrows/\", data={\"book\": book.id})\n self.assertEqual(response.status_code, 400)",
"def test_create_bookings_without_users(self):\n baker.make_recipe('booking.future_EV')\n self.assertEqual(Booking.objects.all().count(), 0)\n self.assertEqual(User.objects.all().count(), 0)\n management.call_command('create_bookings')\n self.assertEqual(Booking.objects.all().count(), 3)\n self.assertEqual(User.objects.all().count(), 6)",
"def test_create_bookings_without_events(self):\n self.assertEqual(Booking.objects.all().count(), 0)\n\n management.call_command('create_bookings')\n # confirm no errors, and no booking are created\n self.assertEqual(Booking.objects.all().count(), 0)",
"def test_book_return_makes_book_available_for_borrow(self):\n book = Book.objects.get(copies=1)\n client1 = APIClient()\n client1.login(username=self.students[0].username, password=\"salam*123\")\n client1.post(\"/borrows/\", data={\"book\": book.id})\n client2 = APIClient()\n client2.login(username=self.manager.username, password=\"salam*123\")\n client2.post(\"/borrows/1/start/\", data={\"duration\": 5})\n response = client2.post(\"/borrows/1/terminate/\")\n self.assertIsNotNone(response.json()[\"returned_at\"])\n response = client1.post(\"/borrows/\", data={\"book\": book.id})\n self.assertEqual(response.status_code, 201)",
"def test_view_car_booking(client):\n response = client.post(\n BOOKING_API_URL + '/book',\n data=dict(\n pickup_datetime=PICKUP_DATE,\n return_datetime=RETURN_DATE,\n car_id=1,\n ),\n content_type='multipart/form-data'\n )\n\n assert b'Available!' in response.data",
"def test_cancel_booking(self):\n date = datetime(2060, 3, 1, 11)\n\n booking = create_test_booking(self.user, date, date.hour)\n\n response = self.client.post(\n reverse('bookings', kwargs={'facility': 'g'}), {'cancel': str(date.timestamp())})\n\n context = response.context\n bookings = context[\"bookings\"]\n\n self.assertEqual(response.status_code, 200)\n self.assertEqual(context[\"quota\"], settings.BOOKINGS_QUOTA)\n self.assertEqual(type(context[\"info\"]), CancellationAlert)\n\n for week in bookings:\n for row in week.rows:\n for block in row.blocks:\n self.assertEqual(type(block), BlockAvailable)",
"def test_create_boat(self):\n pass",
"def test_can_not_reserve_booked_block(self):\n booking_other = create_test_booking(self.someone, self.first_day, 11)\n\n response = self.client.post(\n reverse('bookings', kwargs={'facility': 'g'}), {'book': str(booking_other.date.timestamp())})\n\n context = response.context\n bookings = context[\"bookings\"]\n\n self.assertEqual(response.status_code, 403)\n self.assertEqual(context[\"quota\"], settings.BOOKINGS_QUOTA)\n\n self.assertEqual(type(context[\"info\"]), NotAllowedAlert)",
"def test_one_reserveation_and_one_booked(self):\n own_booking = create_test_booking(self.user, self.first_day, 11)\n other_booking = create_test_booking(self.someone, self.first_day, 12)\n\n response = self.client.get(\n reverse('bookings', kwargs={'facility': 'g'}))\n context = response.context\n bookings = context[\"bookings\"]\n\n self.assertEqual(response.status_code, 200)\n self.assertEqual(context[\"quota\"], settings.BOOKINGS_QUOTA - 1)\n\n for week in bookings:\n for row in week.rows:\n for block in row.blocks:\n if block.date == own_booking.date:\n self.assertEqual(type(block), BlockReserved)\n elif block.date == other_booking.date:\n self.assertEqual(type(block), BlockBooked)\n else:\n self.assertEqual(type(block), BlockAvailable)",
"def test_ejection_after_battle(self):\n self.battle.submission_id = \"TEST\" # So update_all will work correctly\n\n old_bob_region = self.bob.region\n old_alice_region = self.alice.region\n self.battle.create_skirmish(self.alice, 5)\n\n self.end_battle()\n\n self.assertEqual(self.battle.victor, self.alice.team)\n\n self.assertNotEqual(self.bob.region, self.alice.region)\n self.assertNotEqual(self.bob.region, old_bob_region)\n self.assertEqual(self.alice.region, old_alice_region)",
"def test_save_slot(self):\n business = BUSINESS_FACTORY.create_business()\n slot = Slot.objects.create(site_id=2, business_id=business.id,\n start_date = datetime.date.today(),\n end_date = datetime.date.today() + datetime.timedelta(1))\n LOG.debug(slot)\n self.assertTrue(slot.id)\n self.assertEqual(slot.renewal_rate, 10)\n self.assertEqual(slot.is_autorenew, False)",
"def test_one_reserveation(self):\n test_booking = create_test_booking(self.user, self.first_day, 11)\n\n response = self.client.get(\n reverse('bookings', kwargs={'facility': 'g'}))\n\n bookings = response.context[\"bookings\"]\n\n for week in bookings:\n for row in week.rows:\n for block in row.blocks:\n if block.date == test_booking.date:\n self.assertEqual(type(block), BlockReserved)\n else:\n self.assertEqual(type(block), BlockAvailable)",
"def test_execute_leave_car(self):\n # Setup params\n n_slots = 3\n leave_cmd = \"leave\"\n self.prepare_cars(n_slots)\n\n # Verify command is able execute purge command for all cars\n for i in range(1, n_slots+1):\n success, slot_id = self.controller.execute(leave_cmd, *(i,))\n self.assertTrue(success)\n \n # Verify parking lot is empty\n car_count = self.parking_lot.count_vehicle()\n self.assertEqual(car_count, 0)",
"def test_seat_not_available(self):\n\n user1 = User.objects.create(username=\"user1\", password=\"\", email=\"[email protected]\")\n user2 = User.objects.create(username=\"user2\", password=\"\", email=\"[email protected]\")\n\n course = Course.objects.first()\n course.student.add(user1)\n course.student.add(user2)\n\n self.assertFalse(course.is_seat_available())",
"def test_book_table(self):\n url = \"/book_table\"\n data = {\n \"guest_details\": {\n \"first_name\": \"Prasad\",\n \"last_name\": \"Dalavi\",\n \"email\": \"[email protected]\",\n \"phone_number\": \"8983050327\",\n \"registration\": False\n },\n \"time\": \"23:00\",\n \"date\": \"2020-03-16\",\n \"guest_count\": 2,\n \"table\": 2,\n \"selected_menu\": [4]\n }\n\n response = app.test_client().post(url,\n json=data,\n content_type='application/json')\n assert response.status_code == 200, logging.error(\n \"Booking Table Failed!\")\n logging.info(\"Booking Table Tested!\")",
"def test_new_flight_succeeds(self, init_db, new_flight):\n assert new_flight == new_flight.save()",
"def test_view_booking(client):\n response = client.post(\n BOOKING_API_URL + '/view',\n data=dict(\n booking_id=1\n ),\n content_type='multipart/form-data'\n )\n\n assert b'Booking ID: 1' in response.data\n assert b'Car ID: </strong> 1' in response.data\n assert b'User ID: </strong> 1' in response.data\n assert b'Pickup time: </strong> ' + \\\n str.encode(PICKUP_DATE.strftime(\n DEFAULT_DATETIME_FORMAT)) in response.data\n assert b'Return time: </strong> ' + \\\n str.encode(RETURN_DATE.strftime(\n DEFAULT_DATETIME_FORMAT)) in response.data\n assert b'Canceled' in response.data # previous test canceled this booking.",
"def test_pallet_finish(self) -> None:\n # set some arbitrary values\n pallet_name = 'Hopefully this never matches !@#$%^&*()_+'\n location_code = '0409C2'\n box_type_code = 'Evans'\n starting_box_number = 98765\n number_of_boxes = 40\n ending_box_number = starting_box_number + number_of_boxes\n product_choices = 'Corn', 'Green Beans'\n exp_year_choices = (now().year + 1), (now().year + 2)\n\n # get corresponding records\n box_type_rec = BoxType.objects.get(box_type_code=box_type_code)\n product1 = Product.objects.get(prod_name=product_choices[0])\n product2 = Product.objects.get(prod_name=product_choices[1])\n product_rec_choices = product1, product2\n\n bm = BoxManagementClass()\n\n # build the pallet\n location_rec = Location.objects.get(loc_code=location_code)\n pallet_rec = Pallet.objects.create(\n name=pallet_name,\n location=location_rec,\n pallet_status=Pallet.FILL,\n )\n pallet_rec_id = pallet_rec.id\n\n # build table of values for later comparison\n pallet_box_info = dict()\n for ndx, box_number in enumerate(\n range(starting_box_number, ending_box_number)):\n ind = ndx % 2\n box_name = BoxNumber.format_box_number(box_number)\n product = product_rec_choices[ind]\n exp_year = exp_year_choices[ind]\n box_rec = bm.box_new(box_number=box_name, box_type=box_type_rec)\n pallet_box_rec = PalletBox.objects.create(\n pallet=pallet_rec,\n box_number=box_name,\n box=box_rec,\n product=product,\n exp_year=exp_year,\n box_status=PalletBox.NEW\n )\n pallet_box_info[box_number] = PalletBoxInfo(\n pallet_box_id=pallet_box_rec.id, box_id=box_rec.id,\n box_number=box_name, product=product, exp_year=exp_year)\n\n # finish (publish) the pallet\n bm.pallet_finish(pallet_rec)\n\n # validate that worked properly\n for entry in pallet_box_info:\n with raises(PalletBox.DoesNotExist):\n _ = PalletBox.objects.get(\n pk=pallet_box_info[entry].pallet_box_id\n )\n box_rec = Box.objects.get(pk=pallet_box_info[entry].box_id)\n assert box_rec.box_number == pallet_box_info[entry].box_number\n assert box_rec.box_type == box_type_rec\n assert box_rec.location == location_rec\n assert box_rec.product == pallet_box_info[entry].product\n assert box_rec.exp_year == pallet_box_info[entry].exp_year\n assert box_rec.exp_month_start == 0\n assert box_rec.exp_month_end == 0\n filled_seconds_ago = (now() - box_rec.date_filled).total_seconds()\n assert filled_seconds_ago < 10\n assert box_rec.quantity == box_type_rec.box_type_qty\n\n with raises(Pallet.DoesNotExist):\n _ = Pallet.objects.get(pk=pallet_rec_id)\n return",
"def test_available_book():\n rep = RentRepository()\n rep.store( '23','12', '1', '1')\n try:\n\n idBook = '12'\n idCustomer = '22'\n flag = '1'\n id = '1'\n Validator.available_book(rep.get_all(), idBook)\n\n assert False\n\n except RepositoryExceptionRent as msg:\n assert True",
"def test_finished_new_keycard_scan(self):\n new_scan_pk = 1\n new_rfid = '9999999999'\n\n t_info('Creating new NewKeycardScan object we should have'\n 'at this point..........', 3)\n new_nks_obj = NewKeycardScan.objects.create(\n rfid=new_rfid, assigner_user_id=self.staff_only_user.pk)\n\n t_info('Make sure it has the pk from URL', 4)\n self.assertEqual(new_nks_obj.pk, new_scan_pk)\n\n t_info('Getting response..........', 3)\n response = self.client.get('/done_scan/%d/' % new_scan_pk)\n\n t_info('Check response status code', 4)\n self.assertEqual(response.status_code, 200)\n\n t_info('Check response content type', 4)\n self.assertEqual(response['content-type'], 'application/json')\n\n t_info('Check response content', 4)\n self.assertEqual(simplejson.loads(response.content)['success'], True)\n self.assertEqual(simplejson.loads(response.content)['rfid'], new_rfid)\n\n t_info('Getting the changed NewKeycardScan obj........', 3)\n new_nks_obj = NewKeycardScan.objects.get(pk=1)\n\n t_info('Check that NewKeycardScan object has correct attributes', 4)\n self.assertFalse(new_nks_obj.waiting_for_scan)\n self.assertTrue(new_nks_obj.ready_to_assign)",
"def save(self, *args, **kwargs):\n if not self.pk:\n self.start_time_booking = datetime.date.today()\n self.end_time_booking = self.start_time_booking + datetime.timedelta(days=5)\n self.cars.quantity -= 1\n self.cars.save()\n return super(Reservation, self).save(*args, **kwargs)",
"def test_unpaid_penalty_prevents_borrow(self):\n ten_days_ago = timezone.now() - timezone.timedelta(days=10)\n Borrow.objects.create(\n book_id=1,\n student=self.students[0],\n requested_at=ten_days_ago,\n borrowed_at=ten_days_ago,\n duration=6,\n )\n client1 = APIClient()\n client1.login(username=self.manager.username, password=\"salam*123\")\n client1.post(\"/borrows/1/terminate/\")\n client2 = APIClient()\n client2.login(username=self.students[0].username, password=\"salam*123\")\n response = client2.post(\"/borrows/\", data={\"book\": 5})\n self.assertEqual(response.status_code, 400)",
"def setUp(self):\n # Create 2 users\n test_user1 = User.objects.create_user(\n username='testuser1',\n password='1X<ISRUkw+tuK'\n )\n test_user2 = User.objects.create_user(\n username='testuser2',\n password='2HJ1vRV0Z&3iD'\n )\n\n test_user1.save()\n test_user2.save()\n\n # Create a book\n test_author = Author.objects.create(first_name='John',\n last_name='Smith')\n test_genre = Genre.objects.create(name='Fantasy')\n test_book = Book.objects.create(\n title='Book Title',\n summary='The book summary',\n isbn='ABCDEFG',\n author=test_author,\n )\n\n # Assign a genre to a book\n genre_objects_for_book = Genre.objects.all()\n test_book.genre.set(genre_objects_for_book)\n test_book.save()\n\n # Create 30 exemplars from the book\n number_of_book_copies = 30\n for book_copy in range(number_of_book_copies):\n return_date = timezone.now()\\\n + datetime.timedelta(days=book_copy % 5)\n the_borrower = test_user1 if book_copy % 2 == 0 else test_user2\n status = 'm'\n BookInstance.objects.create(\n book=test_book,\n imprint='Unlikely imprint, 2016',\n due_back=return_date,\n borrower=the_borrower,\n status=status\n )",
"def test_finished_new_keycard_scan_keycard_with_same_rfid_exists(self):\n # create the RFIDkeycard whose rfid is the same as the one trying to\n # assign; create the lockuser with that rfid\n t_info('Creating new lockuser..........', 3)\n lu = LockUser.objects.create(\n first_name='Jane', last_name='Doe', email='[email protected]')\n\n # attempting to assign this one, but it already belongs to an active lock user\n duplicate_rfid = '1111111111'\n new_scan_pk = 1\n\n t_info('Creating new RFIDkeycard and assigning to our '\n 'LockUser..........', 3)\n rk = RFIDkeycard.objects.create(\n the_rfid=duplicate_rfid, lockuser=lu, assigner=self.staff_only_user)\n\n t_info('Make sure this LockUser has this keycard', 4)\n self.assertTrue(lu.is_active()) # or lu.get_current_rfid()...\n\n t_info('Creating new NewKeycardScan object we should have '\n 'at this point..........', 3)\n new_nks_obj = NewKeycardScan.objects.create(\n rfid=duplicate_rfid, assigner_user_id=self.staff_only_user.pk)\n\n t_info('Check that the NewKeycardScan object with pk '\n 'specified in the URL has the duplicate rfid', 4)\n # todo: or is this actually covered in NewKeycardScan model tests?\n self.assertTrue(NewKeycardScan.objects.filter(pk=new_scan_pk,\n rfid=duplicate_rfid))\n\n t_info('Getting response..........', 3)\n response = self.client.get('/done_scan/%d/' % new_nks_obj.pk)\n\n t_info('Check response status code', 4)\n self.assertEqual(response.status_code, 200)\n\n t_info('Check response content type', 4)\n self.assertEqual(response['content-type'], 'application/json')\n\n t_info('Check response content', 4)\n self.assertEqual(simplejson.loads(response.content)['success'], False)\n self.assertEqual(simplejson.loads(response.content)['error_mess'],\n 'A keycard with the same RFID is already assigned to %s.' % lu)",
"def test_confirm_booking(client):\n response = client.post(\n BOOKING_API_URL + '/confirm',\n data=dict(\n pickup_datetime=PICKUP_DATE,\n return_datetime=RETURN_DATE,\n car_id=1,\n ),\n content_type='multipart/form-data'\n )\n\n assert response.status_code == 200\n assert b'Your booking has been confirmed, thank you!' in response.data"
] |
[
"0.7627322",
"0.75792533",
"0.7312907",
"0.6784527",
"0.6369919",
"0.63560116",
"0.62711424",
"0.61127406",
"0.60388106",
"0.60270816",
"0.5990947",
"0.59749573",
"0.5928764",
"0.5898136",
"0.588764",
"0.5835248",
"0.57947266",
"0.5779943",
"0.5769861",
"0.5748892",
"0.5748454",
"0.5747685",
"0.5735991",
"0.57322913",
"0.57184064",
"0.5707739",
"0.5690941",
"0.5683757",
"0.56740534",
"0.56673646"
] |
0.8361254
|
0
|
Test loading a python2-generated whoosh index under python3. This test passes only because of the monkeypatching of whoosh.compat.loads with knowhow.util.pickle_loads.
|
def test_index_load_python2(monkeypatch):
expected = {
"id": ("02a7cefe1189668fa85b56b52ee1e769" "1ee1821913f2031c8117263c07526468"),
"content": "Hello, from Python2",
"tag": ["python2"],
"updated": datetime.datetime(2017, 9, 15, 14, 58, 12, 441405, tzinfo=UTC),
}
import knowhow
home_dir = join(abspath(dirname(dirname(knowhow.__file__))), "data", "homepy2")
data_dir = join(home_dir, "datapy2")
monkeypatch.setenv("KNOWHOW_HOME", home_dir)
monkeypatch.setenv("KNOWHOW_DATA", data_dir)
def load_py2(data, *args, **kwargs):
try:
return pickle.loads(data, *args, **kwargs)
except UnicodeDecodeError as e:
if PYTHON2 or not e.args[0] == "ascii":
raise
result = pickle.loads(data, encoding="bytes")
# need to handle a py2-pickled dict having bytes keys, which will
# be skipped in python3, so we convert all keys to str if needed
if isinstance(result, dict):
d = {}
method = result.iteritems if PYTHON2 else result.items
for k, v in method():
if isinstance(k, bytes):
k = k.decode("ascii")
d[k] = v
if d:
result = d
return result
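    # Open the index that was written by the python2 build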
index = Index()
assert len(index) == 1
with index.search("tag:python2") as results:
assert len(results) == 1
with patch("whoosh.columns.loads", load_py2):
result = results[0]
assert sorted(result.fields.keys()) == sorted(expected.keys())
assert result.fields == expected
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def test_read_index_swift(self):\n\n indexfile = tempfile.mktemp()\n self.addCleanup(os.unlink, indexfile)\n\n TroveSwiftIndexBuilder(\"short.dat\", out=indexfile)\n\n index = TroveSwiftIndex()\n index.reload(indexfile)\n\n docs = sorted([doc for doc in index.documents])\n self.assertEquals(10, len(docs))\n\n self.assertEquals([1, 2, 3, 4, 5, 6, 7, 8, 9, 10], docs)\n\n doc = index.get_document(1)\n ref = {\"id\":\"1\",\"titleName\":\"Hello\"}\n self.assertDictEqual(ref, doc)\n\n doc = index.get_document(10)\n ref = {\"id\":\"10\",\"titleName\":\"Hello\"}\n self.assertNotEquals(None, doc)\n self.assertDictEqual(ref, doc)",
"def test_read_index(self):\n\n indexfile = tempfile.mktemp()\n self.addCleanup(os.unlink, indexfile)\n\n TroveIndexBuilder(\"test/short.dat\", out=indexfile)\n\n index = TroveIndex()\n index.reload(indexfile)\n\n docs = sorted([doc for doc in index.documents])\n self.assertEquals(10, len(docs))\n\n self.assertEquals([1, 2, 3, 4, 5, 6, 7, 8, 9, 10], docs)\n\n doc = index.get_document(1)\n ref = {u\"id\":\"1\",u\"titleName\":u\"Hello\"}\n self.assertNotEquals(None, doc, \"Document not found for id 1\")\n self.assertDictEqual(ref, doc)\n\n doc = index.get_document(10)\n ref = {\"id\":\"10\",\"titleName\":\"Hello\"}\n self.assertNotEquals(None, doc)\n self.assertDictEqual(ref, doc)",
"def _load_index(self):\n try:\n with open(self._index_path, \"rb\") as f:\n version = pickle.load(f)\n data = f.read()\n except EnvironmentError as e:\n # Index doesn't exist yet?\n if e.errno in (errno.ENOENT,):\n return {}\n raise\n if version != self._version:\n # This is another version. Avoid trying to unpickling the\n # rest of the stream, as that may fail.\n return {}\n stamp, overloads = pickle.loads(data)\n _cache_log(\"[cache] index loaded from %r\", self._index_path)\n if stamp != self._source_stamp:\n # Cache is not fresh. Stale data files will be eventually\n # overwritten, since they are numbered in incrementing order.\n return {}\n else:\n return overloads",
"def _load_index(self):\n try:\n with open(self._index_path, \"rb\") as f:\n version = pickle.load(f)\n data = f.read()\n except FileNotFoundError:\n # Index doesn't exist yet?\n return {}\n if version != self._version:\n # This is another version. Avoid trying to unpickling the\n # rest of the stream, as that may fail.\n return {}\n stamp, overloads = pickle.loads(data)\n _cache_log(\"[cache] index loaded from %r\", self._index_path)\n if stamp != self._source_stamp:\n # Cache is not fresh. Stale data files will be eventually\n # overwritten, since they are numbered in incrementing order.\n return {}\n else:\n return overloads",
"def test_import_wc2(self):\r\n tree = self.wc2_tree\r\n root = tree.getroot()\r\n assert importer.put_objects(root) == True",
"def test_creating_index_type(self):",
"def load_index(self, fn):\n name = fn.split('.pkl')[0]\n return utils.load_obj(name)",
"def test_config_load3():\n print test_config_load3.__name__\n test_config_file = BytesIO()\n # non ordered\n append_to_file_mongo_setting(test_config_file, 'mongo-oplog-shard2-rs1')\n append_to_file_mongo_setting(test_config_file, 'mongo-oplog-shard2-rs2')\n append_to_file_mongo_setting(test_config_file, 'mongo-oplog-shard1-rs1')\n append_to_file_mongo_setting(test_config_file, 'mongo-oplog-shard1-rs2')\n append_to_file_mongo_setting(test_config_file, 'mongo-oplog-shard3-rs1')\n append_to_file_mongo_setting(test_config_file, 'mongo-oplog-shard3-rs2')\n append_to_file_mongo_setting(test_config_file, 'mongo-oplog-shard1-rs3')\n append_to_file_mongo_setting(test_config_file, 'mongo-oplog-shard2-rs3')\n append_to_file_mongo_setting(test_config_file, 'mongo-oplog-shard3-rs3')\n\n test_config_file.seek(0)\n # config file processing\n config = configparser.ConfigParser()\n config.read_file(test_config_file)\n pp = PrettyPrinter()\n all_settings = load_mongo_replicas_from_setting(config, \n 'mongo-oplog')\n pp.pprint(all_settings)\n assert(3 == len(all_settings.keys()))\n assert(sorted(all_settings.keys()) == \\\n sorted(['mongo-oplog-shard1', \n 'mongo-oplog-shard2', \n 'mongo-oplog-shard3']))",
"def load_index(self, fn):\n # print('Load ', fn)\n # if fn[len(fn)-4:] == '.pkl':\n # fn = fn[0:len(fn)-4]\n fn = 'idx_bench'\n inverted_index = utils.load_obj(fn)\n return inverted_index",
"def test_other02(self):\n index_file = os.path.join(self.test_input, 'folder1#中文', 'mypage.txt')\n os.makedirs(os.path.dirname(index_file), exist_ok=True)\n with open(index_file, 'w', encoding='UTF-8') as fh:\n fh.write('ABC 中文')\n ts = datetime(2020, 1, 2, 3, 4, 5, 67000, tzinfo=timezone.utc).timestamp()\n os.utime(index_file, (ts, ts))\n\n for _info in file2wsb.run(self.test_input, self.test_output, no_preserve_filename=True):\n pass\n\n book = Host(self.test_output).books['']\n book.load_meta_files()\n book.load_toc_files()\n\n id_folder1, id_item = book.meta.keys()\n self.assertDictEqual(book.meta, {\n id_folder1: {\n 'title': 'folder1#中文',\n 'type': 'folder',\n 'create': id_folder1,\n 'modify': id_folder1,\n },\n id_item: {\n 'title': 'mypage.txt',\n 'type': 'file',\n 'index': f'{id_item}.txt',\n 'create': id_item,\n 'modify': '20200102030405067',\n 'source': '',\n 'icon': '',\n 'comment': '',\n },\n })\n self.assertDictEqual(book.toc, {\n 'root': [\n id_folder1,\n ],\n id_folder1: [\n id_item,\n ],\n })\n self.assertEqual(set(glob.iglob(os.path.join(self.test_output, '**'), recursive=True)), {\n os.path.join(self.test_output, ''),\n os.path.join(self.test_output, f'{id_item}.txt'),\n })",
"def test_export_index(self):",
"def test_create_index_swift(self):\n\n indexfile = tempfile.mktemp()\n self.addCleanup(os.unlink, indexfile)\n\n index = TroveSwiftIndexBuilder(\"short.dat\", out=indexfile)\n\n # read the index file that was created\n with open(indexfile, 'r+b') as fd:\n indextext = fd.read()\n indexlines = indextext.split('\\n')\n\n # 11 lines includes on blank line at the end\n self.assertEquals(11, len(indexlines))\n del indexlines[10]\n\n # check the first character of each line\n docs = [line[0] for line in indexlines]\n self.assertEquals(['1', '2', '3', '4', '5', '6', '7', '8', '9', '1'], docs)\n\n # check some lines from the index\n ref = \"1, 0, 31, short.dat\"\n self.assertEqual(ref, indexlines[0])\n ref = \"10, 279, 32, short.dat\"\n self.assertEqual(ref, indexlines[9])",
"def test_other01(self):\n index_file = os.path.join(self.test_input, 'folder1#中文', 'mypage.txt')\n os.makedirs(os.path.dirname(index_file), exist_ok=True)\n with open(index_file, 'w', encoding='UTF-8') as fh:\n fh.write('ABC 中文')\n ts = datetime(2020, 1, 2, 3, 4, 5, 67000, tzinfo=timezone.utc).timestamp()\n os.utime(index_file, (ts, ts))\n\n for _info in file2wsb.run(self.test_input, self.test_output):\n pass\n\n book = Host(self.test_output).books['']\n book.load_meta_files()\n book.load_toc_files()\n\n id_folder1, id_item = book.meta.keys()\n self.assertDictEqual(book.meta, {\n id_folder1: {\n 'title': 'folder1#中文',\n 'type': 'folder',\n 'create': id_folder1,\n 'modify': id_folder1,\n },\n id_item: {\n 'title': 'mypage.txt',\n 'type': 'file',\n 'index': f'{id_item}/index.html',\n 'create': id_item,\n 'modify': '20200102030405067',\n 'source': '',\n 'icon': '',\n 'comment': '',\n },\n })\n self.assertDictEqual(book.toc, {\n 'root': [\n id_folder1,\n ],\n id_folder1: [\n id_item,\n ],\n })\n self.assertEqual(set(glob.iglob(os.path.join(self.test_output, '**'), recursive=True)), {\n os.path.join(self.test_output, ''),\n os.path.join(self.test_output, id_item),\n os.path.join(self.test_output, id_item, 'index.html'),\n os.path.join(self.test_output, id_item, 'mypage.txt'),\n })",
"def load_word_index(path):\n word_index = open(path + '/word_index.pickle', 'rb')\n word_index = pickle.load(word_index)\n print('Word Index Pickle load successful\\n')\n return word_index",
"def test_level3_pathlib():\n fname = Path(get_test_data('nids/Level3_FFC_N0Q_20140407_1805.nids', as_file_obj=False))\n f = Level3File(fname)\n assert f.filename == str(fname)",
"def testPython3(self):\n resource = Resource.get()\n resource.load(self.__taskPath)\n crawler = FsCrawler.createFromPath(self.__sourcePath)\n dummyTask = Task.create('pythonMajorVerTestTask')\n dummyTask.add(crawler)\n\n wrapper = TaskWrapper.create(\"python3\")\n result = wrapper.run(dummyTask)\n self.assertTrue(len(result), 1)\n self.assertEqual(result[0].var(\"majorVer\"), 3)",
"def test_load_index(self):\n\n c = Client()\n response = c.get('/taric_books/')\n\n self.assertEqual(response.status_code, 200)",
"def test_file(h3):\n\n assert h3.list_buckets() == []\n\n assert h3.create_bucket('b1') == True\n\n with open('/dev/urandom', 'rb') as f:\n data = f.read(3 * MEGABYTE)\n\n with open('testfile', 'wb') as f:\n f.write(data)\n\n h3.create_object_from_file('b1', 'o1', 'testfile')\n h3.create_object_from_file('b1', 'o2', 'testfile')\n h3.write_object_from_file('b1', 'o2', 'testfile', offset=(3 * MEGABYTE))\n\n os.unlink('testfile')\n\n h3.read_object_to_file('b1', 'o1', 'testfile')\n with open('testfile', 'rb') as f:\n assert data == f.read()\n\n h3.read_object_to_file('b1', 'o2', 'testfile', offset=0, size=(3 * MEGABYTE))\n with open('testfile', 'rb') as f:\n assert data == f.read()\n h3.read_object_to_file('b1', 'o2', 'testfile', offset=(3 * MEGABYTE))\n with open('testfile', 'rb') as f:\n assert data == f.read()\n\n assert h3.purge_bucket('b1') == True\n\n assert h3.list_objects('b1') == []\n\n assert h3.delete_bucket('b1') == True",
"def test_loading_document(self):",
"def test_string_index():\n\tlib.backup_and_restore(\n\t\tlambda context: create_indexes(lib.create_string_index),\n\t\tNone,\n\t\tlambda context: check_indexes(lib.check_simple_index, \"foobar\")\n\t)",
"def test_load_from_odmldoc(self):\n doc = create_small_test_odml()\n self.test_table.load_from_odmldoc(doc)\n self.assertEqual(self.test_table._odmldict, self.expected_odmldict)",
"def test_index_libraries(self):\n result1 = self._create_library(slug=\"test-lib-index-1\", title=\"Title 1\", description=\"Description\")\n result2 = self._create_library(slug=\"test-lib-index-2\", title=\"Title 2\", description=\"Description\")\n\n for result in [result1, result2]:\n library_key = LibraryLocatorV2.from_string(result['id'])\n response = ContentLibraryIndexer.get_items([library_key])[0]\n\n assert response['id'] == result['id']\n assert response['title'] == result['title']\n assert response['description'] == result['description']\n assert response['uuid'] == result['bundle_uuid']\n assert response['num_blocks'] == 0\n assert response['version'] == result['version']\n assert response['last_published'] is None\n assert response['has_unpublished_changes'] is False\n assert response['has_unpublished_deletes'] is False",
"def load(cls, backend, path, obj):\n b = backend\n id = obj['id']\n name = obj['name']\n desc = obj['description']\n url = obj['url']\n path = os.path.join(path, id)\n\n # Load the index.json file.\n idx = b.read_json(os.path.join(path, 'index.json'))\n if idx['ApiVersion'] != 0:\n return None\n\n versions = []\n for vsn_obj in idx['Versions']:\n # TODO: Maybe check if the version file exists?\n versions.append(dict(\n id=vsn_obj['Id'],\n name=vsn_obj['Name'],\n ))\n\n return cls(b, id, name, desc, url, path, versions)",
"def test_quest_load_version_fail(testing_quest_page):\n testing_quest_page.save()\n\n # fetch the data\n doc = testing_quest_page.doc_ref.get()\n data = testing_quest_page.storage_model.parse_obj(doc.to_dict())\n\n # mess with the version\n data.version = str(VersionInfo.parse(data.version).bump_major())\n testing_quest_page.doc_ref.set(data.dict())\n\n # try to load with the bad version\n with pytest.raises(QuestLoadError):\n testing_quest_page.load()\n\n # cleanup\n testing_quest_page.delete()",
"def test_download_manifest(monkeypatch, gen3_index):\n rec1 = gen3_index.create_record(\n did=\"dg.TEST/f2a39f98-6ae1-48a5-8d48-825a0c52a22b\",\n hashes={\"md5\": \"a1234567891234567890123456789012\"},\n size=123,\n acl=[\"DEV\", \"test\"],\n authz=[\"/programs/DEV/projects/test\"],\n urls=[\"s3://testaws/aws/test.txt\", \"gs://test/test.txt\"],\n )\n rec2 = gen3_index.create_record(\n did=\"dg.TEST/1e9d3103-cbe2-4c39-917c-b3abad4750d2\",\n hashes={\"md5\": \"b1234567891234567890123456789012\"},\n size=234,\n acl=[\"DEV\", \"test2\"],\n authz=[\"/programs/DEV/projects/test2\", \"/programs/DEV/projects/test2bak\"],\n urls=[\"gs://test/test.txt\"],\n file_name=\"test.txt\",\n )\n rec3 = gen3_index.create_record(\n did=\"dg.TEST/ed8f4658-6acd-4f96-9dd8-3709890c959e\",\n hashes={\"md5\": \"e1234567891234567890123456789012\"},\n size=345,\n acl=[\"DEV\", \"test3\"],\n authz=[\"/programs/DEV/projects/test3\", \"/programs/DEV/projects/test3bak\"],\n urls=[\"gs://test/test3.txt\"],\n )\n # record with space\n rec4 = gen3_index.create_record(\n did=\"dg.TEST/a802e27d-4a5b-42e3-92b0-ba19e81b9dce\",\n hashes={\"md5\": \"f1234567891234567890123456789012\"},\n size=345,\n acl=[\"DEV\", \"test4\"],\n authz=[\"/programs/DEV/projects/test4\", \"/programs/DEV/projects/test4bak\"],\n urls=[\"gs://test/test4 space.txt\", \"s3://test/test4 space.txt\"],\n )\n # mock_index.return_value.get_stats.return_value = gen3_index.get(\"/_stats\")\n\n monkeypatch.setattr(download_manifest, \"INDEXD_RECORD_PAGE_SIZE\", 2)\n\n loop = get_or_create_event_loop_for_thread()\n loop.run_until_complete(\n async_download_object_manifest(\n \"http://localhost:8001\",\n output_filename=\"object-manifest.csv\",\n num_processes=1,\n )\n )\n\n records = {}\n try:\n with open(\"object-manifest.csv\") as file:\n # skip header\n next(file)\n for line in file:\n guid, urls, authz, acl, md5, file_size, file_name = line.split(\",\")\n guid = guid.strip(\"\\n\")\n urls = urls.split(\" \")\n authz = authz.split(\" \")\n acl = acl.split(\" \")\n file_size = file_size.strip(\"\\n\")\n file_name = file_name.strip(\"\\n\")\n\n records[guid] = {\n \"urls\": urls,\n \"authz\": authz,\n \"acl\": acl,\n \"md5\": md5,\n \"file_size\": file_size,\n \"file_name\": file_name,\n }\n except Exception:\n # unexpected file format, fail test\n assert False\n\n # ensure downloaded manifest populates expected info for a record\n assert \"gs://test/test.txt\" in records.get(\n \"dg.TEST/f2a39f98-6ae1-48a5-8d48-825a0c52a22b\", {}\n ).get(\"urls\", [])\n assert \"s3://testaws/aws/test.txt\" in records.get(\n \"dg.TEST/f2a39f98-6ae1-48a5-8d48-825a0c52a22b\", {}\n ).get(\"urls\", [])\n assert \"/programs/DEV/projects/test\" in records.get(\n \"dg.TEST/f2a39f98-6ae1-48a5-8d48-825a0c52a22b\", {}\n ).get(\"authz\", [])\n assert \"DEV\" in records.get(\"dg.TEST/f2a39f98-6ae1-48a5-8d48-825a0c52a22b\", {}).get(\n \"acl\", []\n )\n assert \"test\" in records.get(\n \"dg.TEST/f2a39f98-6ae1-48a5-8d48-825a0c52a22b\", {}\n ).get(\"acl\", [])\n assert \"123\" in records.get(\"dg.TEST/f2a39f98-6ae1-48a5-8d48-825a0c52a22b\", {}).get(\n \"file_size\"\n )\n assert \"a1234567891234567890123456789012\" in records.get(\n \"dg.TEST/f2a39f98-6ae1-48a5-8d48-825a0c52a22b\", {}\n ).get(\"md5\")\n assert not records.get(\"dg.TEST/f2a39f98-6ae1-48a5-8d48-825a0c52a22b\", {}).get(\n \"file_name\"\n )\n assert \"gs://test/test4%20space.txt\" in records.get(\n \"dg.TEST/a802e27d-4a5b-42e3-92b0-ba19e81b9dce\", {}\n ).get(\"urls\", [])\n assert \"s3://test/test4%20space.txt\" in records.get(\n 
\"dg.TEST/a802e27d-4a5b-42e3-92b0-ba19e81b9dce\", {}\n ).get(\"urls\", [])\n\n # assert other 2 records exist\n assert \"dg.TEST/ed8f4658-6acd-4f96-9dd8-3709890c959e\" in records\n assert \"dg.TEST/1e9d3103-cbe2-4c39-917c-b3abad4750d2\" in records\n assert \"test.txt\" == records.get(\n \"dg.TEST/1e9d3103-cbe2-4c39-917c-b3abad4750d2\", {}\n ).get(\"file_name\")",
"def test_fobj():\n Level3File(get_test_data('nids/Level3_FFC_N0Q_20140407_1805.nids'))",
"def test_python3(self):\n if sys.version.startswith(\"3.\"):\n self.assertTrue(_PY3)",
"def _test_python_python(basename):\n\n contents = _read_test_file(basename, 'python')\n cells = convert(contents, from_='python')\n converted = convert(cells, to='python')\n\n assert _diff(contents, converted) == ''",
"def testArchiveImport(self):\n\n archive = alembic.Abc.IArchive(\"iterator.abc\")\n top = archive.getTop()\n\n # lets check the iterators\n self.assertEqual(len(top.children), 3)\n\n curI = 0\n for i in top.children:\n self.assertEqual(len(i.children), 3)\n self.assertEqual(i.getName(), 'childObj' + str(curI))\n curI += 1\n\n curJ = 0\n for j in i.children:\n self.assertEqual(j.getName(), \"grandChild\" + str(curJ))\n curJ += 1\n self.assertEqual(len(j.getProperties().propertyheaders), 3)\n curK = 0\n for k in j.getProperties().propertyheaders:\n self.assertEqual(k.getName(), 'prop' + str(curK))\n cp = alembic.Abc.ICompoundProperty(j.getProperties(), 'prop' + str(curK))\n curK += 1\n\n sp = alembic.Abc.IStringProperty(cp, 'scalar')\n samp = sp.samples\n self.assertEqual(len(samp), 3)\n self.assertEqual(samp[0], \"a\")\n self.assertEqual(samp[1], \"b\")\n self.assertEqual(samp[2], \"c\")\n\n ap = alembic.Abc.IStringArrayProperty(cp, 'array')\n samp = ap.samples\n self.assertEqual(len(samp), 3)\n self.assertEqual(len(samp[0]), 3)\n self.assertEqual(len(samp[1]), 2)\n self.assertEqual(len(samp[2]), 1)\n self.assertEqual(samp[0][0], 'a')\n self.assertEqual(samp[0][1], 'b')\n self.assertEqual(samp[0][2], 'c')\n self.assertEqual(samp[1][0], 'd')\n self.assertEqual(samp[1][1], 'e')\n self.assertEqual(samp[2][0], 'f')",
"def test_path03(self):\n index_file = os.path.join(self.test_input, 'folder1#中文', 'folder2', 'mypage.html')\n os.makedirs(os.path.dirname(index_file), exist_ok=True)\n with open(index_file, 'w', encoding='UTF-8') as fh:\n fh.write(\"\"\"\\\n<!DOCTYPE html>\n<html\n data-scrapbook-create=\"20200101000000000\"\n data-scrapbook-modify=\"20200101000000000\"\n data-scrapbook-source=\"http://example.com\">\n<head>\n<meta charset=\"UTF-8\">\n<title>MyTitle 中文</title>\n</head>\n<body>\npage content\n</body>\n</html>\n\"\"\")\n\n for _info in file2wsb.run(self.test_input, self.test_output, no_preserve_filename=True):\n pass\n\n book = Host(self.test_output).books['']\n book.load_meta_files()\n book.load_toc_files()\n\n id_folder1, id_folder2, id_item = book.meta.keys()\n self.assertDictEqual(book.meta, {\n id_folder1: {\n 'title': 'folder1#中文',\n 'type': 'folder',\n 'create': id_folder1,\n 'modify': id_folder1,\n },\n id_folder2: {\n 'title': 'folder2',\n 'type': 'folder',\n 'create': id_folder2,\n 'modify': id_folder2,\n },\n id_item: {\n 'title': 'MyTitle 中文',\n 'type': '',\n 'index': f'{id_item}.html',\n 'create': '20200101000000000',\n 'modify': '20200101000000000',\n 'source': 'http://example.com',\n 'icon': '',\n 'comment': '',\n },\n })\n self.assertDictEqual(book.toc, {\n 'root': [\n id_folder1,\n ],\n id_folder1: [\n id_folder2,\n ],\n id_folder2: [\n id_item,\n ],\n })\n self.assertEqual(set(glob.iglob(os.path.join(self.test_output, '**'), recursive=True)), {\n os.path.join(self.test_output, ''),\n os.path.join(self.test_output, f'{id_item}.html'),\n })"
] |
[
"0.6131236",
"0.6047128",
"0.5761075",
"0.5666576",
"0.55717415",
"0.5535717",
"0.55165625",
"0.54906267",
"0.5470208",
"0.5432962",
"0.5362086",
"0.53338885",
"0.5318798",
"0.5301677",
"0.52784836",
"0.5248858",
"0.5231386",
"0.5221514",
"0.52198315",
"0.5207653",
"0.51958495",
"0.5184151",
"0.5164314",
"0.51635855",
"0.51605225",
"0.5160273",
"0.51508546",
"0.5148646",
"0.5133396",
"0.5094675"
] |
0.83080643
|
0
|
Takes in an array of predicted values and verifies that each prediction has the correct label. If any of the predictions are wrong, the test fails. Returns True if every prediction matches, otherwise False.
|
def CheckWrong(predicted, correct):
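    # Walk the predictions; report and bail out on the first mismatch.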
for prediction in predicted:
if prediction != correct:
print("Expected: ", correct," recieved: ", prediction)
return False
return True
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def make_predictions(model, test_set, val_set):\n \n ## Uses model to predict some amount of images\n predict = model.predict_classes(test_set, batch_size=5, verbose=1)\n \n ## We use the length of these two arrays when we sift through the data to find\n ## the right predictions and wrong predictions\n images = len(test_set)\n\n ## Initialises variables for loop\n correctly_guessed = 0\n\n ## Begins loop to find total correct predictions\n for i in range(images):\n if predict[i] == np.argmax(val_set[i]):\n correctly_guessed += 1\n\n ## Returns amount of predictions were correct\n print('\\nCorrectly guessed = ', correctly_guessed)\n print('Inorrectly guessed = ', (images - correctly_guessed))",
"def test_predict(self):\n \n\n model ,vec, x_testing=setup_log_reg_classifier(self.training_data, self.training_y, self.testing_data,\"text\", method=\"count\")\n \n model2 ,vec_tfidf, x_testing2=setup_log_reg_classifier(self.training_data, self.training_y, self.testing_data,\"text\", method=\"tfidf\")\n \n \n \"\"\" Test correct data types and corrrect range of predicted values (1,0) for predict with countVectorizer\"\"\" \n \n self.assertIsInstance(predict(model,x_testing),\n np.ndarray)\n \n self.assertTrue(([0,1] ==np.unique(predict(model2,x_testing2))).all())\n\n \n \"\"\" Test correct data types and corrrect range of predicted values (1,0) for predict with tfidfVectorizer\"\"\" \n \n self.assertIsInstance(predict(model,x_testing),\n np.ndarray)\n \n self.assertTrue(([0,1] ==np.unique(predict(model2,x_testing2))).all())",
"def percent_accuracy(self, true_values, predicted_values):\n\n correct = 0\n size = len(true_values)\n for i in range(len(true_values)):\n true_labels = true_values[i]\n predicted_labels = predicted_values[i]\n predicted_index = np.argmax(predicted_labels)\n\n if true_labels[predicted_index] == 1:\n correct += 1",
"def check_prediction(self):\n predicted_scores = self.sess.run(self.NET.output_with_relu, feed_dict={self.NET.input: self.test_image if len(self.test_image.shape)==4 else [self.test_image]})\n self.original_confidence = np.max(predicted_scores)\n if np.argmax(predicted_scores,1) != self.original_label:\n print(\"Network's Prediction is Already Incorrect!\")\n return True\n else:\n return False",
"def prediction():\r\n\r\n\r\n\tpredictVal = []\r\n\taccuracy = 0.0\r\n\r\n\t# Calculate accuracy for each class in testData\r\n\tfor item in testData:\r\n\t\tclass0Prediction = posProb / 100\r\n\t\tclass1Prediction = negProb / 100\r\n\t\t\r\n\t\t# Multiply the prior probablities for negative and positive reviews by their feature likelihoods \r\n\t\tfor word in item[2]:\r\n\t\t\tclass0Prediction *= class0Dict[word]\r\n\t\t\tclass1Prediction *= class1Dict[word]\r\n\r\n\t\t# Give every item in testData a predicted value\r\n\t\tif(class0Prediction > class1Prediction):\r\n\t\t\tpredictVal.append('0')\r\n\t\telse:\r\n\t\t\tpredictVal.append('1')\r\n\r\n\tfor i in range(len(testData)):\r\n\t\tif(testData[i][1] == predictVal[i]):\r\n\t\t\taccuracy += 1\r\n\r\n\t\t\t\r\n\taccuracy = 100 * (accuracy / len(testData))\r\n\treturn(predictVal, accuracy)",
"def test_response_value(predict, y):\r\n print(\"test_response_value()...\", end = \"\")\r\n if len(set(y)) == 1:\r\n assert (predict == y).all()\r\n print(\"Passed!\")",
"def validate(predictions, answers):\n return np.array([p in answers for p in predictions])",
"def evaluate(labels, predictions):\n\n true_positives = 0\n label_positives = 0\n\n true_negatives = 0\n label_negatives = 0\n\n for i in range(len(predictions)):\n if labels[i] == predictions[i] == 1:\n true_positives += 1\n if labels[i] == 1:\n label_positives += 1\n\n if labels[i] == predictions[i] == 0:\n true_negatives += 1\n if labels[i] == 0:\n label_negatives += 1\n\n return true_positives / label_positives, true_negatives / label_negatives\n\n # raise NotImplementedError",
"def evaluate(y_test, pred_labels):\n \n # Converts one-hot code to a label (the index of 1)\n y_test_labels = np.argmax(y_test, axis=1)\n \n # Compare test labels to predicted labels\n score = accuracy_score(y_test_labels, pred_labels)\n \n return y_test_labels, score",
"def compare_predictions():\n validation_labels = np.array(pd.read_csv(val_true_labels_dir + dataset_version + 'validation_labels.csv', index_col=0))\n validation_labels = np.reshape(validation_labels, (-1))\n\n diff_between_files = []\n also1s = []\n also2s = []\n for filename1 in os.listdir(val_predictions_dir):\n if filename1.endswith(\".csv\"):\n for filename2 in os.listdir(val_predictions_dir):\n if filename2.endswith(\".csv\"):\n if filename1 < filename2:\n wrong1 = 0\n wrong2 = 0\n diff_between = 0\n also1 = 0\n also2 = 0\n diff_corr1 = 0\n diff_corr2 = 0\n f1 = np.array(pd.read_csv(val_predictions_dir + filename1, index_col=0))\n f1 = np.reshape(f1, (-1))\n f2 = np.array(pd.read_csv(val_predictions_dir + filename2, index_col=0))\n f2 = np.reshape(f2, (-1))\n for line in range(f1.shape[0]):\n if f1[line] != validation_labels[line]:\n wrong1 += 1\n if f2[line] != validation_labels[line]:\n wrong2 += 1\n if f1[line] != f2[line]:\n diff_between += 1\n if f1[line] == validation_labels[line]:\n diff_corr1 += 1\n if f2[line] == validation_labels[line]:\n diff_corr2 += 1\n if f1[line] != validation_labels[line]:\n if f2[line] != validation_labels[line]:\n also2 += 1\n if f2[line] != validation_labels[line]:\n if f1[line] != validation_labels[line]:\n also1 += 1\n\n diff_between_files.append(diff_between)\n print(filename1)\n print('Wrongly predicted by 1: ' + str(100 * wrong1 / f1.shape[0]) + '%')\n print(filename2)\n print('Wrongly predicted by 2: ' + str(100 * wrong2 / f1.shape[0]) + '%')\n print()\n print('Differences between files: ' + str(100 * diff_between / f1.shape[0]) + '%')\n print(f'\\t of which correct by 1 {100 * diff_corr1 / diff_between}%, by 2 {100 * diff_corr2 / diff_between}%')\n also1s.append(also1 / wrong2)\n also2s.append(also2 / wrong1)\n print('Wrongly predicted by other among wrong ones: ' + str(100 * also2 / wrong1) + '%, ' + str(\n 100 * also1 / wrong2) + '%\\n\\n\\n')\n\n print('Max, min and avg differences between files:')\n print(str(100 * max(diff_between_files) / validation_labels.shape[0]) + '%')\n print(str(100 * min(diff_between_files) / validation_labels.shape[0]) + '%')\n print(str(100 * np.mean(diff_between_files) / validation_labels.shape[0]) + '%')\n\n print('\\nWrongly predicted by first that were also wrongly predicted by second:')\n print('Max: ' + str(100 * max(also2s)) + '%')\n print('Min: ' + str(100 * min(also2s)) + '%')\n print('Avg: ' + str(100 * np.mean(also2s)) + '%')\n\n print('\\nWrongly predicted by second that were also wrongly predicted by first:')\n print('Max: ' + str(100 * max(also1s)) + '%')\n print('Min: ' + str(100 * min(also1s)) + '%')\n print('Avg: ' + str(100 * np.mean(also1s)) + '%')",
"def predict_label(examples_set):\n all_labels = list(('yes', 'no'))\n prediction = 'no'\n\n for label in all_labels:\n all_same_label = True\n for example in examples_set:\n if example[14] != label:\n all_same_label = False\n break\n if all_same_label:\n prediction = label\n break\n return prediction",
"def test_predict(self):\n self.regression_single.predict(self.X_test)\n self.assertTrue(len(self.regression_single.y_pred))\n self.regression_boston.predict(self.boston_x_test)\n self.assertTrue(len(self.regression_boston.y_pred))",
"def test_using_predict(self):\n [X, labels, Y] = self.gen_data()\n # Call algorithm\n bias = multiLogReg(self.sds.from_numpy(\n X), self.sds.from_numpy(Y), verbose=False).compute()\n\n [m, y_pred, acc] = multiLogRegPredict(self.sds.from_numpy(\n X), self.sds.from_numpy(bias), self.sds.from_numpy(Y), verbose=False).compute()\n\n self.assertTrue(acc > 98)",
"def accuracy(targets: List[List[float]], predict: List[List[float]]):\r\n correct = 0\r\n for i in range(len(targets)):\r\n if predict[i] == targets[i]:\r\n correct += 1\r\n return correct / len(targets) * 100",
"def evaluate(labels, predictions):\n correct_positive = 0\n correct_negative = 0\n total_positive = 0\n total_negative = 0\n\n for i in range(len(labels)):\n if labels[i] == 1:\n total_positive += 1\n if predictions[i] == 1:\n correct_positive += 1\n else:\n total_negative += 1\n if predictions[i] == 0:\n correct_negative += 1\n\n sensitivity = correct_positive / total_positive\n specificity = correct_negative / total_negative\n\n return sensitivity, specificity",
"def test_predict(self):\n\n classifier = BertCCAMClassifier()\n classifier.load_model(\"models\")\n prediction = classifier.predict([\"bartosz\"])\n self.assertEqual(prediction, [{\"labels\": (\"B\",)}])\n\n # with multiple labels\n prediction = classifier.predict([\"ala bert\"])\n self.assertEqual(prediction, [{\"labels\": (\"A\", \"B\")}])\n\n # in a batch\n prediction = classifier.predict([\"bartosz\", \"adam\"])\n self.assertEqual(prediction, [{\"labels\": (\"B\",)}, {\"labels\": (\"A\",)}])",
"def label_errors(preds, labels):\n num_correct = num_correct_fun(preds, labels)\n return (1.0 - num_correct / preds.size(0)) * 100.0",
"def evaluate(labels, predictions):\n #labels and predictions\n truePos = 0\n trueNeg = 0\n for data in range(len(labels)):\n if((predictions[data] == 1) and (predictions[data] == labels[data])):\n truePos+=1\n elif((predictions[data] == 0) and (predictions[data] == labels[data])):\n trueNeg+=1\n sensitivity = truePos/(len(labels) + 1)\n specificity = trueNeg/(len(labels) + 1)\n return (sensitivity, specificity)\n \n\n #raise NotImplementedError",
"def evaluate(labels, predictions):\n\n truePositiveCounter = 0\n trueNegativeCounter = 0\n truePositiveCorrect = 0\n trueNegativeCorrect = 0\n \n sensitivity = 0\n specificity = 0\n\n for i in range(len(labels)):\n if labels[i] == 1:\n truePositiveCounter += 1\n if(labels[i] == predictions[i]):\n truePositiveCorrect += 1\n elif labels[i] == 0:\n trueNegativeCounter += 1\n if(labels[i] == predictions[i]):\n trueNegativeCorrect += 1\n\n sensitivity = truePositiveCorrect / truePositiveCounter\n specificity = trueNegativeCorrect / trueNegativeCounter\n\n return sensitivity, specificity",
"def prediction_processing(predictions, labels, threshold, step_nb):\n new_labels = []\n new_predictions = []\n number_sequences = step_nb//50\n\n for k in range(len(labels)//number_sequences):\n total_prediction = 0\n isLabelTrue = labels[number_sequences*k]\n for i in range(number_sequences):\n total_prediction += (1/predictions[number_sequences*k+i])\n if not(isLabelTrue == (labels[number_sequences*k+i])):\n logger.error('Problem.')\n if total_prediction > threshold:\n total_prediction = False\n else:\n total_prediction = True\n new_labels.append(isLabelTrue)\n new_predictions.append(total_prediction)\n\n recall_1 = recall_score(new_labels, new_predictions)\n recall_0 = recall_score(new_labels, new_predictions, pos_label=0)\n precision_1 = precision_score(new_labels, new_predictions)\n precision_0 = precision_score(new_labels, new_predictions, pos_label=0)\n return((recall_1, recall_0, precision_1, precision_0), new_predictions, new_labels)",
"def test(self):\r\n error_count = 0\r\n N_TESTING = len(self.TESTING_DATA)\r\n for i in range(N_TESTING):\r\n x_vec = self.TESTING_DATA[i][:-1]\r\n y = self.TESTING_DATA[i][-1]\r\n\r\n result = self.bp.classify(x_vec)\r\n if result != y: error_count += 1\r\n print(error_count, \" errors on the test data, out of \", N_TESTING, \"items.\")",
"def check_predict_proba_one_binary(classifier, dataset):\n\n for x, y in dataset:\n y_pred = classifier.predict_proba_one(x)\n classifier = classifier.learn_one(x, y)\n assert set(y_pred.keys()) == {False, True}",
"def check_correctness_raw(classifier_out, test_data):\n labels = test_data.labels\n num_correct = 0\n total = len(classifier_out)\n for index, label in classifier_out:\n if labels[index] == label:\n num_correct += 1\n print(f'Got {num_correct} out of {total} correct: {(num_correct / total) * 100}%')",
"def evaluate(labels, predictions):\n actual_positive = 0\n actual_negative = 0\n predicted_positive = 0\n predicted_negative = 0\n for i, j in zip(labels, predictions):\n if i == 1:\n actual_positive += i\n predicted_positive += j\n else:\n actual_negative += 1\n if j == 0:\n predicted_negative += 1\n return predicted_positive/actual_positive, predicted_negative/actual_negative",
"def predict(self):\n self.predicted_test_summary = []\n for step in xrange(0, self.test_size // self.test_batch_size):\n print 'Predicting Batch No.:', step\n offset = (step * self.test_batch_size) % self.test_size\n batch_data_fwd = self.X_tst_fwd[offset:(offset + self.test_batch_size), :].T\n batch_data_bwd = self.X_tst_bwd[offset:(offset + self.test_batch_size), :].T\n summary_test_out = self._predict_batch(batch_data_fwd, batch_data_bwd)\n self.predicted_test_summary.extend(summary_test_out)\n\n print 'Prediction Complete. Moving Forward..'\n\n # test answers\n self.test_review = self.X_tst_fwd\n self.predicted_test_summary = self.predicted_test_summary\n self.true_summary = self.Y_tst",
"def accuracies(actual_labels, predicted_labels):\n acc = 0\n letter_acc = 0\n letter_cnt = 0\n cnt = 0\n for i in range(len(actual_labels)):\n predicted_output = predicted_labels[i]\n actual_output = actual_labels[i]\n cnt += 1\n for j in range(min(len(predicted_output), len(actual_output))):\n if predicted_output[j] == actual_output[j]:\n letter_acc += 1\n letter_cnt += max(len(predicted_output), len(actual_output))\n if actual_output == predicted_output:\n acc += 1\n final_accuracy = np.round((acc / len(actual_labels)) * 100, 2)\n final_letter_accuracy = np.round((letter_acc / letter_cnt) * 100, 2)\n return final_accuracy, final_letter_accuracy",
"def _check_binary_probabilistic_predictions(y_true, y_prob):\n assert len(y_true) == len(y_prob)\n\n labels = np.unique(y_true)\n\n if len(labels) > 2:\n raise ValueError(\"Only binary classification is supported. \"\n \"Provided labels %s.\" % labels)\n\n if y_prob.max() > 1:\n raise ValueError(\"y_prob contains values greater than 1.\")\n\n if y_prob.min() < 0:\n raise ValueError(\"y_prob contains values less than 0.\")\n\n return label_binarize(y_true, labels)[:, 0]",
"def test(self):\n y_list = []\n y_hat_list = []\n for ex_dict in ut.TEST_LIST:\n y_list.append(ex_dict[1])\n y_hat_list.append(self.predict(ex_dict[0]))\n acc = ut.compute_accuracy(y_hat_list, y_list)\n return y_hat_list, acc",
"def predictFailures (self) :\n \n while self.traceData :\n\n if self.traceData [0] == self.traceType :\n\n self.totalEvents += 1\n\n if random.random () < self.recall :\n\n self.predictedEvents += 1\n self.pTraceHandle.write (\"%d\\t%d\\t%d\\n\" % (self.traceData [0], self.traceData [1], self.traceData [2]))\n self.correctPredictions += 1\n self.totalPredictions += 1\n\n self.readNextTraceLine (self.fTraceHandle)\n\n if self.precision < 1 :\n\n wrongPredictions = int ((float (self.correctPredictions * (1 - self.precision)) / self.precision) + 0.5)\n \n interval = int ((self.endTime - self.startTime) / wrongPredictions)\n start = self.startTime\n end = start + interval\n\n for i in range (wrongPredictions) :\n \n self.pTraceHandle.write (\"%d\\t%d\\t%d\\n\" % (0, random.randint (0, self.totalNodes - 1), \\\n random.randint (start, end - 1)))\n self.totalPredictions += 1\n start = end\n end = start + interval",
"def predict_labels(model, x_test):\n \n pred = model.predict(x_test)\n #pred_labels = model.predict_classes(x_test) # depricated\n pred_labels = np.argmax(model.predict(x_test), axis=-1)\n \n return pred, pred_labels"
] |
[
"0.6987116",
"0.69262993",
"0.67940277",
"0.6764341",
"0.67562723",
"0.6734727",
"0.67149043",
"0.66110086",
"0.6591573",
"0.65899706",
"0.65859324",
"0.6563824",
"0.65305287",
"0.65157634",
"0.65003157",
"0.64948696",
"0.6471281",
"0.64597934",
"0.64489913",
"0.64332414",
"0.6431584",
"0.64248544",
"0.63777655",
"0.6363621",
"0.63608384",
"0.63527554",
"0.63435704",
"0.6333836",
"0.6325281",
"0.6322399"
] |
0.69688153
|
1
|
Tests the control environment of the system by verifying that the program executes without exceptions
|
def testControlEnvironment(video1, video2):
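    # Run the control pipeline once; a ValueError is treated as a failed environment check.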
try:
control.main(video1, video2, Verbose=True, Testing=True)
return True
except ValueError:
return False
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def test_main_succeeds(app_tester: ApplicationTester) -> None:\n assert app_tester.execute(\"\") == 0",
"def test_script_integrity(capsys):\n script = os.path.abspath(\"examples/scikitlearn-iris/main.py\")\n\n return_code = subprocess.call([\"python\", script, \"0.1\"])\n\n assert return_code != 2, \"The example script does not exists.\"\n assert return_code != 1, \"The example script did not terminates its execution.\"\n assert (\n return_code == 0 and not capsys.readouterr().err\n ), \"The example script encountered an error during its execution.\"",
"def RunTest(self):\n self.TestLs()\n self.TestTerminate()\n self.TestMultipleProcesses()",
"def test_sc():\n result = run_cmd(\"sc\")\n assert 'Usage:' in result",
"def test(self):\n\n if 0 not in self.supported:\n return 'not_applicable'\n\n self.gdb.b(\"main\")\n self.gdb.c()\n\n # Machine mode\n self.gdb.p(\"$priv=3\")\n main_address = self.gdb.p(\"$pc\")\n self.gdb.stepi()\n assertEqual(\"%x\" % self.gdb.p(\"$pc\"), \"%x\" % (main_address+4))\n\n # User mode\n self.gdb.p(\"$priv=0\")\n self.gdb.stepi()\n # Should have taken an exception, so be nowhere near main.\n pc = self.gdb.p(\"$pc\")\n assertTrue(pc < main_address or pc > main_address + 0x100)",
"def test_main_succeeds_in_production_env(runner: CliRunner) -> None:\n result = runner.invoke(console.main)\n assert result.exit_code == 0",
"def execute(self):\n\n self._status = 'Running'\n\n try:\n self._init_staf_handle()\n self._ping()\n\n if self._sut.os == 'Linux':\n self._linux_power_control()\n elif self._sut.os == 'Windows':\n self._windows_power_control()\n else:\n raise CoreError(\"Unknown OS platform: {0}\".format(self._sut.os))\n\n if self._wait:\n sleep(BespokeGlobals.VM_BOOT_WAIT)\n\n self._status = 'Pass'\n except CoreError as e:\n self._status = 'Fatal'\n self._message = e.msg\n finally:\n self._close_staf_handle()\n\n #Notify TestCase that a failure occurred.\n if self._status == 'Fatal': raise FatalError(self._message)",
"def test_example(self, _, cmd):\n out = subprocess.run(cmd, shell=True)\n self.assertFalse(out.returncode)",
"def test_main_succeeds(runner: CliRunner, mock_requests_get: MockFixture) -> None:\n result = runner.invoke(console.main)\n assert result.exit_code == 0",
"def test_check_system_cmd_line(self):\n\n intro = \"Checking your system, this may take a few seconds...\"\n\n cmd = ['pydroid', 'check']\n p = subprocess.Popen(cmd, shell=False, stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n\n out = p.communicate()[0]\n self.assertIn(intro, out)\n self.assertTrue('Success' in out or 'Fix' in out)",
"def test_script(self) -> None:\n main()",
"def script_test(path):\n log.info(\" ... EXECUTING {}\".format(str(path)))\n\n cmd = [sys.executable, str(path)]\n cp = subprocess.run(cmd, stderr=subprocess.PIPE)\n if cp.returncode:\n log.info(\" ... FAILED\")\n log.info(\" ___ TRACEBACK\")\n log.info(cp.stderr.decode(\"utf-8\") + \"\\n\\n\")\n return False\n else:\n log.info(\" ... PASSED\")\n return True",
"def test_cli_simulation(self):\n self.run_owtf('-s')\n self.assert_is_in_logs(\n 'All jobs have been done. Exiting.',\n name='MainProcess',\n msg='OWTF did not finish properly!')\n plugin_handler = ServiceLocator.get_component(\"plugin_handler\")\n self.assertTrue(\n plugin_handler.Simulation,\n msg='OWTF should have been run in simulation mode!')",
"def sanity_check_process(self):\n assert_equals(self.proc.returncode, None)\n time.sleep(1)",
"def do_test(self):\n\t\tshutit_global.shutit_global_object.yield_to_draw()\n\t\tif not self.build['dotest']:\n\t\t\tself.log('Tests configured off, not running',level=logging.DEBUG)\n\t\t\treturn\n\t\t# Test in reverse order\n\t\tself.log('PHASE: test', level=logging.DEBUG)\n\t\tself.stop_all()\n\t\tself.start_all()\n\t\tfor module_id in self.module_ids(rev=True):\n\t\t\t# Only test if it's installed.\n\t\t\tif self.is_installed(self.shutit_map[module_id]):\n\t\t\t\tself.log('RUNNING TEST ON: ' + module_id, level=logging.DEBUG)\n\t\t\t\tself.login(prompt_prefix=module_id,command=shutit_global.shutit_global_object.bash_startup_command,echo=False)\n\t\t\t\tif not self.shutit_map[module_id].test(self):\n\t\t\t\t\tself.fail(module_id + ' failed on test', shutit_pexpect_child=self.get_shutit_pexpect_session_from_id('target_child').pexpect_child) # pragma: no cover\n\t\t\t\tself.logout(echo=False)",
"def test_conditions(self):\n Utils.start_home(self.serial)\n AppUtils.kill_app(self.serial, self.package)\n AppUtils.open_app(self.device, self.serial, self.app)\n Utils.wait_short()",
"def _perform_environment_check(check_auth=True):\n correct, errors = verify_environment(check_auth)\n\n if not correct:\n print_error(\n \"Cannot execute command because of problem(s) with environment:\")\n for error in errors:\n print_error(\" - \" + error)\n sys.exit(1)",
"def test_runSignaled(self):\n builder = BookBuilder()\n exc = self.assertRaises(\n CommandFailed, builder.run,\n [sys.executable, '-c',\n 'import sys; print \"hi\"; sys.stdout.flush(); '\n 'import os; os.kill(os.getpid(), 9)'])\n self.assertEquals(exc.exitSignal, 9)\n self.assertEquals(exc.exitStatus, None)\n self.assertEquals(exc.output, \"hi\\n\")",
"def execute(self):\n\n self._status = 'Running'\n\n try:\n self._init_staf_handle()\n self._prep_vm()\n self._ping()\n self._install_bespoke()\n sleep(self._post_wait)\n self._status = 'Pass'\n except CoreError as e:\n self._status = 'Fatal'\n self._message = e.msg\n finally:\n self._close_staf_handle()\n\n #Notify TestCase that a failure occurred.\n if self._status == 'Fatal': raise FatalError(self._message)",
"def test_run_and_check_result(self):\n # Run a successful command.\n result = build_cmake_project.run_and_check_result('echo hello world')\n self.assertTrue(result)\n\n # Run a failure command.\n try:\n result = build_cmake_project.run_and_check_result('unexistent --command')\n except subprocess.CalledProcessError:\n self.fail('Exception thrown when running unexistent command.')\n self.assertFalse(result)",
"def test_main_succeed_en(runner: CliRunner) -> None:\n result = runner.invoke(__main__.main, \"-c tests/clippings-en.txt\")\n assert result.exit_code == 0",
"def test_stress_scu(self):\n proc = subprocess.Popen(['ectool', 'stress'], stdout=subprocess.PIPE)\n time.sleep(5)\n proc.send_signal(subprocess.signal.SIGINT)\n stdout, _ = proc.communicate()\n self.assertIn(b'Total failures: 0\\n', stdout)",
"def test_check_system_python_api(self):\n\n errors, successes = check_system.check_system()\n self.assertTrue(len(errors) + len(successes) >= 4)",
"def test_modes(self):\n step = self.run_step('S01-first.py')\n self.assertTrue(step.success)\n self.assertTrue(step.local.is_testing)\n self.assertFalse(step.local.is_interactive)\n self.assertFalse(step.local.is_single_run)",
"def assert_cmd_success_script(self, cmd_string):\n ret = compmake_main([self.root, '--nosysexit', '-c', cmd_string])\n self.assertEqual(ret, 0)",
"def test_empty_argv(self, mock_ccread):\n with self.assertRaises(SystemExit):\n self.main()",
"def test_check_process_output(self):\n workflow = self.get_workflow(\n \"\"\"file://result <- file://source\n echo test\n \"\"\")\n workflow.pre_check_processes()\n try:\n process = workflow._processes[0]\n create_tuttle_dirs()\n workflow.run_process(process)\n assert False, \"Exception has not been not raised\"\n except ResourceError:\n assert True",
"def test_valid(self):\n args = [SIMPLE_TEMPLATE, SIMPLE_CANDIDATE]\n result = self.runner.invoke(main, args)\n self.assertEqual(0, result.exit_code)",
"def test_interactive(self):\n self.executor.command(['python']).interactive()",
"def test_system_command(self):\n process = Popen(['ubus'],stdout=PIPE)\n stdout, _ = process.communicate()\n self.assertEqual(process.returncode,0)\n self.assertIn(\"This isn't the real ubus. It's a simulator\",stdout.__str__())"
] |
[
"0.6746161",
"0.6714891",
"0.661832",
"0.65914303",
"0.6589559",
"0.6565143",
"0.6559185",
"0.6456731",
"0.64326",
"0.6430399",
"0.6406299",
"0.6397966",
"0.6391126",
"0.6380993",
"0.6365406",
"0.63643396",
"0.6348042",
"0.6301243",
"0.62988365",
"0.6285747",
"0.6282001",
"0.6262056",
"0.6245659",
"0.6229932",
"0.6212877",
"0.6202356",
"0.6197043",
"0.61860365",
"0.61841255",
"0.6169082"
] |
0.68040043
|
0
|
Function for converting rainfall depth (in mm) to rainfall intensity (mm/h)
|
def depth2intensity(depth, interval=300):
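    # Depth accumulated over `interval` seconds, scaled by 3600 s/h to give mm/h.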
return depth * 3600 / interval
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def mm_to_inches(rainfall_in_mm):\r\n rainfall_in_inches = rainfall_in_mm * 0.0393701\r\n return rainfall_in_inches",
"def depth_to_ata(depth):\n return (depth / 10.0) + 1.0",
"def intensity2depth(intensity, interval=300):\n return intensity * interval / 3600",
"def _get_depth(self, data): \r\n\r\n data = data.astype(np.float32)\r\n\r\n normalized = np.dot(data, [65536.0, 256.0, 1.0]) \r\n normalized /= (256 * 256 * 256 - 1)\r\n in_meters = 1000 * normalized\r\n\r\n return in_meters",
"def molar_mass_dry_air():\n return 28.9647",
"def intensity(self) -> int:",
"def hz2mel(hz):\r\n return 2595 * np.log10(1+hz/700.0)",
"def pa_to_inches(pressure_in_pa):\r\n pressure_in_inches_of_m = pressure_in_pa * 0.02953\r\n return pressure_in_inches_of_m",
"def feet2m(feet):\n return feet * 0.3048",
"def convertDepthtomm(self,depth):\n\n depth = 2.968*10**-05*depth+0.02079*depth+0.5146\n \n return depth",
"def hz2mel(hz):\n return 2595 * np.log10(1+hz/700.)",
"def hz2mel(f):\n return 1127.01048 * np.log(f/700 +1)",
"def inches_to_mm(inches):\n\tmm=inches*25.4\n\treturn mm",
"def astrom_precision(fwhm, snr):\n result = fwhm/(snr)\n return result",
"def rad(x) :#en mm!\r\n return topdia(x)/2.0",
"def hz2mel(hz):\n return 1127 * numpy.log(1+hz/700.0)",
"def get_depth_milli_metres(self):\n self.depth = (self.get_depth_metres() * 1000).astype(np.float32)\n self.full_depth = copy.copy(self.depth)\n return self.depth",
"def hz2mel(hz):\n\treturn 2595 * numpy.log10(1 + hz / 700.0)",
"def convertToSpectroGram(self):",
"def SM2m(sm):\n return sm * 1609.344",
"def convert(temp_in_c):\n \n return temp_in_c * (9/5) + 32",
"def normalize_m11(x):\n return x / 127.5 - 1",
"def hz2mel(hz):\n return 2595 * pylab.log10(1+hz/700.0)",
"def hz2mel(hz):\n return 1127 * np.log(1 + hz / 700)",
"def get_specific_heat() -> float:\n return 1006.0",
"def get_meters_to_pixels_coefficient():\n return 1/get_pixels_to_meters_coefficient()",
"def intensity(self, value: int, /) -> None:",
"def snow_depth(lon, lat, month):\n\n im = month - 1\n\n h0 = np.array( [28.01, 30.28, 33.89, 36.80, 36.93, 36.59,\n 11.02, 4.64, 15.81, 22.66, 25.57, 26.67] )\n a = np.array( [ 0.1270, 0.1056, 0.5486, 0.4046, 0.0214, 0.7021,\n 0.3008, 0.3100, 0.2119, 0.3594, 0.1496, -0.1876] )\n b = np.array( [-1.1833, -0.5908, -0.1996, -0.4005, -1.1795, -1.4819,\n -1.2591, -0.6350, -1.0292, -1.3483, -1.4643, -1.4229] )\n c = np.array( [-0.1164, -0.0263, 0.0280, 0.0256, -0.1076, -0.1195,\n -0.0811, -0.0655, -0.0868, -0.1063, -0.1409, -0.1413] )\n d = np.array( [-0.0051, -0.0049, 0.0216, 0.0024, -0.0244, -0.0009,\n -0.0043, 0.0059, -0.0177, 0.0051, -0.0079, -0.0316] )\n e = np.array( [ 0.0243, 0.0044, -0.0176, -0.0641, -0.0142, -0.0603,\n -0.0959, -0.0005, -0.0723, -0.0577, -0.0258, -0.0029] )\n\n x = (90. - lat) * np.cos( np.radians(lon) )\n y = (90. - lat) * np.sin( np.radians(lon) )\n\n h = ( h0[im] + ( a[im] * x ) + ( b[im] * y ) + ( c[im] * x * y ) +\n ( d[im] * x * x ) + ( e[im] * y * y ) )\n\n return h",
"def referenceIllum(temp, wavelength):\n ct=temp\n if ct <= 0:\n return 0\n if ct < 4000:\n return planckian(ct, wavelength)\n if ct < 5000:\n p=planckian(ct, wavelength)\n d=dseries(ct, wavelength)\n return p+(d-p)*(ct-4000)/1500.0\n return dseries(ct, wavelength)",
"def mi_to_m(radius):\n return int(float(radius) * 1609.34)"
] |
[
"0.65863734",
"0.60612977",
"0.60021347",
"0.5857708",
"0.5759889",
"0.5749766",
"0.552827",
"0.5524878",
"0.5511584",
"0.5501646",
"0.54968464",
"0.5473868",
"0.54733264",
"0.54341406",
"0.5411911",
"0.5402018",
"0.5397092",
"0.53699905",
"0.53681594",
"0.5358648",
"0.5355544",
"0.5351409",
"0.53442127",
"0.53301084",
"0.53214794",
"0.531272",
"0.53076047",
"0.5297385",
"0.52678686",
"0.5259955"
] |
0.6329385
|
1
|
Function for converting rainfall intensity (mm/h) to rainfall depth (in mm)
|
def intensity2depth(intensity, interval=300):
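    # Intensity in mm/h held for `interval` seconds, scaled back to an accumulated depth in mm.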
return intensity * interval / 3600
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def mm_to_inches(rainfall_in_mm):\r\n rainfall_in_inches = rainfall_in_mm * 0.0393701\r\n return rainfall_in_inches",
"def _get_depth(self, data): \r\n\r\n data = data.astype(np.float32)\r\n\r\n normalized = np.dot(data, [65536.0, 256.0, 1.0]) \r\n normalized /= (256 * 256 * 256 - 1)\r\n in_meters = 1000 * normalized\r\n\r\n return in_meters",
"def depth2intensity(depth, interval=300):\n return depth * 3600 / interval",
"def depth_to_ata(depth):\n return (depth / 10.0) + 1.0",
"def convertDepthtomm(self,depth):\n\n depth = 2.968*10**-05*depth+0.02079*depth+0.5146\n \n return depth",
"def molar_mass_dry_air():\n return 28.9647",
"def get_depth_milli_metres(self):\n self.depth = (self.get_depth_metres() * 1000).astype(np.float32)\n self.full_depth = copy.copy(self.depth)\n return self.depth",
"def _read_depth(self):\n # s = openni2.wait_for_any_stream([self.depth_stream], timeout)\n # if not s:\n # return None\n # read raw uint16 buffer\n im_arr = self.depth_stream.read_frame()\n raw_buf = im_arr.get_buffer_as_uint16()\n depth_im = np.ctypeslib.as_array(raw_buf).reshape(\n (self.DEPTH_IM_HEIGHT, self.DEPTH_IM_WIDTH))\n depth_im = depth_im.astype('float64') / 1000 # 把毫米的单位转换到米\n # TODO: 不是很清楚为什么要翻转图像\n if self.flip_images:\n # 上下翻转图像\n depth_im = np.flipud(depth_im)\n else:\n # 左右翻转图像\n depth_im = np.fliplr(depth_im)\n return depth_im",
"def snow_depth(lon, lat, month):\n\n im = month - 1\n\n h0 = np.array( [28.01, 30.28, 33.89, 36.80, 36.93, 36.59,\n 11.02, 4.64, 15.81, 22.66, 25.57, 26.67] )\n a = np.array( [ 0.1270, 0.1056, 0.5486, 0.4046, 0.0214, 0.7021,\n 0.3008, 0.3100, 0.2119, 0.3594, 0.1496, -0.1876] )\n b = np.array( [-1.1833, -0.5908, -0.1996, -0.4005, -1.1795, -1.4819,\n -1.2591, -0.6350, -1.0292, -1.3483, -1.4643, -1.4229] )\n c = np.array( [-0.1164, -0.0263, 0.0280, 0.0256, -0.1076, -0.1195,\n -0.0811, -0.0655, -0.0868, -0.1063, -0.1409, -0.1413] )\n d = np.array( [-0.0051, -0.0049, 0.0216, 0.0024, -0.0244, -0.0009,\n -0.0043, 0.0059, -0.0177, 0.0051, -0.0079, -0.0316] )\n e = np.array( [ 0.0243, 0.0044, -0.0176, -0.0641, -0.0142, -0.0603,\n -0.0959, -0.0005, -0.0723, -0.0577, -0.0258, -0.0029] )\n\n x = (90. - lat) * np.cos( np.radians(lon) )\n y = (90. - lat) * np.sin( np.radians(lon) )\n\n h = ( h0[im] + ( a[im] * x ) + ( b[im] * y ) + ( c[im] * x * y ) +\n ( d[im] * x * x ) + ( e[im] * y * y ) )\n\n return h",
"def _get_depth_map_scale_subfolder(self):\n if self.im_scale <= 0.25:\n if self.im_scale <= 0.125:\n return \"Depth/0.125/\"\n else:\n return \"Depth/0.25/\"\n else: \n return \"Depth/\"",
"def intensity(self) -> int:",
"def read_depth_image(path):\n with open(path, 'rb') as f:\n depth = Image.fromarray(read_pgm(f), mode='I')\n depth = tf.keras.preprocessing.image.img_to_array(depth)\n depth = tf.squeeze(depth, axis=-1)\n\n # Rel depth -> Abs depth\n depth_params = 351.3, 1092.5\n depth = depth_params[0] / (depth_params[1] - depth)\n depth = tf.nn.relu(depth)\n\n return depth",
"def depth_conversion(point_depth, w, h, f):\n i_c = np.float(h) / 2 - 1\n j_c = np.float(w) / 2 - 1\n columns, rows = np.meshgrid(np.linspace(0, w - 1, num=w), np.linspace(0, h - 1, num=h))\n distance_from_center = ((rows - i_c) ** 2 + (columns - j_c) ** 2) ** 0.5\n return point_depth / (1 + (distance_from_center / f) ** 2) ** 0.5",
"def calculate_relative_soma_depth(data: Data) -> float:\n\n return data.relative_soma_depth",
"def pressure_to_depth(P, lat):\n a1 = 9.72659\n a2 = -2.2512e-5\n a3 = 2.279e-10\n a4 = -1.82e-15\n\n b = 1.092e-6\n\n g0 = 9.780318\n g1 = 5.2788e-3\n g2 = 2.36e-5\n\n rad = np.pi / 180.\n\n X = np.sin(lat*rad)\n X = X*X\n grav = g0 * (1.0 + (g1 + g2*X)*X) + b*P\n nom = (a1 + (a2 + (a3 + a4*P)*P)*P)*P\n\n return nom / grav",
"def convertToSpectroGram(self):",
"def hz2mel(hz):\r\n return 2595 * np.log10(1+hz/700.0)",
"def depth_to_net_dim(img, cutoff=1.0):\n assert img.shape == (480,640)\n img = depth_to_3ch(img, cutoff)\n img = depth_scaled_to_255(img)\n return img",
"def rad(x) :#en mm!\r\n return topdia(x)/2.0",
"def hz2mel(hz):\n return 2595 * np.log10(1+hz/700.)",
"def normalize_depth(val, min_v, max_v):\n return (((max_v - val) / (max_v - min_v)) * 255).astype(np.uint8)",
"def r_to_depth(x, interval):\n return x * interval / 3600.0",
"def feet2m(feet):\n return feet * 0.3048",
"def hz2mel(f):\n return 1127.01048 * np.log(f/700 +1)",
"def get_depth_pwm(self, goal):\n ddiff = goal.target_depth - self.curr_depth\n if self.curr_depth < -1:\n rospy.loginfo('Depth sensor is not initialized')\n return self.pwm_center\n zout = ddiff * self.depth_p\n # limit output if necassary\n if abs(zout) > self.depth_pmax:\n if zout < 0:\n zout = -self.depth_pmax\n else:\n zout = self.depth_pmax\n zout += self.pwm_center\n return zout",
"def model_prem(r):\n\n\t#- normalised radius\n\tx = r / 6371000.0\n\n\t#- march through the various depth levels -----------------------------------------------------\n\n\t#- upper crust\n\tif (r >= 6356000.0):\n\t\trho = 2.6\n\t\tvpv = 5.8\n\t\tvph = vpv\n\t\tvsv = 3.2\n\t\tvsh = vsv\n\t\teta = 1.0\n\n\t#- lower crust\n\telif (r >= 6346000.6) & (r < 6356000.0):\n\t\trho = 2.9\n\t\tvpv = 6.8\n\t\tvph = vpv\n\t\tvsv = 3.9\n\t\tvsh = vsv\n\t\teta = 1.0\n\n\t#- LID\n\telif (r >= 6291000.0) & (r < 6346000.6):\n\t\trho = 2.6910 + 0.6924 * x\n\t\tvpv = 0.8317 + 7.2180 * x\n\t\tvph = 3.5908 + 4.6172 * x\n\t\tvsv = 5.8582 - 1.4678 * x\n\t\tvsh = -1.0839 + 5.7176 * x\n\t\teta = 3.3687 - 2.4778 * x\n\n\t#- LVZ\n\telif (r >= 6151000.0) & (r < 6291000.0):\n\t\trho = 2.6910 + 0.6924 * x\n\t\tvpv = 0.8317 + 7.2180 * x\n\t\tvph = 3.5908 + 4.6172 * x\n\t\tvsv = 5.8582 - 1.4678 * x\n\t\tvsh = -1.0839 + 5.7176 * x\n\t\teta = 3.3687 - 2.4778 * x\n\n\t#- Transition zone 1\n\telif (r >= 5971000.0) & (r < 6151000.0):\n\t\trho = 7.1089 - 3.8045 * x\n\t\tvpv = 20.3926 - 12.2569 * x\n\t\tvph = vpv\n\t\tvsv = 8.9496 - 4.4597 * x\n\t\tvsh = vsv\n\t\teta = 1.0\n\n\t#- Transition zone 2\n\telif (r >= 5771000.0) & (r < 5971000.0):\n\t\trho = 11.2494 - 8.0298 * x\n\t\tvpv = 39.7027 - 32.6166 * x\n\t\tvph = vpv\n\t\tvsv = 22.3512 - 18.5856 * x\n\t\tvsh = vsv\n\t\teta = 1.0\n\n\t#- Transition zone 3\n\telif (r >= 5701000.0) & (r < 5771000.0):\n\t\trho = 5.3197 - 1.4836 * x\n\t\tvpv = 19.0957 - 9.8672 * x\n\t\tvph = vpv\n\t\tvsv = 9.9839 - 4.9324 * x\n\t\tvsh = vsv\n\t\teta = 1.0\n\n\t#- Lower mantle 1\n\telif (r >= 5600000.0) & (r < 5701000.0):\n\t\trho = 7.9565 - 6.4761 * x + 5.5283 * x**2 - 3.0807 * x**3\n\t\tvpv = 29.2766 - 23.6027 * x + 5.5242 * x**2 - 2.5514 * x**3\n\t\tvph = vpv\n\t\tvsv = 22.3459 - 17.2473 * x - 2.0834 * x**2 + 0.9783 * x**3\n\t\tvsh = vsv\n\t\teta = 1.0 \n\n\t#- Lower mantle 2\n\telif (r >= 3630000.0) & (r < 5600000.0):\n\t\trho = 7.9565 - 6.4761 * x + 5.5283 * x**2 - 3.0807 * x**3\n\t\tvpv = 24.9520 - 40.4673 * x + 51.4832 * x**2 - 26.6419 * x**3\n\t\tvph = vpv\n\t\tvsv = 11.1671 - 13.7818 * x + 17.4575 * x**2 - 9.2777 * x**3\n\t\tvsh = vsv\n\t\teta = 1.0\n\n\t#- Lower mantle 3\n\telif (r >= 3480000.0) & (r < 3630000.0):\n\t\trho = 7.9565 - 6.4761 * x + 5.5283 * x**2 - 3.0807 * x**3\n\t\tvpv = 15.3891 - 5.3181 * x + 5.5242 * x**2 - 2.5514 * x**3\n\t\tvph = vpv\n\t\tvsv = 6.9254 + 1.4672 * x - 2.0834 * x**2 + 0.9783 * x**3\n\t\tvsh = vsv\n\t\teta = 1.0\n\n\t#- Outer core\n\telif (r >= 1221000.5) & (r < 3480000.0):\n\t\trho = 12.5815 - 1.2638 * x - 3.6426 * x**2 - 5.5281 * x**3\n\t\tvpv = 11.0487 - 4.0362 * x + 4.8023 * x**2 - 13.5732 * x**3\n\t\tvph = vpv\n\t\tvsv = 0.0\n\t\tvsh = 0.0\n\t\teta = 1.0\n\n\t#- Inner Core\n\telif (r >= 0.0) & (r < 1221000.5):\n\t\trho = 13.0885 - 8.8381 * x**2\n\t\tvpv = 11.2622 - 6.3640 * x**2\n\t\tvph = vpv\n\t\tvsv = 3.6678 - 4.4475 * x**2\n\t\tvsh = vsv\n\t\teta = 1.0 \n\n\t#- convert to elastic parameters --------------------------------------------------------------\n\n\trho = 1000.0 * rho\n\tvpv = 1000.0 * vpv\n\tvph = 1000.0 * vph\n\tvsv = 1000.0 * vsv\n\tvsh = 1000.0 * vsh\n\n\tA = rho * vph**2\n\tC = rho * vpv**2\n\tN = rho * vsh**2\n\tL = rho * vsv**2\n\tF = eta * (A - 2 * L)\n\n\treturn rho, A, C, F, L, N",
"def hz2mel(hz):\n\treturn 2595 * numpy.log10(1 + hz / 700.0)",
"def hz2mel(hz):\n return 1127 * numpy.log(1+hz/700.0)",
"def referenceIllum(temp, wavelength):\n ct=temp\n if ct <= 0:\n return 0\n if ct < 4000:\n return planckian(ct, wavelength)\n if ct < 5000:\n p=planckian(ct, wavelength)\n d=dseries(ct, wavelength)\n return p+(d-p)*(ct-4000)/1500.0\n return dseries(ct, wavelength)",
"def hz2mel(hz):\n return 1127 * np.log(1 + hz / 700)"
] |
[
"0.62012035",
"0.62011176",
"0.60868305",
"0.60298705",
"0.5918458",
"0.57394826",
"0.57152945",
"0.56821734",
"0.5576718",
"0.55383277",
"0.5475204",
"0.5465773",
"0.54583997",
"0.5389856",
"0.5378104",
"0.5371326",
"0.5368568",
"0.5361238",
"0.5324265",
"0.53226316",
"0.5318927",
"0.5318236",
"0.529486",
"0.5280617",
"0.52652866",
"0.52184665",
"0.5211833",
"0.5195082",
"0.51815677",
"0.5174098"
] |
0.6237912
|
0
|
Allow a snapshot, a patch, or a list of patches
|
def _patches(s):
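    # Snapshot-like objects expose .patches; a bare patch is wrapped in a list;
    # anything else is assumed to already be a list of patches.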
if hasattr(s,'dict'):
pp=s.patches
elif isinstance(s,dispatch._dispatch._patch):
pp=[s]
else:
pp=s
return pp
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def set_patches(\n self, patch_list: List[PatchMetadata], patch_id_digits: int = 4\n ) -> None:\n if not patch_list:\n return\n\n if all(p.present_in_specfile for p in patch_list):\n logger.debug(\n \"All patches are present in the spec file, nothing to do here 🚀\"\n )\n return\n\n # we could have generated patches before (via git-format-patch)\n # so let's reload the spec\n self.reload()\n\n applied_patches: Dict[str, PatchObject] = {\n p.get_patch_name(): p for p in self.get_applied_patches()\n }\n\n for patch_metadata in patch_list:\n if patch_metadata.present_in_specfile:\n logger.debug(\n f\"Patch {patch_metadata.name} is already present in the spec file.\"\n )\n continue\n\n if patch_metadata.name in applied_patches:\n logger.debug(\n f\"Patch {patch_metadata.name} is already defined in the spec file.\"\n )\n continue\n\n self.add_patch(patch_metadata, patch_id_digits)",
"def __init__(self, *patch_tuples):\n self._patches = [\n Patch(patch_tuple[0], patch_tuple[1], patch_tuple[2])\n for patch_tuple in patch_tuples\n ]",
"def add_patch(self, pset, patch):\n car = patch.pop()\n if car in pset:\n sel = [ x for x in pset[car] if patch.path == x.path ]\n if sel:\n sel[0].combine(patch)\n else:\n pset[car].append(patch)\n else:\n pset[car] = [patch]",
"def test_patch_image(self):\n pass",
"def is_patch_binary_copy_modify_with_no_change(patch):\n diff_header = split_header(patch.text)[0]\n return patch.is_binary and is_copy_modify_with_no_change(diff_header)",
"def patch(cls, patch_ob_list):\n return cls._patch_avos(patch_ob_list)",
"def UploadSeparatePatches(issue, rpc_server, patchset, data, options):\r\n patches = SplitPatch(data)\r\n rv = []\r\n for patch in patches:\r\n if len(patch[1]) > MAX_UPLOAD_SIZE:\r\n print (\"Not uploading the patch for \" + patch[0] +\r\n \" because the file is too large.\")\r\n continue\r\n form_fields = [(\"filename\", patch[0])]\r\n if not options.download_base:\r\n form_fields.append((\"content_upload\", \"1\"))\r\n files = [(\"data\", \"data.diff\", patch[1])]\r\n ctype, body = EncodeMultipartFormData(form_fields, files)\r\n url = \"/%d/upload_patch/%d\" % (int(issue), int(patchset))\r\n print \"Uploading patch for \" + patch[0]\r\n response_body = rpc_server.Send(url, body, content_type=ctype)\r\n lines = response_body.splitlines()\r\n if not lines or lines[0] != \"OK\":\r\n StatusUpdate(\" --> %s\" % response_body)\r\n sys.exit(1)\r\n rv.append([lines[1], patch[0]])\r\n return rv",
"def UploadSeparatePatches(issue, rpc_server, patchset, data, options):\n patches = SplitPatch(data)\n rv = []\n for patch in patches:\n if len(patch[1]) > MAX_UPLOAD_SIZE:\n print (\"Not uploading the patch for \" + patch[0] +\n \" because the file is too large.\")\n continue\n form_fields = [(\"filename\", patch[0])]\n if not options.download_base:\n form_fields.append((\"content_upload\", \"1\"))\n files = [(\"data\", \"data.diff\", patch[1])]\n ctype, body = EncodeMultipartFormData(form_fields, files)\n url = \"/%d/upload_patch/%d\" % (int(issue), int(patchset))\n print \"Uploading patch for \" + patch[0]\n response_body = rpc_server.Send(url, body, content_type=ctype)\n lines = response_body.splitlines()\n if not lines or lines[0] != \"OK\":\n StatusUpdate(\" --> %s\" % response_body)\n sys.exit(1)\n rv.append([lines[1], patch[0]])\n return rv",
"def patches(*args):\n with cros_build_lib.ContextManagerStack() as stack:\n for arg in args:\n stack.Add(lambda ret=arg: ret)\n yield",
"def _validate_if_patch_supported(self, headers, uri):\n if not self._operation_allowed(headers, 'PATCH'):\n msg = ('PATCH Operation not supported on the resource '\n '\"%s\"' % uri)\n raise exception.IloError(msg)",
"def snapshot(*args, constructionHistory: bool=True, endTime: Union[time, bool]=None, increment:\n Union[time, bool]=None, motionTrail: bool=False, name: Union[AnyStr, bool]=\"\",\n startTime: Union[time, bool]=None, update: Union[AnyStr, bool]=\"always\", q=True,\n query=True, e=True, edit=True, **kwargs)->Union[List[AnyStr], Any]:\n pass",
"def add_snapshot(self,snapshot, spec):\n self.snaps.append(snapshot)\n self.spectrae.append(spec)",
"def registerPatches(self,pl):\n self.set('patchmesh.patches',pl)",
"def patch_files():\n args = parser.parse_args()\n doc = json.load(args.ORIGINAL)\n patch = json.load(args.PATCH)\n result = jsonpatch.apply_patch(doc, patch)\n print(json.dumps(result, indent=args.indent))",
"def do_genpatch(self, argv):\n #TODO:\n # - Would an optional [<files> ...] argument be useful or is\n # that overkill? E.g. 'p4 genpatch ./...' (I think that that\n # would be very useful.\n # - Could add '-f' option to only warn on 'out of sync'.\n # - Could add '-d<flag>' option to control to diff format.\n # Context and unified allowed.\n # - Handling binary files that cannot be diff'd\n # - Option to be able to control the base dir so the patch -p#\n # number can be controlled. Dunno what form that should\n # take.\n\n # Process options.\n diffFormat = 'u'\n if diffFormat == 'u':\n prefixes = ('---', '+++')\n elif diffFormat == 'c':\n prefixes = ('***', '---')\n\n # Process args.\n if not argv[1:]:\n change = 'default'\n elif len(argv[1:]) == 1:\n change = argv[1]\n try:\n change = int(change)\n except ValueError: \n # Stupidly, p4win's new Tool %c interpolation will use\n # \"Default\", on which the normal p4.exe client will die.\n change = change.lower()\n if change != 'default':\n sys.stderr.write(\"Invalid changelist number '%s'.\\n\"\\\n % change)\n return 1\n else:\n sys.stderr.write(\"Usage: genpatch [<changelist#>]\\n\")\n sys.stderr.write(\"Missing/wrong number of arguments.\\n\")\n return 1\n\n # Validate the given change number.\n p4 = p4lib.P4( **p4lib.parseOptv(self.__p4optv) )\n submitted = [c['change'] for c in p4.changes(status='submitted')]\n pending = [c['change'] for c in p4.changes(status='pending')]\n if change in submitted:\n status = 'submitted'\n elif change in pending+['default']:\n status = 'pending'\n else:\n sys.stderr.write(\"Change %s unknown.\" % change)\n return 1\n\n # Get list of files to include in patch.\n if status == 'submitted':\n d = p4.describe(change, diffFormat='u')\n desc = d['description']\n files = d['files']\n diffs = d['diff']\n elif status == 'pending':\n files = p4.opened(change=change)\n if change == 'default':\n desc = None\n else:\n desc = p4.change(change=change)['description']\n if files:\n diffs = p4.diff([f['depotFile'] for f in files],\n diffFormat='u')\n else:\n diffs = []\n\n # Make a single string from 'diffs' with appropriate delimiters\n # for the \"patch\" program.\n diffstr = ''\n timestamp = time.asctime()\n for diff in diffs:\n # Perforce std header, e.g.:\n # ==== //depot/apps/px/ReadMe.txt#5 (text) ====\n # or\n # ==== //depot/foo.doc#42 - c:\\trentm\\foo.doc ==== (binary)\n if diff.has_key('localFile'):\n diffstr += \"==== %(depotFile)s#%(rev)s - %(localFile)s ====\"\\\n % diff\n if diff['binary']:\n diffstr += \" (binary)\"\n diffstr += \"\\n\"\n else:\n diffstr += \"==== %(depotFile)s#%(rev)s (%(type)s) ====\\n\"\\\n % diff\n # Patch header, e.g. 
for unified diffs:\n # Index: apps/px/test/ToDo.txt\n # --- apps/px/test/ToDo.txt.~1~ Fri May 31 21:17:17 2002\n # +++ apps/px/test/ToDo.txt Fri May 31 21:17:17 2002\n # or for context diffs:\n # Index: apps/px/test/ToDo.txt\n # *** apps/px/test/ToDo.txt.~1~ Fri May 31 21:26:47 2002\n # --- apps/px/test/ToDo.txt Fri May 31 21:26:47 2002\n fname = diff['depotFile'][len('//depot/'):]\n\n if diff.has_key('text'):\n diffstr += \"Index: %s\\n\" % fname\n diffstr += \"%s %s.~1~\\t%s\\n\" % (prefixes[0], fname, timestamp)\n diffstr += \"%s %s\\t%s\\n\" % (prefixes[1], fname, timestamp)\n # The diff text.\n diffstr += ''.join(diff['text'])\n if diffstr[-1] != '\\n':\n diffstr += \"\\n\\\\ No newline at end of file\\n\"\n\n # Inline added files into the diff.\n addedfiles = [f for f in files if f['action'] in ('add', 'branch')]\n for f in addedfiles:\n # May have to get file type from 'p4 files'.\n if status == 'submitted':\n f['type'] = p4.files(f['depotFile'])[0]['type']\n # Skip file if it is binary.\n if f['type'].startswith('binary'):\n log.warn(\"Cannot inline '%s' because it is binary.\"\\\n % f['depotFile'])\n continue\n # Get the file contents.\n if status == \"pending\":\n # Read the file contents from disk.\n localFile = p4.where(f['depotFile'])[0]['localFile']\n if not os.path.exists(localFile):\n continue\n lines = open(localFile, 'r').readlines()\n else:\n # Get the file contents via 'p4 print'.\n fnameRev = \"%s#%s\" % (f['depotFile'], f['rev'])\n lines = p4.print_(fnameRev)[0]['text'].split('\\n')\n if not lines[-1]: lines = lines[:-1] # drop empty last line\n lines = [line+'\\n' for line in lines]\n # Inline the file.\n diffstr += \"\\n==== %(depotFile)s#%(rev)s (%(type)s) ====\\n\" % f\n if len(lines) < 2:\n ln = \"\"\n else:\n ln = \",\" + str(len(lines))\n fname = f['depotFile'][len('//depot/'):]\n diffstr += \"Index: %s\\n\" % fname\n diffstr += \"%s %s.~1~\\t%s\\n\" % (prefixes[0], fname, timestamp)\n diffstr += \"%s %s\\t%s\\n\" % (prefixes[1], fname, timestamp)\n diffstr += \"@@ -0,0 +1%s @@\\n\" % ln\n diffstr += '+' + '+'.join(lines)\n if diffstr[-1] != '\\n':\n diffstr += \"\\n\\\\ No newline at end of file\\n\"\n \n if diffstr: # std patch terminator\n diffstr += \"End of Patch.\"\n\n patch = p4lib.makeForm(description=desc, files=files,\n differences=diffstr)\n if patch: # ViM-specific hack to have it colorize patches as diffs.\n patch = \"diff\\n\" + patch\n\n sys.stdout.write(patch)",
"def test_patch_info(self):\n patch_info = versions.PatchInfo(iiq_dir='/some/path',\n patches_dir='/another/path',\n is_installed=False,\n specific_patch='this_patch',\n readme='data from readme',\n all_patches=('patch1', 'patch2'))\n self.assertTrue(isinstance(patch_info, versions._PatchInfo))",
"def export_patches(self):\n filename = tkFileDialog.asksaveasfilename(initialdir = self.cwd, title = \"Save patches\", filetypes = ((\"inp file\",\"*.inp\"),(\"all files\",\"*.*\")))\n if filename:\n patches = self.myGlycosylator.export_patches(self.linked_glycans)\n with open(filename, 'w') as f:\n f.write('\\n'.join(patches))",
"def assignPatches(stars, visit, nPatches=16, radiusFoV=1.8):\n maxx, maxy = gnomonic_project_toxy(0., np.radians(radiusFoV), 0., 0.)\n nsides = nPatches**0.5\n\n # This should move all coords to 0 < x < nsides-1\n px = np.floor((stars['x'] + maxy)/(2.*maxy)*nsides)\n py = np.floor((stars['y'] + maxy)/(2.*maxy)*nsides)\n\n stars['subPatch'] = px + py*nsides\n stars['patchID'] = stars['subPatch'] + visit['visitID']*nPatches\n return stars",
"def cmd_apply_patch(patchfile):\n return ['git', 'apply', patchfile]",
"def AutoPatch(self, to_patch):\n for item in to_patch:\n self.PatchObject(*item, autospec=True)",
"def test_patch(self, patch):\n self.clean()\n error = self.apply_patch(patch)\n diff = self.run(['git', 'diff', 'origin/master'])\n self.clean()\n if error != '':\n return False, error\n if diff == '':\n # No error message is returned for empty diff. The patch might be\n # empty or has been exported.\n return False, ''\n return True, ''",
"def check_checkpatch(project, commit, _desc, diff, options=None):\n tool = get_helper_path('checkpatch.pl')\n cmd = ([tool, '-', '--root', project.dir] +\n options.args(('--ignore=GERRIT_CHANGE_ID',), diff))\n return _check_cmd('checkpatch.pl', project, commit, cmd,\n input=rh.git.get_patch(commit))",
"def _makeComponentPatch(component, position, cold):\n x = position[0]\n y = position[1]\n\n if isinstance(component, Helix):\n blockPatch = matplotlib.patches.Wedge(\n (\n x\n + component.getDimension(\"helixDiameter\", cold=cold)\n / 2\n * math.cos(math.pi / 6),\n y\n + component.getDimension(\"helixDiameter\", cold=cold)\n / 2\n * math.sin(math.pi / 6),\n ),\n component.getDimension(\"od\", cold=cold) / 2,\n 0,\n 360,\n width=(component.getDimension(\"od\", cold=cold) / 2)\n - (component.getDimension(\"id\", cold=cold) / 2),\n )\n elif isinstance(component, Circle):\n\n blockPatch = matplotlib.patches.Wedge(\n (x, y),\n component.getDimension(\"od\", cold=cold) / 2,\n 0,\n 360,\n width=(component.getDimension(\"od\", cold=cold) / 2)\n - (component.getDimension(\"id\", cold=cold) / 2),\n )\n elif isinstance(component, Hexagon):\n if component.getDimension(\"ip\", cold=cold) != 0:\n innerPoints = numpy.array(\n hexagon.corners(30) * component.getDimension(\"ip\", cold=cold)\n )\n outerPoints = numpy.array(\n hexagon.corners(30) * component.getDimension(\"op\", cold=cold)\n )\n blockPatch = []\n for n in range(6):\n corners = [\n innerPoints[n],\n innerPoints[(n + 1) % 6],\n outerPoints[(n + 1) % 6],\n outerPoints[n],\n ]\n patch = matplotlib.patches.Polygon(corners, fill=True)\n blockPatch.append(patch)\n else:\n # Just make it a hexagon...\n blockPatch = matplotlib.patches.RegularPolygon(\n (x, y), 6, component.getDimension(\"op\", cold=cold) / math.sqrt(3)\n )\n\n elif isinstance(component, Rectangle):\n if component.getDimension(\"widthInner\", cold=cold) != 0:\n innerPoints = numpy.array(\n [\n [\n x + component.getDimension(\"widthInner\", cold=cold) / 2,\n y + component.getDimension(\"lengthInner\", cold=cold) / 2,\n ],\n [\n x + component.getDimension(\"widthInner\", cold=cold) / 2,\n y - component.getDimension(\"lengthInner\", cold=cold) / 2,\n ],\n [\n x - component.getDimension(\"widthInner\", cold=cold) / 2,\n y - component.getDimension(\"lengthInner\", cold=cold) / 2,\n ],\n [\n x - component.getDimension(\"widthInner\", cold=cold) / 2,\n y + component.getDimension(\"lengthInner\", cold=cold) / 2,\n ],\n ]\n )\n\n outerPoints = numpy.array(\n [\n [\n x + component.getDimension(\"widthOuter\", cold=cold) / 2,\n y + component.getDimension(\"lengthOuter\", cold=cold) / 2,\n ],\n [\n x + component.getDimension(\"widthOuter\", cold=cold) / 2,\n y - component.getDimension(\"lengthOuter\", cold=cold) / 2,\n ],\n [\n x - component.getDimension(\"widthOuter\", cold=cold) / 2,\n y - component.getDimension(\"lengthOuter\", cold=cold) / 2,\n ],\n [\n x - component.getDimension(\"widthOuter\", cold=cold) / 2,\n y + component.getDimension(\"lengthOuter\", cold=cold) / 2,\n ],\n ]\n )\n blockPatch = []\n for n in range(4):\n corners = [\n innerPoints[n],\n innerPoints[(n + 1) % 4],\n outerPoints[(n + 1) % 4],\n outerPoints[n],\n ]\n patch = matplotlib.patches.Polygon(corners, fill=True)\n blockPatch.append(patch)\n else:\n # Just make it a rectangle...\n blockPatch = matplotlib.patches.Rectangle(\n (\n x - component.getDimension(\"widthOuter\", cold=cold) / 2,\n y - component.getDimension(\"lengthOuter\", cold=cold) / 2,\n ),\n component.getDimension(\"widthOuter\", cold=cold),\n component.getDimension(\"lengthOuter\", cold=cold),\n )\n if isinstance(blockPatch, list):\n return blockPatch\n return [blockPatch]",
"def register_patch(self, file_id, raw_patch) -> None:\r\n if file_id in self.patch_history:\r\n self.patch_history[file_id].append(raw_patch)",
"def test_normalize_patch_with_modified_symlink(self):\n self.assertEqual(\n self.tool.normalize_patch(\n patch=(\n b'diff --git a/test b/test\\n'\n b'index abc1234..def4567 120000\\n'\n b'--- a/test\\n'\n b'+++ b/test\\n'\n b'@@ -1,1 +1,1 @@\\n'\n b'-old_target\\n'\n b'\\\\ No newline at end of file'\n b'+new_target\\n'\n b'\\\\ No newline at end of file'\n ),\n filename='test',\n revision='abc1234'),\n (\n b'diff --git a/test b/test\\n'\n b'index abc1234..def4567 100000\\n'\n b'--- a/test\\n'\n b'+++ b/test\\n'\n b'@@ -1,1 +1,1 @@\\n'\n b'-old_target\\n'\n b'\\\\ No newline at end of file'\n b'+new_target\\n'\n b'\\\\ No newline at end of file'\n ))",
"def patchset(request):\n patchsets = request.issue.get_patchset_info(\n request.user, request.patchset.key.id())\n for ps in patchsets:\n if ps.key.id() == request.patchset.key.id():\n patchset = ps\n return respond(request, 'patchset.html',\n {'issue': request.issue,\n 'patchset': request.patchset,\n 'patchsets': patchsets,\n 'is_editor': request.issue.edit_allowed,\n })",
"def apply_patch_on_the_image(img, patch, count=5, offset=150):\n mask = np.zeros(shape=img.shape)\n boxes = []\n prev = (0, 0)\n gen = gencoordinates(img.shape[0], img.shape[1])\n for i in range(count):\n rnd = random.choice([x for x in range(100)])\n x_offset = rnd + patch.shape[0]\n y_offset = rnd + patch.shape[1]\n x_offset += prev[0]\n y_offset += prev[1]\n if y_offset < patch.shape[1]:\n y_offset = patch.shape[1]\n if x_offset < patch.shape[0]:\n x_offset = patch.shape[0]\n img[y_offset:y_offset+patch.shape[0], x_offset:x_offset+patch.shape[1]] = patch\n mask[y_offset:y_offset+patch.shape[0], x_offset:x_offset+patch.shape[1]] = 1\n boxes.append((y_offset, patch.shape[0], x_offset, patch.shape[1]))\n prev = (x_offset, y_offset)\n return img, mask, boxes",
"def addPatch(self,p):\n indexPatch = [self.register(v) for v in p]\n self.get('patchmesh.patches').append(indexPatch)",
"def _extract_patches(img, patch_s):\n def np_extract_patches(img):\n orig = np.array(img.shape[:2])\n new = patch_s[0] * np.ceil(orig / patch_s[0]).astype(int)\n points = new - orig\n img = np.pad(img, [(0, points[0]), (0, points[1]), (0, 0)],\n mode='constant')\n patches = view_as_blocks(img, tuple(patch_s)).astype(np.float32)\n patches = patches.reshape(-1, *patch_s)\n return patches\n\n patches = tf.numpy_function(np_extract_patches, [img], tf.float32)\n return patches",
"def __select_valid_patches(self, patch_info_list, has_output_info=True):\n down_sample_ratio = self.wsi_mask.shape[0] / self.wsi_proc_shape[0]\n selected_indices = []\n for idx in range(patch_info_list.shape[0]):\n patch_info = patch_info_list[idx]\n patch_info = np.squeeze(patch_info)\n # get the box at corresponding mag of the mask\n if has_output_info:\n output_bbox = patch_info[1] * down_sample_ratio\n else:\n output_bbox = patch_info * down_sample_ratio\n output_bbox = np.rint(output_bbox).astype(np.int64)\n # coord of the output of the patch (i.e center regions)\n output_roi = self.wsi_mask[\n output_bbox[0][0] : output_bbox[1][0],\n output_bbox[0][1] : output_bbox[1][1],\n ]\n if np.sum(output_roi) > 0:\n selected_indices.append(idx)\n sub_patch_info_list = patch_info_list[selected_indices]\n return sub_patch_info_list"
] |
[
"0.5694723",
"0.5471057",
"0.546513",
"0.53766376",
"0.5329677",
"0.5255941",
"0.5181168",
"0.51639986",
"0.5138205",
"0.5134213",
"0.5023581",
"0.5022298",
"0.5013435",
"0.5005982",
"0.4865467",
"0.48619777",
"0.48432985",
"0.4824362",
"0.48217645",
"0.4808044",
"0.48046425",
"0.48036116",
"0.47779286",
"0.4768217",
"0.476351",
"0.47581998",
"0.4745364",
"0.47443864",
"0.4738127",
"0.47302082"
] |
0.57809645
|
0
|
Tests all possible keys and returns keys with the highest likelihood of being the encryption key.
|
def test_possible_keys(ciphertext):
    top_keys = []
    # Try every possible single-byte XOR key (0-255 inclusive).
    for num in range(256):
        decrypted_str = xor_decrypt(ciphertext, num)
        # Keep keys whose decrypted output scores as plausible plaintext.
        score = count_score(decrypted_str)
        if score > 80:
            top_keys.append(chr(num))
    return top_keys
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def keysAll():",
"def get_all_cipher():\n return OpenSSL.cipher_algo.keys()",
"def generate_keys(self):\n self.keys = []\n key = string_to_bit_array(self.passwd)\n key = self.permutation(key, CP_1) # Perform initial permutation on the key\n g, d = split_into_n(key, 28) # Split into g (LEFT) & d (RIGHT)\n for i in range(16): # Apply the 16 rounds\n g, d = self.shift(g, d, ROUND_KEY_SHIFT[i]) # Shift the key according to the round\n tmp = g + d # Merge them\n self.keys.append(self.permutation(tmp, CP_2)) # Perform the permutation to get the Ki",
"def caesar_breaker_brute_force(ciphertext: str, dictionary: tp.Set[str]) -> int:\n best_shift = 0\n lst = []\n for key in dictionary:\n for shift in range(26):\n plaintext = decrypt_caesar(ciphertext, shift)\n if key == plaintext:\n lst.append(shift)\n for i in range(26):\n if best_shift < lst.count(i):\n best_shift = i\n return best_shift",
"def AllKeys(self) -> _n_0_t_1[str]:",
"def all_keys(self):\n return self.derivable_keys() + self.loadable_keys()",
"def expected_log_keys(learner: adaptive.BaseLearner) -> list[str]:\n # Check if the result contains the expected keys\n expected_keys = [\n \"elapsed_time\",\n \"overhead\",\n \"npoints\",\n \"cpu_usage\",\n \"mem_usage\",\n ]\n if not _at_least_adaptive_version(\"0.16.0\", raises=False) and not isinstance(\n learner,\n adaptive.SequenceLearner,\n ):\n # The loss cache for SequenceLearner was introduced in adaptive 0.16.0\n # see https://github.com/python-adaptive/adaptive/pull/411\n expected_keys.append(\"latest_loss\")\n return expected_keys",
"def extremely_stupid_naive_brute_force_crap():\n keystrokes = [l.strip() for l in open(\"keylog.txt\")]\n for i in range(1000, 10000000):\n if i % 10000 == 0:\n print i\n password = str(i)\n if all(is_subsequence(password, keys) for keys in keystrokes):\n print password\n break",
"def generate_keys(self):\n\n\t\tcondition = False\n\t\t\n\t\t\t\n\t\twhile (not condition) :\n\t\t\t# step 1 : chose random primary numbers p and q\n\t\t\tn = generate_prime(self.min_bound,self.max_bound)\n\t\t\tself._p = n\n\t\t\tn = generate_prime(self.min_bound,self.max_bound)\n\t\t\twhile(n == self._p):\n\t\t\t\tn = generate_prime(self.min_bound,self.max_bound)\n\t\t\tself._q = n\n\n\t\t\t#step 2 : compute n = pq\n\t\t\tself.n = self._p * self._q\n\t\t\t\n\t\t\ta = find_invpow(self.n,4) // 3\n\t\t\tcondition = (self._p > self._q) and (self._p < 2 * self._q)\n\t\t\tif (not condition) :\n\t\t\t\tcontinue\n\n\t\t\tprint(\"step one OK\")\n\n\t\t\t#step 3 : compute phi(n)\n\t\t\tself._phi = (self._p - 1) * (self._q - 1)\n\n\t\t\t#step 4 : chose the exponent\n\t\t\tn = randint(100,a)\n\t\t\twhile (gcd(self._phi,n) != 1):\n\t\t\t\tn = randint(100,self._phi)\n\t\t\tself._d = n\n\n\t\t\t#step 5 : compute d (private key)\n\t\t\tself.e = euclide_algorithm(self._d, self._phi)[\"U\"] % self._phi\n\n\t\t\tcondition = (self._d < a)\n\n\t\tprint(\"p = \", self._p)\n\t\tprint(\"q = \", self._q)\n\t\tprint(\"d = \", self._d)",
"def caesar_breaker_brute_force(ciphertext: str, dictionary: tp.Set[str]) -> int:\n best_shift = 0\n for i in range(26):\n if decrypt_caesar(ciphertext, i) in dictionary:\n best_shift = i\n return best_shift",
"def test_keys(self):\n storage = Storage()\n keys_to_set = {'1': 'hello',\n '2': 'bye',\n '3': [1, 2, 'three'],\n '4': {1: 'one', 2: 'two'},\n 'a': 1,\n 'aa': 2,\n 'abc':3,\n 'hello':4}\n for key in keys_to_set:\n storage.set(key, keys_to_set[key])\n\n pattern_answers = {'?': ['1','2','3','4','a'],\n '*': list(keys_to_set.keys()),\n '[13]': ['1', '3'],\n '[^a]': ['1','2','3','4'],\n '[1-3]': ['1','2','3'],\n '?[ae]*': ['aa', 'hello']}\n for pattern in pattern_answers:\n self.assertEqual(pattern_answers[pattern],\n storage.keys(pattern), f'For pattern \"{pattern}\" expected {pattern_answers[pattern]}.')",
"def get_asymm_keys(parameters):\n\tprivate_key = parameters.generate_private_key()\n\treturn private_key,private_key.public_key()",
"def checkKeys( ):\n\n if (HMACKey is None) or (AESKey is None):\n loadKeys()\n\n if (int(time.time()) - creationTime) > const.KEY_ROTATION_TIME:\n rotateKeys()",
"def test_encrypted_entropy_higher():\n unzipped_folder_loc = os.path.join(\n os.path.dirname(os.path.abspath(__file__)), \"data\", \"not_encrypted\"\n )\n unzipped_file_loc = os.path.join(unzipped_folder_loc, \"s1.csv\")\n zipped_folder_loc = os.path.join(\n os.path.dirname(os.path.abspath(__file__)), \"data\", \"encrypted\"\n )\n zipped_file_loc = os.path.join(zipped_folder_loc, \"s1.zip\")\n\n assert os.path.exists(unzipped_file_loc)\n assert os.path.exists(zipped_file_loc)\n\n ent = NotEncrypted()\n\n os.environ[\"OUTPUTS\"] = unzipped_folder_loc\n unzipped_is_valid = ent()\n unzipped_entropies = ent.results\n\n os.environ[\"OUTPUTS\"] = zipped_folder_loc\n zipped_is_valid = ent()\n zipped_entropies = ent.results\n\n assert all(\n e1.entropy < e2.entropy for e1, e2 in zip(unzipped_entropies, zipped_entropies)\n )",
"def generate_keys(self):\n\n\t\tmin_ext = 1 << self.size_ext - 1\n\t\tmax_ext = 1 << self.size_ext\n\t\t\n\t\t\t\n\t\t# step 1 : chose random primary numbers p and q\n\t\tn = generate_prime(self.min_bound,self.max_bound)\n\t\tself._p = n\n\t\tn = generate_prime(self.min_bound,self.max_bound)\n\t\twhile(n == self._p):\n\t\t\tn = generate_prime(self.min_bound,self.max_bound)\n\t\tself._q = n\n\n\t\t#step 2 : compute n = pq\n\t\tself.n = self._p * self._q\n\n\t\t#step 3 : compute phi(n)\n\t\tself._phi = (self._p - 1) * (self._q - 1)\n\n\t\t#step 4 : chose the exponent\n\t\textension = randint(min_ext,max_ext) << math.ceil(math.log2(self.D))\n\t\textension = extension + self.D\n\t\twhile (gcd(self._phi,n) != 1):\n\t\t\textension = randint(min_ext,max_ext) << math.ceil(math.log2(self.D))\n\t\tself._d = extension\n\n\t\t#step 5 : compute d (private key)\n\t\tself.e = euclide_algorithm(self._d, self._phi)[\"U\"] % self._phi\n\n\t\tprint(\"p = \", self._p)\n\t\tprint(\"q = \", self._q)\n\t\tprint(\"d = \", self._d)",
"def test_main_multiple_keys(self):\n args = [\n \"--layout\",\n self.layout_double_signed_path,\n \"--layout-keys\",\n self.alice_path,\n self.danny_path,\n \"--key-types\",\n \"rsa\",\n \"ed25519\",\n ]\n self.assert_cli_sys_exit(args, 0)",
"def test_main_multiple_keys(self):\n args = [\n \"--layout\",\n self.layout_double_signed_path,\n \"--layout-keys\",\n self.alice_path,\n self.danny_path,\n \"--key-types\",\n \"rsa\",\n \"ed25519\",\n ]\n self.assert_cli_sys_exit(args, 0)",
"def excessive_force(file_set):\r\n total_vals = []\r\n for a in file_set:\r\n local_vals = []\r\n if len(a)%2!=0: a = a[:-1]\r\n b = brute_force(a);\r\n local_vals = list(b); #get the keys.\r\n for a_ in (local_vals):\r\n if char_scoring(b[a_],'ascii',threshold = .97):\r\n total_vals.append((a_,b[a_],file_set.index(a)));\r\n #for b in total_vals:\r\n # if total_vals.index(b)%3==0: input()\r\n # print(b)\r\n #input()\r\n # now - I need to run the data from excessive force into a separate value system - getting the \r\n # output that makes the most sense.\r\n # guessed-key(used val?), decrypted value, global position.\r\n return total_vals;",
"def get_keys():\n SCALE_DICT = {\n 'major': [2,2,1,2,2,2,1],\n 'minor':[2,1,2,2,1,2,2],\n 'chrom':[1,1,1,1,1,1,1,1,1,1,1,1],\n 'ionanian':[2,2,1,2,2,2,1],\n 'dorian':[2,1,2,2,2,1,2],\n 'phrygian':[1,2,2,2,1,2,2],\n 'lydian':[2,2,2,1,2,2,1],\n 'mixolydian':[2,2,1,2,2,1,2],\n 'aeolian':[2,1,2,2,1,2,2],\n 'locrian':[1,2,2,1,2,2,2],\n 'minor_pent':[3,2,2,3,2],\n 'major_pent':[2,2,3,2,3],\n 'pent_6':[2,2,3,1,3],\n 'pent_2':[1,3,3,2,3],\n 'pent_3':[2,1,4,2,3],\n 'pent_5':[2,2,2,3,3],\n 'mixo_pent':[2,2,3,3,2],\n 'phryg_pent':[1,2,3,1,3],\n 'dim_pent':[2,1,3,1,3],\n 'blues':[3,2,1,1,3,2],\n 'harmonic_minor':[2,1,2,2,1,3,2],\n 'melodic_mimnor':[2,1,2,2,1,3,2],\n 'whole_tone':[2,2,2,2,2,2],\n 'whole_half':[2,1,2,1,2,1,2,1],\n 'half_whole':[1,2,1,2,1,2,1,2],\n 'lydian_flat7':[2,2,2,1,2,1,2]\n }\n\n return SCALE_DICT",
"def test_bip341_tweak(self):\n\n pubkey_cases = []\n mc = lambda h: (None, 0) if h is None else make_cbuffer(h)\n for i in range(len(JSON['scriptPubKey'])):\n case = JSON['scriptPubKey'][i]\n inter = case['intermediary']\n pubkey_cases.append((mc(case['given']['internalPubkey']),\n mc(inter['merkleRoot']), utf8(inter['tweakedPubkey'])))\n\n bytes_out, out_len = make_cbuffer('00'*33)\n for case in pubkey_cases:\n ((pub_key, pub_key_len), (merkle, merkle_len), expected) = case\n args = [pub_key, pub_key_len, merkle, merkle_len, 0, bytes_out, out_len]\n ret = wally_ec_public_key_bip341_tweak(*args)\n self.assertEqual(ret, WALLY_OK)\n self.assertEqual(expected, h(bytes_out[1:out_len]))\n\n privkey_cases = []\n mc = lambda h: (None, 0) if h is None else make_cbuffer(h)\n for i in range(len(JSON['keyPathSpending'][0]['inputSpending'])):\n case = JSON['keyPathSpending'][0]['inputSpending'][i]\n inter, given = case['intermediary'], case['given']\n privkey_cases.append((mc(given['internalPrivkey']),\n mc(given['merkleRoot']), utf8(inter['tweakedPrivkey'])))\n\n bytes_out, out_len = make_cbuffer('00'*32)\n for case in privkey_cases:\n ((priv_key, priv_key_len), (merkle, merkle_len), expected) = case\n args = [priv_key, priv_key_len, merkle, merkle_len, 0, bytes_out, out_len]\n ret = wally_ec_private_key_bip341_tweak(*args)\n self.assertEqual(ret, WALLY_OK)\n self.assertEqual(expected, h(bytes_out[:out_len]))\n\n # FIXME: Add invalid arguments cases for pub/priv keys",
"def test_endecrypt():\n\n e, d, c = keygen()\n\n test_encryption(e, c)\n test_decryption(d, c)\n key_cracker(e, c)",
"def check_keys(self):",
"def test_get(self):\n key = self.key_gen.get()\n key2 = self.key_gen.get()\n\n self.assertEqual(key, key2 - 1)",
"def combine_keys(*keys: bytes) -> bytes:\n key = hashlib.sha3_512(keys[0]).digest()\n for k in keys[1:]:\n next_key = hashlib.sha3_512(k).digest()\n\n key = bytes([\n a ^ b\n for (a, b)\n in zip(key, next_key)\n ])\n return key",
"def findBestPossibleCandidate(encryptedMessages):\n\n candidatesForSingleByteXOR = []\n for encryptedMessage in encryptedMessages:\n decryptedMessage = decryptMessage(encryptedMessage)[0] # Only the most likely message is stored\n # The below check cannot be used as the correct output consists of line feed which is not a printable character. This leads to the wrong answer.\n # messageString = convertBytesToString(message[0]) \n # if messageString.isprintable(): \n decryptedMessage[\"encryptedMessage\"] = encryptedMessage\n candidatesForSingleByteXOR.append(decryptedMessage)\n candidatesForSingleByteXOR.sort(key = lambda x:x[\"score\"])\n return candidatesForSingleByteXOR[0]",
"def key_generator(self, key_size=512) -> (int, int, int):\n self.key_size = key_size\n p = number.getPrime(key_size)\n q = number.getPrime(key_size)\n N = p * q\n phi_N = (p - 1) * (q - 1)\n e = None\n while True:\n e = random.randrange(2 ** (key_size - 1), 2 ** key_size)\n if self.__gcd(e, phi_N)[0] == 1:\n break\n if e is None:\n raise Exception('not find public key')\n\n # print(f'e = {e}')\n d = 0\n try:\n d = self.__mod_inv(e, phi_N)\n except Exception as e:\n raise Exception('not find private key')\n # print(f'd = {d}')\n return e, d, N",
"def guess_key_length(self, min_len=1, max_len=9, display=False):\n\n res = {}\n max_ic = 0\n probable_key_length = 0\n # We try different key lengths\n for i in range(min_len, max_len+1):\n\n if self._len < i*2:\n continue\n ics = []\n for j in range(i):\n var = []\n for k in range(self._len//i):\n var.append(self._s[k*i + j])\n text = VigenereLikeCipher(''.join(var))\n ics.append(text.get_ic())\n total_ic = round(sum(ics)/len(ics),4)\n if total_ic > max_ic:\n max_ic = total_ic\n probable_key_length = i\n res[i] = total_ic\n if display:\n print \"\\n[+] Visual key length IC correspondance\"\n for k,v in res.items():\n v = int(round(v*1000,0))\n print str(k) + (int(math.floor(math.log10(len(res))))+1-len(str(k)))*\" \",\n print ''.join(['|' for i in range(v//2)])\n print \"\"\n return probable_key_length",
"def test_rekey_defaults(self, settings):\n old_key = b'0' * 32\n new_key = b'1' * 32\n\n settings.CHITON_ENCRYPTION_KEY = new_key\n settings.CHITON_PREVIOUS_ENCRYPTION_KEY = old_key\n\n encrypted = encrypt('message', key=old_key)\n rekeyed = rekey(encrypted)\n\n assert decrypt(rekeyed) == 'message'",
"def get_base(ciphertext):\n # 10+6\n b16 = string.digits + 'ABCDEF'\n # 26+6+1\n b32 = string.ascii_uppercase + '234567='\n # 10+26*2+2+1\n b64 = string.digits + string.letters + '+/='\n\n bdict = {'64': b64, '32': b32, '16': b16}\n all_char = set(ciphertext)\n\n for key in sorted(bdict.keys()):\n if all_char.issubset(bdict[key]):\n return key",
"def guess_keychar (msg):\n maxkey = maxval = 0\n for key in string.printable:\n value = string_rank (xorstr (key, msg))\n if value > maxval:\n maxval = value\n maxkey = key\n return (maxkey,maxval)"
] |
[
"0.65380496",
"0.6043533",
"0.598087",
"0.5903265",
"0.57908475",
"0.5769569",
"0.5745124",
"0.5694577",
"0.5693339",
"0.5679689",
"0.567391",
"0.5673541",
"0.56516474",
"0.5635523",
"0.56282777",
"0.5624447",
"0.5624447",
"0.5583392",
"0.55720687",
"0.55549103",
"0.5538926",
"0.5534254",
"0.55202615",
"0.547427",
"0.5464007",
"0.5448519",
"0.544656",
"0.54273915",
"0.5420866",
"0.5410783"
] |
0.68798894
|
0
|
Given a `query_dict`, will attempt to return a xapian.Query
|
def parse_query(self, query_dict):
if query_dict is None:
return xapian.Query('') # Match everything
elif query_dict == {}:
return xapian.Query() # Match nothing
query_tree = self.build_query_tree(query_dict)
return query_tree.to_query(self.schema, self.database)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def _adapt_query(self, translator, query):\n if isinstance(query, dict):\n return self._adapt_dict_query(translator, query)\n else:\n return self._adapt_advancedQuery_query(translator, query)",
"def to_object(cls, query_dict: Dict):\n pass",
"def find_non_default_query(query):\n nq = deepcopy(query)\n keys = nq.keys()\n\n def del_kk(k, m):\n if not m[k]:\n print(\"Deleting %s[%s]\" % (m, k))\n del m[k]\n return\n if k == 'page' and m[k] and m[k] == 1:\n del m[k]\n return\n if k == 'type' and m[k] and m[k] == 'all':\n del m[k]\n return\n if k == 'order':\n del m[k]\n return\n if k == 'group_concatenator':\n del m[k]\n return\n if k == 'and_or_filter_on':\n del m[k]\n return\n if k == 'sub_tab':\n del m[k]\n return\n if k == 'search_method' and m[k] == 'default':\n del m[k]\n return\n\n for k in keys:\n print(\"Trying key k %s\" % k)\n v = nq[k]\n if k == 'filters':\n print(\"Now, looking at %s\" %v)\n kn = v.keys()\n print(\"Now, keys are %s\" % kn)\n for kk in kn:\n del_kk(kk, v)\n\n del_kk(k, nq)\n print(\"Final query left: %s\" % nq)\n return nq",
"def conform_query(cls, query):\n return query",
"def query(self):\n if bool(self._query_parameters): # ApiGateway set key value None in case of absence params, not an empty dict\n payload = {inflection.underscore(k): v for k, v, in self._query_parameters.items()}\n else:\n payload = dict()\n QueryTuple = namedtuple('QueryTuple', sorted(payload))\n the_tuple = QueryTuple(**payload)\n return the_tuple",
"def query(self, query, view=None):\n if isinstance(query, six.string_types):\n query = Q.parse(query)\n if view is None:\n l = lambda: super(DiscoDB, self).query(query)\n else:\n if not isinstance(view, DiscoDBView):\n view = self.make_view(view)\n l = lambda: super(DiscoDB, self).query(query, view=view)\n return DiscoDBLazyInquiry(l)",
"def inspect_query(query):\n return _parse_query(query)",
"def to_object(cls, query_dict: Dict):\n question = Question()\n question.id = query_dict.get(\"id\")\n question.created_date = query_dict.get(\"created_date\")\n question.created_by = query_dict.get(\"created_by\")\n question.meet_up = query_dict.get(\"meetup\")\n question.title = query_dict.get(\"title\")\n question.body = query_dict.get(\"body\")\n question.votes = query_dict.get(\"votes\")\n question.upvotes = query_dict.get(\"upvotes\")\n question.downvotes = query_dict.get(\"downvotes\")\n question.comments = len(\n Comment.query_by_field(\"question\", question.id))\n return question",
"def _normalize_query(self, query):\n if not query:\n return {}\n\n def _parse_value(value):\n \"\"\"Checks and parses if supplied value is a float, bool, or int\"\"\"\n # Check if it is a boolean, int, or float value\n try:\n value = json.loads(value.lower())\n return value\n except ValueError:\n return value\n\n normalized = {}\n for key, val in query.items():\n if key == 'id':\n key = '_id'\n elif key in ['name']:\n key = '{}_lower'.format(key)\n val = val.lower()\n\n operator = None\n if isinstance(val, str):\n if ':' in val:\n oper, new_val = val.split(':', 1)\n if oper in QUERY_OPERATORS:\n operator = oper\n val = new_val\n # Check if they supplied comma separated value\n if ',' in val:\n val = [_parse_value(v.strip()) for v in val.split(',')]\n else:\n val = _parse_value(val)\n\n if operator:\n if operator in ['in', 'nin'] and not isinstance(val, list):\n val = [val]\n val = {'$exists': True, '${}'.format(operator): val}\n elif isinstance(val, list):\n val = {'$exists' : True, '$in' : val}\n else:\n val = {'$exists' : True, '$eq' : val}\n\n normalized[key] = val\n\n return normalized",
"def _search(self, query):\n return self._request(query)",
"def make_query_from_json(cls, json_object: Dict) -> Optional[_Query]:\n\n # extract query parameters from json object:\n context = json_object.get(\"@context\")\n url = context.get(\"results\").get(\"@id\")\n parsed_url = urllib.parse.urlparse(url)\n parsed_query = (urllib.parse.parse_qs(parsed_url.query))\n\n # make query object from instantiated parameters and json object:\n try:\n searchword = parsed_query.get(\"searchword\")[0]\n maxsearchtime = parsed_query.get(\"maxsearchtime\")[0]\n duplicates = parsed_query.get(\"duplicates\")[0]\n if duplicates == \"on\":\n duplicates = True\n else:\n duplicates = False\n disabled = parsed_query.get(\"disabled\")\n scheme = _Utility.words2scheme([searchword])\n concept = _Utility.word2concept(word=searchword, scheme_uri=scheme.uri)\n query = _Query(concept=concept,\n maxsearchtime=int(maxsearchtime),\n duplicates=duplicates,\n disabled=disabled,\n response=json_object)\n except IndexError:\n return None\n\n return query",
"def _get_query(query_name):\n query_dict = {\n 'survey_structure': f\"\"\"SELECT * FROM [Survey_Sample_A19].[dbo].[SurveyStructure]\"\"\",\n 'surveys_query': f\"\"\"SELECT SurveyId FROM [Survey_Sample_A19].[dbo].[Survey]\"\"\",\n 'questions_query': f\"\"\"SELECT * FROM (\n SELECT SurveyId, QuestionId, 1 as InSurvey\n FROM SurveyStructure\n WHERE SurveyId = @currentSurveyId \n UNION \n SELECT @currentSurveyId as SurveyId, Q.QuestionId, 0 as InSurvey \n FROM Question as Q \n WHERE NOT EXISTS (\n SELECT *\n FROM SurveyStructure as S\n WHERE S.SurveyId = @currentSurveyId \n AND S.QuestionId = Q.QuestionId \n ) \n ) as t \n ORDER BY QuestionId;\"\"\",\n 'query_template_for_answer_column': f\"\"\" COALESCE((\n SELECT a.Answer_Value \n FROM Answer as a \n WHERE a.UserId = u.UserId\n AND a.SurveyId = <SURVEY_ID> \n AND a.QuestionId = <QUESTION_ID> \n ), -1) AS ANS_Q<QUESTION_ID> \"\"\",\n 'query_template_for_null_column': f\"\"\" NULL AS ANS_Q<QUESTION_ID> \"\"\",\n 'query_template_outer_union_query': f\"\"\" SELECT UserId , <SURVEY_ID> as SurveyId, <DYNAMIC_QUESTION_ANSWERS> \n FROM [User] as u \n WHERE EXISTS (\n SELECT * \n FROM Answer as a \n WHERE u.UserId = a.UserId \n AND a.SurveyId = <SURVEY_ID>\n )\"\"\",\n 'vw_survey_data': f\"\"\"SELECT * FROM [Survey_Sample_A19].[dbo].[vw_AllSurveyData]\"\"\",\n 'edit_view': f\"\"\"CREATE OR ALTER VIEW vw_AllSurveyData AS \"\"\"\n\n }\n return query_dict.get(query_name)",
"def query(self, query):",
"def metaquery(self, query):\n if isinstance(query, six.string_types):\n query = Q.parse(query)\n return DiscoDBItemInquiry(lambda: query.metaquery(self))",
"def visit_query(self, query):\n return query",
"def dummy_search(query):\n ii = InvertedIndex()\n return ii.lookup_query(query)",
"def _get_schema_using_query(self, query: str) -> sch.Schema:\n return sch.Schema.from_tuples(self._metadata(query))",
"def fetch_querydict(self):\n query = dict()\n query[\"filtered\"] = dict()\n if self.q_dict and isinstance(self.q_dict, dict):\n query_list, filter_list = self.build_query_structure()\n if query_list:\n query[\"filtered\"][\"query\"] = {\"bool\": {\"must\": query_list}}\n if filter_list:\n query[\"filtered\"][\"filter\"] = {\"bool\": {\"must\": filter_list}}\n return query",
"def extract(query_dict, prefix=\"\"):\n\n strs = ['order_by']\n ints = ['per_page', 'page']\n\n extracted = { }\n\n for key in (strs + ints):\n if (prefix + key) in query_dict:\n val = query_dict.get(prefix + key)\n\n extracted[key] = (val\n if not key in ints\n else int(val))\n\n return extracted",
"def convert_old_catalog_query(query):\n for k, v in query.items():\n q_field = q_type = q_param = None\n if '_usage' in k:\n q_field = k.replace('_usage', '')\n usage = v.split(':')\n q_type = usage[0].strip()\n q_param = ':'.join(usage[1:]).strip()\n elif '_operator' in k:\n q_field = k.replace('_operator', '')\n q_type = 'operator'\n q_param = v\n if q_field:\n new_val = query[q_field]\n if not isinstance(v, dict):\n new_val = {'query': new_val}\n new_val[q_type] = q_param\n query[q_field] = new_val\n del query[k]\n return query",
"def lookup_query(self, query):\n return { term: self.index[term] for term in query.split('/r') if term in self.index }",
"def documents_dslquery(dsl_dict, **kwargs):\n return _dslquery('documents', dsl_dict, **kwargs)",
"def phrase_query_retrieve(self, query_str):\n query = self.process_query(query_str)\n return self.phrase_retrieve(query)",
"def make_query(self, query, data: Dict):\n return query.format_map(data)",
"def get_query_result(query_string: str) -> Any:\n table = get_template_attribute(\"_query_table.html\", \"querytable\")\n contents, types, rows = g.ledger.query_shell.execute_query(\n g.filtered.entries, query_string\n )\n if contents and \"ERROR\" in contents:\n raise FavaAPIError(contents)\n table = table(g.ledger, contents, types, rows)\n\n if types and g.ledger.charts.can_plot_query(types):\n return QueryResult(table, g.ledger.charts.query(types, rows))\n return QueryResult(table)",
"def query_dict_for_record(record, touched_queries):\n result = dict()\n if len(touched_queries) > 0:\n parsed_record = etree.parse(StringIO(record.test_data_xml()))\n result.update(dict((q_name, {'query': q_value,\n 'result': list(x.text for x in etree.ETXPath(q_value)(parsed_record))})\n for q_name, q_value in touched_queries.items()))\n return result",
"def handleQuery(self, query) -> None: # noqa\n results = []\n\n try:\n query_str = query.string.strip()\n\n # too small request - don't even send it.\n if len(query_str) < 2:\n keys_monitor.reset()\n return\n\n if len(query_str.split()) > 1:\n # pydictionary or synonyms.com don't seem to support this\n query.add(\n v0.Item(\n id=md_name,\n icon=[icon_path],\n text=\"A term must be only a single word\",\n actions=[],\n )\n )\n return\n\n # determine if we can make the request --------------------------------------------\n keys_monitor.report()\n if keys_monitor.triggered():\n results.extend(get_items_for_word(query, query_str))\n\n if not results:\n query.add(\n 0,\n v0.Item(\n id=md_name,\n icon=[icon_path],\n text=\"No results.\",\n actions=[],\n ),\n )\n\n return\n else:\n query.add(results)\n\n except Exception: # user to report error\n print(traceback.format_exc())\n query.add(\n v0.Item(\n id=md_name,\n icon=[icon_path],\n text=\"Something went wrong! Press [ENTER] to copy error and report it\",\n actions=[\n ClipAction(\n f\"Copy error - report it to {md_url[8:]}\",\n f\"{traceback.format_exc()}\",\n )\n ],\n ),\n )",
"def get_by_id(query_id):\n return SavedQuery.get_by_id(query_id)",
"def _parse_user_query(self, query):\n def _parse_basic_query(attr, value):\n if isinstance(value, str) and '*' in value:\n return MatchGlob(attr, value)\n else:\n return Eq(attr, value)\n\n if isinstance(query, dict):\n subqueries = []\n for attr, value in query.iteritems():\n if isinstance(value, (list, set, tuple)):\n # If value is a list or similar, we build an OR\n or_queries = []\n for or_query in value:\n or_queries.append( _parse_basic_query(attr, or_query) )\n subqueries.append( Or(*or_queries) )\n else:\n subqueries.append(_parse_basic_query(attr, value))\n query = And(*subqueries)\n return query",
"def build(self, query_str):\r\n try:\r\n parsed = self.parser.parseString(query_str)\r\n except ParseException,e:\r\n raise QueryException(e)\r\n\r\n source = self.__get_source(parsed)\r\n tree = self.__get_tree(parsed)\r\n handler = self.__get_handler(parsed)\r\n query = Query(tree, source, handler)\r\n return query"
] |
[
"0.64505786",
"0.6189991",
"0.60981566",
"0.5994181",
"0.5804566",
"0.5796665",
"0.57583094",
"0.5753702",
"0.5728359",
"0.5705566",
"0.5673714",
"0.56447256",
"0.5644346",
"0.558828",
"0.55776954",
"0.55712634",
"0.5543712",
"0.55340433",
"0.55187654",
"0.550855",
"0.55080646",
"0.548898",
"0.5461648",
"0.5460387",
"0.5457368",
"0.54499847",
"0.54465073",
"0.5427593",
"0.5410065",
"0.5408116"
] |
0.798138
|
0
|
A safer version of Xapian.enquire.get_mset. Simply wraps the Xapian version and catches any `Xapian.DatabaseModifiedError`, attempting a `database.reopen` as needed.
|
def _get_enquire_mset(self, database, enquire, start_offset, max_offset):
try:
return enquire.get_mset(start_offset, max_offset)
except xapian.DatabaseModifiedError:
database.reopen()
return enquire.get_mset(start_offset, max_offset)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def _memcache_set(*args, **kwargs):\n return ndb.get_context().memcache_set(*args, **kwargs)",
"def testMultiSet3(self):\n data_store.DB.MultiSet(self.test_row,\n {\"aff4:size\": [1],\n \"aff4:stored\": [\"2\"]},\n token=self.token)\n\n data_store.DB.MultiSet(self.test_row, {\"aff4:stored\": [\"2\"]},\n to_delete=[\"aff4:size\"],\n token=self.token)\n\n # This should be gone now\n (stored, _) = data_store.DB.Resolve(self.test_row, \"aff4:size\",\n token=self.token)\n self.assertEqual(stored, None)\n\n (stored, _) = data_store.DB.Resolve(self.test_row, \"aff4:stored\",\n token=self.token)\n self.assertEqual(stored, \"2\")",
"def patch_mongo(monkeypatch):\n mock_db = mongomock.MongoClient().todo_database\n\n def fake_get_db():\n return mock_db\n\n monkeypatch.setattr(main.data_access, \"get_db\", fake_get_db)",
"def testMultiSet2(self):\n # Specify a per element timestamp\n data_store.DB.MultiSet(self.test_row,\n {\"aff4:size\": [(1, 100)],\n \"aff4:stored\": [(\"2\", 200)]},\n token=self.token)\n\n (stored, ts) = data_store.DB.Resolve(self.test_row, \"aff4:size\",\n token=self.token)\n self.assertEqual(stored, 1)\n self.assertEqual(ts, 100)\n\n (stored, ts) = data_store.DB.Resolve(self.test_row, \"aff4:stored\",\n token=self.token)\n self.assertEqual(stored, \"2\")\n self.assertEqual(ts, 200)",
"def testMultiSet(self):\n unicode_string = u\"this is a uñîcödé string\"\n\n data_store.DB.MultiSet(self.test_row,\n {\"aff4:size\": [1],\n \"aff4:stored\": [unicode_string],\n \"aff4:unknown_attribute\": [\"hello\"]},\n token=self.token)\n\n (stored, _) = data_store.DB.Resolve(self.test_row, \"aff4:size\",\n token=self.token)\n self.assertEqual(stored, 1)\n\n (stored, _) = data_store.DB.Resolve(self.test_row, \"aff4:stored\",\n token=self.token)\n self.assertEqual(stored, unicode_string)\n\n # Make sure that unknown attributes are stored as bytes.\n (stored, _) = data_store.DB.Resolve(self.test_row, \"aff4:unknown_attribute\",\n token=self.token)\n self.assertEqual(stored, \"hello\")\n self.assertEqual(type(stored), str)",
"def get_databases ():\n return _dbobjects[:]",
"def test_custom_querysets_set_manager_directly(self):\n\n class CustomQuerySet(QuerySet):\n def not_empty(self):\n return self.count() > 0\n\n class CustomQuerySetManager(QuerySetManager):\n queryset_class = CustomQuerySet\n\n class Post(Document):\n objects = CustomQuerySetManager()\n\n Post.drop_collection()\n\n assert isinstance(Post.objects, CustomQuerySet)\n assert not Post.objects.not_empty()\n\n Post().save()\n assert Post.objects.not_empty()\n\n Post.drop_collection()",
"def update_sets(check_update=1):\n\n set_list = reapi.pull_set_catalog()\n secondary_sets.add_sets_to_database(set_list, update=check_update)",
"def _computebumpedset(repo):\n # get all possible bumped changesets\n tonode = repo.changelog.node\n publicnodes = (tonode(r) for r in repo.revs('public()'))\n successors = allsuccessors(repo.obsstore, publicnodes,\n ignoreflags=bumpedfix)\n # revision public or already obsolete don't count as bumped\n query = '%ld - obsolete() - public()'\n return set(repo.revs(query, _knownrevs(repo, successors)))",
"def test_10_import_attribute_set(self):\n with mock_api(magento_attribute_responses):\n import_record(self.session, 'magento.attribute.set',\n self.backend_id, '9')\n\n mag_attr_obj = self.registry('magento.attribute.set')\n cr, uid = self.cr, self.uid\n mag_attr_set_ids = mag_attr_obj.search(cr, uid, [\n ('magento_id', '=', '9'),\n ('backend_id', '=', self.backend_id),\n ])\n self.assertEqual(len(mag_attr_set_ids), 1)\n mag_attr_set = mag_attr_obj.browse(cr, uid, mag_attr_set_ids[0])\n self.assertEqual(mag_attr_set.attribute_set_name, 'Default')",
"def test_custom_querysets_managers_directly(self):\n\n class CustomQuerySetManager(QuerySetManager):\n @staticmethod\n def get_queryset(doc_cls, queryset):\n return queryset(is_published=True)\n\n class Post(Document):\n is_published = BooleanField(default=False)\n published = CustomQuerySetManager()\n\n Post.drop_collection()\n\n Post().save()\n Post(is_published=True).save()\n assert Post.objects.count() == 2\n assert Post.published.count() == 1\n\n Post.drop_collection()",
"def test_partial_updates(self):\r\n m1 = TestSetModel.create(int_set={1, 2, 3, 4})\r\n\r\n m1.int_set.add(5)\r\n m1.int_set.remove(1)\r\n assert m1.int_set == {2, 3, 4, 5}\r\n\r\n m1.save()\r\n\r\n m2 = TestSetModel.get(partition=m1.partition)\r\n assert m2.int_set == {2, 3, 4, 5}",
"def query_set(gen):\n\n @wraps(gen)\n def new_gen(*args, **kwargs):\n return QuerySet(gen(*args, **kwargs))\n\n return new_gen",
"def test_version_upgrade_nonpersistent(self):\n\n db_file = self.mktemp()\n\n db = Database.TestDB(db_file)\n yield db.open()\n yield db.execute(\"INSERT into TESTTYPE (KEY, VALUE) values (:1, :2)\", (\"FOO\", \"BAR\",))\n items = (yield db.query(\"SELECT * from TESTTYPE\"))\n self.assertEqual(items, ((\"FOO\", \"BAR\"),))\n db.close()\n db = None\n\n db = Database.TestDBRecreateUpgrade(db_file)\n yield self.inlineCallbackRaises(Database.TestDBRecreateUpgrade.RecreateDBException, db.open)\n items = (yield db.query(\"SELECT * from TESTTYPE\"))\n self.assertEqual(items, ())\n db.close()",
"def _rebuild_compareset(self, result, rewrapped_columns, columns):\n normalize = lambda x: x if (isinstance(x, str) or not x) else tuple(x)\n rewrapped_columns = normalize(rewrapped_columns)\n columns = normalize(columns)\n\n if rewrapped_columns == columns:\n return result # <- EXIT!\n\n missing = self._missing\n def rebuild(x):\n lookup_dict = dict(zip(rewrapped_columns, x))\n return tuple(lookup_dict.get(c, missing) for c in columns)\n return CompareSet(rebuild(x) for x in result)",
"def set_version_db(apps, schema_editor):\n Version = apps.get_model(\"reversion\", \"Version\")\n content_types = Version.objects.values_list(\"content_type\", flat=True).distinct()\n for content_type in content_types:\n model_class = content_type.model_class()\n db = router.db_for_write(model_class)\n Version.objects.filter(content_type=content_type).update(db=db)",
"def test_version_upgrade_persistent(self):\n db_file = self.mktemp()\n db = Database.TestDB(db_file, persistent=True)\n yield db.open()\n yield db.execute(\"INSERT into TESTTYPE (KEY, VALUE) values (:1, :2)\", (\"FOO\", \"BAR\",))\n items = (yield db.query(\"SELECT * from TESTTYPE\"))\n self.assertEqual(items, ((\"FOO\", \"BAR\"),))\n db.close()\n db = None\n\n db = Database.TestDBRecreateUpgrade(db_file, persistent=True)\n yield self.inlineCallbackRaises(NotImplementedError, db.open)\n self.assertTrue(os.path.exists(db_file))\n db.close()\n db = None\n\n db = Database.TestDB(db_file, persistent=True)\n yield db.open()\n items = (yield db.query(\"SELECT * from TESTTYPE\"))\n self.assertEqual(items, ((\"FOO\", \"BAR\"),))\n db.close()",
"def new_set(*, ctx: context.ContextLevel, **kwargs) -> irast.Set:\n ir_set = irast.Set(**kwargs)\n ctx.all_sets.append(ir_set)\n return ir_set",
"def test_custom_querysets_set_manager_methods(self):\n\n class CustomQuerySet(QuerySet):\n def delete(self, *args, **kwargs):\n \"\"\"Example of method when one want to change default behaviour of it\"\"\"\n return 0\n\n class CustomQuerySetManager(QuerySetManager):\n queryset_class = CustomQuerySet\n\n class Post(Document):\n objects = CustomQuerySetManager()\n\n Post.drop_collection()\n\n assert isinstance(Post.objects, CustomQuerySet)\n assert Post.objects.delete() == 0\n\n post = Post()\n post.save()\n assert Post.objects.count() == 1\n post.delete()\n assert Post.objects.count() == 1\n\n Post.drop_collection()",
"def change_db(self):\n self.db = self.database.get()\n return self.db",
"def open(self):\n if not self.filename:\n raise ValueError(\"Can only open on-disk databases\")\n self.db = dbm.open(self.filename, \"w\") #raises anydbm.error\n try:\n if self.db[\"--Reserved--type\"] != self.type:\n raise ValueError(\"Not a %s database\" % self.type)\n except KeyError:\n raise ValueError(\"Not a recognized database\")",
"def patch_set(self, *patch_tuples):\n return PatchSet(*patch_tuples)",
"def dbm(cls):\n return cls.dbmanager",
"def new_photoset():\n return core.PhotoSet()",
"def patchset(request):\n patchsets = request.issue.get_patchset_info(\n request.user, request.patchset.key.id())\n for ps in patchsets:\n if ps.key.id() == request.patchset.key.id():\n patchset = ps\n return respond(request, 'patchset.html',\n {'issue': request.issue,\n 'patchset': request.patchset,\n 'patchsets': patchsets,\n 'is_editor': request.issue.edit_allowed,\n })",
"def unwrap(self, project: \"benchbuild.project.Project\") -> WorkloadSet:\n source = primary(*project.source)\n self.rev_range.init_cache(source.fetch())\n\n revision = project.version_of_primary\n if revision in set(self.rev_range):\n return self.workload_set\n return WorkloadSet()",
"def test_set():\n assert not sdb.set_(\"sdb://mymemcached/foo\", \"bar\")",
"def _volume_metadata_set(self, volume_path, data):\n data['compat_version'] = 1\n data['version'] = self.version\n return self._metadata_set(self._volume_metadata_path(volume_path), data)",
"def load_set_by_id(set_id):\n return get_default_repo().get_set_by_id(set_id)",
"def _computeunstableset(repo):\n # revset is not efficient enough here\n # we do (obsolete()::) - obsolete() by hand\n obs = getrevs(repo, 'obsolete')\n if not obs:\n return set()\n cl = repo.changelog\n return set(r for r in cl.descendants(obs) if r not in obs)"
] |
[
"0.4793015",
"0.46737233",
"0.4635927",
"0.46030533",
"0.4529075",
"0.44538745",
"0.44417262",
"0.44326645",
"0.43842188",
"0.4382254",
"0.43772754",
"0.43699858",
"0.43466854",
"0.43008155",
"0.42580876",
"0.42529568",
"0.42522395",
"0.42500696",
"0.42464286",
"0.4227376",
"0.42061856",
"0.41952413",
"0.4194753",
"0.41828915",
"0.4172346",
"0.41712785",
"0.41642827",
"0.4141576",
"0.41371474",
"0.41261435"
] |
0.7363788
|
0
|
A safer version of Xapian.document.get_data. Simply wraps the Xapian version and catches any `Xapian.DatabaseModifiedError`, attempting a `database.reopen` as needed.
|
def _get_document_data(self, database, document):
try:
return document.get_data()
except xapian.DatabaseModifiedError:
database.reopen()
return document.get_data()
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def get_document(self, docid):\n try:\n return self.sql_session.query(Document).get(docid)\n except OperationalError:\n raise IOError(\"Sorry, this database is incompatible with the \"\n \"current version of Luminoso. If you want, you can \"\n \"delete the model directory and start again.\")",
"def data(self, refresh=False):\n self.logging.debug( \"data(%s)\" % (self.db) )\n\n if not self.db: self.validate()\n\n if refresh: self.update()\n\n return (self._clean_cache(self.cache), self._clean_cache(self.cache_error))",
"def _database(folder, writable=False):\n if writable:\n if debug:\n database = xapian.WritableDatabase(folder, xapian.DB_CREATE_OR_OVERWRITE)\n else:\n database = xapian.WritableDatabase(folder, xapian.DB_CREATE_OR_OPEN)\n else:\n try:\n database = xapian.Database(folder)\n except xapian.DatabaseOpeningError:\n raise InvalidIndexError(u'Unable to open index at %s' % folder)\n\n return database",
"def test_version_upgrade_nonpersistent(self):\n\n db_file = self.mktemp()\n\n db = Database.TestDB(db_file)\n yield db.open()\n yield db.execute(\"INSERT into TESTTYPE (KEY, VALUE) values (:1, :2)\", (\"FOO\", \"BAR\",))\n items = (yield db.query(\"SELECT * from TESTTYPE\"))\n self.assertEqual(items, ((\"FOO\", \"BAR\"),))\n db.close()\n db = None\n\n db = Database.TestDBRecreateUpgrade(db_file)\n yield self.inlineCallbackRaises(Database.TestDBRecreateUpgrade.RecreateDBException, db.open)\n items = (yield db.query(\"SELECT * from TESTTYPE\"))\n self.assertEqual(items, ())\n db.close()",
"def test_version_upgrade_persistent(self):\n db_file = self.mktemp()\n db = Database.TestDB(db_file, persistent=True)\n yield db.open()\n yield db.execute(\"INSERT into TESTTYPE (KEY, VALUE) values (:1, :2)\", (\"FOO\", \"BAR\",))\n items = (yield db.query(\"SELECT * from TESTTYPE\"))\n self.assertEqual(items, ((\"FOO\", \"BAR\"),))\n db.close()\n db = None\n\n db = Database.TestDBRecreateUpgrade(db_file, persistent=True)\n yield self.inlineCallbackRaises(NotImplementedError, db.open)\n self.assertTrue(os.path.exists(db_file))\n db.close()\n db = None\n\n db = Database.TestDB(db_file, persistent=True)\n yield db.open()\n items = (yield db.query(\"SELECT * from TESTTYPE\"))\n self.assertEqual(items, ((\"FOO\", \"BAR\"),))\n db.close()",
"def opendb(**kwargs):\n import emen2.db.proxy\n return emen2.db.proxy.opendb(**kwargs)",
"def getDocsFromDB(dbname):\n\tconn = sqlite3.connect(dbname)\n\tc = conn.cursor()\n\tretrieve_query = \"SELECT * FROM urltable\"\n\tc.execute(retrieve_query)\n\tdocs = c.fetchall()\n\treturn docs",
"def read_database(app):\n app.status.cursorToHourglass()\n app.central.closeAllSubWindows()\n app.database().scan()\n app.status.cursorToNormal() \n app.refresh()",
"def change_db(self):\n self.db = self.database.get()\n return self.db",
"def __getitem__(self, dbname):\n return Database(dbname=dbname, connection=self)",
"def _database(self):\n ...",
"def open(self):\r\n if not self.filename:\r\n raise ValueError(\"Can only open on-disk databases\")\r\n self.db = anydbm.open(self.filename, \"w\") #raises anydbm.error\r\n try:\r\n if self.db[\"--Reserved--type\"] != self.type:\r\n raise ValueError(\"Not a %s database\" % self.type)\r\n except KeyError:\r\n raise ValueError(\"Not a recognized database\")",
"def patch_mongo(monkeypatch):\n mock_db = mongomock.MongoClient().todo_database\n\n def fake_get_db():\n return mock_db\n\n monkeypatch.setattr(main.data_access, \"get_db\", fake_get_db)",
"def get_db():\n if not hasattr(g, 'db_conn'):\n g.db_conn = sqlite3.connect(\"pypatch.sqlite\")\n g.db_conn.row_factory = sqlite3.Row\n \n return g.db_conn",
"def data_upgrades():\n pass",
"def data_upgrades():\n pass",
"def get_data(db_dir, command, args = None):\n with lite.connect((db_dir)) as conn:\n try:\n cursor = conn.cursor()\n if args:\n cursor.execute(command,args)\n else:\n cursor.execute(command)\n data = cursor.fetchall()\n #print '[sql management] got all of the data requested according to:\\n--- %s ---\\n the data: %s'%(command, data)\n return data\n except:\n return None",
"def open(self):\n if not self.filename:\n raise ValueError(\"Can only open on-disk databases\")\n self.db = dbm.open(self.filename, \"w\") #raises anydbm.error\n try:\n if self.db[\"--Reserved--type\"] != self.type:\n raise ValueError(\"Not a %s database\" % self.type)\n except KeyError:\n raise ValueError(\"Not a recognized database\")",
"def db_open():\n if not hasattr(g, 'sqlite_db'):\n g.sqlite_db = db_connect()\n return g.sqlite_db",
"def db_get_wrapper(db: Any, key: bytes) -> Union[bytes, None]:\n counter = 0\n while True:\n try:\n result = db.get(key)\n return result\n except rocksdb.errors.RocksIOError as e:\n if counter >= RETRY_LIMIT:\n LOG.info('Too many failed retries. Stopping.')\n raise e\n if 'No such file or directory' in str(e):\n LOG.info('DB lookup failed. Retrying.')\n sleep(RETRY_SLEEP)\n counter += 1",
"def _get_data(data_type, table, force):\n if force or data_type not in _cache:\n _cache[data_type] =read_table(table)\n return _cache[data_type]",
"def get_doc_data(self):\n return self.data_read",
"def get_database_info(self):\n dashboard_root = self.get_by_id(DashboardDataRoot, self.ROOT_KEYNAME)\n if dashboard_root and dashboard_root.table is not None and \\\n dashboard_root.replication is not None:\n return {\n 'table': dashboard_root.table,\n 'replication': dashboard_root.replication\n }\n try:\n acc = self.helper.get_appcontroller_client()\n db_info = acc.get_database_information()\n if dashboard_root is None:\n dashboard_root = DashboardDataRoot(id=self.ROOT_KEYNAME)\n dashboard_root.table = db_info['table']\n dashboard_root.replication = int(db_info['replication'])\n dashboard_root.put()\n return {\n 'table': dashboard_root.table,\n 'replication': dashboard_root.replication\n }\n except Exception as err:\n logging.exception(err)\n return {\n 'table': 'unknown',\n 'replication': 0\n }",
"def get_database():\n if not REPO:\n site = pwb.Site(\"wikidata\", \"wikidata\")\n repo = site.data_repository()\n return repo\n return REPO",
"def dbRead(dbPoint):\n raise NotImplementedError('dbRead in simu mode')",
"def update_database():\n\n # We obtain the data from the official database\n df = getData.extractData()\n\n # We save the dataframe for later use in the API\n auxiliary.saveToCsv(df, 'app/resources')",
"def get_document(self, docid):\n raise NotImplementedError",
"def get_db():\n\n def dict_factory(cursor, row):\n \"\"\"\n Creates dict from row.\n\n Args:\n cursor: DB cursor.\n row: Row.\n\n Returns:\n dict: Dict of results.\n \"\"\"\n d = {}\n for idx, col in enumerate(cursor.description):\n d[col[0]] = row[idx]\n return d\n\n if '_database' not in g:\n g.db = sqlite3.connect(\n current_app.config['DATABASE'],\n detect_types=sqlite3.PARSE_DECLTYPES\n )\n g.db.row_factory = dict_factory\n return g.db",
"def _db(_app):\n return db",
"def fill_db(self, data):\n check_input_params(data, self.DB)\n self.db = data[self.DB]"
] |
[
"0.50801295",
"0.4965803",
"0.49481302",
"0.49248785",
"0.48873454",
"0.48322877",
"0.4831373",
"0.4829495",
"0.48187444",
"0.476649",
"0.47422433",
"0.47421846",
"0.47324145",
"0.47252893",
"0.4715714",
"0.4715714",
"0.47152796",
"0.47150412",
"0.46747407",
"0.46602058",
"0.4655002",
"0.46519914",
"0.46261084",
"0.4623735",
"0.4611549",
"0.46079668",
"0.45765454",
"0.45743886",
"0.45699686",
"0.45489964"
] |
0.74949485
|
0
|
Private method that returns the column value slot in the database for a given field.
|
def _value_column(self, field):
    for field_dict in self.schema['idx_fields']:
        if field_dict['field_name'] == field:
            return field_dict['column']
    # Field not present in the indexed schema; fall back to column 0.
    return 0
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def column_for_field(self, field):\r\n # iterate in reverse order as columns are registered in order\r\n # of least to most specialised (i.e. Column is registered\r\n # first). This also allows user-registered columns to be\r\n # favoured.\r\n for candidate in reversed(self.columns):\r\n if not hasattr(candidate, \"from_field\"):\r\n continue\r\n column = candidate.from_field(field)\r\n if column is None:\r\n continue\r\n return column",
"def get_field(self, field):\n idx = self._keys.index(field)\n return self._data[idx]",
"def lookup_sqlacolumn_for_field(cls, fieldid):\n for field in cls.fieldlist:\n if (field.id==fieldid):\n return field.get_sqlacolumn()\n raise Exception(\"Could not find field {0}\".format(fieldid))\n #return None",
"def field(self, field):\n return self.__getitem__(field)",
"def _get_value(self, value_column):\n pass",
"def _get_column(self, name):\r\n return self.column(name)",
"def get_value(self, field):\n field = self.find_first(field)\n if field is not None:\n return field.value\n return None",
"def _get_column(cls, name):\r\n return cls._columns[name]",
"def _getValue(self, field):\n if self._contents.has_key(field):\n return self._contents[field]\n else:\n return None",
"def get_field(self, field):\n return self._dict.get(field)",
"def get_value(self, row, colName):\n\t\treturn self[row][self._columns[colName]]",
"def get_field(cls, name):\n if name not in cls.get_field_names():\n # - check field name first, next: column name -\n name = cls.get_field_name(name)\n return getattr(cls, name, None)",
"def column(self): \r\n\r\n return self._column",
"def __getitem__(self, field_name: str) -> ColumnField:\n return ColumnField(self, field_name=field_name)",
"def column(self):\n return self[\"column\"]",
"def get_value(self, col, i) :\n\n if col not in self.cols :\n raise Exception('Column %s not in data' % col)\n if len(self.rows) <= i :\n raise Exception('Table has fewer than %d rows' % i)\n return self.rows[i][self.col_indices[col]]",
"def get_field_by_key(field, key, val, session):\n sql = select([field]).where(key == val)\n value = session.execute(sql).scalar()\n return value",
"def field(self):\r\n return self.value",
"def get_real_col(self, index):\n\n return self.col2virt.get(index, index)",
"def get_quote_table_field(field, stock_ticker):\n quote_table = si.get_quote_table(stock_ticker)\n return quote_table[field]",
"def getCellValue_quick(self, row, column):\n\n\t\t\t\treturn self.thing[f\"{column}{row}\"].value",
"def get_data_column ( self, object ):\n return getattr( object, self.name )",
"def getval(col):\n #TODO move the function in ServerUtils and use it when required (e.g.: mysql LONGTEXT does not need read())\n try:\n return str(col)\n except Exception as ex: # pylint: disable=unused-variable\n return col.read()",
"def get_field_value(self, field_name):\n if field_name in self.fields.keys():\n return self.fields[field_name]\n else:\n return \"No such field\"",
"def get_column(self, key):\n return self._get_column(key)",
"def table_column(self, i):\n return self.__column_list[i]",
"def get_column(self):\n return self._column_number",
"def getValue(self):\n return self._row[self.name]",
"def col(self):\n\t\treturn self.__col",
"def getColumn(self):\n return _libsbml.SBase_getColumn(self)"
] |
[
"0.73130476",
"0.69992465",
"0.6788504",
"0.6667331",
"0.656862",
"0.6530437",
"0.6353881",
"0.63479865",
"0.6332249",
"0.6313734",
"0.6301725",
"0.62685233",
"0.62561935",
"0.62510896",
"0.62231576",
"0.6219943",
"0.61846817",
"0.6144859",
"0.6125751",
"0.6110693",
"0.61048806",
"0.6097718",
"0.6097121",
"0.60719466",
"0.6064911",
"0.60415787",
"0.60181475",
"0.6007176",
"0.5959964",
"0.5921498"
] |
0.791283
|
0
|
Given a database and enquire instance, returns the estimated number of matches.
|
def _get_hit_count(self, database, enquire):
return self._get_enquire_mset(
database, enquire, 0, database.get_doccount()
).size()
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def number_matches():\n ALL_FIXED_q = \"all_fixed_queries\" + str(17)\n ALL_FIXED_dbs = \"all_fixed_dbs\" + str(17)\n try:\n with open(\"matches\", \"rb\") as file_h:\n db_length = pickle.load(file_h)\n except:\n print \"reading files..\"\n t = time.time()\n \"\"\"with open(ALL_FILE+str(max_db_size), \"rb\") as file_h:\n all_queries = pickle.load(file_h)\n with open(DB_FILE+str(max_db_size), \"rb\") as file_h:\n db = pickle.load(file_h)\"\"\"\n with open(ALL_FIXED_q, \"rb\") as file_h:\n all_queries = pickle.load(file_h)\n with open(ALL_FIXED_dbs, \"rb\") as file_h:\n db = pickle.load(file_h)\n print \"done reading\", time.time() - t\n print [len(e.keys()) for e in db]\n print len(all_queries.keys())\n db_length = {}\n curr_num = 0\n for query in all_queries.keys():\n curr_num += 1\n\n if curr_num % 1000 == 0:\n print \"MATCHES: I'M Alive!\", curr_num, query\n\n matches = get_matches(query, all_queries[query])\n db_length[query] = len(matches)\n print \"save data\"\n with open(\"matches\", \"wb\") as file_h:\n pickle.dump(db_length, file_h)\n print \"saved!\"\n #pprint(db_length)\n flag = False\n sorted_d = OrderedDict(sorted(db_length.items(), key=lambda x: x[1]))\n for x in sorted_d:\n if not flag:\n print \"FIRST\", sorted_d[x]\n flag = True\n print \"LAST\", sorted_d[x]\n print \"AVG:\", float(sum(db_length.values()))/len(db_length.keys())",
"def matching_records_count(self) -> Optional[float]:\n return pulumi.get(self, \"matching_records_count\")",
"def count_measurements(database: Database) -> int:\n return int(database.measurements.count_documents(filter={}))",
"def get_fully_solved_instances(self, db):\n numInstances = db.session.query(db.Instance).options(joinedload_all('properties')) \\\n .filter(db.Instance.experiments.contains(self)).distinct().count()\n if numInstances == 0: return 0\n num_jobs_per_instance = db.session.query(db.ExperimentResult) \\\n .filter_by(experiment=self).count() / numInstances\n instances = []\n for i in self.instances:\n if db.session.query(db.ExperimentResult) \\\n .filter(db.ExperimentResult.resultCode.like('1%')) \\\n .filter_by(experiment=self, instance=i, status=1) \\\n .count() == num_jobs_per_instance:\n instances.append(i)\n return instances",
"def compute_matches(self):\n\t\tself.local_database[\"figure_number\"] = [0] * len(self.local_database[\"feature_vectors\"])\n\t\tForensics = wbForensicsHOG(Database=self.local_database)\n\t\tForensics.KDTree_pairs(leaf_size = len(self.local_database)+1)\n\t\tForensics.d_rank(pairs=Forensics.pairs, distances=Forensics.dists, ratios=Forensics.ratios)\n\n\t\tself.local_matches = Forensics.Dist_Rank",
"def get_matches_count():\n\n return ''\n \"\"\"\n TODO: count matches\n dtr5app_flag.sender\n dtr5app_flag.receiver\n dtr5app_flag.flag\n \"\"\"",
"def test_dbscan_similarity():\n # Parameters chosen specifically for this task.\n eps = 0.15\n min_samples = 10\n # Compute similarities\n D = distance.squareform(distance.pdist(X))\n D /= np.max(D)\n # Compute DBSCAN\n core_samples, labels = dbscan(D, metric=\"precomputed\",\n eps=eps, min_samples=min_samples)\n # number of clusters, ignoring noise if present\n n_clusters_1 = len(set(labels)) - (1 if -1 in labels else 0)\n\n assert_equal(n_clusters_1, n_clusters)\n\n db = DBSCAN(metric=\"precomputed\")\n labels = db.fit(D, eps=eps, min_samples=min_samples).labels_\n\n n_clusters_2 = len(set(labels)) - int(-1 in labels)\n assert_equal(n_clusters_2, n_clusters)",
"def instance_count(self) -> int:\n return pulumi.get(self, \"instance_count\")",
"def find_N(config, data, imatch=I_DEAD, itarget=10):\n\n np.random.seed(1)\n # Parse parameters\n # get just 1 base case sample corresponding to average\n\n # results = np.zeros((len(m_prior), 13, len(fwd_args['time'])))\n\n i_mod = o_calmodes[imatch]\n i_obs = i_calmodes[imatch]\n obsdead = data[:, i_obs]\n time_delay = config['time_delay']\n obsdead_index = np.where(obsdead > itarget)[0][0] + time_delay\n found = False\n icount = 0\n ncountmax = 50\n nnew = 1000\n\n ndata = np.size(data[:, 0])\n m_prior, fwd_args = parse_config(config, ndata, mode='mean')\n m_prior = reshape_prior(m_prior)\n param = m_prior[0]\n\n while not found and icount < ncountmax:\n fwd_args['locked']['n'] = nnew\n res = base_seir_model(param, fwd_args)\n moddead = res[i_mod, :]\n moddead_index = np.where(moddead > itarget)\n\n print('moddead index, obsdead index ', moddead_index[0][0], obsdead_index)\n found = moddead_index[0][0] >= obsdead_index\n if not found:\n icount += 1\n nnew = fwd_args['locked']['n'] * 2\n fwd_args['locked']['n'] = nnew\n\n return nnew",
"def count(self):\n with self.pdq:\n (count,)=self.pdq.cursor().execute('select count(*) from pdq').next()\n return count",
"def get_project_count(db):\n\n count = 0\n for element in db:\n count += 1\n return count",
"def get_number_seqs_for_primer(percent_match,\n seq_count):\n \n total_seq_use=int((1-percent_match)*seq_count)\n \n return total_seq_use",
"def get_running_instance_count(body):\n\tbody = dict(body)\n\tconn = rds.connect_to_region(region_name='us-west-2',\n\t\t\t\t\t\t\t\taws_access_key_id=body[\"aws_access_key_id\"],\n\t\t\t\t\t\t\t\taws_secret_access_key=body[\"aws_secret_access_key\"])\n\tinstance_status = conn.get_all_dbinstances()\n\trunning_instances = 0\n\tfor item in instance_status:\n\t\tif (item.state_name == 'available'):\n\t\t\trunning_instances += 1\n\treturn {\"data\":running_instances}",
"def similar(text, database):\n # TODO\n pass",
"def count(self, query):",
"def Count_Documents(db):\r\n \r\n count = db.Transaction.estimated_document_count()\r\n print(\"Number of documents in the database Transaction: \" + str(count) + \".\\n\")\r\n return count",
"def run_matchengine():\n with matchengine.internals.engine.MatchEngine(\n match_on_deceased=False,\n match_on_closed=True,\n db_name=\"matchminer\") as me_prod:\n me_prod.get_matches_for_all_trials()\n me_prod.update_all_matches()\n\n reset_elasticsearch()\n resp = Response(response=json.dumps({\"success\": True}),\n status=200,\n mimetype=\"application/json\")\n return resp",
"def _get_num_searches(self):\n self._cursor.execute(f\"\"\"\n SELECT COALESCE(MAX(rowid), 0)\n FROM {self._table_name};\"\"\", {\"table\": self._table_name})\n\n num_searches = self._cursor.fetchone()[0]\n return num_searches",
"def count():\r\n return Activation.query.count()",
"def countPlayers():\n # gets connection to tournament database in conn object\n conn = connect()\n # gets the cursor to execute queries\n c = conn.cursor()\n # executes select with count aggregate function query number of players\n # in PLAYER table\n c.execute(\"SELECT COUNT(*) FROM PLAYER;\")\n # retreives the result in count variable\n count = c.fetchone() [0]\n # closes the connection to tournament database\n conn.close()\n # returns the number of players in PLAYER table\n return count",
"def test_search_result_count(self):\n user = User.objects.create(username=\"hoge\")\n\n ref_entity = Entity.objects.create(name=\"ref_entity\", created_user=user)\n ref_entry = Entry.objects.create(name=\"ref\", schema=ref_entity, created_user=user)\n\n entity = Entity.objects.create(name=\"entity\", created_user=user)\n for name in [\"foo\", \"bar\"]:\n attr = EntityAttr.objects.create(\n name=name,\n type=AttrTypeValue[\"object\"],\n created_user=user,\n parent_entity=entity,\n )\n attr.referral.add(ref_entity)\n entity.attrs.add(attr)\n\n for i in range(0, 20):\n entry = Entry.objects.create(name=\"e%3d\" % i, schema=entity, created_user=user)\n entry.complement_attrs(user)\n\n if i < 10:\n entry.attrs.get(schema__name=\"foo\").add_value(user, ref_entry)\n else:\n entry.attrs.get(schema__name=\"bar\").add_value(user, ref_entry)\n\n entry.register_es()\n\n resp = Entry.search_entries(user, [entity.id], [{\"name\": \"foo\", \"keyword\": \"ref\"}], limit=5)\n self.assertEqual(resp[\"ret_count\"], 10)\n self.assertEqual(len(resp[\"ret_values\"]), 5)",
"def GetInstanceCount():\n return _gmat_py.GmatBase_GetInstanceCount()",
"def calculate_p(candidate, reference):\n matches = 0\n for grama in candidate:\n if grama in reference:\n matches += 1\n return matches/len(candidate)",
"def instance_count(self) -> pulumi.Output[Optional[int]]:\n return pulumi.get(self, \"instance_count\")",
"def t_announceDbCount(self, *_):\n try: self.current_count=self.dbh.getRowCount()\n except: self.current_count=0\n \n self.dprint(\"* ratings_count: current count(%s)\" % self.current_count)\n self.pub(\"ratings_count\", self.current_count)",
"def test_query_hits(config):\n psp = PostgreSQLProvider(config)\n results = psp.query(resulttype=\"hits\")\n assert results[\"numberMatched\"] == 14776\n\n results = psp.query(\n bbox=[29.3373, -3.4099, 29.3761, -3.3924], resulttype=\"hits\")\n assert results[\"numberMatched\"] == 5\n\n results = psp.query(properties=[(\"waterway\", \"stream\")], resulttype=\"hits\")\n assert results[\"numberMatched\"] == 13930",
"def _n_matches(gold_tokens, pred_tokens):\n matcher = difflib.SequenceMatcher(None, gold_tokens, pred_tokens)\n return sum(match.size for match in matcher.get_matching_blocks())",
"def size(self):\n return len(self._match_result_dict)",
"def get_num_strains():\n strains = get_required_strains(None)\n strain_count = len(strains)\n with database.make_connection() as connection:\n # In case reference is included in run\n # Supports current reference\n ref_id = get_current_reference_id()\n for e in strains:\n if e.find(ref_id) != -1:\n strain_count = strain_count-1\n break\n return strain_count",
"def fast_count(db, Model): # noqa\n return db.session.execute(\n 'SELECT n_live_tup FROM pg_stat_all_tables WHERE relname = :tablename',\n {'tablename': Model.__tablename__}\n ).scalar()"
] |
[
"0.59404796",
"0.56138366",
"0.54407555",
"0.5268415",
"0.526133",
"0.5214657",
"0.51833475",
"0.518211",
"0.5179342",
"0.514233",
"0.51320916",
"0.5122056",
"0.5115935",
"0.51133215",
"0.5101664",
"0.5045524",
"0.50377154",
"0.50153667",
"0.50091594",
"0.50074124",
"0.50045085",
"0.50006676",
"0.499928",
"0.49854273",
"0.4982933",
"0.49791795",
"0.49732426",
"0.49686816",
"0.496861",
"0.49664143"
] |
0.60220546
|
0
|
Private utility method that converts a Python term to a string for use as a Xapian term.
|
def _marshal_term(term):
if isinstance(term, int):
term = str(term)
return term
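A minimal usage sketch, assuming the helper above is applied to field values before they are indexed as Xapian terms:

    values = [42, "title", 2024]
    terms = [_marshal_term(v) for v in values]  # -> ['42', 'title', '2024']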
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def to_unicode(terms):\n unicode_terms = []\n for term in terms:\n if (isinstance(term, str)):\n unicode_terms.append(unicode(term, 'utf-8'))\n else:\n unicode_terms.append(term)\n\n return unicode_terms",
"def term_to_tsquery_string(term):\n\n def cleanup(word, whitelist_chars=',.-_'):\n result = ''.join(\n (c for c in word if c.isalnum() or c in whitelist_chars)\n )\n return f'{result}:*' if word.endswith('*') else result\n\n parts = (cleanup(part) for part in (term or '').split())\n return ' <-> '.join(tuple(part for part in parts if part))",
"def __repr__(self):\n\n return \"<Terms term=%s>\" % (self.word)",
"def __str__(self):\n value = []\n for term in self.terms:\n value += [term.termname]\n return '<formula: %s>' % ' + '.join(value)",
"def __str__(self):\n value = []\n for term in self.terms:\n value += [term.termname]\n return '<formula: %s>' % ' + '.join(value)",
"def prepare_terms(terms, search_mode):\n if search_mode in (\"contains\", \"starts_with\"):\n terms = terms.replace(\"_\", \"\\_\").replace(\"%\", \"\\%\")\n\n if search_mode == \"contains\":\n terms = \"%\" + terms + \"%\"\n elif search_mode == \"starts_with\":\n terms = terms + \"%\"\n return terms",
"def format_term(term: Union[BNode, Literal, URIRef, Variable]) -> str:\n if isinstance(term, URIRef):\n return str(term)\n elif isinstance(term, BNode):\n return '?v_' + str(term)\n elif isinstance(term, Literal):\n return format_literal(term)\n else:\n return term.n3()",
"def _get_terms(self):\n return self.__terms",
"def _get_terms(self):\n return self.__terms",
"def _get_terms(self):\n return self.__terms",
"def _get_terms(self):\n return self.__terms",
"def _get_terms(self):\n return self.__terms",
"def _get_terms(self):\n return self.__terms",
"def _get_terms(self):\n return self.__terms",
"def _get_terms(self):\n return self.__terms",
"def _get_terms(self):\n return self.__terms",
"def escapeSolrArg(term):\n\tterm = term.replace('\\\\', r'\\\\') # escape \\ first\n\treturn \"\".join([nextStr for nextStr in escapedSeq(term)])",
"def term_to_rdflib(term: str) -> Term:\n if term.startswith('?'):\n return Variable(term[1:])\n elif term.startswith(\"\\\"\"):\n return from_n3(term)\n else:\n return URIRef(term)",
"def term_to_URI(self, term) :\n\t\tif len(term) == 0 : return None\n\n\t\tif termname.match(term) :\n\t\t\t# It is a valid NCNAME\n\n\t\t\t# First of all, a @vocab nukes everything. That has to be done first...\n\t\t\tif self.default_term_uri != None :\n\t\t\t\treturn URIRef(self.default_term_uri + term)\n\n\t\t\t# For default terms, the algorithm is (see 7.4.3 of the document): first make a case sensitive match;\n\t\t\t# if that fails than make a case insensive one\n\t\t\t# 1. simple, case sensitive test:\n\t\t\tif term in self.terms :\n\t\t\t\t# yep, term is a valid key as is\n\t\t\t\t# lazy binding of the xhv prefix for terms...\n\t\t\t\tself.graph.bind(XHTML_PREFIX, XHTML_URI)\n\t\t\t\treturn self.terms[term]\n\n\t\t\t# 2. case insensitive test\n\t\t\tfor defined_term in self.terms :\n\t\t\t\tif term.lower() == defined_term.lower() :\n\t\t\t\t\t# lazy binding of the xhv prefix for terms...\n\t\t\t\t\tself.graph.bind(XHTML_PREFIX, XHTML_URI)\n\t\t\t\t\treturn self.terms[defined_term]\n\n\t\t# If it got here, it is all wrong...\n\t\treturn None",
"def get_terms(document):\n q = get_mapped(document)\n tokens = tockenizer(q)\n terms = analizer(tokens)\n\n return terms",
"def get_terms(self):\n return json.loads(self.terms)",
"def get_word(naf: KafNafParser, term: Cterm) -> str:\n tokenids = naf.get_dict_tokens_for_termid(term.get_id())\n tokens = sort_tokens(naf.get_token(tid) for tid in tokenids)\n return \" \".join(t.get_text() for t in tokens)",
"def _convert_order_terms(self, sorters):\n return [sorter.get_order_term() for sorter in sorters]",
"def terms(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"terms\")",
"def terms(self) -> Tuple[Term, ...]:\n ...",
"def _flow_terms(flow):\n return flow.synonyms",
"def getComboTerms(tuples):\t\t\t\n\t\t\t#return \"[{0}]\".format('; '.join([\"({0})\".format(','.join([text[indices[0]:indices[1]], str(indices[0])])) for indices in tuples]))\n\t\t\treturn \"{0}\".format('; '.join((\"{0}\".format(text[indices[0]:indices[1]]) for indices in tuples)))",
"def solr_terms_url(config):\n return _solr_core_url(config) + 'terms'",
"def fold_term(self, term):\n if isinstance(term, Var):\n return self.var(term.name)\n elif isinstance(term, Const):\n return self.const(term.name)\n elif isinstance(term, Dist):\n return self.dist(term.name)\n elif isinstance(term, Func):\n return self.func(\n term.funcname,\n tuple( self.fold_term(x)\n for x in term.args ))\n elif isinstance(term, Eq):\n return self.encode_eq(term)\n elif isinstance(term, Disj):\n return self.encode_disj(term)\n elif isinstance(term, Conj):\n return self.encode_conj(term)\n raise RuntimeError('Invalid term {}'.format(term))",
"def __str__(self):\n\n\t\tterms = \\\n\t\t[{att:self.att[att]} for att in self.att if type(self.att[att])==bool]\n\n\t\treturn ''.join([str(int(t.values()[0])) for t in terms])"
] |
[
"0.6238421",
"0.5838818",
"0.581563",
"0.575069",
"0.575069",
"0.5656662",
"0.56406194",
"0.5446854",
"0.5446854",
"0.5446854",
"0.5446854",
"0.5446854",
"0.5446854",
"0.5446854",
"0.5446854",
"0.5446854",
"0.5298573",
"0.52729565",
"0.5213321",
"0.5181461",
"0.51471734",
"0.51005626",
"0.5096038",
"0.50695163",
"0.50655496",
"0.49959782",
"0.49938825",
"0.49618587",
"0.4956132",
"0.4941478"
] |
0.5909471
|
1
|
Merges query dicts, effectively AND-ing them together.
|
def _query_conjunction(self, queries):
query_ops = set()
combined_query = {}
for query in queries:
ops = set(query.keys())
# Make sure that the same operation isn't applied more than once
# to a single field
intersection = ops & query_ops
if intersection:
msg = 'Duplicate query conditions: '
raise InvalidQueryError(msg + ', '.join(intersection))
query_ops.update(ops)
combined_query.update(copy.deepcopy(query))
return combined_query
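A minimal sketch of the expected behaviour, assuming node is a query-node instance exposing this method (the instance name is illustrative) and that copy and InvalidQueryError are available in the defining module:

    node._query_conjunction([{'age__gt': 18}, {'name': 'Ann'}])
    # -> {'age__gt': 18, 'name': 'Ann'}
    node._query_conjunction([{'age__gt': 18}, {'age__gt': 21}])
    # raises InvalidQueryError: Duplicate query conditions: age__gt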
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def query_add(*query_params):\n d = {}\n for qp in query_params:\n qp = query_unflatten(qp)\n for name, value in qp.items():\n if name in d:\n d[name].extend(value)\n else:\n d[name] = value\n return d",
"def _merge_by_query(self, obj_dict):\n _res = self.__session.query(obj_dict[\"class\"]).filter_by(**obj_dict[\"query_dict\"]).first()\n\n if _res is None:\n self._add(obj_dict[\"instance\"])\n else:\n if hasattr(obj_dict[\"instance\"], 'attributes') and \\\n hasattr(obj_dict[\"instance\"], 'p_key'):\n for attr in obj_dict[\"instance\"].attributes:\n if attr not in obj_dict[\"instance\"].p_key:\n setattr(_res, attr, getattr(obj_dict[\"instance\"], attr))\n # updating the instance\n obj_dict[\"instance\"] = _res\n else:\n raise AttributeError(\"Class variable (attributes / p_key) not set for %s\" %\n (obj_dict[\"instance\"],))",
"def merge_results(res1, res2):\n empty = []\n keys = set(res1).union(res2)\n return dict((k, res1.get(k, empty) + res2.get(k, empty)) for k in keys)",
"def _get_query(self, opt: dict):\n query = {}\n for k in self._unique_keys:\n query[k] = opt[k]\n return query",
"def merge_dicts(*kwargs):\n result={}\n for dictionary in kwargs:\n result.update(dictionary)\n return result",
"def _merge_dicts(*args):\n return reduce(lambda d, s: d.update(s) or d, args)",
"def _merge_dicts(*args):\n return reduce(lambda d, s: d.update(s) or d, args)",
"def set_queries(self, **kwargs):\n for k, v in kwargs.items():\n self._query_dict[k] = v",
"def fetch_querydict(self):\n query = dict()\n query[\"filtered\"] = dict()\n if self.q_dict and isinstance(self.q_dict, dict):\n query_list, filter_list = self.build_query_structure()\n if query_list:\n query[\"filtered\"][\"query\"] = {\"bool\": {\"must\": query_list}}\n if filter_list:\n query[\"filtered\"][\"filter\"] = {\"bool\": {\"must\": filter_list}}\n return query",
"def _join_dicts(dicts):\n if dicts is None: # pragma: no cover\n return\n assembled_dict = {k: v for D in dicts for k, v in D.items()}\n return assembled_dict",
"def _query_to_dict(self, query: MultiDict):\n return {\n key: values\n if len(values := query.getall(key)) > 1 or key in self._is_multiple\n else value\n for key, value in query.items()\n }",
"def _combine_conditions(self):\n self.outpars = {}\n log.debug(\"{} step configuration parameter set(s) to be merged: {}\".format(self.step_title,\", \".join(p for p in list(self.pars_multidict.keys()))))\n for cfg_key in self.pars_multidict.keys():\n self.outpars = self._dict_merge(self.outpars, self.pars_multidict[cfg_key])",
"def _format_queries(self, body):\n for query in body:\n if \"bindVars\" in query:\n query[\"bind_vars\"] = query.pop(\"bindVars\")\n if \"runTime\" in query:\n query[\"runtime\"] = query.pop(\"runTime\")\n return body",
"def ana_merge_searches(datas):\n return dict([\n (name+'|'+path,data['searches'][path])\n for name,data in datas.viewitems()\n for path in data['searches'].viewkeys()\n ])",
"def dict_merge(*args, **kwargs) -> dict:\n # TODO: move this to upstream dependency (openeo package)?\n result = {}\n for d in args + (kwargs,):\n result.update(d)\n return result",
"def merge(*dict_args):\n result = {}\n for dictionary in dict_args:\n result.update(dictionary)\n return result",
"def updated_query(request, *args):\n # NOTE: it returns a dict not a QueryDict\n\n # recall query_to_dict returns key-val sequence\n # filter out the search key\n updated = {k: v for k, v in query_to_dict(request.GET.copy()) if\n k != \"search\"}\n\n # the args must at least have a key + value\n if len(args) < 2:\n return updated\n\n # helper function to update key-in\n def key_in(dic, keys, val):\n k = keys[0]\n # TODO : broken in the sense that I seem to be only updating\n # lists\n if len(keys) == 1:\n if isinstance(dic[k], list) and val not in dic[k]:\n dic[k].append(val)\n else:\n key_in(dic[k], keys[1:], val)\n\n # call key_in to update\n key_in(updated, args[:-1], args[-1])\n\n # return the updated dict (NOTE: this is not\n # a query dict\n return updated",
"def merge_dicts(d1, d2):\n return {**d1, **d2}",
"def merge_wheres(self, wheres, bindings):\n self.wheres = self.wheres + wheres\n self._bindings['where'] = self._bindings['where'] + bindings",
"def merge_dicts(*dict_args):\n result = {}\n for dictionary in dict_args:\n result.update(dictionary)\n return result",
"def merge_dicts(*dict_args):\n result = {}\n for dictionary in dict_args:\n result.update(dictionary)\n return result",
"def merge_dicts(*dict_args):\n result = {}\n for dictionary in dict_args:\n result.update(dictionary)\n return result",
"def merge_dicts(*dict_args):\n result = {}\n for dictionary in dict_args:\n result.update(dictionary)\n return result",
"def merge_dicts(*dict_args):\n result = {}\n for dictionary in dict_args:\n result.update(dictionary)\n return result",
"def merge_dicts(*dict_args):\n result = {}\n for dictionary in dict_args:\n result.update(dictionary)\n return result",
"def dict_merge(dict1, dict2):\n combined = dict(dict1)\n combined.update(dict2)\n return combined",
"def dict_merge(dict1, dict2):\n combined = dict(dict1)\n combined.update(dict2)\n return combined",
"def ana_merge_datas(datas):\n return {\n 'searches':ana_merge_searches(datas),\n 'senzory_map':ana_merge_senzory_map(datas)\n }",
"def merge_dicts(dict_a, dict_b):\n dict_c = dict_a.copy()\n dict_c.update(dict_b)\n return dict_c",
"def merge_dicts(*dict_args):\n result = OrderedDict()\n for dictionary in dict_args:\n result.update(dictionary)\n return result"
] |
[
"0.674077",
"0.6148979",
"0.61165804",
"0.60930693",
"0.60747015",
"0.6037567",
"0.6035048",
"0.5993466",
"0.59804136",
"0.59125394",
"0.58695716",
"0.58409864",
"0.58023703",
"0.5778066",
"0.5772979",
"0.5770433",
"0.56835896",
"0.56134117",
"0.55566716",
"0.5547177",
"0.5547177",
"0.5547177",
"0.5547177",
"0.5547177",
"0.5547177",
"0.55439115",
"0.55439115",
"0.55383784",
"0.5535359",
"0.5519527"
] |
0.6370353
|
1
|
Combine this node with another node into a QCombination object.
|
def _combine(self, other, operation):
if getattr(other, 'empty'):
return self
if self.empty:
return other
return QCombination(operation, [self, other])
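A minimal sketch, assuming Q-style query nodes and an AND combination constant (the names Q and QNode.AND are assumptions, not confirmed by this record):

    q1 = Q(name='Ann')
    q2 = Q(age__gt=18)
    q1._combine(q2, QNode.AND)   # -> QCombination(AND, [q1, q2])
    q1._combine(Q(), QNode.AND)  # other is empty -> returns q1 unchanged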
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def visit_combination(self, combination):\n return combination",
"def __or__(self, other):\r\n if isinstance(self, Circuit):\r\n if isinstance(other, Circuit):\r\n copy_self = copy(self)\r\n for time_step in other.operations_by_time:\r\n for op in other.operations_by_time[time_step]:\r\n operation = other.operations_by_time[time_step][op]\r\n copy_self.append(operation['operation'],\r\n operation['qubits'])\r\n return copy_self\r\n else:\r\n return copy(self).append(other, list(range(len(other.shape))))\r\n else:\r\n return Circuit().append(self, list(range(len(self.shape)))).append(other, list(range(len(other.shape))))",
"def concat(self, other):\n self.add_rules(other.cliques)\n self.prop_names.update(other.prop_names)",
"def combine(self, existing):\n return self",
"def commutes_with(self, other):\n if not isinstance(other, type(self)):\n raise TypeError(\n 'Can only test commutation with another MajoranaOperator.')\n\n if len(self.terms) == 1 and len(other.terms) == 1:\n return _majorana_terms_commute(\n list(self.terms.keys())[0],\n list(other.terms.keys())[0])\n return self * other == other * self",
"def union(self, other):\n return PermClass([S_1 + S_2 for S_1, S_2 in zip(self, other)])",
"def combine(self, action):\n next_node = Path(action.end, self.path_cost + action.cost, parent=self)\n return next_node",
"def concat(self, other):\n assert isinstance(other, Tuple)\n return Tuple(self.spaces + other.spaces)",
"def __mul__(self, other):\n if isinstance(other, Permutation):\n return Coset(other, self, dir='+')\n gens1 = [perm._array_form for perm in self.generators]\n gens2 = [perm._array_form for perm in other.generators]\n n1 = self._degree\n n2 = other._degree\n start = list(range(n1))\n end = list(range(n1, n1 + n2))\n for i in range(len(gens2)):\n gens2[i] = [x + n1 for x in gens2[i]]\n gens2 = [start + gen for gen in gens2]\n gens1 = [gen + end for gen in gens1]\n together = gens1 + gens2\n gens = [_af_new(x) for x in together]\n return PermutationGroup(gens)",
"def __add__(self, other):\n return union(self, other, check_convex=True)",
"def __add__(self, other):\n return self.concatenate(other)",
"def __add__(self, other):\n return self.concatenate(other)",
"def combine(self):\n # If the contents of this command should be hidden from the main .cfg,\n # discard them.\n if self.hide_children:\n return \"\"\n\n # Set the evaluation state of this instance to COMBINE, as its code has\n # been generated.\n self.eval_state = COMMAND_EVAL_COMBINE\n\n # output will store the contents of this instance; meaning its code and\n # the code of its children.\n output = []\n\n # Loop through children and evaluate them.\n for ch in self.children:\n # Only evaluate children if they haven't been yet (i.e., their eval\n # state is not COMMAND_EVAL_COMBINE)\n if ch.eval_state == COMMAND_EVAL_REGISTER:\n gen = ch.generate()\n if gen is not None:\n output.append('alias \"'+str(ch)+'\" \"'+gen+'\"')\n output.extend(ch.combine())\n\n return output",
"def union(self, p, q):\n self._validate(p)\n self._validate(q)\n p_parent = self.find(p)\n q_parent = self.find(q)\n if p_parent == q_parent:\n return\n for i in range(len(self.parents)):\n if self.parents[i] == q_parent:\n self.parents[i] = p_parent\n self.n -= 1",
"def __add__(self, other):\n\n other = formula(other, namespace=self.namespace)\n terms = self.terms + other.terms\n pieces = [(term.name, term) for term in terms]\n pieces.sort()\n terms = [piece[1] for piece in pieces]\n return formula(terms, namespace=self.namespace)",
"def __add__(self, other):\n if not other:\n return self.clone()\n else:\n return self.using(join(self, other))",
"def __or__(self, other):\n return self.union(other)",
"def __or__(self, other):\n return self.union(other)",
"def union(self, node1, node2):\n\n root1 = self.root(node1)\n root2 = self.root(node2)\n\n if root1 == root2:\n return\n\n if node1 < node2:\n self.set[root2] = root1\n self.root(node2)\n else:\n self.set[root1] = root2\n self.root(node1)",
"def union(self, node1, node2):\n root1 = self.find(node1)\n root2 = self.find(node2)\n if root1 != root2: # only merge if the connected components differ\n if self.ranks[root1] > self.ranks[root2]:\n self.parents[root2] = root1\n else:\n self.parents[root1] = root2\n if self.ranks[root1] == self.ranks[root2]:\n self.ranks[root2] += 1",
"def __mul__(self,other):\n return compositeORGenerator(left = self, right = other)",
"def union(self, a, b):\n if (a in self.node_id) and (b in self.node_id) and (self.node_id[a] != self.node_id[b]):\n self.merge(a, b)\n elif (a in self.node_id) or (b in self.node_id):\n self.add(a,b)\n else:\n self.create_new_group(a,b)",
"def plus(self, other):\n return self | other",
"def combine(a, b):\r\n if a.freq < b.freq:\r\n temp = b\r\n elif a.freq == b.freq:\r\n if a.char < b.char:\r\n temp = a\r\n else:\r\n temp = b\r\n else:\r\n temp = a\r\n new_node = HuffmanNode(temp.char , a.freq + b.freq)\r\n return new_node",
"def combine(a, b):\n newnode = None\n # not sure what to do if comes_before is false\n if a.char < b.char:\n newchar = a.char\n else:\n newchar = b.char\n newfreq = a.freq + b.freq\n newnode = HuffmanNode(newchar, newfreq)\n newnode.left = a\n newnode.right = b\n return newnode",
"def make_bprod(self):\n rhs1 = random.choice(self.nonterminals)\n rhs2 = random.choice(self.nonterminals)\n lhs = random.choice(self.nonterminals)\n return (lhs, (rhs1, rhs2))",
"def __add__(self, other):\n return Rabbit(0,self, other) # o-id, self-parent, other-otherparent",
"def concat(self):\n nfa2 = self.aut_stack.pop()\n nfa1 = self.aut_stack.pop()\n\n nfa1_star = nfa1.transform('X')\n nfa2_star = nfa2.transform('Y')\n\n nfa_concat = Automaton()\n nfa_concat.final = nfa2_star.final\n nfa_concat.q_0 = nfa1_star.q_0\n nfa_concat.states = list(set(nfa1_star.states).union(nfa2_star.states))\n nfa_concat.alphabet = list(set(nfa1_star.alphabet).union(nfa2_star.alphabet))\n nfa_concat.transition = dict(nfa1_star.transition, **nfa2_star.transition)\n for a in nfa1_star.final:\n key = a + ', .'\n if nfa_concat.transition.get(key, 0) == 0:\n nfa_concat.transition[key] = [nfa2_star.q_0]\n else:\n nfa_concat.transition[key].append(nfa2_star.q_0)\n\n self.aut_stack.append(nfa_concat)",
"def join_union(self, other):\n\n assert type(self) is type(other), 'Expected NestedRE instance'\n\n A = self.make_flat()\n B = other.make_flat()\n\n if A == B and A !='ϵ':\n return self.merge_union(A, [self.closure, other.closure])\n elif A == 'ϵ' and B == 'ϵ':\n return NestedRE('ϵ')\n elif A == 'ϵ':\n return NestedRE(B, '?')\n elif B == 'ϵ':\n return NestedRE(A, '?')\n else:\n return NestedRE( '(' + A + '|' + B + ')' )",
"def union(self, other):\n self.vertices.extend(other.vertices)\n self.edges.extend(other.edges)\n self.faces.extend(other.faces)\n return self"
] |
[
"0.5940539",
"0.5787354",
"0.5752724",
"0.57338166",
"0.5704518",
"0.56614375",
"0.56507266",
"0.5551194",
"0.55249304",
"0.5524432",
"0.5481714",
"0.5481714",
"0.5472268",
"0.5448143",
"0.54333144",
"0.5430127",
"0.54202735",
"0.54202735",
"0.5413891",
"0.5394813",
"0.5387263",
"0.53861094",
"0.5375067",
"0.53609353",
"0.5334022",
"0.52998424",
"0.5299444",
"0.52940273",
"0.5283183",
"0.5280947"
] |
0.756413
|
0
|
Predicts the popularity of a content item from a numpy array of its features; returns the string 'popular' if the output equals 1, 'unpopular' otherwise.
|
def predict(self,X):
if (int(self.classifier.predict(self.scaler.transform(X)))==-1):
return "popular"
else:
return "unpopular"
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def postprocess(output):\n text=''\n order = output.argsort()[::-1][:6]\n # print('\\n------- predictions --------')\n for i in range(1):\n # print ('prediction ' + str(i) + ' (probability ' + str(output[order[i]]*100) + '%) is ' + gNetworkCategories[order[i]] + ' label index is: ' + str(order[i]) )\n text=text+str(gNetworkCategories[order[i]])\n\n return text",
"def tag_pred(model, vectorized_input, feature_names, top_t, nb_tag_pred=10,\n threshold=0.15):\n\n # Topic df -----------------------------------------------------------------\n topic_df = display_topics2(model, feature_names)\n # associate each topic with a list of tags\n topics_kwords_df = topic_df.T #(topic_df.isin(top_t)*topic_df).T\n topic2tags_d = {}\n # tags_per_topic = []\n for topic in topics_kwords_df:\n tag_list = []\n for e in topics_kwords_df.loc[:, topic]:\n if e is not \"\":\n tag_list.append(e)\n topic2tags_d[topic] = tag_list\n\n # Create Document Vs Topic df ----------------------------------------------\n import numpy as npy\n model_output = model.transform(vectorized_input)\n topicnames = [\"Topic\" + str(i) for i in range(model.components_.shape[0])]\n docnames = [\"Post\" + str(i) for i in range(vectorized_input.shape[0])]\n df_document_topic = pd.DataFrame(npy.round(model_output, 2),\n columns=topicnames,\n index=docnames)\n\n # Tag predictions ----------------------------------------------------------\n tag_pred_l = []\n for post in df_document_topic.index:\n tags_post = []\n topics_proba = df_document_topic.loc[post, :]\n mask = topics_proba >= threshold\n topic_pred = list(df_document_topic.loc[post, mask].index)\n tot_proba = topics_proba[topic_pred].sum()\n\n # if no major topic in this post, propose just top 10 tags\n if len(topic_pred) == 0:\n tags_post = tags_post + top_t[0:nb_tag_pred].copy()\n else:\n for topic in topic_pred:\n # pic number of top elements ~ to proba of the topic\n nb_elements = int(round(topics_proba[topic]*10/tot_proba,0))\n tags_post = tags_post + topic2tags_d[topic][0:nb_elements].copy()\n tag_pred_l.append(tags_post)\n\n return tag_pred_l",
"def most_frequent_eval(test_set, pred_tags):\n gold_tag_seqs = []\n pred_tag_seqs = []\n for sent in test_set:\n words, true_tags = zip(*sent)\n gold_tag_seqs.append(true_tags)\n\n ### YOUR CODE HERE\n DEFAULT_TAG = 'O'\n \n pred_tags_list = []\n for word in words:\n tag = DEFAULT_TAG\n if word in pred_tags:\n tag = pred_tags[word]\n pred_tags_list.append(tag)\n pred_tag_seqs.append(tuple(pred_tags_list)) \n ### END CODE HERE\n\n return evaluate_ner(gold_tag_seqs, pred_tag_seqs)",
"def predict():\n to_predict = np.zeros(5).reshape(1, 5)\n features = ['is_male', 'num_interactions_with_cust_service', 'late_on_payment', 'age', 'years_in_contract']\n for i, feat in enumerate(features):\n if request.args.get(feat) is not None:\n to_predict[0][i] = request.args.get(feat)\n\n response = clf2.predict(to_predict)\n\n if response:\n return \"The customer is likely to churn\"\n else:\n return \"He is a loyal customer\"",
"def predict(self, sentence):\n data = pd.read_csv(StringIO(sentence), names=['review'])\n X = self.preprocess(data)\n Y = self.clf.predict_proba(X)\n \n return np.argmax(Y)",
"def classify(self, documents):\n predictions = []\n for doc in documents:\n\n score_sod = math.log(self.priorSOD)\n score_pop = math.log(self.priorPOP)\n for term in doc.tokens:\n if term in self.cond_prob_sod.keys():\n score_sod += math.log(self.cond_prob_sod[term])\n if term in self.cond_prob_pop.keys():\n score_pop += math.log(self.cond_prob_pop[term])\n if(score_pop >= score_sod): #defaults to ham if score = even \n predictions.append('pop')\n else:\n predictions.append('sod')\n \n return predictions \n pass",
"def get_recall(y_true, y_pred):\n true_entities_filter = (y_true != 3).astype(\"int\") # of the words that truly has a NER class\n recall_correct_entities = (y_pred[np.where(true_entities_filter)] == y_true[np.where(true_entities_filter)]).astype(\"int\")\n recall = np.sum(recall_correct_entities)/np.sum(true_entities_filter)\n return recall",
"def evaluate(predictions, documents):\n ###TODO\n x = 0.0\n y = 0 #true ham, classified spam\n z = 0 #true spam, classified ham\n \n for i in range(0,len(predictions)):\n if(documents[i].label == 'sod' and predictions[i] == 'pop'):\n z += 1\n if(documents[i].label == 'sod' and predictions[i] == 'pop'):\n y += 1 \n \n x = (len(documents) - (y+z)) / len(documents) \n return (x,y,z)\n pass",
"def get_popularity(rest_data, item_dict):\n max_review_count = rest_data.review_count.max()\n min_review_count = rest_data.review_count.min()\n result = np.zeros((len(rest_data), 2))\n for i in range(len(rest_data)):\n result[i, 0] = item_dict[rest_data.business_id[i]]\n result[i, 1] = (((rest_data.review_count[i] - min_review_count)/(max_review_count - min_review_count))*4 + 1)\n result = result[result[:, 0].argsort()]\n return result",
"def make_output_human_readable(\n self, predictions: torch.Tensor\n ) -> Dict[str, torch.Tensor]:\n all_predictions = predictions.cpu().data.numpy()\n if all_predictions.ndim == 3:\n predictions_list = [all_predictions[i] for i in range(all_predictions.shape[0])]\n else:\n predictions_list = [all_predictions]\n all_tags = []\n for predictions in predictions_list:\n outside_index = self.vocab.get_token_index(\"O\", namespace=self.task)\n\n # @AR: Get the thresholded matrix and prepare the prediction sequence\n pred_over_thresh = (predictions >= self.threshold) * predictions\n #print(pred_over_thresh)\n sequence_token_labels = []\n maxxx = numpy.argmax(predictions, axis=-1).tolist()\n\n # @AR: For each label set, check if to apply argmax or sigmoid thresh\n j=0\n for pred in pred_over_thresh:\n num_pred_over_thresh = numpy.count_nonzero(pred)\n if (num_pred_over_thresh == 0) or (num_pred_over_thresh == 1):\n pred_idx_list = [maxxx[j]]\n\n else:\n try:\n outside_position = pred_idx_list.index(outside_index)\n except ValueError:\n outside_position = -1\n # get ranked list\n tuples = [[score, idx] for idx, score in enumerate(pred) if score > self.threshold and idx != outside_position]\n # check for max_heads\n if self.max_heads != 0 and len(tuples) > self.max_heads:\n tuples = tuples[:self.max_heads]\n if len(tuples) == 0:\n tuples = [1.0, outside_position]\n pred_idx_list = [x[1] for x in tuples]\n \n\n sequence_token_labels.append(pred_idx_list)\n j += 1\n\n # @AR: Create the list of tags to append for the output\n tags = []\n for token_labels in sequence_token_labels:\n curr_labels = []\n for token_label in token_labels:\n curr_labels.append(\n self.vocab.get_token_from_index(token_label, namespace=self.task))\n tags.append(curr_labels)\n\n all_tags.append(tags)\n return all_tags",
"def probs_for_breeds_h5(breed_predictions):\n predictions = []\n for (x), value in np.ndenumerate(breed_predictions[0]):\n predictions.append([x[0], str(dog_names[x[0]]), value])\n sorted_predictions = sorted(predictions, key=itemgetter(2), reverse=True)\n for prediction in sorted_predictions[:5]:\n print dog_names[prediction[0]] + ': ' + str(prediction[2])",
"def classify_tweet(tweet):\n pred = classifier.predict(vectorizer.transform(np.array([tweet.text])))\n\n return str(pred[0])",
"def predict(self, xFeat):\n yHat = [] # variable to store the estimated class label\n # TODO\n matrix = xFeat.to_numpy()\n for row in matrix:\n node = self.root\n while node.split != None:\n feat, val = node.split\n if row[feat] >= val: \n node = node.left\n else:\n node = node.right\n #row = np.delete(row, feat, 0)\n yHat.append(majority(node.array))\n return yHat",
"def predLabel(self, DataMatrix):\n self.predict(DataMatrix)\n # Calculamos el valor mas alto, y a partir de este obtenemos el nombre de la etiqueta\n tags = [[self.classes[np.argmax(subrow)] for subrow in row] for row in self.data]\n return tags",
"def classification(original_training_data):\n\n ''' Storing the dataframe as numpy array '''\n original_training_data_values = original_training_data.values\n\n ''' Storing the values of target attribute for finding out the counts of each recipetype'''\n target_column = original_training_data_values[:, -1]\n\n ''' Recipe_type stores the unique values of target attribute in the form of a list [Muffin Cupcake] \n cupcake_muffin_count stores the count of muffin and cupcakes in the form of a list [451 451]'''\n recipe_type, cupcake_muffin_count = np.unique(target_column, return_counts=True)\n\n ''' cupcake_muffin_count.argmax() returns the index of the highest value. In this case, it will return the index of \n muffin or cupcake count. '''\n majority_class = recipe_type[cupcake_muffin_count.argmax()]\n\n return majority_class",
"def predict(probs):\n return np.argmax(probs, axis=0)",
"def predict(probs):\n # Your code here.\n return np.argmax(probs, axis=1)",
"def predict(self, X):\n X = check_tensor(X, dtype=np.float32, n_dim=4)\n res = self._predict_proba(X)[:, :, 0, 0]\n indices = np.argsort(res, axis=1)\n indices = indices[:, -self.top_n:]\n if self.output_strings:\n class_strings = np.empty_like(indices,\n dtype=object)\n for index, value in enumerate(indices.flat):\n class_strings.flat[index] = get_overfeat_class_label(value)\n return class_strings\n else:\n return indices",
"def predLabel(self, DataMatrix):\n self.predict(DataMatrix)\n # Calculamos el valor mas alto, y a partir de este obtenemos el nombre de la etiqueta\n tags = [self.classes[np.argmax(elem)] for elem in self.data]\n return tags",
"def predict(self, text):\n features = self.vectorizer.transform([text]).toarray()\n prediction = self.model.predict(features)\n most_likely = np.argmax(prediction, axis=1)\n most_likely = [self.map2wili(index) for index in most_likely]\n return most_likely[0]",
"def predict(self, features):\n scores = self.predict_proba(features)\n return self.classes[np.argmax(scores)]",
"def feature_selection_classifier_3(array2d):\n newArray2d = np.zeros([array2d.shape[0], 18])\n # female_items / female_items + male_items\n newArray2d[:, 0] = array2d[:, 4]\n # male_items / female_items + male_items\n newArray2d[:, 1] = array2d[:, 5]\n # wapp_items / items\n newArray2d[:, 2] = array2d[:, 7]\n # wftw_items / items\n newArray2d[:, 3] = array2d[:, 8]\n # mapp_items / items\n newArray2d[:, 4] = array2d[:, 9]\n # wacc_items / items\n newArray2d[:, 5] = array2d[:, 10]\n # macc_items / items\n newArray2d[:, 6] = array2d[:, 11]\n # mftw_items / items\n newArray2d[:, 7] = array2d[:, 12]\n # curvy_items / items\n newArray2d[:, 8] = array2d[:, 15]\n # msite_orders / orders\n newArray2d[:, 9] = array2d[:, 17]\n # desktop_orders / orders\n newArray2d[:, 10] = array2d[:, 18]\n # android_orders / orders\n newArray2d[:, 11] = array2d[:, 19]\n # ios_orders / orders\n newArray2d[:, 12] = array2d[:, 20]\n # other_device_orders / orders\n newArray2d[:, 13] = array2d[:, 21]\n # home_orders / orders\n newArray2d[:, 14] = array2d[:, 23]\n # other_collection_orders / orders\n newArray2d[:, 15] = array2d[:, 25]\n # average_discount_onoffer\n newArray2d[:, 16] = array2d[:, 26]\n # average_discount_used\n newArray2d[:, 17] = array2d[:, 27]\n return newArray2d",
"def predict_from_ndarray(self, image_array):\n gray_image = image_array\n if len(image_array.shape) > 2:\n gray_image = cv2.cvtColor(image_array, code=cv2.COLOR_BGR2GRAY)\n resized_image = cv2.resize(gray_image, self.target_dimensions, interpolation=cv2.INTER_LINEAR)\n final_image = np.array([np.array([resized_image]).reshape(list(self.target_dimensions)+[self.channels])])\n prediction = self.model.predict(final_image)\n # Return the dominant expression\n dominant_expression = self._print_prediction(prediction[0])\n return dominant_expression",
"def classifier(text):\n return random.choice([True, False])",
"def evaluate(self, featureset):\r\n #sequence, tag = featureset\r\n gs, labels = [], []\r\n for s, t in featureset:\r\n gs.append(t)\r\n label = self.tagger.choose_tag(s)\r\n labels.append(label)\r\n print (t, label)\r\n\r\n assert(len(gs) == len(labels))\r\n self.write_to_file(labels)\r\n words = self.tagger.test(self.r.test_sents, word=True)\r\n print (accuracy_score(gs, labels))",
"def ensemble(all_predict, size):\n mode_pred = np.zeros(shape=(size,1))\n for i in range(np.shape(all_predict)[1]):\n pred= mode(all_predict[:,i])\n # break ties randomly\n if pred[1] == 1:\n pred_val = random.randrange(2)\n else:\n pred_val = pred[0]\n mode_pred[i,0] = pred_val\n # return most common prediction\n return mode_pred",
"def getPredictions(self):\n\t\tself.bestLabel = self.testingProbs.apply(lambda x: x.argmax(),1)",
"def popularity(self, user_list):\n item_popular = Counter(self.train['movieId'].values)\n ret = 0\n n = 0\n print('\\nCalculate popularity: ')\n for user in tqdm(user_list):\n recom_data = self._get_recommend(user)\n for rec in set([data[0] for data in recom_data]):\n ret += math.log(1 + item_popular.get(rec))\n n += 1\n ret /= n * 1.0\n print('\\npopularity: ', ret)\n return ret",
"def calculate_most_popular(text, n_populars, steam=False):\n fdist = calculate_fdist(text, steam)\n term = []\n for key, value in fdist.items():\n term.append((key, value))\n term.sort(key=lambda x: int(x[1]), reverse=True)\n return term[:n_populars]",
"def test__extract_features(self):\n text_sample = \"I really really love this movie\"\n feature_sample = ['really','love','good']\n feature_score_type = \"presence\"\n model_sample = Model(feature_sample,feature_score_type)\n result_features = model_sample.extract_features(text_sample)\n assert_equal(result_features,{'really':1,'love':1,'good':0})\n feature_score_type = \"term_frequency\"\n model_sample = Model(feature_sample,feature_score_type)\n result_features = model_sample.extract_features(text_sample)\n assert_equal(result_features,{'really':2,'love':1,'good':0})"
] |
[
"0.6040967",
"0.5731728",
"0.57123053",
"0.56998587",
"0.56347734",
"0.5618509",
"0.56034917",
"0.5597564",
"0.5582467",
"0.5563664",
"0.553623",
"0.55235195",
"0.5491753",
"0.5479173",
"0.54705125",
"0.5467261",
"0.5463607",
"0.5447261",
"0.54347146",
"0.5416824",
"0.5407342",
"0.5389873",
"0.5382332",
"0.5356827",
"0.53444505",
"0.53420293",
"0.53417265",
"0.5330699",
"0.5321562",
"0.53172415"
] |
0.6847979
|
0
|
Performs prediction according to self.predict and prints the result to standard output.
|
def print_prediction_to_stdout(self,X):
sys.stdout.write(self.predict(X))
sys.stdout.flush()
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def predict():\n import trace\n trace.predict()",
"def run_prediction(self):\r\n self.get_prediction_indices()\r\n self.walk_forward_prediction()",
"def eval(self): \n inputs,enc_input_weights, outputs, dec_input_weights = self.get_batch()\n predicted_ids = self.model.step(self.sess, inputs, enc_input_weights) \n print(\"=\"*20)\n for i in range(FLAGS.batch_size):\n print(\"* %dth sample target: %s\" % (i,str(outputs[i,1:]-2)))\n for predict in predicted_ids[i]:\n print(\"prediction: \"+str(predict)) \n print(\"=\"*20)",
"def get_predictions():\n\n print(\"OK1\");\n print(\"OK2\");\n return;",
"def predict(self):\n self.canv.update()\n ps = self.canv.postscript(colormode='mono')\n img = Image.open(io.BytesIO(ps.encode('utf-8')))\n img.save('result.png')\n x = Predict.transform_image(self)\n \n #prediction with multivariate regression\n Y_hat_test = self.multivariate_model.predict([x])\n C_multivariate = map(np.argmax, Y_hat_test) # classification vector\n C_multivariate = list(C_multivariate)\n multivariate_predict = C_multivariate[0]\n\n \n #prediction with Linear Discriminant Analysis (LDA)\n lda_predict = self.lda_model.predict([x])[0]\n qda_predict = self.qda_model.predict([x])[0]\n log_predict = self.log_model.predict([x])[0]\n \n baseline_label = Label(self, text='Baseline: ' + str(multivariate_predict) )\n baseline_label.grid(row=0, column=1, padx=5, pady=5)\n lda_label = Label(self, text=' LDA: '+ str(lda_predict))\n lda_label.grid(row=0, column=2, padx=5, pady=5)\n qda_label = Label(self, text='QDA: '+ str(qda_predict))\n qda_label.grid(row=1, column=1, padx=5, pady=5)\n log_label = Label(self, text=' Logistic: '+str(log_predict))\n log_label.grid(row=1, column=2, padx=5, pady=5)",
"def predict(x):\n model = Model()\n res = model.predict([x])[0][0]\n click.echo(res)",
"def __call__(self, predictor_model) -> None:\n self.save_result(self.evaluate(predictor_model))",
"def print_prediction(self, prediction, params=None, stream=None):\n _write_lines(self.format_prediction(prediction, params=params), stream)",
"def predict(self, data):\n\t\tres = self.neuralNetworks.inference(self.dataCenter.process_inference_data(data))\n\t\tprint(res)",
"def test():\n # load dataset and model\n X, observed_y = load_data('../data/dev.txt')\n\n model = pickle.load(open('test.model', 'rb'))\n model.traverse()\n\n # predict labels for dataset\n preds = model.predict(X)\n\n # print(preds)\n # output model predictions\n np.savetxt('test.predictions', preds, fmt='%s')",
"def predict(self):\n train_vec, test_vec = self.get_tfidf_vectors()\n clf = self.get_classifier()\n\n print '-'*40\n print 'Making predictions ...'\n clf.fit(train_vec, self.train_ans)\n clf_predictions = clf.predict_proba(test_vec)\n\n print 'Storing predictions in', self.pred_file\n pred_out = [\"Id,predictions\"]\n num_pred = range(30)\n for fid, pred in zip(self.test_index, clf_predictions):\n top_rec = sorted(num_pred, key=lambda k: pred[k], reverse=True)[:3]\n pred_out.append(\"%s,%s\" % (fid, ' '.join( [clf.classes_[rec] for rec in top_rec] )))\n with open(self.pred_file, 'w') as f:\n f.write('%s\\n' % ('\\n'.join(pred_out)))",
"def post(self):\n # use parser and find the user's query\n args = parser.parse_args()\n title = args['title']\n author = model.encode_author(args['author'])\n text = args['text']\n\n X = model.vector_and_stack(title=title, text=text, author=author)\n\n prediction = model.predict(X)\n\n # Output either 'Negative' or 'Positive' along with the score\n if round(prediction[0]) == 0:\n pred_text = 'Reliable News'\n else:\n pred_text = 'Unreliable News'\n\n # round the predict proba value and set to new variable\n confidence = round(prediction[0], 3)\n\n # create JSON object\n output = {'prediction': pred_text, 'fake_rate': confidence}\n\n return output, 200",
"def predict(self, data):\n return self.result.predict(data)",
"def predict(self, state: State) -> None:\n print(str(self.policy.actor_model.predict(state.state_model_input(), batch_size=1).flatten()))\n return",
"def predict(self):\n raise NotImplementedError",
"def _predict(self, x):\n pass",
"def predict(self): \n return self.model.predict(self.test_x)",
"def predict(self):\n self.predicted_test_summary = []\n for step in xrange(0, self.test_size // self.test_batch_size):\n print 'Predicting Batch No.:', step\n offset = (step * self.test_batch_size) % self.test_size\n batch_data_fwd = self.X_tst_fwd[offset:(offset + self.test_batch_size), :].T\n batch_data_bwd = self.X_tst_bwd[offset:(offset + self.test_batch_size), :].T\n summary_test_out = self._predict_batch(batch_data_fwd, batch_data_bwd)\n self.predicted_test_summary.extend(summary_test_out)\n\n print 'Prediction Complete. Moving Forward..'\n\n # test answers\n self.test_review = self.X_tst_fwd\n self.predicted_test_summary = self.predicted_test_summary\n self.true_summary = self.Y_tst",
"def fit_predict(self):\n self.classifier = self.model\n self.classifier.fit(self.X_sample, self.y_sample)\n self.y_pred = self.classifier.predict(self.X_test)",
"def _predict(self, testX):\n pass",
"def predict():\n\n predict_cfg = get_predict_args()\n device = get_device()\n print(device)\n\n # load checkpoint\n ckpt_path = find_ckpt_in_directory(predict_cfg.ckpt)\n ckpt = torch.load(ckpt_path, map_location=device)\n best_iter = ckpt[\"best_iter\"]\n cfg = ckpt[\"cfg\"]\n aspect = cfg[\"aspect\"]\n\n for k, v in cfg.items():\n print(\"{:20} : {:10}\".format(k, str(v)))\n\n eval_batch_size = 64\n\n print(\"Loading data\")\n dev_data = list(beer_reader(cfg[\"dev_path\"]))\n test_data = beer_annotations_reader(cfg[\"test_path\"], aspect=aspect)\n\n print(\"dev\", len(dev_data))\n print(\"test\", len(test_data))\n\n print(\"Loading pre-trained word embeddings\")\n vocab = Vocabulary()\n vectors = load_embeddings(cfg[\"embeddings\"], vocab) # required for vocab\n\n # build model\n model = build_model(cfg[\"model\"], vocab, cfg=cfg)\n\n # load parameters from checkpoint into model\n print(\"Loading saved model..\")\n model.load_state_dict(ckpt[\"state_dict\"])\n model.to(device)\n print(\"Done\")\n\n print(model)\n print_parameters(model)\n\n print(\"Evaluating\")\n dev_eval = evaluate_loss(\n model, dev_data, batch_size=eval_batch_size,\n device=device, cfg=cfg)\n test_eval = evaluate_loss(\n model, test_data, batch_size=eval_batch_size,\n device=device, cfg=cfg)\n\n if hasattr(model, \"z\"):\n path = os.path.join(\n cfg[\"save_path\"], \"final_rationales.txt\")\n test_precision, test_macro_prec = evaluate_rationale(\n model, test_data, aspect=aspect, device=device,\n batch_size=eval_batch_size, path=path)\n else:\n test_precision = 0.\n test_macro_prec = 0.\n test_eval[\"precision\"] = test_precision\n test_eval[\"macro_precision\"] = test_macro_prec\n\n dev_s = make_kv_string(dev_eval)\n test_s = make_kv_string(test_eval)\n\n print(\"best model iter {:d} dev {} test {}\".format(\n best_iter, dev_s, test_s))",
"def walk_forward_prediction(self):\r\n for output_name in self.output_names:\r\n print('\\t\\t\\t|--Prediction type: {}'.format(output_name))\r\n predictions_by_model = {}\r\n pred_metadata_by_model = {}\r\n \r\n print('\\t\\t\\t\\t|--SVM Model')\r\n svm = SupportVectorMachine()\r\n svm.pred_indices = self.pred_indices\r\n svm.full_df = self.full_df\r\n svm.feature_names = self.feature_names\r\n svm.output_name = output_name\r\n svm.svm_optimal_params = self.optimal_params_by_output[output_name]['SVM']\r\n svm.run_svm_prediction()\r\n predictions_by_model['SVM'] = svm.svm_predictions\r\n pred_metadata_by_model['SVM'] = svm.metadata\r\n \r\n self.predictions_by_output[output_name] = predictions_by_model\r\n self.pred_metadata_by_output[output_name] = pred_metadata_by_model",
"def evaluate(self, prediction_fn):\n pass",
"def evaluate(self):\n predictions = self.model.predict(self.test[0])\n accuracy = accuracy_score(self.test[1], predictions)\n print(\"Accuracy:\", str(accuracy * 100) + \"%\")\n self.plot_results(predictions)",
"def test_predict():\n args = get_layer('predict', 'manual', 'temporal', False, False, window=2, step_size=3)\n run_layer(*args)",
"def draw_predictions(self):\n self.vis.draw_predictions()",
"def predict(self, **kwargs):\n raise NotImplementedError",
"def predictions(self, predictions):\n\n self._predictions = predictions",
"def predict(self):\n path = self._artifact_repo.artifact_path(self._ARTIFACT_MODEL)\n model = tf.keras.models.load_model(path)\n\n _, _, x_test, y_test = self._load_data()\n x_test = tf.keras.utils.normalize(x_test, axis=1)\n\n preds = model.predict(x_test)\n self._show_cf_matrix(np.array([np.argmax(probas) for probas in preds]), y_test)",
"def __do_predict(self, request, features):\n dmp_predictor.DmpPredictor().predict(request, features)\n\n return defines.ReturnCode.SUCC"
] |
[
"0.76890785",
"0.760489",
"0.73122084",
"0.70064574",
"0.6908278",
"0.69034",
"0.6888294",
"0.68732065",
"0.68638957",
"0.6861249",
"0.6858539",
"0.6853077",
"0.6826687",
"0.68194574",
"0.681267",
"0.6812196",
"0.6808107",
"0.6804206",
"0.6801972",
"0.67959994",
"0.67828244",
"0.67607844",
"0.67504734",
"0.672749",
"0.6720201",
"0.67148846",
"0.66753125",
"0.6675266",
"0.66715896",
"0.66618836"
] |
0.77586794
|
1
|
Performs prediction according to self.predict and prints the result to standard output.
|
def print_prediction_to_stdout(self,X):
sys.stdout.write(self.predict(X))
sys.stdout.flush()
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def predict():\n import trace\n trace.predict()",
"def run_prediction(self):\r\n self.get_prediction_indices()\r\n self.walk_forward_prediction()",
"def eval(self): \n inputs,enc_input_weights, outputs, dec_input_weights = self.get_batch()\n predicted_ids = self.model.step(self.sess, inputs, enc_input_weights) \n print(\"=\"*20)\n for i in range(FLAGS.batch_size):\n print(\"* %dth sample target: %s\" % (i,str(outputs[i,1:]-2)))\n for predict in predicted_ids[i]:\n print(\"prediction: \"+str(predict)) \n print(\"=\"*20)",
"def get_predictions():\n\n print(\"OK1\");\n print(\"OK2\");\n return;",
"def predict(self):\n self.canv.update()\n ps = self.canv.postscript(colormode='mono')\n img = Image.open(io.BytesIO(ps.encode('utf-8')))\n img.save('result.png')\n x = Predict.transform_image(self)\n \n #prediction with multivariate regression\n Y_hat_test = self.multivariate_model.predict([x])\n C_multivariate = map(np.argmax, Y_hat_test) # classification vector\n C_multivariate = list(C_multivariate)\n multivariate_predict = C_multivariate[0]\n\n \n #prediction with Linear Discriminant Analysis (LDA)\n lda_predict = self.lda_model.predict([x])[0]\n qda_predict = self.qda_model.predict([x])[0]\n log_predict = self.log_model.predict([x])[0]\n \n baseline_label = Label(self, text='Baseline: ' + str(multivariate_predict) )\n baseline_label.grid(row=0, column=1, padx=5, pady=5)\n lda_label = Label(self, text=' LDA: '+ str(lda_predict))\n lda_label.grid(row=0, column=2, padx=5, pady=5)\n qda_label = Label(self, text='QDA: '+ str(qda_predict))\n qda_label.grid(row=1, column=1, padx=5, pady=5)\n log_label = Label(self, text=' Logistic: '+str(log_predict))\n log_label.grid(row=1, column=2, padx=5, pady=5)",
"def predict(x):\n model = Model()\n res = model.predict([x])[0][0]\n click.echo(res)",
"def __call__(self, predictor_model) -> None:\n self.save_result(self.evaluate(predictor_model))",
"def print_prediction(self, prediction, params=None, stream=None):\n _write_lines(self.format_prediction(prediction, params=params), stream)",
"def predict(self, data):\n\t\tres = self.neuralNetworks.inference(self.dataCenter.process_inference_data(data))\n\t\tprint(res)",
"def test():\n # load dataset and model\n X, observed_y = load_data('../data/dev.txt')\n\n model = pickle.load(open('test.model', 'rb'))\n model.traverse()\n\n # predict labels for dataset\n preds = model.predict(X)\n\n # print(preds)\n # output model predictions\n np.savetxt('test.predictions', preds, fmt='%s')",
"def predict(self):\n train_vec, test_vec = self.get_tfidf_vectors()\n clf = self.get_classifier()\n\n print '-'*40\n print 'Making predictions ...'\n clf.fit(train_vec, self.train_ans)\n clf_predictions = clf.predict_proba(test_vec)\n\n print 'Storing predictions in', self.pred_file\n pred_out = [\"Id,predictions\"]\n num_pred = range(30)\n for fid, pred in zip(self.test_index, clf_predictions):\n top_rec = sorted(num_pred, key=lambda k: pred[k], reverse=True)[:3]\n pred_out.append(\"%s,%s\" % (fid, ' '.join( [clf.classes_[rec] for rec in top_rec] )))\n with open(self.pred_file, 'w') as f:\n f.write('%s\\n' % ('\\n'.join(pred_out)))",
"def post(self):\n # use parser and find the user's query\n args = parser.parse_args()\n title = args['title']\n author = model.encode_author(args['author'])\n text = args['text']\n\n X = model.vector_and_stack(title=title, text=text, author=author)\n\n prediction = model.predict(X)\n\n # Output either 'Negative' or 'Positive' along with the score\n if round(prediction[0]) == 0:\n pred_text = 'Reliable News'\n else:\n pred_text = 'Unreliable News'\n\n # round the predict proba value and set to new variable\n confidence = round(prediction[0], 3)\n\n # create JSON object\n output = {'prediction': pred_text, 'fake_rate': confidence}\n\n return output, 200",
"def predict(self, data):\n return self.result.predict(data)",
"def predict(self, state: State) -> None:\n print(str(self.policy.actor_model.predict(state.state_model_input(), batch_size=1).flatten()))\n return",
"def predict(self):\n raise NotImplementedError",
"def _predict(self, x):\n pass",
"def predict(self): \n return self.model.predict(self.test_x)",
"def predict(self):\n self.predicted_test_summary = []\n for step in xrange(0, self.test_size // self.test_batch_size):\n print 'Predicting Batch No.:', step\n offset = (step * self.test_batch_size) % self.test_size\n batch_data_fwd = self.X_tst_fwd[offset:(offset + self.test_batch_size), :].T\n batch_data_bwd = self.X_tst_bwd[offset:(offset + self.test_batch_size), :].T\n summary_test_out = self._predict_batch(batch_data_fwd, batch_data_bwd)\n self.predicted_test_summary.extend(summary_test_out)\n\n print 'Prediction Complete. Moving Forward..'\n\n # test answers\n self.test_review = self.X_tst_fwd\n self.predicted_test_summary = self.predicted_test_summary\n self.true_summary = self.Y_tst",
"def fit_predict(self):\n self.classifier = self.model\n self.classifier.fit(self.X_sample, self.y_sample)\n self.y_pred = self.classifier.predict(self.X_test)",
"def _predict(self, testX):\n pass",
"def predict():\n\n predict_cfg = get_predict_args()\n device = get_device()\n print(device)\n\n # load checkpoint\n ckpt_path = find_ckpt_in_directory(predict_cfg.ckpt)\n ckpt = torch.load(ckpt_path, map_location=device)\n best_iter = ckpt[\"best_iter\"]\n cfg = ckpt[\"cfg\"]\n aspect = cfg[\"aspect\"]\n\n for k, v in cfg.items():\n print(\"{:20} : {:10}\".format(k, str(v)))\n\n eval_batch_size = 64\n\n print(\"Loading data\")\n dev_data = list(beer_reader(cfg[\"dev_path\"]))\n test_data = beer_annotations_reader(cfg[\"test_path\"], aspect=aspect)\n\n print(\"dev\", len(dev_data))\n print(\"test\", len(test_data))\n\n print(\"Loading pre-trained word embeddings\")\n vocab = Vocabulary()\n vectors = load_embeddings(cfg[\"embeddings\"], vocab) # required for vocab\n\n # build model\n model = build_model(cfg[\"model\"], vocab, cfg=cfg)\n\n # load parameters from checkpoint into model\n print(\"Loading saved model..\")\n model.load_state_dict(ckpt[\"state_dict\"])\n model.to(device)\n print(\"Done\")\n\n print(model)\n print_parameters(model)\n\n print(\"Evaluating\")\n dev_eval = evaluate_loss(\n model, dev_data, batch_size=eval_batch_size,\n device=device, cfg=cfg)\n test_eval = evaluate_loss(\n model, test_data, batch_size=eval_batch_size,\n device=device, cfg=cfg)\n\n if hasattr(model, \"z\"):\n path = os.path.join(\n cfg[\"save_path\"], \"final_rationales.txt\")\n test_precision, test_macro_prec = evaluate_rationale(\n model, test_data, aspect=aspect, device=device,\n batch_size=eval_batch_size, path=path)\n else:\n test_precision = 0.\n test_macro_prec = 0.\n test_eval[\"precision\"] = test_precision\n test_eval[\"macro_precision\"] = test_macro_prec\n\n dev_s = make_kv_string(dev_eval)\n test_s = make_kv_string(test_eval)\n\n print(\"best model iter {:d} dev {} test {}\".format(\n best_iter, dev_s, test_s))",
"def walk_forward_prediction(self):\r\n for output_name in self.output_names:\r\n print('\\t\\t\\t|--Prediction type: {}'.format(output_name))\r\n predictions_by_model = {}\r\n pred_metadata_by_model = {}\r\n \r\n print('\\t\\t\\t\\t|--SVM Model')\r\n svm = SupportVectorMachine()\r\n svm.pred_indices = self.pred_indices\r\n svm.full_df = self.full_df\r\n svm.feature_names = self.feature_names\r\n svm.output_name = output_name\r\n svm.svm_optimal_params = self.optimal_params_by_output[output_name]['SVM']\r\n svm.run_svm_prediction()\r\n predictions_by_model['SVM'] = svm.svm_predictions\r\n pred_metadata_by_model['SVM'] = svm.metadata\r\n \r\n self.predictions_by_output[output_name] = predictions_by_model\r\n self.pred_metadata_by_output[output_name] = pred_metadata_by_model",
"def evaluate(self, prediction_fn):\n pass",
"def evaluate(self):\n predictions = self.model.predict(self.test[0])\n accuracy = accuracy_score(self.test[1], predictions)\n print(\"Accuracy:\", str(accuracy * 100) + \"%\")\n self.plot_results(predictions)",
"def test_predict():\n args = get_layer('predict', 'manual', 'temporal', False, False, window=2, step_size=3)\n run_layer(*args)",
"def draw_predictions(self):\n self.vis.draw_predictions()",
"def predict(self, **kwargs):\n raise NotImplementedError",
"def predictions(self, predictions):\n\n self._predictions = predictions",
"def predict(self):\n path = self._artifact_repo.artifact_path(self._ARTIFACT_MODEL)\n model = tf.keras.models.load_model(path)\n\n _, _, x_test, y_test = self._load_data()\n x_test = tf.keras.utils.normalize(x_test, axis=1)\n\n preds = model.predict(x_test)\n self._show_cf_matrix(np.array([np.argmax(probas) for probas in preds]), y_test)",
"def __do_predict(self, request, features):\n dmp_predictor.DmpPredictor().predict(request, features)\n\n return defines.ReturnCode.SUCC"
] |
[
"0.76890785",
"0.760489",
"0.73122084",
"0.70064574",
"0.6908278",
"0.69034",
"0.6888294",
"0.68732065",
"0.68638957",
"0.6861249",
"0.6858539",
"0.6853077",
"0.6826687",
"0.68194574",
"0.681267",
"0.6812196",
"0.6808107",
"0.6804206",
"0.6801972",
"0.67959994",
"0.67828244",
"0.67607844",
"0.67504734",
"0.672749",
"0.6720201",
"0.67148846",
"0.66753125",
"0.6675266",
"0.66715896",
"0.66618836"
] |
0.77586794
|
0
|
Preprocess the data further by shuffling inputs and labels randomly
|
def random_preprocessing(inputs, labels):
indices = range(0, labels.shape[0])
shuffled_indices = tf.random.shuffle(indices)
inputs = tf.gather(inputs, shuffled_indices)
labels = tf.gather(labels, shuffled_indices)
return inputs, labels
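A minimal usage sketch, assuming TensorFlow 2.x tensors for a small image-classification batch (shapes and sizes are illustrative):

    import tensorflow as tf
    inputs = tf.random.normal([100, 32, 32, 3])
    labels = tf.one_hot(tf.random.uniform([100], maxval=10, dtype=tf.int32), depth=10)
    inputs, labels = random_preprocessing(inputs, labels)  # same pairs, new order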
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def shuffle_train(self):\r\n if self.data_container.task == 'Classify':\r\n id_train_list=[]\r\n for i in self.idx_train_list:\r\n id_train_list.append(self._random_state.choice(i,self.train_parms[0]))\r\n for j in self._random_state.choice(self.unique_value, self.train_parms[1]):\r\n id_train_list.append(self._random_state.choice(self.idx_train_list[j],1))\r\n self.idx['train'] = np.concatenate(id_train_list, axis=0)\r\n \r\n self.idx['train'] = self._random_state.permutation(self.idx['train'])",
"def shuffle_data(self):\n images = list(self.train_images)\n labels = list(self.train_labels)\n self.train_images = []\n self.train_labels = []\n\n # create list of permutated index and shuffle data accoding to list\n idx = np.random.permutation(len(labels))\n for i in idx:\n self.train_images.append(images[i])\n self.train_labels.append(labels[i])",
"def preprocess(self):\n lines = [line.rstrip() for line in open(self.attr_path, 'r')]\n all_attr_names = lines[1].split()\n for i, attr_name in enumerate(all_attr_names):\n self.attr2idx[attr_name] = i\n self.idx2attr[i] = attr_name\n\n lines = lines[2:]\n random.seed(1234)\n random.shuffle(lines)\n for i, line in enumerate(lines):\n split = line.split()\n filename = split[0]\n values = split[1:]\n\n label = []\n for attr_name in self.selected_attrs:\n idx = self.attr2idx[attr_name]\n label.append(values[idx] == '1')\n\n if (i+1) < 4:\n self.test_dataset.append([filename, label])\n else:\n self.train_dataset.append([filename, label])",
"def pre_process_data(self, all_labels, all_data):\n\n # [1] Normalizes data\n all_data = self.pre_precess_manager.normalization(all_data)\n\n data_train, data_test, label_train, label_test = train_test_split(all_data, all_labels, test_size=0.1,\n shuffle=True)\n\n return data_train, data_test, label_train, label_test",
"def inlabel_shuffle(data):\n num_zero_data = np.sum(data[:,-1]==0)\n label_zero_data = data[:num_zero_data,:]\n label_one_data = data[num_zero_data:,:]\n np.random.shuffle(label_zero_data)\n np.random.shuffle(label_one_data)\n return data",
"def preprocess(self):\n \n file_name_list = os.listdir(self.image_dir)\n random.seed(1234)\n random.shuffle(file_name_list)\n \n for i,d in enumerate(self.domains):\n self.attr2idx[d]=i \n\n for i, file_name in enumerate(file_name_list):\n if (file_name.startswith('X_')):\n continue\n \n parts = file_name.split(\"-\")\n label = int(parts[0])\n if label not in self.domains:\n continue\n img_name = file_name\n\n count=self.get_sample_count(label)\n if count<self.valid_set_size:\n # create holdout set on the fly\n utils.copy_file(self.image_dir,self.valid_set_dir,img_name)\n else:\n self.dataset.append([img_name, self.attr2idx[label]])\n \n self.increment_sample_count(label)\n\n print(\"Sample count per domain: \"+str(self.sample_count)+\" (including holdout set, holdout size per domain is: \"+str(self.valid_set_size)+\")\")\n print('Finished preprocessing the dataset...')",
"def preprocess():\n # Load the data\n random.seed(77)\n X,y = make_classification(n_samples=500, n_features=30, n_informative=8, n_redundant=2, \n n_repeated=0, n_classes=3, n_clusters_per_class=2, weights=None, \n flip_y=0.01, class_sep=1.0, hypercube=True, shift=0.0, scale=1.0, \n shuffle=True, random_state=None)\n\n x_train, x_val, y_train, y_val = train_test_split(X, y, random_state=0, test_size=0.25)\n\n # Standardize the data\n scaler = StandardScaler()\n X_train = scaler.fit_transform(x_train)\n X_val = scaler.transform(x_val)\n\n \n return X_train,y_train,X_val,y_val",
"def shuffle(self):\n self.train_nodes = np.random.permutation(self.train_nodes)\n self.batch_num = 0",
"def shuffle(self):\n self.train_nodes = np.random.permutation(self.train_nodes)\n self.batch_num = 0",
"def generate_data(data, model, samples, targeted=True, target_num=9, start=0, inception=False, seed=3, handpick=False ):\n random.seed(seed)\n inputs = []\n targets = []\n labels = []\n true_ids = []\n sample_set = []\n\n data_d = data.test_data\n labels_d = data.test_labels\n\n if handpick:\n if inception:\n deck = list(range(0, 1500))\n else:\n deck = list(range(0, 10000))\n random.shuffle(deck)\n print('Handpicking')\n\n while (len(sample_set) < samples):\n rand_int = deck.pop()\n pred = model.model.predict(data_d[rand_int:rand_int + 1])\n\n if inception:\n pred = np.reshape(pred, (labels_d[0:1].shape))\n\n if (np.argmax(pred, 1) == np.argmax(labels_d[rand_int:rand_int + 1], 1)):\n sample_set.append(rand_int)\n print('Handpicked')\n else:\n sample_set = random.sample(range(0, 10000), samples)\n\n for i in sample_set:\n if targeted:\n if inception:\n seq = random.sample(range(1, 1001), target_num)\n else:\n seq = range(labels_d.shape[1])\n\n for j in seq:\n if (j == np.argmax(labels_d[start + i])) and (inception == False):\n continue\n inputs.append(data_d[start + i])\n targets.append(np.eye(labels_d.shape[1])[j])\n labels.append(labels_d[start + i])\n true_ids.append(start + i)\n else:\n inputs.append(data_d[start + i])\n targets.append(labels_d[start + i])\n labels.append(labels_d[start + i])\n true_ids.append(start + i)\n\n inputs = np.array(inputs)\n targets = np.array(targets)\n labels = np.array(labels)\n true_ids = np.array(true_ids)\n return inputs, targets, labels, true_ids",
"def shuffle(self):\n perm = self.rng.permutation(self.inputs.shape[0])\n self._current_order = self._current_order[perm]\n self.inputs = self.inputs[perm]\n self.targets = self.targets[perm]\n self.target_ids = self.target_ids[perm]",
"def preprocess_data(self):\n\n self._preprocess_train_data()\n self._preprocess_test_data()",
"def shuffle(self):\n perm = self.rng.permutation(self.inputs.shape[0])\n self._current_order = self._current_order[perm]\n self.inputs = self.inputs[perm]\n self.targets = self.targets[perm]",
"def shuffle_labels(self):\n random.shuffle(self.y_train)\n random.shuffle(self.y_test)",
"def shuffle(self):\n self.train_edges = np.random.permutation(self.train_edges)\n self.nodes = np.random.permutation(self.nodes)\n self.batch_num = 0",
"def prep_data(self):\n\n self.fit_tokenizer(texts=self.texts)\n sequences = self.get_sequences(self.texts)\n self.text_data = pad_sequences(sequences, maxlen=self.MAX_SEQUENCE_LENGTH)\n\n self.labels = to_categorical(np.asarray(self.labels))\n print('Shape of data tensor:', self.text_data.shape)\n print('Shape of label tensor:', self.labels.shape)\n\n # split the data into a training set and a validation set\n indices = np.arange(self.text_data.shape[0])\n np.random.shuffle(indices)\n self.text_data = self.text_data[indices]\n self.labels = self.labels[indices]\n nb_validation_samples = int(self.VALIDATION_SPLIT * self.text_data.shape[0])\n\n x_train = self.text_data[:-nb_validation_samples]\n y_train = self.labels[:-nb_validation_samples]\n x_val = self.text_data[-nb_validation_samples:]\n y_val = self.labels[-nb_validation_samples:]\n\n return x_train,y_train, x_val, y_val",
"def load_data_preprocess(self):\n\n print(\"Loading the dataset ...\")\n # load the data\n c_util = CarUtils()\n train_x, train_y, test_x, test_y, classes = c_util.load_data()\n\n # set the image ordering\n K.set_image_dim_ordering(\"th\")\n\n print(\"Pre-processing the dataset ...\")\n # pre-process the data\n train_x = train_x.astype('float32')\n test_x = test_x.astype('float32')\n\n train_x = train_x / 255\n test_x = test_x / 255\n\n print(train_x.shape[0], ' train samples')\n print(test_x.shape[0], ' test samples')\n\n train_y = np_utils.to_categorical(train_y, CarsClassifierModel._nb_classes)\n test_y = np_utils.to_categorical(test_y, CarsClassifierModel._nb_classes)\n\n return train_x, train_y, test_x, test_y",
"def reset(self):\n self.epochs = 0\n # Shuffle the training data\n perm = np.arange(self.num_train)\n np.random.shuffle(perm)\n assert self.num_train == self.train_images.shape[\n 0], 'Error incorrect shuffling mask'\n self.train_images = self.train_images[perm]\n self.train_labels = self.train_labels[perm]\n self.curr_train_index = 0",
"def load_data_and_labels_without_shuffled():\n # Load data from files\n with codecs.open('./data/train_pos.txt', 'r+', 'utf-8') as f:\n train_pos = f.readlines()\n with codecs.open('./data/dev_pos.txt', 'r+', 'utf-8') as f:\n dev_pos = f.readlines()\n with codecs.open('./data/train_neg.txt', 'r+', 'utf-8') as f:\n train_neg = f.readlines()\n with codecs.open('./data/dev_neg.txt', 'r+', 'utf-8') as f:\n dev_neg = f.readlines()\n\n positive_examples1 = []\n positive_examples2 = []\n negative_examples1 = []\n negative_examples2 = []\n\n for i in train_pos:\n item1, item2 = i.split('\\t')\n item1 = remove_stop_word(item1)\n item2 = remove_stop_word(item2)\n positive_examples1.append(item1)\n positive_examples2.append(item2)\n\n for i in train_neg:\n item1, item2 = i.split('\\t')\n item1 = remove_stop_word(item1)\n item2 = remove_stop_word(item2)\n negative_examples1.append(item1)\n negative_examples2.append(item2)\n\n # Split by words\n x_text_train1 = positive_examples1 + negative_examples1\n x_text_train2 = positive_examples2 + negative_examples2\n\n positive_dev1 = []\n positive_dev2 = []\n negative_dev1 = []\n negative_dev2 = []\n\n for i in dev_pos:\n item1, item2 = i.split('\\t')\n item1 = remove_stop_word(item1)\n item2 = remove_stop_word(item2)\n positive_dev1.append(item1)\n positive_dev2.append(item2)\n\n for i in dev_neg:\n item1, item2 = i.split('\\t')\n item1 = remove_stop_word(item1)\n item2 = remove_stop_word(item2)\n negative_dev1.append(item1)\n negative_dev2.append(item2)\n\n x_text_dev1 = positive_dev1 + negative_dev1\n x_text_dev2 = positive_dev2 + negative_dev2\n\n # Generate labels\n train_positive_labels = [[0, 1] for _ in train_pos]\n dev_positive_labels = [[0, 1] for _ in dev_pos]\n train_negative_labels = [[1, 0] for _ in train_neg]\n dev_negative_labels = [[1, 0] for _ in dev_neg]\n y_train = np.concatenate([train_positive_labels, train_negative_labels], 0)\n y_dev = np.concatenate([dev_positive_labels, dev_negative_labels], 0)\n\n return [x_text_train1, x_text_train2, x_text_dev1, x_text_dev2, y_train, y_dev]",
"def train_eval_rnd(self, train_data, test_data, rand_iter, train_labels, test_labels):\n np.random.seed(0)\n shuf_tr = [np.random.permutation(train_labels)\n for _ in range(rand_iter)]\n misclass_tr, misclass_ts = [], []\n for lab in shuf_tr:\n self.class_method.fit(train_data, lab)\n misclass_tr.append(zero_one_loss(lab, self.class_method.predict(train_data)))\n misclass_ts.append(zero_one_loss(test_labels,\n _kuhn_munkres_algorithm(test_labels,\n self.class_method.predict(test_data))))\n return np.mean(misclass_tr), np.mean(misclass_ts)\n # start = time.process_time()\n # np.random.seed(0)\n # shuf_tr, shuf_val = zip(*[list(map(lambda x: np.random.permutation(x),\n # [tr_labels, val_labels])) for _ in range(rand_iter)])\n # # print(f'Shuffle train:{shuf_tr}, Shuffle val:{shuf_val}')\n # # logging.info(f'Shuffle labels: {time.process_time()-start}s')\n # # part = time.process_time()\n # model_tr = [self.class_method.fit(train_data, lab) for lab in shuf_tr]\n # # print(f'Models:{model_tr}')\n # # logging.info(f'Train model: {time.process_time()-part}s')\n # # part = time.process_time()\n # misclass_tr = [zero_one_loss(x, y.predict(train_data)) for x, y in\n # zip(shuf_tr, model_tr)]\n # # print(f'Misclassification tr:{misclass_tr}')\n # # logging.info(f'Misclass TR: {time.process_time()-part}s')\n # # part = time.process_time()\n # misclass_val = [zero_one_loss(x, _kuhn_munkres_algorithm(x,\n # y.predict(val_data))) for x, y in\n # zip(shuf_val, model_tr)]\n # # print(f'Misclassification val:{misclass_val}')\n # # logging.info(f'Misclass VAL: {time.process_time()-part}s')\n # return np.mean(misclass_tr), np.mean(misclass_val)\n # misc_avg_tr, misc_avg_ts = [], []\n # append_tr = misc_avg_tr.append\n # append_ts = misc_avg_ts.append\n # for it in range(rand_iter):\n # np.random.seed(0)\n # np.random.shuffle(tr_labels)\n # np.random.seed(0)\n # np.random.shuffle(val_labels)\n # model_tr = self.class_method.fit(train_data, tr_labels)\n # misclass_tr = zero_one_loss(tr_labels,\n # self.class_method.predict(train_data))\n # misclass_ts = zero_one_loss(val_labels,\n # _kuhn_munkres_algorithm(val_labels,\n # model_tr.predict(val_data)))\n # append_tr(misclass_tr)\n # append_ts(misclass_ts)\n # return np.mean(misc_avg_tr), np.mean(misc_avg_ts)",
"def preprocess(self, data, attr):\n # If num_workers > 0, use new RNG with unique seed for each thread.\n # Else, use default RNG.\n if torch.utils.data.get_worker_info():\n seedseq = np.random.SeedSequence(\n torch.utils.data.get_worker_info().seed +\n torch.utils.data.get_worker_info().id)\n rng = np.random.default_rng(seedseq.spawn(1)[0])\n else:\n rng = self.rng\n\n points = np.array(data['point'], dtype=np.float32)\n\n if 'label' not in data or data['label'] is None:\n labels = np.zeros((points.shape[0],), dtype=np.int32)\n else:\n labels = np.array(data['label'], dtype=np.int32).reshape((-1,))\n\n if 'feat' not in data or data['feat'] is None:\n feat = points.copy()\n else:\n feat = np.array(data['feat'], dtype=np.float32)\n\n if attr['split'] in ['training', 'train']:\n points, feat, labels = self.augmenter.augment(\n points, feat, labels, self.cfg.get('augment', None))\n\n points -= np.min(points, 0)\n\n feat = feat / 255.0 # Normalize to [0, 1]\n\n max_points_x = np.max(points[:, 0])\n max_points_y = np.max(points[:, 1])\n max_points_z = np.max(points[:, 2])\n\n x, y, z = np.split(points, (1, 2), axis=-1)\n norm_x = x / max_points_x\n norm_y = y / max_points_y\n norm_z = z / max_points_z\n\n feat = np.concatenate([x, y, z, feat, norm_x, norm_y, norm_z], axis=-1)\n\n choices = rng.choice(points.shape[0],\n self.cfg.num_points,\n replace=(points.shape[0] < self.cfg.num_points))\n points = points[choices].transpose()\n feat = feat[choices].transpose()\n labels = labels[choices]\n\n data = {}\n data['point'] = points\n data['feat'] = feat\n data['label'] = labels\n\n return data",
"def bootstrap_data(self):\n for i in range(self.bootstraps):\n df_i = self.training_df.groupby(\n self.random_effect, group_keys=False\n ).apply(\n lambda x: x.sample(len(x), replace=True)\n )\n self.models.append(self.convert(df=df_i))",
"def shuffle(self):\n self.x['train'], self.y['train'] = self._shuffle(\n self.x['train'],\n self.y['train']\n )",
"def _shuffle(self, reinit_indexes = False):\n print('Shuffling data...')\n # set seed for reproducibility\n #random.seed()\n # shuffle identities\n random.shuffle(self.identities)\n # shuffle images associated to each identity\n for identity in self.groundtruth_metadata.keys():\n random.shuffle(self.groundtruth_metadata[identity]['metadata'])\n if reinit_indexes:\n self.groundtruth_metadata[identity]['index'] = 0\n print('Finished shuffling data!')",
"def prepare_data(self):\n data = self._get_dataset(self.hparams.dataset_path)\n label_encoder = data[\"label_encoder\"]\n del data[\"label_encoder\"]\n\n click.secho(\"Building inputs and labels.\", fg=\"yellow\")\n datasets = {\n \"train\": defaultdict(list),\n \"valid\": defaultdict(list),\n \"test\": defaultdict(list),\n }\n for dataset_name, dataset in data.items():\n for sample in dataset:\n instance = self.build_input(\n self.tokenizer, sample[\"text\"], label_encoder, sample[\"label\"]\n )\n for input_name, input_array in instance.items():\n datasets[dataset_name][input_name].append(input_array)\n\n click.secho(\"Padding inputs and building tensors.\", fg=\"yellow\")\n tensor_datasets = {\"train\": [], \"valid\": [], \"test\": []}\n for dataset_name, dataset in datasets.items():\n dataset = self.pad_dataset(dataset, padding=self.tokenizer.pad_index)\n for input_name in MODEL_INPUTS:\n if input_name == \"labels\":\n tensor = torch.tensor(dataset[input_name], dtype=torch.float32)\n else:\n tensor = torch.tensor(dataset[input_name])\n tensor_datasets[dataset_name].append(tensor)\n\n self.train_dataset = TensorDataset(*tensor_datasets[\"train\"])\n self.valid_dataset = TensorDataset(*tensor_datasets[\"valid\"])\n self.test_dataset = TensorDataset(*tensor_datasets[\"test\"])\n click.secho(\n \"Train dataset (Batch, Candidates, Seq length): {}\".format(\n self.train_dataset.tensors[0].shape\n ),\n fg=\"yellow\",\n )\n click.secho(\n \"Valid dataset (Batch, Candidates, Seq length): {}\".format(\n self.valid_dataset.tensors[0].shape\n ),\n fg=\"yellow\",\n )\n click.secho(\n \"Test dataset (Batch, Candidates, Seq length): {}\".format(\n self.test_dataset.tensors[0].shape\n ),\n fg=\"yellow\",\n )",
"def shuffle_datasets(self):\n assert self.data_tags is not None\n assert self.training_dataset is not None\n assert self.validation_dataset is not None\n self.training_dataset = self.shuffle_data_dictionary(self.training_dataset)\n self.validation_dataset = self.shuffle_data_dictionary(self.validation_dataset)",
"def train_model(self, data:List[np.ndarray]):\n d = np.vstack(data)\n np.random.shuffle(d)\n self.regressor.fit(\n X=self.input(d),\n y=self.output(d)\n )",
"def preprocess_data():\n le = preprocessing.LabelEncoder()\n # Reshape and normalize pixel values to be between 0 and 1\n train_images_reshaped = train_images.reshape(len(train_images), 1024, 1024, 1)/255.\n test_images_reshaped = test_images.reshape(len(test_images), 1024, 1024, 1)/255.\n\n return train_images_reshaped, test_images_reshaped, le.fit_transform(train_labels), le.fit_transform(test_labels)",
"def reset_training_data(self):\n logger.info(\"resetting training data\")\n if self.shuffle:\n random.shuffle(self.tweets)\n self.batch_generator = self.get_batch()",
"def preprocessing_pipeline(self):\n self.__multilabel_processing()\n self.__split_dataset()\n self.__save_datasets()"
] |
[
"0.71113944",
"0.70312476",
"0.68610984",
"0.6799098",
"0.67947114",
"0.67875975",
"0.67104065",
"0.669792",
"0.669792",
"0.66785854",
"0.662347",
"0.6609762",
"0.65880877",
"0.6575587",
"0.65448827",
"0.64898324",
"0.6479875",
"0.6458347",
"0.6436497",
"0.63990355",
"0.6395446",
"0.6347589",
"0.63422006",
"0.6317215",
"0.630786",
"0.630036",
"0.6294125",
"0.62822676",
"0.6274099",
"0.6269104"
] |
0.7546509
|
0
|
Function to train the given model on the provided training states and actions
|
def train(model, train_states, train_actions, batch_size):
    # randomly shuffle the input data so batches are not biased by the original ordering
    shuffled_states, shuffled_actions = random_preprocessing(train_states, train_actions)
    tl = train_states.shape[0]        # number of training examples
    mod = tl % batch_size             # leftover examples that do not fill a whole batch
    N = tl // batch_size              # number of full batches
    split_details = [batch_size] * N  # N elements of batch_size [batch_size, batch_size, ...]
    if mod:
        split_details.append(mod)     # keep the remainder as a final, smaller batch
    shuffled_states = tf.split(shuffled_states, split_details)
    shuffled_actions = tf.split(shuffled_actions, split_details)
    for i in range(len(shuffled_states)):
        # Implement backprop on each mini-batch:
        model.train_actor(shuffled_states[i], shuffled_actions[i])
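A hedged usage sketch of the training loop (DummyActor is a hypothetical stand-in; any object exposing train_actor(states, actions) would work):

import tensorflow as tf

class DummyActor:
    # hypothetical stand-in: a real model would run backprop on its weights here
    def train_actor(self, states, actions):
        print("trained on a batch of", states.shape[0], "examples")

states = tf.random.uniform((10, 4))
actions = tf.random.uniform((10, 2))
train(DummyActor(), states, actions, batch_size=4)
# expected: two full batches of 4 followed by a remainder batch of 2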
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def train_models(self, states, actions, rewards, done):\n # Compute discounted rewards and Advantage (TD. Error)\n discounted_rewards = self.discount(rewards, done, states[-1])\n state_values = self.critic.predict(np.array(states))\n advantages = discounted_rewards - np.reshape(state_values, len(state_values))\n # Networks optimization\n self.a_opt([states, actions, advantages])\n self.c_opt([states, discounted_rewards])",
"def train_models(self, states, actions, rewards):\n # Compute discounted rewards and Advantage (TD. Error)\n discounted_rewards = self.discount(rewards)\n state_values = self.critic.predict(np.array(states))\n advantages = discounted_rewards - np.reshape(state_values, len(state_values))\n # Networks optimization\n self.act_op([states, actions, advantages])\n self.cri_op([states, discounted_rewards])",
"def train_step(self, states, actions, next_states, reward=0):\n ### PROBLEM 1\n ### YOUR CODE HERE\n # raise NotImplementedError\n _, loss, _, _ = self._sess.run([self._optimizer, self._loss, self._reward_optimizer, self._reward_loss],\n feed_dict={self._state_ph: np.atleast_2d(states),\n self._action_ph: np.atleast_2d(actions),\n self._next_state_ph: np.atleast_2d(next_states),\n self._reward_ph: np.atleast_2d(reward)})\n return loss",
"def train(self):\n\n agent_step = self._num_actions_taken\n\n if agent_step >= self._train_after:\n if (agent_step % self._train_interval) == 0:\n pre_states, actions, post_states, rewards, terminals = self._memory.minibatch(self._minibatch_size)\n\n self._trainer.train_minibatch(\n self._trainer.loss_function.argument_map(\n pre_states=pre_states,\n actions=Value.one_hot(actions.reshape(-1, 1).tolist(), self.nb_actions),\n post_states=post_states,\n rewards=rewards,\n terminals=terminals\n )\n )\n\n # Update the Target Network if needed\n if (agent_step % self._target_update_interval) == 0:\n self._target_net = self._action_value_net.clone(CloneMethod.freeze)\n filename = \"models\\model%d\" % agent_step\n self._trainer.save_checkpoint(filename)",
"def train_step(_states, _actions, _rewards):\n _cumulative_rewards = get_cumulative_rewards(_rewards)\n update.run({states:_states, actions: _actions, cumulative_rewards:_cumulative_rewards})",
"def train(self, features, actions, rewards):\n self.model.fit(x=features, y=actions, sample_weight=rewards,\n verbose=False)",
"def update_model(self, states, actions):\n self.learner.add_to_data(states, actions)\n self.learner.train_learner()\n self.iterations += 1",
"def call(self, inputs, states, training):\n raise NotImplementedError(\"Please implement this method\")",
"def train_network(self):\n batch = self.memory.sample(self.batch_size)\n inputs = np.array([b[\"state\"] for b in batch]) #####\n actions = np.array([b[\"action\"] for b in batch])\n rewards = np.array([b[\"reward\"] for b in batch])\n next_inputs = np.array([b[\"next_state\"] for b in batch])\n\n actions_one_hot = np.eye(self.action_space_size)[actions]\n\n next_qvalues = np.squeeze(self.target_network.model(next_inputs))\n targets = rewards + self.discount * np.amax(next_qvalues, axis=-1)\n\n self.online_network.train_step(inputs, targets, actions_one_hot)",
"def train_model(self, *args, **kwargs):\n self.model.train(self.training, *args, **kwargs)",
"def train(self, states, actions, rewards, discount, lrn_rate):\n for i in range(len(rewards)):\n ret = self._discountedRet(rewards[i:], discount)\n dis = discount**(i)\n feed_dict = {self.state: states[i], self.action: actions[i], self.dis_pow_t: dis,\n self.ret: ret, self.lrn_rate: lrn_rate}\n self._sess.run(self._train, feed_dict)",
"def train_step(self, Xs, ys, actions):\n\n loss, _, prediction_probs, q_values = self.session.run(\n [self.loss, self.train_op, self.predictions, self.q_vals],\n feed_dict={self.input_placeholder: Xs,\n self.labels_placeholder: ys,\n self.actions_placeholder: actions\n })",
"def train(self):\n\n if(self.net.killAll):\n self._kill()\n\n empty = False\n state = []\n actions = []\n rewards = []\n while(not empty):\n example = self.globalQueue.get()\n \n for prevState, action, reward in zip(example['prevStates'], example['actions'],example['rewards']):\n state.append(np.array(prevState).reshape(-1,84,84,4))\n actions.append(np.eye(self.actionSpace)[np.array(action)].reshape(-1,self.actionSpace).astype(np.float32))\n rewards.append(np.array(reward).reshape(-1))\n empty = self.globalQueue.empty()\n \n if(len(rewards) != 0 ):\n states = np.array(state).reshape(-1, 84,84,4)\n actions = np.array(actions).reshape(-1,self.actionSpace)\n rewards = np.array(rewards).reshape(-1)\n self.net.train(states, rewards, actions)",
"def train_model(self, *args, **kwargs):\n raise NotImplementedError",
"def train(self, input_fn, steps):\n self._estimator.train(input_fn=input_fn, max_steps=steps)",
"def train():\n pass",
"def train(self, *args, **kwargs):\n raise NotImplementedError",
"def train(self, state, action, reward, next_state, done):\n self._train(state, action, reward, next_state, done)\n # store the new data into a long term memory\n self.remember(state, action, reward, next_state, done)",
"def train_step(input, target, model, loss_fn, optimizer, **unused):\r\n model.train()\r\n output = model(input)\r\n loss = loss_fn(output, target)\r\n optimizer.backward(loss)\r\n optimizer.step()",
"def train(self):\n self.emission_model(self.train_data)\n self.transition_model(self.train_data)",
"def train(opts):\n # Set number of actions\n opts.A = opts.delta_M * opts.delta_N\n # Set random seeds\n set_random_seeds(opts.seed)\n # Create actions mapping\n count_act = 0\n opts.act_to_delta = {}\n opts.delta_to_act = {}\n for i in range(-(opts.delta_N//2), opts.delta_N//2+1):\n for j in range(-(opts.delta_M//2), opts.delta_M//2+1):\n opts.act_to_delta[count_act] = (i, j)\n opts.delta_to_act[(i, j)] = count_act\n count_act += 1\n\n if opts.expert_rewards:\n from data_loader import DataLoaderExpert as DataLoader\n elif opts.expert_trajectories or opts.actorType == 'demo_sidekick' or opts.actorType == 'peek_saliency':\n from data_loader import DataLoaderExpertPolicy as DataLoader\n else:\n from data_loader import DataLoaderSimple as DataLoader\n\n if opts.dataset == 0:\n opts.num_channels = 3\n if opts.mean_subtract:\n # R, G, B means and stds\n opts.mean = [119.16, 107.68, 95.12]\n opts.std = [61.88, 61.72, 67.24]\n else:\n opts.mean = [0, 0, 0]\n opts.std = [1, 1, 1]\n elif opts.dataset == 1:\n opts.num_channels = 1\n if opts.mean_subtract:\n # R, G, B means and stds\n opts.mean = [193.0162338615919]\n opts.std = [37.716024486312811]\n else:\n opts.mean = [0]\n opts.std = [1]\n else:\n raise ValueError('Dataset %d does not exist!'%(opts.dataset))\n\n if opts.expert_trajectories:\n opts.T_sup = opts.T-1\n loader = DataLoader(opts)\n if opts.expert_trajectories:\n agent = AgentSupervised(opts)\n else:\n agent = Agent(opts)\n\n # Create tensorboard writer\n writer = SummaryWriter(log_dir=opts.save_path)\n # Set networks to train\n agent.policy.train()\n # Initiate statistics storage variables\n if opts.load_model == '': \n best_val_error = 100000\n train_history = []\n val_history = []\n epoch_start = 0\n else:\n best_val_error, train_history, val_history, epoch_start = load_module(agent, opts)\n\n # To handle job eviction and restarts\n if os.path.isfile(os.path.join(opts.save_path, 'model_latest.net')):\n print('====> Resuming training from previous checkpoint')\n # undo most of the loading done before\n loaded_model = torch.load(os.path.join(opts.save_path, 'model_latest.net'))\n opts = loaded_model['opts']\n epoch_start = loaded_model['epoch'] + 1\n\n loader = DataLoader(opts)\n if opts.expert_trajectories:\n agent = AgentSupervised(opts)\n agent.T_sup = loaded_model['T_sup']\n else:\n agent = Agent(opts) \n\n agent.policy.load_state_dict(loaded_model['state_dict'])\n train_history = loaded_model['train_history']\n val_history = loaded_model['val_history']\n #agent.optimizer.load_state_dict(loaded_model['optimizer'])\n best_val_error = loaded_model['best_val_error']\n\n # Some random selection of images to display\n rng_choices = random.sample(range(300//opts.batch_size), 2) \n # Start training\n for epoch in range(epoch_start, opts.epochs):\n # Initialize epoch specific variables\n depleted = False\n train_err = 0\n train_count = 0\n iter_count = 0\n avg_colln_loss = 0\n\n while not depleted:\n # pano - BxNxMxCx32x32\n if opts.expert_rewards:\n pano, pano_rewards, depleted = loader.next_batch('train')\n pano_maps = None\n elif opts.expert_trajectories or opts.actorType == 'demo_sidekick' or opts.actorType == 'peek_saliency':\n pano, pano_maps, depleted = loader.next_batch('train')\n pano_rewards = None\n else:\n pano, depleted = loader.next_batch('train')\n pano_rewards = None\n pano_maps = None\n # Note: This batch size is the current batch size, not the global batch size. 
This varies\n\n # when you reach the boundary of the dataset.\n batch_size = pano.shape[0]\n start_idx = get_starts(opts.N, opts.M, batch_size, opts.start_view)\n state = State(pano, pano_rewards, start_idx, opts)\n if opts.expert_trajectories:\n if opts.hybrid_train:\n rec_errs = agent.train_agent_hybrid(state, pano_maps, opts)\n else:\n rec_errs = agent.train_agent(state, pano_maps, opts)\n else:\n # Forward pass\n log_probs, rec_errs, rewards, entropies, decoded, values,\\\n visited_idxes, decoded_all, _ = agent.gather_trajectory(state, eval_opts=None, pano_maps=pano_maps, opts=opts)\n # Backward pass\n agent.update_policy(rewards, log_probs, rec_errs, entropies, values) \n\n # Accumulate statistics\n train_err += rec_errs[-1].data.sum()\n train_count += batch_size\n iter_count += 1\n\n train_err /= train_count\n\n # Evaluate the agent after every epoch\n val_err, _, _, decoded_images = evaluate(loader, agent, 'val', opts)\n\n # Write out statistics to tensorboard\n writer.add_scalar('data/train_error', train_err, epoch+1)\n writer.add_scalar('data/val_error', val_err, epoch+1)\n\n # Write out models and other statistics to torch format file\n train_history.append([epoch, train_err])\n val_history.append([epoch, val_err])\n if best_val_error > val_err:\n best_val_error = val_err\n save_state = {\n 'epoch': epoch,\n 'state_dict': agent.policy.state_dict(),\n 'optimizer': agent.optimizer.state_dict(),\n 'opts': opts, \n 'best_val_error': best_val_error,\n 'train_history': train_history,\n 'val_history': val_history\n }\n if opts.expert_trajectories:\n save_state['T_sup'] = agent.T_sup\n\n torch.save(save_state, os.path.join(opts.save_path, 'model_best.net'))\n\n save_state = {\n 'epoch': epoch,\n 'state_dict': agent.policy.state_dict(),\n 'optimizer': agent.optimizer.state_dict(),\n 'opts': opts, \n 'best_val_error': best_val_error,\n 'train_history': train_history,\n 'val_history': val_history\n }\n if opts.expert_trajectories:\n save_state['T_sup'] = agent.T_sup\n torch.save(save_state, os.path.join(opts.save_path, 'model_latest.net'))\n\n print('Epoch %d : Train loss: %9.6f Val loss: %9.6f'%(epoch+1, train_err, val_err))\n\n # Reduce supervision gradually\n if opts.expert_trajectories and opts.hybrid_train:\n if (epoch+1) % opts.hybrid_schedule == 0 and agent.T_sup > 0:\n agent.T_sup -= 1\n # Save the model after the first schedule is over\n if epoch+1 == opts.hybrid_schedule:\n torch.save(save_state, os.path.join(opts.save_path, 'model_after_one_schedule.net'))\n\n # Decay expert reward gradually\n if opts.expert_rewards and (epoch+1) % opts.expert_rewards_decay == 0:\n agent.reward_scale_expert /= opts.expert_rewards_decay_factor\n\n # Display three randomly selected batches of panoramas every 10 epochs\n if (epoch+1) % 10 == 0 or epoch == 0:\n for choice in rng_choices:\n for pano_count in range(decoded_images[choice].size(0)):\n x = vutils.make_grid(decoded_images[choice][pano_count], padding=5, normalize=True, scale_each=True, nrow=opts.T//2+1) \n writer.add_image('Validation batch # : %d image # : %d'%(choice, pano_count), x, 0) # Converting this to 0 to save disk space, should be epoch ideally",
"def trainModel( self, featureTrain, classTrain):",
"def train(self, training_steps=10):",
"def train_step(\n model,\n rng,\n state,\n batch,\n alpha_fn_dict,\n learning_rate_fn,\n weight_decay,\n metric_collector,\n):\n logging.info(\"train_step(batch=%s)\", batch)\n\n step = state.step + 1\n lr = learning_rate_fn(step)\n alpha_dict = jax.tree_map(lambda fn: fn(step), alpha_fn_dict)\n\n def loss_fn(params):\n variables = {\"params\": params}\n out = model.apply(variables, batch)\n\n # ------------------------------------------------------------------------\n # Compute the loss.\n pred_loss, stat_dict = out.compute_total_loss(batch, alpha_dict)\n\n # ------------------------------------------------------------------------\n # Weight Regularization\n weight_penalty_params = jax.tree_util.tree_leaves(variables[\"params\"])\n weight_l2 = sum(\n [jnp.sum(x**2) for x in weight_penalty_params if x.ndim > 1]\n )\n weight_penalty = weight_decay * 0.5 * weight_l2\n # ------------------------------------------------------------------------\n\n total_loss = pred_loss + weight_penalty\n stat_dict[\"weight_l2\"] = weight_l2\n\n return total_loss, stat_dict\n\n # ------------------------------------------------------------------------\n # Compute graidents\n grad_fn = jax.value_and_grad(loss_fn, has_aux=True)\n (loss, stat_dict), grad = grad_fn(state.params)\n\n # Compute average gradient across multiple workers.\n grad = jax.lax.pmean(grad, axis_name=\"batch\")\n\n # ------------------------------------------------------------------------\n # Update States\n new_state = state.apply_gradients(grads=grad)\n\n metrics_update = metric_collector.gather_from_model_output(\n total_loss=loss,\n learning_rate=lr,\n **stat_dict,\n )\n return new_state, metrics_update, rng",
"def train():\n # YOUR TRAINING CODE GOES HERE",
"def train_episode(self):\n state = self.env.reset()\n states = []\n actions = []\n rewards = []\n for _ in range(self.options.steps):\n probs = self.actor_baseline.predict([[state]])[0][0]\n action = np.random.choice(len(probs), p=probs)\n\n next_state, reward, done, _ = self.step(action)\n states.append(state)\n actions.append(action)\n rewards.append(reward)\n\n state = next_state\n\n if done:\n break\n\n # Compute and store returns in G\n G = np.zeros_like(rewards)\n ################################\n # YOUR IMPLEMENTATION HERE #\n ################################\n\n # One-hot encoding for actions\n actions_one_hot = np.zeros([len(actions), self.env.action_space.n])\n actions_one_hot[np.arange(len(actions)), actions] = 1\n\n # Compute one-hot encoded deltas\n ################################\n # YOUR IMPLEMENTATION HERE #\n ################################\n deltas = [[0]]\n\n # Update actor and state estimator\n self.actor_baseline.fit(x=[np.array(states)],\n y={'actor_output': deltas, 'baseline_output': returns},\n epochs=1, batch_size=self.options.batch_size, verbose=0)",
"def train(model, train_inputs, train_labels):\n print('Train starts: \\n')\n indices = tf.range(0, train_inputs.shape[0])\n indices = tf.random.shuffle(indices)\n train_inputs = tf.gather(train_inputs, indices)\n train_labels = tf.gather(train_labels, indices)\n\n N = train_inputs.shape[0] // model.batch_size\n for batch in range(N):\n start = batch * model.batch_size\n end = (batch + 1) * model.batch_size\n if (batch + 1) * model.batch_size > train_inputs.shape[0]:\n end = train_inputs.shape[0]\n inputs = train_inputs[start: end]\n labels = train_labels[start: end]\n\n with tf.GradientTape() as tape:\n probs = model.call(inputs)\n loss = model.loss_function(probs, labels)\n\n gradients = tape.gradient(loss, model.trainable_variables)\n model.optimizer.apply_gradients(zip(gradients, model.trainable_variables))\n # if batch % 100 == 0:\n # print('Batch {} starts:'.format(batch))\n print('\\r', \"training processing : {} %\".format((batch + 1) * 100 // N), end='')",
"def train_step(self, experiences, gamma):\n states = experiences['states']\n rewards = experiences['rewards']\n actions = experiences['actions']\n next_states = experiences['next_states']\n dones = experiences['dones']\n q_values = self.main_network(states).gather(1, actions.view(-1, 1)).squeeze()\n\n # Get max predicted Q values (for next states) from target model\n next_q_values = self.target_network(next_states).detach().max(1)[0]\n\n # Compute Q targets for current states\n expected_q_value = rewards + (gamma * next_q_values * (1 - dones))\n\n # Compute loss\n loss = F.mse_loss(q_values, expected_q_value)\n self.optimizer.zero_grad()\n loss.backward()\n self.optimizer.step()\n\n # Update the target network\n self.soft_update(self.main_network, self.target_network, TAU)",
"def train(args):\n # prepare environment\n brain_name = env.brain_names[0]\n brain = env.brains[brain_name]\n env_info = env.reset(train_mode=True)[brain_name]\n\n num_agents = len(env_info.agents)\n print('Number of agents:', num_agents)\n\n # size of each action\n action_size = brain.vector_action_space_size\n print('Size of each action:', action_size)\n\n # examine the state space\n states = env_info.vector_observations\n state_size = states.shape[1]\n print('There are {} agents. Each observes a state with length: {}'.format(\n states.shape[0], state_size))\n print('The state for the first agent looks like:', states[0])\n\n # Crate instance of MADDPG Class, mainly possible to control the model dimensions, learnrates and batch sizes\n agent = MADDPG(state_size,\n action_size,\n lr_actor=args.lr_actor,\n lr_critic=args.lr_critic,\n lr_decay=args.lr_decay,\n replay_buff_size=args.replay_buff_size,\n gamma=args.gamma,\n batch_size=args.batch_size,\n random_seed=args.random_seed,\n soft_update_tau=args.soft_update_tau,\n actor_layer_dim_1=args.actor_layer_dim_1,\n actor_layer_dim_2=args.actor_layer_dim_2,\n actor_layer_dim_3=args.actor_layer_dim_3,\n critic_layer_dim_1=args.critic_layer_dim_1,\n critic_layer_dim_2=args.critic_layer_dim_2,\n critic_layer_dim_3=args.critic_layer_dim_3\n\n )\n\n total_rewards = []\n avg_scores = []\n max_avg_score = -1\n max_score = -1\n threshold_init = 20\n noise_t = args.epsilon\n noise_decay = args.epsilon_decay\n latest_avg_score = -1\n # for early-stopping training if consistently worsen for # episodes\n worsen_tolerance = threshold_init\n for i_episode in range(1, 1+args.num_episodes):\n\n env_inst = env.reset(train_mode=True)[\n brain_name] # reset the environment\n states = env_inst.vector_observations # get the current state\n # initialize score array\n scores = np.zeros(num_agents)\n dones = [False]*num_agents\n while not np.any(dones):\n # select an action\n actions = agent.act(states, noise_t)\n # send the action to the environment\n env_inst = env.step(actions)[brain_name]\n next_states = env_inst.vector_observations # get the next state\n rewards = env_inst.rewards # get the reward\n dones = env_inst.local_done # see if episode has finished\n agent.update(states, actions, rewards, next_states, dones)\n\n noise_t *= noise_decay\n scores += rewards # update scores\n states = next_states\n\n episode_score = np.max(scores)\n total_rewards.append(episode_score)\n print(\"\\rEpisodic {} Score: {:.4f}\\t Avg Score: {:.4f}\".format(\n i_episode, episode_score, latest_avg_score), end=' ')\n\n if max_score <= episode_score:\n max_score = episode_score\n # save best model so far\n agent.save(\n \"chkpts/{}/{:02d}_best_model.checkpoint\".format(args.model_path, args.loop_counter))\n\n # record avg score for the latest 100 steps\n if len(total_rewards) >= args.test_n_run:\n latest_avg_score = sum(\n total_rewards[(len(total_rewards)-args.test_n_run):]) / args.test_n_run\n avg_scores.append(latest_avg_score)\n\n if max_avg_score <= latest_avg_score: # record better results\n worsen_tolerance = threshold_init # re-count tolerance\n max_avg_score = latest_avg_score\n else:\n if max_avg_score > 0.5:\n worsen_tolerance -= 1 # count worsening counts\n print(\"Loaded from last best model.\")\n # continue from last best-model\n agent.load(\n \"chkpts/{}/{:02d}_best_model.checkpoint\".format(args.model_path, args.loop_counter))\n if worsen_tolerance <= 0: # earliy stop training\n print(\"Early Stop Training.\")\n break\n del agent\n return total_rewards",
"def train(self, X_t_, W_previous_, pf_value_previous_, dailyReturn_t_):\n self.sess.run(self.train_op, feed_dict={self.X_t: X_t_,\n self.W_previous: W_previous_,\n self.pf_value_previous: pf_value_previous_,\n self.dailyReturn_t: dailyReturn_t_})"
] |
[
"0.72539365",
"0.72391033",
"0.7229092",
"0.71589714",
"0.6982884",
"0.6979589",
"0.69229835",
"0.684992",
"0.6848814",
"0.68216443",
"0.6794802",
"0.6790298",
"0.67815095",
"0.6685336",
"0.66696656",
"0.65886027",
"0.6585921",
"0.6581424",
"0.6577335",
"0.6570257",
"0.6564928",
"0.6560603",
"0.6552285",
"0.6538661",
"0.65161014",
"0.6502324",
"0.6493634",
"0.6475922",
"0.6460129",
"0.64520633"
] |
0.77824277
|
0
|
r""" Using parse_frequency_support, determine the mean primary beam size in each observed band
|
def approximate_primary_beam_sizes(frequency_support_str,
                                   dish_diameter=12 * u.m, first_null=1.220):
    freq_ranges = parse_frequency_support(frequency_support_str)
    # first-null radius of the Airy pattern: theta ~= 1.22 * lambda / D,
    # evaluated at the mean frequency of each observed band
    beam_sizes = [(first_null * fr.mean().to(u.m, u.spectral())
                   / (dish_diameter)).to(u.arcsec, u.dimensionless_angles())
                  for fr in freq_ranges]
    return u.Quantity(beam_sizes)
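For reference, the relation being evaluated is the first-null radius of an Airy diffraction pattern, theta ≈ 1.22 λ / D. A standalone sketch with astropy units (the 100 GHz value is an arbitrary illustration, not taken from any real frequency-support string):

from astropy import units as u

freq = 100 * u.GHz
wavelength = freq.to(u.m, u.spectral())  # about 3.0 mm
theta = (1.220 * wavelength / (12 * u.m)).to(u.arcsec, u.dimensionless_angles())
# about 62.9 arcsec first-null beam radius for a 12 m dish at 100 GHz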
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def DSS28_beamwidth(freq):\n return 0.54/freq",
"def meshsize_avg(self):\n nspans = self.numspans\n support = abs(self.kv[-1] - self.kv[0])\n return support / nspans",
"def get_average(self, s_freq, e_freq):\n s_ind = self.get_bin(s_freq)\n e_ind = self.get_bin(e_freq)\n lst = self.mags[s_ind:e_ind+1]\n try:\n avg = sum(lst)/len(lst)\n except:\n print(s_ind, e_ind)\n print('werid stuff')\n avg = 0\n return avg",
"def band_penalty(self):\n fc_ix = np.argmin(np.abs(self.f - self.fc)) # Index to frequency array closes to center frequency\n # Number of indexes on each side of center frequency, not extending outside, only up to 10 kHz\n n = min(fc_ix, self.ix10k - fc_ix)\n if n == 0:\n return 0.0\n return np.mean(np.square(self.fr[fc_ix - n:fc_ix] - (self.gain - self.fr[fc_ix + n - 1:fc_ix - 1:-1])))",
"def band_penalty(self):\n fc_ix = np.argmin(np.abs(self.f - self.fc)) # Index to frequency array closes to center frequency\n # Number of indexes on each side of center frequency, not extending outside, only up to 10 kHz\n n = min(fc_ix, self.ix10k - fc_ix)\n if n == 0:\n return 0.0\n return np.mean(np.square(self.fr[fc_ix - n:fc_ix] - self.fr[fc_ix + n - 1:fc_ix - 1:-1]))",
"def cal_ResBeam_Stats(infile, header_bmaj, header_bmin):\n\n beamlog_file = np.loadtxt(infile)\n bmaj = beamlog_file[:,1]\n bmin = beamlog_file[:,2]\n ind_nonzero_bmaj = np.nonzero(bmaj) # finding array indices of nonzero values\n ind_nonzero_bmin = np.nonzero(bmin)\n total_nbmaj = np.count_nonzero(bmaj) # count total number of bmaj non zero occurance\n total_nbmin = np.count_nonzero(bmin)\n bmaj_variance = (np.sum((bmaj[ind_nonzero_bmaj]-header_bmaj)**2.0))/total_nbmaj # using header beam value as mean \n bmin_variance = (np.sum((bmin[ind_nonzero_bmin]-header_bmin)**2.0))/total_nbmin\n bmaj_stdev = np.sqrt(bmaj_variance)\n bmin_stdev = np.sqrt(bmin_variance)\n beam_threshold = round((((header_bmaj + bmaj_stdev) * (header_bmin + bmin_stdev))/ (header_bmaj*header_bmin))-1.0, 4)\n bmaj_max = np.max(bmaj[ind_nonzero_bmaj])\n bmaj_min = np.min(bmaj[ind_nonzero_bmaj])\n bmin_max = np.max(bmin[ind_nonzero_bmin])\n bmin_min = np.min(bmin[ind_nonzero_bmin])\n max_ratio_beam_area = (bmaj_max*bmin_max)/(header_bmaj*header_bmin) # measured beam area / header beam area\n min_ratio_beam_area = (bmaj_min*bmin_min)/(header_bmaj*header_bmin)\n\n return bmaj_stdev, bmin_stdev, beam_threshold, max_ratio_beam_area, min_ratio_beam_area",
"def avgLength(afz, output=True):\n count = 0\n length = 0\n for msg in msgs:\n if msg.afz == afz:\n count = count + 1\n length = length + len(msg.msg)\n avg = length/count\n if output:\n print afz, 'heeft', count, 'berichten verzonden met een gemiddelde lengte van', avg, 'tekens.'\n return avg\n else:\n return avg",
"def _beam(self):\n\n return self._beam_factory.simple(self.detectorbase.wavelength)",
"def checkfrequency(inputgiven):\n data_size = 40000\n wav_file = wave.open(inputgiven, 'r')\n data = wav_file.readframes(data_size)\n wav_file.close()\n data = struct.unpack('{n}h'.format(n=data_size), data)\n print max(data)",
"def pd0_counts(self, pd0_beam_num: int):\r\n\r\n # Vertical Beam ONLY\r\n if self.element_multiplier == 1:\r\n beam0 = [v[0] for v in self.Amplitude] # Beam 0\r\n return [round(v * 2.0) for v in beam0] # Convert to counts\r\n\r\n if pd0_beam_num == 0 and pd0_beam_num <= self.element_multiplier:\r\n beam2 = [v[2] for v in self.Amplitude] # PD0 0 - RTB 2\r\n return [round(v * 2.0) for v in beam2] # Convert to counts\r\n\r\n if pd0_beam_num == 1 and pd0_beam_num <= self.element_multiplier:\r\n beam3 = [v[3] for v in self.Amplitude] # PD0 1 - RTB 3\r\n return [round(v * 2.0) for v in beam3] # Convert to counts\r\n\r\n if pd0_beam_num == 2 and pd0_beam_num <= self.element_multiplier:\r\n beam1 = [v[1] for v in self.Amplitude] # PD0 2 - RTB 1\r\n return [round(v * 2.0) for v in beam1] # Convert to counts\r\n\r\n if pd0_beam_num == 3 and pd0_beam_num <= self.element_multiplier:\r\n beam0 = [v[0] for v in self.Amplitude] # PD0 3 - RTB 0\r\n return [round(v * 2.0) for v in beam0] # Convert to counts\r\n\r\n return None",
"def main_beam_eff(beam=1, ZA=0, frequency=1400.): # apply the frequency at 1400\n D = 300 # m\n n_R = 1.0\n lam = 299792458./(1e6*frequency)\n theta = beam_size(beam=beam,frequency=frequency)/60. * np.pi/180.\n ape_eff = aperture_eff(beam=beam, ZA=ZA, frequency=frequency) \n mb_eff = 0.8899 * ape_eff / n_R * theta**2 * D**2 / lam**2\n\n return mb_eff",
"def cal_samples(self):\n max_omega = max(\n abs(2 * np.pi * self.u.fundamental),\n abs(2 * np.pi * self.v.fundamental),\n abs(2 * np.pi * self.w.fundamental),\n )\n max_freq = max_omega / (2 * np.pi)\n self.fake_samples_number = (\n (max_freq ** 2) * 6 * self.u.data.shape[0] / self.u.sampling_rate\n )",
"def avg_amplitude_spectro(Sxx_ampli):\r\n\r\n # average the amplitude spectrogram taking the PSD for energy conservation\r\n S_ampli_mean = np.sqrt(np.mean(Sxx_ampli**2, axis=1))\r\n\r\n return S_ampli_mean",
"def cf_mean(self):\n net_power = self['annual_field_energy'] \\\n - self['annual_thermal_consumption'] # kW-hr\n # q_pb_des is in MW, convert to kW-hr\n name_plate = self['q_pb_des'] * 8760 * 1000\n\n return net_power / name_plate",
"def fm_estimate(self):\n means = []\n for start in range(0, self.est_num, self.group_size):\n end = start + self.group_size\n means.append(_mean(self.fm_estimates[start:end]))\n return int(2 ** _median(means))",
"def cf_mean(self):\n net_power = self['annual_gross_energy'] \\\n - self['annual_thermal_consumption'] # kW-hr\n # q_pb_des is in MW, convert to kW-hr\n name_plate = self['q_pb_design'] * 8760 * 1000\n\n return net_power / name_plate",
"def average_word_length(self):\n len_words_only = [len(s) if s.isalpha() else 0 for s in self.text]\n if (len_words_only == 0):\n print('Input file contains no words.')\n return 0, 0, 0\n else:\n return sum(len_words_only) / len(len_words_only), median(len_words_only), mode(len_words_only)",
"def get_sampwidth(self):\n return self._sampwidth",
"def mean(self):\r\n\t\treturn sum(self.sample)/len(self.sample)",
"def avg_field_length(self, searcher, fieldnum):\n return searcher.field_length(fieldnum) / searcher.doc_count_all()",
"def get_scale_freq():\n return sf / 2 / (num_freq-1)",
"def __len__(self):\n return sum(self.size_freqs.values())",
"def get_effect_size(self):\n\n # search the xls file\n f = glob.glob(os.path.join(self.output, '*_peaks.xls'))\n if not os.path.exists(f[0]):\n raise ValueError('file missing in macs2 callpeak output: %s' % f)\n\n # top\n topN = 100\n counter = 0\n dep = {}\n # ip_depth = ip_scale = input_depth = input_scale = 0\n with open(f[0], 'rt') as fi:\n for line in fi:\n if not line.startswith('#'): \n continue\n if counter > 100: # nrows\n break # stop\n num = line.strip().split()[-1]\n if 'tags after filtering in treatment' in line:\n dep['ip_depth'] = num\n if 'tags in treatment' in line:\n s = 1e6 / int(num)\n dep['ip_scale'] = '%.6f' % s\n if 'tags after filtering in control' in line:\n dep['input_depth'] = num\n if 'tags in control' in line:\n s = 1e6 / int(num)\n dep['input_scale'] = '%.6f' % s\n counter += 1\n\n return dep",
"def freq_optimization(self):\n index = identify_scale(self.vz, True)\n # In case the patient is limping\n if index > 35:\n index = index / 2\n print(f\"Scale used is {index}\")",
"def meanContigLength(self):\n\t\tstats = self.scores()\n\t\treturn stats['meanContig']",
"def narrowIncandPeakInfo(self):\r\n\t\tself.narrowIncandBaseline = (np.mean(self.narrowBandIncandData[0:10]))\r\n\t\t\t\t\r\n\t\traw_narrowIncand_max = np.amax(self.narrowBandIncandData)\r\n\t\tnarrowIncand_max = raw_narrowIncand_max - self.narrowIncandBaseline\t\t\r\n\t\tnarrowIncand_max_index = np.argmax(self.narrowBandIncandData)\r\n\t\t\r\n\t\tself.narrowIncandMax =narrowIncand_max\r\n\t\tself.narrowIncandMaxPos = narrowIncand_max_index",
"def cal_beam_AvgRMS(infile):\n \n data = np.loadtxt(infile)\n rms = data[:,3]\n avg_rms = round(np.mean(rms), 3)\n \n return avg_rms",
"def avgram(self):\n return (self._total_ram['value'] / self._total_ram['count']) if self._total_ram['count'] else 0",
"def dbsnp_freq(record):\n try:\n kg = re.search(r\"(CAF=)([0-9,.e-]+)\", record[7]).group(2)\n kg_af = float(kg.split(\",\")[1])\n except:\n kg_af = -1\n\n try:\n topmed = re.search(r\"(TOPMED=)([0-9,.e-]+)\", record[7]).group(2)\n topmed_af = float(topmed.split(\",\")[1])\n except:\n topmed_af = -1\n\n return max(kg_af, topmed_af)",
"def effective_width(self, intrinsic_width, dm, bandwidth, freq):\n a = sqrt(pow(intrinsic_width, 2) + pow((8.3e6 * fabs(dm) * (bandwidth / pow(freq, 3))), 2))\n return a"
] |
[
"0.6399699",
"0.6226099",
"0.6138276",
"0.5906671",
"0.5898159",
"0.5684352",
"0.5649585",
"0.5636002",
"0.56263626",
"0.56100583",
"0.55792385",
"0.5576417",
"0.556699",
"0.5537218",
"0.55023855",
"0.5480502",
"0.5477145",
"0.5467537",
"0.54647195",
"0.544937",
"0.5445954",
"0.5439119",
"0.5401902",
"0.5396172",
"0.53660434",
"0.5355815",
"0.53499013",
"0.53434473",
"0.5342456",
"0.53388494"
] |
0.6801699
|
0
|
Gets a color for a node from the color map
|
def get_color(node, color_map):
if node in color_map:
return color_map[node]
return "black"
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def get_color(self, node: Node) -> str:\n\n idx = hash(node.get_kind_name()) % len(self.colors_)\n return self.colors_[idx]",
"def get_color(key):\n if _with_colors:\n return _color_map.get(key, None)\n return None",
"def which_color(node):\n for i, com in enumerate(communities):\n if node in com:\n return colors[i]\n return nx_helpers.rgb_to_hex((0, 0, 0))",
"def get_color(self):\n return COLOR_DICT[self.element]",
"def _get_color(self):\n return self.__color",
"def _get_color(self):\n return self.__color",
"def _get_color(self):\n return self.__color",
"def _get_color(self):\n return self.__color",
"def getColor(k) :\n colors = [\"#862B59\",\"#A10000\",\"#0A6308\",\"#123677\",\"#ff8100\",\"#F28686\",\"#6adf4f\",\"#58ccdd\",\"#3a3536\",\"#00ab7c\"]\n return colors[k]",
"def get_node_color(self, origin_node_id):\n origin_node_id %= 11\n if origin_node_id == 9:\n return 0.753, 0.753, 0.753, 1.\n if origin_node_id == 8:\n return 0.824, 0.412, 0.118, 1.\n if origin_node_id == 7:\n return 1.000, 0.000, 1.000, 1.\n if origin_node_id == 6:\n return 1.000, 1.000, 0.000, 1.\n if origin_node_id == 5:\n return 1.000, 0.627, 0.478, 1.\n if origin_node_id == 4:\n return 0.498, 1.000, 0.000, 1.\n if origin_node_id == 3:\n return 0.000, 1.000, 1.000, 1.\n if origin_node_id == 2:\n return 1.000, 0.922, 0.804, 1.\n if origin_node_id == 1:\n return 0.871, 0.722, 0.529, 1.\n if origin_node_id == 0:\n return 0.000, 0.749, 1.000, 1.\n if origin_node_id == 0:\n return 0.500, 0.549, 1.000, 1.\n\n return 0.8, 0.8, 0.8, 1.0",
"def get_color(self, item_to_color):\r\n\r\n color = (255, 255, 255)\r\n for item in self.data.items():\r\n if item[0] == item_to_color:\r\n existing_color = [i for i, v in enumerate(self.color_map) if v[0] == item[0]]\r\n if existing_color is not None and existing_color:\r\n return self.color_map[existing_color[0]][1]\r\n else:\r\n r = random.randint(0, 255)\r\n g = random.randint(0, 255)\r\n b = random.randint(0, 255)\r\n color = (r, g, b)\r\n self.color_map.append((item[0], color))\r\n return color\r\n self.color_map.append((item_to_color, color))\r\n return color",
"def get_color(self, coord):\n return self.board[coord[0], coord[1]]",
"def get_node_color(node_label):\n for NODE_KEY in list(NODE_TYPES.keys()):\n if node_label in NODE_TYPES[NODE_KEY]:\n return NODE_COLOR_DICT[NODE_KEY]\n try:\n x = int(node_label)\n return NODE_COLOR_DICT['Terminals']\n except:\n try:\n x = float(node_label)\n return NODE_COLOR_DICT['Terminals']\n except:\n try:\n node_label = node_label.replace(\"\\'\", \"\\\"\")\n tree = json.loads(node_label)\n for key in tree.keys():\n if key not in NODE_TYPES['Learner Params']:\n return NODE_COLOR_DICT['Uncategorized']\n else:\n try:\n x = int(tree[key])\n except:\n try:\n x = float(tree[key])\n except:\n return NODE_COLOR_DICT['Uncategorized']\n return NODE_COLOR_DICT['Learner Params']\n except:\n return NODE_COLOR_DICT['Uncategorized']\n return NODE_COLOR_DICT['Uncategorized']",
"def color(self):\n return self.container['color']",
"def get_color(self):\n return self.color",
"def color(self):\n return self['color']",
"def colorNode(node):\n # Try to find the session color manager.\n manager = _findSessionColorManager()\n\n # If one exists, use it to try to color the node.\n if manager is not None:\n manager.colorNode(node)",
"def _get_color(self, r, g, b):\n clr = (r, g, b)\n return clr",
"def get_color(self):\r\n return self._color",
"def getColor(self):\r\n return self.color",
"def getColor(self):\n return self._l[2]",
"def colorNodeByName(node):\n # Try to find the session color manager.\n manager = _findSessionColorManager()\n\n # If one exists, use it to try to color the node.\n if manager is not None:\n manager.colorNodeByName(node)",
"def get_color(self):\r\n return self.__color",
"def get_edge_color( row ):\n\n rgb = 0.5 * (\n node_color_dict[ row[ 'source' ] ] + \\\n node_color_dict[ row[ 'target' ] ] )\n\n return rgb2hex( rgb )",
"def get_color(self):\n return self._color",
"def get_color(self):\n return self._color",
"def getColor(self):\n return self.color",
"def _get_color(self, color_name):\n if not color_name:\n return 0\n\n if color_name == 'ORANGE':\n color = self.COLOR_ORANGE\n else:\n color = getattr(curses, 'COLOR_' + color_name)\n return curses.color_pair(color)",
"def color(self):\n if self._simplecell:\n self.fetch()\n return self._color",
"def get_color(self):\n\n return self.color"
] |
[
"0.78032285",
"0.73727363",
"0.73418564",
"0.69549704",
"0.68655634",
"0.68655634",
"0.68655634",
"0.68655634",
"0.6771988",
"0.676185",
"0.6759449",
"0.6740052",
"0.66710526",
"0.6658527",
"0.6653712",
"0.66476274",
"0.6644814",
"0.66379815",
"0.6580679",
"0.6572436",
"0.655299",
"0.65365",
"0.65072554",
"0.64914596",
"0.6475152",
"0.6475152",
"0.6455055",
"0.64443576",
"0.64088714",
"0.63944614"
] |
0.87923455
|
0
|
Obtain a Process Tree representation through GraphViz
|
def apply(tree, parameters=None):
if parameters is None:
parameters = {}
filename = tempfile.NamedTemporaryFile(suffix='.gv')
viz = Digraph("pt", filename=filename.name, engine='dot', graph_attr={'bgcolor': 'transparent'})
image_format = exec_utils.get_param_value(Parameters.FORMAT, parameters, "png")
color_map = exec_utils.get_param_value(Parameters.COLOR_MAP, parameters, {})
node_color = get_color(tree, color_map)
enable_deepcopy = exec_utils.get_param_value(Parameters.ENABLE_DEEPCOPY, parameters, True)
if enable_deepcopy:
# since the process tree object needs to be sorted in the visualization, make a deepcopy of it before
# proceeding
tree = deepcopy(tree)
util.tree_sort(tree)
# add first operator
if tree.operator:
viz.attr('node', shape='circle', fixedsize='true', width="0.6",
fontsize="14")
op_node_identifier = str(uuid.uuid4())
viz.node(op_node_identifier, str(tree.operator), color=node_color, fontcolor=node_color)
viz = repr_tree(tree, viz, op_node_identifier, 0, color_map, parameters)
else:
viz.attr('node', shape='box', fixedsize='true', width="2.5",
fontsize="8")
this_trans_id = str(uuid.uuid4())
if tree.label is None:
viz.node(this_trans_id, "tau", style='filled', fillcolor='black')
else:
viz.node(this_trans_id, str(tree), color=node_color, fontcolor=node_color)
viz.attr(overlap='false')
viz.attr(fontsize='11')
viz.format = image_format
return viz
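A hedged usage sketch (assumes tree is an already-constructed process tree object compatible with this module; the output file name is arbitrary):

viz = apply(tree)                          # graphviz.Digraph; format defaults to "png"
viz.render("process_tree", cleanup=True)   # writes process_tree.png, removes the DOT source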
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def tree2gv(tree: TreeNode) -> graphviz.Graph:\n result = graphviz.Graph(\"ni\")\n # result.attr(size='12,0')\n tree2gv_helper(tree, result, \"\")\n return result",
"def tree():\n nobv.visual_tree()",
"def generate_tree(tree, out_file='tree'):\n activities = [\n 'Working at Computer',\n 'Standing Up, Walking and Going up/down stairs',\n 'Standing',\n 'Walking',\n 'Going Up/Down Stairs',\n 'Walking and Talking with Someone',\n 'Talking while Standing',\n ]\n file_name = path.join('..', 'proposal', out_file)\n export_graphviz(tree, out_file=\"{}.dot\".format(file_name), class_names=activities, rounded=True)\n # system(\"dot -Tpng {0}.dot -o {0}.png\".format(out_file))",
"def export_graphviz(decision_tree):\n # This function is private and should not be used outside the scope of the parent function\n def _export_node(node):\n if isinstance(node, EndNode):\n print(\"\\t\\\"{}\\\" [label=\\\"{}\\\"]\\n\".format(str(id(node)),\n str(node.get_target_value())))\n return\n print(\"\\t\\\"\" + str(id(node)) + \"\\\" [label=\\\"\\\"]\\n\")\n for decision_attribute_value in node.children.keys():\n print(\"\\t\\\"{}\\\" -> \\\"{}\\\" [label=\\\"{}={}\\\"]\\n\".format(str(id(node)),\n str(id(node.get_child(decision_attribute_value))),\n str(node.get_decision_attribute()),\n str(decision_attribute_value)))\n _export_node(node.get_child(decision_attribute_value))\n print(\"digraph G {\\n\")\n _export_node(decision_tree)\n print(\"}\")",
"def graphviz_prettify(self, network):\n graph_settings = {\n 'rankdir': 'LR',\n 'dpi': 60,\n }\n network.graph.update(graph_settings)\n\n for n in network.nodes():\n if isinstance(n, Variable):\n network.nodes[n]['label'] = n.name\n elif isinstance(n, Equation):\n network.nodes[n]['shape'] = 'diamond'",
"def render(self): # pragma: no cover\n from graphviz import Digraph\n dot = Digraph(name=\"top\")\n for block in self.blocks:\n if isinstance(block, Branch):\n label = \"if \" + astor.to_source(block.cond)\n dot.node(str(id(block)), label.rstrip(), {\"shape\": \"invhouse\"})\n elif isinstance(block, Yield):\n label = astor.to_source(block.value)\n # label += \"\\nLive Ins : \" + str(block.live_ins)\n # label += \"\\nLive Outs : \" + str(block.live_outs)\n # label += \"\\nGen : \" + str(block.gen)\n # label += \"\\nKill : \" + str(block.kill)\n dot.node(str(id(block)), label.rstrip(), {\"shape\": \"oval\"})\n elif isinstance(block, BasicBlock):\n label = \"\\n\".join(astor.to_source(stmt).rstrip() for stmt in block.statements)\n # label += \"\\nLive Ins : \" + str(block.live_ins)\n # label += \"\\nLive Outs : \" + str(block.live_outs)\n # label += \"\\nGen : \" + str(block.gen)\n # label += \"\\nKill : \" + str(block.kill)\n dot.node(str(id(block)), label.rstrip(), {\"shape\": \"box\"})\n elif isinstance(block, HeadBlock):\n label = \"Initial\"\n dot.node(str(id(block)) + \"_start\", label.rstrip(), {\"shape\": \"doublecircle\"})\n label = \"\\n\".join(astor.to_source(stmt).rstrip() for stmt in block.initial_statements)\n # label += \"\\nLive Ins : \" + str(block.live_ins)\n # label += \"\\nLive Outs : \" + str(block.live_outs)\n # label += \"\\nGen : \" + str(block.gen)\n # label += \"\\nKill : \" + str(block.kill)\n dot.node(str(id(block)), label.rstrip(), {\"shape\": \"box\"})\n dot.edge(str(id(block)) + \"_start\", str(id(block)))\n else:\n raise NotImplementedError(type(block))\n # for source, sink, label in self.edges:\n for sink, label in block.outgoing_edges:\n dot.edge(str(id(block)), str(id(sink)), label)\n\n\n file_name = tempfile.mktemp(\"gv\")\n dot.render(file_name, view=True)\n # with open(\"cfg.dot\", \"w\") as file:\n # file.write(dot.source)\n # exit()",
"def gen_graph(self):",
"def repr_tree(tree, viz, current_node, rec_depth, color_map, parameters):\r\n for child in tree.children:\r\n if child.operator is None:\r\n viz.attr('node', shape='box', fixedsize='true', width=\"2.5\",\r\n fontsize=\"8\")\r\n this_trans_id = str(uuid.uuid4())\r\n if child.label is None:\r\n viz.node(this_trans_id, \"tau\", style='filled', fillcolor='black')\r\n else:\r\n node_color = get_color(child, color_map)\r\n viz.node(this_trans_id, str(child), color=node_color, fontcolor=node_color)\r\n viz.edge(current_node, this_trans_id)\r\n else:\r\n condition_wo_operator = child.operator == pt_operator.Operator.XOR and len(\r\n child.children) == 1 and child.children[0].operator is None\r\n if condition_wo_operator:\r\n childchild = child.children[0]\r\n viz.attr('node', shape='box', fixedsize='true', width=\"2.5\",\r\n fontsize=\"8\")\r\n this_trans_id = str(uuid.uuid4())\r\n if childchild.label is None:\r\n viz.node(this_trans_id, str(childchild), style='filled', fillcolor='black')\r\n else:\r\n node_color = get_color(childchild, color_map)\r\n viz.node(this_trans_id, str(childchild), color=node_color, fontcolor=node_color)\r\n viz.edge(current_node, this_trans_id)\r\n else:\r\n viz.attr('node', shape='circle', fixedsize='true', width=\"0.6\",\r\n fontsize=\"14\")\r\n op_node_identifier = str(uuid.uuid4())\r\n node_color = get_color(child, color_map)\r\n viz.node(op_node_identifier, str(child.operator), color=node_color, fontcolor=node_color)\r\n viz.edge(current_node, op_node_identifier)\r\n viz = repr_tree(child, viz, op_node_identifier, rec_depth + 1, color_map, parameters)\r\n return viz",
"def dot(self):\n d = Digraph(comment=\"VP Tree\", format=\"png\")\n for parent, left, right in self.root.preorder():\n\n if isinstance(parent,VPTreeNonLeaf):\n d.node(str(parent.uid), \"\"\"VP Node:: Key={} Median Dist = {:2.2f}\n \"\"\".format(parent.pk, parent.median_dist))\n d.edge(str(parent.uid), str(left.uid))\n d.edge(str(parent.uid), str(right.uid))\n elif isinstance(parent,VPTreeLeaf):\n d.node(str(parent.uid), \"Leaf Node:: \"+str(parent.pk_list))\n else:\n raise Exception(\"something went wrong\")\n\n return d",
"def __repr__(self):\n return self.displayTree(0)",
"def create_dot(nodes, assocs, hierarchy):\n def field_names(fields):\n return ' | '.join(sorted(fields))\n out = StringIO()\n print >> out, \"digraph phemi_class_diagram {\"\n print >> out, \" node[shape=record];\"\n for clazz, fields in nodes.iteritems():\n print >> out, ' \"%s\" [label=\"{%s | %s}\"];' % (\n fullname(clazz), clazz.__name__, field_names(fields)\n )\n for edgemap in [assocs, hierarchy]:\n for clazz, edges in edgemap.iteritems():\n for edge in edges:\n print >> out, ' \"%s\" -> \"%s\" %s' % (\n fullname(clazz), fullname(edge.dst), edge.style\n )\n print >> out, \"}\"\n return out.getvalue()",
"def _to_dot(self, detailed=False):\n g = ast_to_labeled_graph(self, detailed)\n import tulip.graphics as _graphics\n return _graphics.networkx_to_graphviz(g)",
"def graph(self):\n\n def start_graph():\n return \"digraph services {\\n\\n\"\n\n def end_graph(graph_string):\n graph_string += \"\\n}\\n\"\n return graph_string\n\n def start_cluster(graph_string, cluster_id, cluster_name):\n graph_string += \"subgraph cluster_%s {\\n\" % cluster_id\n graph_string += \" label = \\\"%s\\\";\\n\" % cluster_name\n return graph_string\n\n def end_cluster(graph_string):\n graph_string += \"\\n}\\n\"\n return graph_string\n\n def add_path(graph_string, from_node, to_node, protocol, port):\n if not from_node.name:\n cidr_blocks = [subnetwork.cidr_block for subnetwork in from_node.subnetworks]\n from_name = \",\".join(cidr_blocks)\n from_network_name = \"external\"\n else:\n from_name = from_node.name\n from_network_name = from_node.network.name\n path_template = \"\\\"%s (%s)\\\" -> \\\"%s (%s)\\\" [ label=\\\"(%s:%s)\\\" ];\\n\"\n graph_string += path_template % (from_name, from_network_name, to_node.name,\n to_node.network.name, protocol, port)\n return graph_string\n\n def add_node(graph_string, node_name, network_name):\n graph_string += \" \\\"%s (%s)\\\";\\n\" % (node_name, network_name)\n return graph_string\n\n def group_paths_by_network(paths_info):\n net_to_path = {}\n for path in paths_info:\n if path.network.name not in net_to_path:\n net_to_path[path.network.name] = []\n net_to_path[path.network.name].append(path)\n return net_to_path\n\n def group_services_by_network(services_info):\n net_to_service = {}\n for service_info in services_info:\n if service_info.network.name not in net_to_service:\n net_to_service[service_info.network.name] = []\n net_to_service[service_info.network.name].append(service_info)\n return net_to_service\n\n # First group paths and services by network\n paths_info = self.paths.list()\n net_to_path = group_paths_by_network(paths_info)\n services_info = self.service.list()\n net_to_service = group_services_by_network(services_info)\n networks_info = self.network.list()\n\n graph_string = start_graph()\n cluster_id = 0\n for network_info in networks_info:\n\n # Skip networks with no name for now\n if not network_info.name:\n continue\n\n # Each network is a \"cluster\" in graphviz terms\n graph_string = start_cluster(graph_string, cluster_id, network_info.name)\n cluster_id += 1\n\n # If the network is empty just make a placeholder node\n if network_info.name not in net_to_service and network_info.name not in net_to_path:\n graph_string = add_node(graph_string, \"Empty Network\", network_info.name)\n graph_string = end_cluster(graph_string)\n continue\n\n # Otherwise, add all the services and path in this network\n if network_info.name in net_to_service:\n for service_info in net_to_service[network_info.name]:\n graph_string = add_node(graph_string, service_info.name,\n service_info.network.name)\n graph_string = end_cluster(graph_string)\n\n # We do all paths outside the cluster so that public CIDRs will show up outside the\n # networks.\n if network_info.name in net_to_path:\n for path_info in net_to_path[network_info.name]:\n graph_string = add_path(graph_string, path_info.source, path_info.destination,\n path_info.protocol, path_info.port)\n\n graph_string = end_graph(graph_string)\n return graph_string",
"def to_networkx(self):\n import networkx as nx\n G = nx.DiGraph()\n # G.graph.update(self.config)\n\n if nx.__version__.startswith('1'):\n node_dict = G.node\n else:\n node_dict = G.nodes\n\n def _defaultstyle(node, color, shape='none', **kwargs):\n node_dict[node]['fillcolor'] = color\n node_dict[node]['style'] = 'filled'\n node_dict[node]['shape'] = shape\n node_dict[node].update(kwargs)\n # node_dict[node]['color'] = color\n\n # Add all processes\n # Make inputs and outputs nodes to prevent needing a multigraph\n for proc in self.procs.values():\n G.add_node(proc.name)\n _defaultstyle(proc.name, 'turquoise', shape='ellipse', fontsize=20)\n node_dict[proc.name]['label'] = '{}:\\n{}'.format(proc.name,\n proc.type)\n\n for iport in proc.iports.values():\n iport_name = iport.absname()\n G.add_node(iport_name)\n G.add_edge(iport_name, proc.name)\n node_dict[iport_name]['label'] = iport.name\n _defaultstyle(iport_name, '#fefefe', fontsize=14)\n\n for oport in proc.oports.values():\n oport_name = oport.absname()\n G.add_node(oport_name)\n G.add_edge(proc.name, oport_name)\n node_dict[oport_name]['label'] = oport.name\n _defaultstyle(oport_name, '#f0f0f0', fontsize=14)\n\n # Add all connections\n for proc in self.procs.values():\n for iport in proc.iports.values():\n iport_name = iport.absname()\n for oport in iport.connections:\n if oport is not None:\n oport_name = oport.absname()\n G.add_edge(oport_name, iport_name)\n return G",
"def show_tree(self):\n G, vertex_dict = self.tree().graph()\n root = self.tree().root()\n vertical_list = []\n horizontal_list = []\n no_component_list = []\n for i, xi in vertex_dict.items():\n if xi.is_equal(root):\n root_index = i\n if self.is_component(xi):\n if xi.type() == \"II\":\n vertical_list.append(i)\n else:\n horizontal_list.append(i)\n print(i, \": \", xi)\n else:\n no_component_list.append(i)\n vertex_colors = {'red': vertical_list, 'blue': horizontal_list,\n 'grey': no_component_list}\n G.show(vertex_colors=vertex_colors, tree_root=root_index, layout='tree')",
"def __repr__(self):\n return show_tree(self, lambda node: node.name,\n lambda node: node.children)",
"def visualize_tree(tree, feature_names):\n with open(\"dt.dot\", 'w') as f:\n export_graphviz(tree, out_file=f,\n feature_names=feature_names)\n\n command = [\"dot\", \"-Tpng\", \"dt.dot\", \"-o\", \"dt.png\"]\n try:\n subprocess.check_call(command)\n except:\n exit(\"Could not run dot, ie graphviz, to \"\n \"produce visualization\")",
"def visualize_tree(tree, feature_names, model_name):\n with open(model_name+\".dot\", 'w') as f:\n export_graphviz(tree, out_file=f,\n feature_names=feature_names)\n\n command = [\"dot\", \"-Tpng\", \"dt.dot\", \"-o\", \"dt.png\"]\n try:\n subprocess.check_call(command)\n except:\n exit(\"Could not run dot, ie graphviz, to \"\n \"produce visualization\")",
"def visualize_tree(tree, feature_names, filename):\n with open(\"dt.dot\", 'w') as f:\n export_graphviz(tree, out_file=f,\n feature_names=feature_names)\n\n command = [\"dot\", \"-Tpng\", \"dt.dot\", \"-o\", \"plots-decision/%s.png\" % filename]\n try:\n subprocess.check_call(command)\n except:\n exit(\"Could not run dot, ie graphviz, to \"\n \"produce visualization\")",
"def print(self):\n dot = \"digraph G {\\nrankdir = UD\\n\"\n\n for i in range(len(self.allNodes)):\n if self.allNodes[i].left is not None:\n dot += str(self.allNodes[i].key) + \" -> \" + str(self.allNodes[i].left.key) + \"\\n\"\n if self.allNodes[i].right is not None:\n dot += str(self.allNodes[i].key) + \" -> \" + str(self.allNodes[i].right.key) + \"\\n\"\n\n dot += \"}\"\n\n file = open(\"outputfiles/BinTree.dot\", \"w\")\n file.write(dot)\n file.close()\n\n os.system(\"dot outputfiles/BinTree.dot -Tpng -o outputfiles/BinTree.png\")",
"def display_nodes(self) -> None:\n\n def display_decision_node(node):\n txt = []\n txt.append(\" Type: \" + node.get(\"type\"))\n txt[-1] += (\n \" - Maximum Payoff\" if node.get(\"max\") is True else \" - Minimum Payoff\"\n )\n txt.append(\" Name: \" + node.get(\"tag\"))\n txt.append(\" Branches:\")\n txt.append(\" Value Next Node\")\n for (outcome, next_node) in node.get(\"branches\"):\n txt.append(\n \" {:12.3f} {:d}\".format(outcome, next_node)\n )\n txt.append(\"\")\n return txt\n\n def display_chance_node(node):\n txt = []\n txt.append(\" Type: \" + node.get(\"type\"))\n txt.append(\" Name: \" + node.get(\"tag\"))\n txt.append(\" Branches:\")\n txt.append(\" Chance Value Next Node\")\n for (prob, outcome, next_node) in node.get(\"branches\"):\n txt.append(\n \" {:5.2f} {:12.3f} {:d}\".format(\n prob, outcome, next_node\n )\n )\n txt.append(\"\")\n return txt\n\n def display_terminal_node(node):\n txt = []\n txt.append(\" Type: \" + node.get(\"type\"))\n if node.get(\"expr\") is None:\n txt.append(\" Expr: (cumulative)\")\n else:\n txt.append(\" Expr: (User fn)\")\n txt.append(\"\")\n return txt\n\n txt = []\n for index, node in enumerate(self.data):\n\n txt.append(\"Node {:d}\".format(index))\n\n if node.get(\"type\") == \"DECISION\":\n txt += display_decision_node(node)\n\n elif node.get(\"type\") == \"CHANCE\":\n txt += display_chance_node(node)\n\n elif node.get(\"type\") == \"TERMINAL\":\n txt += display_terminal_node(node)\n\n else:\n\n raise ValueError(\n \"Node type unknown: \" + node.tag + \", \" + node.get(\"type\")\n )\n\n print(\"\\n\".join(txt))",
"def print_tree(self):\n stack = [(self.root, 0, 0)] # (node, child no., tabs)\n ntabs = 0\n while len(stack):\n n, i, tabs = stack.pop()\n if len(n.branch):\n if i>=1 and i==len(n.children)-1:\n print(tabs*'\\t' + 'axis-' + str(n.axis) + ': >' + str(n.branch[i-1]))\n else:\n print(tabs*'\\t' + 'axis-' + str(n.axis) + ': <=' + str(n.branch[i]))\n stack.append((n, i+1, tabs))\n if i<len(n.children):\n stack.append((n.children[i], 0, tabs+1))\n else:\n avg = np.dot(n.probabilities[:,0], n.probabilities[:,1])\n print(tabs*'\\t' + 'Label: ' + str(avg) + '\\n')",
"def pprint_qpgraph(dot_file, pdf_file):\n\n # extract the body contents from the dot file\n with open(dot_file, \"rU\") as fin:\n body = re.search(\"{(.+)}\", fin.read(), re.DOTALL).group(1).strip().split(\"\\n\")\n\n # make a new direct graph\n dot = graphviz.Digraph(filename=trim_ext(pdf_file), body=body, format=\"pdf\")\n\n # remove the messy graph label\n dot.attr(\"graph\", label=\"\")\n\n # set Node defaults\n dot.node_attr[\"shape\"] = \"point\"\n dot.node_attr[\"fontname\"] = \"arial\"\n dot.node_attr[\"fontsize\"] = \"11\"\n\n # set Edge defaults\n dot.edge_attr[\"arrowhead\"] = \"vee\"\n dot.edge_attr[\"fontcolor\"] = \"#838b8b\" # grey\n dot.edge_attr[\"fontname\"] = \"arial\"\n dot.edge_attr[\"fontsize\"] = \"11\"\n\n nodes = []\n\n # extract the leaf nodes from the body of the graph\n for line in dot:\n match = re.search(r\"^ *([a-z_0-9.]+) +\\[\", line, re.IGNORECASE)\n if match:\n nodes.append(match.group(1))\n\n # sort the nodes\n nodes.sort()\n\n try:\n # TODO make node colours a CLI param\n # load the optional colour file\n colours = dict(csv.reader(open(\"nodes.list\", \"r\"), delimiter=\"\\t\"))\n except IOError:\n colours = {}\n\n # set leaf node attributes\n for idx, node in enumerate(nodes):\n colour = colours.get(node, COLOURS[idx])\n dot.node(node, shape=\"ellipse\", color=colour, fontcolor=colour)\n\n try:\n # render the graph (basename.pdf)\n dot.render(cleanup=True)\n except FileNotFoundError:\n pass",
"def dot_format(out, graph, name=\"digraph\"):\n\n out.write(\"digraph %s {\\n\" % name)\n for step, deps in each_step(graph):\n for dep in deps:\n out.write(\" \\\"%s\\\" -> \\\"%s\\\";\\n\" % (step, dep))\n\n out.write(\"}\\n\")",
"def draw_tree(t, df, size=10, ratio=0.6, precision=0):\n s=export_graphviz(t, out_file=None, feature_names=df.columns, filled=True,\n special_characters=True, rotate=True, precision=precision)\n IPython.display.display(graphviz.Source(re.sub('Tree {',\n f'Tree {{ size={size}; ratio={ratio}', s)))",
"def draw_tree(t, df, size=10, ratio=0.6, precision=0):\n s=export_graphviz(t, out_file=None, feature_names=df.columns, filled=True,\n special_characters=True, rotate=True, precision=precision)\n IPython.display.display(graphviz.Source(re.sub('Tree {',\n f'Tree {{ size={size}; ratio={ratio}', s)))",
"def export_graphviz(decision_tree, out_file=SENTINEL, max_depth=None,\n feature_names=None, class_names=None, label='all',\n filled=False, leaves_parallel=False, \n node_ids=False, proportion=False, rotate=False,\n rounded=False, special_characters=False):\n\n def get_color(value):\n # Find the appropriate color & intensity for a node\n if colors['bounds'] is None:\n # Classification tree\n color = list(colors['rgb'][np.argmax(value)])\n sorted_values = sorted(value, reverse=True)\n if len(sorted_values) == 1:\n alpha = 0\n else:\n alpha = int(np.round(255 * (sorted_values[0] - sorted_values[1]) /\n (1 - sorted_values[1]), 0))\n else:\n # Regression tree or multi-output\n color = list(colors['rgb'][0])\n alpha = int(np.round(255 * ((value - colors['bounds'][0]) /\n (colors['bounds'][1] -\n colors['bounds'][0])), 0))\n\n # Return html color code in #RRGGBBAA format\n color.append(alpha)\n hex_codes = [str(i) for i in range(10)]\n hex_codes.extend(['a', 'b', 'c', 'd', 'e', 'f'])\n color = [hex_codes[c // 16] + hex_codes[c % 16] for c in color]\n\n return '#' + ''.join(color)\n\n def node_to_str(tree, node_id, criterion):\n # Generate the node content string\n if tree.n_outputs == 1:\n value = tree.value[node_id][0, :]\n else:\n value = tree.value[node_id]\n\n # Should labels be shown?\n labels = (label == 'root' and node_id == 0) or label == 'all'\n\n # PostScript compatibility for special characters\n if special_characters:\n characters = ['#', '<SUB>', '</SUB>', '≤', '<br/>', '>']\n node_string = '<'\n else:\n characters = ['#', '[', ']', '<=', '\\\\n', '\"']\n node_string = '\"'\n\n # Write node ID\n if node_ids:\n if labels:\n node_string += 'node '\n node_string += characters[0] + str(node_id) + characters[4]\n\n # Write decision criteria\n if tree.children_left[node_id] != _tree.TREE_LEAF:\n # Always write node decision criteria, except for leaves\n if feature_names is not None:\n feature = feature_names[tree.feature[node_id]]\n else:\n feature = \"X%s%s%s\" % (characters[1],\n tree.feature[node_id],\n characters[2])\n node_string += '%s %s %s%s' % (feature,\n characters[3],\n round(tree.threshold[node_id], 4),\n characters[4])\n\n\n # Write node class distribution / regression value\n if proportion and tree.n_classes[0] != 1:\n # For classification this will show the proportion of samples\n value = value / tree.weighted_n_node_samples[node_id]\n if labels:\n node_string += 'value = '\n if tree.n_classes[0] == 1:\n # Regression\n value_text = np.around(value, 4)\n elif proportion:\n # Classification\n value_text = np.around(value, 2)\n elif np.all(np.equal(np.mod(value, 1), 0)):\n # Classification without floating-point weights\n value_text = value.astype(int)\n else:\n # Classification with floating-point weights\n value_text = np.around(value, 4)\n # Strip whitespace\n value_text = str(value_text.astype('S32')).replace(\"b'\", \"'\")\n value_text = value_text.replace(\"' '\", \", \").replace(\"'\", \"\")\n if tree.n_classes[0] == 1 and tree.n_outputs == 1:\n value_text = value_text.replace(\"[\", \"\").replace(\"]\", \"\")\n value_text = value_text.replace(\"\\n \", characters[4])\n node_string += value_text + characters[4]\n\n # Write node majority class\n if (class_names is not None and\n tree.n_classes[0] != 1 and\n tree.n_outputs == 1):\n # Only done for single-output classification trees\n if labels:\n node_string += 'class = '\n if class_names is not True:\n class_name = class_names[np.argmax(value)]\n else:\n class_name = \"y%s%s%s\" % (characters[1],\n np.argmax(value),\n 
characters[2])\n node_string += class_name\n\n # Clean up any trailing newlines\n if node_string[-2:] == '\\\\n':\n node_string = node_string[:-2]\n if node_string[-5:] == '<br/>':\n node_string = node_string[:-5]\n\n return node_string + characters[5]\n\n def recurse(tree, node_id, criterion, parent=None, depth=0):\n if node_id == _tree.TREE_LEAF:\n raise ValueError(\"Invalid node_id %s\" % _tree.TREE_LEAF)\n\n left_child = tree.children_left[node_id]\n right_child = tree.children_right[node_id]\n\n # Add node with description\n if max_depth is None or depth <= max_depth:\n\n # Collect ranks for 'leaf' option in plot_options\n if left_child == _tree.TREE_LEAF:\n ranks['leaves'].append(str(node_id))\n elif str(depth) not in ranks:\n ranks[str(depth)] = [str(node_id)]\n else:\n ranks[str(depth)].append(str(node_id))\n\n out_file.write('%d [label=%s'\n % (node_id,\n node_to_str(tree, node_id, criterion)))\n\n if filled:\n # Fetch appropriate color for node\n if 'rgb' not in colors:\n # Initialize colors and bounds if required\n colors['rgb'] = _color_brew(tree.n_classes[0])\n if tree.n_outputs != 1:\n # Find max and min impurities for multi-output\n colors['bounds'] = (np.min(-tree.impurity),\n np.max(-tree.impurity))\n elif tree.n_classes[0] == 1 and len(np.unique(tree.value)) != 1:\n # Find max and min values in leaf nodes for regression\n colors['bounds'] = (np.min(tree.value),\n np.max(tree.value))\n else:\n # If multi-output color node by impurity\n node_val = -tree.impurity[node_id]\n out_file.write(', fillcolor=\"%s\"' % get_color(node_val))\n out_file.write('] ;\\n')\n\n if parent is not None:\n # Add edge to parent\n out_file.write('%d -> %d' % (parent, node_id))\n if parent == 0:\n # Draw True/False labels if parent is root node\n angles = np.array([45, -45]) * ((rotate - .5) * -2)\n out_file.write(' [labeldistance=2.5, labelangle=')\n if node_id == 1:\n out_file.write('%d, headlabel=\"True\"]' % angles[0])\n else:\n out_file.write('%d, headlabel=\"False\"]' % angles[1])\n out_file.write(' ;\\n')\n\n if left_child != _tree.TREE_LEAF:\n recurse(tree, left_child, criterion=criterion, parent=node_id,\n depth=depth + 1)\n recurse(tree, right_child, criterion=criterion, parent=node_id,\n depth=depth + 1)\n\n else:\n ranks['leaves'].append(str(node_id))\n\n out_file.write('%d [label=\"(...)\"' % node_id)\n if filled:\n # color cropped nodes grey\n out_file.write(', fillcolor=\"#C0C0C0\"')\n out_file.write('] ;\\n' % node_id)\n\n if parent is not None:\n # Add edge to parent\n out_file.write('%d -> %d ;\\n' % (parent, node_id))\n\n own_file = False\n return_string = False\n try:\n if out_file == SENTINEL:\n warnings.warn(\"out_file can be set to None starting from 0.18. 
\"\n \"This will be the default in 0.20.\",\n DeprecationWarning)\n out_file = \"tree.dot\"\n\n if isinstance(out_file, six.string_types):\n if six.PY3:\n out_file = open(out_file, \"w\", encoding=\"utf-8\")\n else:\n out_file = open(out_file, \"wb\")\n own_file = True\n\n if out_file is None:\n return_string = True\n out_file = six.StringIO()\n\n # The depth of each node for plotting with 'leaf' option\n ranks = {'leaves': []}\n # The colors to render each node with\n colors = {'bounds': None}\n\n out_file.write('digraph Tree {\\n')\n\n # Specify node aesthetics\n out_file.write('node [shape=box')\n rounded_filled = []\n if filled:\n rounded_filled.append('filled')\n if rounded:\n rounded_filled.append('rounded')\n if len(rounded_filled) > 0:\n out_file.write(', style=\"%s\", color=\"black\"'\n % \", \".join(rounded_filled))\n if rounded:\n out_file.write(', fontname=helvetica')\n out_file.write('] ;\\n')\n\n # Specify graph & edge aesthetics\n if leaves_parallel:\n out_file.write('graph [ranksep=equally, splines=polyline] ;\\n')\n if rounded:\n out_file.write('edge [fontname=helvetica] ;\\n')\n if rotate:\n out_file.write('rankdir=LR ;\\n')\n\n # Now recurse the tree and add node & edge attributes\n if isinstance(decision_tree, _tree.Tree):\n recurse(decision_tree, 0, criterion=\"impurity\")\n else:\n recurse(decision_tree.tree_, 0, criterion=decision_tree.criterion)\n\n # If required, draw leaf nodes at same depth as each other\n if leaves_parallel:\n for rank in sorted(ranks):\n out_file.write(\"{rank=same ; \" +\n \"; \".join(r for r in ranks[rank]) + \"} ;\\n\")\n out_file.write(\"}\")\n\n if return_string:\n return out_file.getvalue()\n\n finally:\n if own_file:\n out_file.close()",
"def generate(self, diagram):",
"def to_dot(self, name='BDD'): # pragma: no cover\n\t\t# print(\"to_dot\")\n\t\tparts = ['graph', name, '{']\n\t\tfor node in self.dfs_postorder():\n\t\t\tif node is BDDNODEZERO:\n\t\t\t\tparts += ['n' + str(id(node)), '[label=0,shape=box];']\n\t\t\telif node is BDDNODEONE:\n\t\t\t\tparts += ['n' + str(id(node)), '[label=1,shape=box];']\n\t\t\telse:\n\t\t\t\tv = _VARS[node.root]\n\t\t\t\tparts.append('n' + str(id(node)))\n\t\t\t\tparts.append('[label=\"{}\",shape=circle];'.format(v))\n\t\tfor node in self.dfs_postorder():\n\t\t\tif node is not BDDNODEZERO and node is not BDDNODEONE:\n\t\t\t\tparts += ['n' + str(id(node)), '--',\n\t\t\t\t\t\t 'n' + str(id(node.lo)),\n\t\t\t\t\t\t '[label=0,style=dashed];']\n\t\t\t\tparts += ['n' + str(id(node)), '--',\n\t\t\t\t\t\t 'n' + str(id(node.hi)),\n\t\t\t\t\t\t '[label=1];']\n\t\tparts.append('}')\n\t\treturn \" \".join(parts)",
"def visualize_tree(tree, feature_names, save_dir='./'):\n with open(save_dir+'/'+\"dt.dot\", 'w') as f:\n export_graphviz(tree, out_file=f,\n feature_names=feature_names)\n\n command = [\"dot\", \"-Tpng\", save_dir+\"/dt.dot\", \"-o\", save_dir+\"/dt.png\"]\n try:\n subprocess.check_call(command)\n except:\n exit(\"Could not run dot, ie graphviz, to \"\n \"produce visualization\")"
] |
[
"0.7094322",
"0.6686533",
"0.64426917",
"0.640525",
"0.6387397",
"0.637199",
"0.6317073",
"0.63015395",
"0.62655133",
"0.62164646",
"0.6206735",
"0.6197981",
"0.61905634",
"0.6177722",
"0.6103405",
"0.6046381",
"0.60189193",
"0.59846956",
"0.597069",
"0.5962589",
"0.5962081",
"0.59483427",
"0.59457916",
"0.5937437",
"0.5935845",
"0.5935845",
"0.5917045",
"0.591644",
"0.59101933",
"0.58932364"
] |
0.7527614
|
0
|
Normalize the weights of the available sentence data so that they add up to 1 and can be used for further processing.
|
def normalize(self):
    # Scale factor that rescales the weights so they sum to self.sum2 (typically 1).
    norm_val = self.sum2 / self.sum1
    self.sum1 = 0
    for sentence in self.data_set:
        sentence.weight *= norm_val
        self.sum1 += sentence.weight
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def normalize(self):\n for key in self.corpus.keys():\n sum_count = 0\n words = []\n counts = []\n for k, v in self.corpus[key].items():\n sum_count += v\n words.append(k)\n counts.append(v)\n prob = [float(count)/sum_count for count in counts]\n\n self.corpus[key] = [words, prob]",
"def normalize(data):\n data = lowercase(data)\n data = remove_punct(data)\n data = remove_apostrophes(data)\n data = remove_stopwords(data)\n data = num_to_words(data)\n data = lemmatize(data)\n data = stemming(data)\n data = remove_punct(data)\n data = num_to_words(data)\n data = lemmatize(data)\n data = stemming(data)\n data = remove_punct(data) #done again to remove hyphens produced by num2words\n data = remove_stopwords(data) #done agan to remove stopwords produced by num2words\n return data",
"def normalize_data(self):\r\n # quantify data for each column except classification column for noise reduction\r\n for column_header in self.classification_training_data.columns:\r\n if column_header == \"Class\":\r\n continue\r\n if column_header == \"Age\":\r\n bin_size = 2\r\n elif column_header == \"Ht\":\r\n bin_size = 5\r\n else:\r\n bin_size = 1\r\n for idx in self.classification_training_data.index:\r\n self.classification_training_data.at[idx, column_header] = math.floor(\r\n self.classification_training_data[column_header][idx] / bin_size) * bin_size",
"def normalize_parameters(self):\n self.entity_embeddings.weight.data = normalize(self.entity_embeddings.weight.data,\n p=self.norm_type, dim=1)",
"def normalize_dataset(self):",
"def normalize_parameters(self):\n self.entity_embeddings.weight.data = normalize(self.entity_embeddings.weight.data,\n p=2, dim=1)\n self.relation_embeddings.weight.data = normalize(self.relation_embeddings.weight.data,\n p=2, dim=1)\n self.ent_proj_vects.data = normalize(self.ent_proj_vects.data, p=2, dim=1)\n self.rel_proj_vects.data = normalize(self.rel_proj_vects.data, p=2, dim=1)",
"def normalize(self):\n d = learning_utils.convert_data_to_2d(self._data)\n d = learning_utils.normalize_2d(d)\n self._data = learning_utils.convert_data_to_1d(d)",
"def _unnormalized_transform(self):\n return self.n_ds + self.doc_sentiment_prior_",
"def normalize_parameters(self):\n self.entity_embeddings.weight.data = normalize(self.entity_embeddings.weight.data,\n p=self.norm_type, dim=1)\n self.normal_vectors.data = normalize(self.normal_vectors, p=2, dim=1)",
"def normalize(self):\n self._data /= self.norm()",
"def normalize_parameters(self):\n self.entity_embeddings.weight.data = normalize(self.entity_embeddings.weight.data,\n p=2, dim=1)\n self.relation_embeddings.weight.data = normalize(self.relation_embeddings.weight.data,\n p=2, dim=1)\n self.projection_matrices.data = normalize(self.projection_matrices.data, p=2, dim=2)",
"def normalize_data(self):\n self.x_mean, self.x_std = du.get_mean_std(self.x_train)\n self.x_train = du.normalize(self.x_train, self.x_mean, self.x_std)\n if self.x_test is not None and self.y_test is not None:\n self.x_test = du.normalize(self.x_test, self.x_mean, self.x_std)\n self.normalized_data = True",
"def normalize_weights(self):\n total_weight = sum(self.weights)\n self.norm_weights = self.weights / float(total_weight)",
"def normalize(self):\n if self.normed:\n return\n self._normalize()",
"def _normalize(self, dataset):\n if self.max is None: # if we are normalizing the training set\n self.max, self.min = dataset.max(), dataset.min() # find max, min value for each columns\n for row in dataset.index: # for each row in dataset\n for col in self.features: # for each feature in the instance (exclude target)\n dataset.at[row, col] = (dataset.at[row, col] - self.min[col]) / (self.max[col] - self.min[col]) if col != \"Bias\" else 1",
"def normalize(self):\n\n pass",
"def _preprocess(self):\n self.data['sentences'] = self.data['text'].apply(self._tokenize_sent)\n self.data['nouns'] = self.data['sentences'].apply(self._get_nouns)\n # self._get_frequent_features()\n # self._compactness_pruning()\n # self._redundancy_pruning()\n # self._get_features()\n self._extract_opinions()",
"def normalize_weights(self):\n \n # Set negative weights to zero\n # Normalize to sum to one.\n \n\n\n self.new_weight=[]\n for i in self._weights:\n if any(i < 0 for i in self._weights):\n self.new_weight = [0,1]\n\n elif all(i == 0 for i in self._weights):\n i = 1/len(self._weights)\n self.new_weight.append(i)\n else:\n i = i/sum(self._weights)\n self.new_weight.append(i)\n\n # If the weights are all zeros, set weights equal to 1/k, where k is the number\n # of components.\n self._weights = self.new_weight\n self._weights = np.round(self._weights,3)",
"def normalize(self):\n total = self.total()\n for x in self.d:\n self.d[x] /= total\n return total",
"def normalize_features(self, data_dict, ind):\n pre_norm_list = []\n for title in data_dict:\n pre_norm_list.append(data_dict[title][ind])\n if self.normalization_method == 'min_max':\n mini, maxi, norm_list = normalize.min_max_normalize(pre_norm_list)\n self.normalization_n.append(mini)\n self.normalization_d.append(maxi - mini)\n elif self.normalization_method == 'z_score':\n mean, var, norm_list = normalize.z_score_normalize(pre_norm_list)\n self.normalization_n.append(mean)\n self.normalization_d.append(var)\n elif self.normalization_method == 'none':\n norm_list = pre_norm_list[:]\n self.normalization_n.append(0)\n self.normalization_d.append(1)\n for i, title in enumerate(data_dict):\n data_dict[title][ind] = norm_list[i]",
"def normalize(self) -> None:\n assert hasattr(self, \"characterized_inventory\"), \"Must do lcia first\"\n if not hasattr(self, \"normalization_matrix\"):\n self.load_normalization_data()\n self.normalization_calculation()",
"def normalize(self):\n total = float(self.totalCount())\n if total != 0:\n self.divideAll(total)",
"def normalize_labels(self):\n self.y_mean, self.y_std = du.get_mean_std(self.y_train)\n self.y_train = du.normalize(self.y_train, self.y_mean, self.y_std)\n if self.x_test is not None and self.y_test is not None:\n self.y_test = du.normalize(self.y_test, self.y_mean, self.y_std)",
"def normalize(w):\n s = sum(w)\n for i in range(len(w)):\n w[i] /= s\n return w",
"def normalizeWeights(self):\n\n\t\t# Normalizing crossover and mutation handler weights, result is a CDF\n\t\ttotal = sum(self.mutation_handlers_weights)\n\t\tcumsum = 0\n\t\tfor i in range(len(self.mutation_handlers_weights)):\n\t\t\tcumsum += self.mutation_handlers_weights[i]\n\t\t\tself.mutation_handlers_weights[i] = cumsum/total\n\t\ttotal = sum(self.crossover_handlers_weights)\n\t\tcumsum = 0\n\t\tfor i in range(len(self.crossover_handlers_weights)):\n\t\t\tcumsum += self.crossover_handlers_weights[i]\n\t\t\tself.crossover_handlers_weights[i] = cumsum/total",
"def _normalize(self, word):\n return self.lemmatize(word.lower())",
"def data_normalize (self, data):\r\n data = data + (2**15)\r\n data = data / ((2**16) - 1)\r\n data = 2 * data\r\n data = data - 1\r\n\r\n return data",
"def normalize_train_data(train_data, hter=False):\n feats = train_data[:, :-1]\n labels = train_data[:, -1]\n if hter:\n labels_pw = labels\n else:\n labels_pw = labels / feats[:, 1]\n scaler = pp.StandardScaler()\n scaler.fit(feats)\n norm_feats = scaler.transform(feats)\n return np.concatenate((norm_feats, labels_pw[:, None]), axis=1), scaler",
"def __init__(self,sentences):\n self.data_set = sentences\n self.sum1=0\n for sentence in self.data_set:\n sentence.weight = 1/len(self.data_set)\n self.sum1 += sentence.weight\n\n self.sum2=1",
"def normalize_and_clip(meas):\n frequency_weights = np.zeros(len(meas))\n\n for i in range(len(meas)):\n if(meas[i] > NORMALIZE_SENS[i]):\n meas[i] = NORMALIZE_SENS[i]\n frequency_weights[i] = meas[i] / NORMALIZE_SENS[i]\n\n return frequency_weights"
] |
[
"0.7035232",
"0.6716962",
"0.6529263",
"0.65186805",
"0.6433806",
"0.63755864",
"0.62821734",
"0.62630904",
"0.6206676",
"0.6193343",
"0.61276984",
"0.6101676",
"0.6050608",
"0.6017587",
"0.601533",
"0.60010767",
"0.5976411",
"0.5958432",
"0.5951639",
"0.59349674",
"0.59256583",
"0.59249324",
"0.585513",
"0.5818273",
"0.57957727",
"0.57943267",
"0.57930326",
"0.5791976",
"0.57212526",
"0.57162374"
] |
0.7599852
|
0
|
The function is used to get all tags and the number of tags from the existing ps log.
|
def getTagsNum(self):
self.gettags()
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def getTags(number=None):",
"def gettotaltags(s,refconvdf): \r\n taglist=[] \r\n for ptag in refconvdf[(refconvdf.convid==s)].tags.values:\r\n if type(ptag)==str or type(ptag)==unicode:\r\n ptag=ptag[1:-1].split(', ') #possible source of error.. added space to cater for reading in csv. \r\n if ptag:\r\n try: \r\n for ele in ptag:\r\n taglist.append(ele) \r\n except TypeError:\r\n pass\r\n return taglist",
"def tags():",
"def count_tags(tag_events):\n tagged_lines = []\n for tag_event in tag_events:\n for tag in tag_event[1][\"tag\"][\"labels\"]:\n tagged_lines.append(tag)\n tag_counts = Counter(tagged_lines)\n return tag_counts",
"def list_all_tags(self,obs):",
"def get_pos_tags(blob):\n return blob.pos_tags",
"def find_usefull_tags(tags, tagmodel, tag_count_vect):\n\n final_tags = []\n for tag in tags:\n if tag == None:\n continue\n else:\n tagpd = pd.Series(tag)\n tag_feature = tag_count_vect.transform(tagpd)\n result = tagmodel.predict(tag_feature)\n\n result = result.tolist() \n result = str(result)\n if result == '[1]':\n final_tags.append(tag)\n final_tags = list(dict.fromkeys(final_tags))\n return(final_tags)",
"def writetags(self):\r\n #FIXME: need to manually remove those tags not needed\r\n #the logic is here, just need the first tag to identify\r\n tagssections = dict()\r\n for i, pid in enumerate(self.pids):\r\n taglist = self.piddb[pid]['tags']\r\n if taglist:\r\n lproc = self.piddb[pid]['process']\r\n firsttag = taglist[0]\r\n lprio = self.defaultoccurnum\r\n #special handling, LEMON need more check\r\n if firsttag == 'LEMON':\r\n lprio = self.lemonoccurnum\r\n tagssections[firsttag] = lproc + ','+ str(lprio)\r\n\r\n self.config['tags'] = tagssections\r\n self.config.write()",
"def pos_tags(self):\n \n msg(\"Getting POS tag list...\")\n tags = []\n \n # loop through sentences\n for sent in self.tagged_sents:\n \n # loop through tagged words\n for (word, pos) in sent:\n \n # add tag if it's not already in list\n if pos not in tags:\n tags.append(pos)\n\n msg(\"done\\n\")\n \n return tags",
"def tag_counts(self, types=[]):\n if not types:\n types = self.tag_types\n for tag_type in types:\n print \"\\t%15s : %-10s\" % (tag_type, len(self.tag_dictionary[tag_type]))",
"def get_pipe_stats(logvalues):\n message_id = logvalues[5]\n print \"Message ID: {}\".format(message_id)\n print logvalues",
"def sendUpStatCountTagCounts(node, tag):\n def pushUp(node):\n t = 0\n ta = 0\n for child in node.children:\n tc, tac = pushUp(child)\n ta += tac\n t += tc\n node.tagTranscriptAnnotations += ta\n node.tagTranscripts += t\n return node.tagTranscripts, node.tagTranscriptAnnotations\n if ':' in tag:\n tag = tag.split(':')[-1]\n pushUp(node)",
"def info(self) -> list[int]:",
"def extract_age(spoiler_logs):\n return [0 if log[\"randomized_settings\"]['starting_age'] == \"child\" else 1 for log in spoiler_logs]",
"def get_num_POS_tags(data, pos_tag):\n pos_count = []\n for tweet in data:\n tokens = nltk.word_tokenize(tweet)\n tags = nltk.pos_tag(tokens)\n counts = Counter([j for i, j in tags])\n total = sum(counts.values())\n # normalized_counts = dict((word, float(count) / total) for word, count in counts.items())\n normalized_verb_count = sum(count for pos, count in counts.iteritems() if pos.startswith(pos_tag))\n # verb_counts = sum(1 for word, pos in normalized_counts if word.startswith('VB'))\n pos_count.append(normalized_verb_count / total)\n\n return np.array(pos_count).reshape(-1, 1)",
"def _list_tags(self, expression):\n try:\n for tag in self.dockerioapi.get_tags(expression):\n Msg().out(tag)\n return self.STATUS_OK\n except (KeyError, TypeError, ValueError):\n return self.STATUS_ERROR",
"def __list_all_tags(self):\n\n tags_dict = get_data.get_tagnames_dict()\n if len(tags_dict) > 0:\n first_str = 'tag'\n second_str = 'top posts scraped'\n third_str = 'recent posts scraped'\n descriptor = '{:<40} {:<20} {}'\n print('')\n print(descriptor.format(first_str, second_str, third_str))\n print(descriptor.format(len(first_str) * '-', len(second_str) * '-',\n len(third_str) * '-'))\n for number, tag in tags_dict.items():\n space_str = ' ' if len(str(number)) > 1 else ' '\n first = '[' + space_str + str(number) + '] ' + tag\n second = str(get_data.get_top_tag_post_count(tag))\n third = str(get_data.get_recent_tag_post_count(tag))\n print(descriptor.format(first, second, third))\n else:\n print('no tags found in the database')",
"def getTagStats(graph, tag):\n r = graph.getroot()\n s = graphToStatCount(r, tag)\n if ':' in tag:\n getExactBranch(s, tag)\n else:\n pruneStatCountBranches(s, tag)\n sendUpStatCountTagCounts(s, tag)\n return s",
"def pos_taggers(self, df):\n p2 = []\n post_process = df['Keyword'].tolist() \n p1 = nltk.pos_tag(post_process)\n for i in post_process:\n p2.append(nltk.pos_tag([i]))\n return p1,p2",
"def get_all_tags_count(self):\n return apps.get_model('tags', 'Tag').objects.filter(\n all_traits__source_dataset__source_study_version__study=self,\n all_traits__source_dataset__source_study_version__i_is_deprecated=False\n ).distinct().count()",
"def getTags(bufferNumber, changedTick):\n # DOC {{{\n # }}}\n\n # CODE {{{\n # define global variables\n global TAGLINENUMBERS, TAGS, BUFFERTICKS\n\n # return immediately if there's no need to update the tags {{{\n if (BUFFERTICKS.get(bufferNumber, None) == changedTick):\n return (TAGLINENUMBERS[bufferNumber], TAGS[bufferNumber],)\n # }}}\n\n # get the tags {{{\n simpleTagsParser = SimplePythonTagsParser(VimReadlineBuffer(vim.current.buffer))\n tagLineNumbers, tags = simpleTagsParser.getTags()\n # }}}\n\n # update the global variables {{{\n TAGS[bufferNumber] = tags\n TAGLINENUMBERS[bufferNumber] = tagLineNumbers\n BUFFERTICKS[bufferNumber] = changedTick\n # }}}\n\n # return the tuple (tagLineNumbers, tags,)\n return (tagLineNumbers, tags,)\n # }}}",
"def count_posTags(self):\n pos_list = ['NUM', 'AUX', 'ADV', 'DET', 'NOUN', 'ADP', 'ADJ', 'NOUN', 'VERB', 'NOUN', 'PUNCT', 'PUNCT']\n result = count_posTags(pos_list)\n self.assertEqual(result, (3, 1, 1, 1, 2))",
"def tag_counts (count_file):\r\n tagcounts = defaultdict(int)\r\n f = open(count_file, 'r')\r\n for line in f:\r\n fields = line.split()\r\n if fields[1] != 'WORDTAG':\r\n continue\r\n count = int(fields[0])\r\n tag = fields[2]\r\n tagcounts[tag] += count \r\n f.close() \r\n return tagcounts",
"def pos_tag(self,sentence):\n tagged = self.brill_tagger.tag(sentence.split())\n tagged_sentence = \" \".join([nltk.tag.tuple2str(tok) for tok in tagged])\n print tagged_sentence\n\n tag_list = [(each.split(\"/\")[0],each.split(\"/\")[1]) for each in tagged_sentence.split()]\n return tag_list",
"def get_all_tagged(self,tag_name):\n return self.tag2elements[tag_name]",
"def getLogs():",
"def getLogs():",
"def get_stack_numbers():\n\n print(\"-> start stack count\")\n tstart = time.time()\n proc = sbp.run([\"curl\",\n \"--silent\",\n \"--request\", \"POST\",\n \"--location\",\n \"--data\", \"REQUEST=doQuery\",\n \"--data\", \"PHASE=RUN\",\n \"--data\", \"FORMAT=text\",\n \"--data\", \"LANG=ADQL\",\n \"--data\", \"QUERY=SELECT distinct a.name, a.detect_stack_id from csc21_snapshot.master_stack_assoc a\",\n \"https://cda.cfa.harvard.edu/csc21_snapshot_tap/sync\"],\n check=True, stdout=sbp.PIPE)\n\n tend = time.time()\n print(f\"<- took {tend - tstart:.1f} seconds\")\n\n stacks = defaultdict(int)\n header = True\n for l in proc.stdout.decode().split(\"\\n\"):\n if header:\n if l.startswith(\"#\"):\n continue\n\n if l != \"name\\tdetect_stack_id\":\n raise ValueError(l)\n\n header = False\n continue\n\n if l == \"\":\n continue\n\n toks = l.split(\"\\t\")\n assert len(toks) == 2, l\n stacks[toks[1]] += 1\n\n # remove default nature (so we know what stacks are not known)\n #\n out = {}\n for stack, count in stacks.items():\n out[stack] = count\n\n return out",
"def get_rawlogs_by_tag(tags):\n return dict(get_rawlogs_by_tag_it(tags))",
"def tags(self) -> pulumi.Output[Sequence[str]]:\n return pulumi.get(self, \"tags\")"
] |
[
"0.65967417",
"0.63102764",
"0.6142363",
"0.5939912",
"0.5799554",
"0.56801045",
"0.5604655",
"0.560121",
"0.5547236",
"0.55231816",
"0.5501453",
"0.547785",
"0.54671407",
"0.546639",
"0.5457964",
"0.5433481",
"0.54334056",
"0.5421435",
"0.5411407",
"0.54072213",
"0.53912103",
"0.5387857",
"0.53623307",
"0.53158784",
"0.53092515",
"0.52997106",
"0.52997106",
"0.52782136",
"0.5268448",
"0.5245096"
] |
0.66558516
|
0
|
Compute an upper bound on month length. Even if we haven't identified all the fields needed for checking leap years yet, we can still prune if value.d is bigger than the month can ever possibly be.
|
def valid_day_of_month(value):
if value.m == 2:
month_length = 29
if value.y is not None:
if (value.y % 4) != 0:
month_length = 28
elif value.y == 0 and value.C is not None and (value.C % 4) != 0:
month_length = 28
else:
# Odd-numbered months are longer through July, then even-numbered
# months are longer for the rest of the year.
month_length = 30 + ((value.m % 2) == (value.m < 8))
return value.d <= month_length
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def max_days(self):\n month_lengths = [31, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31, 29]\n days_years = 365.24 * (self.number // 12)\n days_months = np.sum(month_lengths[:self.number % 12])\n days_frac = days_years + days_months + self.days\n return int(np.ceil(days_frac))",
"def month_bounds(year, month):\n year = int(year)\n month = int(month)\n month_start = datetime.strptime('%s,%s,1' % (year, month),'%Y,%m,%d')\n # days_in_month returns a tuple(weekday, days) where\n # weekday is the eekday the month starts on and days is the number of days in the month\n days_in_month = calendar.monthrange(year,month)\n month_end = month_start + timedelta(days=days_in_month[1]-1)\n return (month_start, month_end)",
"def max_days(self):\n # This can be refined, as there are at max 2 consecutive months with 31\n # days\n days_fractional = self.number * 365.24 + self.days\n return int(np.ceil(days_fractional))",
"def is_end_of_month(self):\n return tf.math.equal(self._days,\n _num_days_in_month(self._months, self._years))",
"def max_scans_per_month(self):\n return self._max_scans_per_month",
"def get_period_length(self) -> int:\n return (dataset.max_date - dataset.min_date).days + 1",
"def floor_end_month(date):\n return datetime(date.year, date.month, 1) + timedelta(days=-1)",
"def validate_days(year, month, day):\n total_days = calendar.monthrange(year, month)\n return ( total_days[1] if (day > total_days[1]) else day )",
"def datelst_get_month_aligned_bounds(dates_):\n dfirst = dates_[0]\n dlast = dates_[-1]\n\n bound_lo = dt.datetime(dfirst.year, dfirst.month, 1)\n bound_hi = (dt.datetime(dates_[-1].year, dates_[-1].month, 1)+dt.timedelta(days=32))\n bound_hi.replace(day=1)\n bound_hi = bound_hi.replace(day=1) - dt.timedelta(seconds=1)\n\n return (bound_lo, bound_hi)",
"def _days_before_month(year, month):\n assert 1 <= month <= 12, \"month must be in 1..12\"\n return _DAYS_BEFORE_MONTH[month] + (month > 2 and _is_leap(year))",
"def min_days(self):\n month_lengths = [28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31, 31]\n days_years = 365.24 * (self.number // 12)\n days_months = np.sum(month_lengths[:self.number % 12])\n days_frac = days_years + days_months + self.days\n return int(np.floor(days_frac))",
"def interval_months(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"interval_months\")",
"def interval_months(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"interval_months\")",
"def six_worst_months():\n if not monthly_averages:\n return [('', 0.0), ('', 0.0), ('', 0.0), ('', 0.0), ('', 0.0), ('', 0.0)]\n monthly_averages.sort(key=operator.itemgetter(1), reverse=False)\n if len(monthly_averages) < 6:\n return \"Data Error\"\n return monthly_averages[:6]",
"def days_in_month(year, month):\n\tif not 1 <= month <= 12:\n\t\treturn 'Invalid Month'\n\n\tif month == 2 and is_leap(year):\n\t\treturn 29\t\n\n\treturn month_days[month]",
"def _days_in_month(year, month):\n # Avoid _DAYS_IN_MONTH so that we don't realize the month\n assert 1 <= month <= 12, month\n if month >= 8:\n return 31 if month % 2 == 0 else 30\n else:\n if month == 2:\n return 29 if _is_leap(year) else 28\n else:\n return 30 if month % 2 == 0 else 31",
"def test_invalid_out_of_bounds_year(self):\n year, month, error = clean_year_month(2014, 100000, 1)\n self.assertEqual(year, now.year)\n self.assertEqual(month, timezone.localtime(timezone.now()).month)\n self.assertEqual(error, ERROR)",
"def number_of_days_in_month(year=2019, month=1):\n return monthrange(year, month)[1]",
"def days_in_month(year, month):\n if not 1 <= month <= 12:\n return \"Invalid Month\"\n\n if month == 2 and is_leap(year):\n return 29\n\n return month_days[month]",
"def end_month(d):\n return date(d.year, d.month, monthrange(d.year, d.month)[1])",
"def is_expunged(cls, month, year):\n return month != cls.lunar_from_fixed(HinduLunarFullMoonDate(year, month, False, 15, False).to_fixed()).month",
"def days_in_month(year, month):\n\n if not 1 <= month <= 12:\n return 'Invalid month!'\n if month == 2 and is_leap(year):\n return 29\n return month_days[month]",
"def days_in_month(month):\r\n if month==\"February\":\r\n return 28\r\n elif month==\"September\" or month==\"April\" or month==\"June\" or month==\"November\":\r\n return 30\r\n elif month==\"January\" or month==\"March\" or month==\"May\" or month==\"July\" or month==\"August\" or month==\"October\" or month==\"December\":\r\n return 31",
"def days_per_month(leap=False):\n\n ndays = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]\n if leap:\n ndays[1]+= 1\n return ndays",
"def diff_month(d1, d2):\n return ceil(abs((d1.year - d2.year) * 12 + d1.month - d2.month))",
"def _max_periods(self):\n return self.data.shape[0]",
"def days_this_month(year, month):\n ndays = days_per_month(isleap(year))\n return ndays[month - 1]",
"def is_valid_month (val):\n if len(val) == 2 and count_digits(val) == 2:\n month = int(val)\n return month > 0 and month < 13\n return False",
"def numDays(month, year):\n\tif month in [9, 4, 6, 11]:\n\t\treturn 30\n\telif month == 2 and year % 4 == 0 and (year % 100 != 0 or year % 400 == 0):\n\t\treturn 29\n\telif month == 2:\n\t\treturn 28\n\telse:\n\t\treturn 31",
"def daysInMonth(month,year):\n if (month == 2):\n if (isLeapYear(year)):\n return 29\n else:\n return 28\n elif (month in [1, 3, 5, 7, 8, 10, 12]):\n return 31\n else: \n return 30"
] |
[
"0.5886275",
"0.5427668",
"0.5374245",
"0.5331599",
"0.53222615",
"0.53004485",
"0.52571625",
"0.5251135",
"0.5178014",
"0.51442075",
"0.5120376",
"0.51074386",
"0.51074386",
"0.5104363",
"0.5086965",
"0.5083028",
"0.5073267",
"0.50732094",
"0.50614166",
"0.5055513",
"0.50528747",
"0.50519645",
"0.502243",
"0.5009981",
"0.49678773",
"0.49566513",
"0.4934295",
"0.49194312",
"0.48749664",
"0.48704582"
] |
0.6487694
|
0
|
Are contexts' targets reordered in a consistent way?
|
def test_context_target_reordering(self):
client = IPythonClient()
orig_targets = client.ids
ctx1 = Context(client, targets=shuffle(orig_targets[:]))
ctx2 = Context(client, targets=shuffle(orig_targets[:]))
self.assertEqual(ctx1.targets, ctx2.targets)
ctx1.close()
ctx2.close()
client.close()
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def exploration_order(targets : [Exp], context : Context, pool : Pool = RUNTIME_POOL):\n\n # current policy (earlier requirements have priority):\n # - visit runtime expressions first\n # - visit low-complexity contexts first\n # - visit small expressions first\n def sort_key(tup):\n e, ctx, p = tup\n return (0 if p == RUNTIME_POOL else 1, ctx.complexity(), e.size())\n\n for target in targets:\n for e, ctx, p in sorted(unique(shred(target, context, pool=pool)), key=sort_key):\n yield (target, e, ctx, p)",
"def test_create_Context_with_targets_ranks(self):\n client = IPythonClient()\n targets = [3, 2]\n dac = Context(client, targets=targets)\n self.assertEqual(set(dac.targets), set(targets))\n dac.close()\n client.close()",
"def _order_target_list(self, targets):\r\n targets = set(t for t in targets if isinstance(t, Target))\r\n return filter(targets.__contains__, reversed(InternalTarget.sort_targets(targets)))",
"def sort(self):\r\n return self.sort_targets([self])",
"def _process_contexts(w_context):\n try:\n plugins.load()\n ctxs = []\n for ctx_name in w_context:\n ctx_cls = context.Context.get(ctx_name)\n ctxs.append((ctx_cls.get_order(), ctx_cls.get_fullname()))\n ctxs.sort()\n ctxs = [\"%s.setup\" % ctx_name for _i, ctx_name in ctxs]\n ctxs.extend([ctx.replace(\".setup\", \".cleanup\")\n for ctx in reversed(ctxs)])\n return ctxs\n except Exception:\n # the proper of context can be missed while applying the migration, it\n # should not stop us from migrating the database\n return None",
"def autofixTargets(self, local_ctx):\n pass",
"def post_process(self, relevant_targets):\r\n pass",
"def build_order(self):\n seen = set()\n\n def _already_built(node):\n # visit only once\n if node in seen:\n return True\n seen.add(node)\n\n # prune if the result is already computed\n if node.output_ready:\n return True\n\n return False\n\n for target in self.targets:\n if target in seen:\n continue\n for node in target.postorder(prune_fn=_already_built):\n yield node",
"def remap_context_labels(self):\n c_contexts = list(self.context[self.iter])\n unique_contexts = uniqify(c_contexts)\n remap_dict = dict(zip(unique_contexts,\n range(1, len(unique_contexts) + 1)))\n\n remapped = copy.deepcopy(self.context[self.iter])\n for old, new in remap_dict.iteritems():\n self.context[self.iter][remapped==old] = new",
"def dependency_order(self):\n seen = set()\n\n def _prune_visited(node):\n if node in seen:\n return True\n seen.add(node)\n return False\n\n for target in self.targets:\n if target in seen:\n continue\n for node in target.postorder(prune_fn=_prune_visited):\n yield node.data",
"def check_element_order(context_action_dict):\n for question in context_action_dict:\n elements = context_action_dict[question]\n context_action_dict[question] = sorted(elements, key=lambda x: x['indexes'][0])\n return context_action_dict",
"def test_ctrl_space_index_preservation(self):\n from omtk.modules import rigFK\n\n def check_targets_index_match(ctrl):\n self.assertEqual(len(ctrl.targets), len(ctrl.targets_indexes))\n attr_space = ctrl.node.space\n for target, target_index in attr_space.getEnums().iteritems():\n if target == 'Local':\n continue\n target = pymel.PyNode(target)\n self.assertIn(target, ctrl.targets) # Ensure the target is stored\n logical_index = ctrl.targets.index(target)\n self.assertEqual(target_index, ctrl.targets_indexes[logical_index])\n\n # Create a simple influence hierarhy\n inf_a = pymel.createNode('joint')\n inf_b = pymel.createNode('joint', parent=inf_a)\n inf_c = pymel.createNode('joint', parent=inf_b)\n inf_d = pymel.createNode('joint', parent=inf_c)\n\n # Create a simple rig\n r = omtk.create()\n mod_a = r.add_module(rigFK.FK([inf_a]))\n mod_b = r.add_module(rigFK.FK([inf_b]))\n mod_c = r.add_module(rigFK.FK([inf_c]))\n mod_d = r.add_module(rigFK.FK([inf_d]))\n\n # Build the last module\n mod_d.build()\n\n # Analyse the indexes\n c = mod_d.ctrls[0]\n old_targets = c.targets\n old_targets_indexes = c.targets_indexes\n check_targets_index_match(c)\n\n # Unbulid the last module, change the hierarchy and rebuilt it\n mod_d.unbuild()\n inf_d.setParent(inf_b)\n mod_d.build()\n\n # Analyse the indexes\n c = mod_d.ctrls[0]\n new_targets = c.targets\n new_targets_indexes = c.targets_indexes\n check_targets_index_match(c)\n\n self.assertListEqual(old_targets, new_targets)\n self.assertListEqual(old_targets_indexes, new_targets_indexes)",
"def test_token_order(self):\n tokens = [Token(1), Token(2), Token(3), Token(4)]\n tokens_equal = [Token(1), Token(1)]\n self._check_sequence_consistency(tokens)\n self._check_sequence_consistency(tokens_equal, equal=True)",
"def _list_contexts(self):\r\n return sorted(list(self._bbreader.cache.keys()))",
"def order(self):\n pairs = [(w['source'][0], w['target'][0]) for w in self.wires]\n return processing_order(len(self.modules), pairs)",
"def more_specific_context(ctx1 : Context, ctx2 : Context) -> Context:\n a = ctx1\n while a != ctx2:\n a = a.parent()\n if a == ctx2:\n return ctx1\n a = ctx2\n while a != ctx1:\n a = a.parent()\n if a == ctx1:\n return ctx2\n raise ValueError(\"no context in common: {}, {}\".format(ctx1, ctx2))",
"def _sort_and_validate_targets(self, targets):\r\n # We must check the targets in this order, to ensure correctness if invalidate_dependents=True,\r\n # since we use earlier cache keys to compute later cache keys in this case.\r\n ordered_targets = self._order_target_list(targets)\r\n\r\n # This will be a list of VersionedTargets that correspond to @targets.\r\n versioned_targets = []\r\n\r\n # This will be a mapping from each target to its corresponding VersionedTarget.\r\n versioned_targets_by_target = {}\r\n\r\n # Map from id to current fingerprint of the target with that id. We update this as we iterate,\r\n # in topological order, so when handling a target, this will already contain all its deps (in\r\n # this round).\r\n id_to_hash = {}\r\n\r\n for target in ordered_targets:\r\n dependency_keys = set()\r\n if self._invalidate_dependents and hasattr(target, 'dependencies'):\r\n # Note that we only need to do this for the immediate deps, because those will already\r\n # reflect changes in their own deps.\r\n for dep in target.dependencies:\r\n # We rely on the fact that any deps have already been processed, either in an earlier\r\n # round or because they came first in ordered_targets.\r\n # Note that only external deps (e.g., JarDependency) or targets with sources can\r\n # affect invalidation. Other targets (JarLibrary, Pants) are just dependency scaffolding.\r\n if isinstance(dep, ExternalDependency):\r\n dependency_keys.add(dep.cache_key())\r\n elif isinstance(dep, TargetWithSources):\r\n fprint = id_to_hash.get(dep.id, None)\r\n if fprint is None:\r\n # It may have been processed in a prior round, and therefore the fprint should\r\n # have been written out by the invalidator.\r\n fprint = self._invalidator.existing_hash(dep.id)\r\n # Note that fprint may still be None here. E.g., a codegen target is in the list\r\n # of deps, but its fprint is not visible to our self._invalidator (that of the\r\n # target synthesized from it is visible, so invalidation will still be correct.)\r\n #\r\n # Another case where this can happen is a dep of a codegen target on, say,\r\n # a java target that hasn't been built yet (again, the synthesized target will\r\n # depend on that same java target, so invalidation will still be correct.)\r\n # TODO(benjy): Make this simpler and more obviously correct.\r\n if fprint is not None:\r\n dependency_keys.add(fprint)\r\n elif isinstance(dep, JarLibrary) or isinstance(dep, Pants):\r\n pass\r\n else:\r\n raise ValueError('Cannot calculate a cache_key for a dependency: %s' % dep)\r\n cache_key = self._key_for(target, dependency_keys)\r\n id_to_hash[target.id] = cache_key.hash\r\n\r\n # Create a VersionedTarget corresponding to @target.\r\n versioned_target = VersionedTarget(self, target, cache_key)\r\n\r\n # Add the new VersionedTarget to the list of computed VersionedTargets.\r\n versioned_targets.append(versioned_target)\r\n\r\n # Add to the mapping from Targets to VersionedTargets, for use in hooking up VersionedTarget\r\n # dependencies below.\r\n versioned_targets_by_target[target] = versioned_target\r\n\r\n # Having created all applicable VersionedTargets, now we build the VersionedTarget dependency\r\n # graph, looking through targets that don't correspond to VersionedTargets themselves.\r\n versioned_target_deps_by_target = {}\r\n\r\n def get_versioned_target_deps_for_target(target):\r\n # For every dependency of @target, we will store its corresponding VersionedTarget here. For\r\n # dependencies that don't correspond to a VersionedTarget (e.g. 
pass-through dependency\r\n # wrappers), we will resolve their actual dependencies and find VersionedTargets for them.\r\n versioned_target_deps = set([])\r\n if hasattr(target, 'dependencies'):\r\n for dep in target.dependencies:\r\n for dependency in dep.resolve():\r\n if dependency in versioned_targets_by_target:\r\n # If there exists a VersionedTarget corresponding to this Target, store it and\r\n # continue.\r\n versioned_target_deps.add(versioned_targets_by_target[dependency])\r\n elif dependency in versioned_target_deps_by_target:\r\n # Otherwise, see if we've already resolved this dependency to the VersionedTargets it\r\n # depends on, and use those.\r\n versioned_target_deps.update(versioned_target_deps_by_target[dependency])\r\n else:\r\n # Otherwise, compute the VersionedTargets that correspond to this dependency's\r\n # dependencies, cache and use the computed result.\r\n versioned_target_deps_by_target[dependency] = get_versioned_target_deps_for_target(\r\n dependency)\r\n versioned_target_deps.update(versioned_target_deps_by_target[dependency])\r\n\r\n # Return the VersionedTarget dependencies that this target's VersionedTarget should depend on.\r\n return versioned_target_deps\r\n\r\n # Initialize all VersionedTargets to point to the VersionedTargets they depend on.\r\n for versioned_target in versioned_targets:\r\n versioned_target.dependencies = get_versioned_target_deps_for_target(versioned_target.target)\r\n\r\n return versioned_targets",
"def substitute_reorder(self, order_ops):\n if isinstance(order_ops, memops.ReorderFull):\n self._context.reorder_engine = reorderengines.FullReorderEngine()\n self._context.test_on_barrier = (\n self._context.reorder_engine.test_on_barrier\n )\n elif isinstance(order_ops, memops.ReorderPartial):\n # TODO add macro in valgrind or\n # parameter inside the tool to support parameters?\n self._context.reorder_engine = (\n reorderengines.RandomPartialReorderEngine(3)\n )\n self._context.test_on_barrier = (\n self._context.reorder_engine.test_on_barrier\n )\n elif isinstance(order_ops, memops.ReorderAccumulative):\n self._context.reorder_engine = (\n reorderengines.AccumulativeReorderEngine()\n )\n self._context.test_on_barrier = (\n self._context.reorder_engine.test_on_barrier\n )\n elif isinstance(order_ops, memops.ReorderReverseAccumulative):\n self._context.reorder_engine = (\n reorderengines.AccumulativeReverseReorderEngine()\n )\n self._context.test_on_barrier = (\n self._context.reorder_engine.test_on_barrier\n )\n elif isinstance(order_ops, memops.NoReorderDoCheck):\n self._context.reorder_engine = reorderengines.NoReorderEngine()\n self._context.test_on_barrier = (\n self._context.reorder_engine.test_on_barrier\n )\n elif isinstance(order_ops, memops.NoReorderNoCheck):\n self._context.reorder_engine = reorderengines.NoCheckerEngine()\n self._context.test_on_barrier = (\n self._context.reorder_engine.test_on_barrier\n )\n elif isinstance(order_ops, memops.ReorderDefault):\n self._context.reorder_engine = self._context.default_engine\n self._context.test_on_barrier = self._context.default_barrier\n else:\n raise NotSupportedOperationException(\n \"Not supported reorder engine: {}\".format(order_ops)\n )",
"def _get_target_ordering(self, order):\n\n lower_list = ['IRMSD', 'LRMSD', 'HADDOCK']\n higher_list = ['DOCKQ', 'Fnat']\n NA_list = ['binary_class', 'BIN_CLASS', 'class']\n\n if order is not None:\n self.target_ordering = order\n else:\n if self.select_target in lower_list:\n self.target_ordering = 'lower'\n elif self.select_target in higher_list:\n self.target_ordering = 'higher'\n elif self.select_target in NA_list:\n self.target_ordering = None\n else:\n warnings.warn(\n ' Target ordering unidentified. lower assumed')\n self.target_ordering = 'lower'",
"def _order_entities(self):\n entity_deps = defaultdict(set)\n for e, features in self.top_level_features.items():\n # iterate over all dependency features of the top-level features on\n # this entity. If any of these are themselves top-level features, add\n # their entities as dependencies of the current entity.\n deps = {g.hash(): g for f in features\n for g in self.feature_dependencies[f.hash()]}\n for d in deps.values():\n _, num_forward = self.entityset.find_path(e, d.entity.id,\n include_num_forward=True)\n if num_forward > 0:\n entity_deps[e].add(d.entity.id)\n\n # Do a top-sort on the new entity DAG\n self.ordered_entities = utils.topsort([self.target_eid],\n lambda e: entity_deps[e])",
"def test_GenerateDigraphOrder(self):\n\n src = EmptyImage()\n tgt = EmptyImage()\n block_image_diff = BlockImageDiff(tgt, src)\n\n transfers = block_image_diff.transfers\n t0 = Transfer(\n \"t1\", \"t1\", RangeSet(\"10-15\"), RangeSet(\"0-5\"), \"move\", transfers)\n t1 = Transfer(\n \"t2\", \"t2\", RangeSet(\"20-25\"), RangeSet(\"0-7\"), \"move\", transfers)\n t2 = Transfer(\n \"t3\", \"t3\", RangeSet(\"30-35\"), RangeSet(\"0-4\"), \"move\", transfers)\n t3 = Transfer(\n \"t4\", \"t4\", RangeSet(\"0-10\"), RangeSet(\"40-50\"), \"move\", transfers)\n\n block_image_diff.GenerateDigraph()\n t3_goes_after_copy = t3.goes_after.copy()\n\n # Elements in the set must be in the transfer evaluation order.\n elements = list(t3_goes_after_copy)\n self.assertEqual(t0, elements[0])\n self.assertEqual(t1, elements[1])\n self.assertEqual(t2, elements[2])\n\n # Now switch the order of t0, t1 and t2.\n transfers[0], transfers[1], transfers[2] = (\n transfers[2], transfers[0], transfers[1])\n t3.goes_after.clear()\n t3.goes_before.clear()\n block_image_diff.GenerateDigraph()\n\n # The goes_after must be different from last run.\n self.assertNotEqual(t3_goes_after_copy, t3.goes_after)\n\n # Assert that each element must agree with the transfer order.\n elements = list(t3.goes_after)\n self.assertEqual(t2, elements[0])\n self.assertEqual(t0, elements[1])\n self.assertEqual(t1, elements[2])",
"def test_order_cat(segm_and_cat):\n cat, segm, segm_deblend = segm_and_cat\n order = pf.order_cat(cat, 'area')\n\n source = None\n for idx in order:\n if source is not None:\n assert source.area > cat[idx].area\n\n source = cat[idx]",
"def reorder_examples(self):\n self.example_wise_shrink(Ordering, key=sort_key)",
"def pre_mutation(context):\n line = context.current_source_line.strip()\n if context.current_line_index != 0:\n prev_line = context.source_by_line_number[context.current_line_index - 1].strip()\n else:\n prev_line = \"\"\n\n if line.startswith(\"logger.\") or prev_line.startswith(\"logger.\"):\n context.skip = True\n if line.startswith(\"logger = structlog\"):\n context.skip = True\n if line.startswith(\"cls.__doc__\"):\n context.skip = True\n\n # This file is copied verbatim and is not tested\n if context.filename.endswith(\"crypt.py\"):\n context.skip = True",
"def _compute_transitive_deps_by_target(self):\r\n # Sort from least to most dependent.\r\n sorted_targets = reversed(InternalTarget.sort_targets(self._context.targets()))\r\n transitive_deps_by_target = defaultdict(set)\r\n # Iterate in dep order, to accumulate the transitive deps for each target.\r\n for target in sorted_targets:\r\n transitive_deps = set()\r\n if hasattr(target, 'dependencies'):\r\n for dep in target.dependencies:\r\n transitive_deps.update(transitive_deps_by_target.get(dep, []))\r\n transitive_deps.add(dep)\r\n transitive_deps_by_target[target] = transitive_deps\r\n return transitive_deps_by_target",
"def output(context, targets):\n # context: [[('p', ['a', 'b'])], ...]\n # targets: [(('p', ['a', 'b']), 1, [0,1,2]), ...]\n print('\\n'.join([write_r(c) for c in context]))\n for t, v, s in targets:\n print(TARGET_T.format(write_r([t]), v, ','.join(map(str, s))))",
"def calc_ply_order(constraints, targets):\r\n if constraints.sym:\r\n ply_order = np.arange(targets.n_plies // 2 + targets.n_plies % 2)\r\n return ply_order\r\n\r\n order_before_sorting = np.arange(targets.n_plies)\r\n ply_order = np.zeros((targets.n_plies,), int)\r\n ply_order[0::2] = order_before_sorting[\r\n :targets.n_plies // 2 + targets.n_plies % 2]\r\n ply_order[1::2] = order_before_sorting[\r\n targets.n_plies // 2 + targets.n_plies % 2:][::-1]\r\n return ply_order",
"def switch_context(self, context):\r\n self.context_stack.append(self.current_context)\r\n self.current_context = context",
"def get_contexts(self):\n return tuple(getattr(self, name) for name in self.__argnames__)",
"def test_create_Context_with_targets(self):\n client = IPythonClient()\n dac = Context(client, targets=[0, 1])\n self.assertIs(dac.client, client)\n dac.close()\n client.close()"
] |
[
"0.623471",
"0.5847727",
"0.5639923",
"0.54074484",
"0.5407089",
"0.52525043",
"0.5247879",
"0.52474946",
"0.5243922",
"0.52142984",
"0.50322247",
"0.49951407",
"0.4981515",
"0.49726883",
"0.49717957",
"0.4967289",
"0.49609792",
"0.49517962",
"0.49306974",
"0.49014437",
"0.48742238",
"0.4827984",
"0.48249915",
"0.47707963",
"0.47668085",
"0.47541156",
"0.47490552",
"0.47408348",
"0.47287586",
"0.47167984"
] |
0.7536339
|
0
|
Returns True if both words share a synonym set.
|
def are_words_synonym(self, word1, word2):
return self.get_intersection((word1, word2))
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def is_word_common(self, word):\n if word in self.stopwords:\n return True\n if re.match(r'[a-zA-Z]+[a-zA-Z]$', word):\n word = self.lemmatizer.lemmatize(word, pos='n')\n synset = wn.synsets(word)\n if len(synset) > 0:\n return True\n else:\n return False\n return False",
"def synonymIntersection(self, wSet1, wSet2, idf):\n intersection = wSet1.intersection(wSet2)\n w1 = wSet1 - intersection\n w2 = wSet2 - intersection\n if len(intersection) == 0:\n return set([])\n\n synonyms1 = self.getSynonyms(w1)\n synonyms2 = self.getSynonyms(w2)\n defaultIDF = idf['unknownToken']\n \n while len(w1) > 0:\n word1 = w1.pop()\n if word1 not in synonyms1:\n continue # no synonyms for this word\n \n for word2 in w2:\n if word2 not in synonyms2:\n continue # no synonyms for this word\n sharedSynsets = synonyms1[word1].intersection(synonyms2[word2])\n if len(sharedSynsets) > 0:\n # the two have at least one synset in common, consider them synonyms\n w2.remove(word2)\n if idf.get(word1, defaultIDF) > idf.get(word2, defaultIDF):\n intersection.add(word1)\n else:\n intersection.add(word2)\n break\n \n return intersection",
"def synSimilarity(self, wSet1, wSet2): \n nW1 = len(wSet1)\n nW2 = len(wSet2)\n if nW1 == 0 or nW2 == 0:\n return 0.0\n synonyms1 = self.getSynonyms(wSet1)\n synonyms2 = self.getSynonyms(wSet2)\n \n # easy bit: find the number of identical words in each mention\n intersection = wSet1.intersection(wSet2)\n # now remove these words and look for synonyms between those left\n w1 = wSet1 - intersection\n w2 = wSet2 - intersection\n while len(w1) > 0:\n word1 = w1.pop()\n if word1 not in synonyms1:\n continue # no synonyms for this word\n \n for word2 in w2:\n if word2 not in synonyms2:\n continue # no synonyms for this word\n sharedSynsets = synonyms1[word1].intersection(synonyms2[word2])\n if len(sharedSynsets) > 0:\n # the two have at least one synset in common, consider them synonyms\n w2.remove(word2)\n intersection.add(word1)\n \n break\n return float(2*len(intersection)) / (nW1 + nW2)",
"def exactMatch(self, mention):\n w1 = self.allWords()\n w2 = mention.allWords()\n if len(w1) == len(w2) and w1 == w2:\n return True\n else:\n return False",
"def is_same_set(self, item1, item2):\n res = False\n for s in self._data:\n if item1 in s and item2 in s:\n res = True\n break\n return res",
"def homophone_words(word_one, word_two, pron_dict):\n if word_one not in pron_dict or word_two not in pron_dict:\n return False\n return pron_dict[word_one] == pron_dict[word_two]",
"def testSynonymDuplicate(self):\n\t\t\t\tone = spinner.Word.objects.get_single('mac', True)\n\t\t\t\ttwo = spinner.Word.objects.get_single('macintosh', True)\n\t\n\t\t\t\tsyn = spinner.Synonym.objects.get_single(one, two, True)\n\t\t\t\t\n\t\t\t\tsyn2 = spinner.Synonym.objects.get_single(two, one, True)\n\n\t\t\t\tassert syn == syn2\n\n\t\t\t\tsyn.delete()\n\t\t\t\tone.delete()\n\t\t\t\ttwo.delete()",
"def get_synonyms(word):\n syns_sets = wordnet.synsets(word)\n if syns_sets:\n # if there's synonyms, take the first set\n desired = syns_sets[0].lemma_names()\n desired = [the_name.replace(\"_\", \" \") for the_name in desired]\n return desired\n\n else:\n return False",
"def areSentencesSimilarTwo(self, words1, words2, pairs):\n if not words1 and not words2:\n return True\n elif not words1 or not words2:\n return False\n elif not pairs and not self.compare(words1, words2):\n return False\n\n vocab = set()\n for w in words1:\n vocab.add(w)\n for w in words2:\n vocab.add(w)\n for (w1, w2) in pairs:\n vocab.add(w1)\n vocab.add(w2)\n\n parents = {w:w for w in vocab}\n ranks = {w:1 for w in vocab}\n\n for w1, w2 in pairs:\n pw1 = self.find(w1, parents)\n pw2 = self.find(w2, parents)\n if pw1 == pw2:\n continue\n\n if ranks[pw1] > ranks[pw2]:\n pw1, pw2 = pw2, pw1\n parents[pw1] = pw2\n ranks[pw2] += ranks[pw1]\n\n pwlist1 = []\n for w in words1:\n pw = self.find(w, parents)\n pwlist1.append(pw)\n\n pwlist2 = []\n for w in words2:\n pw = self.find(w, parents)\n pwlist2.append(pw)\n\n return self.compare(pwlist1, pwlist2)",
"def exactSetMatch(self, mention, ignoreSemanticTagList=[]):\n \n dWords = self.importantWords(ignoreSemanticTagList)\n aWords = mention.importantWords(ignoreSemanticTagList) \n\n if len(aWords) == 0:\n # annotated mention consists of \"unimportant\" words.\n # use all words in mention\n dWords = self.allWords()\n aWords = mention.allWords() \n\n if len(dWords) > 0 and dWords == aWords:\n return True\n else:\n return False",
"def synsets_similarity(s1, s2):\n lemmas_sentence_1, tagged_sentence_1 = lemmatize_sentence(s1.lower())\n lemmas_sentence_2, tagged_sentence_2 = lemmatize_sentence(s2.lower())\n\n # Disambiguate words and create list of sysnsets \n synsets_sentence_1 = []\n for (lemma, word_tag) in zip(lemmas_sentence_1, tagged_sentence_1):\n if lemma in stop_words:\n continue\n synset = lesk(lemmas_sentence_1, lemma, wordnet_pos_code(word_tag[1]))\n if synset is not None:\n synsets_sentence_1.append(synset)\n else:\n found = wordnet.synsets(lemma, wordnet_pos_code(word_tag[1]))\n if len(found) > 0:\n synsets_sentence_1.append(found[0]) \n #print(\"Warn: lemma [%s] returned no disambiguation...using synset : %s\" % (lemma, found[0])) \n\n synsets_sentence_2 = []\n for (lemma, word_tag) in zip(lemmas_sentence_2, tagged_sentence_2):\n if lemma in stop_words:\n continue\n synset = lesk(lemmas_sentence_2, lemma, wordnet_pos_code(word_tag[1]))\n if synset is not None:\n synsets_sentence_2.append(synset)\n else:\n found = wordnet.synsets(lemma, wordnet_pos_code(word_tag[1]))\n if len(found) > 0:\n synsets_sentence_2.append(found[0]) \n #print(\"Warn: lemma [%s] returned no disambiguation...using synset : %s\" % (lemma, found[0])) \n\n # Compute similarity\n if len(synsets_sentence_1) != 0 and len(synsets_sentence_2) != 0:\n similarity = 1 - jaccard_distance(set(synsets_sentence_1), set(synsets_sentence_2))\n return similarity\n else:\n return 0",
"def twoStrings(s1, s2):\n\n set1 = set(s1)\n set2 = set(s2)\n\n for char in set1:\n if char in set2:\n return True\n\n return False",
"def testSynonym(self):\n\n\t\t\t\tone = spinner.Word.objects.get_single('mac', True)\n\t\t\t\ttwo = spinner.Word.objects.get_single('macintosh', True)\n\n\t\t\t\tsyn = spinner.Synonym.objects.get_single(one, two, True)\n\t\t\t\tassert isinstance(syn, spinner.Synonym), syn\n\t\t\t\t\n\t\t\t\tsyn.delete()\n\t\t\t\tone.delete()\n\t\t\t\ttwo.delete()",
"def weed_out_synonyms(word, potential_synonyms):\n real_synonyms = set()\n for synonym in potential_synonyms:\n max_distance = abs(len(word) - len(synonym))\n abbr_len = min(len(word), len(synonym))\n forgiveness = round(1/7 * abbr_len)\n if lev.distance(word, synonym) <= max_distance + forgiveness:\n # Then it's a synonym!\n real_synonyms.add(synonym)\n return real_synonyms",
"def testTwoWords(self):\n\n\t\t\t\twords = ['business', 'directory']\n\t\t\t\tsynonyms = spinner.Synonym.objects.get_synonyms(words)\n\n\t\t\t\tassert len(synonyms)",
"def arrayStringsAreEqual1(self, word1: List[str], word2: List[str]) -> bool:\n word1str = ''.join(word1)\n word2str = ''.join(word2)\n return word1str == word2str",
"def conflateable(seg1, seg2, segment_pairs):\n for segment_pair in segment_pairs:\n seg_set = set(segment_pair)\n if seg1 in seg_set and seg2 in seg_set:\n return True\n return False",
"def similar_strings(s1, s2):\n w1 = set(re.split(r'\\W+', s1))\n w2 = set(re.split(r'\\W+', s2))\n threshold = len(w1) // 2 + 1\n return len(w1 & w2) >= threshold",
"def are_similar(left, right):\n left = left.lower()\n right = right.lower()\n if left == right:\n return True\n if left and left in right:\n return True\n if right and right in left:\n return True\n return False",
"def isAmbiguous(self, word):\n\t\treturn word in disambig_const.DISAMBIGUATATION_TABLE;",
"def isUnique(self, word):\n abbr = self.get_abbr(word)\n if abbr not in self.abbr:\n return True\n elif len(self.abbr[abbr]) == 1 and word == self.abbr[abbr][0]:\n return True\n else:\n return False",
"def get_synset_overlap(sentence_a, sentence_b):\n def synsets(word):\n sense_lemmas = []\n for pos in ('n'):#,'a'):\n for i in xrange(5):\n try:\n sense_lemmas += [lemma.name \n for lemma in wn.synset('{0}.{1}.0{2}'.format(word, pos, i)).lemmas]\n except WordNetError: \n pass\n return sense_lemmas\n\n a_set = set(lemma for word in sentence_a for lemma in synsets(word))\n b_set = set(lemma for word in sentence_b for lemma in synsets(word))\n score = len(a_set&b_set)/float(len(a_set|b_set))\n \n return score",
"def isWordSet(self):\n return len(self.getWord()) != 0",
"def consistent(self, assignment):\n for node1 in assignment:\n for node2 in assignment:\n\n if node1 != node2:\n #returns False if any assignmed words are the same\n if assignment[node1] == assignment[node2]:\n return False\n\n overlap= self.crossword.overlaps[node1,node2]\n if overlap != None:\n #checks if words assigned to node overlaps are the same letter\n if assignment[node1][overlap[0]] != assignment[node2][overlap[1]]:\n return False\n\n return True",
"def meets(self, s2):\n return set(self.keys()).intersection(list(s2.keys())) != set()",
"def shared_words(text1, text2):\r\n\r\n list1 = tokenize(text1.strip(' '))\r\n list2 = tokenize(text2.strip(' '))\r\n\r\n list3 = set(list1) & set(list2)\r\n list3.remove(' ');\r\n\r\n return list3",
"def compare_words(self, word1, word2):\n return Counter(word1) == Counter(word2)",
"def __has_conflicting_node_names(self):\n # check length of sets to determine if overlap exists\n return len({node.get_name() for node in self.get_nodeset()}) != len(self.get_nodeset())",
"def test_synonym(self): \n pass",
"def isUnique(self, word):\n if len(word) <= 1:\n n = word\n else:\n n = word[0] + str(len(word) - 2) + word[-1] #Get the abbrviation.\n if n not in self.abbrdict or (self.abbrdict[n] == 1 and word in self.origdict): #If it is not in abbrdict or the abbrevation count is 1 and the word has appeared in dictionary, return true.\n return True\n else: #Otherwise, return false.\n return False"
] |
[
"0.6931332",
"0.67537117",
"0.6713939",
"0.64805406",
"0.6460012",
"0.64492655",
"0.63962036",
"0.63711363",
"0.6323809",
"0.6230787",
"0.62301844",
"0.61946565",
"0.606518",
"0.6061799",
"0.6016105",
"0.6014903",
"0.60109985",
"0.60029316",
"0.5962531",
"0.5943674",
"0.5926784",
"0.5901584",
"0.5896107",
"0.58943254",
"0.58590454",
"0.5843649",
"0.5838688",
"0.5838522",
"0.58283776",
"0.58232886"
] |
0.8643581
|
0
|
Find the best matching word in the candidate sentence. These words can occur in a window of a few indices to the left and right, but any offset will come with a penalty.
|
def get_word_score_in_window(self, gold, candidate, use_synonyms, index):
best_score = 0
for offset in range(self.pen.word_window_left * -1, self.pen.word_window_right + 1):
new_index = index + offset
# iterate over the window
if new_index >= 0 and new_index < len(gold) and new_index < len(candidate):
if gold[new_index] == candidate[new_index]:
# it is a lexical match
new_score = self.pen.score_for_matched_lexical
new_score -= (abs(offset) * self.pen.factor_word_offset_penalty)
best_score = max(best_score, new_score)
else: # maybe we find a semantic match
if use_synonyms and self.are_words_synonym(gold[index], candidate[index]):
# the words are synonymes
new_score = self.pen.score_for_matched_synonym
new_score -= (abs(offset) * self.pen.factor_word_offset_penalty)
best_score = max(best_score, new_score)
return best_score
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def _find_best_fit(self, puzzle):\n\n word = puzzle['answer']\n\n # if first word\n print(len(self.filled_pos))\n if len(self.filled_pos) == 0:\n x = random.randint(0,4)\n y = random.randint(0,4)\n print(\"first_word: {} x:{} y:{}\".format(word, x, y))\n print(\"will_fit: {}\".format(will_fit[ACROSS](x, y, length(word, self.lang))))\n if will_fit[ACROSS](x, y, length(word, self.lang)):\n puzzle['orientation'] = \"across\"\n # puzzle['position'] = t + 1\n puzzle['startx'] = x + 1\n puzzle['starty'] = y + 1\n self._fill_word_in_matrix(word, ACROSS, (x,y))\n return puzzle\n\n # first find the location where it overlaps.. then move to the other ones to keep it interesting\n for key in self.filled_pos:\n #the orientation for this word should be perpendicular to the one we are trying to match\n pos = int(not self.filled_pos[key]['orientation'])\n # find the intersecting letters between the two words\n intersect = find_intersection(key, word, self.lang)\n print(\"trying to intersect filled_word={} with word={}\".format(key, word))\n if len(intersect) == 0:\n # no letters matched.. lets find the next\n continue\n else:\n a = [-10, -10]\n print(\"intersecting letters={}\".format(intersect))\n for letter in intersect:\n indexes1 = find_all_char_pos(key, letter, self.lang)\n for index in indexes1:\n # index = filled_pos[key]['word'].find(letter)\n print(\"location of the letter={} in word={} is {}\".format(letter, key, index))\n filled_word_pos = self.filled_pos[key]['position']\n a[pos] = filled_word_pos[pos] + index\n indexes2 = find_all_char_pos(word, letter, self.lang)\n for index2 in indexes2:\n # index2 = word.find(letter)\n print(\"location of the letter={} in word={} is {}\".format(letter, word, index2))\n a[self.filled_pos[key]['orientation']] = filled_word_pos[int(not pos)] - index2\n print(\"looking for match in location={}\".format(a))\n print(\"will_fit={}\".format(will_fit[pos](a[0], a[1], length(word, self.lang))))\n if will_fit[pos](a[0], a[1], length(word, self.lang)):\n if not self._check_overlap(word, pos, a[0], a[1]):\n self._fill_word_in_matrix(word, pos, (a[0], a[1]))\n calculate_free_rows(self.puzzle_matrix, self.height)\n puzzle['orientation'] = \"down\" if pos else \"across\"\n # puzzle['position'] = t + 1\n puzzle['startx'] = a[0] + 1\n puzzle['starty'] = a[1] + 1\n return puzzle\n # if we are still here then we havent found a place for this word\n # fill it in an empty space\n free_blocks_across = calculate_free_rows(self.puzzle_matrix, self.height)\n print(\"@@@@@@filling a random across free_blocks_across={}\".format(free_blocks_across))\n for key, val in sorted(free_blocks_across.items()):\n print(\"key={} val={}\".format(key, val))\n if key >= length(word, self.lang):\n pos = val.pop(random.randint(0, len(val)-1 ))\n if will_fit[ACROSS](pos[0], pos[1], length(word, self.lang)) and not self._check_overlap(word, ACROSS, pos[0], pos[1]):\n self._fill_word_in_matrix(word, ACROSS, (pos))\n puzzle['orientation'] = \"across\"\n puzzle['startx'] = pos[0] + 1\n puzzle['starty'] = pos[1] + 1\n return puzzle",
"def get_closest_levenshtein(word, possible_words, threshold):\n result = None\n min_distance = 10\n for possible_word in possible_words:\n word_distance = distance(word, possible_word)\n if word_distance < min_distance:\n result = possible_word\n min_distance = word_distance\n result = result if min_distance < threshold else None\n return result, min_distance",
"def word_nearest(word_list, target, condition = None, consider_phase = True):\n \n if not condition:\n condition = lambda t: True\n \n min_distance = 100\n min_word = None\n \n def word_distance(word1, word2):\n position1 = word1.position\n position2 = word2.position\n\n distance = [a-b for a, b in zip(position1, position2)]\n\n return np.sum(np.abs(distance))\n \n if isinstance(word_list, Word):\n word_list = [word_list]\n elif isinstance(word_list, list):\n #word_list = word_list\n pass\n else:\n print (word_list)\n raise TypeError()\n \n for word in word_list:\n phase = word.phase\n for word_compare in target:\n if not condition(word_compare):\n continue\n elif consider_phase and phase - word_compare.phase:\n continue\n\n distance = word_distance(word, word_compare)\n #print (word_compare, distance)\n if min_distance > distance:\n min_distance = distance\n min_word = word_compare\n elif min_distance == distance:\n pass\n # should be revised\n\n \n return min_word",
"def closest_word_to(word, some_words):\n closest = ''\n distance = len(word)\n for target in some_words:\n this_distance = len(set(target) - set(word))\n if this_distance < distance:\n distance = this_distance\n closest = target\n return closest",
"def correction(word):\r\n return max(candidates(word), key=P)",
"def _WordScore(index, normalized_command_word,\n canonical_command_word, canonical_command_length):\n score = 0\n\n # The match can go either way.\n if normalized_command_word in canonical_command_word:\n shorter_word = normalized_command_word\n longer_word = canonical_command_word\n elif canonical_command_word in normalized_command_word:\n shorter_word = canonical_command_word\n longer_word = normalized_command_word\n else:\n return score\n\n # Inner match must be a word boundary.\n hit = longer_word.find(shorter_word)\n if hit > 0 and longer_word[hit-1] != '-':\n return score\n\n # Partial hit.\n score += 10\n\n # Prefer a match in less words.\n if canonical_command_length == 1:\n score += 30\n elif canonical_command_length == 2:\n score += 20\n elif canonical_command_length == 3:\n score += 10\n\n # Prefer a match in order.\n if index == 0:\n score += 25\n elif index == 1:\n score += 15\n else:\n score += 5\n\n # Prefer matching more chars and beginning of word.\n # This also handles minor suffix diffs, like singular vs. plural.\n extra = len(longer_word) - len(shorter_word)\n if extra <= 2:\n extra = 3 - extra\n if longer_word.startswith(shorter_word):\n extra *= 2\n score += extra\n\n # Prefer matching on surface words.\n if index == 0 and canonical_command_length > 1:\n score += 30\n # Also prefer matching on group words.\n elif index > 0 and canonical_command_length > index + 1:\n score += 15\n\n return score",
"def maxcompChooseWord(hand, wordList, n):\n # 电脑给出最优解\n point = 0\n maxword = ''\n for word in wordList:\n newword1 = copy.deepcopy(word)\n newword2 = copy.deepcopy(word)\n if isValidWord(newword1, hand, wordList):\n p = getWordScore(newword2, n)\n if p > point:\n point = p\n maxword = word\n if point == 0:\n return None\n else:\n return maxword, point",
"def get_best_candidate(word, ngram_dict, threshold=0.8):\n candidates = []\n w_l = len(word)\n freq = ngram_dict[word] if word in ngram_dict else 0.0\n if w_l >= 5:\n for uniq_word in ngram_dict:\n edit_dist = 0\n if word != uniq_word:\n edit_dist = Levenshtein.distance(word, uniq_word)\n levenshtein_ratio = 1.0 - edit_dist / w_l\n if levenshtein_ratio >= threshold:\n candidates.append([uniq_word, ngram_dict[uniq_word], edit_dist])\n else:\n candidates.append([word, ngram_dict[word], edit_dist])\n\n if len(candidates) == 0:\n return word, freq, 0\n\n candidates = sorted(candidates, key=lambda item: item[1], reverse=True)\n return candidates[0]\n else:\n return word, freq, 0",
"def get_closest(target_word: str, word_to_idx: Dict, embeddings: torch.Tensor, n: int = 5) -> List[Tuple[str, torch.Tensor]]:\n\n # Calculate distances to all other words\n\n word_embedding = embeddings[word_to_idx[target_word.lower()]]\n distances = []\n for word, index in word_to_idx.items():\n if word == \"<MASK>\" or word == target_word:\n continue\n distances.append((word, torch.dist(word_embedding, embeddings[index])))\n\n results = sorted(distances, key=lambda x: x[1])[1:n + 2]\n return results",
"def find_most_compatible_match(self, candidate):\n best_matchIdx = -1\n best_matchVal = 0\n len_of_match = len(self.match)\n if not candidate.any():\n return None\n for i in candidate:\n if self.W[len_of_match][i] > best_matchVal:\n best_matchVal = self.W[len_of_match][i]\n best_matchIdx = i\n return best_matchIdx",
"def compChooseWord(hand, wordList, n):\n # BEGIN PSEUDOCODE (available within ps4b.py)bestScore = 0\n bScore = 0\n bWord = None\n for word in wordList:\n if isValidWord(word, hand, wordList):\n score = getWordScore(word, n)\n if score > bScore:\n bScore = score\n bWord = word\n return bWord",
"def findBestShift(wordList, text):\n max_real_words = 0\n best_shift = 0\n for i in range(26):\n word_list = applyShift(text, i).split(\" \")\n temp_n_words = 0\n for word in word_list:\n if isWord(wordList, word):\n temp_n_words += 1\n if temp_n_words > max_real_words:\n max_real_words = temp_n_words\n best_shift = i\n return best_shift",
"def find_nearest_repetition(paragraph):\n word_to_latest_index, nearest_repeated_distance = {}, float('inf')\n for i, word in enumerate(paragraph):\n if word in word_to_latest_index:\n latest_equal_word = word_to_latest_index[word]\n nearest_repeated_distance = min(nearest_repeated_distance, i-latest_equal_word)\n word_to_latest_index[word] = i\n return nearest_repeated_distance if nearest_repeated_distance != float('inf') else -1",
"def replace_nearest(word): \n nearest = spellcheck.correction(word)\n #When there is no valid word, the nearest word\n #is the same as the original\n if word == nearest:\n #This implies we need to try splitting it\n return split_word(word)\n return nearest",
"def findBestShift(wordList, text):\n ### TODO\n max_words = 0\n best_shift = 0\n lis = []\n for i in range(0,26):\n lis = applyShift(text, i).split(' ')\n count = 0\n for j in lis:\n if isWord(wordList, j):\n count += 1\n if count > max_words:\n max_words = count\n best_shift = i\n \n return best_shift",
"def answer(document, search_terms):\n idx = {k: [] for k in search_terms}\n doc = document.split()\n [idx[term].append(i) for i, term in enumerate(doc, start=1) if term in search_terms]\n min_score = sys.maxint\n winning_slice = None\n for term in idx.keys(): # ignore duplicate terms\n for position in idx[term]:\n positions = [position]\n for other_term in idx.keys():\n distances = \\\n [int(math.fabs(position - x)) for x in idx[other_term]]\n positions.append(\n idx[other_term][distances.index(min(distances))])\n score = max(positions) - min(positions) + 1\n if score < min_score:\n winning_slice = (min(positions) - 1, max(positions),)\n min_score = score\n return \" \".join(doc[slice(*winning_slice)])",
"def nearest_words(embedding, voc_size, word, wint, intw, n_words=10):\n similar_words = {}\n word_embed = embedding(torch.LongTensor([wint[word]]))\n for i in range(voc_size):\n emb = embedding(torch.LongTensor([i]))\n cos_sim = F.cosine_similarity(emb, word_embed)\n if len(similar_words) < n_words:\n similar_words[float(cos_sim)] = intw[i]\n else:\n if cos_sim > min(similar_words):\n min_key = min(similar_words)\n del similar_words[min_key]\n similar_words[float(cos_sim)] = intw[i]\n else:\n pass\n # Ordering dict based on the value of the cosine similarity\n return sorted(similar_words.items())[::-1]",
"def min_dist_solution(self, words, keyword_dict = None):\r\n\r\n\t\t# default settings\r\n\t\tif keyword_dict == None:\r\n\t\t\tkeyword_dict = self.keyword_dict\r\n\r\n\t\tindexed_text = list(enumerate(words))\r\n\t\t# all found keyword positions\r\n\t\tkeyword_pos = []\r\n\t\tkw_counts = [(len(kw.split()),kw) for kw in keyword_dict]\r\n\t\tkw_length_set = set((l[0] for l in kw_counts))\r\n\t\t\r\n\t\t# seperate keywords by their length\r\n\t\tfor length in kw_length_set:\r\n\t\t\tkw_lgram = ngrams(indexed_text, length)\r\n\t\t\t# start, end, ngram token\r\n\t\t\tkw_lgram_text = [(g[0][0],g[-1][0],' '.join([token[1] for token in g])) \r\n\t\t\t\t\t\t\t for g in kw_lgram]\r\n\t\t\tfixed_length_kw = [kw[1] for kw in kw_counts if kw[0] == length]\r\n\t\t\t\r\n\t\t\tfixed_keyword_pos = [(kw_s,kw_e,token) for kw_s,kw_e,token in kw_lgram_text\r\n\t\t\t \t\t\t\t\t if token in fixed_length_kw]\r\n\t\t\tkeyword_pos += fixed_keyword_pos\r\n\t\t# all found distances\r\n\t\tdistances = []\r\n\t\tfor kw_s,kw_e,kw in keyword_pos:\r\n\t\t\tdistance = keyword_dict[kw]['distance']\r\n\t\t\t# TODO handle case when value we search for is consisted of multiple words\r\n\t\t\tregex_pattern = keyword_dict[kw]['regex']\r\n\t\t\tsearch_direction = keyword_dict[kw]['search_direction']\r\n\t\t\t# start of the block\r\n\t\t\tstart = kw_s - distance if kw_s-distance > 0 else 0\r\n\t\t\t# end of the block\r\n\t\t\tend = kw_e + distance\r\n\t\t\tif search_direction == 'right':\r\n\t\t\t\tsearchable_block = indexed_text[kw_e:end]\r\n\t\t\telif search_direction == 'left':\r\n\t\t\t\tsearchable_block = indexed_text[start:kw_s]\r\n\t\t\telif search_direction == 'both':\r\n\t\t\t\tsearchable_block = indexed_text[start:end]\r\n\t\t\telse:\r\n\t\t\t\t# mb hanlde search_direction value\r\n\t\t\t\tsearchable_block = []\r\n\t\t\t\r\n\t\t\tvalue_pos = [index for index,value in searchable_block\r\n\t\t\t\t\t\t if re.search(regex_pattern,value)]\r\n\t\t\tdistance = [(self.dist(vp,kw_s,kw_e),vp,kw) for vp in value_pos]\r\n\r\n\t\t\tdistances += distance\r\n\t\tif len(distances) == 0:\r\n\t\t\treturn ('not found', None,'no kw')\r\n\t\telse:\r\n\t\t\tmin_distance,found_target_pos,kw = min(distances)\r\n\t\t\treturn words[found_target_pos],found_target_pos,kw",
"def findBestShift(wordList, text):\n \n #1. Set the maximum number of real words found to 0.\n realwords = 0\n #2. Set the best shift to 0.\n bestk = 0\n #3. For each possible shift from 0 to 26:\n for k in range(0, 26):\n\t#4. Shift the entire text by this shift.\n\tshifttext = applyShift(text, k)\n\t\n\t\n\t#5. Split the text up into a list of the individual words.\n\twords = shifttext.split(' ')\n\t#6. Count the number of valid words in this list.\n\tcount = 0\n\tfor word in words:\n\t if isWord(wordList, word):\n\t count += 1\n\t #print 'c' + str(count)\n\t#7. If this number of valid words is more than the largest number of\n\t# real words found, then:\n\tif count > realwords:\n\t #8. Record the number of valid words.\n\t realwords = count\n\t #print 'rw' + str(realwords)\n\t #9. Set the best shift to the current shift.\n\t bestk = k\n\t #print bestk\n\t#10. Increment the current possible shift by 1. Repeat the loop\n\t #starting at line 3.\n\t\n#11. Return the best shift.\n return bestk",
"def choose_word(words):\n words_with_weight = sorted([(get_word_weight(words, word), word) for word in words])\n\n # this is a bit arbitrarily chosen, but allows selecting non-optimal words\n lower_weight_offset = max(1, len(words_with_weight) // 300)\n best_weight, _ = words_with_weight[-lower_weight_offset]\n all_best = [word for weight, word in words_with_weight if weight >= best_weight]\n return random.choice(all_best)",
"def suggest(word, cutoff=0.77):\n if word in LOOKUP_TABLE:\n return LOOKUP_TABLE[word]\n\n guess = difflib.get_close_matches(word, MOST_COMMON_DOMAINS, n=1, cutoff=cutoff)\n if guess and len(guess) > 0:\n return guess[0]\n return word",
"def findNextWordForSpellcheck(text, startPos, wikiPage):\r\n return (None, None, None)",
"def correct_word(word, cutoff):\n if WORDS is not None:\n result = difflib.get_close_matches(word, WORDS, n=1, cutoff=cutoff)\n if len(result) > 0:\n return result[0]\n\n return word",
"def most_similar_matching(self):\n min_sim_thresh = float(self._conf.get(\"min_similarity_threshold\"))\n most_similar_words = self.model_request.most_similar(positive=[self.base_word],\n top_n=self._conf.get(\"max_similar_terms_threshold\"))\n\n # iterate through the most similar word list\n match_found = False\n for similar_word, sim in most_similar_words:\n if sim > min_sim_thresh and not match_found:\n\n # 4.1 - find exact match in ontology.\n result = self.graphdb.get_record_using_exact_matching(similar_word)\n\n if self.match_found(result):\n threshold_reached, result, cor_walk = self.check_match_results(result)\n if threshold_reached:\n match_found = True\n self.save_finding(self.ft_found_terms, similar_word, result,\n cor_walk, self._conf.get(\"FT_DIRECT\"))\n\n else:\n # 4.2 - Convert word into lemma and find exact match in ontology.\n match_found = self.modify_and_test_word(self.ft_found_terms, similar_word, self._conf.get(\"MOD_FT\"))\n\n artificial_results = self.graphdb.get_records_with_artificial_relation(similar_word)\n if len(artificial_results[\"results\"][\"bindings\"]) > 0:\n self.save_finding(self.artificial_found_terms, similar_word, artificial_results,\n None, self._conf.get(\"ARTIFICIAL_MATCH\"))\n\n self.sorted_ft_findings = self.sort_ft_similar_word_findings(self.ft_found_terms)",
"def mostlikelycodeword(self):\n\n # Add your code here\n stoping_int = None # Replace\n best, first_index, last_index = None, None, None # Replace all three with an integer\n for i in range(stoping_int):\n for j in range(None, stoping_int): # Replace None. \n current = self.quality(None, None)\n if None > None # Replace both Nones\n best, first_index, last_index = current, i, j\n return self.preamble[None:None]",
"def word_match(question, morph_story_sentence_words):\n verbs_more_weightage = []\n question_pos_words = p.pos_tagger(question)\n for word, pos_tag in question_pos_words.items():\n if pos_tag == \"VERB\":\n verbs_more_weightage.append(word)\n verbs_more_weightage = p.morphological_roots(verbs_more_weightage)\n\n question_no_stop_words_punct = p.removeStopWords(question)\n question_no_stop_words_punct = question_no_stop_words_punct.translate(str.maketrans('', '', string.punctuation))\n morphological_root_of_question = p.word_tokenizer(question_no_stop_words_punct)\n morphological_root_of_question = p.morphological_roots(morphological_root_of_question)\n\n score = 0\n for morph_story_word in morph_story_sentence_words:\n if morph_story_word in morphological_root_of_question:\n if morph_story_word in verbs_more_weightage:\n score = score + 6\n else:\n score = score + 3\n elif morph_story_word in verbs_more_weightage:\n score = score + 6\n return score",
"def _word_index(word, wordlist=wordlist):\n lo, hi = 0, len(wordlist) - 1\n while lo < hi:\n mid = (lo + hi) // 2\n if word <= wordlist[mid]:\n hi = mid\n else:\n lo = mid + 1\n return lo",
"def findBestShift(wordList, text):\n import string\n decoded = ''\n r = 0\n max_count = 0\n for i in range(26):\n count = 0\n decoded = applyShift(text,i)\n for word in decoded.split():\n if word.strip(string.punctuation+string.digits).lower() in wordList:\n count += 1\n if count > max_count:\n max_count = count\n r = i\n return r",
"def nearest_neighbors(self, word, dictionary):\n vectors = self.word_embeds.weight.data.cpu().numpy()\n index = dictionary.token2id[word]\n query = vectors[index]\n\n ranks = vectors.dot(query).squeeze()\n denom = query.T.dot(query).squeeze()\n denom = denom * np.sum(vectors ** 2, 1)\n denom = np.sqrt(denom)\n ranks = ranks / denom\n mostSimilar = []\n [mostSimilar.append(idx) for idx in ranks.argsort()[::-1]]\n nearest_neighbors = mostSimilar[:10]\n nearest_neighbors = [dictionary[comp] for comp in nearest_neighbors]\n\n return nearest_neighbors",
"def suggested_search(search_text):\n threshold = 0.6\n global model\n\n search_text = remove_stop_words(search_text)\n tmp_search = search_text.split()\n\n new_search = []\n for word in tmp_search:\n similar_words = get_similar_words(model, word)\n new_search = select_top_words(similar_words, new_search, threshold)\n\n new_search = list(set(new_search))\n new_search = ' '.join(new_search)\n\n return new_search + ' ' + search_text"
] |
[
"0.6923842",
"0.669848",
"0.6694236",
"0.6632936",
"0.6539453",
"0.6518705",
"0.6500288",
"0.64969873",
"0.64787245",
"0.64690685",
"0.638015",
"0.6367746",
"0.6333405",
"0.63233733",
"0.63188654",
"0.63039714",
"0.6299287",
"0.624796",
"0.6207178",
"0.62019473",
"0.61890596",
"0.6182253",
"0.61581534",
"0.61398226",
"0.6089669",
"0.6071279",
"0.6065543",
"0.6056718",
"0.6051743",
"0.604565"
] |
0.69598657
|
0
|
Returns the score of two frame elements by comparing their text and weighting the result depending on whether their names match.
|
def get_frame_element_score(self, fe1, fe2):
name_match = fe1['name'] == fe2['name']
score = self.get_text_score(fe1['spans'][0]['text'], fe2['spans'][0]['text'])
if not name_match:
score *= self.pen.factor_name_mismatch
return score
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def wordSimilarityRatio(sent_1,sent_2):",
"def judge(name):\n score = 0\n for scoreID, scorer, weight in weights:\n subscore = scorer(name)\n score += subscore * weight\n name.scores[scoreID] = subscore\n name.score = score\n return score",
"def match_score(seq1, seq2):\n\n seq1 = get_sequence_string(seq1)\n seq2 = get_sequence_string(seq2)\n score = align.localxx(seq1, seq2)[0][2]\n return score",
"def get_score(self,sentence_1, sentence_2):\n\t return self.DELTA * self.semantic_similarity(sentence_1, sentence_2, True) + (1.0 - self.DELTA) * self.word_order_similarity(sentence_1, sentence_2)",
"def compare_word_selection(selection1,selection2):\n\n num_extra=0;\n num_total=0;\n total_score=0;\n for k in selection1.keys():\n num_total+=1;\n if k not in selection2:\n num_extra+=1;\n else:\n score=compare_sentences(selection1[k],selection2[k]);\n logging.info(\"Score: %f\" % score );\n total_score+=score;\n\n for k in selection2.keys():\n if k not in selection1:\n num_total+=1;\n num_extra+=1;\n\n if num_total==0:\n return 0;\n\n return float(total_score)/float(num_total)",
"def similarity_scores(self, other):\n word_score = compare_dictionaries(other.words, self.words)\n word_length_score = compare_dictionaries(other.word_lengths, self.words)\n stem_score = compare_dictionaries(other.stems, self.stems)\n sentence_length_score = compare_dictionaries(other.sentence_lengths, self.sentence_lengths)\n common_word_score = compare_lists(other.common_word, self.common_word)\n\n return [word_score, word_length_score, stem_score, sentence_length_score, common_word_score]",
"def match(name1, name2):\n points = getPoints([name1,name2])\n su = points[name1] + points[name2]\n\n mutate_dict(lambda x: (x/su)/4, points)\n score1 = non_linearRandomInt(10, points[name1])# + non_linearRandomInt(3,1/4)\n score2 = non_linearRandomInt(10, points[name2])\n \n return (score1,score2)",
"def analyze(self, text):\n\n score = 0.0;\n\n words = text.split(' ')\n # match each word in either the positives or negatives list adding or subtracting 1 from the score if present\n for word in words:\n for w in self.positives:\n if w == word.lower():\n score += 1.0\n continue\n \n for w in self.negatives:\n if w == word.lower():\n score -= 1.0\n continue\n\n return score",
"def compare(self) -> float:\n if not self._hadith_text1 or not self._hadith_text2:\n raise Exception('Hadith texts to compare not set. Use setHadithTexts() to set the texts...')\n\n text1 = self._hadith_text1_cleaned\n text2 = self._hadith_text2_cleaned\n\n if self._ignore_diacritics:\n text1 = self._remove_diacritics(self._hadith_text1_cleaned)\n text2 = self._remove_diacritics(self._hadith_text2_cleaned)\n\n sm = difflib.SequenceMatcher(None, text1, text2)\n return sm.ratio()",
"def similar_text(word1, word2) -> float:\n\n return textdistance.overlap.similarity(word1, word2)",
"def compareRating(df, name1, name2):\n rating1 = df.loc[df[\"name\"] == name1, \"rating\"].iloc[0]\n rating2 = df.loc[df[\"name\"] == name2, \"rating\"].iloc[0]\n return (1 + 10 ** ((rating2 - rating1) / 400.0)) ** -1",
"def run(self, args):\n self.pen.score_for_matched_lexical = args[0]\n self.pen.score_for_matched_synonym = args[1]\n self.factor_word_offset_penalty = args[2]\n self.factor_sentence_length_mismatch = args[3]\n self.factor_name_mismatch = args[4]\n self.factor_fe_offset_penalty = args[5]\n self.weight_target_frame_element = args[6]\n self.weight_frame_elements = args[7]\n self.factor_frame_offset_penalty = args[8]\n misses = []\n for row in range(self.data.get_number_of_rows()):\n ref_sentence = self.data.get_row(row)[self.data.get_gold()]\n results = {}\n for team, team_sentence in self.data.get_row_for_teams(self.evaluator.get_teams(row), row).iteritems():\n results[team] = self.get_sentence_score(ref_sentence, team_sentence)\n misses.append(self.evaluator.compare_all(results, row))\n return np.mean(misses) / 5.0",
"def calculate_score(result):\n sample1=result['Sample1']\n sample2=result['Sample2']\n string1=paragraph_to_list(sample1)\n string2=paragraph_to_list(sample2)\n \n return round( strings_similarity(string1, string2), 2)\n #method_dict=strings_count_compare(string1, string2)/ max(len(string1), len(string2))\n #return round(0.5*(method_difflab+method_dict), 2)",
"def test_score_text2(self):\n\t\t#import pdb; pdb.set_trace()\n\t\ttest = sentiment.LibraryRun(self.text3, self.lib)\n\t\tmatches = test.find_phrase_matches(self.tokens_generator3)[0]\n\t\tobj_ut, _ = test.score_text(matches, end_threshold=0.5)\n\t\tself.assertEqual(obj_ut, -1.25)",
"def similarity_score(self, lhs, rhs):\n pass",
"def similarity_scores(self, other):\n results = []\n\n words_score=compare_dictionaries(other.words, self.words)\n wordl_score=compare_dictionaries(other.word_lengths, self.word_lengths)\n stems_score=compare_dictionaries(other.stems, self.stems)\n sentl_score=compare_dictionaries(other.sentence_lengths, self.sentence_lengths)\n endings_score=compare_dictionaries(other.endings, self.endings)\n results+= [words_score]\n results+= [wordl_score]\n results+= [stems_score]\n results+= [sentl_score]\n results+= [endings_score]\n return results",
"def _get_similarity_score(self, dict1, dict2):\n try:\n majorScoreDeterminer1 = ['primaryGenreId']\n majorScoreDeterminer2 = ['genreIds']\n Score = 0 # Base Score\n for items in majorScoreDeterminer2:\n\n for item1 in self._get_app_param_info(dict1, resultCount=1, resultKey=items):\n if item1 in self._get_app_param_info(dict2, resultCount=1, resultKey=items):\n if Score == 0: # Add 50% base score for this category.\n Score += 2 * .5\n Score += 2 * .5 / len(self._get_app_param_info(dict1, resultCount=1, resultKey=items))\n\n for items in majorScoreDeterminer1:\n if str(self._get_app_param_info(dict1, resultCount=1, resultKey=items)) in str(\n self._get_app_param_info(dict2, resultCount=1, resultKey=items)) and str(\n self._get_app_param_info(dict2, resultCount=1, resultKey=items)) and str(\n self._get_app_param_info(dict1, resultCount=1, resultKey=items)):\n Score += (3 / len(majorScoreDeterminer1))\n\n nameMatchScore = difflib.SequenceMatcher(None,\n self._get_app_param_info(dict1, resultCount=1,\n resultKey='trackName'),\n self._get_app_param_info(dict2, resultCount=1,\n resultKey='trackName')).ratio()\n Score += nameMatchScore\n\n minorScoreDeterminer = ['isGameCenterEnabled', 'languageCodesISO2A', 'contentAdvisoryRating', 'artistId',\n 'formattedPrice']\n\n for items in minorScoreDeterminer:\n if items == \"formattedPrice\":\n if str(self._get_app_param_info(dict1, resultCount=1, resultKey=items)) == \"Free\" and str(\n self._get_app_param_info(dict2, resultCount=1, resultKey=items)) == \"Free\":\n Score += (4 / (len(minorScoreDeterminer)))\n elif str(self._get_app_param_info(dict1, resultCount=1, resultKey=items)) == \"Free\" and str(\n self._get_app_param_info(dict2, resultCount=1, resultKey=items)) != \"Free\":\n continue\n elif str(self._get_app_param_info(dict1, resultCount=1, resultKey=items)) != \"Free\" and str(\n self._get_app_param_info(dict2, resultCount=1, resultKey=items)) == \"Free\":\n continue\n elif str(self._get_app_param_info(dict1, resultCount=1, resultKey=items)) != \"Free\" and str(\n self._get_app_param_info(dict2, resultCount=1, resultKey=items)) != \"Free\":\n Score += (4 / (len(minorScoreDeterminer)))\n else:\n if str(self._get_app_param_info(dict1, resultCount=1, resultKey=items)) in str(\n self._get_app_param_info(dict2, resultCount=1, resultKey=items)):\n Score += (4 / (len(minorScoreDeterminer)))\n Score = round(Score, 1)\n log_str = \"id\" + str(self._get_app_param_info(dict2, resultCount=1, resultKey='trackId')) + \" - \" + str(\n self._get_app_param_info(dict2, resultCount=1, resultKey='trackName')) + \"\\tScore: \" + str(Score)\n except AssertionError as e:\n print(\"Error: _get_similarity_score %s\" % e)\n sys.exit(\"Exit script with error code %s\" % e)\n except TypeError as e:\n print(\"Error: _get_similarity_score %s\" % e)\n sys.exit(\"Exit script with error code %s\" % e)\n except:\n e = sys.exc_info()[0]\n print(\"Error: _get_similarity_score %s\" % e)\n sys.exit(\"Exit script with error code %s\" % e)\n else:\n return log_str",
"def similarity_scores(self, other):\n word_score = compare_dictionaries(other.words, self.words)\n word_length_score = compare_dictionaries(other.word_lengths, self.word_lengths)\n sentence_length_score = compare_dictionaries(other.sentence_lengths, self.sentence_lengths)\n stem_score = compare_dictionaries(other.stems, self.stems)\n comma_score = compare_dictionaries(other.commas_per_sentence, self.commas_per_sentence)\n list_scores = [word_score, word_length_score, sentence_length_score, stem_score, comma_score]\n return list_scores",
"def string_similarity_score(left: str, right: str):\n return SequenceMatcher(None, left, right).ratio()",
"def score(self, text):\n logger.debug(\"score on an instance of len {0}\".format(len(text)))\n fv = self.instance2fv(text)\n fv /= np.sqrt((fv*fv).sum()) # normalize vector to len 1\n fdot = self.lprot.dot(fv) \n retval = dict(zip(self.langs, fdot))\n return retval",
"def similarity(self, char1, char2, weights=(1.0, 0.0, 0.0), as_tree=False):\n\n assert char1 in self.char_dict\n assert char2 in self.char_dict\n shape_w, sound_w, freq_w = weights\n\n if char1 in self.char_dict and char2 in self.char_dict:\n\n shape_sim = self.shape_similarity(char1, char2, as_tree=as_tree)\n sound_sim = self.pronunciation_similarity(char1, char2)\n freq_sim = 1.0 - self.char_dict[char2] / len(self.char_dict)\n\n return shape_sim * shape_w + sound_sim * sound_w + freq_sim * freq_w\n else:\n return 0.0",
"def text_similarity(self, text_1: str, text_2: str):\n txt1 = self._pre_process(text_1)\n txt2 = self._pre_process(text_2)\n\n sim = self.model.wmdistance(txt1, txt2)\n\n if sim == inf:\n sim = INF_SIMILIARITY\n return sim",
"def __score(self, name, summary):\n score = 0\n for queryTerm in self.__query:\n if queryTerm.lower() in name.lower():\n score += 4\n if queryTerm.lower() == name.lower():\n score += 4\n \n if queryTerm.lower() in summary.lower():\n if QRegExp(r'\\b{0}\\b'.format(QRegExp.escape(queryTerm)),\n Qt.CaseInsensitive).indexIn(summary) != -1:\n # word match gets even higher score\n score += 2\n else:\n score += 1\n \n return score",
"def reviewer_similarity_score(self, other: _Vertex) -> float:\n if self.degree() == 0 or other.degree == 0:\n return 0.0\n else:\n neighbours = self.neighbours\n other_neighbours = other.neighbours\n same_neighbours = neighbours.keys() & other_neighbours.keys()\n union = len(self.neighbours) + len(other.neighbours)\n sim_score_so_far = 0\n\n for vertex in same_neighbours:\n # 'bothered reviewing' bonus:\n sim_score_so_far += 1\n # 'love' bonus\n if self.neighbours[vertex] >= 9 and other.neighbours[vertex] >= 9:\n sim_score_so_far += 2\n # 'like' bonus\n elif self.neighbours[vertex] >= 7 and other.neighbours[vertex] >= 7:\n sim_score_so_far += 1\n\n return sim_score_so_far / union",
"def ratio(n1,n2, explain=0, optimize=False):\n weight_normal_form = 5.0 #distance between soundexes of normal form\n weight_normal_form_soundex = 8.0 #average distance between soundexes of normal form\n weight_geslachtsnaam1 = 10.0 #distance between soundexes of geslachtsnamen\n weight_geslachtsnaam2 = 10.0 #distance between geslachtsnaam\n weight_initials = 2 #distance between initials\n\n nf1 = n1.guess_normal_form()\n nf2 = n2.guess_normal_form()\n\n if not nf1 or not nf2:\n return 0.0\n elif nf1 == nf2:\n return 1.0\n ratio_normal_form = Similarity.average_distance(split(nf1), split(nf2))\n \n #create a simkplified soundex set for this name\n #remove stopwords\n# nf1 = remove_stopwords( nf1)\n# nf2 = remove_stopwords( nf2)\n \n se1 = n1.get_normal_form_soundex()\n se2 = n2.get_normal_form_soundex()\n ratio_normal_form_soundex = Similarity.average_distance( se1, se2)\n \n #gelachtsnaam wordt op twee manieren met elkaar vergeleken\n g1 = n1.geslachtsnaam() #or n1.get_volledige_naam()\n g2 = n2.geslachtsnaam() #or n2.get_volledige_naam()\n g1 = to_ascii(g1)\n g2 = to_ascii(g2)\n if not optimize:\n #de soundexes van de achternaam worden meegewoen\n #g1_soundex = n1.soundex_nl(g1, group=2, length=-1)\n g1_soundex = n1.geslachtsnaam_soundex()\n #g2_soundex = n2.soundex_nl(g2, group=2, length=-1)\n g2_soundex = n2.geslachtsnaam_soundex()\n ratio_geslachtsnaam1 = Similarity.average_distance(g1_soundex, g2_soundex)\n else:\n ratio_geslachtsnaam1 = 1 \n weight_geslachtsnaam1 = 0\n \n #n de afstand van de woorden in de achtenraam zelf\n ratio_geslachtsnaam2 = Similarity.average_distance(\n re.split('[ \\.\\,\\-]', g1.lower()),\n re.split('[ \\.\\,\\-]', g2.lower()),\n levenshtein_ratio)\n n1_initials = n1.initials()\n n1_initials_lower = n1_initials.lower()\n n2_initials = n2.initials()\n n2_initials_lower = n2_initials.lower()\n n1_contains_initials = n1.contains_initials()\n n2_contains_initials = n2.contains_initials()\n #count initials only if we have more than one\n #(or perhaps make this: if we know the first name)\n if len(n1_initials) == 1 or len(n2_initials) == 1:\n #initials count much less if there is only one\n weight_initials = weight_initials_if_one_name_consists_of_one_word_only\n# ratio_initials = .5\n ratio_initials = levenshtein_ratio(n1_initials_lower, n2_initials_lower)\n elif n1_contains_initials or n2_contains_initials:\n ratio_initials = levenshtein_ratio(n1_initials_lower, n2_initials_lower)\n weight_initials = weight_initials_if_one_name_is_in_initials\n elif len(n1_initials) > 1 and len(n2_initials) > 1:\n ratio_initials = levenshtein_ratio(n1_initials_lower, n2_initials_lower)\n else:\n ratio_initials = 0.7\n \n if n1_contains_initials or n2_contains_initials:\n weight_normal_form = weight_normal_form_if_one_name_is_in_initials \n weight_normal_form_soundex = weight_normal_form_soundex_if_one_name_is_in_initials\n\n counter = (ratio_normal_form * weight_normal_form +\n ratio_normal_form_soundex * weight_normal_form_soundex +\n ratio_geslachtsnaam1 * weight_geslachtsnaam1 +\n ratio_geslachtsnaam2 * weight_geslachtsnaam2 +\n ratio_initials * weight_initials)\n numerator = (weight_normal_form + weight_normal_form_soundex +\n weight_initials + weight_geslachtsnaam1 + weight_geslachtsnaam2)\n if numerator == 0:\n return 0.0\n final_ratio = counter/numerator\n\n if explain:\n s = '-' * 100 + '\\n'\n s += 'Naam1: %s [%s] [%s] %s\\n' % (n1, n1_initials, n1.guess_normal_form(), se1)\n s += 'Naam2: %s [%s] [%s] %s\\n' % (n2, n2_initials, n2.guess_normal_form(), se2)\n s += 
'Similarity ratio: %s\\n' % final_ratio\n s += '--- REASONS' + '-' * 30 + '\\n'\n format_s = '%-30s | %-10s | %-10s | %-10s | %-10s | %s-10s\\n'\n s += format_s % ('\\t property', ' ratio', ' weight','relative_weight', ' r*w', 'r * relative_w')\n s += '\\t' + '-' * 100 + '\\n'\n format_s = '\\t%-30s | %-10f | %-10f | %-10f | %-10f | %-10f\\n'\n s += format_s % (' normal_form', ratio_normal_form, weight_normal_form,weight_normal_form/counter, ratio_normal_form * weight_normal_form, ratio_normal_form * weight_normal_form/counter)\n s += format_s % ('soundex van normal_form', ratio_normal_form_soundex, weight_normal_form_soundex,weight_normal_form_soundex/counter, ratio_normal_form_soundex* weight_normal_form_soundex, ratio_normal_form_soundex * weight_normal_form_soundex/counter)\n s += format_s % ('soundex van geslachtsnaam1', ratio_geslachtsnaam1, weight_geslachtsnaam1,weight_geslachtsnaam1/counter, ratio_geslachtsnaam1 * weight_geslachtsnaam1, ratio_geslachtsnaam1 * weight_geslachtsnaam1/counter)\n s += format_s % ('geslachtsnaam', ratio_geslachtsnaam2, weight_geslachtsnaam2,weight_geslachtsnaam2/counter, ratio_geslachtsnaam2 *weight_geslachtsnaam2 , ratio_geslachtsnaam2 * weight_geslachtsnaam2/counter)\n s += format_s % ('initials', ratio_initials, weight_initials, weight_initials/counter, ratio_initials *weight_initials, ratio_initials * weight_initials/counter)\n s += '\\tTOTAL (numerator) | %s (counter = %s)\\n' % (counter, numerator)\n \n return s\n return final_ratio",
"def matchscore(self):\n print(self.team1.name + \" \" + str(self.team1score) + \" - \" + str(self.team2score) + \" \" + self.team2.name)",
"def recordMatch(df, name1, name2, winner=None):\n\n expected1 = compareRating(df, name1, name2)\n expected2 = compareRating(df, name2, name1)\n\n k = 42\n\n rating1 = getPlayerRating(df, name1)\n rating2 = getPlayerRating(df, name2)\n\n if winner == name1:\n score1 = 1.0\n score2 = 0.0\n elif winner == name2:\n score1 = 0.0\n score2 = 1.0\n else:\n raise InputError(\"One of the names must be the winner\")\n\n newRating1 = int(rating1 + k * (score1 - expected1))\n newRating2 = int(rating2 + k * (score2 - expected2))\n\n if newRating1 < 0:\n newRating1 = 0\n newRating2 = rating2 - rating1\n\n elif newRating2 < 0:\n newRating2 = 0\n newRating1 = rating1 - rating2\n\n # df = updatePlayerRating(df, name1, newRating1)\n # df = updatePlayerRating(df, name2, newRating2)\n return newRating1, newRating2",
"def score_match(phrase, song):\n return SequenceMatcher(None, phrase, song.title).ratio()\n ## Examples of other score metrics and modifiers:\n ## Penalize based on difference in phrase length (word count)\n # return -abs(len(song.split()) - len(phrase.split()))\n ## Penalize based on missing words\n # return -len([w for w in phrase.split() if w not in song.split()])",
"def wup_measure(self,a, b, similarity_threshold = 0.925, debug = False):\n if debug: print('Original', a, b)\n #if word_pair_dict.has_key(a+','+b):\n if a+','+b in self.word_pair_dict.keys():\n return self.word_pair_dict[a+','+b]\n\n def get_semantic_field(a):\n return wn.synsets(a, pos=wn.NOUN)\n\n if a == b: return 1.0\n\n interp_a = get_semantic_field(a)\n interp_b = get_semantic_field(b)\n if debug: print(interp_a)\n\n if interp_a == [] or interp_b == []:\n return 0.0\n\n if debug: print('Stem', a, b)\n global_max=0.0\n for x in interp_a:\n for y in interp_b:\n local_score=x.wup_similarity(y)\n if debug: print('Local', local_score)\n if local_score > global_max:\n global_max=local_score\n if debug: print('Global', global_max)\n\n # we need to use the semantic fields and therefore we downweight\n # unless the score is high which indicates both are synonyms\n if global_max < similarity_threshold:\n interp_weight = 0.1\n else:\n interp_weight = 1.0\n\n final_score = global_max * interp_weight\n self.word_pair_dict[a+','+b] = final_score\n return final_score",
"def similarity(self, other):\n part = self.__part_converter(self.part)\n if part != self.__part_converter(other.part):\n return 0\n tresh = 0.2\n sss = wn.synsets(self.string, part)\n sso = wn.synsets(other.string, part)\n best_sim = 0\n for ss in sss:\n # if not match('^' + self.string + '\\..+', ss.name()):\n # continue\n for so in sso:\n # if not match('^' + other.string + '\\..+', so.name()):\n # continue\n sim = ss.wup_similarity(so)\n if (tresh < sim) and (best_sim < sim):\n best_sim = sim\n return best_sim"
] |
[
"0.6383207",
"0.629914",
"0.6173675",
"0.6157977",
"0.61087143",
"0.6092695",
"0.6014092",
"0.593099",
"0.5915258",
"0.59143925",
"0.5904347",
"0.589253",
"0.5890898",
"0.5858196",
"0.5838747",
"0.58362615",
"0.58156574",
"0.57967263",
"0.5784012",
"0.5770046",
"0.5751338",
"0.57302856",
"0.57227635",
"0.5700489",
"0.5697654",
"0.56945956",
"0.5682105",
"0.5680777",
"0.56732047",
"0.5670662"
] |
0.78827935
|
0
|
Calculate the score for the whole sentence by comparing all frames within them. This will go on recursively to the frame elements and to the words themselves.
|
def get_sentence_score(self, gold_sentence, candidate_sentence):
# Collect all frames
gold_frames = gold_sentence['frames']
candidate_frames = candidate_sentence['frames']
# Check first in the actual order
score = 0
length = min(len(gold_frames), len(candidate_frames))
if length == 0:
return 0.0
for index in range(length):
score += self.get_frame_score_in_window(gold_frames, candidate_frames, index)
score /= float(length)
return min(score, 1.0)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def analyze(self, text):\n\n score = 0.0;\n\n words = text.split(' ')\n # match each word in either the positives or negatives list adding or subtracting 1 from the score if present\n for word in words:\n for w in self.positives:\n if w == word.lower():\n score += 1.0\n continue\n \n for w in self.negatives:\n if w == word.lower():\n score -= 1.0\n continue\n\n return score",
"def score(self, sentence):\n\n\n # TODO your code here\n score = 0.0 \n prevWord = \"\"\n prevPrevWord = \"\"\n newSentence = []\n for word in sentence:\n newSentence += word.split()\n for currentWord in sentence:\n currentWord = currentWord.strip(STRIP_CHARS)\n currentWord = currentWord.lower()\n if prevWord != \"\":\n if prevPrevWord != \"\":\n trigram = (prevPrevWord, prevWord, currentWord)\n trigramCount = self.trigramCounts[trigram]\n if trigramCount > 0:\n score += math.log(max(self.trigramCounts[trigram] - DISCOUNT, 0)*len(self.trigramCounts) + DISCOUNT*self.followingCounts[(prevPrevWord, prevWord)]*self.continuationCounts[currentWord])\n # Subtraction by 1 removes the add one count from the laplace\n # smoothing\n score -= math.log((self.bigramCounts[(prevPrevWord, prevWord)]) * len(self.trigramCounts))\n elif self.bigramCounts[(prevWord, currentWord)] > 0:\n score += math.log(self.bigramCounts[(prevWord, currentWord)]*BI_BACKOFF_COEFFICIENT)\n score -= math.log(self.totalBigramCounts)\n else:\n count = self.unigramCounts[currentWord]\n score += math.log(count * UNI_BACKOFF_COEFFICIENT)\n score -= math.log(self.total)\n else:\n prevPrevWord = prevWord\n prevWord = currentWord\n else:\n prevWord = currentWord\n return -score",
"def score(self, sentence):\n score = 0.0\n prev_word = None\n for token in sentence:\n two_words_count = self.bigram_count[prev_word][token]\n prev_word_count = self.unigram_count[prev_word]\n if (two_words_count > 0):\n score += math.log(two_words_count)\n score -= math.log(prev_word_count)\n else:\n score += math.log(self.backoff_multiplier)\n score += math.log(self.unigram_count[token] + 1.0)\n score -= math.log(self.num_words + self.vocabulary_size)\n prev_word = token\n return score",
"def compute_score(self):\n for i in xrange(FRAMES):\n # STRIKE\n if self.frames[i][0] == 10:\n # CONSECUTIVE STRIKE\n if self.frames[i + 1][0] == 10:\n self.scores.append(self.frames[i][0] +\n self.frames[i + 1][0] +\n self.frames[i + 2][0])\n else:\n self.scores.append(self.frames[i][0] +\n self.frames[i + 1][0] +\n self.frames[i + 1][1])\n # SPARE\n elif (self.frames[i][0] + self.frames[i][1] == 10):\n self.scores.append(self.frames[i][0] + self.frames[i][1] +\n self.frames[i + 1][0])\n # NEITHER\n else:\n self.scores.append(self.frames[i][0] + self.frames[i][1])\n # Total Score\n for score in self.scores:\n self.score += score",
"def get_frame_element_score(self, fe1, fe2):\n name_match = fe1['name'] == fe2['name']\n score = self.get_text_score(fe1['spans'][0]['text'], fe2['spans'][0]['text'])\n if not name_match:\n score *= self.pen.factor_name_mismatch\n return score",
"def word_match(question, morph_story_sentence_words):\n verbs_more_weightage = []\n question_pos_words = p.pos_tagger(question)\n for word, pos_tag in question_pos_words.items():\n if pos_tag == \"VERB\":\n verbs_more_weightage.append(word)\n verbs_more_weightage = p.morphological_roots(verbs_more_weightage)\n\n question_no_stop_words_punct = p.removeStopWords(question)\n question_no_stop_words_punct = question_no_stop_words_punct.translate(str.maketrans('', '', string.punctuation))\n morphological_root_of_question = p.word_tokenizer(question_no_stop_words_punct)\n morphological_root_of_question = p.morphological_roots(morphological_root_of_question)\n\n score = 0\n for morph_story_word in morph_story_sentence_words:\n if morph_story_word in morphological_root_of_question:\n if morph_story_word in verbs_more_weightage:\n score = score + 6\n else:\n score = score + 3\n elif morph_story_word in verbs_more_weightage:\n score = score + 6\n return score",
"def score(self, sentence):\n score = 0.0\n V = len(self.f1) # vocabulary size\n for token in sentence:\n if token in self.f1: score += self.f1[token]\n else: score -= math.log10(self.total + V)\t\t # OOV \n return score",
"def score(self, sentence):\n score = 0.0\n lastToken = \"#\"\n\n for token in sentence:\n bigramCount = self.bigramCount[lastToken][token] + 1e-6\n lastTokenCount = self.unigramCount[lastToken]\n d = self.delta(bigramCount)\n\n if (lastTokenCount == 0 ):\n lastTokenCount = 999999\n\n # equivalent to: |{ w : c(w_i-1,w) > 0}|\n bigramsInitiatedByPreviousToken = len(self.bigramCount[lastToken].items()) + 0.001\n\n # d\n # lambda (w_i-1) = -------- |{ w : c(w_i-1, w) > 0}|\n # c(w_i-1)\n l = (d / lastTokenCount) * bigramsInitiatedByPreviousToken\n\n # |{ w_i-1 : c(w_i-1,w_i) > }| => Bigrams terminated in w_i\n # P (w_i) = ------------------------------\n # continuation |{ w_j-1 : c(w_j-1, w_j) > 0}| => Total number of bigram types\n continuationProb = float(len(self.reverseBigramCount[token].items())) / self.total\n\n score += math.log(bigramCount/lastTokenCount + l * continuationProb)\n\n lastToken = token\n return score",
"def score_sentence(self, sentence):\n\t\t\n\t\t# YOUR CODE HERE",
"def run(self, args):\n self.pen.score_for_matched_lexical = args[0]\n self.pen.score_for_matched_synonym = args[1]\n self.factor_word_offset_penalty = args[2]\n self.factor_sentence_length_mismatch = args[3]\n self.factor_name_mismatch = args[4]\n self.factor_fe_offset_penalty = args[5]\n self.weight_target_frame_element = args[6]\n self.weight_frame_elements = args[7]\n self.factor_frame_offset_penalty = args[8]\n misses = []\n for row in range(self.data.get_number_of_rows()):\n ref_sentence = self.data.get_row(row)[self.data.get_gold()]\n results = {}\n for team, team_sentence in self.data.get_row_for_teams(self.evaluator.get_teams(row), row).iteritems():\n results[team] = self.get_sentence_score(ref_sentence, team_sentence)\n misses.append(self.evaluator.compare_all(results, row))\n return np.mean(misses) / 5.0",
"def analyze(self, text):\n \n total_words = len(text)\n \n negatives_length = len(self.negatives)\n positives_length = len(self.positives)\n \n posneg_sum = 0\n \n for word in text:\n \n for j in range(0, positives_length):\n if word == self.positives[j][:-1]:\n posneg_sum += 1\n \n for k in range(0, negatives_length):\n if word == self.negatives[k][:-1]:\n posneg_sum -= 1\n\n return posneg_sum",
"def analyze(self, text):\n\n text = tknzr.tokenize(text)\n\n score = 0\n \n for word in text:\n if self.positiveWords.count(word.lower()) > 0:\n score += 1\n elif self.negativeWords.count(word.lower()) > 0:\n score -= 1\n \n return score",
"def wordSimilarityRatio(sent_1,sent_2):",
"def compare_word_selection(selection1,selection2):\n\n num_extra=0;\n num_total=0;\n total_score=0;\n for k in selection1.keys():\n num_total+=1;\n if k not in selection2:\n num_extra+=1;\n else:\n score=compare_sentences(selection1[k],selection2[k]);\n logging.info(\"Score: %f\" % score );\n total_score+=score;\n\n for k in selection2.keys():\n if k not in selection1:\n num_total+=1;\n num_extra+=1;\n\n if num_total==0:\n return 0;\n\n return float(total_score)/float(num_total)",
"def score(self, sentence):\n score = 0.0\n last_token = None\n for token in sentence:\n if not last_token:\n last_token = token\n continue\n tup = (last_token, token)\n if tup in self.counts:\n score += self.s[tup]\n else: # stupid backoff to add-one smoothed unigram\n if self.s[token]: score += self.s[token]\n else: score += math.log(1.0 * (self.counts[token] + 1) / (self.ntokens * 2))\n last_token = token\n return score",
"def score(self, sentence):\n # TODO your code here\n score = 0.0\n for i,token in enumerate(sentence[1:]):\n prev = sentence[i]\n current = token\n freq = self.vocab[current][prev] + self.epsilon\n\n score += math.log(freq)\n score -= math.log(self.word_counts[prev] + self.epsilon * self.v)\n return score",
"def score_seq(self, seq, verbose=False):\n score = 0.0\n count = 0\n # Start at third word, since we need a full context.\n for i in range(2, len(seq)):\n if (seq[i] == \"<s>\" or seq[i] == \"</s>\"):\n continue # Don't count special tokens in score.\n s = np.log2(self.next_word_proba(seq[i], seq[i-2:i]))\n score += s\n count += 1\n # DEBUG\n if verbose:\n print(\"log P({:s} | {:s}) = {.03f}\".format(seq[i], \" \".join(seq[i-2:i]), s))\n return score, count",
"def bowling_score(frames):\n score = 0\n frames = frames.split(' ')\n for frame in range(len(frames)):\n current = frames[frame]\n\n # If on final frame bonuses do not apply\n if frame == len(frames) - 1:\n score += simple_score(current)\n break\n\n # Not on final frame\n if current.isdigit():\n # Normal scoring, no strikes or spares.\n score += simple_score(current)\n\n else:\n # Spares are always two rolls and strikes are always one,\n # so merging the three next rolls starting with the beginning\n # of this frame works in both scenarios.\n current = ''.join(frames[frame:frame+3])[:3]\n score += simple_score(current)\n\n return score",
"def score_function(word1, word2):\n if word2 == '':\n return len(word1)\n elif word1 == '':\n return len(word2)\n elif word1 == word2:\n return 0\n elif word1[0] == word2[0]:\n return 0 + score_function(word1[1:], word2[1:])\n else:\n add_char = 1 + score_function(word2[0] + word1[:], word2[:])\n sub_char = 1 + score_function(word1[1:], word2[1:])\n remove_char = 1 + score_function(word1[1:], word2[:])\n return min(add_char, sub_char, remove_char)\n # if word1 == word2: # Fill in the condition\n # # BEGIN Q6\n # return 0\n # # END Q6\n # elif word1[0] == word2[0]: # Feel free to remove or add additional cases\n # # BEGIN Q6\n # return score_function(word1[1:], word2[1:])\n # # END Q6\n # else:\n #\n # elif len(word1) > len(word2):\n # difference = len(word2) - len(word1)\n # word2 = word2[0:len(word1)]\n # return difference + score_function(word1, word2)\n # elif len(word1) == len(word2):\n # total = 0\n # for i in word1:\n # if word1[i] == word2[i]:\n # total += 1\n # return total\n # elif len(word1) < len(word2):\n # difference = len(word2) - len(word1)\n # word1 = word1 + word2[len(word1):]\n # return difference + score_function(word1, word2)\n # else:\n #\n # def add_char(word1, word2):\n # while len(word1) <= len(word2):\n # return swap_score(word1, word2)\n # return 0\n #\n # def remove_char(word1, word2):\n # while len(word1) >= len(word2):\n # if word1[0] != word2[0]:\n # return 1 + remove_char(word1[1:], word2[1:])\n # return 0\n #\n # def sub_char(word1, word2):\n # if len(word1) == len(word2):\n # if word1[0] == word2[0]:\n # return 0\n # else:\n # return 1 + sub_char(word1[1:], word2[1:])\n # return 0\n # # BEGIN Q6\n # return add_char(word1, word2) + remove_char(word1, word2) + sub_char(word1, word2)\n # # END Q6",
"def score(self, sentence):\n s = 0;\n\n #for every word\n for i in xrange(len(sentence)):\n score = self.getBackOff(tuple(sentence[:i+1]));\n if(score != 0):\n s += math.log(score);\n\n return s",
"def getScore(self, sentence):\r\n \r\n score = 0\r\n \r\n for word in sentence.words:\r\n score += len(word)\r\n \r\n return score",
"def get_frame_element_matches_window(self, gold, candidate):\n score = 0\n length = min(len(gold), len(candidate))\n if length == 0:\n return 0\n for index in range(length):\n score += self.get_frame_element_score_in_window(gold, candidate, index)\n return score",
"def compare_words(word1, word2):\n word1 = word1.lower()\n word2 = word2.lower()\n seg_scores = []\n if len(word1) >= len(word2):\n for i in range(0, len(word1) - len(word2) + 1):\n seg_scores.append(find_difference(word1[i:i+len(word2)], word2))\n else:\n for i in range(0, len(word2) - len(word1) + 1):\n seg_scores.append(find_difference(word2[i:i+len(word1)], word1))\n return round(min(seg_scores) + abs(len(word1) - len(word2))/float(len(max([word1, word2]))),2)",
"def analyze(self, text):\n\n tknzr = nltk.tokenize.TweetTokenizer()\n words = tknzr.tokenize(text)\n \n score = 0\n \n for word in words:\n if word.lower() in self.positives:\n score += 1\n elif word.lower() in self.negatives:\n score -= 1\n else:\n continue\n \n return score",
"def get_score(self,sentence_1, sentence_2):\n\t return self.DELTA * self.semantic_similarity(sentence_1, sentence_2, True) + (1.0 - self.DELTA) * self.word_order_similarity(sentence_1, sentence_2)",
"def score_match(phrase, song):\n return SequenceMatcher(None, phrase, song.title).ratio()\n ## Examples of other score metrics and modifiers:\n ## Penalize based on difference in phrase length (word count)\n # return -abs(len(song.split()) - len(phrase.split()))\n ## Penalize based on missing words\n # return -len([w for w in phrase.split() if w not in song.split()])",
"def test_score_text2(self):\n\t\t#import pdb; pdb.set_trace()\n\t\ttest = sentiment.LibraryRun(self.text3, self.lib)\n\t\tmatches = test.find_phrase_matches(self.tokens_generator3)[0]\n\t\tobj_ut, _ = test.score_text(matches, end_threshold=0.5)\n\t\tself.assertEqual(obj_ut, -1.25)",
"def test_score_text4(self):\n\t\ttest = sentiment.LibraryRun(self.text3, self.lib)\n\t\tmatches = test.find_phrase_matches(self.tokens_generator3)[0]\n\t\t_, obj_ut = test.score_text(matches, end_threshold=0.5)\n\t\tself.assertEqual(obj_ut, {'not good': [[2, -1, 0]], \n\t\t\t'not very good': [[4, -1.5, 0]]})",
"def score(self, sentence):\n\n score = 0.0\n i = 0\n temp = \"\"\n for token in sentence:\n count = self.unigramCounts[token]\n if (i == 0):\n i = i + 1\n temp = token\n continue\n\n key = temp + \",\" + token\n bicount = self.bigramCounts[key]\n unicount = self.unigramCounts[temp]\n temp = token\n if bicount > 0 :\n\n score += (math.log(bicount) - math.log(unicount))\n else:\n unicount = self.unigramCounts[token]\n score += math.log(unicount + 1) + math.log(0.4)\n score -= math.log(self.total + len(self.unigramCounts))\n\n return score",
"def word_similarity(self):\n y_true = []\n y_pred = []\n for i in open(\"data/word_sim_dataset.txt\").read().split('\\n'):\n i = self.preprocessor(i)\n w1 = i.split()[-1]\n w2 = i.split()[-2] \n st = float(i.split()[-3]) / 4 #dataset has scale from 0 to 4\n \n try:\n w1 = self.embeddings_index[w1] \n w2 = self.embeddings_index[w2] \n w1 = w1 / np.linalg.norm(w1)\n w2 = w2 / np.linalg.norm(w2)\n y_pred.append(np.dot(w1,w2))\n y_true.append(st)\n except:\n pass\n if y_true == []:\n return 1.0\n return mean_squared_error(y_true, y_pred, squared=False)"
] |
[
"0.6548635",
"0.63201207",
"0.6306467",
"0.6282123",
"0.6157068",
"0.61255497",
"0.61066025",
"0.6098733",
"0.603776",
"0.60062015",
"0.6001164",
"0.599811",
"0.59679884",
"0.59551257",
"0.5931475",
"0.58964103",
"0.5882361",
"0.58487177",
"0.58317465",
"0.582618",
"0.57746524",
"0.57710403",
"0.56886315",
"0.5677311",
"0.56700444",
"0.5666765",
"0.56617653",
"0.5659409",
"0.5648617",
"0.5646497"
] |
0.6558668
|
0
|
Testing pack, unpacking and the Frame class.
|
def testFramepack1(self):
# Check bad frame generation:
frame = stomper.Frame()
def bad():
frame.cmd = 'SOME UNNOWN CMD'
self.assertRaises(stomper.FrameError, bad)
# Generate a MESSAGE frame:
frame = stomper.Frame()
frame.cmd = 'MESSAGE'
frame.headers['destination'] = '/queue/a'
frame.headers['message-id'] = 'card_data'
frame.body = "hello queue a"
result = frame.pack()
# print "\n-- result " + "----" * 10
# pprint.pprint(result)
# print
# Try bad message unpack catching:
bad_frame = stomper.Frame()
self.assertRaises(stomper.FrameError, bad_frame.unpack, None)
self.assertRaises(stomper.FrameError, bad_frame.unpack, '')
# Try to read the generated frame back in
# and then check the variables are set up
# correctly:
frame2 = stomper.Frame()
frame2.unpack(result)
self.assertEqual(frame2.cmd, 'MESSAGE')
self.assertEqual(frame2.headers['destination'], '/queue/a')
self.assertEqual(frame2.headers['message-id'], 'card_data')
self.assertEqual(frame2.body, 'hello queue a')
result = frame2.pack()
correct = "MESSAGE\ndestination:/queue/a\nmessage-id:card_data\n\nhello queue a\x00\n"
# print "result: "
# pprint.pprint(result)
# print
# print "correct: "
# pprint.pprint(correct)
# print
#
self.assertEqual(result, correct)
result = stomper.unpack_frame(result)
self.assertEqual(result['cmd'], 'MESSAGE')
self.assertEqual(result['headers']['destination'], '/queue/a')
self.assertEqual(result['headers']['message-id'], 'card_data')
self.assertEqual(result['body'], 'hello queue a')
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def testFramepack2(self):\n # Check bad frame generation:\n frame = stomper.Frame()\n frame.cmd = 'DISCONNECT'\n result = frame.pack()\n correct = 'DISCONNECT\\n\\n\\x00\\n'\n self.assertEqual(result, correct)",
"def testFrameUnpack2(self):\n msg = \"\"\"MESSAGE\ndestination:/queue/a\nmessage-id: card_data\n\nhello queue a\"\"\"\n\n result = stomper.unpack_frame(msg)\n\n self.assertEqual(result['cmd'], 'MESSAGE')\n self.assertEqual(result['headers']['destination'], '/queue/a')\n self.assertEqual(result['headers']['message-id'], 'card_data')\n self.assertEqual(result['body'], 'hello queue a')",
"def test_frame_rw(self):\n from ..io.frame import read_frame, write_frame, read_meta_frame\n from ..io.fibermap import empty_fibermap\n nspec, nwave, ndiag = 5, 10, 3\n flux = np.random.uniform(size=(nspec, nwave))\n ivar = np.random.uniform(size=(nspec, nwave))\n meta = dict(BLAT=0, FOO='abc', FIBERMIN=500, FLAVOR='science')\n mask_int = np.zeros((nspec, nwave), dtype=int)\n mask_uint = np.zeros((nspec, nwave), dtype=np.uint32)\n wave = np.arange(nwave)\n R = np.random.uniform( size=(nspec, ndiag, nwave) )\n\n for mask in (mask_int, mask_uint):\n frx = Frame(wave, flux, ivar, mask, R, meta=meta)\n write_frame(self.testfile, frx)\n frame = read_frame(self.testfile)\n read_meta = read_meta_frame(self.testfile)\n\n flux2 = flux.astype('f4').astype('f8')\n ivar2 = ivar.astype('f4').astype('f8')\n wave2 = wave.astype('f4').astype('f8')\n R2 = R.astype('f4').astype('f8')\n\n self.assertTrue(frame.wave.dtype == np.float64)\n self.assertTrue(frame.flux.dtype == np.float64)\n self.assertTrue(frame.ivar.dtype == np.float64)\n self.assertTrue(frame.resolution_data.dtype == np.float64)\n\n self.assertTrue(np.all(flux2 == frame.flux))\n self.assertTrue(np.all(ivar2 == frame.ivar))\n self.assertTrue(np.all(wave2 == frame.wave))\n self.assertTrue(np.all(mask == frame.mask))\n self.assertTrue(np.all(R2 == frame.resolution_data))\n self.assertTrue(frame.resolution_data.dtype.isnative)\n self.assertEqual(frame.meta['BLAT'], meta['BLAT'])\n self.assertEqual(frame.meta['FOO'], meta['FOO'])\n self.assertEqual(frame.meta['BLAT'], read_meta['BLAT'])\n self.assertEqual(frame.meta['FOO'], read_meta['FOO'])\n\n #- read_frame works even with \"wrong\" .fits / .fits.gz\n if self.testfile.endswith('.fits'):\n frame = read_frame(self.testfile + '.gz') # finds file anyway\n elif self.testfile.endswith('.fits.gz'):\n frame = read_frame(self.testfile[:-3]) # finds file anyway\n else:\n raise ValueError(f'unrecognized extension for {self.testfile=}')\n\n #- Test float32 on disk vs. float64 in memory\n for extname in ['FLUX', 'IVAR', 'RESOLUTION']:\n data = fits.getdata(self.testfile, extname)\n self.assertEqual(data.dtype, np.dtype('>f4'), '{} not type >f4'.format(extname))\n for extname in ['WAVELENGTH']:\n data = fits.getdata(self.testfile, extname)\n self.assertEqual(data.dtype, np.dtype('>f8'), '{} not type >f8'.format(extname))\n\n #- with and without units\n frx = Frame(wave, flux, ivar, mask, R, meta=meta)\n write_frame(self.testfile, frx)\n frame = read_frame(self.testfile)\n self.assertTrue('BUNIT' not in frame.meta)\n write_frame(self.testfile, frx, units='photon/bin')\n frame = read_frame(self.testfile)\n self.assertEqual(frame.meta['BUNIT'], 'photon/bin')\n frx.meta['BUNIT'] = 'blatfoo'\n write_frame(self.testfile, frx)\n frame = read_frame(self.testfile)\n self.assertEqual(frame.meta['BUNIT'], 'blatfoo')\n #- function argument trumps pre-existing BUNIT\n write_frame(self.testfile, frx, units='quat')\n frame = read_frame(self.testfile)\n self.assertEqual(frame.meta['BUNIT'], 'quat')\n\n #- with and without fibermap\n self.assertEqual(frame.fibermap, None)\n fibermap = empty_fibermap(nspec)\n fibermap['TARGETID'] = np.arange(nspec)*2\n frx = Frame(wave, flux, ivar, mask, R, fibermap=fibermap, meta=dict(FLAVOR='science'))\n write_frame(self.testfile, frx)\n frame = read_frame(self.testfile)\n for name in fibermap.dtype.names:\n match = np.all(fibermap[name] == frame.fibermap[name])\n self.assertTrue(match, 'Fibermap column {} mismatch'.format(name))",
"def test_construct_frame_tag(attributes):\n frame_ = Frame(**attributes)\n assert frame_.construct() == frame.render(attributes)",
"def _pack(self):\n pass",
"def test_FRAME_DUoperations():\n tmFrame0 = CCSDS.FRAME.TMframe()\n print(\"tmFrame0 =\", tmFrame0)\n print(\"\")\n tmFrame1 = CCSDS.FRAME.TMframe(testData.TM_FRAME_01)\n print(\"tmFrame1 =\", tmFrame1)\n if tmFrame1.versionNumber != testData.TM_FRAME_01_versionNumber:\n print(\"tmFrame1 versionNumber wrong:\", tmFrame1.versionNumber, \"- should be\", testData.TM_FRAME_01_versionNumber)\n return False\n if tmFrame1.spacecraftId != testData.TM_FRAME_01_spacecraftId:\n print(\"tmFrame1 spacecraftId wrong:\", tmFrame1.spacecraftId, \"- should be\", testData.TM_FRAME_01_spacecraftId)\n return False\n if tmFrame1.virtualChannelId != testData.TM_FRAME_01_virtualChannelId:\n print(\"tmFrame1 virtualChannelId wrong:\", tmFrame1.virtualChannelId, \"- should be\", testData.TM_FRAME_01_virtualChannelId)\n return False\n if tmFrame1.operationalControlField != testData.TM_FRAME_01_operationalControlField:\n print(\"tmFrame1 operationalControlField wrong:\", tmFrame1.operationalControlField, \"- should be\", testData.TM_FRAME_01_operationalControlField)\n return False\n if tmFrame1.masterChannelFrameCount != testData.TM_FRAME_01_masterChannelFrameCount:\n print(\"tmFrame1 masterChannelFrameCount wrong:\", tmFrame1.masterChannelFrameCount, \"- should be\", testData.TM_FRAME_01_masterChannelFrameCount)\n return False\n if tmFrame1.virtualChannelFCountLow != testData.TM_FRAME_01_virtualChannelFCountLow:\n print(\"tmFrame1 virtualChannelFCountLow wrong:\", tmFrame1.virtualChannelFCountLow, \"- should be\", testData.TM_FRAME_01_virtualChannelFCountLow)\n return False\n if tmFrame1.secondaryHeaderFlag != testData.TM_FRAME_01_secondaryHeaderFlag:\n print(\"tmFrame1 secondaryHeaderFlag wrong:\", tmFrame1.secondaryHeaderFlag, \"- should be\", testData.TM_FRAME_01_secondaryHeaderFlag)\n return False\n if tmFrame1.synchronisationFlag != testData.TM_FRAME_01_synchronisationFlag:\n print(\"tmFrame1 synchronisationFlag wrong:\", tmFrame1.synchronisationFlag, \"- should be\", testData.TM_FRAME_01_synchronisationFlag)\n return False\n if tmFrame1.packetOrderFlag != testData.TM_FRAME_01_packetOrderFlag:\n print(\"tmFrame1 packetOrderFlag wrong:\", tmFrame1.packetOrderFlag, \"- should be\", testData.TM_FRAME_01_packetOrderFlag)\n return False\n if tmFrame1.segmentLengthId != testData.TM_FRAME_01_segmentLengthId:\n print(\"tmFrame1 segmentLengthId wrong:\", tmFrame1.segmentLengthId, \"- should be\", testData.TM_FRAME_01_segmentLengthId)\n return False\n if tmFrame1.firstHeaderPointer != testData.TM_FRAME_01_firstHeaderPointer:\n print(\"tmFrame1 firstHeaderPointer wrong:\", tmFrame1.firstHeaderPointer, \"- should be\", testData.TM_FRAME_01_firstHeaderPointer)\n return False\n # extract packets and check it\n leadingFragment, packets, trailingFragment = tmFrame1.getPackets()\n if leadingFragment != testData.TM_FRAME_01_leadingFragment:\n print(\"tmFrame1 leadingFragment wrong:\", leadingFragment, \"- should be\", testData.TM_FRAME_01_leadingFragment)\n return False\n if len(packets) != testData.TM_FRAME_01_nrPackets:\n print(\"tmFrame1 nr. 
of packets wrong:\", len(packets), \"- should be\", testData.TM_FRAME_01_nrPackets)\n return False\n if trailingFragment != testData.TM_FRAME_01_trailingFragment:\n print(\"tmFrame1 trailingFragment wrong:\", trailingFragment, \"- should be\", testData.TM_FRAME_01_trailingFragment)\n return False\n print(\"\")\n tcFrame1 = CCSDS.FRAME.TCframe(testData.TC_FRAME_01)\n print(\"tcFrame1 =\", tcFrame1)\n if tcFrame1.versionNumber != testData.TC_FRAME_01_versionNumber:\n print(\"tcFrame1 versionNumber wrong:\", tcFrame1.versionNumber, \"- should be\", testData.TC_FRAME_01_versionNumber)\n return False\n if tcFrame1.reservedFieldB != testData.TC_FRAME_01_reservedFieldB:\n print(\"tcFrame1 reservedFieldB wrong:\", tcFrame1.reservedFieldB, \"- should be\", testData.TC_FRAME_01_reservedFieldB)\n return False\n if tcFrame1.virtualChannelId != testData.TC_FRAME_01_virtualChannelId:\n print(\"tcFrame1 virtualChannelId wrong:\", tcFrame1.virtualChannelId, \"- should be\", testData.TC_FRAME_01_virtualChannelId)\n return False\n if tcFrame1.controlCommandFlag != testData.TC_FRAME_01_controlCommandFlag:\n print(\"tcFrame1 controlCommandFlag wrong:\", tcFrame1.controlCommandFlag, \"- should be\", testData.TC_FRAME_01_controlCommandFlag)\n return False\n if tcFrame1.reservedFieldA != testData.TC_FRAME_01_reservedFieldA:\n print(\"tcFrame1 reservedFieldA wrong:\", tcFrame1.reservedFieldA, \"- should be\", testData.TC_FRAME_01_reservedFieldA)\n return False\n if tcFrame1.frameLength != testData.TC_FRAME_01_frameLength:\n print(\"tcFrame1 frameLength wrong:\", tcFrame1.frameLength, \"- should be\", testData.TC_FRAME_01_frameLength)\n return False\n if tcFrame1.sequenceNumber != testData.TC_FRAME_01_sequenceNumber:\n print(\"tcFrame1 sequenceNumber wrong:\", tcFrame1.sequenceNumber, \"- should be\", testData.TC_FRAME_01_sequenceNumber)\n return False\n if tcFrame1.spacecraftId != testData.TC_FRAME_01_spacecraftId:\n print(\"tcFrame1 spacecraftId wrong:\", tcFrame1.spacecraftId, \"- should be\", testData.TC_FRAME_01_spacecraftId)\n return False\n if tcFrame1.bypassFlag != testData.TC_FRAME_01_bypassFlag:\n print(\"tcFrame1 bypassFlag wrong:\", tcFrame1.bypassFlag, \"- should be\", testData.TC_FRAME_01_bypassFlag)\n return False\n tcFrame2 = CCSDS.FRAME.TCframe(testData.TC_FRAME_02)\n if tcFrame2.versionNumber != testData.TC_FRAME_02_versionNumber:\n print(\"tcFrame2 versionNumber wrong:\", tcFrame2.versionNumber, \"- should be\", testData.TC_FRAME_02_versionNumber)\n return False\n if tcFrame2.reservedFieldB != testData.TC_FRAME_02_reservedFieldB:\n print(\"tcFrame2 reservedFieldB wrong:\", tcFrame2.reservedFieldB, \"- should be\", testData.TC_FRAME_02_reservedFieldB)\n return False\n if tcFrame2.virtualChannelId != testData.TC_FRAME_02_virtualChannelId:\n print(\"tcFrame2 virtualChannelId wrong:\", tcFrame2.virtualChannelId, \"- should be\", testData.TC_FRAME_02_virtualChannelId)\n return False\n if tcFrame2.controlCommandFlag != testData.TC_FRAME_02_controlCommandFlag:\n print(\"tcFrame2 controlCommandFlag wrong:\", tcFrame2.controlCommandFlag, \"- should be\", testData.TC_FRAME_02_controlCommandFlag)\n return False\n if tcFrame2.reservedFieldA != testData.TC_FRAME_02_reservedFieldA:\n print(\"tcFrame2 reservedFieldA wrong:\", tcFrame2.reservedFieldA, \"- should be\", testData.TC_FRAME_02_reservedFieldA)\n return False\n if tcFrame2.frameLength != testData.TC_FRAME_02_frameLength:\n print(\"tcFrame2 frameLength wrong:\", tcFrame2.frameLength, \"- should be\", testData.TC_FRAME_02_frameLength)\n return False\n 
if tcFrame2.sequenceNumber != testData.TC_FRAME_02_sequenceNumber:\n print(\"tcFrame2 sequenceNumber wrong:\", tcFrame2.sequenceNumber, \"- should be\", testData.TC_FRAME_02_sequenceNumber)\n return False\n if tcFrame2.spacecraftId != testData.TC_FRAME_02_spacecraftId:\n print(\"tcFrame2 spacecraftId wrong:\", tcFrame2.spacecraftId, \"- should be\", testData.TC_FRAME_02_spacecraftId)\n return False\n if tcFrame2.bypassFlag != testData.TC_FRAME_02_bypassFlag:\n print(\"tcFrame2 bypassFlag wrong:\", tcFrame2.bypassFlag, \"- should be\", testData.TC_FRAME_02_bypassFlag)\n return False\n clcw = CCSDS.FRAME.CLCW()\n print(\"clcw =\", clcw)\n return True",
"def testFrameUnpack3(self):\n msg = \"\"\"CONNECTED\nsession:ID:snorky.local-49191-1185461799654-3:18\n\"\"\"\n result = stomper.unpack_frame(msg)\n\n self.assertEqual(result['cmd'], 'CONNECTED')\n self.assertEqual(result['headers']['session'], 'ID:snorky.local-49191-1185461799654-3:18')\n self.assertEqual(result['body'], '')",
"def test_decode(self):\n pass # TODO(tlarsen)",
"def test_strframe():\n obj = pmisc.strframe\n\n def check_basic_frame(lines):\n fname = pmisc.normalize_windows_fname(os.path.realpath(__file__))\n assert lines[0].startswith(\"\\x1b[33mFrame object ID: 0x\")\n assert lines[1].startswith(\n \"File name......: {0}\".format(fname.replace(\".pyc\", \".py\"))\n )\n assert lines[2].startswith(\"Line number....: \")\n assert lines[3] == \"Function name..: test_strframe\"\n assert lines[4] == r\"Context........: [' fobj = inspect.stack()[0]\\n']\"\n assert lines[5] == \"Index..........: 0\"\n\n fobj = inspect.stack()[0]\n lines = obj(fobj).split(\"\\n\")\n check_basic_frame(lines)\n assert len(lines) == 6\n lines = [\n line\n for num, line in enumerate(obj(fobj, extended=True).split(\"\\n\"))\n if (num < 6) or line.startswith(\"f_\")\n ]\n check_basic_frame(lines)\n assert lines[6].startswith(\"f_back ID......: 0x\")\n assert lines[7].startswith(\"f_builtins.....: {\")\n assert lines[8].startswith(\"f_code.........: \" \"<code object test_strframe at \")\n assert lines[9].startswith(\"f_globals......: {\")\n assert lines[10].startswith(\"f_lasti........: \")\n assert lines[11].startswith(\"f_lineno.......: \")\n assert lines[12].startswith(\"f_locals.......: {\")\n if sys.hexversion < 0x03000000:\n assert lines[13] == \"f_restricted...: False\"\n assert lines[14].startswith(\"f_trace........: \")\n assert len(lines) == 15\n else:\n assert lines[13].startswith(\"f_trace........: \")\n assert len(lines) == 14",
"def test_frame_change(self):\n converter = InstructionToQobjConverter(PulseQobjInstruction, meas_level=2)\n command = FrameChange(phase=0.1)\n instruction = command(self.device.q[0].drive)\n\n valid_qobj = PulseQobjInstruction(\n name='fc',\n ch='d0',\n t0=0,\n phase=0.1\n )\n\n self.assertEqual(converter(0, instruction), valid_qobj)",
"def __post_init__(self) -> None:\n _validate_struct_class(self.struct_class)",
"def __post_init__(self) -> None:\n _validate_struct_class(self.struct_class)",
"def capture_frame(self):\n\n # search for the sync bytes which indicate the start of one frame\n # sync_bytes = [None, None, None, None]\n # while True:\n # sync_bytes[3] = sync_bytes[2]\n # sync_bytes[2] = sync_bytes[1]\n # sync_bytes[1] = sync_bytes[0]\n # sync_bytes[0] = binascii.hexlify(self.ser.read())\n #\n # # check the content\n # try:\n # if (sync_bytes[0] + sync_bytes[1] + sync_bytes[2]\n # + sync_bytes[3] == b'fffefdfc'):\n # print(\"Frame captured!\")\n # break\n # except TypeError:\n # pass\n\n while not self.lookup_sync():\n pass\n\n # print('Frame captured!')\n self.msg_size = int.from_bytes(self.ser.read(2),\n byteorder='little',\n signed=True)\n # print('Msg Size: {}'.format(self.msg_size))\n\n # raw message info (cmd + options + data)\n self.message = self.ser.read(self.msg_size)\n\n # command info\n self.cmd = int.from_bytes(self.message[:2], byteorder='little', signed=False)\n\n # raw data info (plane_num + distance values)\n self.p3_msg = self.message[-549:]\n self.p1_msg = self.message[-1098: -549]\n self.p4_msg = self.message[-1647: -1098]\n self.p2_msg = self.message[-2196: -1647]\n # print(len(self.p3_msg), len(self.p1_msg), len(self.p4_msg), len(self.p2_msg))\n\n # examine the msg size\n try:\n assert (self.p3_msg[0], self.p1_msg[0], self.p4_msg[0], self.p2_msg[0]) \\\n == (3, 2, 1, 0), \"Fail to interpret the msg\"\n except AssertionError:\n # print(\"error\\n\\n\")\n return -1\n\n # convert bytes to integers (ignore the plane_num)\n self.p3_dists = [int.from_bytes([self.p3_msg[2 * i + 1], self.p3_msg[2 * i + 2]],\n byteorder='little', signed=True)\n for i in range((len(self.p3_msg) - 1) // 2)]\n self.p1_dists = [int.from_bytes([self.p1_msg[2 * i + 1], self.p1_msg[2 * i + 2]],\n byteorder='little', signed=True)\n for i in range((len(self.p1_msg) - 1) // 2)]\n self.p4_dists = [int.from_bytes([self.p4_msg[2 * i + 1], self.p4_msg[2 * i + 2]],\n byteorder='little', signed=True)\n for i in range((len(self.p4_msg) - 1) // 2)]\n self.p2_dists = [int.from_bytes([self.p2_msg[2 * i + 1], self.p2_msg[2 * i + 2]],\n byteorder='little', signed=True)\n for i in range((len(self.p2_msg) - 1) // 2)]\n\n # convert list into np array for further processing\n self.p3_dists = np.asarray(self.p3_dists).astype('float32').reshape(274, 1)\n self.p1_dists = np.asarray(self.p1_dists).astype('float32').reshape(274, 1)\n self.p4_dists = np.asarray(self.p4_dists).astype('float32').reshape(274, 1)\n self.p2_dists = np.asarray(self.p2_dists).astype('float32').reshape(274, 1)\n\n # print(self.p3_dists[132:142])\n # print(self.p1_dists[132:142])\n # print(self.p4_dists[132:142])\n # print(self.p2_dists[132:142])\n\n # Compute the position info\n # print(self.converter)\n # print(self.thetas.shape)\n self.p3_points = self.converter * np.array([[np.cos(self.alphas[2]), np.cos(self.alphas[2]),\n np.sin(self.alphas[2])]], dtype='float32') * self.p3_dists\n self.p1_points = self.converter * np.array([[np.cos(self.alphas[0]), np.cos(self.alphas[0]),\n np.sin(self.alphas[0])]], dtype='float32') * self.p1_dists\n self.p4_points = self.converter * np.array([[np.cos(self.alphas[3]), np.cos(self.alphas[3]),\n np.sin(self.alphas[3])]], dtype='float32') * self.p4_dists\n self.p2_points = self.converter * np.array([[np.cos(self.alphas[1]), np.cos(self.alphas[1]),\n np.sin(self.alphas[1])]], dtype='float32') * self.p2_dists\n # print(self.p1_points[132:142])\n\n return 0",
"def parse_frame(data):\n test = binascii.hexlify(data)\n # defines the format of received LoRa frame header\n tap_header_format = 'bbhiibbbbib'\n phy_header_format = 'bbb'\n header_format = tap_header_format + phy_header_format\n print header_format\n header_len = struct.calcsize(header_format)\n data_len = len(data)\n if header_len > data_len:\n print 'packet too short'\n return (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,)\n else:\n # defines the frame format based on header and length of frame\n data_format = header_format + str(data_len - header_len) + 's'\n print data_format\n # print \"tap header: \", header_len\n # print \"data length: \", data_len\n # print \"test length: \", len(test)\n\n unpacked = struct.unpack(data_format, data)\n print unpacked\n # print '-----------------------------------------------------'\n # print \"bin \" + data\n # print 'hex ' + test\n return unpacked",
"def test_frame_change(self):\n cmd = FrameChange(phase=0.1)\n instruction = cmd(MeasureChannel(0))\n\n qobj = PulseQobjInstruction(name='fc', ch='m0', t0=0, phase=0.1)\n converted_instruction = self.converter(qobj)\n\n self.assertEqual(converted_instruction.timeslots, instruction.timeslots)\n self.assertEqual(converted_instruction.instructions[0][-1].command, cmd)",
"def unpack_frame(self, frame_bytes):\n byte_index = 0\n for i in iter(EITFrame.block_sizes.keys()):\n setattr(self, i, struct.unpack(EITFrame.block_types[i], frame_bytes[byte_index:(byte_index+EITFrame.block_sizes[i])]))\n byte_index = byte_index + EITFrame.block_sizes[i]",
"def test_subclassed_tuple(self):\n class Foo(tuple):\n pass\n\n x = Foo([1, 2])\n\n self.encoder.send(x)\n\n self.assertEqual(next(self.encoder), b'\\t\\x05\\x01\\x04\\x01\\x04\\x02')",
"def test_consistency_with_repeated_pack_and_unpack(self):\n\n buf, off = make_monster_from_generated_code()\n\n # Turns a buffer into Python object (T class).\n monster1 = _MONSTER.Monster.GetRootAs(buf, off)\n monsterT1 = _MONSTER.MonsterT.InitFromObj(monster1)\n\n for sizePrefix in [True, False]:\n # Re-serialize the data into a buffer.\n b1 = flatbuffers.Builder(0)\n if sizePrefix:\n b1.FinishSizePrefixed(monsterT1.Pack(b1))\n else:\n b1.Finish(monsterT1.Pack(b1))\n CheckReadBuffer(b1.Bytes, b1.Head(), sizePrefix)\n\n # Deserializes the buffer into Python object again.\n monster2 = _MONSTER.Monster.GetRootAs(b1.Bytes, b1.Head())\n # Re-serializes the data into a buffer for one more time.\n monsterT2 = _MONSTER.MonsterT.InitFromObj(monster2)\n for sizePrefix in [True, False]:\n # Re-serializes the data into a buffer\n b2 = flatbuffers.Builder(0)\n if sizePrefix:\n b2.FinishSizePrefixed(monsterT2.Pack(b2))\n else:\n b2.Finish(monsterT2.Pack(b2))\n CheckReadBuffer(b2.Bytes, b2.Head(), sizePrefix)",
"def _decode_frame(self):\n\n self._processed.eth_frame.log(level=logging_helper.INFO)\n\n # Parse IP packets, protocol=0x8\n if hex(self._processed.eth_frame.protocol) == u'0x8':\n self._processed.ip_frame = IPFrame(self._processed.eth_frame.payload)\n self._processed.ip_frame.log(level=logging_helper.INFO)\n\n if self._processed.ip_frame.payload is not None:\n self._processed.ip_frame.payload.log(level=logging_helper.INFO)\n\n else:\n logging.info(u'Not an IP payload')\n\n logging.info(self._processed)",
"def testSplit(self):\n\n protocol_factory = TBinaryProtocol.TBinaryProtocolAcceleratedFactory()\n bigstring = \"\".join(chr(byte) for byte in range(ord(\"a\"), ord(\"z\")+1))\n\n databuf = TTransport.TMemoryBuffer()\n prot = protocol_factory.getProtocol(databuf)\n prot.writeI32(42)\n prot.writeString(bigstring)\n prot.writeI16(24)\n data = databuf.getvalue()\n cutpoint = len(data)/2\n parts = [ data[:cutpoint], data[cutpoint:] ]\n\n framed_buffer = TTransport.TMemoryBuffer()\n framed_writer = TTransport.TFramedTransport(framed_buffer)\n for part in parts:\n framed_writer.write(part)\n framed_writer.flush()\n self.assertEquals(len(framed_buffer.getvalue()), len(data) + 8)\n\n # Recreate framed_buffer so we can read from it.\n framed_buffer = TTransport.TMemoryBuffer(framed_buffer.getvalue())\n framed_reader = TTransport.TFramedTransport(framed_buffer)\n prot = protocol_factory.getProtocol(framed_reader)\n self.assertEqual(prot.readI32(), 42)\n self.assertEqual(prot.readString(), bigstring)\n self.assertEqual(prot.readI16(), 24)",
"def test_default_values_with_pack_and_unpack(self):\n # Creates a flatbuffer with default values.\n b1 = flatbuffers.Builder(0)\n _MONSTER.MonsterStart(b1)\n gen_mon = _MONSTER.MonsterEnd(b1)\n b1.Finish(gen_mon)\n\n # Converts the flatbuffer into the object class.\n monster1 = _MONSTER.Monster.GetRootAs(b1.Bytes, b1.Head())\n monsterT1 = _MONSTER.MonsterT.InitFromObj(monster1)\n\n # Packs the object class into another flatbuffer.\n b2 = flatbuffers.Builder(0)\n b2.Finish(monsterT1.Pack(b2))\n monster2 = _MONSTER.Monster.GetRootAs(b2.Bytes, b2.Head())\n # Checks the default values.\n self.assertTrue(monster2.Pos() is None)\n self.assertEqual(monster2.Mana(), 150)\n self.assertEqual(monster2.Hp(), 100)\n self.assertTrue(monster2.Name() is None)\n self.assertEqual(monster2.Inventory(0), 0)\n self.assertEqual(monster2.InventoryAsNumpy(), 0)\n self.assertEqual(monster2.InventoryLength(), 0)\n self.assertTrue(monster2.InventoryIsNone())\n self.assertEqual(monster2.Color(), 8)\n self.assertEqual(monster2.TestType(), 0)\n self.assertTrue(monster2.Test() is None)\n self.assertTrue(monster2.Test4(0) is None)\n self.assertEqual(monster2.Test4Length(), 0)\n self.assertTrue(monster2.Test4IsNone())\n self.assertEqual(monster2.Testarrayofstring(0), '')\n self.assertEqual(monster2.TestarrayofstringLength(), 0)\n self.assertTrue(monster2.TestarrayofstringIsNone())\n self.assertTrue(monster2.Testarrayoftables(0) is None)\n self.assertEqual(monster2.TestarrayoftablesLength(), 0)\n self.assertTrue(monster2.TestarrayoftablesIsNone())\n self.assertTrue(monster2.Enemy() is None)\n self.assertEqual(monster2.Testnestedflatbuffer(0), 0)\n self.assertEqual(monster2.TestnestedflatbufferAsNumpy(), 0)\n self.assertEqual(monster2.TestnestedflatbufferLength(), 0)\n self.assertTrue(monster2.TestnestedflatbufferIsNone())\n self.assertTrue(monster2.Testempty() is None)\n self.assertFalse(monster2.Testbool())\n self.assertEqual(monster2.Testhashs32Fnv1(), 0)\n self.assertEqual(monster2.Testhashu32Fnv1(), 0)\n self.assertEqual(monster2.Testhashs64Fnv1(), 0)\n self.assertEqual(monster2.Testhashu64Fnv1(), 0)\n self.assertEqual(monster2.Testhashs32Fnv1a(), 0)\n self.assertEqual(monster2.Testhashu32Fnv1a(), 0)\n self.assertEqual(monster2.Testhashs64Fnv1a(), 0)\n self.assertEqual(monster2.Testhashu64Fnv1a(), 0)\n self.assertEqual(monster2.Testarrayofbools(0), 0)\n self.assertEqual(monster2.TestarrayofboolsAsNumpy(), 0)\n self.assertEqual(monster2.TestarrayofboolsLength(), 0)\n self.assertTrue(monster2.TestarrayofboolsIsNone())\n self.assertEqual(monster2.Testf(), 3.14159)\n self.assertEqual(monster2.Testf2(), 3.0)\n self.assertEqual(monster2.Testf3(), 0.0)\n self.assertEqual(monster2.Testarrayofstring2(0), '')\n self.assertEqual(monster2.Testarrayofstring2Length(), 0)\n self.assertTrue(monster2.Testarrayofstring2IsNone())\n self.assertTrue(monster2.Testarrayofsortedstruct(0) is None)\n self.assertEqual(monster2.TestarrayofsortedstructLength(), 0)\n self.assertTrue(monster2.TestarrayofsortedstructIsNone())\n self.assertEqual(monster2.Flex(0), 0)\n self.assertEqual(monster2.FlexAsNumpy(), 0)\n self.assertEqual(monster2.FlexLength(), 0)\n self.assertTrue(monster2.FlexIsNone())\n self.assertTrue(monster2.Test5(0) is None)\n self.assertEqual(monster2.Test5Length(), 0)\n self.assertTrue(monster2.Test5IsNone())\n self.assertEqual(monster2.VectorOfLongs(0), 0)\n self.assertEqual(monster2.VectorOfLongsAsNumpy(), 0)\n self.assertEqual(monster2.VectorOfLongsLength(), 0)\n self.assertTrue(monster2.VectorOfLongsIsNone())\n 
self.assertEqual(monster2.VectorOfDoubles(0), 0)\n self.assertEqual(monster2.VectorOfDoublesAsNumpy(), 0)\n self.assertEqual(monster2.VectorOfDoublesLength(), 0)\n self.assertTrue(monster2.VectorOfDoublesIsNone())\n self.assertTrue(monster2.ParentNamespaceTest() is None)\n self.assertTrue(monster2.VectorOfReferrables(0) is None)\n self.assertEqual(monster2.VectorOfReferrablesLength(), 0)\n self.assertTrue(monster2.VectorOfReferrablesIsNone())\n self.assertEqual(monster2.SingleWeakReference(), 0)\n self.assertEqual(monster2.VectorOfWeakReferences(0), 0)\n self.assertEqual(monster2.VectorOfWeakReferencesAsNumpy(), 0)\n self.assertEqual(monster2.VectorOfWeakReferencesLength(), 0)\n self.assertTrue(monster2.VectorOfWeakReferencesIsNone())\n self.assertTrue(monster2.VectorOfStrongReferrables(0) is None)\n self.assertEqual(monster2.VectorOfStrongReferrablesLength(), 0)\n self.assertTrue(monster2.VectorOfStrongReferrablesIsNone())\n self.assertEqual(monster2.CoOwningReference(), 0)\n self.assertEqual(monster2.VectorOfCoOwningReferences(0), 0)\n self.assertEqual(monster2.VectorOfCoOwningReferencesAsNumpy(), 0)\n self.assertEqual(monster2.VectorOfCoOwningReferencesLength(), 0)\n self.assertTrue(monster2.VectorOfCoOwningReferencesIsNone())\n self.assertEqual(monster2.NonOwningReference(), 0)\n self.assertEqual(monster2.VectorOfNonOwningReferences(0), 0)\n self.assertEqual(monster2.VectorOfNonOwningReferencesAsNumpy(), 0)\n self.assertEqual(monster2.VectorOfNonOwningReferencesLength(), 0)\n self.assertTrue(monster2.VectorOfNonOwningReferencesIsNone())\n self.assertEqual(monster2.AnyUniqueType(), 0)\n self.assertTrue(monster2.AnyUnique() is None)\n self.assertEqual(monster2.AnyAmbiguousType(), 0)\n self.assertTrue(monster2.AnyAmbiguous() is None)\n self.assertEqual(monster2.VectorOfEnums(0), 0)\n self.assertEqual(monster2.VectorOfEnumsAsNumpy(), 0)\n self.assertEqual(monster2.VectorOfEnumsLength(), 0)\n self.assertTrue(monster2.VectorOfEnumsIsNone())",
"def test_unpack_dataframe(self, batched_df, expected):\n unpacked_list = _BatchingManager.split_dataframe(batched_df, 1)\n assert len(unpacked_list) == 1\n # On windows, conversion dtype is not preserved.\n check_dtype = not os.name == \"nt\"\n pd.testing.assert_frame_equal(\n unpacked_list[0].reset_index(drop=True),\n expected.reset_index(drop=True),\n check_dtype=check_dtype,\n )",
"def frames():\n raise RuntimeError('Must be implemented by subclasses.')",
"def test_encoding_on_pack_big_endian(self):\n\n try:\n rfh2 = pymqi.RFH2()\n self.assertEqual(rfh2.pack(encoding=CMQC.MQENC_FLOAT_S390)[4:8], b\"\\x00\\x00\\x00\\x02\")\n self.assertEqual(rfh2.pack(encoding=CMQC.MQENC_INTEGER_NORMAL)[4:8], b\"\\x00\\x00\\x00\\x02\")\n self.assertEqual(rfh2.pack(encoding=CMQC.MQENC_DECIMAL_NORMAL)[4:8], b\"\\x00\\x00\\x00\\x02\")\n self.assertEqual(rfh2.pack(encoding=CMQC.MQENC_FLOAT_IEEE_NORMAL)[4:8], b\"\\x00\\x00\\x00\\x02\")\n self.assertEqual(rfh2.pack(encoding=CMQC.MQENC_INTEGER_NORMAL + CMQC.MQENC_DECIMAL_NORMAL)[4:8], b\"\\x00\\x00\\x00\\x02\")\n self.assertEqual(rfh2.pack(encoding=CMQC.MQENC_INTEGER_NORMAL + CMQC.MQENC_FLOAT_IEEE_NORMAL)[4:8], b\"\\x00\\x00\\x00\\x02\")\n self.assertEqual(rfh2.pack(encoding=CMQC.MQENC_DECIMAL_NORMAL + CMQC.MQENC_FLOAT_IEEE_NORMAL)[4:8], b\"\\x00\\x00\\x00\\x02\")\n self.assertEqual(rfh2.pack(encoding=CMQC.MQENC_INTEGER_NORMAL + CMQC.MQENC_DECIMAL_NORMAL + CMQC.MQENC_FLOAT_IEEE_NORMAL)[4:8], b\"\\x00\\x00\\x00\\x02\")\n except Exception as e:\n self.fail(e)",
"def test_encoding_on_pack_small_endian(self):\n\n try:\n rfh2 = pymqi.RFH2()\n self.assertEqual(rfh2.pack()[4:8], b\"\\x02\\x00\\x00\\x00\")\n self.assertEqual(rfh2.pack(encoding=CMQC.MQENC_NATIVE)[4:8], b\"\\x02\\x00\\x00\\x00\")\n self.assertEqual(rfh2.pack(encoding=CMQC.MQENC_INTEGER_REVERSED)[4:8], b\"\\x02\\x00\\x00\\x00\")\n self.assertEqual(rfh2.pack(encoding=CMQC.MQENC_DECIMAL_REVERSED)[4:8], b\"\\x02\\x00\\x00\\x00\")\n self.assertEqual(rfh2.pack(encoding=CMQC.MQENC_FLOAT_IEEE_REVERSED)[4:8], b\"\\x02\\x00\\x00\\x00\")\n self.assertEqual(rfh2.pack(encoding=CMQC.MQENC_INTEGER_REVERSED + CMQC.MQENC_DECIMAL_REVERSED)[4:8], b\"\\x02\\x00\\x00\\x00\")\n self.assertEqual(rfh2.pack(encoding=CMQC.MQENC_INTEGER_REVERSED + CMQC.MQENC_FLOAT_IEEE_REVERSED)[4:8], b\"\\x02\\x00\\x00\\x00\")\n self.assertEqual(rfh2.pack(encoding=CMQC.MQENC_DECIMAL_REVERSED + CMQC.MQENC_FLOAT_IEEE_REVERSED)[4:8], b\"\\x02\\x00\\x00\\x00\")\n except Exception as e:\n self.fail(e)",
"def isframe(object):\r\n return isinstance(object, types.FrameType)",
"def build_frame(self, message):\r\n header = BytesIO()\r\n if 0x3 <= self.opcode <= 0x7 or 0xB <= self.opcode:\r\n raise WebSocketProtocolError('Opcode cannot be a reserved opcode')\r\n ## +-+-+-+-+-------+\r\n ## |F|R|R|R| opcode|\r\n ## |I|S|S|S| (4) |\r\n ## |N|V|V|V| |\r\n ## | |1|2|3| |\r\n ## +-+-+-+-+-------+\r\n header.write(i2b(((self.fin << 7)\r\n | (self.rsv1 << 6)\r\n | (self.rsv2 << 5)\r\n | (self.rsv3 << 4)\r\n | self.opcode)))\r\n ## +-+-------------+-------------------------------+\r\n ## |M| Payload len | Extended payload length |\r\n ## |A| (7) | (16/63) |\r\n ## |S| | (if payload len==126/127) |\r\n ## |K| | |\r\n ## +-+-+-+-+-------+-+-------------+ - - - - - - - - - - - - - - - +\r\n ## | Extended payload length continued, if payload len == 127 |\r\n ## + - - - - - - - - - - - - - - - +-------------------------------+\r\n if self.masking_key:\r\n mask_bit = 1 << 7\r\n else:\r\n mask_bit = 0\r\n length = self.payload_length \r\n if length < 126:\r\n header.write(i2b(mask_bit | length))\r\n elif length < (1 << 16):\r\n header.write(i2b(mask_bit | 126))\r\n header.write(pack('!H', length))\r\n elif length < (1 << 63):\r\n header.write(i2b(mask_bit | 127))\r\n header.write(pack('!Q', length))\r\n else:\r\n raise WebSocketProtocolError('Frame too large')\r\n ## + - - - - - - - - - - - - - - - +-------------------------------+\r\n ## | |Masking-key, if MASK set to 1 |\r\n ## +-------------------------------+-------------------------------+\r\n ## | Masking-key (continued) | Payload Data |\r\n ## +-------------------------------- - - - - - - - - - - - - - - - +\r\n ## : Payload Data continued ... :\r\n ## + - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +\r\n ## | Payload Data continued ... |\r\n ## +---------------------------------------------------------------+\r\n if not self.masking_key:\r\n header.write(message)\r\n else:\r\n header.write(self.masking_key)\r\n header.write(self.mask(message))\r\n return header.getvalue()",
"def test_decode():",
"def is_struct(self):\n return False",
"def unpack(self, packedData):\n raise NotImplementedError, 'Method need to be overriden'"
] |
[
"0.6719955",
"0.6473041",
"0.63044375",
"0.6166511",
"0.57701236",
"0.57502604",
"0.56620747",
"0.55210936",
"0.55098057",
"0.54613936",
"0.54554623",
"0.54554623",
"0.54369926",
"0.5431739",
"0.5402827",
"0.53195024",
"0.5303696",
"0.5282925",
"0.5262848",
"0.5235965",
"0.52233565",
"0.5196982",
"0.51702195",
"0.5126689",
"0.512528",
"0.5124504",
"0.5122837",
"0.5112912",
"0.51026154",
"0.50899917"
] |
0.7690478
|
0
|
Testing pack, unpacking and the Frame class.
|
def testFramepack2(self):
# Check bad frame generation:
frame = stomper.Frame()
frame.cmd = 'DISCONNECT'
result = frame.pack()
correct = 'DISCONNECT\n\n\x00\n'
self.assertEqual(result, correct)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def testFramepack1(self):\n # Check bad frame generation:\n frame = stomper.Frame()\n\n def bad():\n frame.cmd = 'SOME UNNOWN CMD'\n\n self.assertRaises(stomper.FrameError, bad)\n\n # Generate a MESSAGE frame:\n frame = stomper.Frame()\n frame.cmd = 'MESSAGE'\n frame.headers['destination'] = '/queue/a'\n frame.headers['message-id'] = 'card_data'\n frame.body = \"hello queue a\"\n result = frame.pack()\n\n# print \"\\n-- result \" + \"----\" * 10\n# pprint.pprint(result)\n# print\n\n # Try bad message unpack catching:\n bad_frame = stomper.Frame()\n self.assertRaises(stomper.FrameError, bad_frame.unpack, None)\n self.assertRaises(stomper.FrameError, bad_frame.unpack, '')\n\n # Try to read the generated frame back in\n # and then check the variables are set up\n # correctly:\n frame2 = stomper.Frame()\n frame2.unpack(result)\n\n self.assertEqual(frame2.cmd, 'MESSAGE')\n self.assertEqual(frame2.headers['destination'], '/queue/a')\n self.assertEqual(frame2.headers['message-id'], 'card_data')\n self.assertEqual(frame2.body, 'hello queue a')\n result = frame2.pack()\n\n correct = \"MESSAGE\\ndestination:/queue/a\\nmessage-id:card_data\\n\\nhello queue a\\x00\\n\"\n\n# print \"result: \"\n# pprint.pprint(result)\n# print\n# print \"correct: \"\n# pprint.pprint(correct)\n# print\n#\n self.assertEqual(result, correct)\n\n result = stomper.unpack_frame(result)\n\n self.assertEqual(result['cmd'], 'MESSAGE')\n self.assertEqual(result['headers']['destination'], '/queue/a')\n self.assertEqual(result['headers']['message-id'], 'card_data')\n self.assertEqual(result['body'], 'hello queue a')",
"def testFrameUnpack2(self):\n msg = \"\"\"MESSAGE\ndestination:/queue/a\nmessage-id: card_data\n\nhello queue a\"\"\"\n\n result = stomper.unpack_frame(msg)\n\n self.assertEqual(result['cmd'], 'MESSAGE')\n self.assertEqual(result['headers']['destination'], '/queue/a')\n self.assertEqual(result['headers']['message-id'], 'card_data')\n self.assertEqual(result['body'], 'hello queue a')",
"def test_frame_rw(self):\n from ..io.frame import read_frame, write_frame, read_meta_frame\n from ..io.fibermap import empty_fibermap\n nspec, nwave, ndiag = 5, 10, 3\n flux = np.random.uniform(size=(nspec, nwave))\n ivar = np.random.uniform(size=(nspec, nwave))\n meta = dict(BLAT=0, FOO='abc', FIBERMIN=500, FLAVOR='science')\n mask_int = np.zeros((nspec, nwave), dtype=int)\n mask_uint = np.zeros((nspec, nwave), dtype=np.uint32)\n wave = np.arange(nwave)\n R = np.random.uniform( size=(nspec, ndiag, nwave) )\n\n for mask in (mask_int, mask_uint):\n frx = Frame(wave, flux, ivar, mask, R, meta=meta)\n write_frame(self.testfile, frx)\n frame = read_frame(self.testfile)\n read_meta = read_meta_frame(self.testfile)\n\n flux2 = flux.astype('f4').astype('f8')\n ivar2 = ivar.astype('f4').astype('f8')\n wave2 = wave.astype('f4').astype('f8')\n R2 = R.astype('f4').astype('f8')\n\n self.assertTrue(frame.wave.dtype == np.float64)\n self.assertTrue(frame.flux.dtype == np.float64)\n self.assertTrue(frame.ivar.dtype == np.float64)\n self.assertTrue(frame.resolution_data.dtype == np.float64)\n\n self.assertTrue(np.all(flux2 == frame.flux))\n self.assertTrue(np.all(ivar2 == frame.ivar))\n self.assertTrue(np.all(wave2 == frame.wave))\n self.assertTrue(np.all(mask == frame.mask))\n self.assertTrue(np.all(R2 == frame.resolution_data))\n self.assertTrue(frame.resolution_data.dtype.isnative)\n self.assertEqual(frame.meta['BLAT'], meta['BLAT'])\n self.assertEqual(frame.meta['FOO'], meta['FOO'])\n self.assertEqual(frame.meta['BLAT'], read_meta['BLAT'])\n self.assertEqual(frame.meta['FOO'], read_meta['FOO'])\n\n #- read_frame works even with \"wrong\" .fits / .fits.gz\n if self.testfile.endswith('.fits'):\n frame = read_frame(self.testfile + '.gz') # finds file anyway\n elif self.testfile.endswith('.fits.gz'):\n frame = read_frame(self.testfile[:-3]) # finds file anyway\n else:\n raise ValueError(f'unrecognized extension for {self.testfile=}')\n\n #- Test float32 on disk vs. float64 in memory\n for extname in ['FLUX', 'IVAR', 'RESOLUTION']:\n data = fits.getdata(self.testfile, extname)\n self.assertEqual(data.dtype, np.dtype('>f4'), '{} not type >f4'.format(extname))\n for extname in ['WAVELENGTH']:\n data = fits.getdata(self.testfile, extname)\n self.assertEqual(data.dtype, np.dtype('>f8'), '{} not type >f8'.format(extname))\n\n #- with and without units\n frx = Frame(wave, flux, ivar, mask, R, meta=meta)\n write_frame(self.testfile, frx)\n frame = read_frame(self.testfile)\n self.assertTrue('BUNIT' not in frame.meta)\n write_frame(self.testfile, frx, units='photon/bin')\n frame = read_frame(self.testfile)\n self.assertEqual(frame.meta['BUNIT'], 'photon/bin')\n frx.meta['BUNIT'] = 'blatfoo'\n write_frame(self.testfile, frx)\n frame = read_frame(self.testfile)\n self.assertEqual(frame.meta['BUNIT'], 'blatfoo')\n #- function argument trumps pre-existing BUNIT\n write_frame(self.testfile, frx, units='quat')\n frame = read_frame(self.testfile)\n self.assertEqual(frame.meta['BUNIT'], 'quat')\n\n #- with and without fibermap\n self.assertEqual(frame.fibermap, None)\n fibermap = empty_fibermap(nspec)\n fibermap['TARGETID'] = np.arange(nspec)*2\n frx = Frame(wave, flux, ivar, mask, R, fibermap=fibermap, meta=dict(FLAVOR='science'))\n write_frame(self.testfile, frx)\n frame = read_frame(self.testfile)\n for name in fibermap.dtype.names:\n match = np.all(fibermap[name] == frame.fibermap[name])\n self.assertTrue(match, 'Fibermap column {} mismatch'.format(name))",
"def test_construct_frame_tag(attributes):\n frame_ = Frame(**attributes)\n assert frame_.construct() == frame.render(attributes)",
"def _pack(self):\n pass",
"def test_FRAME_DUoperations():\n tmFrame0 = CCSDS.FRAME.TMframe()\n print(\"tmFrame0 =\", tmFrame0)\n print(\"\")\n tmFrame1 = CCSDS.FRAME.TMframe(testData.TM_FRAME_01)\n print(\"tmFrame1 =\", tmFrame1)\n if tmFrame1.versionNumber != testData.TM_FRAME_01_versionNumber:\n print(\"tmFrame1 versionNumber wrong:\", tmFrame1.versionNumber, \"- should be\", testData.TM_FRAME_01_versionNumber)\n return False\n if tmFrame1.spacecraftId != testData.TM_FRAME_01_spacecraftId:\n print(\"tmFrame1 spacecraftId wrong:\", tmFrame1.spacecraftId, \"- should be\", testData.TM_FRAME_01_spacecraftId)\n return False\n if tmFrame1.virtualChannelId != testData.TM_FRAME_01_virtualChannelId:\n print(\"tmFrame1 virtualChannelId wrong:\", tmFrame1.virtualChannelId, \"- should be\", testData.TM_FRAME_01_virtualChannelId)\n return False\n if tmFrame1.operationalControlField != testData.TM_FRAME_01_operationalControlField:\n print(\"tmFrame1 operationalControlField wrong:\", tmFrame1.operationalControlField, \"- should be\", testData.TM_FRAME_01_operationalControlField)\n return False\n if tmFrame1.masterChannelFrameCount != testData.TM_FRAME_01_masterChannelFrameCount:\n print(\"tmFrame1 masterChannelFrameCount wrong:\", tmFrame1.masterChannelFrameCount, \"- should be\", testData.TM_FRAME_01_masterChannelFrameCount)\n return False\n if tmFrame1.virtualChannelFCountLow != testData.TM_FRAME_01_virtualChannelFCountLow:\n print(\"tmFrame1 virtualChannelFCountLow wrong:\", tmFrame1.virtualChannelFCountLow, \"- should be\", testData.TM_FRAME_01_virtualChannelFCountLow)\n return False\n if tmFrame1.secondaryHeaderFlag != testData.TM_FRAME_01_secondaryHeaderFlag:\n print(\"tmFrame1 secondaryHeaderFlag wrong:\", tmFrame1.secondaryHeaderFlag, \"- should be\", testData.TM_FRAME_01_secondaryHeaderFlag)\n return False\n if tmFrame1.synchronisationFlag != testData.TM_FRAME_01_synchronisationFlag:\n print(\"tmFrame1 synchronisationFlag wrong:\", tmFrame1.synchronisationFlag, \"- should be\", testData.TM_FRAME_01_synchronisationFlag)\n return False\n if tmFrame1.packetOrderFlag != testData.TM_FRAME_01_packetOrderFlag:\n print(\"tmFrame1 packetOrderFlag wrong:\", tmFrame1.packetOrderFlag, \"- should be\", testData.TM_FRAME_01_packetOrderFlag)\n return False\n if tmFrame1.segmentLengthId != testData.TM_FRAME_01_segmentLengthId:\n print(\"tmFrame1 segmentLengthId wrong:\", tmFrame1.segmentLengthId, \"- should be\", testData.TM_FRAME_01_segmentLengthId)\n return False\n if tmFrame1.firstHeaderPointer != testData.TM_FRAME_01_firstHeaderPointer:\n print(\"tmFrame1 firstHeaderPointer wrong:\", tmFrame1.firstHeaderPointer, \"- should be\", testData.TM_FRAME_01_firstHeaderPointer)\n return False\n # extract packets and check it\n leadingFragment, packets, trailingFragment = tmFrame1.getPackets()\n if leadingFragment != testData.TM_FRAME_01_leadingFragment:\n print(\"tmFrame1 leadingFragment wrong:\", leadingFragment, \"- should be\", testData.TM_FRAME_01_leadingFragment)\n return False\n if len(packets) != testData.TM_FRAME_01_nrPackets:\n print(\"tmFrame1 nr. 
of packets wrong:\", len(packets), \"- should be\", testData.TM_FRAME_01_nrPackets)\n return False\n if trailingFragment != testData.TM_FRAME_01_trailingFragment:\n print(\"tmFrame1 trailingFragment wrong:\", trailingFragment, \"- should be\", testData.TM_FRAME_01_trailingFragment)\n return False\n print(\"\")\n tcFrame1 = CCSDS.FRAME.TCframe(testData.TC_FRAME_01)\n print(\"tcFrame1 =\", tcFrame1)\n if tcFrame1.versionNumber != testData.TC_FRAME_01_versionNumber:\n print(\"tcFrame1 versionNumber wrong:\", tcFrame1.versionNumber, \"- should be\", testData.TC_FRAME_01_versionNumber)\n return False\n if tcFrame1.reservedFieldB != testData.TC_FRAME_01_reservedFieldB:\n print(\"tcFrame1 reservedFieldB wrong:\", tcFrame1.reservedFieldB, \"- should be\", testData.TC_FRAME_01_reservedFieldB)\n return False\n if tcFrame1.virtualChannelId != testData.TC_FRAME_01_virtualChannelId:\n print(\"tcFrame1 virtualChannelId wrong:\", tcFrame1.virtualChannelId, \"- should be\", testData.TC_FRAME_01_virtualChannelId)\n return False\n if tcFrame1.controlCommandFlag != testData.TC_FRAME_01_controlCommandFlag:\n print(\"tcFrame1 controlCommandFlag wrong:\", tcFrame1.controlCommandFlag, \"- should be\", testData.TC_FRAME_01_controlCommandFlag)\n return False\n if tcFrame1.reservedFieldA != testData.TC_FRAME_01_reservedFieldA:\n print(\"tcFrame1 reservedFieldA wrong:\", tcFrame1.reservedFieldA, \"- should be\", testData.TC_FRAME_01_reservedFieldA)\n return False\n if tcFrame1.frameLength != testData.TC_FRAME_01_frameLength:\n print(\"tcFrame1 frameLength wrong:\", tcFrame1.frameLength, \"- should be\", testData.TC_FRAME_01_frameLength)\n return False\n if tcFrame1.sequenceNumber != testData.TC_FRAME_01_sequenceNumber:\n print(\"tcFrame1 sequenceNumber wrong:\", tcFrame1.sequenceNumber, \"- should be\", testData.TC_FRAME_01_sequenceNumber)\n return False\n if tcFrame1.spacecraftId != testData.TC_FRAME_01_spacecraftId:\n print(\"tcFrame1 spacecraftId wrong:\", tcFrame1.spacecraftId, \"- should be\", testData.TC_FRAME_01_spacecraftId)\n return False\n if tcFrame1.bypassFlag != testData.TC_FRAME_01_bypassFlag:\n print(\"tcFrame1 bypassFlag wrong:\", tcFrame1.bypassFlag, \"- should be\", testData.TC_FRAME_01_bypassFlag)\n return False\n tcFrame2 = CCSDS.FRAME.TCframe(testData.TC_FRAME_02)\n if tcFrame2.versionNumber != testData.TC_FRAME_02_versionNumber:\n print(\"tcFrame2 versionNumber wrong:\", tcFrame2.versionNumber, \"- should be\", testData.TC_FRAME_02_versionNumber)\n return False\n if tcFrame2.reservedFieldB != testData.TC_FRAME_02_reservedFieldB:\n print(\"tcFrame2 reservedFieldB wrong:\", tcFrame2.reservedFieldB, \"- should be\", testData.TC_FRAME_02_reservedFieldB)\n return False\n if tcFrame2.virtualChannelId != testData.TC_FRAME_02_virtualChannelId:\n print(\"tcFrame2 virtualChannelId wrong:\", tcFrame2.virtualChannelId, \"- should be\", testData.TC_FRAME_02_virtualChannelId)\n return False\n if tcFrame2.controlCommandFlag != testData.TC_FRAME_02_controlCommandFlag:\n print(\"tcFrame2 controlCommandFlag wrong:\", tcFrame2.controlCommandFlag, \"- should be\", testData.TC_FRAME_02_controlCommandFlag)\n return False\n if tcFrame2.reservedFieldA != testData.TC_FRAME_02_reservedFieldA:\n print(\"tcFrame2 reservedFieldA wrong:\", tcFrame2.reservedFieldA, \"- should be\", testData.TC_FRAME_02_reservedFieldA)\n return False\n if tcFrame2.frameLength != testData.TC_FRAME_02_frameLength:\n print(\"tcFrame2 frameLength wrong:\", tcFrame2.frameLength, \"- should be\", testData.TC_FRAME_02_frameLength)\n return False\n 
if tcFrame2.sequenceNumber != testData.TC_FRAME_02_sequenceNumber:\n print(\"tcFrame2 sequenceNumber wrong:\", tcFrame2.sequenceNumber, \"- should be\", testData.TC_FRAME_02_sequenceNumber)\n return False\n if tcFrame2.spacecraftId != testData.TC_FRAME_02_spacecraftId:\n print(\"tcFrame2 spacecraftId wrong:\", tcFrame2.spacecraftId, \"- should be\", testData.TC_FRAME_02_spacecraftId)\n return False\n if tcFrame2.bypassFlag != testData.TC_FRAME_02_bypassFlag:\n print(\"tcFrame2 bypassFlag wrong:\", tcFrame2.bypassFlag, \"- should be\", testData.TC_FRAME_02_bypassFlag)\n return False\n clcw = CCSDS.FRAME.CLCW()\n print(\"clcw =\", clcw)\n return True",
"def testFrameUnpack3(self):\n msg = \"\"\"CONNECTED\nsession:ID:snorky.local-49191-1185461799654-3:18\n\"\"\"\n result = stomper.unpack_frame(msg)\n\n self.assertEqual(result['cmd'], 'CONNECTED')\n self.assertEqual(result['headers']['session'], 'ID:snorky.local-49191-1185461799654-3:18')\n self.assertEqual(result['body'], '')",
"def test_decode(self):\n pass # TODO(tlarsen)",
"def test_strframe():\n obj = pmisc.strframe\n\n def check_basic_frame(lines):\n fname = pmisc.normalize_windows_fname(os.path.realpath(__file__))\n assert lines[0].startswith(\"\\x1b[33mFrame object ID: 0x\")\n assert lines[1].startswith(\n \"File name......: {0}\".format(fname.replace(\".pyc\", \".py\"))\n )\n assert lines[2].startswith(\"Line number....: \")\n assert lines[3] == \"Function name..: test_strframe\"\n assert lines[4] == r\"Context........: [' fobj = inspect.stack()[0]\\n']\"\n assert lines[5] == \"Index..........: 0\"\n\n fobj = inspect.stack()[0]\n lines = obj(fobj).split(\"\\n\")\n check_basic_frame(lines)\n assert len(lines) == 6\n lines = [\n line\n for num, line in enumerate(obj(fobj, extended=True).split(\"\\n\"))\n if (num < 6) or line.startswith(\"f_\")\n ]\n check_basic_frame(lines)\n assert lines[6].startswith(\"f_back ID......: 0x\")\n assert lines[7].startswith(\"f_builtins.....: {\")\n assert lines[8].startswith(\"f_code.........: \" \"<code object test_strframe at \")\n assert lines[9].startswith(\"f_globals......: {\")\n assert lines[10].startswith(\"f_lasti........: \")\n assert lines[11].startswith(\"f_lineno.......: \")\n assert lines[12].startswith(\"f_locals.......: {\")\n if sys.hexversion < 0x03000000:\n assert lines[13] == \"f_restricted...: False\"\n assert lines[14].startswith(\"f_trace........: \")\n assert len(lines) == 15\n else:\n assert lines[13].startswith(\"f_trace........: \")\n assert len(lines) == 14",
"def test_frame_change(self):\n converter = InstructionToQobjConverter(PulseQobjInstruction, meas_level=2)\n command = FrameChange(phase=0.1)\n instruction = command(self.device.q[0].drive)\n\n valid_qobj = PulseQobjInstruction(\n name='fc',\n ch='d0',\n t0=0,\n phase=0.1\n )\n\n self.assertEqual(converter(0, instruction), valid_qobj)",
"def __post_init__(self) -> None:\n _validate_struct_class(self.struct_class)",
"def __post_init__(self) -> None:\n _validate_struct_class(self.struct_class)",
"def capture_frame(self):\n\n # search for the sync bytes which indicate the start of one frame\n # sync_bytes = [None, None, None, None]\n # while True:\n # sync_bytes[3] = sync_bytes[2]\n # sync_bytes[2] = sync_bytes[1]\n # sync_bytes[1] = sync_bytes[0]\n # sync_bytes[0] = binascii.hexlify(self.ser.read())\n #\n # # check the content\n # try:\n # if (sync_bytes[0] + sync_bytes[1] + sync_bytes[2]\n # + sync_bytes[3] == b'fffefdfc'):\n # print(\"Frame captured!\")\n # break\n # except TypeError:\n # pass\n\n while not self.lookup_sync():\n pass\n\n # print('Frame captured!')\n self.msg_size = int.from_bytes(self.ser.read(2),\n byteorder='little',\n signed=True)\n # print('Msg Size: {}'.format(self.msg_size))\n\n # raw message info (cmd + options + data)\n self.message = self.ser.read(self.msg_size)\n\n # command info\n self.cmd = int.from_bytes(self.message[:2], byteorder='little', signed=False)\n\n # raw data info (plane_num + distance values)\n self.p3_msg = self.message[-549:]\n self.p1_msg = self.message[-1098: -549]\n self.p4_msg = self.message[-1647: -1098]\n self.p2_msg = self.message[-2196: -1647]\n # print(len(self.p3_msg), len(self.p1_msg), len(self.p4_msg), len(self.p2_msg))\n\n # examine the msg size\n try:\n assert (self.p3_msg[0], self.p1_msg[0], self.p4_msg[0], self.p2_msg[0]) \\\n == (3, 2, 1, 0), \"Fail to interpret the msg\"\n except AssertionError:\n # print(\"error\\n\\n\")\n return -1\n\n # convert bytes to integers (ignore the plane_num)\n self.p3_dists = [int.from_bytes([self.p3_msg[2 * i + 1], self.p3_msg[2 * i + 2]],\n byteorder='little', signed=True)\n for i in range((len(self.p3_msg) - 1) // 2)]\n self.p1_dists = [int.from_bytes([self.p1_msg[2 * i + 1], self.p1_msg[2 * i + 2]],\n byteorder='little', signed=True)\n for i in range((len(self.p1_msg) - 1) // 2)]\n self.p4_dists = [int.from_bytes([self.p4_msg[2 * i + 1], self.p4_msg[2 * i + 2]],\n byteorder='little', signed=True)\n for i in range((len(self.p4_msg) - 1) // 2)]\n self.p2_dists = [int.from_bytes([self.p2_msg[2 * i + 1], self.p2_msg[2 * i + 2]],\n byteorder='little', signed=True)\n for i in range((len(self.p2_msg) - 1) // 2)]\n\n # convert list into np array for further processing\n self.p3_dists = np.asarray(self.p3_dists).astype('float32').reshape(274, 1)\n self.p1_dists = np.asarray(self.p1_dists).astype('float32').reshape(274, 1)\n self.p4_dists = np.asarray(self.p4_dists).astype('float32').reshape(274, 1)\n self.p2_dists = np.asarray(self.p2_dists).astype('float32').reshape(274, 1)\n\n # print(self.p3_dists[132:142])\n # print(self.p1_dists[132:142])\n # print(self.p4_dists[132:142])\n # print(self.p2_dists[132:142])\n\n # Compute the position info\n # print(self.converter)\n # print(self.thetas.shape)\n self.p3_points = self.converter * np.array([[np.cos(self.alphas[2]), np.cos(self.alphas[2]),\n np.sin(self.alphas[2])]], dtype='float32') * self.p3_dists\n self.p1_points = self.converter * np.array([[np.cos(self.alphas[0]), np.cos(self.alphas[0]),\n np.sin(self.alphas[0])]], dtype='float32') * self.p1_dists\n self.p4_points = self.converter * np.array([[np.cos(self.alphas[3]), np.cos(self.alphas[3]),\n np.sin(self.alphas[3])]], dtype='float32') * self.p4_dists\n self.p2_points = self.converter * np.array([[np.cos(self.alphas[1]), np.cos(self.alphas[1]),\n np.sin(self.alphas[1])]], dtype='float32') * self.p2_dists\n # print(self.p1_points[132:142])\n\n return 0",
"def parse_frame(data):\n test = binascii.hexlify(data)\n # defines the format of received LoRa frame header\n tap_header_format = 'bbhiibbbbib'\n phy_header_format = 'bbb'\n header_format = tap_header_format + phy_header_format\n print header_format\n header_len = struct.calcsize(header_format)\n data_len = len(data)\n if header_len > data_len:\n print 'packet too short'\n return (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,)\n else:\n # defines the frame format based on header and length of frame\n data_format = header_format + str(data_len - header_len) + 's'\n print data_format\n # print \"tap header: \", header_len\n # print \"data length: \", data_len\n # print \"test length: \", len(test)\n\n unpacked = struct.unpack(data_format, data)\n print unpacked\n # print '-----------------------------------------------------'\n # print \"bin \" + data\n # print 'hex ' + test\n return unpacked",
"def test_frame_change(self):\n cmd = FrameChange(phase=0.1)\n instruction = cmd(MeasureChannel(0))\n\n qobj = PulseQobjInstruction(name='fc', ch='m0', t0=0, phase=0.1)\n converted_instruction = self.converter(qobj)\n\n self.assertEqual(converted_instruction.timeslots, instruction.timeslots)\n self.assertEqual(converted_instruction.instructions[0][-1].command, cmd)",
"def unpack_frame(self, frame_bytes):\n byte_index = 0\n for i in iter(EITFrame.block_sizes.keys()):\n setattr(self, i, struct.unpack(EITFrame.block_types[i], frame_bytes[byte_index:(byte_index+EITFrame.block_sizes[i])]))\n byte_index = byte_index + EITFrame.block_sizes[i]",
"def test_subclassed_tuple(self):\n class Foo(tuple):\n pass\n\n x = Foo([1, 2])\n\n self.encoder.send(x)\n\n self.assertEqual(next(self.encoder), b'\\t\\x05\\x01\\x04\\x01\\x04\\x02')",
"def test_consistency_with_repeated_pack_and_unpack(self):\n\n buf, off = make_monster_from_generated_code()\n\n # Turns a buffer into Python object (T class).\n monster1 = _MONSTER.Monster.GetRootAs(buf, off)\n monsterT1 = _MONSTER.MonsterT.InitFromObj(monster1)\n\n for sizePrefix in [True, False]:\n # Re-serialize the data into a buffer.\n b1 = flatbuffers.Builder(0)\n if sizePrefix:\n b1.FinishSizePrefixed(monsterT1.Pack(b1))\n else:\n b1.Finish(monsterT1.Pack(b1))\n CheckReadBuffer(b1.Bytes, b1.Head(), sizePrefix)\n\n # Deserializes the buffer into Python object again.\n monster2 = _MONSTER.Monster.GetRootAs(b1.Bytes, b1.Head())\n # Re-serializes the data into a buffer for one more time.\n monsterT2 = _MONSTER.MonsterT.InitFromObj(monster2)\n for sizePrefix in [True, False]:\n # Re-serializes the data into a buffer\n b2 = flatbuffers.Builder(0)\n if sizePrefix:\n b2.FinishSizePrefixed(monsterT2.Pack(b2))\n else:\n b2.Finish(monsterT2.Pack(b2))\n CheckReadBuffer(b2.Bytes, b2.Head(), sizePrefix)",
"def _decode_frame(self):\n\n self._processed.eth_frame.log(level=logging_helper.INFO)\n\n # Parse IP packets, protocol=0x8\n if hex(self._processed.eth_frame.protocol) == u'0x8':\n self._processed.ip_frame = IPFrame(self._processed.eth_frame.payload)\n self._processed.ip_frame.log(level=logging_helper.INFO)\n\n if self._processed.ip_frame.payload is not None:\n self._processed.ip_frame.payload.log(level=logging_helper.INFO)\n\n else:\n logging.info(u'Not an IP payload')\n\n logging.info(self._processed)",
"def testSplit(self):\n\n protocol_factory = TBinaryProtocol.TBinaryProtocolAcceleratedFactory()\n bigstring = \"\".join(chr(byte) for byte in range(ord(\"a\"), ord(\"z\")+1))\n\n databuf = TTransport.TMemoryBuffer()\n prot = protocol_factory.getProtocol(databuf)\n prot.writeI32(42)\n prot.writeString(bigstring)\n prot.writeI16(24)\n data = databuf.getvalue()\n cutpoint = len(data)/2\n parts = [ data[:cutpoint], data[cutpoint:] ]\n\n framed_buffer = TTransport.TMemoryBuffer()\n framed_writer = TTransport.TFramedTransport(framed_buffer)\n for part in parts:\n framed_writer.write(part)\n framed_writer.flush()\n self.assertEquals(len(framed_buffer.getvalue()), len(data) + 8)\n\n # Recreate framed_buffer so we can read from it.\n framed_buffer = TTransport.TMemoryBuffer(framed_buffer.getvalue())\n framed_reader = TTransport.TFramedTransport(framed_buffer)\n prot = protocol_factory.getProtocol(framed_reader)\n self.assertEqual(prot.readI32(), 42)\n self.assertEqual(prot.readString(), bigstring)\n self.assertEqual(prot.readI16(), 24)",
"def test_default_values_with_pack_and_unpack(self):\n # Creates a flatbuffer with default values.\n b1 = flatbuffers.Builder(0)\n _MONSTER.MonsterStart(b1)\n gen_mon = _MONSTER.MonsterEnd(b1)\n b1.Finish(gen_mon)\n\n # Converts the flatbuffer into the object class.\n monster1 = _MONSTER.Monster.GetRootAs(b1.Bytes, b1.Head())\n monsterT1 = _MONSTER.MonsterT.InitFromObj(monster1)\n\n # Packs the object class into another flatbuffer.\n b2 = flatbuffers.Builder(0)\n b2.Finish(monsterT1.Pack(b2))\n monster2 = _MONSTER.Monster.GetRootAs(b2.Bytes, b2.Head())\n # Checks the default values.\n self.assertTrue(monster2.Pos() is None)\n self.assertEqual(monster2.Mana(), 150)\n self.assertEqual(monster2.Hp(), 100)\n self.assertTrue(monster2.Name() is None)\n self.assertEqual(monster2.Inventory(0), 0)\n self.assertEqual(monster2.InventoryAsNumpy(), 0)\n self.assertEqual(monster2.InventoryLength(), 0)\n self.assertTrue(monster2.InventoryIsNone())\n self.assertEqual(monster2.Color(), 8)\n self.assertEqual(monster2.TestType(), 0)\n self.assertTrue(monster2.Test() is None)\n self.assertTrue(monster2.Test4(0) is None)\n self.assertEqual(monster2.Test4Length(), 0)\n self.assertTrue(monster2.Test4IsNone())\n self.assertEqual(monster2.Testarrayofstring(0), '')\n self.assertEqual(monster2.TestarrayofstringLength(), 0)\n self.assertTrue(monster2.TestarrayofstringIsNone())\n self.assertTrue(monster2.Testarrayoftables(0) is None)\n self.assertEqual(monster2.TestarrayoftablesLength(), 0)\n self.assertTrue(monster2.TestarrayoftablesIsNone())\n self.assertTrue(monster2.Enemy() is None)\n self.assertEqual(monster2.Testnestedflatbuffer(0), 0)\n self.assertEqual(monster2.TestnestedflatbufferAsNumpy(), 0)\n self.assertEqual(monster2.TestnestedflatbufferLength(), 0)\n self.assertTrue(monster2.TestnestedflatbufferIsNone())\n self.assertTrue(monster2.Testempty() is None)\n self.assertFalse(monster2.Testbool())\n self.assertEqual(monster2.Testhashs32Fnv1(), 0)\n self.assertEqual(monster2.Testhashu32Fnv1(), 0)\n self.assertEqual(monster2.Testhashs64Fnv1(), 0)\n self.assertEqual(monster2.Testhashu64Fnv1(), 0)\n self.assertEqual(monster2.Testhashs32Fnv1a(), 0)\n self.assertEqual(monster2.Testhashu32Fnv1a(), 0)\n self.assertEqual(monster2.Testhashs64Fnv1a(), 0)\n self.assertEqual(monster2.Testhashu64Fnv1a(), 0)\n self.assertEqual(monster2.Testarrayofbools(0), 0)\n self.assertEqual(monster2.TestarrayofboolsAsNumpy(), 0)\n self.assertEqual(monster2.TestarrayofboolsLength(), 0)\n self.assertTrue(monster2.TestarrayofboolsIsNone())\n self.assertEqual(monster2.Testf(), 3.14159)\n self.assertEqual(monster2.Testf2(), 3.0)\n self.assertEqual(monster2.Testf3(), 0.0)\n self.assertEqual(monster2.Testarrayofstring2(0), '')\n self.assertEqual(monster2.Testarrayofstring2Length(), 0)\n self.assertTrue(monster2.Testarrayofstring2IsNone())\n self.assertTrue(monster2.Testarrayofsortedstruct(0) is None)\n self.assertEqual(monster2.TestarrayofsortedstructLength(), 0)\n self.assertTrue(monster2.TestarrayofsortedstructIsNone())\n self.assertEqual(monster2.Flex(0), 0)\n self.assertEqual(monster2.FlexAsNumpy(), 0)\n self.assertEqual(monster2.FlexLength(), 0)\n self.assertTrue(monster2.FlexIsNone())\n self.assertTrue(monster2.Test5(0) is None)\n self.assertEqual(monster2.Test5Length(), 0)\n self.assertTrue(monster2.Test5IsNone())\n self.assertEqual(monster2.VectorOfLongs(0), 0)\n self.assertEqual(monster2.VectorOfLongsAsNumpy(), 0)\n self.assertEqual(monster2.VectorOfLongsLength(), 0)\n self.assertTrue(monster2.VectorOfLongsIsNone())\n 
self.assertEqual(monster2.VectorOfDoubles(0), 0)\n self.assertEqual(monster2.VectorOfDoublesAsNumpy(), 0)\n self.assertEqual(monster2.VectorOfDoublesLength(), 0)\n self.assertTrue(monster2.VectorOfDoublesIsNone())\n self.assertTrue(monster2.ParentNamespaceTest() is None)\n self.assertTrue(monster2.VectorOfReferrables(0) is None)\n self.assertEqual(monster2.VectorOfReferrablesLength(), 0)\n self.assertTrue(monster2.VectorOfReferrablesIsNone())\n self.assertEqual(monster2.SingleWeakReference(), 0)\n self.assertEqual(monster2.VectorOfWeakReferences(0), 0)\n self.assertEqual(monster2.VectorOfWeakReferencesAsNumpy(), 0)\n self.assertEqual(monster2.VectorOfWeakReferencesLength(), 0)\n self.assertTrue(monster2.VectorOfWeakReferencesIsNone())\n self.assertTrue(monster2.VectorOfStrongReferrables(0) is None)\n self.assertEqual(monster2.VectorOfStrongReferrablesLength(), 0)\n self.assertTrue(monster2.VectorOfStrongReferrablesIsNone())\n self.assertEqual(monster2.CoOwningReference(), 0)\n self.assertEqual(monster2.VectorOfCoOwningReferences(0), 0)\n self.assertEqual(monster2.VectorOfCoOwningReferencesAsNumpy(), 0)\n self.assertEqual(monster2.VectorOfCoOwningReferencesLength(), 0)\n self.assertTrue(monster2.VectorOfCoOwningReferencesIsNone())\n self.assertEqual(monster2.NonOwningReference(), 0)\n self.assertEqual(monster2.VectorOfNonOwningReferences(0), 0)\n self.assertEqual(monster2.VectorOfNonOwningReferencesAsNumpy(), 0)\n self.assertEqual(monster2.VectorOfNonOwningReferencesLength(), 0)\n self.assertTrue(monster2.VectorOfNonOwningReferencesIsNone())\n self.assertEqual(monster2.AnyUniqueType(), 0)\n self.assertTrue(monster2.AnyUnique() is None)\n self.assertEqual(monster2.AnyAmbiguousType(), 0)\n self.assertTrue(monster2.AnyAmbiguous() is None)\n self.assertEqual(monster2.VectorOfEnums(0), 0)\n self.assertEqual(monster2.VectorOfEnumsAsNumpy(), 0)\n self.assertEqual(monster2.VectorOfEnumsLength(), 0)\n self.assertTrue(monster2.VectorOfEnumsIsNone())",
"def test_unpack_dataframe(self, batched_df, expected):\n unpacked_list = _BatchingManager.split_dataframe(batched_df, 1)\n assert len(unpacked_list) == 1\n # On windows, conversion dtype is not preserved.\n check_dtype = not os.name == \"nt\"\n pd.testing.assert_frame_equal(\n unpacked_list[0].reset_index(drop=True),\n expected.reset_index(drop=True),\n check_dtype=check_dtype,\n )",
"def frames():\n raise RuntimeError('Must be implemented by subclasses.')",
"def isframe(object):\r\n return isinstance(object, types.FrameType)",
"def test_encoding_on_pack_big_endian(self):\n\n try:\n rfh2 = pymqi.RFH2()\n self.assertEqual(rfh2.pack(encoding=CMQC.MQENC_FLOAT_S390)[4:8], b\"\\x00\\x00\\x00\\x02\")\n self.assertEqual(rfh2.pack(encoding=CMQC.MQENC_INTEGER_NORMAL)[4:8], b\"\\x00\\x00\\x00\\x02\")\n self.assertEqual(rfh2.pack(encoding=CMQC.MQENC_DECIMAL_NORMAL)[4:8], b\"\\x00\\x00\\x00\\x02\")\n self.assertEqual(rfh2.pack(encoding=CMQC.MQENC_FLOAT_IEEE_NORMAL)[4:8], b\"\\x00\\x00\\x00\\x02\")\n self.assertEqual(rfh2.pack(encoding=CMQC.MQENC_INTEGER_NORMAL + CMQC.MQENC_DECIMAL_NORMAL)[4:8], b\"\\x00\\x00\\x00\\x02\")\n self.assertEqual(rfh2.pack(encoding=CMQC.MQENC_INTEGER_NORMAL + CMQC.MQENC_FLOAT_IEEE_NORMAL)[4:8], b\"\\x00\\x00\\x00\\x02\")\n self.assertEqual(rfh2.pack(encoding=CMQC.MQENC_DECIMAL_NORMAL + CMQC.MQENC_FLOAT_IEEE_NORMAL)[4:8], b\"\\x00\\x00\\x00\\x02\")\n self.assertEqual(rfh2.pack(encoding=CMQC.MQENC_INTEGER_NORMAL + CMQC.MQENC_DECIMAL_NORMAL + CMQC.MQENC_FLOAT_IEEE_NORMAL)[4:8], b\"\\x00\\x00\\x00\\x02\")\n except Exception as e:\n self.fail(e)",
"def build_frame(self, message):\r\n header = BytesIO()\r\n if 0x3 <= self.opcode <= 0x7 or 0xB <= self.opcode:\r\n raise WebSocketProtocolError('Opcode cannot be a reserved opcode')\r\n ## +-+-+-+-+-------+\r\n ## |F|R|R|R| opcode|\r\n ## |I|S|S|S| (4) |\r\n ## |N|V|V|V| |\r\n ## | |1|2|3| |\r\n ## +-+-+-+-+-------+\r\n header.write(i2b(((self.fin << 7)\r\n | (self.rsv1 << 6)\r\n | (self.rsv2 << 5)\r\n | (self.rsv3 << 4)\r\n | self.opcode)))\r\n ## +-+-------------+-------------------------------+\r\n ## |M| Payload len | Extended payload length |\r\n ## |A| (7) | (16/63) |\r\n ## |S| | (if payload len==126/127) |\r\n ## |K| | |\r\n ## +-+-+-+-+-------+-+-------------+ - - - - - - - - - - - - - - - +\r\n ## | Extended payload length continued, if payload len == 127 |\r\n ## + - - - - - - - - - - - - - - - +-------------------------------+\r\n if self.masking_key:\r\n mask_bit = 1 << 7\r\n else:\r\n mask_bit = 0\r\n length = self.payload_length \r\n if length < 126:\r\n header.write(i2b(mask_bit | length))\r\n elif length < (1 << 16):\r\n header.write(i2b(mask_bit | 126))\r\n header.write(pack('!H', length))\r\n elif length < (1 << 63):\r\n header.write(i2b(mask_bit | 127))\r\n header.write(pack('!Q', length))\r\n else:\r\n raise WebSocketProtocolError('Frame too large')\r\n ## + - - - - - - - - - - - - - - - +-------------------------------+\r\n ## | |Masking-key, if MASK set to 1 |\r\n ## +-------------------------------+-------------------------------+\r\n ## | Masking-key (continued) | Payload Data |\r\n ## +-------------------------------- - - - - - - - - - - - - - - - +\r\n ## : Payload Data continued ... :\r\n ## + - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +\r\n ## | Payload Data continued ... |\r\n ## +---------------------------------------------------------------+\r\n if not self.masking_key:\r\n header.write(message)\r\n else:\r\n header.write(self.masking_key)\r\n header.write(self.mask(message))\r\n return header.getvalue()",
"def test_encoding_on_pack_small_endian(self):\n\n try:\n rfh2 = pymqi.RFH2()\n self.assertEqual(rfh2.pack()[4:8], b\"\\x02\\x00\\x00\\x00\")\n self.assertEqual(rfh2.pack(encoding=CMQC.MQENC_NATIVE)[4:8], b\"\\x02\\x00\\x00\\x00\")\n self.assertEqual(rfh2.pack(encoding=CMQC.MQENC_INTEGER_REVERSED)[4:8], b\"\\x02\\x00\\x00\\x00\")\n self.assertEqual(rfh2.pack(encoding=CMQC.MQENC_DECIMAL_REVERSED)[4:8], b\"\\x02\\x00\\x00\\x00\")\n self.assertEqual(rfh2.pack(encoding=CMQC.MQENC_FLOAT_IEEE_REVERSED)[4:8], b\"\\x02\\x00\\x00\\x00\")\n self.assertEqual(rfh2.pack(encoding=CMQC.MQENC_INTEGER_REVERSED + CMQC.MQENC_DECIMAL_REVERSED)[4:8], b\"\\x02\\x00\\x00\\x00\")\n self.assertEqual(rfh2.pack(encoding=CMQC.MQENC_INTEGER_REVERSED + CMQC.MQENC_FLOAT_IEEE_REVERSED)[4:8], b\"\\x02\\x00\\x00\\x00\")\n self.assertEqual(rfh2.pack(encoding=CMQC.MQENC_DECIMAL_REVERSED + CMQC.MQENC_FLOAT_IEEE_REVERSED)[4:8], b\"\\x02\\x00\\x00\\x00\")\n except Exception as e:\n self.fail(e)",
"def test_decode():",
"def is_struct(self):\n return False",
"def dispatch_frame(self, frame):"
] |
[
"0.769042",
"0.64738643",
"0.6304517",
"0.6169357",
"0.5768204",
"0.57520175",
"0.5662974",
"0.55218434",
"0.55126536",
"0.546263",
"0.5456736",
"0.5456736",
"0.543818",
"0.54336375",
"0.5404497",
"0.5320587",
"0.5302061",
"0.52817917",
"0.5264384",
"0.5235212",
"0.52228606",
"0.51959676",
"0.5173786",
"0.51281106",
"0.5126155",
"0.5125919",
"0.51244295",
"0.51129866",
"0.51035666",
"0.50920504"
] |
0.6720689
|
1
|
Testing unpack frame function against MESSAGE
|
def testFrameUnpack2(self):
    msg = """MESSAGE
destination:/queue/a
message-id: card_data

hello queue a"""

    result = stomper.unpack_frame(msg)

    self.assertEqual(result['cmd'], 'MESSAGE')
    self.assertEqual(result['headers']['destination'], '/queue/a')
    self.assertEqual(result['headers']['message-id'], 'card_data')
    self.assertEqual(result['body'], 'hello queue a')
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def testFramepack1(self):\n # Check bad frame generation:\n frame = stomper.Frame()\n\n def bad():\n frame.cmd = 'SOME UNNOWN CMD'\n\n self.assertRaises(stomper.FrameError, bad)\n\n # Generate a MESSAGE frame:\n frame = stomper.Frame()\n frame.cmd = 'MESSAGE'\n frame.headers['destination'] = '/queue/a'\n frame.headers['message-id'] = 'card_data'\n frame.body = \"hello queue a\"\n result = frame.pack()\n\n# print \"\\n-- result \" + \"----\" * 10\n# pprint.pprint(result)\n# print\n\n # Try bad message unpack catching:\n bad_frame = stomper.Frame()\n self.assertRaises(stomper.FrameError, bad_frame.unpack, None)\n self.assertRaises(stomper.FrameError, bad_frame.unpack, '')\n\n # Try to read the generated frame back in\n # and then check the variables are set up\n # correctly:\n frame2 = stomper.Frame()\n frame2.unpack(result)\n\n self.assertEqual(frame2.cmd, 'MESSAGE')\n self.assertEqual(frame2.headers['destination'], '/queue/a')\n self.assertEqual(frame2.headers['message-id'], 'card_data')\n self.assertEqual(frame2.body, 'hello queue a')\n result = frame2.pack()\n\n correct = \"MESSAGE\\ndestination:/queue/a\\nmessage-id:card_data\\n\\nhello queue a\\x00\\n\"\n\n# print \"result: \"\n# pprint.pprint(result)\n# print\n# print \"correct: \"\n# pprint.pprint(correct)\n# print\n#\n self.assertEqual(result, correct)\n\n result = stomper.unpack_frame(result)\n\n self.assertEqual(result['cmd'], 'MESSAGE')\n self.assertEqual(result['headers']['destination'], '/queue/a')\n self.assertEqual(result['headers']['message-id'], 'card_data')\n self.assertEqual(result['body'], 'hello queue a')",
"def testFrameUnpack3(self):\n msg = \"\"\"CONNECTED\nsession:ID:snorky.local-49191-1185461799654-3:18\n\"\"\"\n result = stomper.unpack_frame(msg)\n\n self.assertEqual(result['cmd'], 'CONNECTED')\n self.assertEqual(result['headers']['session'], 'ID:snorky.local-49191-1185461799654-3:18')\n self.assertEqual(result['body'], '')",
"def decode_message(self, buf, message_type=None):\n self.debugStack = 0\n value, typedef, _ = self._decode_message(\"\", buf, message_type)\n return value, typedef",
"def onMessageFrame(self, payload):",
"def testFramepack2(self):\n # Check bad frame generation:\n frame = stomper.Frame()\n frame.cmd = 'DISCONNECT'\n result = frame.pack()\n correct = 'DISCONNECT\\n\\n\\x00\\n'\n self.assertEqual(result, correct)",
"def onMessageFrameData(self, payload):",
"def receive_message(self, message):",
"def handleMessage(msg):",
"def _parse_msg(self, b):\n msg = None\n r = self.matcher.match(b)\n if r:\n address = int(r.group(1), 16)\n function = int(r.group(2), 16)\n # Convert data into bytes\n data = []\n for i in range(0, len(r.group(3)), 2):\n datum = int(r.group(3)[i:i+2], 16)\n data.append(datum)\n # Construct message\n msg = ModbusMessage(address, function, data, int(time.time() * 1000))\n # Verify LRC\n msg_lrc = int(r.group(5), 16)\n if msg_lrc != msg.compute_lrc():\n self.logger.warning('LRC mismatch, frame dropped.')\n msg = None\n return msg",
"def _decode(self, message):\n raise NotImplementedError(\"_decode needs to be implemented in {} subclass\".format(type(self).__name__))",
"def process_message(self, msg, src):",
"def receive(self, message):",
"def unpack(self, raw_message):\n return self._msg_struct.unpack(raw_message)",
"def handle_message(self, message):",
"def _decode_message(self, label: str, buf, typedef=None, pos=0, end=None, group=False):\n print(str(pos) + \" decode_message \" + label)\n if end is None:\n end = len(buf)\n\n if typedef is None:\n typedef = {}\n else:\n # Don't want to accidentally modify the original\n typedef = copy.deepcopy(typedef)\n output = {}\n\n while pos < end:\n oldpos = pos\n tag, pos = decoder._DecodeVarint(buf, pos)\n try:\n field_number, wire_type = wire_format.UnpackTag(tag)\n except Exception as exc:\n raise (ValueError,\n 'Could not read valid tag at pos %d. Ensure it is a valid protobuf message: %s'\n % (pos-len(tag), exc), sys.exc_info()[2])\n # Convert to str\n field_number = str(field_number)\n orig_field_number = field_number\n \n field_typedef = None\n if field_number in typedef:\n field_typedef = typedef[field_number]\n else:\n field_typedef = {}\n field_typedef['type'] = self.wire_type_defaults[wire_type]\n field_type = field_typedef['type']\n if self.debug:\n ft = field_type\n if ft == None:\n ft = \"None\"\n print(\"@\" + str(oldpos) + \"-\" + str(pos-1) + \":\" + label + \" field_number \" +\n str(field_number) +\n \" wire_type \" + str(wire_type) +\n \" field_type \" + str(ft))\n # If field_type is None, its either an unsupported wire type, length delim or group\n # length delim we have to try and decode first\n field_out = None\n if field_type == 'LD':\n field_out, pos = self.decode_message_LD(label, buf, pos, field_typedef)\n elif field_type == 'endGroup':\n # TODO Should probably match the field_number to START_GROUP\n if not group:\n raise ValueError(\"Found END_GROUP before START_GROUP\")\n # exit out\n return output, typedef, pos\n elif field_type == 'message':\n field_out, pos = self.decode_message_message(\n label, buf, pos, field_typedef, field_number)\n elif field_type == 'group':\n group_typedef = None\n # Check for a anonymous type\n if 'group_typedef' in field_typedef:\n group_typedef = field_typedef['group_typedef']\n field_out, group_typedef, pos = self.decode_group(\n label, buf, group_typedef, pos)\n # Save type definition\n field_typedef['group_typedef'] = group_typedef\n else:\n # Verify wiretype matches\n if self.wiretypes[field_type] != wire_type:\n raise ValueError(\"Invalid wiretype for field number %s. %s is not wiretype %s\"\n % (field_number, field_type, wire_type))\n # Simple type, just look up the decoder\n field_out, pos = self.decoders[field_type](buf, pos)\n field_typedef['type'] = field_type\n if 'name' not in field_typedef:\n field_typedef['name'] = ''\n field_key = field_number\n if '-' not in field_number and 'name' in field_typedef and field_typedef['name'] != '':\n field_key = field_typedef['name']\n # Deal with repeats\n if field_key in output:\n if isinstance(field_out, list):\n if isinstance(output[field_number], list):\n output[field_key] += field_out\n else:\n output[field_key] = field_out.append(output[field_key])\n else:\n if isinstance(output[field_number], list):\n output[field_key].append(field_out)\n else:\n output[field_key] = [output[field_key], field_out]\n else:\n output[field_key] = field_out\n typedef[orig_field_number] = field_typedef\n if self.debug:\n print(str(field_key) + \" field_out:\" + str(field_out))\n if pos > end:\n raise decoder._DecodeError(\"Invalid Message Length, pos=\" +\n str(pos) + \" end=\" + str(end))\n # Should never hit here as a group\n if group:\n raise ValueError(\"Got START_GROUP with no END_GROUP.\")\n print(\"decode_message finish \" + str(pos))\n return output, typedef, pos",
"def parse_mess_bin(self):\n unpacked_mess = self.bin_unpacker.unpack(self.scan_message) # Unpack message data\n len_mess = unpacked_mess[3] # Length of message as defined within the message\n if len_mess != len(self.scan_message):\n print('Warning!!! Expected message of length %i bytes but got message of %i bytes' % (len_mess, len(self.scan_message)))\n return unpacked_mess",
"def _parse_msg(self, msg):\n try:\n self.received_msg += msg.decode()\n except:\n self.log.warning(\"invalid parse frame '%s'\" % msg)\n\n while True:\n pos = self.received_msg.find('\\r')\n if pos == -1: # no full msg\n break\n m = self.received_msg[:pos].strip()\n if not len(m):\n break\n self.platform.process_received_message(m)\n self.received_msg = self.received_msg[pos + 1:]",
"def decode_message(self, message):\r\n\r\n\t\tprint(\"Decoding message '{}'\".format(message))\r\n\r\n\t\tmessage_split = message[1:-1].split('||')\r\n\r\n\t\tif len(message_split) > 1: # Several messages are queued\r\n\t\t\tfor m in message_split:\r\n\t\t\t\tself.decode_message('|' + m + '|')\r\n\t\t\treturn\r\n\t\telse:\r\n\t\t\tmessage = message_split[0]\r\n\r\n\t\tmessage_split = message.split('|')\r\n\r\n\t\tif message_split[0] == 'LA':\r\n\r\n\t\t\tlist_bars = message_split[1].split(',')\r\n\t\t\tself.send_bar_names.emit(list_bars) # Sending the list to the UI\r\n\r\n\t\telif message_split[0] == 'ME':\r\n\r\n\t\t\tprint(\"New message received : '{}'\".format(message))\r\n\r\n\t\t\tif len(message_split) == 3: # Author was found\r\n\t\t\t\tinfos = (message_split[2], message_split[1])\r\n\t\t\telif len(message_split) == 2: # No author\r\n\t\t\t\tinfos = (message_split[1],)\r\n\t\t\ttry:\r\n\t\t\t\tself.message_received.emit(infos)\r\n\t\t\texcept UnboundLocalError:\r\n\t\t\t\tself._window.open_dialog(\"Message de chat incompréhensible\",\r\n\t\t\t\t\t\t\t\t\t\t \"Le message de chat suivant n'a pas pu être décodé : {}\".format(message),\r\n\t\t\t\t\t\t\t\t\t\t type=\"warning\")\r\n\r\n\t\telif message_split[0] == 'LO': # Message is '|LO|' so just ignoring it\r\n\r\n\t\t\tself.name_set.emit() # Warning the UI about the name being set\r\n\r\n\t\telif message_split[0] == \"CH\":\r\n\r\n\t\t\tpass\r\n\t\t\r\n\t\telif message_split[0] == 'UR':\r\n\r\n\t\t\tprint(\"New message received : '{}'\".format(message))\r\n\r\n\t\t\tif len(message_split) == 3: # Author was found\r\n\t\t\t\tinfos = (message_split[2], message_split[1])\r\n\t\t\telif len(message_split) == 2: # No author\r\n\t\t\t\tinfos = (message_split[1],)\r\n\t\t\ttry:\r\n\t\t\t\tself.urgent_message_received.emit(infos)\r\n\t\t\texcept UnboundLocalError:\r\n\t\t\t\tself._window.open_dialog(\"Message de chat incompréhensible\",\r\n\t\t\t\t\t\t\t\t\t\t \"Le message de chat suivant n'a pas pu être décodé : {}\".format(message),\r\n\t\t\t\t\t\t\t\t\t\t type=\"warning\")\r\n\t\t\t\r\n\t\telif message_split[0] == \"LE\": # Getting the list of products\r\n\r\n\t\t\tif message_split[1]:\r\n\t\t\t\ttuples = message_split[1].split(',')\r\n\t\t\t\tfor t in tuples:\r\n\t\t\t\t\ti, f = t.split(':')\r\n\t\t\t\t\tself.__food[int(i)] = f\r\n\r\n\t\telif message_split[0] == \"RS\": # A new order for Restal\r\n\r\n\t\t\ttry:\r\n\t\t\t\tfood = self.__food[int(message_split[2])]\r\n\t\t\texcept KeyError:\r\n\t\t\t\tfood = \"Inconnue\"\r\n\t\t\t\tprint(\"Unable to get the name of food '{}'\".format(message_split[2]))\r\n\t\t\tprint(message_split[1],message_split[3],message_split[2])\r\n\t\t\tself.add_order.emit(message_split[1], food, int(message_split[3]))\r\n\r\n\t\telse:\r\n\t\t\tself._window.open_dialog(\"Message du serveur incompréhensible\",\r\n\t\t\t\t\t\t\t\t\t \"Le message suivant n'a pas pu être décodé : {}\".format(message), type=\"warning\")\r\n\t\t\tprint(\"Error : message '{}' could not be decoded\".format(message))",
"def __nanojsonrpc_unpack(self, msg):\n try:\n pack = json.loads(msg)\n if 'method' not in pack:\n return None\n else:\n return pack\n except:\n traceback.print_exc()\n return None",
"def test_decode_messages():\n decoding1 = d.decode()\n decoding2 = s.decode_messages()\n assert decoding1 == decoding2\n decoding3 = SecretGarden(filename, \"HELLO, STUDENTS.\").decode_messages()\n assert decoding1 != decoding3",
"def _messageReceived(self, msg):\r\n if len(msg) < 17:\r\n self.transport.loseConnection()\r\n\r\n flag = msg[:1]\r\n\r\n if flag == self._TRUE:\r\n destID = UUID(bytes=msg[1:17])\r\n offset = 17\r\n elif flag == self._FALSE:\r\n destID = None\r\n offset = 1\r\n else:\r\n log.msg('Protocol Error: Could not identify flag.')\r\n self.transport.loseConnection()\r\n return\r\n\r\n remoteID = UUID(bytes=msg[offset:offset + 16])\r\n offset += 16\r\n\r\n idLen, = self._MSG_ID_STRUCT.unpack(msg[offset:offset + 1])\r\n offset += 1\r\n\r\n msgID = msg[offset:offset + idLen]\r\n offset += idLen\r\n\r\n self.messageReceived(remoteID, buffer(msg, offset), msgID, destID)",
"def onMessageFrameBegin(self, length):",
"def ReceiveMessageFromPacketInfo(self) -> IPPacketInformation:",
"def parse_message(self, message):\n pass",
"def handle_protobuf(self, message: protobuf.ProtocolMessage) -> None:",
"def test_message_mixed():\n result = True\n\n message = msg.Message()\n size = 0\n for i in range(num_it):\n message.appendInt(8848)\n message.appendBoolean(True)\n message.appendFloat(128.789456)\n message.appendString(str(i) + \"azertyuiopmlkjhgfdsqwxcvbn\")\n\n size += msg.intStruct.size + msg.boolStruct.size + msg.floatStruct.size + msg.intStruct.size + len(str(i) + \"azertyuiopqsdfghjklmwxcvbn\")\n if message.length != msg.HEADER_SIZE + size:\n print(\"Size is \", message.length, \" but should be \", msg.HEADER_SIZE + size)\n print(\"Error : message.appendMixed\")\n result = False\n\n message.resetCursor()\n for i in range(num_it):\n a = message.readInt()\n b = message.readBoolean()\n c = message.readFloat()\n d = message.readString()\n if a != 8848:\n print(\"Error in int\", i, a)\n result = False\n if not b is True:\n print(\"Errro in boolean\", i, b)\n result = False\n if abs(c- 128.789456) > 0.00001:\n print(\"Error in float\", i, c)\n result = False\n if d != str(i) + \"azertyuiopmlkjhgfdsqwxcvbn\":\n print(\"Error in string\", i, d)\n result = False\n\n return result\n\n\n # // mixed\n # message = new Message();\n # for(int j = 0 ; j < 1024 ; j++){\n # message.resetCursor();\n # message.appendInt(8848);\n # message.appendBoolean(true);\n # message.appendFloat((float) 128.789456);\n # message.appendString(\"azertyuiopmlkjhgfdsqwxcvbn\");\n # message.resetCursor();\n # if(message.readInt() != 8848){\n # System.out.println(\"Error in Int\");\n # System.exit(0);\n # }\n # if(message.readBoolean() != true){\n # System.out.println(\"Error in Boolean\");\n # System.exit(0);\n # }\n # if(message.readFloat() != (float) 128.789456){\n # System.out.println(\"Error in Float\");\n # System.exit(0);\n # }\n # if(message.readString().compareTo(\"azertyuiopmlkjhgfdsqwxcvbn\") != 0){\n # System.out.println(\"Error in String\");\n # System.exit(0);\n # }\n # }\n # System.out.println(\"OK : mixed types\");",
"def processReceivedMessage(iTag, clsName, msgID, msg): #@NoSelf",
"def decode_data(self, msg):\n if len(msg) < 6:\n raise ValueError(\"Data message is too short - minimum length 6 bytes, got %d bytes\" % len(msg))\n\n (x, TIME) = struct.unpack(\"<HL\", msg[0:6])\n\n if x & (2**15) != 0:\n raise ValueError(\"Expected a data message, found a command message instead\")\n\n ID = (x & self.ID_MASK) >> 4\n LEN = x & self.LEN_MASK\n\n if LEN < 0 or LEN > 8:\n raise ValueError(\"Invalid CAN payload length - %d bytes not in [0,8] bytes\" % LEN)\n \n if ID in self.descriptors:\n desc = self.descriptors[ID]\n if \"format\" not in desc:\n raise ValueError(\"No format specified for %#x:%s\" % (ID, desc[\"name\"]))\n if LEN != struct.calcsize(\"<\" + str(desc[\"format\"])):\n raise ValueError(\"Error in decoding message id=%#x name=%s - length field %d mismatches descriptor %d\"\n % (ID, desc[\"name\"], LEN, struct.calcsize(\"<\" + str(desc[\"format\"]))))\n\n DATA = struct.unpack(\"<\" + str(desc[\"format\"]), msg[6:6+LEN])\n \n return (TIME, ID, desc, DATA)\n else:\n raise ValueError(\"Unknown message id=%#x, time=%d, len=%d, data=%r\" % (ID, TIME, LEN, msg[6:]))",
"def test_decode(self):\n pass # TODO(tlarsen)",
"def check_message(m, n_frames, tx_id, data):\n assert len(m.frames) == n_frames\n assert m.tx_id == tx_id\n assert m.data == bytearray(data)"
] |
[
"0.6846625",
"0.67204636",
"0.6646124",
"0.6544158",
"0.6350713",
"0.6292274",
"0.6244153",
"0.62392515",
"0.61773825",
"0.61642474",
"0.6163974",
"0.60963815",
"0.60218555",
"0.5993113",
"0.5992668",
"0.59667337",
"0.59657395",
"0.5963074",
"0.5938992",
"0.59179807",
"0.58712023",
"0.58347446",
"0.5794647",
"0.57931036",
"0.57743937",
"0.5736824",
"0.57343346",
"0.57311356",
"0.5722018",
"0.57144856"
] |
0.7443139
|
0
|
Testing unpack frame function against CONNECTED
|
def testFrameUnpack3(self):
    msg = """CONNECTED
session:ID:snorky.local-49191-1185461799654-3:18
"""
    result = stomper.unpack_frame(msg)

    self.assertEqual(result['cmd'], 'CONNECTED')
    self.assertEqual(result['headers']['session'], 'ID:snorky.local-49191-1185461799654-3:18')
    self.assertEqual(result['body'], '')
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def testFramepack2(self):\n # Check bad frame generation:\n frame = stomper.Frame()\n frame.cmd = 'DISCONNECT'\n result = frame.pack()\n correct = 'DISCONNECT\\n\\n\\x00\\n'\n self.assertEqual(result, correct)",
"def testFrameUnpack2(self):\n msg = \"\"\"MESSAGE\ndestination:/queue/a\nmessage-id: card_data\n\nhello queue a\"\"\"\n\n result = stomper.unpack_frame(msg)\n\n self.assertEqual(result['cmd'], 'MESSAGE')\n self.assertEqual(result['headers']['destination'], '/queue/a')\n self.assertEqual(result['headers']['message-id'], 'card_data')\n self.assertEqual(result['body'], 'hello queue a')",
"def _handle_DeferredConnectionIn (self, event, flow, packet):\n pass",
"def testFramepack1(self):\n # Check bad frame generation:\n frame = stomper.Frame()\n\n def bad():\n frame.cmd = 'SOME UNNOWN CMD'\n\n self.assertRaises(stomper.FrameError, bad)\n\n # Generate a MESSAGE frame:\n frame = stomper.Frame()\n frame.cmd = 'MESSAGE'\n frame.headers['destination'] = '/queue/a'\n frame.headers['message-id'] = 'card_data'\n frame.body = \"hello queue a\"\n result = frame.pack()\n\n# print \"\\n-- result \" + \"----\" * 10\n# pprint.pprint(result)\n# print\n\n # Try bad message unpack catching:\n bad_frame = stomper.Frame()\n self.assertRaises(stomper.FrameError, bad_frame.unpack, None)\n self.assertRaises(stomper.FrameError, bad_frame.unpack, '')\n\n # Try to read the generated frame back in\n # and then check the variables are set up\n # correctly:\n frame2 = stomper.Frame()\n frame2.unpack(result)\n\n self.assertEqual(frame2.cmd, 'MESSAGE')\n self.assertEqual(frame2.headers['destination'], '/queue/a')\n self.assertEqual(frame2.headers['message-id'], 'card_data')\n self.assertEqual(frame2.body, 'hello queue a')\n result = frame2.pack()\n\n correct = \"MESSAGE\\ndestination:/queue/a\\nmessage-id:card_data\\n\\nhello queue a\\x00\\n\"\n\n# print \"result: \"\n# pprint.pprint(result)\n# print\n# print \"correct: \"\n# pprint.pprint(correct)\n# print\n#\n self.assertEqual(result, correct)\n\n result = stomper.unpack_frame(result)\n\n self.assertEqual(result['cmd'], 'MESSAGE')\n self.assertEqual(result['headers']['destination'], '/queue/a')\n self.assertEqual(result['headers']['message-id'], 'card_data')\n self.assertEqual(result['body'], 'hello queue a')",
"def from_physical_layer(conn, FRAME_LENGTH, FORMAT):\r\n frame = conn.recv(FRAME_LENGTH).decode(FORMAT)\r\n print(f\"[from_physical_layer] frame:{frame}\")\r\n return frame",
"def onMessageFrame(self, payload):",
"def _decode_frame(self):\n\n self._processed.eth_frame.log(level=logging_helper.INFO)\n\n # Parse IP packets, protocol=0x8\n if hex(self._processed.eth_frame.protocol) == u'0x8':\n self._processed.ip_frame = IPFrame(self._processed.eth_frame.payload)\n self._processed.ip_frame.log(level=logging_helper.INFO)\n\n if self._processed.ip_frame.payload is not None:\n self._processed.ip_frame.payload.log(level=logging_helper.INFO)\n\n else:\n logging.info(u'Not an IP payload')\n\n logging.info(self._processed)",
"def onMessageFrameData(self, payload):",
"def stateless_unpack(buff, to_server):\n\tdecoder = PacketDecoder(to_server)\n\tdecoder.buff = buff\n\tpacket = decoder.read_packet()\n\treturn packet, decoder.buff",
"def onMessageFrameBegin(self, length):",
"def capture_frame(self):\n\n # search for the sync bytes which indicate the start of one frame\n # sync_bytes = [None, None, None, None]\n # while True:\n # sync_bytes[3] = sync_bytes[2]\n # sync_bytes[2] = sync_bytes[1]\n # sync_bytes[1] = sync_bytes[0]\n # sync_bytes[0] = binascii.hexlify(self.ser.read())\n #\n # # check the content\n # try:\n # if (sync_bytes[0] + sync_bytes[1] + sync_bytes[2]\n # + sync_bytes[3] == b'fffefdfc'):\n # print(\"Frame captured!\")\n # break\n # except TypeError:\n # pass\n\n while not self.lookup_sync():\n pass\n\n # print('Frame captured!')\n self.msg_size = int.from_bytes(self.ser.read(2),\n byteorder='little',\n signed=True)\n # print('Msg Size: {}'.format(self.msg_size))\n\n # raw message info (cmd + options + data)\n self.message = self.ser.read(self.msg_size)\n\n # command info\n self.cmd = int.from_bytes(self.message[:2], byteorder='little', signed=False)\n\n # raw data info (plane_num + distance values)\n self.p3_msg = self.message[-549:]\n self.p1_msg = self.message[-1098: -549]\n self.p4_msg = self.message[-1647: -1098]\n self.p2_msg = self.message[-2196: -1647]\n # print(len(self.p3_msg), len(self.p1_msg), len(self.p4_msg), len(self.p2_msg))\n\n # examine the msg size\n try:\n assert (self.p3_msg[0], self.p1_msg[0], self.p4_msg[0], self.p2_msg[0]) \\\n == (3, 2, 1, 0), \"Fail to interpret the msg\"\n except AssertionError:\n # print(\"error\\n\\n\")\n return -1\n\n # convert bytes to integers (ignore the plane_num)\n self.p3_dists = [int.from_bytes([self.p3_msg[2 * i + 1], self.p3_msg[2 * i + 2]],\n byteorder='little', signed=True)\n for i in range((len(self.p3_msg) - 1) // 2)]\n self.p1_dists = [int.from_bytes([self.p1_msg[2 * i + 1], self.p1_msg[2 * i + 2]],\n byteorder='little', signed=True)\n for i in range((len(self.p1_msg) - 1) // 2)]\n self.p4_dists = [int.from_bytes([self.p4_msg[2 * i + 1], self.p4_msg[2 * i + 2]],\n byteorder='little', signed=True)\n for i in range((len(self.p4_msg) - 1) // 2)]\n self.p2_dists = [int.from_bytes([self.p2_msg[2 * i + 1], self.p2_msg[2 * i + 2]],\n byteorder='little', signed=True)\n for i in range((len(self.p2_msg) - 1) // 2)]\n\n # convert list into np array for further processing\n self.p3_dists = np.asarray(self.p3_dists).astype('float32').reshape(274, 1)\n self.p1_dists = np.asarray(self.p1_dists).astype('float32').reshape(274, 1)\n self.p4_dists = np.asarray(self.p4_dists).astype('float32').reshape(274, 1)\n self.p2_dists = np.asarray(self.p2_dists).astype('float32').reshape(274, 1)\n\n # print(self.p3_dists[132:142])\n # print(self.p1_dists[132:142])\n # print(self.p4_dists[132:142])\n # print(self.p2_dists[132:142])\n\n # Compute the position info\n # print(self.converter)\n # print(self.thetas.shape)\n self.p3_points = self.converter * np.array([[np.cos(self.alphas[2]), np.cos(self.alphas[2]),\n np.sin(self.alphas[2])]], dtype='float32') * self.p3_dists\n self.p1_points = self.converter * np.array([[np.cos(self.alphas[0]), np.cos(self.alphas[0]),\n np.sin(self.alphas[0])]], dtype='float32') * self.p1_dists\n self.p4_points = self.converter * np.array([[np.cos(self.alphas[3]), np.cos(self.alphas[3]),\n np.sin(self.alphas[3])]], dtype='float32') * self.p4_dists\n self.p2_points = self.converter * np.array([[np.cos(self.alphas[1]), np.cos(self.alphas[1]),\n np.sin(self.alphas[1])]], dtype='float32') * self.p2_dists\n # print(self.p1_points[132:142])\n\n return 0",
"def gotProtocol(self,p): \n p.send_hello()",
"def test_process_packet_connect(self):\n pkt = {'type': 'connect',\n 'endpoint': '/tobi',\n 'qs': ''\n }\n self.ns.process_packet(pkt)\n assert not self.environ['socketio'].error.called\n\n # processing a connection packet with query string\n pkt = {'type': 'connect',\n 'endpoint': '/test',\n 'qs': '?test=1'\n }\n self.ns.process_packet(pkt)\n assert not self.environ['socketio'].error.called",
"def isConnected():",
"def on_frame(self, frame: str) -> None:\n\n logger.debug(\"Frame: {}\".format(frame))\n try:\n message = json.loads(frame)\n except:\n logger.exception(\"Could not decode the JSON message\")\n self.transport.close()\n return\n\n mtype = message.get('type', None)\n self.log_state(mtype)\n if mtype == 'NEGOTIATION_RESPONSE':\n logger.debug(\"NEGOTIATION RESPONSE\")\n\n # Receive the chosen algorithms by the server \n self.process_negotiation_response(message)\n\n # Generate DH client private and public keys\n bytes_public_key,p,g,y=self.crypto.dh_client()\n \n message = {'type':'DH_PARAMETERS','parameters':{'p':p,'g':g,'public_key':str(bytes_public_key,'ISO-8859-1')}}\n self._send(message)\n self.state = STATE_DH\n \n return\n\n elif mtype == 'DH_PARAMETERS_RESPONSE':\n logger.debug('DH_PARAMETERS_RESPONSE')\n public_key=bytes(message['parameters']['public_key'],'ISO-8859-1')\n \n #Create shared key with the server public key\n self.crypto.create_shared_key(public_key)\n \n # Generate a symmetric key\n self.crypto.symmetric_key_gen()\n logger.debug(\"Key: {}\".format(self.crypto.symmetric_key))\n\n if self.state == STATE_ROTATION:\n self.state = STATE_OPEN\n self.send_file(self.file_name)\n \n elif self.state == STATE_DH:\n secure_message = self.encrypt_message({'type': 'OPEN', 'file_name': self.file_name})\n self._send(secure_message)\n self.send_mac()\n self.state = STATE_OPEN\n\n return\n\n elif mtype == 'INTEGRITY_CONTROL':\n flag = message['data']\n if flag == 'True':\n self._send(self.encrypt_message({'type': 'CLOSE'}))\n self.send_mac()\n logger.info(\"File transfer finished. Closing transport\")\n self.transport.close()\n\n elif mtype == 'OK': # Server replied OK. We can advance the state\n if self.state == STATE_OPEN:\n logger.info(\"Channel open\")\n self.send_file(self.file_name)\n elif self.state == STATE_DATA: # Got an OK during a message transfer.\n # Reserved for future use\n pass\n else:\n logger.warning(\"Ignoring message from server\")\n return\n\n elif mtype == 'ERROR':\n logger.warning(\"Got error from server: {}\".format(message.get('data', None)))\n \n else:\n logger.warning(\"Invalid message type\")\n\n logger.debug('Closing')\n self.transport.close()\n self.loop.stop()",
"def test_decode_failure(self):\n\n def handle(event):\n return 0x0000, event.attribute_list\n\n self.ae = ae = AE()\n ae.acse_timeout = 5\n ae.dimse_timeout = 5\n ae.network_timeout = 5\n ae.add_supported_context(ModalityPerformedProcedureStep)\n\n handlers = [(evt.EVT_N_CREATE, handle)]\n scp = ae.start_server((\"localhost\", 11112), evt_handlers=handlers, block=False)\n\n ae.add_requested_context(ModalityPerformedProcedureStep)\n assoc = ae.associate(\"localhost\", 11112)\n assert assoc.is_established\n\n class DummyReply:\n def getvalue(self):\n def test():\n pass\n\n return test\n\n class DummyMessage:\n is_valid_response = True\n is_valid_request = False\n AttributeList = DummyReply()\n Status = 0x0000\n STATUS_OPTIONAL_KEYWORDS = []\n\n class DummyDIMSE:\n msg_queue = queue.Queue()\n gotten = False\n\n def send_msg(*args, **kwargs):\n return\n\n def get_msg(self, *args, **kwargs):\n if not self.gotten:\n self.gotten = True\n return 1, DummyMessage()\n return None, None\n\n assoc._reactor_checkpoint.clear()\n while not assoc._is_paused:\n time.sleep(0.01)\n assoc.dimse = DummyDIMSE()\n ds = Dataset()\n ds.PatientName = \"Test^test\"\n status, ds = assoc.send_n_create(\n ds, ModalityPerformedProcedureStep, \"1.2.840.10008.5.1.1.40.1\"\n )\n assert status.Status == 0x0110\n assert ds is None\n assoc.release()\n assert assoc.is_released\n\n scp.shutdown()",
"def _decode1(self, body, data):\r\n if \" \" in body:\r\n evtype,body = body.split(\" \",1)\r\n else:\r\n evtype,body = body,\"\"\r\n evtype = evtype.upper()\r\n if evtype == \"CIRC\":\r\n m = re.match(r\"(\\d+)\\s+(\\S+)(\\s\\S+)?(\\s\\S+)?(\\s\\S+)?(\\s\\S+)?\", body)\r\n if not m:\r\n raise ProtocolError(\"CIRC event misformatted.\")\r\n ident,status,path,purpose,reason,remote = m.groups()\r\n ident = int(ident)\r\n if path:\r\n if \"PURPOSE=\" in path:\r\n remote = reason\r\n reason = purpose\r\n purpose=path\r\n path=[]\r\n elif \"REASON=\" in path:\r\n remote = reason\r\n reason = path\r\n purpose = \"\"\r\n path=[]\r\n else:\r\n path_verb = path.strip().split(\",\")\r\n path = []\r\n for p in path_verb:\r\n path.append(p.replace(\"~\", \"=\").split(\"=\")[0])\r\n else:\r\n path = []\r\n\r\n if purpose and \"REASON=\" in purpose:\r\n remote=reason\r\n reason=purpose\r\n purpose=\"\"\r\n\r\n if purpose: purpose = purpose[9:]\r\n if reason: reason = reason[8:]\r\n if remote: remote = remote[15:]\r\n event = CircuitEvent(evtype, ident, status, path, purpose, reason,\r\n remote, body)\r\n elif evtype == \"STREAM\":\r\n #plog(\"DEBUG\", \"STREAM: \"+body)\r\n m = re.match(r\"(\\S+)\\s+(\\S+)\\s+(\\S+)\\s+(\\S+)?:(\\d+)(\\sREASON=\\S+)?(\\sREMOTE_REASON=\\S+)?(\\sSOURCE=\\S+)?(\\sSOURCE_ADDR=\\S+)?(\\s+PURPOSE=\\S+)?\", body)\r\n if not m:\r\n raise ProtocolError(\"STREAM event misformatted.\")\r\n ident,status,circ,target_host,target_port,reason,remote,source,source_addr,purpose = m.groups()\r\n ident,circ = map(int, (ident,circ))\r\n if not target_host: # This can happen on SOCKS_PROTOCOL failures\r\n target_host = \"(none)\"\r\n if reason: reason = reason[8:]\r\n if remote: remote = remote[15:]\r\n if source: source = source[8:]\r\n if source_addr: source_addr = source_addr[13:]\r\n if purpose:\r\n purpose = purpose.lstrip()\r\n purpose = purpose[8:]\r\n event = StreamEvent(evtype, ident, status, circ, target_host,\r\n int(target_port), reason, remote, source, source_addr,\r\n purpose, body)\r\n elif evtype == \"ORCONN\":\r\n m = re.match(r\"(\\S+)\\s+(\\S+)(\\sAGE=\\S+)?(\\sREAD=\\S+)?(\\sWRITTEN=\\S+)?(\\sREASON=\\S+)?(\\sNCIRCS=\\S+)?\", body)\r\n if not m:\r\n raise ProtocolError(\"ORCONN event misformatted.\")\r\n target, status, age, read, wrote, reason, ncircs = m.groups()\r\n\r\n #plog(\"DEBUG\", \"ORCONN: \"+body)\r\n if ncircs: ncircs = int(ncircs[8:])\r\n else: ncircs = 0\r\n if reason: reason = reason[8:]\r\n if age: age = int(age[5:])\r\n else: age = 0\r\n if read: read = int(read[6:])\r\n else: read = 0\r\n if wrote: wrote = int(wrote[9:])\r\n else: wrote = 0\r\n event = ORConnEvent(evtype, status, target, age, read, wrote,\r\n reason, ncircs, body)\r\n elif evtype == \"STREAM_BW\":\r\n m = re.match(r\"(\\d+)\\s+(\\d+)\\s+(\\d+)\", body)\r\n if not m:\r\n raise ProtocolError(\"STREAM_BW event misformatted.\")\r\n event = StreamBwEvent(evtype, body, *m.groups())\r\n elif evtype == \"BW\":\r\n m = re.match(r\"(\\d+)\\s+(\\d+)\", body)\r\n if not m:\r\n raise ProtocolError(\"BANDWIDTH event misformatted.\")\r\n read, written = map(long, m.groups())\r\n event = BWEvent(evtype, read, written, body)\r\n elif evtype in (\"DEBUG\", \"INFO\", \"NOTICE\", \"WARN\", \"ERR\"):\r\n event = LogEvent(evtype, body)\r\n elif evtype == \"NEWDESC\":\r\n ids_verb = body.split(\" \")\r\n ids = []\r\n for i in ids_verb:\r\n ids.append(i.replace(\"~\", \"=\").split(\"=\")[0].replace(\"$\",\"\"))\r\n event = NewDescEvent(evtype, ids, body)\r\n elif evtype == \"ADDRMAP\":\r\n # TODO: Also parse 
errors and GMTExpiry\r\n m = re.match(r'(\\S+)\\s+(\\S+)\\s+(\\\"[^\"]+\\\"|\\w+)', body)\r\n if not m:\r\n raise ProtocolError(\"ADDRMAP event misformatted.\")\r\n fromaddr, toaddr, when = m.groups()\r\n if when.upper() == \"NEVER\": \r\n when = None\r\n else:\r\n when = time.strptime(when[1:-1], \"%Y-%m-%d %H:%M:%S\")\r\n event = AddrMapEvent(evtype, fromaddr, toaddr, when, body)\r\n elif evtype == \"NS\":\r\n event = NetworkStatusEvent(evtype, parse_ns_body(data), data)\r\n elif evtype == \"NEWCONSENSUS\":\r\n event = NewConsensusEvent(evtype, parse_ns_body(data), data)\r\n elif evtype == \"BUILDTIMEOUT_SET\":\r\n m = re.match(\r\n r\"(\\S+)\\sTOTAL_TIMES=(\\d+)\\sTIMEOUT_MS=(\\d+)\\sXM=(\\d+)\\sALPHA=(\\S+)\\sCUTOFF_QUANTILE=(\\S+)\",\r\n body)\r\n set_type, total_times, timeout_ms, xm, alpha, quantile = m.groups()\r\n event = BuildTimeoutSetEvent(evtype, set_type, int(total_times),\r\n int(timeout_ms), int(xm), float(alpha),\r\n float(quantile), body)\r\n elif evtype == \"GUARD\":\r\n m = re.match(r\"(\\S+)\\s(\\S+)\\s(\\S+)\", body)\r\n entry, guard, status = m.groups()\r\n event = GuardEvent(evtype, entry, guard, status, body)\r\n elif evtype == \"TORCTL_TIMER\":\r\n event = TimerEvent(evtype, data)\r\n else:\r\n event = UnknownEvent(evtype, body)\r\n\r\n return event",
"def spoof_packet(packet):",
"def onMessageFrameEnd(self):",
"def test_decode_failure(self):\n\n def handle(event):\n ds = Dataset()\n ds.PatientName = \"Test^test\"\n return 0x0000, ds\n\n self.ae = ae = AE()\n ae.acse_timeout = 5\n ae.dimse_timeout = 0.4\n ae.network_timeout = 5\n ae.add_supported_context(ModalityPerformedProcedureStep)\n scp = ae.start_server(\n (\"localhost\", 11112), block=False, evt_handlers=[(evt.EVT_N_SET, handle)]\n )\n\n ae.add_requested_context(ModalityPerformedProcedureStep, ExplicitVRLittleEndian)\n assoc = ae.associate(\"localhost\", 11112)\n assert assoc.is_established\n\n class DummyReply:\n def getvalue(self):\n def test():\n pass\n\n return test\n\n class DummyMessage:\n is_valid_response = True\n AttributeList = DummyReply()\n Status = 0x0000\n STATUS_OPTIONAL_KEYWORDS = []\n\n class DummyDIMSE:\n msg_queue = queue.Queue()\n gotten = False\n\n def send_msg(*args, **kwargs):\n return\n\n def get_msg(self, *args, **kwargs):\n if not self.gotten:\n self.gotten = True\n return 1, DummyMessage()\n return None, None\n\n assoc._reactor_checkpoint.clear()\n while not assoc._is_paused:\n time.sleep(0.01)\n assoc.dimse = DummyDIMSE()\n assert assoc.is_established\n mod_list = Dataset()\n mod_list.PatientName = \"Test^test\"\n status, ds = assoc.send_n_set(\n mod_list, ModalityPerformedProcedureStep, \"1.2.840.10008.5.1.1.40.1\"\n )\n\n assert status.Status == 0x0110\n assert ds is None\n\n scp.shutdown()",
"def verify_state(self):\n # If we're in CONNECTING state - send 'o' message to the client\n if self.state == CONNECTING:\n self.handler.send_pack(proto.CONNECT)\n\n # Call parent implementation\n super(Session, self).verify_state()",
"def process_frame():\n return \"OK\"",
"def recv_frame(self):\r\n header_bytes = self._recv_strict(2)\r\n if not header_bytes:\r\n return None\r\n b1 = ord(header_bytes[0])\r\n fin = b1 >> 7 & 1\r\n rsv1 = b1 >> 6 & 1\r\n rsv2 = b1 >> 5 & 1\r\n rsv3 = b1 >> 4 & 1\r\n opcode = b1 & 0xf\r\n b2 = ord(header_bytes[1])\r\n mask = b2 >> 7 & 1\r\n length = b2 & 0x7f\r\n\r\n length_data = \"\"\r\n if length == 0x7e:\r\n length_data = self._recv_strict(2)\r\n length = struct.unpack(\"!H\", length_data)[0]\r\n elif length == 0x7f:\r\n length_data = self._recv_strict(8)\r\n length = struct.unpack(\"!Q\", length_data)[0]\r\n\r\n mask_key = \"\"\r\n if mask:\r\n mask_key = self._recv_strict(4)\r\n data = self._recv_strict(length)\r\n if traceEnabled:\r\n recieved = header_bytes + length_data + mask_key + data\r\n logger.debug(\"recv: \" + repr(recieved))\r\n\r\n if mask:\r\n data = ABNF.mask(mask_key, data)\r\n\r\n frame = ABNF(fin, rsv1, rsv2, rsv3, opcode, mask, data)\r\n return frame",
"def test_decode_failure(self):\n\n def handle(event):\n def test():\n pass\n\n return 0x0000, test\n\n self.ae = ae = AE()\n ae.add_requested_context(\n ModalityPerformedProcedureStepNotification, ExplicitVRLittleEndian\n )\n ae.add_supported_context(ModalityPerformedProcedureStepNotification)\n\n handlers = [(evt.EVT_N_EVENT_REPORT, handle)]\n scp = ae.start_server((\"localhost\", 11112), evt_handlers=handlers, block=False)\n\n ae.acse_timeout = 5\n ae.dimse_timeout = 5\n assoc = ae.associate(\"localhost\", 11112)\n\n class DummyReply:\n def getvalue(self):\n def test():\n pass\n\n return test\n\n class DummyMessage:\n is_valid_response = True\n EventReply = DummyReply()\n Status = 0x0000\n STATUS_OPTIONAL_KEYWORDS = []\n\n class DummyDIMSE:\n msg_queue = queue.Queue()\n gotten = False\n\n def send_msg(*args, **kwargs):\n return\n\n def get_msg(self, *args, **kwargs):\n if not self.gotten:\n self.gotten = True\n return 1, DummyMessage()\n return None, None\n\n assoc._reactor_checkpoint.clear()\n while not assoc._is_paused:\n time.sleep(0.01)\n assoc.dimse = DummyDIMSE()\n assert assoc.is_established\n\n # Event Information\n ds = Dataset()\n ds.PatientName = \"Test^test\"\n status, ds = assoc.send_n_event_report(\n ds,\n 1,\n ModalityPerformedProcedureStepNotification,\n \"1.2.840.10008.5.1.1.40.1\",\n )\n\n assert status.Status == 0x0110\n assert ds is None\n\n assoc.release()\n scp.shutdown()",
"def test_decode_trace(self):\n self.assertEqual(td.trace(), decoder.decode_trace(BytesIO(td.trace(True))))",
"def recv_data(self):\r\n while True:\r\n frame = self.recv_frame()\r\n if not frame:\r\n # handle error:\r\n # 'NoneType' object has no attribute 'opcode'\r\n raise WebSocketException(\"Not a valid frame %s\" % frame)\r\n elif frame.opcode in (ABNF.OPCODE_TEXT, ABNF.OPCODE_BINARY):\r\n return (frame.opcode, frame.data)\r\n elif frame.opcode == ABNF.OPCODE_CLOSE:\r\n self.send_close()\r\n return (frame.opcode, None)\r\n elif frame.opcode == ABNF.OPCODE_PING:\r\n self.pong(frame.data)",
"def Connected(self) -> bool:",
"def Connected(self) -> bool:",
"def packetReceived(self, ident, payload):\n pass",
"def parse_frame(data):\n test = binascii.hexlify(data)\n # defines the format of received LoRa frame header\n tap_header_format = 'bbhiibbbbib'\n phy_header_format = 'bbb'\n header_format = tap_header_format + phy_header_format\n print header_format\n header_len = struct.calcsize(header_format)\n data_len = len(data)\n if header_len > data_len:\n print 'packet too short'\n return (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,)\n else:\n # defines the frame format based on header and length of frame\n data_format = header_format + str(data_len - header_len) + 's'\n print data_format\n # print \"tap header: \", header_len\n # print \"data length: \", data_len\n # print \"test length: \", len(test)\n\n unpacked = struct.unpack(data_format, data)\n print unpacked\n # print '-----------------------------------------------------'\n # print \"bin \" + data\n # print 'hex ' + test\n return unpacked"
] |
[
"0.6529449",
"0.62154406",
"0.5931618",
"0.5910129",
"0.5869515",
"0.56991196",
"0.5643126",
"0.5555427",
"0.5497953",
"0.5459658",
"0.5446381",
"0.5324627",
"0.5304809",
"0.52132446",
"0.5212872",
"0.52032244",
"0.5189915",
"0.5188491",
"0.51803774",
"0.5159421",
"0.51445866",
"0.51332915",
"0.5123043",
"0.5103863",
"0.51018476",
"0.5092569",
"0.50842506",
"0.50842506",
"0.5081327",
"0.50690615"
] |
0.68718064
|
0
|
Returns a diff for field if it's changed and None otherwise.
|
def get_field_diff(self, field_name):
return self.diff.get(field_name, None)
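A minimal usage sketch (illustrative only, not from the original model): it assumes the object's `diff` attribute is a dict mapping changed field names to (old, new) tuples, as the snippet implies; the FieldTracker wrapper and sample values are hypothetical.

class FieldTracker:
    # Hypothetical holder whose `diff` dict records only the fields that changed.
    def __init__(self, diff):
        self.diff = diff

    def get_field_diff(self, field_name):
        return self.diff.get(field_name, None)

tracker = FieldTracker({"title": ("Draft", "Final")})
print(tracker.get_field_diff("title"))   # ('Draft', 'Final')
print(tracker.get_field_diff("status"))  # None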
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def get_updated_field(self):\n return None",
"def _field_was_changed(self):\n field_map = self._field_map\n for field in field_map.itervalues():\n if field.was_changed():\n return True\n return False",
"def getFieldValue (self, fieldname):\n return self._modified_values.get(fieldname, None) or self._original_values[fieldname]",
"def diff(self, old_field_sig):\n if not isinstance(old_field_sig, FieldSignature):\n raise TypeError('Must provide a FieldSignature to diff against, '\n 'not a %s.' % type(old_field_sig))\n\n changed_attrs = [\n attr\n for attr in (set(old_field_sig.field_attrs) |\n set(self.field_attrs))\n if self.get_attr_value(attr) != old_field_sig.get_attr_value(attr)\n ]\n\n # See if the field type has changed.\n old_field_type = old_field_sig.field_type\n new_field_type = self.field_type\n\n if old_field_type is not new_field_type:\n try:\n old_field = old_field_type(**old_field_sig.field_attrs)\n new_field = new_field_type(**self.field_attrs)\n\n field_type_changed = (old_field.get_internal_type() !=\n new_field.get_internal_type())\n except TypeError:\n # We can't instantiate those, so assume the field\n # type has indeed changed.\n field_type_changed = True\n\n if field_type_changed:\n changed_attrs.append('field_type')\n\n # FieldSignature.related_model is not a field attribute,\n # but we do need to track its changes.\n if old_field_sig.related_model != self.related_model:\n changed_attrs.append('related_model')\n\n return sorted(changed_attrs)",
"def field_changes(self):\n return self._field_changes",
"def diff(self):\n\t\tif len(self.v) < 4:\n\t\t\treturn None\n\t\tif self.poli == None:\n\t\t\tself.generar_polinomio()\n\t\tif x != None:\n\t\t\treturn diff(self.poli)(x)\n\t\treturn diff(self.poli)",
"def diff(self, old_field_sig):\n if not isinstance(old_field_sig, FieldSignature):\n raise TypeError('Must provide a FieldSignature to diff against, '\n 'not a %s.' % type(old_field_sig))\n\n changed_attrs = [\n attr\n for attr in (set(old_field_sig.field_attrs) |\n set(self.field_attrs))\n if self.get_attr_value(attr) != old_field_sig.get_attr_value(attr)\n ]\n\n # See if the field type has changed.\n old_field_type = old_field_sig.field_type\n new_field_type = self.field_type\n\n if old_field_type is not new_field_type:\n try:\n field_type_changed = (old_field_type().get_internal_type() !=\n new_field_type().get_internal_type())\n except TypeError:\n # We can't instantiate those, so assume the field\n # type has indeed changed.\n field_type_changed = True\n\n if field_type_changed:\n changed_attrs.append('field_type')\n\n # FieldSignature.related_model is not a field attribute,\n # but we do need to track its changes.\n if old_field_sig.related_model != self.related_model:\n changed_attrs.append('related_model')\n\n return sorted(changed_attrs)",
"def _diff(self, param, diff):\n pass",
"def get_changed() -> bool:\n return g.ledger.changed()",
"def get_diff(*args, **kwargs):\n return get_diff_async(*args, **kwargs).get_result()",
"def get_change(self, ):\n return self.get_parameter('change')",
"def svn_client_diff_summarize_t_prop_changed_get(svn_client_diff_summarize_t_self): # real signature unknown; restored from __doc__\n pass",
"def diff(self):\n if self.event == 'Create':\n old = ''\n else:\n # Get the Change just ahead of _this_ change because that has the\n # state of the Resource before this Change occurred.\n # TODO(nickpegg): Get rid of this if we change the behavior of\n # Change to store the previous version of the object\n old_change = Change.objects.filter(\n change_at__lt=self.change_at,\n resource_id=self.resource_id,\n resource_name=self.resource_name\n ).order_by(\n '-change_at'\n ).first()\n old = json.dumps(old_change._resource, indent=2, sort_keys=True)\n\n if self.event == 'Delete':\n current = ''\n else:\n resource = apps.get_model(self._meta.app_label, self.resource_name)\n obj = resource.objects.get(pk=self.resource_id)\n\n serializer_class = self.get_serializer_for_resource(\n self.resource_name)\n serializer = serializer_class(obj)\n current = json.dumps(serializer.data, indent=2, sort_keys=True)\n\n diff = \"\\n\".join(difflib.ndiff(\n old.splitlines(),\n current.splitlines()\n ))\n\n return diff",
"def partial_change(self):\n return self.attempted_change() and not all(self._get_field_data())",
"def _get_sum_delta(self, instance, mode, previous):\n new_value = self._get_value_from_instance(instance)\n if mode == CHANGING:\n old_value = self._get_value_from_instance(previous)\n if new_value - old_value == 0:\n # updates not needed\n return None\n return F(self.field) + new_value - old_value\n # mode is ENTERING or LEAVING, only new_value matters.\n return F(self.field) + new_value * mode",
"def get_diff(self):\n folders, files_data = self.get_root_data()\n missing_files, added_files, moved_files = self.get_files_diff(files_data)\n missing_folders, added_folders = self.get_folders_diff(folders)\n if any([missing_folders, added_folders, missing_files, added_files, moved_files]):\n return dict(\n removed_folders=missing_folders,\n added_folders=added_folders,\n removed_files=missing_files,\n added_files=added_files,\n moved_files=moved_files\n )\n return None",
"def hadChanged(self):\n return self.changed",
"def diff(self):\n return differential(self)",
"def changed_version(self):\r\n try:\r\n return CampaignChange.objects.get(campaign__pk=self.pk)\r\n except CampaignChange.DoesNotExist:\r\n return None",
"def diff(self, x = None):\n\t\tif len(self.v) < 2:\n\t\t\treturn None\n\t\tif self.poli == None:\n\t\t\tself.generar_polinomio()\n\t\tif x != None:\n\t\t\treturn diff(self.poli)(x)\n\t\treturn diff(self.poli)",
"def _get_tracked_fields(self, updated_fields):\n tracked_fields = []\n for name, field in self._fields.items():\n if getattr(field, 'string'):\n tracked_fields.append(name)\n\n if tracked_fields:\n return self.fields_get(tracked_fields)\n return {}",
"def getChanged(self,key,default=None):\n if default != None and key not in self.data:\n self.data[key] = default\n self.setChanged(key)\n return self.data.get(key)",
"def diff(self):\n return self.client.api.diff(self.id)",
"def get_diff(self, diff_id: int):\n return self.phab.differential.getdiff(diff_id=diff_id)",
"def changed(self):\r\n return self.value != self.previous_value",
"def changed(self):\r\n return self.value != self.previous_value",
"def _get_delta(self,\n instance: models.Model,\n mode: int,\n previous: Optional[models.Model] = None,\n ) -> Optional[expressions.Expression]:\n callback_name = f'_get_{self.aggregate.name.lower()}_delta'\n try:\n callback = getattr(self, callback_name)\n return callback(instance, mode, previous)\n except AttributeError: # pragma: no cover\n raise NotImplementedError()",
"def attempted_change(self):\n return any(self._get_field_data())",
"def has_changed(self):\n return self.get_old_value() != self.get_current_value()",
"def next(self):\n qs = self.search()\n self.instance = qs.exclude(pk__in=self.seen).first()\n\n if not self.instance:\n return None\n text1 = getattr(self.instance, self.field)\n text2 = text1.replace(self.search_term, self.replace)\n fname = \"%s.%s\" % (self.model_description, self.field)\n diff = difflib.unified_diff(text1.split(\"\\n\"),\n text2.split(\"\\n\"),\n fromfile=fname,\n tofile=fname,\n lineterm=\"\")\n self.diff = list(diff)\n return self.diff"
] |
[
"0.6489824",
"0.60495067",
"0.5896543",
"0.5798435",
"0.57247734",
"0.5697826",
"0.5696986",
"0.56731516",
"0.55192393",
"0.55002016",
"0.5471943",
"0.547045",
"0.54299885",
"0.535557",
"0.52621925",
"0.52565867",
"0.52482855",
"0.5244059",
"0.52217674",
"0.5214849",
"0.5204913",
"0.5111232",
"0.5102342",
"0.50943196",
"0.50648034",
"0.50648034",
"0.5034412",
"0.5025408",
"0.5010984",
"0.49957228"
] |
0.7749743
|
1
|
Check if default_backend() supports a given cipher and mode combination
|
def test_compatibility(cipher, mode):
    # Needs the concrete algorithm object (not the interface) for validate_for_algorithm to work.
    cipher_obj = cipher_params(cipher, os.urandom(length_by_cipher[cipher]))[0]
    if cipher_obj.name == "ChaCha20":
        # ChaCha20 is a stream cipher, so no block mode is required.
        return True
    mode_object = None
    if mode == 'CBC':
        mode_object = modes.CBC(os.urandom(16))
    elif mode == 'GCM':
        mode_object = modes.GCM(os.urandom(16), os.urandom(16))
    else:
        return False
    return default_backend().cipher_supported(cipher_obj, mode_object)
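For comparison, the same support check can be made directly against the cryptography backend without the module-local cipher_params()/length_by_cipher helpers used above; this standalone sketch assumes an AES-256 key and is illustrative rather than part of the original test.

import os
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.ciphers import algorithms, modes

backend = default_backend()
aes = algorithms.AES(os.urandom(32))  # 256-bit key
print(backend.cipher_supported(aes, modes.CBC(os.urandom(16))))  # typically True
print(backend.cipher_supported(aes, modes.GCM(os.urandom(16))))  # typically True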
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def get_encryption_mode():\r\n\r\n msg = 'Do you want to encrypt ({0}) or decrypt ({1})? '.format(\r\n cipher_functions.ENCRYPT, cipher_functions.DECRYPT)\r\n mode = input(msg)\r\n while not (mode == cipher_functions.ENCRYPT or\r\n mode == cipher_functions.DECRYPT):\r\n print('Invalid mode.')\r\n mode = input(msg)\r\n return mode",
"def detection_oracle(ct):\n return Mode.ECB if is_ecb(ct, 16) else Mode.CBC",
"def _is_cas_backend(session):\n if session:\n backend = session.get(BACKEND_SESSION_KEY)\n return backend == cas_backend\n return None",
"def get_backend():\n from cryptography.hazmat.backends import default_backend\n return default_backend()",
"def isDefaultMode():\n\treturn 0",
"def in_easy_mode(mode: str) -> bool:\n return mode == EASY",
"def encrypted(self):\n return self.encryption_type is not None",
"def detect_mode(ciphertext: bytes) -> str:\n threshold_chunks = 0\n return \"ECB\" if count_identical_chunks(ciphertext) > threshold_chunks else \"CBC\"",
"def is_local_backend(backend):\n return backend.configuration().local",
"def crypto_key_backend(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"crypto_key_backend\")",
"def aes_encryption(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"aes_encryption\")",
"def DualMode(self) -> bool:",
"def is_basicaer_provider(backend):\n return isinstance(backend.provider(), BasicAerProvider)",
"def test_preferred_cipher(host, method):\n ssl_method = getattr(SSL, method.replace('.', '_') + '_METHOD')\n context = SSL.Context(ssl_method)\n context.set_cipher_list(\"ALL:COMPLEMENTOFALL\")\n\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n try:\n sock = SSL.Connection(context, sock)\n sock.connect(host.address)\n\n headers = make_request(sock, host.server)\n\n preferred = sock.cipher()\n host.report_preferred(method, preferred[0], preferred[2])\n except SSL.Error as e:\n pass\n finally:\n sock.close()",
"def sslmode(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"sslmode\")",
"def test_get_cipher_name_before_connect(self):\n ctx = Context(SSLv23_METHOD)\n conn = Connection(ctx, None)\n assert conn.get_cipher_name() is None",
"def __get_verify_mode(self):\n ...",
"def is_strategy_supported(self, mode: CalculationStrategy) -> bool:\n return mode == self.calculation_strategy",
"def determine_default_mode(operator_tag, is_sha_digest):\n version_supports_restricted = check_if_tag_supports_restricted(operator_tag, is_sha_digest)\n if operator_tag == \"\" or version_supports_restricted:\n return MODE_RESTRICTED\n\n return MODE_ALL",
"def is_server_crypto(self, username):\n try:\n user_option = super(UserOptionsManager, self).get(\n email=username, option_key=KEY_SERVER_CRYPTO)\n return bool(int(user_option.option_val))\n except UserOptions.DoesNotExist:\n raise CryptoOptionNotSetError",
"def ssl(self):\n return self.protocol != \"SASL_PLAINTEXT\"",
"def __check_mode(self):\n self.mode[\"auto_mode\"] = self.communications.get_mode()",
"def allowed(self, request):\n try:\n storage_backend = stx_api.sysinv.get_storage_backend(request)\n if stx_api.sysinv.STORAGE_BACKEND_CEPH in storage_backend:\n return True\n except Exception:\n pass\n return False",
"def _get_active_backend(\n prefer=default_parallel_config[\"prefer\"],\n require=default_parallel_config[\"require\"],\n verbose=default_parallel_config[\"verbose\"],\n):\n\n backend_config = getattr(_backend, \"config\", default_parallel_config)\n\n backend = _get_config_param(\n default_parallel_config['backend'], backend_config, \"backend\"\n )\n prefer = _get_config_param(prefer, backend_config, \"prefer\")\n require = _get_config_param(require, backend_config, \"require\")\n verbose = _get_config_param(verbose, backend_config, \"verbose\")\n\n if prefer not in VALID_BACKEND_HINTS:\n raise ValueError(\n f\"prefer={prefer} is not a valid backend hint, \"\n f\"expected one of {VALID_BACKEND_HINTS}\"\n )\n if require not in VALID_BACKEND_CONSTRAINTS:\n raise ValueError(\n f\"require={require} is not a valid backend constraint, \"\n f\"expected one of {VALID_BACKEND_CONSTRAINTS}\"\n )\n if prefer == 'processes' and require == 'sharedmem':\n raise ValueError(\n \"prefer == 'processes' and require == 'sharedmem'\"\n \" are inconsistent settings\"\n )\n\n explicit_backend = True\n if backend is None:\n\n # We are either outside of the scope of any parallel_(config/backend)\n # context manager or the context manager did not set a backend.\n # create the default backend instance now.\n backend = BACKENDS[DEFAULT_BACKEND](nesting_level=0)\n explicit_backend = False\n\n # Try to use the backend set by the user with the context manager.\n\n nesting_level = backend.nesting_level\n uses_threads = getattr(backend, 'uses_threads', False)\n supports_sharedmem = getattr(backend, 'supports_sharedmem', False)\n # Force to use thread-based backend if the provided backend does not\n # match the shared memory constraint or if the backend is not explicitely\n # given and threads are prefered.\n force_threads = (require == 'sharedmem' and not supports_sharedmem)\n force_threads |= (\n not explicit_backend and prefer == 'threads' and not uses_threads\n )\n if force_threads:\n # This backend does not match the shared memory constraint:\n # fallback to the default thead-based backend.\n sharedmem_backend = BACKENDS[DEFAULT_THREAD_BACKEND](\n nesting_level=nesting_level\n )\n # Warn the user if we forced the backend to thread-based, while the\n # user explicitely specified a non-thread-based backend.\n if verbose >= 10 and explicit_backend:\n print(\n f\"Using {sharedmem_backend.__class__.__name__} as \"\n f\"joblib backend instead of {backend.__class__.__name__} \"\n \"as the latter does not provide shared memory semantics.\"\n )\n # Force to n_jobs=1 by default\n thread_config = backend_config.copy()\n thread_config['n_jobs'] = 1\n return sharedmem_backend, thread_config\n\n return backend, backend_config",
"def getCipherImplementation(self):\r\n if not self._writeState.encContext:\r\n return None\r\n return self._writeState.encContext.implementation",
"def check_config_mode(self):\n return False",
"def detect_encryption_oracle():\n\n pt = 'YELLOW SUBMARINE' * 3\n ct = encryption_oracle(pt)\n return ct[16:32] == ct[32:48]",
"def patch_crypto_be_discovery():\n\n from cryptography.hazmat import backends\n\n try:\n from cryptography.hazmat.backends.commoncrypto.backend import \\\n backend as be_cc\n except ImportError:\n be_cc = None\n\n try:\n from cryptography.hazmat.backends.openssl.backend import \\\n backend as be_ossl\n except ImportError:\n be_ossl = None\n\n backends._available_backends_list = [\n be for be in (be_cc, be_ossl) if be is not None\n ]",
"def provider(provider):\n if provider in (\"alditalk\", \"netzclub\", \"congstar\"):\n return True\n else:\n return False",
"def detect_backend():\n try:\n from termpixels.unix import UnixBackend\n return UnixBackend()\n except:\n try:\n from termpixels.win32_vt import Win32VtBackend\n return Win32VtBackend()\n except Exception as e:\n raise e\n from termpixels.win32 import Win32Backend\n return Win32Backend()"
] |
[
"0.6207802",
"0.60368025",
"0.5887069",
"0.5850579",
"0.5812958",
"0.5783021",
"0.57796377",
"0.5690944",
"0.5689572",
"0.56412655",
"0.55962986",
"0.5582347",
"0.5557559",
"0.55446714",
"0.55280775",
"0.5516546",
"0.54812735",
"0.54729825",
"0.5470359",
"0.54567057",
"0.5441087",
"0.5439313",
"0.5428473",
"0.542313",
"0.54201305",
"0.54054326",
"0.54019636",
"0.5400874",
"0.5373648",
"0.53733677"
] |
0.672798
|
0
|
clip(arr,thresh=3.5) Simple sigma-clipping algorithm. Returns avg,std of the clipped array.
|
def clip(arr,thresh=3.5):
a = numpy.array(arr)
avg,std = a.mean(),a.std()
while 1:
avg,std,size = a.mean(),a.std(),a.size
a = a[abs(a-avg)<thresh*std]
if size==a.size:
break
return avg,std
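A quick illustrative run (synthetic data, not from the original source): a mostly Gaussian sample with a few gross outliers, where the clipped mean and standard deviation should land close to the underlying N(0, 1) values.

import numpy
rng = numpy.random.default_rng(0)
sample = numpy.concatenate([rng.normal(0.0, 1.0, 1000), [50.0, -40.0, 75.0]])
avg, std = clip(sample, thresh=3.5)
print(avg, std)  # roughly 0 and 1 once the outliers are rejected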
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def clip(arr,thresh=3.5):\n\ta = arr.copy()\n\n\tavg,std = a.mean(),a.std()\n\twhile 1:\n\t\tsize = a.size\n\t\ta = a[abs(a-avg)<thresh*std]\n\t\tavg,std = a.mean(),a.std()\n\t\tif size==a.size:\n\t\t\tbreak\n\treturn avg,std",
"def sigma_clip(arr,sigma=3):\n\n cliparr = range(len(arr)) # initialize\n arr = n.append(arr,[1]) # append superfluous item to trigger loop\n while len(cliparr) != len(arr):\n arr = arr[cliparr]\n mean = arr.mean()\n std = arr.std()\n cliparr = n.where((arr < mean + sigma*std) & (arr > mean - sigma*std) & (arr != 0) )[0]\n# print 'Clipping %d from array of length %d' % (len(arr) - len(cliparr), len(arr))\n return mean - sigma*std, mean + sigma*std",
"def sigma_clipping(self, low_thresh=3, high_thresh=3,\n func='mean', dev_func='std', **kwd):\n\n # Remove in 3.0\n _ = kwd.pop('use_astropy', True)\n\n self.data_arr.mask = sigma_clip(self.data_arr.data,\n sigma_lower=low_thresh,\n sigma_upper=high_thresh,\n axis=kwd.get('axis', 0),\n copy=kwd.get('copy', False),\n maxiters=kwd.get('maxiters', 1),\n cenfunc=func,\n stdfunc=dev_func,\n masked=True,\n **kwd).mask",
"def clipstats(yarr, thresh, iter):\n mean = yarr.mean()\n std = yarr.std()\n for i in range(iter):\n mask = (abs(yarr - mean) < thresh * std)\n if mask.sum() <= 1:\n return yarr.mean(), yarr.std()\n mean = yarr[mask].mean()\n std = yarr[mask].std()\n\n return mean, std",
"def clip2(data, robust=True):\n \n for j in xrange(params.sc_passes):\n mask = data.mask*1\n \n for i in range(data.shape[1]):\n i0 = max([0, i-params.sc_bp_window_f/2])\n i1 = min([i+params.sc_bp_window_f/2, data.shape[1]-1])\n try:\n assert(robust)\n mn, st = robust.mean(data[:,i0:i1+1]), robust.std(data[:,i0:i1+1])\n except:\n mn, st = np.ma.mean(data[:,i0:i1+1]), np.ma.std(data[:,i0:i1+1])\n bad = np.where(np.abs(data[:,i]-1) > params.sigma*st)[0]\n mask[bad,i] |= True\n \n data.mask = mask*1\n return data.mask",
"def clip_outliers(df, std_threshold: float = 3):\n df_std = df.std(axis=0, skipna=True)\n df_mean = df.mean(axis=0, skipna=True)\n\n lower = df_mean - (df_std * std_threshold)\n upper = df_mean + (df_std * std_threshold)\n df2 = df.clip(lower=lower, upper=upper, axis=1)\n\n return df2",
"def _clip_feature(self, feature):\n\n w = self.clip_factor\n for ic in range(self.data_shape[0]):\n if len(feature[ic]) > 0:\n minv = self.feature_mean[ic] - w * self.feature_std[ic]\n maxv = self.feature_mean[ic] + w * self.feature_std[ic]\n if minv != maxv:\n feature[ic] = np.clip(feature[ic], minv, maxv)\n #feature[ic] = self._mad_based_outliers(feature[ic],minv,maxv)\n return feature",
"def clip_signal(signal, clipping_thresh=1000, clipped_value=215):\n index_factor = rate / CHUNK\n while index_factor * np.argmax(signal) >= clipping_thresh:\n signal[np.argmax(signal)] = 0\n return signal",
"def sigma_clip(data, max_sigma):\n mn = np.mean(data)\n std = np.std(data)\n diff = data - mn\n sigmas = diff / std\n mask = np.abs(sigmas) < max_sigma\n return mask",
"def quartiled_mean(arr, clip=25):\n if clip >= 50:\n return None\n arr = np.array(arr)\n arr_len = arr.size\n left_index = int((clip) / 100.0 * arr_len)\n right_index = int((100.0 - clip) / 100.0 * arr_len)\n arr = np.sort(arr)\n arr = arr[left_index:right_index + 1]\n # print(\"Out of {}, only middle {} [{}, {}] are considered\".\n # format(arr_len, arr.size, left_index, right_index))\n return arr.sum() / arr.size",
"def clip(data,clip):\n data[data > clip] = clip\n data[data < -clip] = -clip\n return data",
"def blurthresh(arrayin,thresh=0.1e0,blur=8):\r\n arrayout = np.array(arrayin,dtype=np.float64)\r\n arrayout = ndimage.gaussian_filter(arrayout,blur)\r\n thresh2 = np.max(np.abs(arrayout))*thresh\r\n arrayout = np.array(1.0 * (np.abs(arrayout) > thresh2),dtype=arrayin.dtype) \r\n return arrayout",
"def make_clipping_av(ts, window):\n try:\n ts = core.to_np_array(ts)\n except ValueError:\n raise ValueError('make_clipping_av expects ts to be array-like')\n\n if not core.is_one_dimensional(ts):\n raise ValueError('make_clipping_av expects ts to be one-dimensional')\n\n if not isinstance(window, int):\n raise ValueError('make_clipping_av expects window to be an integer')\n\n av = np.zeros(len(ts) - window + 1)\n\n max_val, min_val = np.max(ts), np.min(ts)\n for i in range(len(av)):\n num_clip = 0.0\n for j in range(window):\n if ts[i + j] == max_val or ts[i + j] == min_val:\n num_clip += 1\n av[i] = num_clip\n\n min_val = np.min(av)\n av -= min_val\n\n max_val = np.max(av)\n if max_val == 0:\n av = np.zeros(len(av))\n else:\n av = 1 - av / max_val\n\n return av",
"def outlier(arr, as_nan=True, thresh=0.05, show=False, report=False):\n if len(arr) < 3:\n return arr\n if show:\n plt.subplot(1,2,1) # Plot part 1 first\n plt.plot(np.random.random(len(arr)), thing1, 'o', color='blue',\n markeredgecolor='none', alpha=0.4)\n plt.title('With outliers')\n \n med_res = [(np.median(arr)-i)**2 for i in arr] \n med_res_ix = [u for u in med_res] # Create index\n arr_copy = [u for u in arr] # The copy will be edited first\n stds = []\n med_res.sort(reverse=True) # Largest to smallest\n # print(med_res[:10])\n numPts = max([int(len(arr)*thresh), 2])\n # print('Testing largest %i residuals' %numPts)\n \n # Pretend to remove 10% of points\n for i in range(numPts): #for i in range(int(len(arr)*.1)): #\n stds.append(np.std(arr_copy))\n rm_ix = med_res_ix.index(med_res[i])\n try:\n rm = arr[rm_ix]\n except:\n print('tried to remove ix %i but arr is len %i'\n %(rm_ix, len(arr)))\n try: \n arr_copy.pop(arr_copy.index(rm))\n except:\n print('tried to remove %f but not in arr_copy' %rm)\n \n # Find the greatest d(std)\n dstd = np.diff(stds)\n dstd = [abs(i) for i in dstd]\n rm_to = list(dstd).index(max(dstd))+1 # len(diff) = len(arr)-1\n\n #print('Mean d(std): %.3f, removing all above %.3f (%i pts)'\n # %(np.mean(dstd), dstd[rm_to-1], rm_to))\n \n for i in range(rm_to):\n arr[med_res_ix.index(med_res[i])] = np.nan\n \n if show: # Show\n plt.subplot(1,2,2)\n plt.plot(np.random.random(len(arr)), arr, 'o',\n color='red', markeredgecolor='none', alpha=0.4)\n plt.title('Without outliers')\n plt.show()\n if as_nan:\n return arr\n return [i for i in arr if not pd.isnull(i)] # Else just eliminate it.",
"def meanStdCut(array, cut=None):\n\n array = np.array(array)\n\n if cut == None: return array.mean(), array.std()\n\n array = array[np.abs(array - array.mean()) < cut*array.std()]\n return array.mean(), array.std()",
"def blurthresh_mask(arrayin,thresh=0.1e0,blur=8):\r\n arrayout = np.array(arrayin,dtype=np.float64)\r\n arrayout = ndimage.gaussian_filter(arrayout,blur)\r\n thresh2 = np.max(np.abs(arrayout))*thresh\r\n arrayout = np.array(1.0 * (np.abs(arrayout) > thresh2),dtype=np.bool) \r\n return arrayout",
"def clip(a, a_min, a_max):\n return _make.clip(a, a_min, a_max)",
"def isigclip( valarray, sigclip, igood=[], maxiter=10, thisiter=0 ) :\n if not type(valarray)==np.ndarray :\n valarray = np.array( valarray )\n if not len(igood) : igood = range(len(valarray))\n \n Ngood = len(igood)\n mnval = np.mean( valarray[igood] )\n sigma = np.std( valarray[igood] )\n igood = np.where( (np.abs(valarray-mnval)<(sigclip*sigma)) )[0]\n\n # import pdb; pdb.set_trace()\n if len(igood) == Ngood : return( igood )\n if thisiter>=maxiter : \n print(\"WARNING : Stopping after %i recursions\"%maxiter)\n return( igood )\n thisiter+=1\n igood = isigclip( valarray, sigclip, igood=igood, maxiter=maxiter, thisiter=thisiter )\n return( igood )",
"def _thresh_clip(self, xmin, ymin, zmin, xmax, ymax, zmax):\n\n for p in self.points:\n if p.y > ymax or p.y < ymin:\n print p, 1\n self.raster = False\n break\n elif p.x > xmax or p.x < xmin:\n print p, 2\n self.raster = False\n break\n elif p.z > zmax or p.z < zmin:\n print p, 3\n self.raster = False\n break",
"def make_clipper(lims):\n lims = np.array(lims)\n\n low, high = lims[..., 0], lims[..., 1]\n if lims.shape[-1] != 2:\n raise ValueError(\"Trailing shape must be (2,)\")\n elif not np.all(low <= high):\n raise ValueError(\"Upper values must meet or exceed lower values.\")\n\n def clipper(x):\n x = np.where(x < low, low, x)\n x = np.where(x > high, high, x)\n return x\n\n return clipper",
"def np_clip_(x, min=None, max=None):\n return np.clip(x, min, max, out=x)",
"def _scale_array(arr, clip=True):\n if clip:\n scaled = np.clip(arr, 0, 255)\n else:\n scale_range = (max([arr.min(), 0]), min([arr.max(), 255]))\n scaled = _min_max_scale(arr, new_range=scale_range)\n\n return scaled",
"def sigma_clip(x, nsigma=3):\n\n m = np.ones(len(x)) == 1\n newx = x*1\n oldm = np.array([False])\n i = 0\n while sum(oldm) != sum(m):\n oldm = m*1\n sigma = np.std(newx)\n m &= np.abs(np.median(newx) - x)/sigma < nsigma\n # m &= m\n newx = x[m]\n i += 1\n return x[m], m",
"def imsave_clip(outfile,img):\n img = img_as_float(img).clip(0.,1.)\n io.imsave(outfile,img)",
"def std(X,trimming=0):\n \n if trimming==0:\n s = np.power(np.var(X,axis=0),.5)\n s = np.array(s).reshape(-1)\n else: \n var = sps.trim_mean(np.square(X - sps.trim_mean(X,trimming,0)),\n trimming,0)\n s = np.sqrt(var) \n return s",
"def clip(wavelength, spectra, threshold, substitute=None):\n\n if substitute == None: # remove threshold violations\n mask = np.any(spectra > threshold, axis=1)\n spectra = spectra[~mask, :]\n wavelength = wavelength[~mask]\n else: # substitute threshold violations with a value\n spectra[spectra > threshold] = substitute\n return wavelength, spectra\n\n return wavelength, spectra",
"def clip_output(original, warped, mode, cval, clip):\n if not clip:\n return\n\n min_val = np.nanmin(original)\n max_val = np.nanmax(original)\n nan_cval = np.isnan(cval)\n if mode == 'constant':\n if nan_cval:\n preserve_cval = True\n else:\n preserve_cval = min_val <= cval <= max_val\n else:\n preserve_cval = False\n\n if preserve_cval:\n if nan_cval:\n cval_mask = np.isnan(warped)\n else:\n cval_mask = warped == cval\n else:\n cval_mask = None\n\n np.clip(warped, min_val, max_val, out=warped)\n if cval_mask is not None:\n warped[cval_mask] = cval",
"def testStatsStdevclip(self):\n image2 = self.image.Factory(self.image, True)\n\n stats = afwMath.makeStatistics(image2, afwMath.STDEVCLIP | afwMath.NPOINT | afwMath.SUM)\n self.assertEqual(stats.getValue(afwMath.STDEVCLIP), 0)\n #\n # Check we get the correct sum even when clipping\n #\n self.assertEqual(stats.getValue(afwMath.NPOINT)*\n afwMath.makeStatistics(image2, afwMath.MEAN).getValue(),\n stats.getValue(afwMath.SUM))",
"def clip_filters(W, threshold=0.5, pad=3):\n W_clipped = []\n for w in W:\n L, A = w.shape\n entropy = np.log2(4) + np.sum(w * np.log2(w + 1e-7), axis=1)\n index = np.where(entropy > threshold)[0]\n if index.any():\n start = np.maximum(np.min(index) - pad, 0)\n end = np.minimum(np.max(index) + pad + 1, L)\n W_clipped.append(w[start:end, :])\n else:\n W_clipped.append(w)\n\n return W_clipped",
"def remove_outlier(data, Nstd=2, mask= None): #---- remove extreme data\r\n M = data.shape[0]; \r\n if mask is None:\r\n mask = np.ones((M,M)); # if mask not existed\r\n for k in range(0,M): mask[k,k]= 0; # create one and remove diagnol\r\n N = np.sum(mask); # total effective data number \r\n sumx= np.sum(data* mask);\r\n mean= sumx/ N; # new mean\r\n sum_square = np.sum(((data-mean)*mask)**2); #\r\n std = np.sqrt( sum_square/ (N-1) ); # new standard deviation\r\n #--- ---\r\n larger = data > (mean+ Nstd*std); # data too large\r\n smaller= data < (mean- Nstd*std); # data too small\r\n maskh = mask.copy();\r\n maskh[larger] = 0; maskh[smaller]= 0; # remove outlier data\r\n return maskh, mean"
] |
[
"0.88836473",
"0.74306214",
"0.70505095",
"0.70455706",
"0.6405306",
"0.63617873",
"0.63274765",
"0.63037956",
"0.628521",
"0.6187328",
"0.6141885",
"0.6097245",
"0.6067281",
"0.6067228",
"0.60558444",
"0.6035898",
"0.60233146",
"0.600569",
"0.59593225",
"0.5902682",
"0.5822035",
"0.5819187",
"0.57782525",
"0.5769208",
"0.57658",
"0.57610184",
"0.574428",
"0.5734878",
"0.572387",
"0.57181245"
] |
0.8775539
|
1
|
check_star(peaks,data) Determines whether or not a slit looks like it is a starbox. This is done by simply checking the 3 pixels bordering each peak and ensuring that none are less than half of the peak (i.e. that the FWHM>7 pixels). Returns True if more than half of the peaks look like boxes, otherwise returns False.
|
def check_star(peaks,data):
    star = 0
    for i in peaks:
        peak_val = data[i]
        if i<3 or i+4>data.size:
            continue
        # Mean over the peak and its 3 neighbouring pixels on each side.
        mean = data[i-3:i+4].mean()
        # A star box is flat-topped, so the local mean stays within 10% of the peak.
        if (peak_val-mean)<0.1*peak_val:
            star += 1
    if star*2>peaks.size:
        return True
    else:
        return False
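An illustrative check on synthetic profiles (assumed inputs, not part of the original pipeline): a flat-topped "star box" keeps the pixels around its peak near the peak value, while a narrow arc-like line falls off quickly, so only the first is flagged as a star.

import numpy
boxy = numpy.zeros(50)
boxy[20:29] = 100.0                      # flat top, FWHM well above 7 pixels
narrow = numpy.zeros(50)
narrow[24:27] = [10.0, 100.0, 10.0]      # sharp single-pixel peak
print(check_star(numpy.array([24]), boxy))    # True
print(check_star(numpy.array([25]), narrow))  # False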
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def measure_stars(data, mod, hmin=5, fmin=1E3, pmax=7E4,\n qmax=.5, cmin=-1., cmax=.1, ncpu=None, focus_only=False):\n\n # Get filtered images to get peak measurements\n filt_image, max_4sum = _filter_images(data, hmin)\n # Find source candidates\n xs, ys = _find_sources(data, filt_image, max_4sum, fmin, pmax)\n skies = estimate_all_backgrounds(xs, ys, 8.5, 13.5, data)\n\n # Give buffer of 10% for peak pixel tolerance\n max_peak_val = _max_peakiness(mod) + .1\n\n # Throw out sources that are more peaked than PSF allows\n # Throwing bad ones now improves performance\n mask = reject_sources(xs, ys, data, max_4sum, skies, max_peak_val)\n xs = xs[mask]\n ys = ys[mask]\n skies = skies[mask]\n\n if isinstance(mod, SlowGriddedFocusPSFModel):\n foc, foc_tbl = focus_peak(xs, ys, data, max_4sum[ys, xs],\n skies, mod, ncpu)\n mod.interp_focus(foc)\n if focus_only:\n foc_tbl.meta['focus'] = foc\n return foc_tbl\n\n # Measure the remaining candidates\n\n tbl = do_stars_mp(xs, ys, skies, mod, data, ncpu)\n # Last rejection pass\n final_good_mask = (tbl['q']<qmax) & (tbl['cx']<cmax) & (tbl['cx']>cmin)\n output_tbl = tbl[final_good_mask]\n rej = np.sum(~final_good_mask)\n print('Rejected {} more sources after qmax and excess clip'.format(rej))\n\n return output_tbl",
"def test_find_peaks_withnoise(self):\n sigmas = [5.0, 3.0, 10.0, 20.0, 10.0, 50.0]\n num_points = 500\n test_data, act_locs = _gen_gaussians_even(sigmas, num_points)\n widths = np.arange(0.1, max(sigmas))\n noise_amp = 0.07\n np.random.seed(18181911)\n test_data += (np.random.rand(num_points) - 0.5)*(2*noise_amp)\n found_locs = find_peaks_cwt(test_data, widths, min_length=15,\n gap_thresh=1, min_snr=noise_amp / 5)\n\n np.testing.assert_equal(len(found_locs), len(act_locs), 'Different number' +\n 'of peaks found than expected')\n diffs = np.abs(found_locs - act_locs)\n max_diffs = np.array(sigmas) / 5\n np.testing.assert_array_less(diffs, max_diffs, 'Maximum location differed' +\n 'by more than %s' % (max_diffs))",
"def can_place_star(self, row, col):\n\n # check neighbors (no start can neighbor another)\n for i, j in itertools.product(range(-1, 2), range(-1, 2)):\n r = row + i\n c = col + j\n\n if (i == 0 and j == 0) or not self._board.is_valid_cell(r, c):\n continue\n\n if self[r][c]:\n return False\n\n # check counts for areas, rows, cols\n\n # determine if we need to add one based on whether the currect cell has a star already\n add = 0 if self[row][col] else 1\n\n sol_sum = lambda it: sum(map(bool, it))\n\n return all(\n count + add <= self.stars\n for count in (\n sol_sum(self[row]), # stars in the row\n sol_sum(self[i][col] for i in range(self.size)), # stars in the column\n sol_sum(\n self[i][j] for i, j in self._board.area_for_cell(row, col)\n ), # stars in the area\n )\n )",
"def is_valid_grasp(point, focus_mask):\n ymid = int(point[0])\n xmid = int(point[1])\n d = cfg.CHECK_RANGE\n\n check_box = focus_mask.data[ymid - d:ymid + d, xmid - d:xmid + d]\n num_nonzero = np.sum(check_box > 0)\n\n fraction_nonzero = (num_nonzero * 1.0)/((2 * d)**2)\n return fraction_nonzero < 0.2",
"def id_slits(flat_data,findstars=True):\n\n\ty_axis = flat_data.shape[0]\n\n\tdata = flat_data.mean(axis=1)\n\td = data.copy()\n\n\t\"\"\"\n\tThe slits tend to be demarcated by when the sorted data begins to\n\t grow at an accelerating rate; the first derivative tends to be an\n\t acceptable proxy, though. The edges are masked for bad pixels/star\n\t boxes.\n\t\"\"\"\n\tsrt = scipy.sort(d)\n\tbrk = signal.convolve(srt,[-1.,1.],mode='same')\n\tpix = brk[brk.size/10:brk.size*9/10].argmin()+brk.size/10\n\n\tlowvals = srt[pix]\n\n\td[d<lowvals] = 0.\n\td[d>0.] = 1.\n\n\n\t\"\"\"\n\tThis needs to be tweaked to properly account for slits at the top and\n\t bottom of the mask.\n\t\"\"\"\n\tedges = signal.convolve(d,[-1.,1.],mode='same')\n\tleft = scipy.where(edges<0)[0]\n\tright = scipy.where(edges>0)[0]\n\n\tslits = []\n\tfor i in range(left.size):\n\t\tslits.append([left[i],right[i]-1])\n\n\tif findstars is False:\n\t\treturn slits\n\n\t\"\"\"\n\tThe star boxes are identified by locating where the slit amplitudes\n\t begin to spike. The current criterion is that a slit amplitude is\n\t more than one sigma greater than the previous slit.\n\t\"\"\"\n\tamps = []\n\tfor l,r in slits:\n\t\tamps.append(scipy.median(data[l:r]))\n\tamps = scipy.asarray(amps)\n\targs = amps.argsort()\n\tamps.sort()\n\n\tindx = amps.size-1\n\tfor i in range(amps.size/2,amps.size):\n\t\tstd = amps[:i].std()\n\t\tif amps[i]>amps[i-1]+std:\n\t\t\tindx = i\n\t\t\tbreak\n\tstarindx = args[indx:]\n\tstarindx.sort()\n\tstars = []\n\tfor i in starindx:\n\t\tstars.append(slits[i])\n\tfor i in starindx[::-1]:\n\t\tdel slits[i]\n\n\treturn slits,stars",
"def _construct_star_mask(self):\n # Produce a separate star mask for EACH image in the stack\n starMasks = self._produce_individual_star_masks()\n\n # Accumulate these pixels into the final star mask\n starMask = starMasks.sum(axis=0)\n\n # Cleanup temporary variables\n del starMasks\n\n # Compute final star mask based on which pixels were masked more than\n # 10% of the time.\n numImg = self.numberOfImages\n starMask = (starMask > np.ceil(0.1*numImg)).astype(float)\n\n # Check that at least one star was detected (more than 15 pixels masked)\n if np.sum(starMask) > 15:\n # Now smooth the star mask with a gaussian to dialate it\n starMask1 = ndimage.gaussian_filter(starMask, (4, 4))\n\n # Grab any pixels (and indices) above 0.05 value post-smoothing\n starMask = (starMask1 > 0.05)\n numInStarPix = np.sum(starMask)\n\n # Notify user how many \"in-star pixels\" were masked\n print('\\n\\nMasked a total of {0} pixels'.format(numInStarPix))\n else:\n print('\\n\\nNo pixels masked as \"in-star\" pixels')\n starMask = False\n\n return starMask",
"def bad_pix_flat_detect(flat_input,n_sigma=7,min_cutoff=0.5,max_cutoff=1.5):\n \n if isinstance(flat_input,str):\n flat = fits.getdata(flat_input)\n elif isinstance(flat_input,np.ndarray):\n flat = flat_input\n \n med = np.nanmedian(flat)\n mad = np.nanmedian(np.abs(flat-med))\n \n hot_pix = (flat - med) > (n_sigma*mad*1.4826) # number converts MAD to St.Dev.\n cold_pix = (flat - med) < (-n_sigma*mad*1.4826) # number converts MAD to St.Dev.\n \n # Apply the cutoffs in response\n hot_pix += (flat > max_cutoff)\n cold_pix += (flat < min_cutoff)\n \n bad_pix = hot_pix + cold_pix\n \n print('Found: '+str(bad_pix.sum())+' bad pixels from flat')\n \n return bad_pix",
"def check_win():\r\n for mark in markers:\r\n if loc[0] == mark and loc[1] == mark and loc[2] == mark:\r\n return True\r\n if loc[0] == mark and loc[3] == mark and loc[6] == mark:\r\n return True\r\n if loc[0] == mark and loc[4] == mark and loc[8] == mark:\r\n return True\r\n if loc[1] == mark and loc[4] == mark and loc[7] == mark:\r\n return True\r\n if loc[2] == mark and loc[4] == mark and loc[6] == mark:\r\n return True\r\n if loc[2] == mark and loc[5] == mark and loc[8] == mark:\r\n return True\r\n if loc[3] == mark and loc[4] == mark and loc[5] == mark:\r\n return True\r\n if loc[6] == mark and loc[7] == mark and loc[8] == mark:\r\n return True\r\n else:\r\n return False",
"def test_find_peaks_exact(self):\n sigmas = [5.0, 3.0, 10.0, 20.0, 10.0, 50.0]\n num_points = 500\n test_data, act_locs = _gen_gaussians_even(sigmas, num_points)\n widths = np.arange(0.1, max(sigmas))\n found_locs = find_peaks_cwt(test_data, widths, gap_thresh=2, min_snr=0,\n min_length=None)\n np.testing.assert_array_equal(found_locs, act_locs,\n \"Found maximum locations did not equal those expected\")",
"def marker_validity(table, train, thresh=1.0):\n shift = len(table) - len(train)\n\n if shift >= 0:\n start_d = train[0] - table.time.values[0]\n end_d = train[-1] - table.time.values[len(table)-shift-1]\n if np.abs(start_d - end_d) <= thresh:\n return shift # valid\n else:\n raise ValueError(\"marker value not match!\")\n else:\n raise ValueError(\"electrode markers exceed stimulus markers!\")\n\n return False",
"def check_masked (self, pos : list,) :\n count = 0\n total = 0\n for x in range(pos[0],min(pos[0] + AUTO_width1, self.m_x)) :\n for y in range(pos[1], min(pos[1] + AUTO_width1, self.m_y)) :\n total += 1\n if self.current_grid[x][y] :\n count += 1\n if count/total > 0.5 :\n return True\n else :\n return False",
"def is_coelution(spectrum_in, ms2_precursor, da_after_precursor = 1.3, delta_mz = 0.03, percentage_intensity_not_coelution = 10, percentage_accetable_coelution = False):\n\n upper_mz = ms2_precursor + da_after_precursor\n\n precursor_mz_upper = ms2_precursor + delta_mz\n precursor_mz_lower = ms2_precursor - delta_mz\n\n # Ion +1 to ignore in the spectrum\n ignore_peak_mz = ms2_precursor + 1\n ignore_upper_mz = ignore_peak_mz + delta_mz\n ignore_lower_mz = ignore_peak_mz - delta_mz\n\n peaks = spectrum_in.get_peaks()\n reverse_peaks = reversed(peaks)\n\n position = 0\n for peak in reverse_peaks:\n mz = peak.get_mz()\n\n if mz <= precursor_mz_upper and mz >= precursor_mz_lower:\n precursor_mz = mz\n precursor_intensity = peak.get_intensity()\n precursor_peak = peak\n # print(\"Found precursor in MS1: Mz:\", precursor_mz, \"Intensity:\", precursor_intensity)\n break\n position += 1\n\n # print(spectrum_in.get_size())\n position = spectrum_in.get_size() - position\n\n # Intensity of peak to consider as coelution calculation\n # Below this threshold, nothing is considered coelution\n not_coelution_threshold = precursor_intensity * percentage_intensity_not_coelution / 100\n # Below this threshold, coelution is considered acceptable\n if percentage_accetable_coelution != False:\n acceptable_coelution_threshold = precursor_intensity * percentage_accetable_coelution / 100\n\n acceptable_coelution = list()\n proper_coelution = list()\n coelution = [proper_coelution, acceptable_coelution, precursor_peak]\n\n for peak in peaks[position:]:\n mz = peak.get_mz()\n\n if mz < upper_mz:\n \n # We search for peaks different to the ion +1\n if mz > ignore_upper_mz or mz < ignore_lower_mz:\n intensity = peak.get_intensity()\n \n if intensity > not_coelution_threshold:\n \n if percentage_accetable_coelution == False:\n coelution[0].append(peak)\n\n else:\n \n if intensity > acceptable_coelution_threshold:\n coelution[0].append(peak)\n else:\n coelution[1].append(peak) \n\n else:\n break\n\n \"\"\"\n print(\"Coelution_list\")\n print(\"Proper_coelution:\", end=\"\")\n for peak in coelution[0]:\n print(\"MZ:\", peak.get_mz(), \"Intensity\", peak.get_intensity(), end=\",\")\n print(\"\\nAcceptable_coelution:\", end=\"\")\n for peak in coelution[1]:\n print(\"MZ:\", peak.get_mz(), \"Intensity\", peak.get_intensity(), end=\",\")\n print(\"\")\n \"\"\"\n\n return(coelution)",
"def is_solved(self):\n\n marker = self._marker\n amount_of_pegs = 0\n for row in marker:\n for i in row:\n if i == \"*\":\n amount_of_pegs += 1\n return amount_of_pegs == 1",
"def hit_wall(s):\n if s == [1, 1]: # We would enter the None-field\n return True\n elif s[0] < 0 or s[0] > 2 or s[1] < 0 or s[1] > 3: # We would be out of bounds\n return True\n else:\n return False",
"def is_solved(self):\n peg_count = 0\n for row in self._marker:\n for item in row:\n if item == '*':\n peg_count += 1\n return peg_count == 1",
"def is_valid(data):\n check = [0 for i in range(4)]\n # calculate how many ships are with different lengths\n for i in range(10):\n for j in range(10):\n if type(data[i][j]) == Ship:\n check[data[i][j]._Ship__length - 1] += 1\n # check ships\n for i in range(4):\n if check[i] != (i + 1) * (4 - i):\n return False\n # check corners\n for i in range(1, 10):\n for j in range(10):\n try:\n if type(data[i - 1][j + 1]) == Ship and \\\n type(data[i][j]) == Ship:\n return False\n except:\n pass\n try:\n if type(data[i - 1][j - 1]) == Ship and \\\n type(data[i][j]) == Ship:\n return False\n except:\n pass\n return True",
"def has_neighbor(peak, peak_list, min_dist):\n for testpeak in peak_list:\n if (distance.euclidean(peak, testpeak) < min_dist):\n return True\n return False",
"def is_in_box(self, mz, rt):\n hits = self.check_point(mz, rt)\n if len(hits) > 0:\n return True\n else:\n return False",
"def _has_noise(self) -> bool:\n min = self.array.min()\n max = self.array.max()\n near_min, near_max = np.percentile(self.array, [0.5, 99.5])\n max_is_extreme = max > near_max * 1.25\n min_is_extreme = (min < near_min * 0.75) and (\n abs(min - near_min) > 0.1 * (near_max - near_min)\n )\n return max_is_extreme or min_is_extreme",
"def test_constructed_is_small(self):\n self.assertTrue(all(elt<10 for elt in goodwinsheaf.checkradii()))#check all entries have small radii",
"def _is_skull_stripped(imgs):\n\n def _check_img(img):\n data = np.abs(nb.load(img).get_fdata(dtype=np.float32))\n sidevals = (\n data[0, :, :].sum()\n + data[-1, :, :].sum()\n + data[:, 0, :].sum()\n + data[:, -1, :].sum()\n + data[:, :, 0].sum()\n + data[:, :, -1].sum()\n )\n return sidevals < 10\n\n return all(_check_img(img) for img in imgs)",
"def isMWSSTAR_colors(gflux=None, rflux=None, zflux=None,\n w1flux=None, w2flux=None, primary=None, south=True):\n # ----- Old stars, g-r > 0\n if primary is None:\n primary = np.ones_like(gflux, dtype='?')\n mwsstar = primary.copy()\n\n # - colors g-r > 0\n with warnings.catch_warnings():\n warnings.simplefilter('ignore')\n grcolor = 2.5 * np.log10(rflux / gflux)\n # Assume no difference in north vs south color-cuts.\n if south:\n mwsstar &= (grcolor > 0.0)\n else:\n mwsstar &= (grcolor > 0.0)\n\n return mwsstar",
"def isPeakAssigned(peak, fully=True):\n\n n = 0\n for peakDim in peak.peakDims:\n if len(peakDim.peakDimContribs) > 0:\n n +=1\n \n if n == len(peak.peakDims):\n return True\n \n elif n > 0:\n if fully:\n return False\n else:\n return True\n \n else:\n return False",
"def test_find_peaks_nopeak(self):\n noise_amp = 1.0\n num_points = 100\n np.random.seed(181819141)\n test_data = (np.random.rand(num_points) - 0.5)*(2*noise_amp)\n widths = np.arange(10, 50)\n found_locs = find_peaks_cwt(test_data, widths, min_snr=5, noise_perc=30)\n np.testing.assert_equal(len(found_locs), 0)",
"def is_solved(self):\n i = 0\n for row in self._marker:\n for x in row:\n if x == \"*\":\n i += 1\n if i > 1:\n return False\n return True",
"def check(self, grain=50):\r\n opengles.glDisable(GL_SCISSOR_TEST)\r\n self.s_flg = False\r\n opengles.glReadPixels(0, self.y0, self.ix, 1,\r\n GL_RGB, GL_UNSIGNED_BYTE,\r\n ctypes.byref(self.img))\r\n r0 = self.img[0:3]\r\n for i in xrange(0, self.img_sz, self.step):\r\n if self.img[i:(i+3)] != r0:\r\n return True\r\n\r\n return False",
"def is_peak_hours(time):\n if not 1 <= time.isoweekday() <= 5:\n return False\n if time.hour in [6, 7, 8, 18, 19, 20]:\n return True\n\n return False",
"def peak_in(self, mz, rt):\n if self.rt_match(rt) and self.mz_match(mz):\n return True\n else:\n return False",
"def in_pixel_range(self, pixmin: int, pixmax: int) -> bool:\n \n if any(i < pixmin or i > pixmax or np.isnan(i) for i in self.datapos):\n return False\n\n return True",
"def _is_blank(im):\n \n # Take the r% center\n r = 0.2\n h1 = int(float(im.shape[0]) * r)\n h2 = im.shape[0] - h1\n w1 = int(float(im.shape[1]) * r) \n w2 = im.shape[1] - w1\n #\n im_center = im[h1:h2, w1:w2]\n \n if np.mean(im_center) < 0.06:\n return True\n else:\n return False"
] |
[
"0.58288115",
"0.5654559",
"0.561577",
"0.559006",
"0.5559171",
"0.5494257",
"0.5473655",
"0.5460491",
"0.54178053",
"0.5310197",
"0.52972597",
"0.5219187",
"0.5214601",
"0.5144117",
"0.5144112",
"0.5132752",
"0.5132509",
"0.50744927",
"0.5068108",
"0.505384",
"0.50513923",
"0.5043543",
"0.5042934",
"0.5030421",
"0.50052553",
"0.4978443",
"0.49775252",
"0.49597767",
"0.49575067",
"0.4954294"
] |
0.83904934
|
0
|
findlines(z) Quickly find the peaks of arclines. Returns a list containing the peak locations.
|
def findlines(z,bgsub=True,SATURATED=57000.):
    z = z.copy()
    """ First identify peaks: local maxima below the saturation level. """
    zmax = ndimage.maximum_filter(z,9)
    p = scipy.where((zmax==z)&(z<SATURATED)&(zmax>0))[0]
    s = z[p]
    """ Reject low peaks: keep candidates well above the local background. """
    bg = ndimage.percentile_filter(s,10,21)
    peaks = scipy.where(s>bg*5.)[0]
    return p[peaks]
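An illustrative run on a synthetic arc spectrum (assumed input, not from the original reduction pipeline); it relies on the module-level numpy/scipy/ndimage imports that findlines itself uses, and should recover the three injected line positions.

import numpy
z = numpy.full(400, 10.0)
for centre in (80, 200, 310):
    z[centre] = 5000.0
print(findlines(z))  # expected to report peaks near [ 80 200 310]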
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def find_lines(name, num):\n fn = '%s/disp/%s.1d.fits' % (name, num)\n hdulist = pyfits.open(fn)\n data = hdulist[0].data\n header = hdulist[0].header\n locations = []\n for line in LINES:\n line_loc = get_wavelength_location(header, line)\n locations.append(find_line_peak(data, line_loc, 5))\n return locations",
"def findpeakl(data, zp, noise, bottom, top, r):\n length = len(data)\n width = (top - bottom)/(length - 1)\n absdata = []\n peak = []\n for i in range(length):\n absdata.append(abs(data[i] - zp[i]))\n i = 0\n fsnum = 0#fitting section number\n fsec = []#fitting section\n fdata = []#signal of fitting section\n fnum = []#fitting number\n fm = []#index of max and min points\n while(i<length):\n if absdata[i]>noise*r:\n fsnum = fsnum + 1\n fsec.append([])\n fdata.append([])\n tempmax = absdata[i]\n tempmin = absdata[i]\n inma = i\n inmi = i\n fnum.append(0)\n fm.append([])\n direction = 1#1:rising,0:descending\n while(absdata[i]>noise*r):\n if direction==1:\n if absdata[i]>tempmax:\n tempmax = absdata[i]\n inma = i\n elif absdata[i]<tempmax - noise*r:\n direction = 0\n fm[fsnum - 1].append([inma,inmi])\n tempmin = absdata[i]\n inmi = i\n fnum[fsnum - 1] = fnum[fsnum - 1] + 1\n elif direction==0:\n if absdata[i]<tempmin:\n tempmin = absdata[i]\n inmi = i\n elif absdata[i]>tempmin + noise*r:\n direction = 1\n tempmax = absdata[i]\n inma = i\n fsec[fsnum - 1].append(bottom + width*i)\n fdata[fsnum - 1].append(absdata[i])\n i = i + 1\n if i>=length:\n break\n if fm[fsnum - 1]==[]:\n del fsec[fsnum - 1]\n del fdata[fsnum - 1]\n del fnum[fsnum - 1]\n del fm[fsnum - 1]\n fsnum = fsnum - 1\n i = i + 1\n for i in range(fsnum):\n pars = Parameters()\n j = 0\n mod = LorentzianModel(prefix = 'l1_')\n pars.update(LorentzianModel(prefix = 'l%i_'%(j + 1)).make_params())\n sigma0 = abs(width*(fm[i][j][0] - fm[i][j][1]))/math.sqrt(absdata[fm[i][j][0]]/absdata[fm[i][j][1]] - 1)\n pars['l%i_center'%(j + 1)].set(value = bottom + width*fm[i][j][0],min = fsec[i][0],max = fsec[i][ - 1])\n pars['l%i_sigma'%(j + 1)].set(value = sigma0,min = sigma0/20,max = sigma0*20)\n pars['l%i_amplitude'%(j + 1)].set(value = absdata[fm[i][j][0]]*sigma0/0.3183099,min = noise*r*sigma0/0.3183099,max = absdata[fm[i][j][0]]*20*sigma0/0.3183099)\n for j in range(1,fnum[i]):\n mod = mod + LorentzianModel(prefix = 'l%i_'%(j + 1))\n pars.update(LorentzianModel(prefix = 'l%i_'%(j + 1)).make_params())\n sigma0 = abs(width*(fm[i][j][0] - fm[i][j][1]))/math.sqrt(absdata[fm[i][j][0]]/absdata[fm[i][j][1]] - 1)\n pars['l%i_center'%(j + 1)].set(value = bottom + width*fm[i][j][0],min = fsec[i][0],max = fsec[i][ - 1])\n pars['l%i_sigma'%(j + 1)].set(value = sigma0,min = sigma0/20,max = sigma0*20)\n pars['l%i_amplitude'%(j + 1)].set(value = absdata[fm[i][j][0]]*sigma0/0.3183099,min = noise*r*sigma0/0.3183099,max = absdata[fm[i][j][0]]*20*sigma0/0.3183099)\n# =============================================================================\n# result = mod.fit(fdata[i],pars,x = fsec[i])\n# #print(result.fit_report())\n# plt.plot(fsec[i],fdata[i],'bo',label = 'original')\n# plt.plot(fsec[i],result.best_fit,'r-',label = 'fitting')\n# plt.title('Lorentzian fitting')\n# plt.show()\n# =============================================================================\n tempbo = int((fsec[i][0] - bottom)/width)\n tempto = int((fsec[i][ - 1] - bottom)/width)\n for k in range(fnum[i]):\n gama2 = (pars['l%i_sigma'%(k + 1)].value)**2\n amplitude = pars['l%i_height'%(k + 1)].value*gama2\n miu = pars['l%i_center'%(k + 1)].value\n sum1 = 0\n for p in range(tempbo,tempto + 1):\n v = abs(amplitude/((bottom + width*p - miu)*(bottom + width*p - miu) + gama2))\n sum1 = sum1 + (v - absdata[k])*(v - absdata[k])\n sum1 = sum1/(tempto - tempbo + 1)\n peak.append([gama2,miu,amplitude,sum1,tempbo,tempto])\n return peak",
"def find_lines(fluxes, smoothed, corrected, threshold=1., fraction_pts=0.2,\n wavs=None, plot=False):\n line_indices = []\n window_width = int(len(fluxes) * fraction_pts / 2) * 2 + 1\n half_width = window_width // 2\n # Reflect the endpoints of the spectrum.\n x = numpy.concatenate((fluxes[half_width:0:-1], fluxes,\n fluxes[-2:-half_width - 2:-1]))\n xs = numpy.concatenate((smoothed[half_width:0:-1], smoothed,\n smoothed[-2:-half_width - 2:-1]))\n # Residual decribes the noise.\n residual = numpy.abs(x - xs)\n # The noise is the average residual over a window of width\n # ``window_width``. noise has same length as fluxes\n noise = numpy.convolve(residual, numpy.ones(window_width) / window_width,\n 'valid')\n\n # If the plot flag is on, show the line complex and the lines that are\n # found.\n if plot:\n if wavs is None:\n wavs = range(len(fluxes))\n plt.plot(wavs, corrected, wavs, -noise)\n\n # A line complex is found if the flux value is smaller than ``threshold``\n # times the average noise.\n pos = 0\n max_pos = len(corrected)\n while pos < max_pos:\n min_pos = pos # Lowest index for line\n while (pos < max_pos and corrected[pos] < -threshold * noise[pos]):\n pos += 1\n # Check if a line complex has been found\n if pos > min_pos:\n if plot:\n plt.axvspan(wavs[min_pos], wavs[pos - 1], color='g', alpha=0.1)\n # Find the line center for each line in the complex.\n centers = min_pos + find_centers(corrected[min_pos:pos])\n\n # Calculate signal to noise ratio for the line.\n for center in centers:\n sn = residual[center + half_width] / noise[center]\n line_indices.append((center, sn))\n pos += 1\n\n if plot:\n for line, sn in line_indices:\n plt.axvline(x=wavs[line], color='r', alpha=0.3, linewidth=2)\n plt.show()\n\n return line_indices",
"def find_peaks(s, wrapped=False):\n return _extrema(s, lambda x: x != 0, wrapped)",
"def parse_peaks(self):\n peaks = []\n if self._mir_root.tag == 'method':\n for peak in self._mir_root[0].findall(\"peak\"):\n p = dict(peak.items())\n peaks.append(Peak(float(p['m_z']), float(p['tolerance'])))\n return sorted(peaks, key=lambda x: x[0])",
"def stichAnchors(chrom, loops, margin=1):\n cov = set()\n for i, loop in enumerate(loops):\n cov.update(range(loop.x_start, loop.x_end + 1))\n cov.update(range(loop.y_start, loop.y_end + 1))\n cov = list(cov)\n cov.sort()\n npeaks = []\n i = 0\n while i < len(cov) - 1:\n j = i + 1\n while j < len(cov):\n if cov[j] - cov[j - 1] > margin:\n break\n else:\n j += 1\n peak = Peak()\n peak.chrom = chrom\n peak.start = cov[i]\n peak.end = cov[j - 1]\n peak.length = cov[j - 1] - cov[i] + 1\n npeaks.append(peak)\n i = j #update search start\n return npeaks",
"def simple_peak_find(s, init_slope=500, start_slope=500, end_slope=200,\n min_peak_height=50, max_peak_width=1.5):\n point_gap = 10\n\n def slid_win(itr, size=2):\n \"\"\"Returns a sliding window of size 'size' along itr.\"\"\"\n itr, buf = iter(itr), []\n for _ in range(size):\n try:\n buf += [next(itr)]\n except StopIteration:\n return\n for new_item in itr:\n yield buf\n buf = buf[1:] + [new_item]\n yield buf\n\n # TODO: check these smoothing defaults\n y, t = s.values, s.index.astype(float)\n smooth_y = movingaverage(y, 9)\n dxdt = np.gradient(smooth_y) / np.gradient(t)\n # dxdt = -savitzkygolay(ts, 5, 3, deriv=1).y / np.gradient(t)\n\n init_slopes = np.arange(len(dxdt))[dxdt > init_slope]\n if len(init_slopes) == 0:\n return []\n # get the first points of any \"runs\" as a peak start\n # runs can have a gap of up to 10 points in them\n peak_sts = [init_slopes[0]]\n peak_sts += [j for i, j in slid_win(init_slopes, 2) if j - i > 10]\n peak_sts.sort()\n\n en_slopes = np.arange(len(dxdt))[dxdt < -end_slope]\n if len(en_slopes) == 0:\n return []\n # filter out any lone points farther than 10 away from their neighbors\n en_slopes = [en_slopes[0]]\n en_slopes += [i[1] for i in slid_win(en_slopes, 3)\n if i[1] - i[0] < point_gap or i[2] - i[1] < point_gap]\n en_slopes += [en_slopes[-1]]\n # get the last points of any \"runs\" as a peak end\n peak_ens = [j for i, j in slid_win(en_slopes[::-1], 2)\n if i - j > point_gap] + [en_slopes[-1]]\n peak_ens.sort()\n # avals = np.arange(len(t))[np.abs(t - 0.675) < 0.25]\n # print([i for i in en_slopes if i in avals])\n # print([(t[i], i) for i in peak_ens if i in avals])\n\n peak_list = []\n pk2 = 0\n for pk in peak_sts:\n # don't allow overlapping peaks\n if pk < pk2:\n continue\n\n # track backwards to find the true start\n while dxdt[pk] > start_slope and pk > 0:\n pk -= 1\n\n # now find where the peak ends\n dist_to_end = np.array(peak_ens) - pk\n pos_end = pk + dist_to_end[dist_to_end > 0]\n for pk2 in pos_end:\n if (y[pk2] - y[pk]) / (t[pk2] - t[pk]) > start_slope:\n # if the baseline beneath the peak is too large, let's\n # keep going to the next dip\n peak_list.append({'t0': t[pk], 't1': t[pk2]})\n pk = pk2\n elif t[pk2] - t[pk] > max_peak_width:\n # make sure that peak is short enough\n pk2 = pk + np.abs(t[pk:] - t[pk] - max_peak_width).argmin()\n break\n else:\n break\n else:\n # if no end point is found, the end point\n # is the end of the timeseries\n pk2 = len(t) - 1\n\n if pk == pk2:\n continue\n pk_hgt = max(y[pk:pk2]) - min(y[pk:pk2])\n if pk_hgt < min_peak_height:\n continue\n peak_list.append({'t0': t[pk], 't1': t[pk2]})\n return peak_list",
"def peakdetect_parabole(y_axis, x_axis, points = 9):\n # check input data\n x_axis, y_axis = _datacheck_peakdetect(x_axis, y_axis)\n # make the points argument odd\n points += 1 - points % 2\n #points += 1 - int(points) & 1 slower when int conversion needed\n \n # get raw peaks\n max_raw, min_raw = peakdetect_zero_crossing(y_axis)\n \n # define output variable\n max_peaks = []\n min_peaks = []\n \n max_ = _peakdetect_parabole_fitter(max_raw, x_axis, y_axis, points)\n min_ = _peakdetect_parabole_fitter(min_raw, x_axis, y_axis, points)\n \n max_peaks = map(lambda x: [x[0], x[1]], max_)\n max_fitted = map(lambda x: x[-1], max_)\n min_peaks = map(lambda x: [x[0], x[1]], min_)\n min_fitted = map(lambda x: x[-1], min_)\n \n \n #pylab.plot(x_axis, y_axis)\n #pylab.hold(True)\n #for max_p, max_f in zip(max_peaks, max_fitted):\n # pylab.plot(max_p[0], max_p[1], 'x')\n # pylab.plot(max_f[0], max_f[1], 'o', markersize = 2)\n #for min_p, min_f in zip(min_peaks, min_fitted):\n # pylab.plot(min_p[0], min_p[1], 'x')\n # pylab.plot(min_f[0], min_f[1], 'o', markersize = 2)\n #pylab.show()\n \n return [max_peaks, min_peaks]",
"def findRMpeaks(self, pix, threshold):\n\t\tsigma = np.std(self.getz(pix))\n\t\tdetections = []\n\t\tfor i, phi in enumerate(self.getz(pix)):\n \t\t \tif phi > threshold*sigma: detections.append(i)\n \t \treturn detections",
"def find_peaks(x, y, widthrange, rel_threshold=0.1):\n dx = abs(x[1] - x[0])\n minwidth, maxwidth = widthrange\n widths = np.arange(floor(minwidth/dx), ceil(maxwidth/dx))\n peakpos = find_peaks_cwt(y, widths)\n maxy = max(y)\n return [pos for pos in peakpos if y[pos] >= rel_threshold*maxy]",
"def findPeaks(self, fit_peaks_image):\n self.pf_iterations += 1\n \n # Use pre-specified peak locations if available, e.g. bead calibration.\n if self.peak_locations is not None:\n return [self.peak_locations, self.peak_locations_type, True]\n \n # Otherwise, identify local maxima in the image.\n new_peaks = self.peakFinder(fit_peaks_image)\n\n # Update new peak identification threshold (if necessary).\n # Also, while threshold is greater than min_threshold we\n # are automatically not done.\n if (self.cur_threshold > self.threshold):\n self.cur_threshold -= 1.0\n return [new_peaks, \"finder\", False]\n\n # If we did not find any new peaks then we may be done.\n if (new_peaks[\"x\"].size == 0):\n return [new_peaks, \"finder\", True]\n else:\n return [new_peaks, \"finder\", False]",
"def points_to_line_segments(peaks, prediction_model, window=10, sigma_cutoff=2):\n peaks.reset_assignment()\n\n def score_matrix(line_list, time, coord):\n return build_score_matrix(\n line_list, time, coord, prediction_model, sigma_cutoff=sigma_cutoff\n ).flatten()\n\n lines = []\n for frame in peaks.frames:\n # Give precedence to lines with higher peak amplitudes\n for starting_point in np.argsort(-frame.peak_amplitudes * frame.unassigned):\n if frame.unassigned[starting_point]:\n line = KymoLineData(\n np.array([frame.time_points[starting_point]]),\n np.array([frame.coordinates[starting_point]]),\n )\n frame.unassigned[starting_point] = False\n\n extend_line(line, peaks, window, score_matrix)\n lines.append(line)\n\n return lines",
"def get_peaks(self):\n peaks = np.array([i for i in range(self.npks)\n if self.polar_angle[i] < self.polar_max])\n x, y, z = (np.rint(self.xp[peaks]).astype(np.int16),\n np.rint(self.yp[peaks]).astype(np.int16),\n np.rint(self.zp[peaks]).astype(np.int16))\n polar, azi = self.polar_angle[peaks], self.azimuthal_angle[peaks]\n intensity = self.intensity[peaks]\n if self.Umat is not None:\n H, K, L = self.get_hkls()\n H = np.array(H)[peaks]\n K = np.array(K)[peaks]\n L = np.array(L)[peaks]\n diffs = np.array([self.diff(i) for i in peaks])\n else:\n H = K = L = diffs = np.zeros(peaks.shape, dtype=float)\n return list(zip(peaks, x, y, z, polar, azi, intensity, H, K, L, diffs))",
"def find_lines(self):\n return []",
"def whichPeaks(trace):\n peaks = []\n df = np.diff(trace)\n for t in range(len(df)-4):\n if df[t] > 0 and df[t+1] > 0:\n if df[t+2] < 0 and df[t+3] < 0: # Potential peak\n if trace[t+2] > np.mean(trace):\n peaks.append([t+2, trace[t+2]])\n return peaks",
"def findpeakg(data, zp, noise, bottom, top, r):\n length = len(data)\n width = (top - bottom)/(length - 1)\n absdata = []\n peak = []\n for i in range(length):\n absdata.append(abs(data[i] - zp[i]))\n i = 0\n fsnum = 0#fitting section number\n fsec = []#fitting section\n fdata = []#signal of fitting section\n fnum = []#fitting number\n fm = []#index of max and min points\n while(i<length):\n if absdata[i]>noise*r:\n fsnum = fsnum + 1\n fsec.append([])\n fdata.append([])\n tempmax = absdata[i]\n tempmin = absdata[i]\n inma = i\n inmi = i\n fnum.append(0)\n fm.append([])\n direction = 1#1:rising,0:descending\n while(absdata[i]>noise*r):\n if direction==1:\n if absdata[i]>tempmax:\n tempmax = absdata[i]\n inma = i\n elif absdata[i]<tempmax - noise*r:\n direction = 0\n fm[fsnum - 1].append([inma,inmi])\n tempmin = absdata[i]\n inmi = i\n fnum[fsnum - 1] = fnum[fsnum - 1] + 1\n elif direction==0:\n if absdata[i]<tempmin:\n tempmin = absdata[i]\n inmi = i\n elif absdata[i]>tempmin + noise*r:\n direction = 1\n tempmax = absdata[i]\n inma = i\n fsec[fsnum - 1].append(bottom + width*i)\n fdata[fsnum - 1].append(absdata[i])\n i = i + 1\n if i>=length:\n break\n if fm[fsnum - 1]==[]:\n del fsec[fsnum - 1]\n del fdata[fsnum - 1]\n del fnum[fsnum - 1]\n del fm[fsnum - 1]\n fsnum = fsnum - 1\n i = i + 1\n for i in range(fsnum):\n pars = Parameters()\n j = 0\n mod = GaussianModel(prefix = 'g1_')\n pars.update(GaussianModel(prefix = 'g%i_'%(j + 1)).make_params())\n sigma0 = math.sqrt((width*(fm[i][j][0] - fm[i][j][1]))**2/(2*math.log(absdata[fm[i][j][0]]/absdata[fm[i][j][1]])))\n pars['g%i_center'%(j + 1)].set(value = bottom + width*fm[i][j][0],min = fsec[i][0],max = fsec[i][ - 1])\n pars['g%i_sigma'%(j + 1)].set(value = sigma0,min = sigma0/20,max = sigma0*20)\n pars['g%i_amplitude'%(j + 1)].set(value = absdata[fm[i][j][0]]/0.3989423*sigma0,min = noise*r/0.3989423*sigma0,max = absdata[fm[i][j][0]]*20/0.3989423*sigma0)\n for j in range(1,fnum[i]):\n mod = mod + GaussianModel(prefix = 'g%i_'%(j + 1))\n pars.update(GaussianModel(prefix = 'g%i_'%(j + 1)).make_params())\n sigma0 = math.sqrt((width*(fm[i][j][0] - fm[i][j][1]))**2/(2*math.log(absdata[fm[i][j][0]]/absdata[fm[i][j][1]])))\n pars['g%i_center'%(j + 1)].set(value = bottom + width*fm[i][j][0],min = fsec[i][0],max = fsec[i][-1])\n pars['g%i_sigma'%(j + 1)].set(value = sigma0,min = sigma0/20,max = sigma0*20)\n pars['g%i_amplitude'%(j + 1)].set(value = absdata[fm[i][j][0]]/0.3989423*sigma0,min = noise*r/0.3989423*sigma0,max = absdata[fm[i][j][0]]*20/0.3989423*sigma0)\n# =============================================================================\n# result = mod.fit(fdata[i],pars,x = fsec[i])\n# #print(result.fit_report())\n# plt.plot(fsec[i],fdata[i],'bo',label = 'original')\n# plt.plot(fsec[i],result.best_fit,'r-',label = 'fitting')\n# plt.title('Gaussian fitting')\n# plt.show()\n# =============================================================================\n tempbo = int((fsec[i][0] - bottom)/width)\n tempto = int((fsec[i][-1] - bottom)/width)\n for k in range(fnum[i]):\n amplitude = pars['g%i_height'%(k + 1)].value\n sigma = pars['g%i_sigma'%(k + 1)].value\n miu = pars['g%i_center'%(k + 1)].value\n sum1 = 0\n for p in range(tempbo,tempto + 1):\n v = abs(amplitude*math.exp( - (bottom + width*p - miu)*(bottom + width*p - miu)/(2*sigma*sigma)))\n sum1 = sum1 + (v - absdata[k])*(v - absdata[k])\n sum1 = sum1/(tempto - tempbo + 1)\n peak.append([sigma,miu,amplitude,sum1,tempbo,tempto])\n return peak",
"def find_peaks(self, t_measure):\n self._check_time(t_measure)\n #widths = np.arange(2,7) # range of widths to check by find_peaks_cwt\n #peak_nodes = find_peaks_cwt(self.get_velocities(t_measure), widths, min_snr=2.0,noise_perc=30.0)\n peak_beads = peakutils.peak.indexes(self.get_velocities(t_measure), thres=0.75, min_dist=7)\n return peak_beads",
"def lineshape_from_peaklist(peaklist, w=0.5, points=800, limits=None):\n peaklist.sort()\n if limits:\n try:\n l_limit, r_limit = limits\n l_limit = float(l_limit)\n r_limit = float(r_limit)\n except Exception as e:\n print(e)\n print('limits must be a tuple of two numbers')\n raise\n if l_limit > r_limit:\n l_limit, r_limit = r_limit, l_limit\n else:\n l_limit = peaklist[0][0] - 50\n r_limit = peaklist[-1][0] + 50\n x = np.linspace(l_limit, r_limit, points)\n y = add_lorentzians(x, peaklist, w)\n return x, y",
"def find_peaks(x, height=None, threshold=None,\n distance=None, prominence=None, width=None,\n wlen=None, rel_height=0.5):\n peaks, _ = signal.find_peaks(\n x, height, threshold, distance, prominence, width, wlen, rel_height\n )\n return peaks",
"def findpeaks(series, DELTA):\n # Set inital values\n mn, mx = np.Inf, -np.Inf\n minpeaks = []\n maxpeaks = []\n lookformax = True\n start = True\n # Iterate over items in series\n for time_pos, value in series.iteritems():\n if value > mx:\n mx = value\n mxpos = time_pos\n if value < mn:\n mn = value\n mnpos = time_pos\n if lookformax:\n if value < mx-DELTA:\n # a local maxima\n maxpeaks.append((mxpos, mx))\n mn = value\n mnpos = time_pos\n lookformax = False\n elif start:\n # a local minima at beginning\n minpeaks.append((mnpos, mn))\n mx = value\n mxpos = time_pos\n start = False\n else:\n if value > mn+DELTA:\n # a local minima\n minpeaks.append((mnpos, mn))\n mx = value\n mxpos = time_pos\n lookformax = True\n # check for extrema at end\n if value > mn+DELTA:\n maxpeaks.append((mxpos, mx))\n elif value < mx-DELTA:\n minpeaks.append((mnpos, mn))\n return minpeaks, maxpeaks",
"def collect_lines(xy, BL, bs, climv):\n lines = [zip(xy[BL[i, :], 0], xy[BL[i, :], 1]) for i in range(len(BL))]\n line_segments = LineCollection(lines, # Make a sequence of x,y pairs\n linewidths=1., # could iterate over list\n linestyles='solid',\n cmap='coolwarm',\n norm=plt.Normalize(vmin=-climv, vmax=climv))\n line_segments.set_array(bs)\n print(lines)\n return line_segments",
"def detect_lines_from_geometry(\n masked_derivative,\n positions,\n normals,\n start_threshold,\n continuation_threshold,\n max_lines,\n angle_weight,\n force_dir,\n):\n\n def to_absolute_threshold(filtered_image, threshold):\n f = -filtered_image[filtered_image < 0]\n mn = np.min(f)\n mx = np.max(f)\n\n return -((mx - mn) * threshold + mn)\n\n thresh = to_absolute_threshold(masked_derivative, start_threshold)\n proceed = (\n thresh\n if not continuation_threshold\n else to_absolute_threshold(masked_derivative, continuation_threshold)\n )\n\n # Generate lookup table which convert normal angle into table of points to be trialed\n candidates = get_candidate_generator()\n\n lines = []\n for flat_idx in np.argsort(masked_derivative.flatten()):\n idx = np.unravel_index(flat_idx, masked_derivative.shape)\n\n if masked_derivative[idx[0], idx[1]] == KymoCode.seen:\n continue\n\n if masked_derivative[idx[0], idx[1]] >= thresh or len(lines) >= max_lines:\n break\n\n # Traverse the line. Note that traverse_line modifies the masked_derivative image by marking some as seen.\n line = traverse_line(\n idx, masked_derivative, positions, normals, proceed, candidates, angle_weight, force_dir\n )\n\n if line:\n lines.append(line)\n\n return lines",
"def findExtremeLines(lines):\r\n\r\n leftV = [[1000, 1000]]\r\n rightV = [[-1000, -1000]]\r\n topH = [[1000, 1000]]\r\n bottomH = [[-1000, -1000]]\r\n leftX = 100000\r\n rightX = 0\r\n\r\n for line in lines:\r\n\r\n rho = line[0][0]\r\n theta = line[0][1]\r\n\r\n xIntercept = rho / np.cos(theta)\r\n\r\n # Line is horizontal\r\n if theta > np.pi * 45 / 180 and theta < np.pi * 135 / 180:\r\n if rho < topH[0][0]:\r\n topH = line\r\n if rho > bottomH[0][0]:\r\n bottomH = line\r\n\r\n # Line is vertical\r\n else:\r\n if xIntercept > rightX:\r\n rightV = line\r\n rightX = xIntercept\r\n elif xIntercept <= leftX:\r\n leftV = line\r\n leftX = xIntercept\r\n\r\n return [[leftV, rightV], [topH, bottomH]]",
"def points_on_lines(hyperplanes):\n intersections = []\n for row in hyperplanes:\n intersections.append(an_intersection(row[:-1], -row[-1]))\n return np.array(intersections)",
"def _peakdetect_parabole_fitter(raw_peaks, x_axis, y_axis, points):\n func = lambda x, k, tau, m: k * ((x - tau) ** 2) + m\n fitted_peaks = []\n for peak in raw_peaks:\n index = peak[0]\n x_data = x_axis[index - points // 2: index + points // 2 + 1]\n y_data = y_axis[index - points // 2: index + points // 2 + 1]\n # get a first approximation of tau (peak position in time)\n tau = x_axis[index]\n # get a first approximation of peak amplitude\n m = peak[1]\n \n # build list of approximations\n # k = -m as first approximation?\n p0 = (-m, tau, m)\n popt, pcov = curve_fit(func, x_data, y_data, p0)\n # retrieve tau and m i.e x and y value of peak\n x, y = popt[1:3]\n \n # create a high resolution data set for the fitted waveform\n x2 = np.linspace(x_data[0], x_data[-1], points * 10)\n y2 = func(x2, *popt)\n \n fitted_peaks.append([x, y, [x2, y2]])\n \n return fitted_peaks",
"def peak_indices(self, **kwargs):\n kwarg_defaults = {\n 'width': 5, # ensure small spikes are ignored\n }\n kwarg_defaults.update(kwargs)\n return signal.find_peaks(self.ys, **kwarg_defaults)",
"def find_peaks_(image):\n\n height, width = image.shape[:2]\n img_matrix = [sum(i)/len(i) for i in image]\n x=[i for i in range(height)]\n y = [255-i for i in img_matrix]\n y = gaussian_filter(y, sigma=20)\n maxs, _ = find_peaks(y)\n maxs = maxs.tolist()\n\n return maxs",
"def marker_lines(self) -> list[Line]:\n upper_point = (\n self.leaf_center_px - self.leaf_width_px / 2 * self._analysis_ratio\n )\n lower_point = (\n self.leaf_center_px + self.leaf_width_px / 2 * self._analysis_ratio\n )\n\n lines = []\n for mlc_position in self.position:\n if self._orientation == Orientation.UP_DOWN:\n line = Line((mlc_position, upper_point), (mlc_position, lower_point))\n else:\n line = Line((upper_point, mlc_position), (lower_point, mlc_position))\n lines.append(line)\n return lines",
"def search_peaks(wavelength, flux, smooth_points=20, lmin=0, lmax=0, fmin=0.5, fmax=3., \n emission_line_file=\"lineas_c89_python.dat\", brightest_line=\"Ha\", cut=1.2, \n check_redshift = 0.0003, only_id_lines=True, plot=True, verbose=True, fig_size=12): \n # Setup wavelength limits\n if lmin == 0 :\n lmin = np.nanmin(wavelength)\n if lmax == 0 :\n lmax = np.nanmax(wavelength)\n \n # Fit a smooth continuum\n #smooth_points = 20 # Points in the interval\n step = np.int(len(wavelength)/smooth_points) # step\n w_cont_smooth = np.zeros(smooth_points) \n f_cont_smooth = np.zeros(smooth_points) \n\n for j in range(smooth_points):\n w_cont_smooth[j] = np.nanmedian([wavelength[i] for i in range(len(wavelength)) if (i > step*j and i<step*(j+1))])\n f_cont_smooth[j] = np.nanmedian([flux[i] for i in range(len(wavelength)) if (i > step*j and i<step*(j+1))]) # / np.nanmedian(spectrum)\n #print j,w_cont_smooth[j], f_cont_smooth[j]\n\n interpolated_continuum_smooth = interpolate.splrep(w_cont_smooth, f_cont_smooth, s=0)\n interpolated_continuum = interpolate.splev(wavelength, interpolated_continuum_smooth, der=0)\n\n\n funcion = flux/interpolated_continuum\n \n # Searching for peaks using cut = 1.2 by default\n peaks = []\n index_low = 0\n for i in range(len(wavelength)):\n if funcion[i] > cut and funcion[i-1] < cut :\n index_low = i\n if funcion[i] < cut and funcion[i-1] > cut :\n index_high = i\n if index_high != 0 :\n pfun = np.nanmax([funcion[j] for j in range(len(wavelength)) if (j > index_low and j<index_high+1 )])\n peak = wavelength[funcion.tolist().index(pfun)]\n if (index_high - index_low) > 1 :\n peaks.append(peak)\n \n # Identify lines\n # Read file with data of emission lines: \n # 6300.30 [OI] -0.263 15 5 5 15\n # el_center el_name el_fnl lowlow lowhigh highlow highigh \n # Only el_center and el_name are needed\n el_center,el_name,el_fnl,el_lowlow,el_lowhigh,el_highlow,el_highhigh = read_table(emission_line_file, [\"f\", \"s\", \"f\", \"f\", \"f\", \"f\", \"f\"] )\n #for i in range(len(el_name)):\n # print \" %8.2f %9s %6.3f %4.1f %4.1f %4.1f %4.1f\" % (el_center[i],el_name[i],el_fnl[i],el_lowlow[i], el_lowhigh[i], el_highlow[i], el_highhigh[i])\n #el_center,el_name = read_table(\"lineas_c89_python.dat\", [\"f\", \"s\"] )\n\n # In case this is needed in the future...\n# el_center = [6300.30, 6312.10, 6363.78, 6548.03, 6562.82, 6583.41, 6678.15, 6716.47, 6730.85, 7065.28, 7135.78, 7318.39, 7329.66]\n# el_fnl = [-0.263, -0.264, -0.271, -0.296, -0.298, -0.300, -0.313, -0.318, -0.320, -0.364, -0.374, -0.398, -0.400 ]\n# el_name = [\"[OI]\", \"[SIII]\", \"[OI]\", \"[NII]\", \"Ha\", \"[NII]\", \"HeI\", \"[SII]\", \"[SII]\", \"HeI\", \"[ArIII]\", \"[OII]\", \"[OII]\" ]\n\n # Search for the brightest line in given spectrum (\"Ha\" by default)\n peaks_flux = np.zeros(len(peaks))\n for i in range(len(peaks)):\n peaks_flux[i] = flux[wavelength.tolist().index(peaks[i])]\n Ha_w_obs = peaks[peaks_flux.tolist().index(np.nanmax(peaks_flux))] \n \n # Estimate redshift of the brightest line ( Halpha line by default)\n Ha_index_list = el_name.tolist().index(brightest_line)\n Ha_w_rest = el_center[Ha_index_list]\n Ha_redshift = (Ha_w_obs-Ha_w_rest)/Ha_w_rest\n if verbose: print(\"\\n> Detected %i emission lines using %8s at %8.2f A as brightest line!!\\n\" % (len(peaks),brightest_line, Ha_w_rest)) \n# if verbose: print \" Using %8s at %8.2f A as brightest line --> Found in %8.2f with a redshift %.6f \" % (brightest_line, Ha_w_rest, Ha_w_obs, Ha_redshift)\n \n # Identify lines using brightest line (Halpha by 
default) as reference. \n # If abs(wavelength) > 2.5 we don't consider it identified.\n peaks_name = [None] * len(peaks)\n peaks_rest = np.zeros(len(peaks))\n peaks_redshift = np.zeros(len(peaks))\n peaks_lowlow = np.zeros(len(peaks)) \n peaks_lowhigh = np.zeros(len(peaks))\n peaks_highlow = np.zeros(len(peaks))\n peaks_highhigh = np.zeros(len(peaks))\n\n for i in range(len(peaks)):\n minimo_w = np.abs(peaks[i]/(1+Ha_redshift)-el_center)\n if np.nanmin(minimo_w) < 2.5:\n indice = minimo_w.tolist().index(np.nanmin(minimo_w))\n peaks_name[i]=el_name[indice]\n peaks_rest[i]=el_center[indice]\n peaks_redshift[i] = (peaks[i]-el_center[indice])/el_center[indice]\n peaks_lowlow[i] = el_lowlow[indice]\n peaks_lowhigh[i] = el_lowhigh[indice]\n peaks_highlow[i] = el_highlow[indice]\n peaks_highhigh[i] = el_highhigh[indice]\n if verbose: print(\"%9s %8.2f found in %8.2f at z=%.6f |z-zref| = %.6f\" % (peaks_name[i], peaks_rest[i],peaks[i], peaks_redshift[i],np.abs(peaks_redshift[i]- Ha_redshift) ))\n #print peaks_lowlow[i],peaks_lowhigh[i],peaks_highlow[i],peaks_highhigh[i]\n # Check if all redshifts are similar, assuming check_redshift = 0.0003 by default\n # If OK, add id_peaks[i]=1, if not, id_peaks[i]=0 \n id_peaks=[]\n for i in range(len(peaks_redshift)):\n if np.abs(peaks_redshift[i]-Ha_redshift) > check_redshift:\n if verbose: print(\" WARNING!!! Line %8s in w = %.2f has redshift z=%.6f, different than zref=%.6f\" %(peaks_name[i],peaks[i],peaks_redshift[i], Ha_redshift))\n id_peaks.append(0)\n else:\n id_peaks.append(1)\n\n if plot:\n plt.figure(figsize=(fig_size, fig_size/2.5)) \n plt.plot(wavelength, funcion, \"r\", lw=1, alpha = 0.5)\n plt.minorticks_on() \n plt.xlabel(\"Wavelength [$\\mathrm{\\AA}$]\")\n plt.ylabel(\"Flux / continuum\")\n \n plt.xlim(lmin,lmax)\n plt.ylim(fmin,fmax)\n plt.axhline(y=cut, color='k', linestyle=':', alpha=0.5) \n for i in range(len(peaks)):\n plt.axvline(x=peaks[i], color='k', linestyle=':', alpha=0.5)\n label=peaks_name[i]\n plt.text(peaks[i], 1.8, label) \n plt.show() \n \n continuum_limits = [peaks_lowlow, peaks_lowhigh, peaks_highlow, peaks_highhigh]\n \n if only_id_lines:\n peaks_r=[]\n peaks_name_r=[]\n peaks_rest_r=[]\n peaks_lowlow_r=[]\n peaks_lowhigh_r=[]\n peaks_highlow_r=[]\n peaks_highhigh_r=[]\n \n for i in range(len(peaks)): \n if id_peaks[i] == 1:\n peaks_r.append(peaks[i])\n peaks_name_r.append(peaks_name[i])\n peaks_rest_r.append(peaks_rest[i])\n peaks_lowlow_r.append(peaks_lowlow[i])\n peaks_lowhigh_r.append(peaks_lowhigh[i])\n peaks_highlow_r.append(peaks_highlow[i])\n peaks_highhigh_r.append(peaks_highhigh[i])\n continuum_limits_r=[peaks_lowlow_r,peaks_lowhigh_r,peaks_highlow_r,peaks_highhigh_r] \n\n return peaks_r, peaks_name_r , peaks_rest_r, continuum_limits_r \n else: \n return peaks, peaks_name , peaks_rest, continuum_limits",
"def peakdetect_sine_locked(y_axis, x_axis, points = 9):\n return peakdetect_sine(y_axis, x_axis, points, True)"
] |
[
"0.61656934",
"0.60634553",
"0.574901",
"0.5706759",
"0.5654833",
"0.5622347",
"0.5612087",
"0.5597109",
"0.55510885",
"0.55373585",
"0.5508165",
"0.5473418",
"0.5457143",
"0.54282737",
"0.5375191",
"0.53597057",
"0.53382623",
"0.53099495",
"0.52996916",
"0.5296617",
"0.52950287",
"0.527017",
"0.5261386",
"0.52248454",
"0.52059853",
"0.5203704",
"0.51978517",
"0.51904094",
"0.5161891",
"0.5159137"
] |
0.70562565
|
0
|
id_slits(arc,find_stars=True) Determine the top/bottom of each slit in the 2D y-corrected arc image. If find_stars is True, returns slit, starbox; otherwise returns slit.
|
def id_slits(arc,find_stars=True,chilimit=2.5,SATURATED=57000.,useLines=True):
arc = arc.copy()
""" Attempt to avoid saturated lines """
w = arc.shape[1]
tmp = arc.copy()
tmp[tmp>SATURATED] = 0.
tmpSorted = scipy.sort(tmp,axis=1)
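    # Per-row brightness proxy: mean of the ~97th-98th percentile pixels of each
    # (saturation-clipped) row; minflux, a quarter of its median, is used later
    # to reject slits that are too faint.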
    flux = tmpSorted[:,int(w*0.97):int(w*0.98)].mean(axis=1)
minflux = scipy.median(flux)/4.
del tmp
if find_stars:
starbox = []
slit = []
if useLines==False:
        flux = scipy.sort(arc,1)[:,w*4//5]
        minflux = scipy.median(flux[flux.size//3:flux.size*2//3])/2.
mask = scipy.where(flux>minflux,1.,0.)
inSlit = False
tmp = []
meds = []
for i in range(mask.size):
if inSlit:
if mask[i]==0:
inSlit = False
end = i-1
if end-start>8:
tmp.append([start+1,end-1])
slit = arc[start+3:end-3,100:-100].mean(0)
meds.append(slit.max())
elif mask[i]==1:
start = i
inSlit = True
if inSlit:
end = i
if end-start>8:
tmp.append([start+1,end-1])
slit = arc[start+3:end-3,100:-100].mean(0)
meds.append(slit.max())
meds = numpy.array(meds)
if find_stars:
slit = []
starbox = []
m,s = Clip(meds,nsig=3.,locut=0.,hicut=0.75)
for i in range(len(tmp)):
if meds[i]<m+s*5:
slit.append(tmp[i])
else:
starbox.append(tmp[i])
return slit,starbox
return tmp
    m,s = clip(tmpSorted[arc.shape[0]//2,:int(w*0.05)],2.)
inSlit = False
i = 0
while i<arc.shape[0]:
# lines = findlines(arc[i])
if useLines:
lines = findlines(arc[i])
else:
med = scipy.median(arc[i])
if med>m+5*s:
lines = [0]*10
else:
lines = [0]
if len(lines)<9 and inSlit==False:
i += 1
continue
elif len(lines)>9 and inSlit==False:
inSlit = True
start = i
i += 1
continue
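        # Row-to-row comparison: spline-model the smoothed current row, shift it
        # over a range of sub-pixel offsets, and keep the lowest chi^2 against the
        # previous row; a jump in this chi^2 (or a drop in the number of detected
        # lines) marks the top edge of the current slit.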
bestchi = 1e29
if len(lines)>9:
#bestchi = 1e29
x = scipy.arange(arc[i].size)
smooth = ndimage.gaussian_filter(arc[i],1.)
model = interpolate.splrep(x,smooth,k=3,s=0)
comp = ndimage.gaussian_filter(arc[i-1],1.)
usedpix = arc[i-1][10:-10]>scipy.median(arc[i-1])
for o in range(30):
offset = float(o-15.)/5.
row = interpolate.splev(x[10:-10]+offset,model)
chi = (comp[10:-10]-row)**2/(abs(comp[10:-10]))
chi = chi[usedpix]
chi.sort()
                chi = chi[:-max(1, chi.size//100)] # Reject the highest ~1% of points
if chilimit>6. and i>600 and o>6 and 1==2:
import pylab
pylab.plot(row)
pylab.plot(comp[10:-10])
pylab.figure()
pylab.plot((row-comp[10:-10])**2/(abs(comp[10:-10])+16.))
pylab.show()
if chi.sum()/chi.size<bestchi:
bestchi = chi.sum()/chi.size
if inSlit is True and (bestchi>chilimit or len(lines)<9):
""" The row is at the top edge of the slit. """
inSlit = False
end = i
i += 1
if end-start<3:
continue
"""
Conservatively shrink the edges. A better approach
might be to use the flatfield data and set the edge
to where the flux is, say, 1 sigma below the nominal
level for the slit.
"""
# if start!=0:
# start += 2
# end -= 2
""" Check if the slit is a starbox (if requested) """
if find_stars:
                mid = (start+end)//2
peaks = findlines(arc[mid],False)
is_star = check_star(peaks,arc[mid])
else: is_star = False
""" Skip small slits """
if not is_star and end-start<11:
continue
elif is_star and end-start<9:
continue
"""
Conservatively shrink the edges. A better approach
might be to use the flatfield data and set the edge
to where the flux is, say, 1 sigma below the nominal
level for the slit.
"""
if is_star:
starbox.append([start,end])
else:
while flux[start+1]-flux[start]>3.*flux[start]**0.5:
start += 1
while flux[end-1]-flux[end]>3.*flux[end]**0.5:
end -= 1
if flux[start:end].mean()<minflux:
continue
slit.append([start,end])
elif i+1==arc.shape[0] and end<start:
""" The top of the mask is also the top of a slit. """
end = i+1
if find_stars:
                mid = (start+end)//2
peaks = findlines(arc[mid],False)
is_star = check_star(peaks,arc[mid])
else: is_star = False
if not is_star and end-start<11:
continue
elif is_star and end-start<9:
continue
if is_star:
starbox.append([start+2,end])
else:
while flux[start+1]-flux[start]>3.*flux[start]**0.5:
start += 1
if flux[start:end].mean()<minflux:
continue
slit.append([start,end])
break
else:
""" In the middle of the slit, nothing to do.... """
i += 1
if find_stars:
return slit,starbox
return slit
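
# --- Minimal usage sketch (an illustrative addition, not part of the original routine) ---
# Assumes a y-rectified 2D arc frame stored in a FITS file; 'arc_ycorr.fits' is a
# placeholder name, and the module-level helpers used above (findlines, check_star,
# clip, Clip) are assumed to be importable alongside id_slits. Note the routine
# relies on the legacy scipy namespace (scipy.sort, scipy.median, ...), so it needs
# an environment where those aliases are still available.
if __name__ == '__main__':
    from astropy.io import fits
    arc_img = fits.getdata('arc_ycorr.fits').astype('float64')
    slits, starboxes = id_slits(arc_img, find_stars=True)
    for bottom, top in slits:
        print('slit rows %d-%d' % (bottom, top))
    for bottom, top in starboxes:
        print('star box rows %d-%d' % (bottom, top))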
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def id_slits(flat_data,findstars=True):\n\n\ty_axis = flat_data.shape[0]\n\n\tdata = flat_data.mean(axis=1)\n\td = data.copy()\n\n\t\"\"\"\n\tThe slits tend to be demarcated by when the sorted data begins to\n\t grow at an accelerating rate; the first derivative tends to be an\n\t acceptable proxy, though. The edges are masked for bad pixels/star\n\t boxes.\n\t\"\"\"\n\tsrt = scipy.sort(d)\n\tbrk = signal.convolve(srt,[-1.,1.],mode='same')\n\tpix = brk[brk.size/10:brk.size*9/10].argmin()+brk.size/10\n\n\tlowvals = srt[pix]\n\n\td[d<lowvals] = 0.\n\td[d>0.] = 1.\n\n\n\t\"\"\"\n\tThis needs to be tweaked to properly account for slits at the top and\n\t bottom of the mask.\n\t\"\"\"\n\tedges = signal.convolve(d,[-1.,1.],mode='same')\n\tleft = scipy.where(edges<0)[0]\n\tright = scipy.where(edges>0)[0]\n\n\tslits = []\n\tfor i in range(left.size):\n\t\tslits.append([left[i],right[i]-1])\n\n\tif findstars is False:\n\t\treturn slits\n\n\t\"\"\"\n\tThe star boxes are identified by locating where the slit amplitudes\n\t begin to spike. The current criterion is that a slit amplitude is\n\t more than one sigma greater than the previous slit.\n\t\"\"\"\n\tamps = []\n\tfor l,r in slits:\n\t\tamps.append(scipy.median(data[l:r]))\n\tamps = scipy.asarray(amps)\n\targs = amps.argsort()\n\tamps.sort()\n\n\tindx = amps.size-1\n\tfor i in range(amps.size/2,amps.size):\n\t\tstd = amps[:i].std()\n\t\tif amps[i]>amps[i-1]+std:\n\t\t\tindx = i\n\t\t\tbreak\n\tstarindx = args[indx:]\n\tstarindx.sort()\n\tstars = []\n\tfor i in starindx:\n\t\tstars.append(slits[i])\n\tfor i in starindx[::-1]:\n\t\tdel slits[i]\n\n\treturn slits,stars",
"def set_star_ids(aca):\n from chandra_aca.transform import radec_to_yagzag\n from Quaternion import Quat\n\n from kadi.commands import conf\n\n obs = aca[\"meta\"]\n q_att = Quat(obs[\"att\"])\n stars = get_agasc_cone_fast(\n q_att.ra, q_att.dec, radius=1.2, date=obs[\"date\"], matlab_pm_bug=True\n )\n yang_stars, zang_stars = radec_to_yagzag(\n stars[\"RA_PMCORR\"], stars[\"DEC_PMCORR\"], q_att\n )\n idxs_aca = np.where(np.isin(aca[\"type\"], (\"ACQ\", \"GUI\", \"BOT\")))[0]\n for idx_aca in idxs_aca:\n yang = aca[\"yang\"][idx_aca]\n zang = aca[\"zang\"][idx_aca]\n dys = np.abs(yang - yang_stars)\n dzs = np.abs(zang - zang_stars)\n\n # Get the brightest star within a box (default = 1.5 arcsec halfwidth)\n halfw = conf.star_id_match_halfwidth\n ok = (dys < halfw) & (dzs < halfw)\n if np.any(ok):\n idx = np.argmin(stars[\"MAG_ACA\"][ok])\n aca[\"id\"][idx_aca] = int(stars[\"AGASC_ID\"][ok][idx])\n aca[\"mag\"][idx_aca] = float(stars[\"MAG_ACA\"][ok][idx])\n else:\n logger.info(\n f\"WARNING: star idx {idx_aca + 1} not found in obsid {obs['obsid']} at \"\n f\"{obs['date']}\"\n )",
"def cut(id=0):\n global stars_\n n = len(stars_)\n if n == 0:\n print \"No stars have been selected, go use 'stars()'\"\n return\n if id <= 0 or id > n:\n print \"Illegal id, valid are 1..%d\" % n\n return\n if id == 1:\n stars_ = stars_[1:]\n elif id == n:\n stars_ = stars_[:n-1]\n else:\n stars_ = stars_[:id-1] + stars_[id:]",
"def drawstars(slist=[], best=None, outfile='/tmp/stars.jpg'):\n img = Image.new('RGB', (xmax,ymax), backcol) #blank 8-bit color image\n draw = ImageDraw.Draw(img)\n\n x,y,radius = 400, 300, hole_radius*Cscale\n draw.rectangle( (400+Xmin*Cscale, 300-Ymin*Cscale, 400+Xmax*Cscale, 300-Ymax*Cscale), outline=(0,128,0), fill=None)\n draw.chord( (int(x-radius+0.5),int(y-radius+0.5),int(x+radius+0.5),int(y+radius+0.5)),\n 0, 360, outline=(0,128,0), fill=None)\n\n for i in range(len(slist)):\n x,y,radius = 400+slist[i].x*Sscale, 300-slist[i].y*Sscale, rscale(slist[i].mag)\n draw.chord( (int(x-radius+0.5),int(y-radius+0.5),int(x+radius+0.5),int(y+radius+0.5)),\n 0, 360, outline=(0,0,0), fill=(0,0,0))\n draw.text( (400+slist[i].x*Sscale+3, 300-slist[i].y*Sscale+3), `i`, fill=(0,0,0) )\n\n i = best #Redraw the 'best' star in red\n try:\n x,y,radius = 400+slist[i].x*Sscale, 300-slist[i].y*Sscale, rscale(slist[i].mag)\n draw.chord( (int(x-radius+0.5),int(y-radius+0.5),int(x+radius+0.5),int(y+radius+0.5)),\n 0, 360, outline=(192,0,0), fill=(192,0,0))\n draw.text( (400+slist[i].x*Sscale+3, 300-slist[i].y*Sscale+3), `i`, fill=(192,0,0) )\n except TypeError,IndexError:\n pass #There is no 'best' star\n\n img.save(outfile, quality=90)",
"def ggpl_spiral_staircase(dx,dy,dz):\n\tnstep = int(dy*2.7)+1\n\t\"\"\" steps parameters \"\"\"\n\triserHeight = (0.50*dy)/nstep\n\ttreadDept = (0.6300-riserHeight)/2.0\n\t\"\"\" number of steps and length of landing for each side \"\"\"\n\tlandingLengthY=dy-((nstep+1)*treadDept)\n\tif dx>dy:\n\t\tstepWidth = landingLengthY\n\telse:\n\t\tstepWidth = dx/2.5\n\t\tlandingLengthY = stepWidth\n\tnsteplatox = int(((dx-2*stepWidth)/treadDept)+0.5) \n\tlandingLengthX=stepWidth\n\tnsteplatoy = int(((dy-stepWidth-landingLengthY)/treadDept)+0.5)\n\t\"\"\" skeleton of the box that contains the stair \"\"\"\n\tbox = SKEL_1(CUBOID([dx,dy,dz]))\n\t\"\"\" total steps \"\"\"\n\ttotalSteps = int((dz/riserHeight))\n\t\"\"\" number and height of floor \"\"\"\n\tnfloor = int(round(dz/2)+1)\n\theightfloor = (nsteplatoy)*riserHeight\n\t\"\"\" first stair \"\"\"\n\tstair=make_stair(nsteplatoy,treadDept,riserHeight,landingLengthY+treadDept,stepWidth,1)\n\tstair = T(2)([dy-((nsteplatoy+2)*treadDept)-landingLengthY]) (stair)\n\t\"\"\" variable that takes into account the number of steps made \"\"\"\n\trealizedStep = nsteplatoy\n\tr =4\n\n\t\"\"\" realization of the stairs \"\"\"\n\tfor j in range(int(nfloor)*2):\n\t\t\"\"\" condition for the realization of the final stair \"\"\"\n\t\tif (totalSteps-realizedStep<=nsteplatox) or (totalSteps-realizedStep<=nsteplatoy):\n\t\t\tif (totalSteps-realizedStep<=nsteplatox) and r%2==1:\n\t\t\t\tfinalStair = make_stair((totalSteps-realizedStep-1),treadDept,riserHeight,dy-stepWidth-(totalSteps-realizedStep-1)*treadDept,stepWidth,2)\n\t\t\telse:\n\t\t\t\tfinalStair = make_stair((totalSteps-realizedStep-1),treadDept,riserHeight,dx-stepWidth-(totalSteps-realizedStep-1)*treadDept,stepWidth,2)\n\t\t\t\t\"\"\" rotation and translation of the scale in the correct position \"\"\"\n\t\t\tif r==4:\n\t\t\t\tfinalStair=R([1,2])(3*PI/2)(finalStair)\n\t\t\t\tfinalStair = T([1,2,3])([stepWidth-treadDept,dy,heightfloor])(finalStair)\n\t\t\t\tstair = STRUCT([stair,finalStair])\n\t\t\t\tbreak\n\t\t\tif r==1:\n\t\t\t\tfinalStair = R([1,2])(PI)(finalStair)\n\t\t\t\tfinalStair = T([1,2,3])([dx,dy-landingLengthY+treadDept ,heightfloor])(finalStair)\n\t\t\t\tstair = STRUCT([stair,finalStair])\n\t\t\t\tbreak\n\t\t\tif r==2:\n\t\t\t\tfinalStair = R([1,2])(PI/2)(finalStair)\n\t\t\t\tfinalStair = T([1,2,3])([dx-landingLengthY+treadDept,0,heightfloor])(finalStair)\n\t\t\t\tstair = STRUCT([stair,finalStair])\n\t\t\t\tbreak\n\t\t\tif r==3:\n\t\t\t\tfinalStair = T([1,2,3])([0,stepWidth-treadDept,heightfloor])(finalStair)\n\t\t\t\tstair = STRUCT([stair,finalStair])\n\t\t\t\tbreak\n\n\t\telse:\n\t\t\tif j%4== 0:\n\t\t\t\tstepsX = make_stair(nsteplatox,treadDept,riserHeight,landingLengthX,stepWidth,1)\n\t\t\t\tstepsX = R([1,2])(3*PI/2)(stepsX)\n\t\t\t\tstepsX = T([1,2,3])([stepWidth-treadDept,dy,heightfloor])(stepsX)\n\t\t\t\tstair = STRUCT([stair,stepsX])\n\t\t\t\theightfloor += (nsteplatox+1)*riserHeight \n\t\t\t\trealizedStep += nsteplatox+1\n\t\t\t\tr=1\n\t\t\tif j%4== 1:\n\t\t\t\tstepsY = make_stair(nsteplatoy,treadDept,riserHeight,dy-nsteplatoy*treadDept-stepWidth,stepWidth,1)\n\t\t\t\tstepsY = R([1,2])(PI)(stepsY)\n\t\t\t\tstepsY = T([1,2,3])([dx,dy-landingLengthY+treadDept ,heightfloor])(stepsY)\n\t\t\t\tstair = STRUCT([stair,stepsY])\n\t\t\t\theightfloor += (nsteplatoy+1)*riserHeight \n\t\t\t\trealizedStep += nsteplatoy+1\n\t\t\t\tr=2\n\t\t\tif j%4== 2:\n\t\t\t\tstepsX = make_stair(nsteplatox,treadDept,riserHeight,landingLengthX,stepWidth,1)\n\t\t\t\tstepsX = R([1,2])(PI/2)(stepsX)\n\t\t\t\tstepsX = 
T([1,2,3])([dx-landingLengthY+treadDept,0,heightfloor])(stepsX)\n\t\t\t\tstair = STRUCT([stair,stepsX])\n\t\t\t\theightfloor += (nsteplatox+1)*riserHeight \n\t\t\t\trealizedStep += nsteplatox+1\n\t\t\t\tr=3\n\t\t\tif j%4== 3:\n\t\t\t\tstepsY = make_stair(nsteplatoy,treadDept,riserHeight,landingLengthY,stepWidth,1)\n\t\t\t\tstepsY = T([1,2,3])([0,stepWidth-treadDept,heightfloor])(stepsY)\n\t\t\t\tstair = STRUCT([stair,stepsY])\n\t\t\t\theightfloor += (nsteplatoy+1)*riserHeight \n\t\t\t\trealizedStep += nsteplatoy+1\n\t\t\t\tr=4\n\t\"\"\"floor of the stair\"\"\"\n\tfloor = CUBOID([dx,dy,0.05])\n\tfloor = TEXTURE(\"texture/floorStair.jpg\")(floor)\n\n\treturn STRUCT([stair,floor,box])",
"def draw_star(x=0,y=0,radius=10):\n cx = x\n cy = y+radius\n bx = cx * math.cos(2*math.pi/3) - ( cy * math.sin(2*math.pi/3) )\n by = cx * math.sin(2*math.pi/3) + ( cy * math.cos(2*math.pi/3) )\n ax = cx * math.cos(4*math.pi/3) - ( cy * math.sin(4*math.pi/3) )\n ay = cx * math.sin(4*math.pi/3) + ( cy * math.cos(4*math.pi/3) )\n my_turtle.penup()\n my_turtle.goto(cx, cy)\n my_turtle.pendown()\n my_turtle.goto(bx, by)\n my_turtle.goto(ax, ay)\n my_turtle.goto(cx, cy)\n my_turtle.penup()\n cy = y-radius\n bx = cx * math.cos(2*math.pi/3) - ( cy * math.sin(2*math.pi/3) )\n by = cx * math.sin(2*math.pi/3) + ( cy * math.cos(2*math.pi/3) )\n ax = cx * math.cos(4*math.pi/3) - ( cy * math.sin(4*math.pi/3) )\n ay = cx * math.sin(4*math.pi/3) + ( cy * math.cos(4*math.pi/3) )\n my_turtle.penup()\n my_turtle.goto(cx, cy)\n my_turtle.pendown()\n my_turtle.goto(bx, by)\n my_turtle.goto(ax, ay)\n my_turtle.goto(cx, cy)\n my_turtle.penup()",
"def LSSTPointing(xc, yc, angle_rot=0., area=None, maxbound=None):\n\n \"\"\"\n arr = [[3, 0], [12, 0], [12, 1], [13, 1], [13, 2], [14, 2], [14, 3], [15, 3],\n [15, 12], [14, 12], [14, 13], [13, 13], [\n 13, 14], [12, 14], [12, 15],\n [3, 15], [3, 14], [2, 14], [2, 13], [1, 13], [1, 12], [0, 12],\n [0, 3], [1, 3], [1, 2], [2, 2], [2, 1], [3, 1]]\n \"\"\"\n # this is a quarter of LSST FP (with corner rafts)\n arr = [[0.0, 7.5], [4.5, 7.5], [4.5, 6.5], [5.5, 6.5], [\n 5.5, 5.5], [6.5, 5.5], [6.5, 4.5], [7.5, 4.5], [7.5, 0.0]]\n\n # this is a quarter of LSST FP (without corner rafts)\n arr = [[0.0, 7.5], [4.5, 7.5], [4.5, 4.5], [7.5, 4.5], [7.5, 0.0]]\n if maxbound is not None:\n arr = [[0.0, maxbound], [maxbound*4.5/7.5, maxbound], [maxbound*4.5 /\n 7.5, maxbound*4.5/7.5], [maxbound, maxbound*4.5/7.5], [maxbound, 0.0]]\n # symmetry I: y -> -y\n arrcp = list(arr)\n for val in arr[::-1]:\n if val[1] > 0.:\n arrcp.append([val[0], -val[1]])\n\n # symmetry II: x -> -x\n arr = list(arrcp)\n for val in arrcp[::-1]:\n if val[0] > 0.:\n arr.append([-val[0], val[1]])\n\n # build polygon\n poly_orig = geometry.Polygon(arr)\n\n # set area\n if area is not None:\n poly_orig = affinity.scale(poly_orig, xfact=np.sqrt(\n area/poly_orig.area), yfact=np.sqrt(area/poly_orig.area))\n\n # set rotation angle\n rotated_poly = affinity.rotate(poly_orig, angle_rot)\n\n return affinity.translate(rotated_poly,\n xoff=xc-rotated_poly.centroid.x,\n yoff=yc-rotated_poly.centroid.y)",
"def shooting_star(ohlc_df):\r\n df = ohlc_df.copy()\r\n df[\"sstar\"] = (((df[\"high\"] - df[\"low\"])>3*(df[\"open\"] - df[\"close\"])) & \\\r\n ((df[\"high\"] - df[\"close\"])/(.001 + df[\"high\"] - df[\"low\"]) > 0.6) & \\\r\n ((df[\"high\"] - df[\"open\"])/(.001 + df[\"high\"] - df[\"low\"]) > 0.6)) & \\\r\n (abs(df[\"close\"] - df[\"open\"]) > 0.1* (df[\"high\"] - df[\"low\"]))\r\n return df",
"def shooting_star(ohlc_df):\n df = ohlc_df.copy()\n df[\"sstar\"] = (((df[\"high\"] - df[\"low\"])>3*(df[\"open\"] - df[\"close\"])) & \\\n ((df[\"high\"] - df[\"close\"])/(.001 + df[\"high\"] - df[\"low\"]) > 0.6) & \\\n ((df[\"high\"] - df[\"open\"])/(.001 + df[\"high\"] - df[\"low\"]) > 0.6)) & \\\n (abs(df[\"close\"] - df[\"open\"]) > 0.1* (df[\"high\"] - df[\"low\"]))\n return df",
"def stars(elmin=0,magmax=100,sort='az',northSouth='all',doBackwards=False,binSize=20.0):\n global stars_\n if northSouth == 'north' : cutAz = 269.0\n elif northSouth == 'south' : cutAz = 0.0\n else : cutAz = 330.0\n localLat = 37.0+16.0/60.0+49.37285/3600.0\n def cmpa(x,y):\n # sorting helper for azimuth (note the breakpoint at cutAz!!!)\n def optaz(a):\n if a<cutAz: return a\n return a-360\n a=optaz(x[1])\n b=optaz(y[1])\n if a<b: return -1\n if a>b: return 1\n return 0\n def cmpz(x,y):\n # sorting helper for reverse azimuth (note the breakpoint at cutAz!!!)\n def optaz(a):\n if a<cutAz: return a\n return a-360\n a=optaz(x[1])\n b=optaz(y[1])\n if a<b: return 1\n if a>b: return -1\n return 0\n def cmpe(x,y):\n # sorting helper for elevation\n if x[2]<y[2]: return -1\n if x[2]>y[2]: return 1\n return 0\n def cmpza(x,y) :\n # sorting helper for zenith angle\n if x[2]<y[2]: return 1\n if x[2]>y[2]: return -1\n return 0\n def cmpm(x,y):\n # sorting helper for optical magnitude\n if x[3]<y[3]: return -1\n if x[3]>y[3]: return 1\n return 0\n # report\n if elmin < -99 and magmax > 99: print \"Warning: Selecting all stars, use elmin= or magmax=\"\n if elmin > -99: print \"Selecting stars above elevation %g deg\" % elmin\n if magmax < 99: print \"Selecting stars brighter than %g mag\" % magmax\n print \"Sorting mode: \",sort\n # sorting mode\n if sort == 'el':\n my_cmp=cmpe\n elif sort == 'mag':\n my_cmp=cmpm\n elif sort == 'za':\n my_cmp=cmpza\n elif sort == 'az':\n my_cmp=cmpa\n elif sort == '-az' :\n my_cmp=cmpz\n else:\n print \"Warning: sorting mode %s not supported, using az\" % sort\n my_cmp=cmpa\n # empty the list again\n stars_=[]\n # Keep the user happy\n print \"Hang on, marching through the ephemeris of %d stars\" % len(ostars_)\n for s in ostars_:\n s1=sazelmag(s)\n if s1[2] < elmin or s1[3] > magmax:\n continue\n #if (((short.getDec(s) < localLat) and northSouth == 'north') or (short.getDec(s) > localLat) and (northSouth == 'south')) :\n dec = (SAC.getRaDec(s))[1]\n if (((dec < localLat) and northSouth == 'north') or \\\n (dec > localLat) and (northSouth == 'south')) :\n continue\n stars_.append(s1)\n stars_.sort(cmpa)\n bins = []\n breakIndex = [0]\n starsPart = []\n starsTemp = []\n starsAz_ = []\n for i in range(len(stars_)) :\n starsAz_.append(stars_[i][1])\n if starsAz_[i] > cutAz : starsAz_[i]=starsAz_[i]-360.0\n startAz = cutAz-360.0 # degrees\n for i in range(int(360/binSize+1)) : bins.append(int(startAz+binSize*i))\n j=0\n for i in range(len(bins)-1) :\n while((starsAz_[j] < bins[i+1]) and (starsAz_[j] >= bins[i]) and (j < len(stars_)-1)) : j=j+1\n breakIndex=breakIndex+[j]\n breakIndex[len(breakIndex)-1]=breakIndex[len(breakIndex)-1]+1\n for i in range(len(bins)-1) :\n if i%2 : my_cmp = cmpe\n else : my_cmp = cmpza\n starsPart = stars_[breakIndex[i]:breakIndex[i+1]]\n# Last bin sort in AZ ONLY!!! 
Saves you alot of trouble later!\n# if i==(len(bins)-2) :\n# starsTemp=starsTemp+starsPart\n# else :\n starsPart.sort(my_cmp)\n starsTemp=starsTemp+starsPart\n stars_ = starsTemp\n counter = 0\n while counter < len(stars_)-1 :\n az1 = int(stars_[counter][1]*10.0)\n az2 = int(stars_[counter+1][1]*10.0)\n el1 = int(stars_[counter][2]*10.0)\n el2 = int(stars_[counter+1][2]*10.0)\n if ((az1 == az2) and (el1== el2)) : cut(counter+2)\n else : counter=counter+1\n i=0\n if doBackwards : stars_.reverse()\n print \" i name az(deg) el(deg) magn\"\n print \"-------------------------------\"\n for s in stars_:\n i=i+1\n if len(s[0]) == 4 : \n print \"%3d %s %6.1f %6.1f %6.2f\" % (i,s[0],s[1],s[2],s[3])\n else : print \"%3d %s %6.1f %6.1f %6.2f\" % (i,s[0],s[1],s[2],s[3])\n print \"-------------------------------\"",
"def trace_tilt(ordcen, rordloc, lordloc, det, msarc, slitnum, satval,\n idsonly=False, censpec=None, maskval=-999999.9, tracethresh=20.0,\n nsmth=0, method=\"fweight\", wv_calib=None, nonlinear_counts = 1e10):\n def pad_dict(indict):\n \"\"\" If an arc line is considered bad, fill the\n dictionary arrays with null values\n \"\"\"\n indict[\"xtfit\"].append(None)\n indict[\"ytfit\"].append(None)\n indict[\"wmask\"].append(None)\n return indict\n\n dnum = parse.get_dnum(det)\n\n msgs.work(\"Detecting lines for slit {0:d}\".format(slitnum+1))\n tampl, tampl_cont, tcent, twid, _, w, _ , tnsig = arc.detect_lines(censpec, fit_frac_fwhm=1.75, nonlinear_counts=nonlinear_counts)\n\n # TODO: Validate satval value?\n# satval = settings_det['saturation']*settings_det['nonlinear']\n # Order of the polynomials to be used when fitting the tilts.\n arcdet = (tcent[w]+0.5).astype(np.int)\n nsig = tnsig[w]\n\n # Determine the best lines to use to trace the tilts\n ncont = 15\n aduse = np.zeros(arcdet.size, dtype=np.bool) # Which lines should be used to trace the tilts\n w = np.where(nsig >= tracethresh)\n aduse[w] = 1\n # Remove lines that are within ncont pixels\n nuse = np.sum(aduse)\n detuse = arcdet[aduse]\n idxuse = np.arange(arcdet.size)[aduse]\n olduse = aduse.copy()\n for s in range(nuse):\n w = np.where((np.abs(arcdet-detuse[s]) <= ncont) & (np.abs(arcdet-detuse[s]) >= 1.0))[0]\n for u in range(w.size):\n if nsig[w[u]] > nsig[olduse][s]:\n aduse[idxuse[s]] = False\n break\n # TODO Perhaps a more robust version of this code would only use the lines that were used in the wavelength solution. I guess\n # that would filter out these ghosts and it would also filter out blends for which the tracing will be less robust becuase\n # you are trace_fweighting a blended line?\n\n # Restricted to ID lines? 
[introduced to avoid LRIS ghosts]\n if idsonly:\n ids_pix = np.round(np.array(wv_calib[str(slitnum)]['xfit'])*(msarc.shape[0]-1))\n idxuse = np.arange(arcdet.size)[aduse]\n for s in idxuse:\n if np.min(np.abs(arcdet[s]-ids_pix)) > 2:\n msgs.info(\"Ignoring line at row={:d}\".format(arcdet[s]))\n aduse[s] = False\n\n # Divide the detector into Nseg segments,\n # and find the brightest lines in each segment.\n # The total number of lines used to trace the tilts will be = Nseg*Nuse + Nadd\n # Nseg = 4\n # Nuse = 2\n # Nadd = 8\n # segsz = msarc.shape[0]/float(Nseg)\n # aduse = np.zeros(arcdet.size, dtype=np.bool) # Which lines should be used to trace the tilts\n # for s in range(Nseg):\n # w = np.where((arcdet > s*segsz) & (arcdet <= (s+1)*segsz))[0]\n # segampl = tampl[w]\n # asrt = np.argsort(segampl)[::-1]\n # for u in range(Nuse):\n # aduse[w[asrt[u]]] = True\n # # Now include some additional bright lines\n # asrt = np.argsort(tampl)[::-1]\n # s, u = 0, 0\n # while u < Nadd:\n # if not aduse[asrt[s]]:\n # aduse[asrt[s]] = True\n # u += 1\n # s += 1\n\n # Setup the trace dictionary\n trcdict = {\"xtfit\":[], \"ytfit\":[], \"wmask\":[], \"arcdet\":arcdet, \"aduse\":aduse, \"badlines\":0}\n\n msgs.info(\"Modelling arc line tilts with {0:d} arc lines\".format(np.sum(aduse)))\n if np.sum(aduse) == 0:\n msgs.warn(\"No arc lines were deemed usable in slit {0:d} for spectral tilt\".format(slitnum))\n return None\n # Go along each order and trace the tilts\n # Start by masking every row, then later unmask the rows with usable arc lines\n msgs.work(\"This next step could be multiprocessed to speed up the reduction\")\n nspecfit = 3\n badlines = 0\n for j in range(arcdet.size):\n # For each detection in this order\n #msgs.info(\"Tracing tilt of arc line {0:d}/{1:d}\".format(j+1, arcdet.size))\n # Check if this is a saturated line\n ysat = msarc[arcdet[j]-nspecfit:arcdet[j]+nspecfit+1, ordcen[arcdet[j], slitnum]-nsmth:ordcen[arcdet[j], slitnum]+nsmth+1]\n if np.where(ysat > satval)[0].size != 0:\n aduse[j] = False\n badlines += 1\n trcdict = pad_dict(trcdict)\n continue\n # Get the size of the slit\n sz = int(np.floor(np.abs(rordloc[arcdet[j], slitnum]-lordloc[arcdet[j], slitnum])/2.0)) - 2\n xtfit = np.zeros(2*sz+1)\n ytfit = np.ones(2*sz+1)*maskval # Fitted centroid\n etfit = np.zeros(2*sz+1) # Fitted centroid error\n mtfit = np.ones(2*sz+1, dtype=np.int) # Mask of bad fits\n #apfit = np.zeros(2*sz+1) # Fitted Amplitude\n xfit = np.arange(-nspecfit, nspecfit+1, 1.0)\n wfit = np.ones(xfit.size, dtype=np.float)\n tstcc = True # A boolean to tell the loop once a good set of pixels has been found to cross-correlate with\n # Fit up\n pcen = arcdet[j]\n if (pcen < nspecfit) or (pcen > msarc.shape[0]-(nspecfit+1)):\n # Too close to the end of the spectrum\n aduse[j] = False\n badlines += 1\n trcdict = pad_dict(trcdict)\n continue\n offchip = False\n centv = None\n for k in range(0, sz+1-nsmth):\n if (pcen < nspecfit) or (pcen > msarc.shape[0]-(nspecfit+1)):\n offchip = True\n break\n if ordcen[pcen, slitnum]+k >= msarc.shape[1]:\n offchip = True\n break\n # yfit = msarc[pcen-nspecfit:pcen+nspecfit+1,ordcen[arcdet[j],0]+k]\n yfit = msarc[pcen-nspecfit:pcen+nspecfit+1, ordcen[arcdet[j], slitnum]+k-nsmth:ordcen[arcdet[j], slitnum]+k+nsmth+1]\n if np.size(yfit) == 0:\n offchip = True\n break\n if len(yfit.shape) == 2:\n yfit = np.median(yfit, axis=1)\n # wgd = np.where((yfit<satval)&(yfit!=maskval))\n wgd = np.where(yfit == maskval)\n if wgd[0].size != 0:\n continue\n if method == \"fweight\":\n if centv is 
None:\n centv = np.sum(yfit * (pcen+xfit))/np.sum(yfit)\n wfit[0] = 0.5 + (pcen-centv)\n wfit[-1] = 0.5 - (pcen-centv)\n sumxw = yfit * (pcen+xfit) * wfit\n sumw = yfit * wfit\n centv = np.sum(sumxw)/np.sum(sumw)\n fail = False\n elif method == \"cc\":\n # Get a copy of the array that will be used to cross-correlate\n if tstcc:\n # ccyfit = msarc[arcdet[j]-nspecfit:arcdet[j]+nspecfit+1,ordcen[arcdet[j],0]+k]\n ccyfit = msarc[arcdet[j]-nspecfit:arcdet[j]+nspecfit+1,ordcen[arcdet[j],slitnum]+k-nsmth:ordcen[arcdet[j],slitnum]+k+nsmth+1]\n if len(ccyfit.shape) == 2:\n ccyfit = np.median(ccyfit, axis=1)\n wgd = np.where(ccyfit == maskval)\n if wgd[0].size != 0:\n continue\n ccval = arcdet[j] + np.sum(xfit*ccyfit)/np.sum(ccyfit)\n tstcc = False # Once we have an array, there's no need to keep looking\n cc = np.correlate(ccyfit, yfit, mode='same')\n params, fail = utils.gauss_lsqfit(xfit, cc, 0.0)\n centv = ccval + pcen - arcdet[j] - params[1]\n xtfit[k+sz] = ordcen[arcdet[j], slitnum] + k\n ytfit[k+sz] = centv\n etfit[k+sz] = 0.02\n #apfit[k+sz] = params[0]\n if fail:\n mtfit[k+sz] = 1\n else:\n pcen = int(0.5 + centv)\n mtfit[k+sz] = 0\n if offchip:\n # Don't use lines that go off the chip (could lead to a bad trace)\n aduse[j] = False\n badlines += 1\n trcdict = pad_dict(trcdict)\n continue\n for k in range(sz+1-nsmth, sz+1):\n xtfit[k+sz] = ordcen[arcdet[j], slitnum]+k\n # Fit down\n pcen = arcdet[j]\n centv = None\n for k in range(1,sz+1-nsmth):\n if (pcen < nspecfit) or (pcen > msarc.shape[0]-(nspecfit+1)):\n offchip = True\n break\n if ordcen[pcen, slitnum]-k < 0:\n offchip = True\n break\n # yfit = msarc[pcen-nspecfit:pcen+nspecfit+1,ordcen[arcdet[j],0]-k]\n yfit = msarc[pcen - nspecfit:pcen + nspecfit + 1,\n ordcen[arcdet[j], slitnum] - k - nsmth:ordcen[arcdet[j], slitnum] - k + nsmth + 1]\n # check whether the yfit is offchip FW\n if np.size(yfit) == 0:\n offchip = True\n break\n elif len(yfit.shape) == 2:\n yfit = np.median(yfit, axis=1)\n else:\n pass\n wgd = np.where(yfit == maskval)\n if wgd[0].size != 0:\n continue\n if method == \"fweight\":\n if centv is None:\n centv = np.sum(yfit * (pcen+xfit))/np.sum(yfit)\n wfit[0] = 0.5 + (pcen-centv)\n wfit[-1] = 0.5 - (pcen-centv)\n sumxw = yfit * (pcen+xfit) * wfit\n sumw = yfit * wfit\n centv = np.sum(sumxw)/np.sum(sumw)\n #if np.isfinite(centv) == False: # debugging\n # from IPython import embed\n # embed()\n fail = False\n elif method == \"cc\":\n # Get a copy of the array that will be used to cross-correlate\n # (testcc is probably already False from the Fit Up part of the loop, but it's best to be sure)\n if tstcc:\n # ccyfit = msarc[arcdet[j]-nspecfit:arcdet[j]+nspecfit+1,ordcen[arcdet[j],0]-k]\n ccyfit = msarc[arcdet[j]-nspecfit:arcdet[j]+nspecfit+1,ordcen[arcdet[j],slitnum]-k-nsmth:ordcen[arcdet[j],slitnum]-k+nsmth+1]\n if len(ccyfit.shape) == 2:\n ccyfit = np.median(ccyfit, axis=1)\n wgd = np.where(ccyfit == maskval)\n if wgd[0].size != 0:\n continue\n ccval = arcdet[j] + np.sum(xfit*ccyfit)/np.sum(ccyfit)\n tstcc = False # Once we have an array, there's no need to keep looking\n cc = np.correlate(ccyfit, yfit, mode='same')\n params, fail = utils.gauss_lsqfit(xfit, cc, 0.0)\n centv = ccval + pcen - arcdet[j] - params[1]\n xtfit[sz-k] = ordcen[arcdet[j], slitnum] - k\n ytfit[sz-k] = centv\n etfit[sz-k] = 0.02\n #apfit[sz-k] = params[0]\n if fail:\n mtfit[sz-k] = 1\n else:\n #from IPython import embed\n if np.isfinite(centv) == False: debugger.set_trace() #embed()\n pcen = int(0.5 + centv)\n mtfit[sz-k] = 0\n\n if offchip:\n # 
Don't use lines that go off the chip (could lead to a bad trace)\n aduse[j] = False\n badlines += 1\n trcdict = pad_dict(trcdict)\n continue\n for k in range(sz+1-nsmth, sz+1):\n xtfit[sz-k] = ordcen[arcdet[j], slitnum]-k\n\n wmask = np.where(mtfit == 0)\n ytfit[np.where(mtfit == 1)] = maskval\n\n # Append the trace information into the dictionary\n trcdict[\"xtfit\"].append(xtfit.copy())\n trcdict[\"ytfit\"].append(ytfit.copy())\n trcdict[\"wmask\"].append(wmask[0].copy())\n trcdict[\"aduse\"] = aduse\n trcdict[\"badlines\"] = badlines\n msgs.info(\"Completed spectral tilt tracing\".format(np.sum(aduse)))\n return trcdict",
"def find_stars_close_to_target(star_catalog, target, tol, log):\n\n tol = tol / 60.0 # Select stars within 2 arcmin of target\n det_stars = SkyCoord(star_catalog['RA'], star_catalog['DEC'], unit=\"deg\")\n\n t = SkyCoord(target.ra, target.dec, unit=\"deg\")\n\n seps = det_stars.separation(t)\n\n jdx = np.where(seps.deg < tol)[0]\n\n log.info('Identified '+str(len(jdx))+' stars within '+str(round(tol*60.0,1))+\\\n 'arcmin of the target')\n\n return jdx",
"def snell(self, indices, theta_0):\n return sp.arcsin(np.real_if_close(n_list[0]*np.sin(th_0) / n_list))",
"def arc_fit_qa(slf, fit, outfile, ids_only=False, title=None):\n\n plt.rcdefaults()\n plt.rcParams['font.family']= 'times new roman'\n\n arc_spec = fit['spec']\n\n # Begin\n if not ids_only:\n plt.figure(figsize=(8, 4.0))\n plt.clf()\n gs = gridspec.GridSpec(2, 2)\n idfont = 'xx-small'\n else:\n plt.figure(figsize=(11, 8.5))\n plt.clf()\n gs = gridspec.GridSpec(1, 1)\n idfont = 'small'\n\n # Simple spectrum plot\n ax_spec = plt.subplot(gs[:,0])\n ax_spec.plot(np.arange(len(arc_spec)), arc_spec)\n ymin, ymax = 0., np.max(arc_spec)\n ysep = ymax*0.03\n for kk, x in enumerate(fit['xfit']*fit['xnorm']):\n yline = np.max(arc_spec[int(x)-2:int(x)+2])\n # Tick mark\n ax_spec.plot([x,x], [yline+ysep*0.25, yline+ysep], 'g-')\n # label\n ax_spec.text(x, yline+ysep*1.3,\n '{:s} {:g}'.format(fit['ions'][kk], fit['yfit'][kk]), ha='center', va='bottom',\n size=idfont, rotation=90., color='green')\n ax_spec.set_xlim(0., len(arc_spec))\n ax_spec.set_ylim(ymin, ymax*1.2)\n ax_spec.set_xlabel('Pixel')\n ax_spec.set_ylabel('Flux')\n if title is not None:\n ax_spec.text(0.04, 0.93, title, transform=ax_spec.transAxes,\n size='x-large', ha='left')#, bbox={'facecolor':'white'})\n if ids_only:\n plt.tight_layout(pad=0.2, h_pad=0.0, w_pad=0.0)\n plt.savefig(outfile, dpi=800)\n plt.close()\n return\n\n # Arc Fit\n ax_fit = plt.subplot(gs[0, 1])\n # Points\n ax_fit.scatter(fit['xfit']*fit['xnorm'], fit['yfit'], marker='x')\n if len(fit['xrej']) > 0:\n ax_fit.scatter(fit['xrej']*fit['xnorm'], fit['yrej'], marker='o',\n edgecolor='gray', facecolor='none')\n # Solution\n xval = np.arange(len(arc_spec))\n wave = func_val(fit['fitc'], xval/fit['xnorm'], 'legendre',\n minv=fit['fmin'], maxv=fit['fmax'])\n ax_fit.plot(xval, wave, 'r-')\n xmin, xmax = 0., len(arc_spec)\n ax_fit.set_xlim(xmin, xmax)\n ymin,ymax = np.min(wave)*.95, np.max(wave)*1.05\n ax_fit.set_ylim(np.min(wave)*.95, np.max(wave)*1.05)\n ax_fit.set_ylabel('Wavelength')\n ax_fit.get_xaxis().set_ticks([]) # Suppress labeling\n # Stats\n wave_fit = func_val(fit['fitc'], fit['xfit'], 'legendre',\n minv=fit['fmin'], maxv=fit['fmax'])\n rms = np.sqrt(np.sum((fit['yfit']-wave_fit)**2)/len(fit['xfit'])) # Ang\n dwv_pix = np.median(np.abs(wave-np.roll(wave,1)))\n ax_fit.text(0.1*len(arc_spec), 0.90*ymin+(ymax-ymin),\n r'$\\Delta\\lambda$={:.3f}$\\AA$ (per pix)'.format(dwv_pix), size='small')\n ax_fit.text(0.1*len(arc_spec), 0.80*ymin+(ymax-ymin),\n 'RMS={:.3f} (pixels)'.format(rms/dwv_pix), size='small')\n # Arc Residuals\n ax_res = plt.subplot(gs[1,1])\n res = fit['yfit']-wave_fit\n ax_res.scatter(fit['xfit']*fit['xnorm'], res/dwv_pix, marker='x')\n ax_res.plot([xmin,xmax], [0.,0], 'k--')\n ax_res.set_xlim(xmin, xmax)\n ax_res.set_xlabel('Pixel')\n ax_res.set_ylabel('Residuals (Pix)')\n\n # Finish\n plt.tight_layout(pad=0.2, h_pad=0.0, w_pad=0.0)\n plt.savefig(outfile, dpi=800)\n plt.close()\n\n plt.rcdefaults()\n\n return",
"def hatch(S, dist, angle=0., flip_horizontal=False, get_hole_count=False, max_count=1000000, eps=1e-10): \n if not is_compound(S):\n S = [S]\n\n hole_count = [0 for i in range(len(S))]\n solid_count = [0 for i in range(len(S))]\n\n if not S:\n return []\n \n # Rotate shape for oriented hatches \n theta = radians(angle)\n mat = rot_2d(-theta, affine=True)\n S = [affine_transform(mat, P) for P in S]\n\n box = bounding_box(S)\n\n # build edge table\n ET = []\n for i, P in enumerate(S):\n P = np.array(P)\n n = P.shape[0]\n if n <= 2:\n continue\n for j in range(n):\n a, b = P[j], P[(j+1)%n]\n # reorder increasing y\n if a[1] > b[1]:\n a, b = b, a\n # slope\n dx = (b[0] - a[0]) \n dy = (b[1] - a[1])\n if abs(dx) > eps:\n m = dy/dx \n else:\n m = 1e15\n if abs(m) < eps:\n m = None\n ET.append(Edge(a=a, b=b, m=m, i=i))\n\n # sort by increasing y of first point\n ET = sorted(ET, key=lambda e: e.a[1])\n\n # intersection x\n def ex(e, y):\n if e.m is None:\n return None\n return e.a[0] + (y - e.a[1])/e.m\n\n y = box[0][1]\n scanlines = []\n\n AET = [] # active edge table\n\n flip = 0\n c = 0\n while ET or AET:\n if y > box[1][1]:\n break\n if c >= max_count:\n print(\"scanlines: reached max number of iterations\")\n break\n c += 1\n\n # move from ET to AET\n i = 0\n for e in ET:\n if e.a[1] <= y:\n AET.append(e)\n i += 1\n else:\n break\n if i < len(ET):\n ET = ET[i:]\n else:\n ET = []\n \n # remove passed edges\n AET = sorted(AET, key=lambda e: e.b[1])\n AET = [e for e in AET if e.b[1] > y] \n \n xs = [(ex(e, y), e.i) for e in AET]\n #brk()\n xs = [xi for xi in xs if xi[0] is not None]\n # sort Xs (flipped each scanline for more efficent plotting )\n if flip:\n xs = sorted(xs, key=lambda v: -v[0])\n else:\n xs = sorted(xs, key=lambda v: v[0])\n \n if flip_horizontal:\n flip = not flip\n \n even_odd = [0 for i in range(len(S))]\n\n if len(xs) > 1:\n #brk()\n parity = 1\n for (x1,i1), (x2,i2) in zip(xs, xs[1:]): \n a, b = (np.array([x1, y]),\n np.array([x2, y]))\n if parity:\n scanlines += [a, b]\n even_odd[i2] += 1\n else:\n # If se are outside of a shape and we enounter \n # an unvisited contour, it means that this is a separate \n # outer contour, so don't count. Otherwise...\n if even_odd[i2]:\n even_odd[i2] += 1\n pass\n parity = not parity\n\n # increment\n y = y + dist\n\n # unrotate\n if scanlines:\n scanlines = affine_transform(mat.T, scanlines) #np.array(scanlines))\n # make list of hatch segments\n scanlines = [[a, b] for a, b in zip(scanlines[0::2], scanlines[1::2])]\n return scanlines",
"def inters_segment(self, s):\r\n x1 = s.start[0] - self.center[0]\r\n y1 = s.start[1] - self.center[1]\r\n x2 = s.end[0] - self.center[0]\r\n y2 = s.end[1] - self.center[1]\r\n dx = x2 - x1\r\n dy = y2 - y1\r\n dr = math.sqrt(dx * dx + dy * dy)\r\n D = x1 * y2 - x2 * y1\r\n dr2 = dr * dr\r\n d = self.radius * self.radius * dr2 - D * D \r\n \r\n if d < 0:\r\n return []\r\n else: \r\n if dy < 0:\r\n sgndy = -1\r\n else:\r\n sgndy = 1 \r\n \r\n Ddy = D * dy\r\n mDdx = -D * dx\r\n sgndydxsqrtd = sgndy * dx * math.sqrt(d)\r\n absdysqrtd = abs(dy) * math.sqrt(d) \r\n \r\n xa = float(Ddy + sgndydxsqrtd) / dr2 + self.center[0]\r\n ya = float(mDdx + absdysqrtd) / dr2 + self.center[1]\r\n \r\n xb = (Ddy - sgndydxsqrtd) / dr2 + self.center[0]\r\n yb = (mDdx - absdysqrtd) / dr2 + self.center[1]\r\n \r\n if (d == 0) or not s.contains_point(xb, yb):\r\n if s.contains_point(xa, ya):\r\n return [(int(xa), int(ya))]\r\n else:\r\n return []\r\n else:\r\n if s.contains_point(xa, ya):\r\n return [(int(xa), int(ya)), (int(xb), int(yb))]\r\n else:\r\n return [(int(xb), int(yb))]",
"def _locate_finder_in_square(image, transform, size):\n radius = int(round(size/2))\n center = transform.trans\n angle = transform.rot\n\n rotated = image.rotate(angle, center)\n\n sx1, sy1 = center.x-radius, center.y-radius\n sx2, sy2 = center.x+radius, center.y+radius\n thick = int(round(size / 14))\n\n # Top\n x1, y1 = sx1, sy1\n x2, y2 = sx2, sy1 + thick\n top = np.sum(rotated.img[y1:y2, x1:x2]) / (size * thick)\n\n # Left\n x1, y1 = sx1, sy1\n x2, y2 = sx1 + thick, sy2\n left = np.sum(rotated.img[y1:y2, x1:x2]) / (size * thick)\n\n # Bottom\n x1, y1 = sx1, sy2 - thick\n x2, y2 = sx2, sy2\n bottom = np.sum(rotated.img[y1:y2, x1:x2]) / (size * thick)\n\n # Right\n x1, y1 = sx2 - thick, sy1\n x2, y2 = sx2, sy2\n right = np.sum(rotated.img[y1:y2, x1:x2]) / (size * thick)\n\n # Identify finder edges\n if top < bottom and left < right:\n c1 = [sx1, sy1]\n c2 = [sx1, sy2]\n c3 = [sx2, sy1]\n elif top < bottom and right < left:\n c1 = [sx2, sy1]\n c2 = [sx1, sy1]\n c3 = [sx2, sy2]\n elif bottom < top and left < right:\n c1 = [sx1, sy2]\n c2 = [sx2, sy2]\n c3 = [sx1, sy1]\n elif bottom < top and right < left:\n c1 = [sx2, sy2]\n c2 = [sx2, sy1]\n c3 = [sx1, sy2]\n else:\n return None\n\n # rotate points around center of square\n c1 = _rotate_around_point(Point.from_array(c1), angle, center)\n c2 = _rotate_around_point(Point.from_array(c2), angle, center)\n c3 = _rotate_around_point(Point.from_array(c3), angle, center)\n\n # Create finder pattern\n c1 = c1.intify()\n side1 = (c2 - c1).intify()\n side2 = (c3 - c1).intify()\n fp = FinderPattern(c1, side1, side2)\n\n return fp",
"def snap_verts(shp,tolerance=0.001,arc=True):\n kmtol = tolerance/1000.\n\n data = numpy.concatenate([rec.vertices for rec in shp])\n \n if arc:\n kd = pysal.cg.KDTree(data,distance_metric=\"Arc\",radius = pysal.cg.sphere.RADIUS_EARTH_KM)\n else:\n kd = pysal.cg.KDTree(data)\n q = kd.query_ball_tree(kd,kmtol)\n ### Next three lines assert that snappings are mutual... if 1 snaps to 8, 8 must snap to 1.\n for r,a in enumerate(q):\n for o in a:\n assert a==q[o]\n ### non-mutual snapping can happen.\n ### consider the three points, A (-1,0), B (0,0), C (1,0) and a snapping tolerance of 1.\n ### A-> B\n ### B-> A,C\n ### C-> B\n ### For now, try lowering adjusting the tolerance to avoid this.\n\n data2 = numpy.empty_like(data)\n for i,r in enumerate(q):\n data2[i] = data[r].mean(0)\n pos=0\n for rec in shp:\n vrts = rec.vertices\n n = len(vrts)\n nrec = pysal.cg.Chain(map(tuple,data2[pos:pos+n]))\n pos+=n\n yield nrec",
"def image_to_catalog_match(self, max_image_catalog_sep):\n\n catalog = self._spt_catalog\n\n # Create astropy skycoord object of the SZ centers.\n sz_centers = SkyCoord(catalog['RA'], catalog['DEC'], unit=u.degree)\n\n for cluster in self._catalog_dictionary.values():\n # Get the RA and Dec of the center pixel in the image.\n w = WCS(cluster['ch1_sci_path'])\n center_pixel = np.array(w.array_shape) // 2\n\n # Create astropy skycoord object for the reference pixel of the image.\n img_coord = SkyCoord.from_pixel(center_pixel[1], center_pixel[0], wcs=w, origin=0)\n\n # Match the reference pixel to the SZ centers\n idx, sep, _ = img_coord.match_to_catalog_sky(sz_centers)\n\n # Add the (nearest) catalog id and separation (in arcsec) to the output array.\n cluster.update({'SPT_cat_idx': idx, 'center_sep': sep})\n\n # Reject any match with a separation larger than 1 arcminute.\n large_sep_clusters = [cluster_id for cluster_id, cluster_info in self._catalog_dictionary.items()\n if cluster_info['center_sep'].to(u.arcmin) > max_image_catalog_sep]\n for cluster_id in large_sep_clusters:\n self._catalog_dictionary.pop(cluster_id, None)\n\n # If there are any duplicate matches in the sample remaining we need to remove the match that is the poorer\n # match. We will only keep the closest matches.\n match_info = Table(rows=[[cluster['SPT_cat_idx'], cluster['center_sep'], cluster_id]\n for cluster_id, cluster in self._catalog_dictionary.items()],\n names=['SPT_cat_idx', 'center_sep', 'cluster_id'])\n\n # Sort the table by the catalog index.\n match_info.sort(['SPT_cat_idx', 'center_sep'])\n\n # Use Astropy's unique function to remove the duplicate rows. Because the table rows will be subsorted by the\n # separation column we only need to keep the first incidence of the catalog index as our best match.\n match_info = unique(match_info, keys='SPT_cat_idx', keep='first')\n\n # Remove the duplicate clusters\n duplicate_clusters = set(match_info['cluster_id']).symmetric_difference(self._catalog_dictionary.keys())\n for cluster_id in duplicate_clusters:\n self._catalog_dictionary.pop(cluster_id, None)",
"def _exit_slits(self, hdr):\n # Does not exist separately in OpenMIMS, part of ApSecondaryNano and AnalysisParam\n d = {}\n # Each detector exit slit has:\n # - a position (0, 1, 2)\n # - a size (normal, large, xl)\n # The exit slits widths (and heights) are a 3x5 matrix where\n # coordinate (size, pos) returns actual width (height). positions 4\n # and 5 are 0 (for future expansion?) Size XL not stored in same part\n # of header, and only in analysis version >= 5, so we return a list of\n # length 5 with 0s here. Slits 0, 1, 2 are called slit 1, slit 2,\n # slit 3, so add labels to avoid confusion.\n\n d['exit slit'], d['exit slit size'] = \\\n unpack(self._bo + '2i', hdr.read(8))\n d['exit slit label'] = _exit_slit_labels.get(d['exit slit'], str(d['exit slit']))\n d['exit slit size label'] = _exit_slit_size_labels.get(d['exit slit size'], str(d['exit slit size']))\n\n w0 = tuple(unpack(self._bo + '5i', hdr.read(20)))\n w1 = tuple(unpack(self._bo + '5i', hdr.read(20)))\n w2 = (0, 0, 0, 0, 0)\n d['exit slit widths'] = (w0, w1, w2)\n h0 = tuple(unpack(self._bo + '5i', hdr.read(20)))\n h1 = tuple(unpack(self._bo + '5i', hdr.read(20)))\n h2 = (0, 0, 0, 0, 0)\n d['exit slit heights'] = (h0, h1, h2)\n return d",
"def make_square_mask(img, size, xy_center=None, angle=None):\n offset = 2 # from center\n xcen, ycen = img.shape[0] // 2, img.shape[1] // 2\n if xy_center is None: # use the middle of the image\n y, x = np.unravel_index(np.argmax(img), img.shape)\n xy_center = [x, y]\n # check if near edge\n if np.any([abs(x - xcen) > offset, abs(y - ycen) > offset]):\n print(\"Brightest star detected is far from the center.\")\n print(\"Aperture mask is placed at the center instead.\\n\")\n xy_center = [xcen, ycen]\n mask = np.zeros_like(img, dtype=bool)\n mask[ycen - size : ycen + size + 1, xcen - size : xcen + size + 1] = True\n # if angle:\n # #rotate mask\n # mask = rotate(mask, angle, axes=(1, 0), reshape=True, output=bool, order=0)\n return mask",
"def draw_sec(self, frame, i_sec):\n if self in self.supermarket.customers: \n row_i = self.path_row_col[i_sec,0]\n col_i = self.path_row_col[i_sec,1]\n if self.supermarketmap.contents[row_i][col_i] == '.':\n x = col_i * constants.TILE_SIZE\n y = row_i * constants.TILE_SIZE\n frame[y:y+constants.TILE_SIZE, x:x+constants.TILE_SIZE] = self.avatar\n # to do : avoide overlapping customer",
"def find_nearest_los_seat(seats, occupied_seats, i, j, dx, dy):\n n_rows, n_cols = occupied_seats.shape\n while True:\n i += dx\n j += dy\n if i < 0 or j < 0 or i >= n_rows or j >= n_cols:\n return 0\n if (i, j) in seats:\n return occupied_seats[i, j]",
"def check_central_star(all_images,x_star0,y_star0,all_titles,all_filt,Dx=100,Dy=50):\n index=0\n \n x_star = []\n y_star = []\n \n for image in all_images:\n x0=int(x_star0[index])\n y0=int(y_star0[index])\n \n old_x0=x0-(x0-Dx)\n old_y0=y0-(y0-Dy)\n \n sub_image=np.copy(image[y0-Dy:y0+Dy,x0-Dx:x0+Dx])\n NX=sub_image.shape[1]\n NY=sub_image.shape[0]\n \n profile_X=np.sum(sub_image,axis=0)\n profile_Y=np.sum(sub_image,axis=1)\n X_=np.arange(NX)\n Y_=np.arange(NY)\n \n profile_X_max=np.max(profile_X)*1.2\n profile_Y_max=np.max(profile_Y)*1.2\n \n avX,sigX=weighted_avg_and_std(X_,profile_X**4) ### better if weight squared\n avY,sigY=weighted_avg_and_std(Y_,profile_Y**4) ### really avoid plateau contribution\n #print index,'\\t',avX,avY,'\\t',sigX,sigY\n \n f, (ax1, ax2,ax3) = plt.subplots(1,3, figsize=(20,4))\n\n ax1.imshow(sub_image,origin='lower',vmin=0,vmax=10000,cmap='rainbow')\n ax1.plot([avX],[avY],'ko')\n ax1.grid(True)\n ax1.set_xlabel('X - pixel')\n ax1.set_ylabel('Y - pixel')\n \n ax2.plot(X_,profile_X,'r-',lw=2)\n ax2.plot([old_x0,old_x0],[0,profile_X_max],'y-',label='old',lw=2)\n ax2.plot([avX,avX],[0,profile_X_max],'b-',label='new',lw=2)\n \n \n ax2.grid(True)\n ax2.set_xlabel('X - pixel')\n ax2.legend(loc=1)\n \n ax3.plot(Y_,profile_Y,'r-',lw=2)\n ax3.plot([old_y0,old_y0],[0,profile_Y_max],'y-',label='old',lw=2)\n ax3.plot([avY,avY],[0,profile_Y_max],'b-',label='new',lw=2)\n \n ax3.grid(True)\n ax3.set_xlabel('Y - pixel')\n ax3.legend(loc=1)\n \n \n thetitle=\"{} : {} , {} \".format(index,all_titles[index],all_filt[index])\n f.suptitle(thetitle, fontsize=16)\n \n theX=x0-Dx+avX\n theY=y0-Dy+avY\n \n x_star.append(theX)\n y_star.append(theY)\n \n \n index+=1\n \n x_star=np.array(x_star)\n y_star=np.array(y_star)\n \n return x_star,y_star",
"def mark_star(self, star_id):\n\n ra, dec = self.db.get_star(star_id)[2:4]\n kwargs = dict(layer = self.MARKERS_LAYER,\n edgecolor = '#24ff29',\n s = self.MARK_RADIUS)\n self.aplpy_plot.show_markers(ra, dec, **kwargs)\n self.navig.home()\n\n self.selected_star_id = star_id\n self.goto_button.set_sensitive(True)",
"def stick(screen, color, x, y, n, size, angle):\n a = []\n b = 1\n if x[0] < y[0]:\n b = -1\n if angle < 0:\n b = b * (-1)\n for k in range(1, 101):\n a.append((x[0] + k / 100 * (y[0] - x[0]), x[1] + ((k / 100) ** (2 ** b)) * (y[1] - x[1])))\n lines(screen, color, False, a, width=2)\n for k in range(0, n):\n ell(screen, color, x[0] + (y[0] - x[0]) * (0.5 * abs((b - 1)) / 2 + k / n / 2),\n x[1] + (y[1] - x[1]) * (0.5 * abs((b - 1)) / 2 + k / n / 2), size, angle)",
"def simbad_brightstars(image_file=\"../nro_maps/12CO_20161002_FOREST-BEARS_spheroidal_xyb_grid7.5_0.099kms.fits\",\n brighter_than='G0', extra_criteria=\"(ra < 84.4 | dec < -6.66)\", otypes=\"Star\",\n replace_ra='hourangle', replace_dec='deg', add_sptype_letter_column=True,\n output=None, output_format='fits'):\n try:\n wcs = WCS(image_file).celestial #Drop non-celestial axes (like velocity and stokes). \n except:\n raise(\"image_file must be a fits image or cube with wcs in header.\")\n\n footprint = wcs.calc_footprint()\n\n \n ### ra_min/max, dec_min/max need to be in degrees.\n ### In the fits headers I have they are, but this may not always be true.\n ###\n ra_min, ra_max = footprint[:,0].min(), footprint[:,0].max()\n dec_min, dec_max = footprint[:,1].min(), footprint[:,1].max()\n\n s = Simbad()\n s.add_votable_fields('sptype')\n\n if extra_criteria:\n stars = s.query_criteria(\"ra > {} & ra < {} & dec > {} & dec < {} & sptypes < {} & {}\".format(\n ra_min, ra_max, dec_min, dec_max, brighter_than, extra_criteria), otypes=\"Star\")\n else:\n stars = s.query_criteria(\"ra > {} & ra < {} & dec > {} & dec < {} & sptypes < {}\".format(\n ra_min, ra_max, dec_min, dec_max, brighter_than), otypes=\"Star\")\n\n stars_coord = coord.SkyCoord(stars['RA'], stars['DEC'], unit=(u.hourangle, u.deg))\n\n if replace_ra:\n stars.replace_column('RA', Column(stars_coord.ra, name='RA', unit=replace_ra))\n if replace_dec:\n stars.replace_column('DEC', Column(stars_coord.dec, name='DEC', unit=replace_dec))\n\n if add_sptype_letter_column:\n stars.add_column(Column([sptype[0] for sptype in stars['SP_TYPE'].astype('str')], name='SP_LETTER', unit='str'))\n\n if output:\n stars.write(output, format=output_format)##\n else:\n return stars",
"def spiral_search():\n #spiral inward to outward making a larger circle each pass (currently squares)\n #------------check the RSSI readings as it spins------------------\n #replace max rssi with new largest and record degrees coordinates\n rssi_max = -120\n max_x = 0\n max_y = 0\n\n count = 0\n while (count < 5):\n move(ccw_msg)\n time.sleep((.1+count))\n move(up_ccw_msg)\n time.sleep((.05+count))\n move(up_msg)\n time.sleep((.05+count))\n move(up_cw_msg)\n time.sleep((.05+count))\n move(cw_msg)\n time.sleep(2*(.1+count))\n move(down_cw_msg)\n time.sleep((.05*count))\n move(down_msg)\n time.sleep(2*(.05+(.05*count)))\n move(down_ccw_msg)\n time.sleep(.05*count)\n count+=1\n #this method isn't really ideal with using timer to determine movement length",
"def ls_sr_band_correction(self,\n img):\n return \\\n ee.Algorithms.If(\n ee.String(img.get('SATELLITE')).compareTo('LANDSAT_8'),\n ee.Algorithms.If(ee.String(img.get('SATELLITE')).compareTo('LANDSAT_5'),\n ee.Image(img.select(['B1', 'B2', 'B3', 'B4', 'B5', 'B7', 'pixel_qa', 'radsat_qa'],\n ['BLUE', 'GREEN', 'RED', 'NIR', 'SWIR1', 'SWIR2', 'PIXEL_QA',\n 'RADSAT_QA'])\n .int16()\n .copyProperties(img)\n .copyProperties(img,\n ['system:time_start',\n 'system:time_end',\n 'system:index',\n 'system:footprint'])),\n ee.Algorithms.If(ee.Number(int(self.auto_ls5_correction)),\n ee.Image(EEHelper.ls5_sr_corr(img)),\n ee.Image(img.select(\n ['B1', 'B2', 'B3', 'B4', 'B5', 'B7', 'pixel_qa', 'radsat_qa'],\n ['BLUE', 'GREEN', 'RED', 'NIR', 'SWIR1', 'SWIR2', 'PIXEL_QA',\n 'RADSAT_QA'])\n .int16()\n .copyProperties(img)\n .copyProperties(img,\n ['system:time_start',\n 'system:time_end',\n 'system:index',\n 'system:footprint']))\n )\n ),\n ee.Algorithms.If(ee.Number(int(self.auto_ls8_correction)),\n ee.Image(EEHelper.ls8_sr_corr(img)),\n ee.Image(img.select(['B2', 'B3', 'B4', 'B5', 'B6', 'B7', 'pixel_qa', 'radsat_qa'],\n ['BLUE', 'GREEN', 'RED', 'NIR', 'SWIR1', 'SWIR2', 'PIXEL_QA',\n 'RADSAT_QA'])\n .int16()\n .copyProperties(img)\n .copyProperties(img,\n ['system:time_start',\n 'system:time_end',\n 'system:index',\n 'system:footprint']))\n )\n )",
"def board_star(self, board_star_id):\r\n return BoardStar(self, board_star_id)"
] |
[
"0.65125364",
"0.5271238",
"0.5177924",
"0.5146718",
"0.5063657",
"0.49875027",
"0.49333033",
"0.47619992",
"0.47554895",
"0.47422874",
"0.4738101",
"0.47289777",
"0.46929607",
"0.4634357",
"0.46069604",
"0.45719075",
"0.45473403",
"0.453219",
"0.45096353",
"0.45056877",
"0.44996634",
"0.4495331",
"0.44804582",
"0.4477237",
"0.44701472",
"0.44665512",
"0.44454998",
"0.44357777",
"0.44284457",
"0.44182044"
] |
0.77866113
|
0
|
Recursively merge new into base.
|
def merge_dict(base, new, extend=True):
if isinstance(new, dict):
for key, value in new.items():
if key not in base:
base[key] = value
elif extend and isinstance(value, dict):
base[key] = merge_dict(
base=base.get(key, {}), new=value, extend=extend
)
elif extend and isinstance(value, list):
base[key].extend(value)
elif extend and isinstance(value, (tuple, set)):
if isinstance(base.get(key), tuple):
base[key] += tuple(value)
elif isinstance(base.get(key), list):
base[key].extend(list(value))
else:
base[key] = new[key]
elif isinstance(new, list):
if extend:
base.extend(new)
else:
base = new
return base
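A brief usage sketch (hypothetical inputs, not from the source) illustrating how merge_dict extends nested dicts and lists in place; it assumes the merge_dict defined above is in scope:

# Assumes merge_dict from above is in scope; the data below is invented for illustration.
base = {"opts": {"a": 1}, "tags": ["x"]}
new = {"opts": {"b": 2}, "tags": ["y"], "name": "demo"}
merged = merge_dict(base, new)
# merge_dict mutates and returns `base`:
# {"opts": {"a": 1, "b": 2}, "tags": ["x", "y"], "name": "demo"}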
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def merge(self):\n collapsable = self.findSameSubtrees()\n\n dummy = GraphiusNode(-1, None)\n for i, node in self.nodes.items():\n dummy.addNeighbor(node)\n\n # Perform the merge\n self.mergeHelper(dummy, collapsable)\n\n # Regenerate trees\n newNodes = self.dfs(dummy)\n assert(-1 not in newNodes)\n self.nodes = newNodes",
"def merge_trees(data, new_data):\n for key, val in new_data.items():\n if isinstance(val, dict):\n if key not in data:\n data[key] = new_data[key]\n else:\n merge_trees(data[key], new_data[key])\n\n else:\n if key not in data:\n data[key] = val\n else:\n data[key] = data[key] + val",
"def mergeWith(self, others):",
"def merge(target, source):\n for key, value in source.items():\n if key not in target:\n target[key] = value\n elif type(target[key]) is dict:\n if key in self.OVERRIDE_ON_EXTENDS:\n target[key].update(value)\n else:\n merge(target[key], value)\n elif type(target[key]) is list:\n target[key] += value\n return target",
"def merge_nodes(self, parent, child):\n parent.key += child.key\n parent.real = child.real\n parent.value = child.value\n parent.children = child.children",
"def dict_merge(base, upd, inplace=False):\n assert quacks_like_dict(base), quacks_like_dict(upd)\n dst = base if inplace else deepcopy(base)\n\n stack = [(dst, upd)]\n while stack:\n current_dst, current_src = stack.pop()\n for key in current_src:\n if key not in current_dst:\n current_dst[key] = current_src[key]\n else:\n if quacks_like_dict(current_src[key]) and quacks_like_dict(current_dst[key]) :\n stack.append((current_dst[key], current_src[key]))\n else:\n current_dst[key] = current_src[key]\n return dst",
"def merge(self, other):\n\n for child in other.children:\n self.add_deep_copy_of(child, merged=True)",
"def merge(): #Status: WIP\r\n pass",
"def _merge_accumulator(acc, new_acc):\n if not acc:\n return new_acc\n\n if not new_acc:\n new_acc.append(acc[0])\n return _merge_accumulator(acc[1:], new_acc)\n\n elif acc[0][0]['level'] == new_acc[-1][-1]['level']:\n new_acc[-1] += acc[0]\n return _merge_accumulator(acc[1:], new_acc)\n\n else:\n new_acc.append(acc[0])\n return _merge_accumulator(acc[1:], new_acc)",
"def merge(self, a, b, path=None):\n if path is None: path = []\n for key in b:\n if key in a:\n if isinstance(a[key], dict) and isinstance(b[key], dict):\n if key == 'attributes':\n self.merge_attribute_defs(b, a)\n else:\n self.merge(a[key], b[key], path + [str(key)])\n elif a[key] == b[key]:\n pass # same leaf value\n else:\n # raise Exception('Conflict at %s' % '.'.join(path + [str(key)]))\n self.append_or_replace(a,b,key, '/'.join(path + [str(key)]));\n else:\n a[key] = b[key]\n return a",
"def overwrite_dict(dict_base, dict_new, base_path=None):\n assert isinstance(dict_new, dict)\n for k in dict_new:\n # Add the current key to the path\n k_path = str(k) if base_path is None else f'{base_path}.{str(k)}'\n # Make sure that the key in the new dictionary matches one from the base dictionary\n assert k in dict_base, f'Could not find path {k_path} in the base dictionary'\n # Check that the types match between the base dictionary entry and the new one\n if dict_base[k] is not None:\n assert isinstance(type(dict_base[k]), type(dict_new[k])), \\\n 'The types at {} in the base dictionary do not match (expected {}, got {})'.format(\n k_path, str(type(dict_base[k])), str(type(dict_new[k])))\n # Recursively replace dictionary entries\n if isinstance(dict_base[k], dict):\n overwrite_dict(dict_base[k], dict_new[k], k_path)\n else:\n # Simply copy over leaf entries\n dict_base[k] = dict_new[k]",
"def merge_from(self, other):\n assert not self.is_final\n if self.parent is not None:\n assert other.parent is not None\n self.parent.merge_from(other.parent)\n self.isolated_names.update(other.isolated_names)\n self.read.update(other.read)\n self.modified.update(other.modified)\n self.bound.update(other.bound)\n self.deleted.update(other.deleted)\n self.annotations.update(other.annotations)\n self.params.update(other.params)",
"def merge(t1, t2):\n if t2 is None:\n return t1\n if t1 is None:\n return t2\n\n t1 = _splay(_find_max(t1))\n t1.right = t2\n t2.parent = t1\n return t1",
"def _merge(self):\n raise NotImplementedError",
"def merge_dict_recursive(target, src):\r\n for k in src.keys():\r\n if ((k in target and isinstance(target[k], dict) and\r\n isinstance(src[k], collections.Mapping))):\r\n merge_dict_recursive(target[k], src[k])\r\n else:\r\n target[k] = src[k]",
"def merge(a, b, path=None, update=True):\n # print(\"\\nMerging: a=\" + str(a) + \" b=\" + str(b) + \" path=\" + str(path) )\n if path is None:\n path = []\n for key in b:\n if key in a:\n if isinstance(a[key], dict) and isinstance(b[key], dict):\n merge(a[key], b[key], path + [str(key)])\n elif a[key] == b[key]:\n pass # same leaf value\n elif isinstance(a[key], list) and isinstance(b[key], list):\n for idx, val in enumerate(b[key]):\n a[key][idx] = merge(a[key][idx],\n b[key][idx],\n path + [str(key), str(idx)],\n update=update)\n elif update:\n a[key] = b[key]\n else:\n raise Exception('Conflict at %s' %\n '.'.join(path + [str(key)]))\n else:\n a[key] = b[key]\n return a",
"def merge(self, other_btree):\n pass",
"def deep_merge(source, dest):\n for key, value in source.iteritems():\n if key in dest:\n if isinstance(value, dict) and isinstance(dest[key], dict):\n deep_merge(value, dest[key])\n continue\n elif isinstance(value, list) and isinstance(dest[key], list):\n for item in value:\n if item not in dest[key]:\n dest[key].append(item)\n continue\n dest[key] = value",
"def merge_values(src, new):\n if isinstance(src, dict) and isinstance(new, dict):\n return merge_dicts(src, new)\n else:\n if not isinstance(src, list):\n src = [src]\n if not isinstance(new, list):\n new = [new]\n\n return merge_lists(src, new)",
"def mergeNodes(new, t1, t2):\n \n if t1 and t2:\n new.val = t1.val + t2.val\n elif not t1:\n new.val = t2.val\n elif not t2:\n new.val = t1.val",
"def _merge_new_into_all_paths(self):\n self.all_source_paths.update(self.new_source_paths)",
"def merge(source, destination):\n for key, value in source.items():\n if isinstance(value, dict):\n # get node or create one\n node = destination.setdefault(key, {})\n merge(value, node)\n else:\n destination[key] = value\n\n return destination",
"def deep_merge(a, b, path=None, append=False):\n for key in b:\n if key in a:\n if isinstance(a[key], dict) and isinstance(b[key], dict):\n deep_merge(a[key], b[key], (path or []) + [str(key)], append)\n continue\n is_lists = isinstance(a[key], list) and isinstance(b[key], list)\n if is_lists and append:\n a[key] += b[key]\n else:\n a[key] = b[key]\n else:\n a[key] = b[key]\n return a",
"def merge_dicts(base, changes):\n for k, v in changes.items():\n if isinstance(v, dict):\n merge_dicts(base.setdefault(k, {}), v)\n else:\n base.setdefault(k, v)",
"def merge_fields(d, new):\n if not new:\n return\n\n for k, v in new.iteritems():\n if k not in d:\n d[k] = v\n elif isinstance(v, list) and isinstance(d[k], list):\n d[k].extend(v)\n elif isinstance(v, dict) and isinstance(d[k], dict):\n d[k].update(v)\n else:\n d[k] = v",
"def merge(incoming={}, output={}, overwrite=False):\n\t_output = output.copy()\n\tfor _key, _value in incoming.items(): # loop through each key/value pair\n\t\tif (_key in _output) and isinstance(_value, dict): # detect when we need to recurse\n\t\t\t_output[_key] = merge(_value, _output[_key]) # recurse\n\t\telse: # _key is not in output\n\t\t\tif _key in _output and overwrite == False: # we check if it already exists, and if we care\n\t\t\t\tcontinue # don't overwrite existing values unless overwrite is 'True'\n\t\t\t_output[_key] = _value # add key/value pair\n\n\treturn _output # give back the merged dict",
"def test_recursive_merge(self):\n dict_1 = {\n 'key': {\n 'deep_key_1': 'original_value_1',\n 'deep_key_2': 'original_value_2'\n }\n }\n dict_2 = {\n 'key': {\n 'deep_key_2': 'new_value_2',\n 'deep_key_3': 'new_value_3'\n }\n }\n\n result = deep_dict_merge(dict_1, dict_2)\n\n assert dict_1 == {\n 'key': {\n 'deep_key_1': 'original_value_1',\n 'deep_key_2': 'original_value_2'\n }\n }\n assert dict_2 == {\n 'key': {\n 'deep_key_2': 'new_value_2',\n 'deep_key_3': 'new_value_3'\n }\n }\n assert result == {\n 'key': {\n 'deep_key_1': 'original_value_1',\n 'deep_key_2': 'new_value_2',\n 'deep_key_3': 'new_value_3'\n }\n }",
"def deep_merge(d1, d2):\n for k, v in d1.copy().items():\n if k in d2:\n if all(isinstance(e, MutableMapping) for e in (v, d2[k])):\n d2[k] = ConfigManager.deep_merge(v, d2[k])\n\n if k == \"*\":\n for _k, _v in d2.items():\n if all(isinstance(e, MutableMapping) for e in (v, d2[_k])):\n d2[_k] = ConfigManager.deep_merge(v, d2[_k])\n del d1[k]\n d3 = d1.copy()\n d3.update(d2)\n return d3",
"def merge_node(start):\n if start.next == None:\n return start\n\n mid = count(start) // 2\n\n # scan to the middle\n scanner = start\n for i in range(0, mid-1):\n scanner = scanner.next\n\n # set mid node right after the scan point\n mid_node = scanner.next\n # break at the mid point\n scanner.next = None\n mid_node.prev = None\n\n merged_left = merge_node(start)\n merged_right = merge_node(mid_node)\n\n return merge(merged_left, merged_right)",
"def mergeWith(self, newFL):\n srcMods = self.srcMods\n for levls, newLevls in ((self.levcs,newFL.levcs),(self.levis,newFL.levis)):\n for listId, newLevl in newLevls.items():\n if listId not in srcMods: \n srcMods[listId] = [newFL.fileInfo.name]\n levl = levls[listId] = copy.deepcopy(newLevl)\n self.records.append(levl)\n else:\n srcMods[listId].append(newFL.fileInfo.name)\n levls[listId].mergeWith(newLevl)"
] |
[
"0.6242751",
"0.61828357",
"0.59670585",
"0.59168214",
"0.5873907",
"0.57541686",
"0.57490855",
"0.5668129",
"0.56648463",
"0.5660273",
"0.5635293",
"0.5633576",
"0.56290585",
"0.560243",
"0.558158",
"0.55591655",
"0.5555937",
"0.55533767",
"0.5547018",
"0.549763",
"0.5479188",
"0.54753923",
"0.54597116",
"0.54327106",
"0.54097867",
"0.5384413",
"0.5381769",
"0.53781116",
"0.53564495",
"0.5343193"
] |
0.6269359
|
0
|
Return the SHA256 sum of a given object. The object used for generating a SHA256 must be JSON compatible.
|
def object_sha256(obj):
return hashlib.sha256(json.dumps(obj).encode()).hexdigest()
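A minimal, self-contained sketch of the same one-liner with its required imports and an illustrative call (the input object is invented; the only requirement, as stated above, is that it be JSON compatible):

import hashlib
import json

def object_sha256(obj):
    return hashlib.sha256(json.dumps(obj).encode()).hexdigest()

# Illustrative call; the result is a 64-character hex digest.
print(object_sha256({"id": 1, "tags": ["a", "b"]}))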
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def hash(obj):\n \n import hashlib\n import pickle\n \n sha = hashlib.sha256()\n sha.update(pickle.dumps(obj))\n \n return sha.hexdigest()",
"def SHA256(self) -> _n_0_t_3[_n_0_t_9]:",
"def checksum(item):\n return hashlib.sha256(obj_to_str(item).encode('utf-8')).hexdigest()",
"def compute_hash(self):\n block_string = json.dumps(self.__dict__, sort_keys=True)\n return sha256(block_string.encode()).hexdigest()",
"def compute_hash(self):\n block_string = json.dumps(self.__dict__, sort_keys=True)\n return sha256(block_string.encode()).hexdigest()",
"def object_sha1(obj):\n\n return hashlib.sha1(json.dumps(obj).encode()).hexdigest()",
"def sha256(data):\n\n d = rpki.POW.Digest(rpki.POW.SHA256_DIGEST)\n d.update(data)\n return d.digest()",
"def sha256Sum(self, data):\n data = str(data)\n m = hashlib.sha256()\n if os.path.isfile(data):\n try:\n f = file(data, 'rb')\n except:\n return 'ERROR: unable to open %s' % data\n while True:\n d = f.read(8096)\n if not d:\n break\n m.update(d)\n f.close()\n # Otherwise it could be either 1) a directory 2) miscellaneous data (like json)\n else:\n m.update(data)\n return m.hexdigest()",
"def hash_data(obj):\n collect = sha1()\n for text in bytes_iter(obj):\n if isinstance(text, six.text_type):\n text = text.encode('utf-8')\n collect.update(text)\n return collect.hexdigest()",
"def compute_hash(block):\n block_string = json.dumps(self.__dict__, sort_keys= True)\n return sha256(block_string.encode()).hexdigest()",
"def checksum(*objects):\n hasher = hashlib.md5()\n _checksum(hasher, objects)\n return hasher.hexdigest()",
"def sha256(cls, value):\n assert type(value) is str\n return int(sha256(value.encode()).hexdigest(), 16)",
"def sha256sum(filename):\n content = open(filename, 'rb').read()\n sha256_obj = hashlib.sha256(content)\n return sha256_obj.hexdigest()",
"def digest(o):\n ser = serialize(o)\n return _truncated_digest(ser.encode(enc)).decode(enc)",
"def sha3_256(x):\n return hashlib.sha3_256(x).digest()",
"def checksum(payload):\n return hashlib.sha256(hashlib.sha256(payload).digest()).digest()[:4]",
"def compute_hash(self) -> str:\r\n #block_dict = self.__dict__.pop('hash', None) # Remove hash field value before calculating hash\r\n block_dict = self.__dict__.copy()\r\n block_dict.pop('hash', None) # Remove hash field value before calculating hash\r\n block_string = json.dumps(block_dict, sort_keys=True).encode('utf-8')\r\n return sha256(block_string).hexdigest()",
"def object_hash(obj):\n try:\n code = obj.__code__.co_code\n except AttributeError:\n attrlist = [getattr(obj, name) for name in dir(obj)\n if not name.startswith('__')]\n codelist = [attr.__code__.co_code for attr in attrlist\n if hasattr(attr, '__code__')]\n code = b','.join(codelist)\n digest = hashlib.md5(code).hexdigest()\n return digest",
"def sha256(content):\n content = content.encode('utf-8')\n return hashlib.sha256(content).hexdigest()",
"def do_hash(dat: typing.Any) -> str:\n return hashlib.sha1(json.dumps(dat, sort_keys=True).encode('utf-8')).hexdigest()",
"def compute_hash(self):\n '''\n s = \"\"\n s += str(self.index)\n for i in range(len(self.transactions)):\n s += self.transactions[i]\n s += str(self.timestamp)\n s += self.previous_hash\n s += str(self.nonce)\n\n s_json = json.dumps(s)\n x = sha256()\n x.update(s_json.encode())\n h = x.hexdigest()\n return h\n '''\n\n block_string = json.dumps(self.__dict__, sort_keys=True)\n return sha256(block_string.encode()).hexdigest()",
"def Sha256(data: Union[bytes, str]) -> bytes:\n return hashlib.sha256(AlgoUtils.Encode(data)).digest()",
"def convert_to_SHA256(x):\n result = hashlib.sha256(x.encode())\n result = result.hexdigest()\n return result",
"def convert_to_SHA256(x):\n result = hashlib.sha256(x.encode())\n result = result.hexdigest()\n return result",
"def sha256(value):\n return hashlib.sha256(value).hexdigest()",
"def hash(block):\n\n # We must make sure that the Dictionary is Ordered, or we'll have inconsistent hashes\n block_string = json.dumps(block, sort_keys=True).encode()\n return hashlib.sha256(block_string).hexdigest()",
"def hash(block):\n\n # We must make sure that the Dictionary is Ordered, or we'll have inconsistent hashes\n block_string = json.dumps(block, sort_keys=True).encode()\n return hashlib.sha256(block_string).hexdigest()",
"def hash(block):\n\n # We must make sure that the Dictionary is Ordered, or we'll have inconsistent hashes\n block_string = json.dumps(block, sort_keys=True).encode()\n return hashlib.sha256(block_string).hexdigest()",
"def hash(block):\n\n # We must make sure that the Dictionary is Ordered, or we'll have inconsistent hashes\n block_string = json.dumps(block, sort_keys=True).encode()\n return hashlib.sha256(block_string).hexdigest()",
"def hash(block):\n\n # We must make sure that the Dictionary is Ordered, or we'll have inconsistent hashes\n block_string = json.dumps(block, sort_keys=True).encode()\n return hashlib.sha256(block_string).hexdigest()"
] |
[
"0.7034035",
"0.6582454",
"0.6568041",
"0.65337396",
"0.65337396",
"0.6494412",
"0.6442198",
"0.63768536",
"0.62840503",
"0.6225997",
"0.62037086",
"0.6184975",
"0.61726046",
"0.6153254",
"0.613591",
"0.613288",
"0.6131862",
"0.612951",
"0.6064961",
"0.60636646",
"0.6060272",
"0.6060036",
"0.6055269",
"0.6055269",
"0.60480183",
"0.60476154",
"0.60476154",
"0.60476154",
"0.60476154",
"0.60476154"
] |
0.80336756
|
0
|
Return the SHA1 sum of a given object. The object used for generating a SHA1 must be JSON compatible.
|
def object_sha1(obj):
return hashlib.sha1(json.dumps(obj).encode()).hexdigest()
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def _sha1_hash_json(self, value):\n hash = hashlib.new(\"sha1\")\n binary_value = value.encode(\"ascii\")\n hash.update(binary_value)\n sha1_res = hash.hexdigest()\n return sha1_res",
"def sha1(data):\n\n d = rpki.POW.Digest(rpki.POW.SHA1_DIGEST)\n d.update(data)\n return d.digest()",
"def sha1(self) -> str:\n return self.data.sha1",
"def checksum(*objects):\n hasher = hashlib.md5()\n _checksum(hasher, objects)\n return hasher.hexdigest()",
"def SHA1(self) -> _n_0_t_3[_n_0_t_9]:",
"def object_sha256(obj):\n\n return hashlib.sha256(json.dumps(obj).encode()).hexdigest()",
"def do_hash(dat: typing.Any) -> str:\n return hashlib.sha1(json.dumps(dat, sort_keys=True).encode('utf-8')).hexdigest()",
"def _sha1(self):\n return hashlib.sha1(self._blob).hexdigest()",
"def sha1(self, s):\n\t\tself.sha1_calls += 1\n\t\treturn int(hashlib.sha1(s).hexdigest(), 16)",
"def checksum(item):\n return hashlib.sha256(obj_to_str(item).encode('utf-8')).hexdigest()",
"def hexdigest(jsonable):\n string = json.dumps(jsonable, sort_keys=True).encode()\n return hashlib.sha1(string).hexdigest()",
"def calculate_hash(stuff):\n\tsha1 = hashlib.sha1()\n\tsha1.update(stuff)\n\treturn sha1.hexdigest()",
"def hash(obj):\n \n import hashlib\n import pickle\n \n sha = hashlib.sha256()\n sha.update(pickle.dumps(obj))\n \n return sha.hexdigest()",
"def hash_data(obj):\n collect = sha1()\n for text in bytes_iter(obj):\n if isinstance(text, six.text_type):\n text = text.encode('utf-8')\n collect.update(text)\n return collect.hexdigest()",
"def hash(self):\n return hashlib.sha1(str(self._dict))",
"def hash_obj(self, obj):\r\n md5er = hashlib.md5()\r\n update_hash(md5er, obj)\r\n return md5er.hexdigest()",
"def _calc_sha1(path):\n calc = hashlib.sha1()\n with open(path, 'r') as f:\n calc.update(f.read())\n return calc.hexdigest()",
"def dict_hash(obj, start=''):\n h = hashlib.sha1(to_bytes(start))\n h.update(to_bytes(obj.__class__.__name__))\n if isinstance(obj, dict):\n for key, value in sorted(obj.items()):\n h.update(to_bytes(key))\n h.update(to_bytes(dict_hash(value)))\n elif isinstance(obj, (list, tuple)):\n for el in obj:\n h.update(to_bytes(dict_hash(el)))\n else:\n # basic types\n if isinstance(obj, bool):\n value = str(int(obj))\n elif isinstance(obj, (six.integer_types, float)):\n value = str(obj)\n elif isinstance(obj, (six.text_type, bytes)):\n value = obj\n elif obj is None:\n value = b''\n else:\n raise ValueError(\"Unsupported value type: %s\" % obj.__class__)\n h.update(to_bytes(value))\n return h.hexdigest()",
"def sha1(self):\n return self.tag(\"sha1\")",
"def checksum_from_sha1(value):\n # More constrained regex at lexer level\n CHECKSUM_RE = re.compile('SHA1:\\\\s*([\\\\S]+)', re.UNICODE)\n match = CHECKSUM_RE.match(value)\n if match:\n return checksum.Algorithm(identifier='SHA1', value=match.group(1))\n else:\n return None",
"def __get_hashstr(_config_object: dict):\n hashobj = hashlib.md5()\n json_str = json.dumps(_config_object, sort_keys=True).encode('utf-8')\n hashobj.update(json_str)\n dig = hashobj.hexdigest()\n return dig\n # return hashobj.update(json.dumps(_config_object, sort_keys=True).encode('utf-8')).hexdigest()",
"def digest(o):\n ser = serialize(o)\n return _truncated_digest(ser.encode(enc)).decode(enc)",
"def _get_checksum(self, text):\n # Compute the new checksum over everything but the sha1sum line.\n # This will fail if sha1sum appears for some other reason. It won't ;-)\n text = \"\".join([line for line in text.splitlines(True) if \"sha1sum\" not in line])\n return utils.str_checksum(text)",
"def hash_simple_obj_to_hex(obj):\n\n hash_ = sha256()\n try:\n update_hash(hash_, obj)\n except ValueError as e:\n raise ValueError(\"%s (full object was %r)\" % (e, obj))\n return hash_.hexdigest()",
"def calc_hash(self, record: dict) -> str:\n return sha1(orjson.dumps(record, option=orjson.OPT_SORT_KEYS)).hexdigest()",
"def get_checksum(str):\n hash_object = hashlib.sha1(b'%s' % str)\n hex_dig = hash_object.hexdigest()\n return hex_dig",
"def hex_sha1_of_bytes(data: bytes) -> Sha1HexDigest:\n return Sha1HexDigest(hashlib.sha1(data).hexdigest())",
"def get_report_hash(self, consolidated):\n jsonstr = json.dumps(consolidated, sort_keys=True)\n hashobj = hashlib.sha1(jsonstr)\n hexval = hashobj.hexdigest()\n return hexval",
"def sha1hex(doc):\n doc_id = doc.pop('_id',None)\n doc_rev = doc.get('_rev',None)\n doc_string = str(doc)\n\n if doc_id is not None:\n doc['_id'] = doc_id\n\n if doc_rev is not None:\n doc['_rev'] = doc_rev\n\n return hashlib.sha1(doc_string).hexdigest().upper()",
"def sign_rsa_sha1(client, request):\n base_string = generate_signature_base_string(request)\n return rsa_sha1_signature(base_string, client.rsa_key)"
] |
[
"0.72167873",
"0.66763383",
"0.66553843",
"0.655533",
"0.6483608",
"0.6467267",
"0.6432472",
"0.6383441",
"0.62883264",
"0.628797",
"0.62626314",
"0.6250575",
"0.62494814",
"0.61954755",
"0.6177797",
"0.6159965",
"0.6142139",
"0.6118138",
"0.611029",
"0.5966402",
"0.59195644",
"0.59135175",
"0.5871034",
"0.5824055",
"0.58105296",
"0.58014613",
"0.58013785",
"0.57776546",
"0.5757048",
"0.57206815"
] |
0.8017245
|
0
|
Constructor. iReader is the IndexReader object on which the search should be performed
|
def __init__(self, iReader):
self.ireader = iReader
self.num_of_doc = iReader.getNumberOfDocuments()
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def __init__(self, iReader):\n self.__index_reader = iReader",
"def __init__ (self, input) :\r\n ReaderA.__init__(self) # call parent\r\n # print '************************* input = ', input, type(input)\r\n self.buffer_ = input # this is any thing that can be indexed\r\n self.current_ = 0",
"def __init__(self, index, document_id, client=None):\n self.index = index\n self.document_id = document_id\n self.client = client or current_search_client",
"def __init__(self):\n\n self.reader = reader.Reader()",
"def init_index(self):\n raise NotImplementedError",
"def __init__(self):\n super().__init__()\n self.index_dir = self.base_dir + \"user/\"\n self.index_schema = self.__get_index_schema()\n if not os.path.exists(self.index_dir):\n os.makedirs(self.index_dir)\n self.indexer = index.create_in(self.index_dir, self.index_schema) # creates the index\n else:\n self.indexer = index.open_dir(self.index_dir) # opens the index if it already exists",
"def __init__(self):\n self.inverted_index = OrderedDict({})\n self.i=0\n self.unique_doc_ids=set()",
"def __init__(self, genomeReader=None):\n self.genomeReader = genomeReader",
"def __init__(self):\n super().__init__()\n self.index_dir = self.base_dir + \"ticket/\"\n self.index_schema = self.__get_index_schema()\n if not os.path.exists(self.index_dir):\n os.makedirs(self.index_dir)\n self.indexer = index.create_in(self.index_dir, self.index_schema)\n else:\n self.indexer = index.open_dir(self.index_dir)",
"def __init__(self):\n #---+----|----+----|----+----|----+----|----+----|----+----|----+----|\n NexusReaderBase.__init__(self, -1)\n self.taxa = None\n self._data_matrices = None",
"def __init__(self, tokenizer=simple_tokenize):\n # Set tokenizer to use for tokenizing new documents\n self.tokenize = tokenizer\n # The term document matrix is a sparse matrix represented as a\n # list of dictionaries. Each dictionary contains the word\n # counts for a document.\n self.sparse = []\n # Keep track of the number of documents containing the word.\n self.doc_count = {}",
"def __init__(self, index):\n self._index = index",
"def __init__(self, index):\n self.index = index",
"def __init__(self, index):\n self.index = index",
"def build(self):\n\t\tself.documents = self.get_items_to_index()\n\t\tself.build_index()",
"def __init__(self, using=None, index=None, doc_type=None, extra=None):\n self._using = using\n\n self._index = None\n if isinstance(index, (tuple, list)):\n self._index = list(index)\n elif index:\n self._index = [index]\n\n self._doc_type = None\n if isinstance(doc_type, (tuple, list)):\n self._doc_type = list(doc_type)\n elif doc_type:\n self._doc_type = [doc_type]\n\n self.query = ProxyQuery(self, 'query')\n self.filter = ProxyFilter(self, 'filter')\n self.post_filter = ProxyFilter(self, 'post_filter')\n self.aggs = AggsProxy(self)\n self._sort = []\n self._extra = extra or {}\n self._params = {}",
"def __init__(self, input=None):\r\n BaseAnalyzer.__init__(self, input)",
"def __init__(self, input=None):\r\n BaseAnalyzer.__init__(self, input)",
"def __init__(self, driver, output_folder, search_parameters):\n self.driver = driver\n self.search_results = SearchResults(output_folder, search_parameters)\n\n self.version = search_parameters[\"version\"]\n self.region = search_parameters[\"community\"]\n self.province = search_parameters[\"province\"]\n self.entity_type = search_parameters[\"entity_type\"]\n self.name = search_parameters[\"name\"]\n self.cif = search_parameters[\"cif\"]\n\n self.do_search()",
"def __init__(self, index=None):\n self.index = index or {}",
"def __init__(self):\r\n\t\t\r\n\t\tself.redis = redis.Redis()\r\n\t\tself.info_to_get = ['text', 'created_at', 'user']\r\n\t\tself.search_results = {}\r\n\t\tself.raw_data_directory_name = \"raw_mining_data\"\r\n\t\tself.filtered_data_directory_name = \"filtered_mining_data\"\r\n\t\tenglish_file = pjoin( sys.path[0], \"sentiment_word_files\", \"Nielsen2010Responsible_english.csv\")\r\n\t\tself.analyzeEnglish = dict(map(lambda (w,e): (w, int(e)), \\\r\n\t\t\t\t\t\t\t\t\t[ line.strip().lower().split('\\t') for line in open(english_file) ]))\r\n\t\tself.tweets_count = 0",
"def init_index(self, index_name):\n return Index(self, index_name)",
"def __init__(self,file_reader):\n self.file_reader = file_reader",
"def __init__(self, name, size, resource_storage):\n if not (name and resource_storage and size):\n raise IndexWriterError(\n f\"Either ResourceStorage: {resource_storage} or name: {name} or size:\"\n \"{size} not provided.\")\n\n self._name = name\n self._index_size = size\n self._fout = resource_storage.get(f'{self._name}_index', False)",
"def _init_index(self):\n\n if self._check_idx:\n self._index = bamnostic.bai.Bai(self._index_path)\n self.__nocoordinate = self._index.n_no_coor\n self.__mapped = sum(self._index.unmapped[mapped].n_mapped for mapped in self._index.unmapped) + self.nocoordinate\n self.__unmapped = sum(self._index.unmapped[unmapped].n_unmapped for unmapped in self._index.unmapped) + self.nocoordinate",
"def read_idx(self, searchString=None):\n assert self.idx is not None, f\"No index file for {self.grib}.\"\n \n # Open the idx file\n r = requests.get(self.idx)\n assert r.ok, f\"Index file does not exist: {self.idx}\" \n\n read_idx = r.text.split('\\n')[:-1] # last line is empty\n df = pd.DataFrame([i.split(':') for i in read_idx], \n columns=['grib_message', 'start_byte', \n 'reference_time', 'variable', \n 'level', 'forecast_time', 'none'])\n\n # Format the DataFrame\n df['grib_message'] = df['grib_message'].astype(int)\n df['reference_time'] = pd.to_datetime(df.reference_time, format='d=%Y%m%d%H')\n df['valid_time'] = df['reference_time'] + pd.to_timedelta(f\"{self.fxx}H\")\n df['start_byte'] = df['start_byte'].astype(int)\n df['end_byte'] = df['start_byte'].shift(-1, fill_value='')\n df['range'] = df.start_byte.astype(str) + '-' + df.end_byte.astype(str)\n df = df.drop(columns='none')\n df = df.set_index('grib_message')\n df = df.reindex(columns=['start_byte', 'end_byte', 'range', \n 'reference_time', 'valid_time', \n 'variable', 'level', 'forecast_time'])\n df.attrs = dict(\n source=self.idx_source, \n description='Index (.idx) file for the GRIB2 file.', \n model=self.model, \n field=self.field, \n lead_time=self.fxx, \n datetime=self.date\n )\n\n # Filter DataFrame by searchString\n if searchString not in [None, ':']:\n columns_to_search = df[['variable', 'level', 'forecast_time']].apply(lambda x: ':'.join(x), axis=1)\n logic = columns_to_search.str.contains(searchString)\n if logic.sum() == 0:\n print(f\"No GRIB messages found. There might be something wrong with {searchString=}\")\n print(_searchString_help(searchString))\n df = df.loc[logic]\n return df",
"def __init__(self, start_index: int, tag: str):\n self.start_index = start_index\n self.limit = 10\n self.tag = tag.lower()",
"def __init__(self):\n\t\tself.index = {\"items\" : {}, \"authorities\" : []}\n\t\tself.indexed = {}\n\t\tself.field = \"scopeAndContent\"\n\t\tself.debug = False\n\t\tself.ids = {}\n\t\tself.threshold = (1, 1218)\n\t\t\n\t\tself.outputNodes = \"auth-nodes.csv\"\n\t\tself.outputEdges = \"auth-edges.csv\"",
"def __init__(self, args, parsers):\n self.parsers = parsers\n self.search_fields = args.search_field if args.search_field else []",
"def __init__(self,index):\n self.index=index"
] |
[
"0.8396916",
"0.650846",
"0.6117651",
"0.610951",
"0.5918099",
"0.58073515",
"0.57342076",
"0.57264924",
"0.5707911",
"0.56755626",
"0.56056577",
"0.56042516",
"0.5564753",
"0.5564753",
"0.55574864",
"0.55394095",
"0.54976076",
"0.54976076",
"0.5472114",
"0.54673475",
"0.545125",
"0.54297316",
"0.54267704",
"0.54266614",
"0.542526",
"0.54066414",
"0.5403819",
"0.539966",
"0.53981465",
"0.53792435"
] |
0.71863604
|
1
|
Sets the nucleus_security_id of this MdHistoryRequestCO.
|
def nucleus_security_id(self, nucleus_security_id):
self._nucleus_security_id = nucleus_security_id
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def security_user_id(self, security_user_id):\n\n self._security_user_id = security_user_id",
"def security_user_id(self, security_user_id):\n\n self._security_user_id = security_user_id",
"def security_identities(self, security_identities):\n\n self._security_identities = security_identities",
"def set_id(self, ssc_id):\r\n self.ssc_id = ssc_id",
"def srs_id(self, srs_id):\n self.logger.debug(\"In 'srs_id' setter.\")\n\n if len(srs_id) < 3:\n raise Exception(\"SRS ID is too short, must be more than 3 characters.\")\n\n self._srs_id = srs_id",
"def security(self, security):\n\n self._security = security",
"def security_hash(self, security_hash):\n if security_hash is not None and len(security_hash) < 1:\n raise ValueError(\"Invalid value for `security_hash`, length must be greater than or equal to `1`\") # noqa: E501\n\n self._security_hash = security_hash",
"def sso_id(self, sso_id):\n\n self._sso_id = sso_id",
"def security_group_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"security_group_id\")",
"def security_group_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"security_group_id\")",
"def set_student_id(self, student_id):\n self._student_id = student_id",
"def set_id(self, uid):\n self.nccl_id = uid\n return self.nccl_id",
"def set_network_id(self, sNetworkId):\n\t\tcall_sdk_function('PrlVirtNet_SetNetworkId', self.handle, sNetworkId)",
"def security_policy_num(self, security_policy_num):\n\n self._security_policy_num = security_policy_num",
"def security_group_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"security_group_id\")",
"def security_group_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"security_group_id\")",
"def created_by_security_user_id(self, created_by_security_user_id):\n\n self._created_by_security_user_id = created_by_security_user_id",
"def set_id_number(self, id_number):\n self.id_number = id_number",
"def security_groups(self, security_groups):\n\n self._security_groups = security_groups",
"def source_group_id(self, source_group_id):\n\n self._source_group_id = source_group_id",
"def security(self, security: ModelDeploymentSecurityConfig):\n\n self._security = security",
"def trace_id(self, trace_id):\n\n self._trace_id = trace_id",
"def trace_id(self, trace_id):\n\n self._trace_id = trace_id",
"def trace_id_set(trace_id: tuple[str, str]) -> None:\n trace_id_cv.set(trace_id)",
"def set_domain_sid(self, sid):\n dsdb._samdb_set_domain_sid(self, sid)",
"def thread_id(self, thread_id):\n\n self._thread_id = thread_id",
"def thread_id(self, thread_id):\n\n self._thread_id = thread_id",
"def thread_id(self, thread_id):\n\n self._thread_id = thread_id",
"def security_group_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"security_group_id\")",
"def security_group_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"security_group_id\")"
] |
[
"0.6567464",
"0.6567464",
"0.52219766",
"0.51711905",
"0.5101832",
"0.5018672",
"0.5015538",
"0.4997469",
"0.49538937",
"0.49538937",
"0.4811074",
"0.4785215",
"0.4778403",
"0.4775735",
"0.47065136",
"0.47065136",
"0.46921664",
"0.46891037",
"0.46483314",
"0.45931828",
"0.45859134",
"0.45701373",
"0.45701373",
"0.45678264",
"0.45333526",
"0.45233306",
"0.45233306",
"0.45233306",
"0.44722682",
"0.44722682"
] |
0.7827064
|
0
|
Perform experiment on bivariate dataset generated from 3 Gaussians.
|
def perform_bivariate_3_gaussians_exp(N, pp, mu_1, mu_2, mu_3,
sigma_1, sigma_2, sigma_3,
truncation_bounds, censoring_bounds,
max_iteration=50, seed=100):
# Fix the random state
random.seed(seed)
np.random.seed(seed)
# Stack and reorder the means and covariance matrices into unified matrices
mu = np.stack([mu_1, mu_2, mu_3], axis=0)
sigma = np.stack([sigma_1, sigma_2, sigma_3], axis=2)
pp, mu, sigma = reorder_gmm_compoments(pp, mu, sigma)
K = mu.shape[0]
# Generate GMM data
print("Step #1: Generating a Gaussian-Mixture-Model dataset")
print("True parameters:")
print("pp: \n{}\n".format(pp))
print("mu: \n{}\n".format(mu))
print("sigma: \n{}\n".format(sigma.T))
y = generate_gmm_data(pp, mu, sigma, N)
# Plot the GMM data
plt.figure()
ax = plot_gmm_data(y, mu, sigma, point_color='black')
plt.title("The Original Data Generated by Three Gaussian Components")
print("\n" + "*"*80)
# Perform censoring and truncation on the original data
print("Step #2: Censoring and truncating the data")
x = censor_and_truncate_data(y)
# Plot the censored and truncated data
plt.figure()
ax = plot_gmm_data(y, mu, sigma, point_color='red')
plt.title("Truncated and Censored Data")
print("\n" + "*"*80)
# Init parameters using K-means
print("Step #3: Initializing parameters using K-means")
par = init_kmeans(x, K)
print("\n" + "*"*80)
# Estimating parameters using truncated and censored EM
print("Step #4: Estimating parameters using truncated and censored EM")
tc_em_results = perform_truncated_em(x, K,
truncation_bounds, censoring_bounds,
par['pp'], par['mu'], par['sigma'],
max_iteration)
print("Estimated parameters by standard EM:")
print("pp: \n{}\n".format(tc_em_results['pp']))
print("mu: \n{}\n".format(tc_em_results['mu']))
print("sigma: \n{}\n".format(tc_em_results['sigma'].T))
plt.figure()
plt.plot(range(len(tc_em_results['ll_hist'])), tc_em_results['ll_hist'])
plt.title("Learning Curve of the Truncated and Censored EM")
plt.xlabel("Iteration")
plt.ylabel("Log-likelihood")
plt.figure()
ax = plot_gmm_data(x, tc_em_results['mu'], tc_em_results['sigma'])
plt.title("Truncated and Censored EM")
print("\n" + "*"*80)
print("Step #5: Estimating parameters using standard EM")
std_em_results = perform_standard_em(x, K, seed)
print("Estimated parameters by standard EM:")
print("pp: \n{}\n".format(std_em_results['pp']))
print("mu: \n{}\n".format(std_em_results['mu']))
print("sigma: \n{}\n".format(std_em_results['sigma'].T))
plt.figure()
ax = plot_gmm_data(x, std_em_results['mu'], std_em_results['sigma'])
plt.title("Standard EM")
print("\n" + "*"*80)
# Evaluate the KL-Divergence between true distribution and estimated
# distributions
print("Step #6: Evaluating the estimated parameters")
pp, mu, sigma = reorder_gmm_compoments(pp, mu, sigma)
tc_em_results['pp'], tc_em_results['mu'], tc_em_results['sigma'] =\
reorder_gmm_compoments(tc_em_results['pp'], tc_em_results['mu'],
tc_em_results['sigma'])
std_em_results['pp'], std_em_results['mu'], std_em_results['sigma'] =\
reorder_gmm_compoments(std_em_results['pp'], std_em_results['mu'],
std_em_results['sigma'])
true_gmm = build_GMM_model(pp, mu, sigma, seed)
tc_gmm = build_GMM_model(tc_em_results['pp'], tc_em_results['mu'],
tc_em_results['sigma'], seed)
std_gmm = build_GMM_model(std_em_results['pp'], std_em_results['mu'],
std_em_results['sigma'], seed)
tc_kl = estimate_kl_divergence_gmm(true_gmm, tc_gmm)
std_kl = estimate_kl_divergence_gmm(true_gmm, std_gmm)
print("\t* KL-Divergence corresponding to truncated and censored EM: {}".\
format(tc_kl))
print("\t* KL-Divergence corresponding to standard EM: {}".\
format(std_kl))
print("\n" + "*"*80)
# Show the plots
print("Step #7: Showing the plots")
plt.show()
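The helpers called above (generate_gmm_data, plot_gmm_data, perform_truncated_em, and so on) are defined elsewhere in the module, so the experiment cannot be run from this excerpt alone. As a hedged, self-contained illustration of the inputs it expects, the sketch below draws a small bivariate sample from three Gaussian components with NumPy; every value is invented for illustration:

import numpy as np

rng = np.random.default_rng(100)

# Illustrative mixture parameters (not from the source).
pp = np.array([0.3, 0.4, 0.3])                          # mixing proportions, sum to 1
mu = np.array([[0.0, 0.0], [3.0, 3.0], [-3.0, 4.0]])    # (K, 2) component means
sigma = np.stack([np.eye(2), 0.5 * np.eye(2), np.eye(2)], axis=2)  # (2, 2, K) covariances

N = 500
components = rng.choice(len(pp), size=N, p=pp)
y = np.array([rng.multivariate_normal(mu[k], sigma[:, :, k]) for k in components])
print(y.shape)  # (500, 2) -- the kind of array the experiment above truncates and censors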
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def gauss3(x,a1,c1,w1,a2,c2,w2,a3,c3,w3):\n return gaussian(x,a1,c1,w1)+gaussian(x,a2,c2,w2)+gaussian(x,a3,c3,w3)",
"def make_synthetic_experiment(sample_data, model, make_gibbs_fn, explicit_ub=None):\n rng_key = random.PRNGKey(0)\n\n # Sampling parameters\n n_values = [100, 1000, 10000]\n\n # DPMM/PYMM parameters\n T = 20 # max number of component in the truncated stick breaking representation\n t = np.arange(T + 1)\n alpha = 1\n sigma = 0\n\n # Plotting parameters\n fig, (ax0, ax1) = plt.subplots(1, 2)\n\n priors = compute_PY_prior(alpha, sigma, n_values)\n for Npoints, prior in zip(n_values, priors):\n cluster_count = np.zeros(T + 1) # cluster count histogram\n upper_bound = np.zeros(T + 1)\n \n cluster_size = np.zeros(Npoints+1)\n\n # Repeat the experiment \n for _ in range(REPEATS):\n data = sample_data(rng_key, Npoints)\n z = sample_posterior(rng_key, model, data, N_SAMPLES,\n T=T, alpha=1,\n gibbs_fn=make_gibbs_fn(data) if USE_GIBBS else None,\n gibbs_sites=['z'] if USE_GIBBS else None,\n )\n\n cluster_count += compute_n_clusters_distribution(z, T)\n if explicit_ub is not None:\n upper_bound += explicit_ub(data, t, params_PY=(alpha, sigma))\n\n cluster_size += compute_cluster_size_distribution(z)\n\n cluster_count /= REPEATS\n cluster_size /= REPEATS\n\n # Plot cluster count histograms (ax0)\n ax0.plot(t, cluster_count, label=f\"N={Npoints}\")\n\n color = ax0.lines[-1].get_color()\n ax0.plot(t, prior[:T+1], label=f\"Prior N={Npoints}\", color=color, linestyle='dashed', lw=1)\n\n if explicit_ub is not None:\n upper_bound /= REPEATS\n ax0.plot(t[1:], upper_bound[1:], label=f\"Upper bound N={Npoints}\", color=color, linestyle='dotted', lw=1)\n\n # Plot cluster size histograms (ax1)\n bins = np.linspace(0, 1, 10, endpoint=True)\n frac = np.arange(0, Npoints + 1) / Npoints\n\n # TODO : use an actual histogram ?\n # Overlaying histograms doesn't really look good.\n hist, edges = np.histogram(frac, bins, density=True, weights=cluster_size)\n ax1.plot(0.5 * (edges[1:] + edges[:-1]), hist, color=color, label=f\"N={Npoints}\")\n\n ax0.axhline(y=1, color='black', linewidth=0.3, linestyle='dotted')\n ax0.set(title=r\"Number of clusters\", xlabel=\"$t$\", ylabel=r\"$P(T_n=t|X_{1:N})$\")\n ax0.legend()\n\n ax1.set(xlabel=\"Fraction of total size\", title=\"Size of clusters\")\n ax1.legend()\n\n plt.show()",
"def main():\n # Model setup\n source = np.array([1500, 8, 10, 5]) # assume source concentration and 3D coordinates\n u, pg_stability = 2, 'F' # setup environment\n sample_path = r\"data/ObservedData.csv\"\n # Build model object\n func = GaussianPlumeEAAI(lower=(10, -500, -500, 0), upper=(5000, 500, 500, 10), u=u,\n pg_stability=pg_stability, sample_path=sample_path)\n # Generate sample observed data\n func.generate_observed_data(source[0], source[1], source[2], source[3])\n\n # Reverse search source use observed data and PSO (assume unknown the source)\n pso_search_with_recommended_param(func)\n pso_search_with_optimized_param(func)",
"def experiment2(trials=100):\n scatter(*transpose([map(lambda xs:mean(map(log,xs)),experiment2_()) for i in trange(trials)]))",
"def experiment1():\n scatter(*transpose([map(lambda xs:mean(map(log,xs)),experiment1_()) for i in trange(100)]))",
"def main(DATASET='campbell', N_AGE_MIX=1):\n files = glob(f'resources/SN*_{DATASET}_chain.tsv')\n N_SNE = len(files)\n # end = -11 - len(DATASET)\n # get the numbers after the SN.\n snids = map(lambda x: re.search('(?<=SN)\\d*', x).group(0), files)\n snids = list(map(int, snids))\n\n\n model = GaussianMixture(N_AGE_MIX)\n amplitudes = np.zeros((N_SNE, N_AGE_MIX))\n means = np.zeros((N_SNE, N_AGE_MIX))\n stds = np.zeros((N_SNE, N_AGE_MIX))\n\n print(f'Fitting ages to {N_AGE_MIX} Gaussians')\n pdf = PdfPages(f'resources/age_{DATASET}_{N_AGE_MIX}gaus_representation_preview.pdf')\n\n for i, f in enumerate(files):\n data = np.genfromtxt(f, delimiter='\\t')\n data = data[:, 7]\n\n model.fit(np.expand_dims(data, 1))\n\n amplitudes[i] = model.weights_.reshape(N_AGE_MIX)\n means[i] = model.means_.reshape(N_AGE_MIX)\n stds[i] = np.sqrt(model.covariances_).reshape(N_AGE_MIX)\n\n plt.figure()\n plt.hist(data, bins=np.linspace(-5, 20, 200))\n plt.hist(model.sample(1020000)[0], alpha=0.5, bins=np.linspace(-5, 20, 200))\n plt.title(f)\n \n pdf.savefig()\n plt.close()\n\n if (i+1)%10 == 0:\n print(f'Finished with the {i+1}th age fit')\n\n pdf.close()\n\n # if DATASET != 'both':\n ages = np.column_stack((snids, amplitudes, means, stds))\n # todo update the header to match the number of Gaussians used.\n np.savetxt(f'resources/age_{DATASET}_{N_AGE_MIX}gaus_representation.csv', ages, delimiter=',',\n header='sn id, amp_1, amp_2, amp_3, mean_1, mean_2, mean_2, std_1, std_2, std_3')\n \n print(f'Done with {N_AGE_MIX} Gaussian mixture for {DATASET}.')",
"def prob3():\n#raise NotImplementedError(\"Problem 3 Incomplete\")\n n = np.linspace(5000,500000,100)\n h = lambda x : x > 10\n MC_estimates = []\n for N in xrange(5000,505000,5000):\n X = np.random.gamma(9,scale=0.5,size=N)\n MC = 1./N*np.sum(h(X))\n MC_estimates.append(MC)\n MC_estimates = np.array(MC_estimates)\n MC_est = prob2()\n exact = np.array([1-stats.gamma(a=9,scale=0.5).cdf(10)]*n.shape[0])\n print n.shape,MC_estimates.shape\n plt.plot(n,abs(MC_estimates-exact),'r-',label=\"Monte Carlo\")\n plt.plot(n,abs(MC_est-exact),'b-',label=\"Importance\")\n plt.title(\"Error or Approx.\")\n plt.legend(loc=1)\n plt.show()",
"def test_GA():\n\tgenerationSize = 150\n\tmutationProb = 0.01\n\tgenerations = 500\n\tX = []\n\tT = []\n\tY = [] \n\tfitnesses = [0]*generationSize\n\tfor i in range(DATA_POINTS_NUM):\n\t\tX.append((i - DATA_POINTS_NUM/2)*0.1)\n\t\tT.append(polynomi_3N(REFERENCE, X[-1]))\n\t\tY.append(0)\n\t\n\tga = GA.GA(generationSize, 4, mutationProb)\n\tgenomes = ga.seedGenomes()\n\t#plot initial genomes\n\tplt.figure(1)\n\tplt.title('Initial genomes')\n\tfor i in range(len(genomes)):\n\t\tGenome = prescale(genomes[i])\n\t\tprint Genome\n\t\tfor j in range(DATA_POINTS_NUM):\n\t\t\tY[j] = (polynomi_3N(Genome, X[j]))\n\t\tfitnesses[i] = calculate_fitness(T, Y)\n\t\tplt.plot(X,Y, 'b-')\n\tplt.plot(X,T, 'r-')\n\t\t\n\t\n\t#live and learn\n\tfor k in range(generations):\n\t\tprint \".\",\n\t\tfor i in range(len(genomes)):\n\t\t\tGenome = prescale(genomes[i])\n\t\t\tfor j in range(DATA_POINTS_NUM):\n\t\t\t\tY[j] = (polynomi_3N(Genome,X[j]))\n\t\t\tfitnesses[i] = calculate_fitness(T, Y)\n\t\tga.fitnessUpdate(fitnesses)\n\t\tgenomes = ga.createNextGeneration()\n\t\t\n\t#plot final genomes\n\tplt.figure(2)\n\tplt.title('Final genomes')\n\tprint \"\\nfinal Genomes\"\n\tfor i in range(len(genomes)):\n\t\tGenome = prescale(genomes[i])\n\t\tfor j in range(DATA_POINTS_NUM):\n\t\t\tY[j] = (polynomi_3N(Genome,X[j]))\n\t\tprint \"fit:%5.1f [%7.4f, %7.4f, %7.4f, %7.4f]\"%\\\n\t\t (calculate_fitness(T, Y), Genome[0],\n\t\t Genome[1], Genome[2], Genome[3])\n\t\tplt.plot(X,Y, 'b-')\n\tplt.plot(X,T, 'r-')\n\t\t\n\t#plot progress\n\tP = []\n\thistory = ga.generations[:]\n\tfor f in history:\n\t\t#f[1].sort()\n\t\tP.append(max(f[1]))\n\tplt.figure(3)\n\tplt.title('progress')\n\tplt.plot(P)\n\tplt.show()\n\t\n\t#print the result:\t\n\tbestGene = fitnesses.index(max(fitnesses))\n\tG = prescale(genomes[bestGene])\n print \"\"\n\tprint \"And the result is:\"\n\tprint \"%.4f => %.4f (%.4f)\"%(A, G[0], abs(A - G[0]))\n\tprint \"%.4f => %.4f (%.4f)\"%(B, G[1], abs(B - G[1]))\n\tprint \"%.4f => %.4f (%.4f)\"%(C, G[2], abs(C - G[2]))\n\tprint \"%.4f => %.4f (%.4f)\"%(D, G[3], abs(D - G[3]))",
"def main():\n # Load data and pre-process it\n path = \"data2.csv\"\n features, labels, df = load_data(path)\n\n # Learning rates - including our own\n learning_rates = [0.001, 0.005, 0.01, 0.05, 0.1, 0.5, 1, 5, 10, 0.75]\n\n # Num iterations\n num_iterations = [100 for i in range(9)] + [1000]\n\n # Keep track of all final weights for different learning rates\n lines = []\n\n # Compute weights for each learning rate\n for rate, num_iters in zip(learning_rates, num_iterations):\n\n # Get weights from gradient descent and add to weights list\n weights = gradient_descent(features, labels, rate, num_iters)\n lines.append([rate, num_iters] + weights)\n\n # Now write 'lines' to file\n with open('results2.csv', \"w\") as out_file:\n for line in lines:\n out_file.write(\"{}, {}, {}, {}, {} \\n\".format(line[0], line[1],\n line[2], line[3], line[4]))\n out_file.close()\n\n # Select which weights to use for plotting\n index = -1\n\n\n plot_db.visualize_3d(df, lin_reg_weights=lines[index][2:],\n feat1='norm_x1', feat2='norm_x2', labels='label',\n xlim=(-1, 1), ylim=(-1, 1), zlim=(0, 3),\n alpha=learning_rates[index], xlabel='age',\n ylabel='weight', zlabel='height',\n title='')",
"def executeMixtureOfGaussians(questionTitle, K, dataType, hasValid, numEpoch, learningRate):\n logStdOut(questionTitle)\n print questionTitle\n trainData = 0\n validData = 0\n # Load data with seeded randomization\n dataInitializer = DataInitializer()\n if hasValid:\n trainData, validData = dataInitializer.getData(dataType, hasValid)\n else: \n trainData = dataInitializer.getData(dataType, hasValid)\n\n # Execute algorithm \n kObject = MixtureOfGaussians(questionTitle, K, trainData, validData, hasValid, dataType, numEpoch, learningRate)\n logElapsedTime(questionTitle + \"K\" + str(K) + \"NumEpoch\" + str(numEpoch))",
"def main():\n\n\n ## Groups showing similar noise profile\n #grp1 = [ 1, 4, 5, 8, 9 ]\n #grp2 = [ 18, 19, 22, 23, 30, 31 ]\n grp1 = [ 0, 1, 6, 7, 4, 5 ]\n grp2 = [ 12, 13, 16, 17, 18, 19 ]\n #grp3 = [ 18, 19, 22, 23, 26, 27 ]\n with tb.open_file(sys.argv[1], 'r') as dataF:\n\n npm = len(dataF.root.Sensors.DataPMT)#len(dataF.root.RD.pmtrwf[0])\n nevt = len(dataF.root.RD.pmtrwf)\n\n ## Filter definition\n fSample = 40E6\n freqLPF = 100E3\n freqLPFd = 2*freqLPF / fSample\n b, a = signal.butter(1, freqLPFd, 'low', analog=False)\n ##\n fig, axes = plt.subplots(nrows=3, ncols=2, figsize=(20,6))\n #fig.tight_layout()\n fig.show()\n wf_len = len(dataF.root.RD.pmtrwf[0][0])\n if len(sys.argv) > 3:\n wf_len = wf_len/2+1 \n elif len(sys.argv) == 3:\n g1_first = np.zeros(wf_len, np.float64)\n g2_first = np.zeros(wf_len, np.float64)\n g3_first = np.zeros(wf_len, np.float64)\n mean_first = np.zeros(wf_len, np.float64)\n ##\n for ievt in range(nevt):\n ## clear the axies\n for ax in axes.flatten():\n ax.cla()\n plt_frq = np.zeros(wf_len, np.float64)\n fwf_mean = np.zeros(wf_len, np.float64)\n wf_mean = np.zeros(wf_len, np.float64) # No filter\n g1_mean = np.zeros(wf_len, np.float64)\n g2_mean = np.zeros(wf_len, np.float64)\n g3_mean = np.zeros(wf_len, np.float64)\n for ipm in range(npm):\n\n sg = getWF(dataF, ipm, ievt)\n sg = sg - np.mean(sg)\n\n sgf = signal.lfilter(b, a, sg)\n ## remove mean again just in case\n sgf = sgf - np.mean(sgf)\n #sgf = sg\n\n pmID = getPMid(dataF, ipm)\n\n if len(sys.argv) == 3:\n axes[0][0].plot(sgf, label='pmt '+str(pmID))\n fwf_mean += sgf/npm\n wf_mean += sg/npm\n if pmID in grp1:\n g1_mean += sgf/len(grp1)\n elif pmID in grp2:\n g2_mean += sgf/len(grp2)\n elif pmID in grp3:\n g3_mean += sgf/len(grp3)\n else:\n ft = np.fft.rfft(sgf)\n freq = np.fft.rfftfreq(len(sgf), d=25E-9)\n if ipm == 0:\n plt_frq = freq\n if sys.argv[2] == 'mag':\n ft_mag = np.absolute(ft)\n axes[0][0].plot(freq, ft_mag, label='pmt '+str(pmID))\n fwf_mean += ft_mag/npm\n if pmID in grp1:\n g1_mean += ft_mag/len(grp1)\n elif pmID in grp2:\n g2_mean += ft_mag/len(grp2)\n elif pmID in grp3:\n g3_mean += ft_mag/len(grp3)\n elif sys.argv[2] == 'phase':\n ft_pha = np.angle(ft)\n axes[0][0].plot(freq, ft_pha, label='pmt '+str(pmID))\n fwf_mean += ft_pha/npm\n if pmID in grp1:\n g1_mean += ft_pha/len(grp1)\n elif pmID in grp2:\n g2_mean += ft_pha/len(grp2)\n elif pmID in grp3:\n g3_mean += ft_pha/len(grp3)\n \n \n ## The axes not set\n if len(sys.argv) == 3:\n axes[0][1].plot(g1_mean)\n axes[0][1].set_title('Group 1 mean waveform')\n axes[1][0].plot(g2_mean)\n axes[1][0].set_title('Group 2 mean waveform')\n axes[1][1].plot(g3_mean)\n axes[1][1].set_title('Group 3 mean waveform')\n axes[2][0].plot(fwf_mean)\n axes[2][0].set_title('Mean waveform')\n if ievt == 0:\n g1_first = g1_mean\n g2_first = g2_mean\n g3_first = g3_mean\n mean_first = fwf_mean\n else:\n axes[0][1].plot(g1_first)\n axes[1][0].plot(g2_first)\n axes[1][1].plot(g3_first)\n axes[2][0].plot(mean_first)\n axes[2][1].plot(wf_mean)\n axes[2][1].set_title('Mean waveform and corrected')\n axes[2][1].plot(wf_mean-fwf_mean)\n axes[2][1].set_xlim(0, 1000)\n else:\n axes[0][0].set_xlim(0,50000)\n axes[0][1].plot(plt_frq, g1_mean)\n axes[0][1].set_title('Group 1 mean '+sys.argv[2])\n axes[0][1].set_xlim(0,50000)\n axes[1][0].plot(plt_frq, g2_mean)\n axes[1][0].set_title('Group 2 mean '+sys.argv[2])\n axes[1][0].set_xlim(0,50000)\n axes[1][1].plot(plt_frq, g3_mean)\n axes[1][1].set_title('Group 3 mean '+sys.argv[2])\n axes[1][1].set_xlim(0,50000)\n 
axes[2][0].plot(plt_frq, fwf_mean)\n axes[2][0].set_title('Mean '+sys.argv[2])\n axes[2][0].set_xlim(0,50000)\n plt.draw()\n #fig.legend(loc=0)\n catcher = input(\"next plot?\")\n if catcher == 'q':\n exit()\n plt.cla()",
"def add_elec_bunch_gaussian( sim, sig_r, sig_z, n_emit, gamma0, sig_gamma,\n Q, N, tf=0., zf=0., boost=None,\n filter_currents=True, save_beam=None ):\n # Get Gaussian particle distribution in x,y,z\n x = np.random.normal(0., sig_r, N)\n y = np.random.normal(0., sig_r, N)\n z = np.random.normal(zf, sig_z, N) # with offset in z\n # Define sigma of ux and uy based on normalized emittance\n sig_ur = (n_emit/sig_r)\n # Get Gaussian distribution of transverse normalized momenta ux, uy\n ux = np.random.normal(0., sig_ur, N)\n uy = np.random.normal(0., sig_ur, N)\n # Now we imprint an energy spread on the gammas of each particle\n if sig_gamma > 0.:\n gamma = np.random.normal(gamma0, sig_gamma, N)\n else:\n # Or set it to zero\n gamma = np.full(N, gamma0)\n if sig_gamma < 0.:\n print(\"Warning: Negative energy spread sig_gamma detected.\"\n \" sig_gamma will be set to zero. \\n\")\n # Finally we calculate the uz of each particle\n # from the gamma and the transverse momenta ux, uy\n uz = np.sqrt((gamma**2-1) - ux**2 - uy**2)\n # Get inverse gamma\n inv_gamma = 1./gamma\n # Get weight of each particle\n w = -1. * Q / N * np.ones_like(x)\n\n # Propagate distribution to an out-of-focus position tf.\n # (without taking space charge effects into account)\n if tf != 0.:\n x = x - ux*inv_gamma*c*tf\n y = y - uy*inv_gamma*c*tf\n z = z - uz*inv_gamma*c*tf\n\n # Save beam distribution to an .npz file\n if save_beam is not None:\n np.savez(save_beam, x=x, y=y, z=z, ux=ux, uy=uy, uz=uz,\n inv_gamma=inv_gamma, w=w)\n\n # Add the electrons to the simulation\n add_elec_bunch_from_arrays( sim, x, y, z, ux, uy, uz, w,\n boost=boost, filter_currents=filter_currents )",
"def useThibautsData(log, output, bcgr=72.2, sigma=0.75, iterations=4, loc=1900, galaxies=1000,\n datadir='/Users/smn2/EUCLID/CTItesting/uniform/',\n thibautCDM03=False, beta=False, serial=1, parallel=1):\n files = g.glob(datadir + '*.fits')\n #pick randomly\n files = np.random.choice(files, galaxies, replace=False)\n\n #trap parameters: parallel\n if thibautCDM03:\n f1 = '/Users/smn2/EUCLID/vissim-python/data/cdm_thibaut_parallel.dat'\n f2 = '/Users/smn2/EUCLID/vissim-python/data/cdm_thibaut_serial.dat'\n params = ThibautsCDM03params()\n params.update(dict(parallelTrapfile=f1, serialTrapfile=f2, rdose=8.0e9, serial=serial, parallel=parallel))\n else:\n f1 = '/Users/smn2/EUCLID/vissim-python/data/cdm_euclid_parallel.dat'\n f2 = '/Users/smn2/EUCLID/vissim-python/data/cdm_euclid_serial.dat'\n params = MSSLCDM03params()\n params.update(dict(parallelTrapfile=f1, serialTrapfile=f2, rdose=8.0e9, serial=serial, parallel=parallel))\n if beta:\n params.update(dict(beta_p=0.6, beta_s=0.6))\n\n print f1, f2\n\n #store shapes\n eclean = []\n e1clean = []\n e2clean = []\n R2clean = []\n xclean = []\n yclean = []\n eCTI = []\n e1CTI = []\n e2CTI = []\n R2CTI = []\n xCTI = []\n yCTI = []\n eCTIfixed = []\n e1CTIfixed = []\n e2CTIfixed = []\n R2CTIfixed = []\n xCTIfixed = []\n yCTIfixed = []\n\n fh = open(output.replace('.pk', '.csv'), 'w')\n fh.write('#files: %s and %s\\n' % (f1, f2))\n for key in params:\n print key, params[key]\n fh.write('# %s = %s\\n' % (key, str(params[key])))\n fh.write('#file, delta_e, delta_e1, delta_e2, delta_R2, delta_x, delta_y\\n')\n for f in files:\n print 'Processing: ', f\n\n #load data\n nocti = pf.getdata(f)\n\n #scale to SNR about 10 (average galaxy, a single exposure)\n nocti /= np.sum(nocti)\n nocti *= 1500.\n\n #place it on canvas\n tmp = np.zeros((2066, 2048))\n ysize, xsize = nocti.shape\n ysize /= 2\n xsize /= 2\n tmp[loc-ysize:loc+ysize, loc-xsize:loc+xsize] = nocti.copy()\n\n #add background\n tmp += bcgr\n\n #run CDM03\n c = CTI.CDM03bidir(params, [])\n tmp = c.applyRadiationDamage(tmp.copy().transpose()).transpose()\n\n #remove background and make a cutout\n CTIdata = tmp[loc-ysize:loc+ysize, loc-xsize:loc+xsize]\n CTIdata -= bcgr\n CTIdata[CTIdata < 0.] 
= 0.\n\n #write files\n #fileIO.writeFITS(nocti, f.replace('.fits', 'noCTI.fits'), int=False)\n #fileIO.writeFITS(CTI, f.replace('.fits', 'CTI.fits'), int=False)\n\n #reset settings\n settings = dict(sigma=sigma, iterations=iterations)\n\n #calculate shapes\n sh = shape.shapeMeasurement(nocti.copy(), log, **settings)\n results = sh.measureRefinedEllipticity()\n\n eclean.append(results['ellipticity'])\n e1clean.append(results['e1'])\n e2clean.append(results['e2'])\n R2clean.append(results['R2'])\n xclean.append(results['centreX'])\n yclean.append(results['centreY'])\n\n #CTI, fitted centroid\n sh = shape.shapeMeasurement(CTIdata.copy(), log, **settings)\n results2 = sh.measureRefinedEllipticity()\n\n eCTI.append(results2['ellipticity'])\n e1CTI.append(results2['e1'])\n e2CTI.append(results2['e2'])\n R2CTI.append(results2['R2'])\n xCTI.append(results2['centreX'])\n yCTI.append(results2['centreY'])\n\n #fixed centroid\n settings['fixedPosition'] = True\n settings['fixedX'] = results['centreX']\n settings['fixedY'] = results['centreY']\n settings['iterations'] = 1\n sh = shape.shapeMeasurement(CTIdata.copy(), log, **settings)\n results3 = sh.measureRefinedEllipticity()\n\n eCTIfixed.append(results3['ellipticity'])\n e1CTIfixed.append(results3['e1'])\n e2CTIfixed.append(results3['e2'])\n R2CTIfixed.append(results3['R2'])\n xCTIfixed.append(results3['centreX'])\n yCTIfixed.append(results3['centreY'])\n\n text = '%s,%e,%e,%e,%e,%e,%e\\n' % (f, results['ellipticity'] - results2['ellipticity'],\n results['e1'] - results2['e1'], results['e2'] - results2['e2'],\n results['R2'] - results2['R2'],\n results['centreX'] - results2['centreX'],\n results['centreY'] - results2['centreY'])\n fh.write(text)\n print text\n\n fh.close()\n\n results = {'eclean': np.asarray(eclean),\n 'e1clean': np.asarray(e1clean),\n 'e2clean': np.asarray(e2clean),\n 'R2clean': np.asarray(R2clean),\n 'xclean': np.asarray(xclean),\n 'yclean': np.asarray(yclean),\n 'eCTI': np.asarray(eCTI),\n 'e1CTI': np.asarray(e1CTI),\n 'e2CTI': np.asarray(e2CTI),\n 'R2CTI': np.asarray(R2CTI),\n 'xCTI': np.asarray(xCTI),\n 'yCTI': np.asarray(yCTI),\n 'eCTIfixed': np.asarray(eCTIfixed),\n 'e1CTIfixed': np.asarray(e1CTIfixed),\n 'e2CTIfixed': np.asarray(e2CTIfixed),\n 'R2CTIfixed': np.asarray(R2CTIfixed),\n 'xCTIfixed': np.asarray(xCTIfixed),\n 'yCTIfixed': np.asarray(yCTIfixed)}\n\n #save to a file\n fileIO.cPickleDumpDictionary(results, output)\n\n return results",
"def run_test(d):\n\n ######### Problem Specification\n\n # Data generation parameters\n prior_mu_z = np.zeros(d, dtype=np.float32) # Prior mean\n prior_sigma_z = np.eye(d, dtype=np.float32) # Prior covariance matrix\n\n # True model parameters\n num_range = np.arange(-(d-1)/2, (d+1)/2, dtype=np.float32)\n\n t_delta = num_range / 5 \n\n if d == 1:\n t_sigma = np.ones(1)\n else: \n # Allow sigma to range from 0.1 to 1\n t_sigma = 36/(10*(d-1)**2) * num_range**2 + 0.1 \n\n ######### Variable Initialization\n\n # Initial model parameters - same across all methods\n init_delta = prior_mu_z.copy()\n init_log_sigma = 3 * np.ones(d)\n\n # Initial HVAE variational parameters\n init_T = 5.\n init_eps = 0.005 * np.ones(d)\n max_eps = params['max_eps'] * np.ones(d)\n init_logit_eps = np.log(init_eps/(max_eps - init_eps))\n init_log_T_0 = np.log(init_T - 1)\n\n # Initial NF variational parameters\n init_u_pre_reparam = scipy.stats.truncnorm.rvs(-2, 2, scale=0.1, size=d)\n init_w = scipy.stats.truncnorm.rvs(-2, 2, scale=0.1, size=d)\n init_b = 0.1\n\n # Initial VAE parameters\n init_mu_z = prior_mu_z.copy()\n init_log_sigma_z = np.ones(d)\n\n ######### Set up models\n\n HVAE_model_1 = HVAE(\n ['delta', 'log_sigma', 'logit_eps', 'log_T_0'],\n [init_delta, init_log_sigma, init_logit_eps, init_log_T_0], \n 'HVAE_1', d, params['HVAE_K_1'])\n HVAE_model_2 = HVAE(\n ['delta', 'log_sigma', 'logit_eps', 'log_T_0'],\n [init_delta, init_log_sigma, init_logit_eps, init_log_T_0], \n 'HVAE_2', d, params['HVAE_K_2'])\n\n HVAE_model_notemp_1 = HVAE(\n ['delta', 'log_sigma', 'logit_eps'],\n [init_delta, init_log_sigma, init_logit_eps], \n 'HVAE_notemp_1', d, params['HVAE_K_1'])\n HVAE_model_notemp_2 = HVAE(\n ['delta', 'log_sigma', 'logit_eps'], \n [init_delta, init_log_sigma, init_logit_eps],\n 'HVAE_notemp_2', d, params['HVAE_K_2'])\n\n NF_model_1 = NF(\n ['delta', 'log_sigma', 'u_pre_reparam', 'w', 'b'],\n [init_delta, init_log_sigma, init_u_pre_reparam, init_w, init_b],\n 'NF_1', d, params['NF_K_1'])\n NF_model_2 = NF(\n ['delta', 'log_sigma', 'u_pre_reparam', 'w', 'b'],\n [init_delta, init_log_sigma, init_u_pre_reparam, init_w, init_b],\n 'NF_2', d, params['NF_K_2'])\n\n VB_model = VB(['delta', 'log_sigma', 'mu_z', 'log_sigma_z'], \n [init_delta, init_log_sigma, init_mu_z, init_log_sigma_z], 'VB', d)\n\n model_list = [HVAE_model_1, HVAE_model_2, HVAE_model_notemp_1, \n HVAE_model_notemp_2, NF_model_1, NF_model_2, VB_model]\n \n ######### Generate Training Data & Save - One for each test\n\n train_data_list = []\n\n for i in range(params['n_tests']):\n z = np.random.multivariate_normal(prior_mu_z, prior_sigma_z)\n x = np.random.multivariate_normal(z + t_delta, np.diag(t_sigma**2), \n size=params['n_data'])\n train_data_list.append(x)\n\n # Folder should have already been created in the initializations\n data_path = os.path.join('save', str(d), 'train_data.p')\n pickle.dump(train_data_list, open(data_path, 'wb')) \n\n ######### Train models\n\n sess = tf.Session()\n sess.run(tf.global_variables_initializer())\n\n # Store the final parameter values for all test runs in this dictionary\n final_params = {}\n\n for m in model_list:\n\n final_values = []\n\n for i in range(params['n_tests']):\n (delta, sigma) = m.train(sess, train_data_list[i], i)\n final_values.append((delta, sigma))\n\n final_params[m.model_name] = final_values.copy()\n\n ######### Test models using difference between parameters\n\n param_diffs = {}\n\n for m in model_list:\n\n diffs = []\n\n for i in range(params['n_tests']):\n delta = 
final_params[m.model_name][i][0]\n sigma = final_params[m.model_name][i][1]\n\n delta_diff = np.sum((delta - t_delta)**2)\n sigma_diff = np.sum((sigma - t_sigma)**2)\n\n diffs.append((delta_diff, sigma_diff))\n\n param_diffs[m.model_name] = diffs.copy()\n\n # Save parameter differences in a pickle file\n diff_path = os.path.join('save', str(d), 'all_diffs.p')\n pickle.dump(param_diffs, open(diff_path, 'wb'))",
"def E_step(X, pi, mu, sigma):\n N = X.shape[0] # number of objects\n C = pi.shape[0] # number of clusters\n d = mu.shape[1] # dimension of each object\n gamma = np.zeros((N, C)) # distribution q(T)\n\n ### YOUR CODE HERE\n # For all objects in dataset X\n for i in range(N):\n z = 0\n # Likelihood: P(x_i|t_i=c,theta) = N(x_i|mu_c, sigma_c²)\n # N(x_i|mu_c, sigma_c²) = (1/sqrt((2pi)^n*sigma_c_det)) * exp(-0.5*(x_i-mu_c).T*sigma_c⁻1*(x_i-mu_c))\n x_i = X[i]\n # For all clusters in mixture distribution\n for c in range(C):\n # parameters for cluster c\n pi_c = pi[c] # Prior prob. p(ti=c)\n mu_c = mu[c, :] # vector of means\n sigma_c = sigma[c, :] # covariance matrix\n # Covariance matrix determinant\n sigma_c_det = np.linalg.det(sigma_c)\n # Compute inverse as y = A⁻1*x (trick2)\n x = x_i - mu_c\n y = np.linalg.solve(sigma_c, x)\n exp = np.exp(-0.5*np.matmul(x, y))\n # Constant term\n norm_ct_c = pi_c / np.sqrt(sigma_c_det)\n # c component of q distribution for x_i\n gamma[i, c] = norm_ct_c * exp\n z += gamma[i, c]\n for c in range(C):\n gamma[i, c] /= z\n # # Normalize cluster distribution q(t_i=c): Softmax (trick1)\n # numerator = np.exp(gamma[i, :] - np.max(gamma[i, :]))\n # denominator = numerator.sum()\n # gamma[i, :] = numerator / denominator\n \n return gamma",
"def prob3():\n\n h = lambda x: x > 10\n\n N = range(5000,500001, 5000)\n\n estimates = []\n\n for n in N:\n random_draw = np.random.gamma(9, scale = 0.5, size = n)\n\n estimate = 1./n * np.sum(h(random_draw))\n estimates.append(estimate)\n\n # arrayify it\n estimates = np.array(estimates)\n\n m = 1 - stats.gamma(a = 9, scale = 0.5).cdf(10)\n \n y = abs(estimates - m)\n y_2 = abs(prob2() - m)\n\n plt.plot(N,y)\n plt.plot(N,y_2)\n\n plt.show()",
"def generate_training_data_3D():\n c11 = np.random.uniform(0.05, 1.50, 20)\n c12 = np.random.uniform(-1.50, 1.50, 20)\n c13 = np.random.uniform(-2.50, -0.05, 20)\n c21 = np.random.uniform(-1.50, -0.05, 20)\n c22 = np.random.uniform(-1.50, 1.50, 20)\n c23 = np.random.uniform(0.05, 2.50, 20)\n c1 = np.array([[i, j, k] for i, j, k in zip(c11, c12, c13)])\n c2 = np.array([[i, j, k] for i, j, k in zip(c21, c22, c23)])\n\n points = plt.figure()\n ax = points.add_subplot(111, projection='3d')\n ax.scatter(c1[:, 0], c1[:, 1], c1[:, 2], c='r', marker='^')\n ax.scatter(c2[:, 0], c2[:, 1], c2[:, 2], c='b', marker='*')\n plt.show()\n plt.close()\n\n return c1, c2",
"def run_test(dim=3):\n\n traces = []\n\n for smoothing in range(10, 101, 10):\n pencilbeams = []\n num_sight_lines = 100\n\n # Construct our pencilbeams\n for ix in range(0, num_sight_lines+1):\n # Make impact parameters covering the full\n # particle in x\n x = ix / (1. * num_sight_lines) * smoothing\n \n pencilbeams.append(\n dict(x=x, y=0),\n )\n\n results = []\n for pencilbeam in pencilbeams:\n result = testsph(h=smoothing, dim=dim, **pencilbeam)\n results.append(result)\n\n # Integrate the pencilbeam weightings to find the full SPH weighting\n # This is the plane x-z from origin along +ve x-axis (sitting at y=0)\n particle_integral = integrate.trapz([x for x in results], [x['x'] for x in pencilbeams])\n \n # \"All smoothing lengths should integrate to the same value \"\n\n # We've sampled a quadrant in x-y and integrated entirely along z, so mulitply by 4\n print particle_integral * 4.\n\n traces.append(go.Scatter(y=[x for x in results], x=[y['x'] for y in pencilbeams]))\n\n # The mass of a particle should be the area under each of these curves(?)\n plot(traces)",
"def hxb_data_experiment(directory):\n\n #######################################################################\n #LOADING DATA\n #######################################################################\n\n #Loading all files in the folder\n xyz_files = []\n blur_files = []\n\n #Spliting files in xyz coordinates and voxels data.\n files = os.listdir(directory)\n files.sort()\n for f in files:\n if os.path.isfile(os.path.join(directory, f)):\n if f.startswith('C'):\n xyz_files.append(f)\n if f.startswith('F'):\n blur_files.append(f)\n\n #Loading xyz data\n dict_xyz = {}\n for f in xyz_files:\n #The name of the subject is given by the four last letter.\n dict_xyz[f[1]] = np.genfromtxt(os.path.join(directory, f),\n dtype = float)\n print \"xyz_file for subject %s was loaded.\" %(f[1])\n\n #Loading voxels data and creating the intermediate representation objects\n inter_reps = []\n for f in blur_files:\n #Name of the subject is always in positions [7:11]\n s_name = f[1]\n #Class is in possition 5\n cls = str(f[3:-4])\n arr_voxels = np.genfromtxt(os.path.join(directory, f), dtype = float, delimiter = ' ')\n inter_reps.append(ir.IntermRep(arr_voxels, dict_xyz[s_name], s_name,\n cls))\n\n print \"Intermediate representation for subject %s and class %s created.\" %(s_name, cls)\n\n #######################################################################\n\n\n #######################################################################\n #Computing the Graph Encoding\n #######################################################################\n\n graphs = []\n classes = []\n subjects = []\n\n #Cluster based graph encoding with a fixed threshold\n #-----------------------------\n# fc = ge.GE_ClusterBased()\n# for i_rep in inter_reps:\n# graphs.append(fc.encode(i_rep, clust_alg = 'MiniBatchKMeans',\n# n_clusters = -1, clust_ratio = 170,\n# similarity_measure=\"pearson\",\n# threshold=0.1, n_jobs = 1))\n# classes.append(i_rep.cls)\n# subjects.append(i_rep.subj_name)\n# print \"Graph built for subject %s and class %s.\" %(i_rep.subj_name, i_rep.cls)\n# print \"Number of nodes: %i, number of edges: %i\" %(graphs[-1].number_of_nodes(),\n# graphs[-1].number_of_edges())\n# print \"\"\n #---------------------------\n\n #Cluster based graph encoding with percentage of completeness\n #-----------------------------\n# fc = gpc.GE_ClustBased_PercCompleteness()\n# for i_rep in inter_reps:\n# graphs.append(fc.encode(i_rep, clust_alg = 'MiniBatchKMeans',\n# n_clusters = -1, clust_ratio = 110,\n# similarity_measure=\"pearson\",\n# completeness=0.3, n_jobs = 1))\n# classes.append(i_rep.cls)\n# subjects.append(i_rep.subj_name)\n# print \"Graph built for subject %s and class %s.\" %(i_rep.subj_name, i_rep.cls)\n# print \"Number of nodes: %i, number of edges: %i\" %(graphs[-1].number_of_nodes(),\n# graphs[-1].number_of_edges())\n# print \"\"\n #---------------------------\n\n #Cluster based graph encoding with categories for node degree\n #-----------------------------\n# fc = gnd.GE_ClustBased_DiscNodeDegree()\n# for i_rep in inter_reps:\n# graphs.append(fc.encode(i_rep, clust_alg = 'MiniBatchKMeans',\n# n_clusters = -1, clust_ratio = 170,\n# similarity_measure=\"pearson\",\n# threshold=0.1, n_categ=10, n_jobs = 1))\n# classes.append(i_rep.cls)\n# subjects.append(i_rep.subj_name)\n# print \"Graph built for subject %s and class %s.\" %(i_rep.subj_name, i_rep.cls)\n# print \"Number of nodes: %i, number of edges: %i\" %(graphs[-1].number_of_nodes(),\n# graphs[-1].number_of_edges())\n# print \"\"\n 
#---------------------------\n\n\n #Weighted version of graph encoding with a fixed threshold\n #-----------------------------\n# fc = gwe.GE_FuncConn_WeightedEncoding()\n# for i_rep in inter_reps:\n# graphs.append(fc.encode(i_rep, clust_alg = 'MiniBatchKMeans',\n# n_clusters = -1, clust_ratio = 200,\n# similarity_measure=\"pearson\",\n# threshold=0.5, n_jobs = 1))\n# classes.append(i_rep.cls)\n# subjects.append(i_rep.subj_name)\n# print \"Graph built for subject %s and class %s.\" %(i_rep.subj_name, i_rep.cls)\n# print \"Number of nodes: %i, number of edges: %i\" %(graphs[-1].number_of_nodes(),\n# graphs[-1].number_of_edges())\n# print \"\"\n #---------------------------\n\n # Graph encoding based on Neirghboring connections and hierarchical clustering algorithm.\n #-----------------------------\n fc = gnc.GE_NeighConst_HCA()\n for i_rep in inter_reps:\n graphs.append(fc.encode(i_rep, clust_ratio=8, encoding='geometrical', threshold=0.2))\n classes.append(i_rep.cls)\n subjects.append(i_rep.subj_name)\n print \"Graph built for subject %s and class %s.\" %(i_rep.subj_name, i_rep.cls)\n print \"Number of nodes: %i, number of edges: %i\" %(graphs[-1].number_of_nodes(),\n graphs[-1].number_of_edges())\n print \"\"\n #---------------------------\n\n #######################################################################\n\n\n #######################################################################\n #Reordering data for the leave-one-subject-out cross-validation\n #######################################################################\n\n #Permutting elements for a further leave-two-out cv (leaving out\n #two samples corresponding to the same subject avoiding problems with\n #unbalanced data).\n# nm_graphs = [None] * len(graphs)\n# nm_classes = [None] * len(classes)\n# nm_subjects = [None] * len(subjects)\n#\n# for i in range(len(graphs) / 2):\n# nm_graphs[i*2] = graphs[i]\n# nm_graphs[i*2 + 1] = graphs[(len(graphs) / 2) + i]\n# nm_classes[i*2] = classes[i]\n# nm_classes[i*2 + 1] = classes[(len(classes) / 2) + i]\n# nm_subjects[i*2] = subjects[i]\n# nm_subjects[i*2 + 1] = subjects[(len(subjects) / 2) + i]\n#\n print subjects\n print classes\n\n\n# #Testing if I get chance level when I permutted the class label...\n# np.random.shuffle(nm_classes)\n# np.random.shuffle(nm_classes)\n# print nm_classes\n\n\n #######################################################################\n\n\n #######################################################################\n #Computing the Graph Kernel\n #######################################################################\n\n #Computing the kernel matrix by using WL graph kernel.\n gk_wl = gk.GK_WL()\n k_matrix = gk_wl.compare_list_normalized(graphs, h = 1, nl = True)\n\n #Computing the kernel matrix with the normalized vectors graph kernel\n# gk_wl = gknv.GK_WL_NV()\n# k_norm_mat = gk_wl.compare_list_normalized(nm_graphs, h = 2, nl = False)\n\n #Computing the kernel matrix by using the weighted version of WL.\n# gk_wl = gkw.GK_WL_Weights()\n# k_matrix = gk_wl.compare_list_normalized(nm_graphs, h = 0, nl = False)\n\n #######################################################################\n\n\n #######################################################################\n #Ploting the similarity matrix\n #######################################################################\n\n #Ploting the similarity matrix, the matrix is permuted to have all\n #samples belonging to the first class at the beggining.\n# perm_matrix = ut.PermuteMatrix(k_matrix, nm_classes)\n# 
vs.PlotMatrix(perm_matrix)\n\n #Making a list with number of nodes and edges of all graphs. They will be\n #used in the plotting.\n n_nodes = []\n n_edges = []\n for g in graphs:\n n_nodes.append(g.number_of_nodes())\n n_edges.append(g.number_of_edges())\n\n #Plotting the vectorial representation of each graph. In the picture we\n #include number_of_nodes and number_of_edges, original_vectors and\n #normalized_vectors.\n #vs.PlotFeatureVectors(n_nodes, n_edges, gk_wl.vectors, gk_wl_nv.vectors, nm_classes)\n vs.PlotFeatureVectors(n_nodes, n_edges, gk_wl.vectors, gk_wl.vectors, classes)\n\n #######################################################################\n\n\n #######################################################################\n #Leave-one-subject-out cross-validation\n #######################################################################\n\n preds, scores = cl.subject_fold_cv(k_matrix, np.array(classes),\n n_subjects = 6)\n print \"\"\n print \"Predictions: \"\n print preds\n print \"\"\n print \"Scores:\"\n print scores\n\n #######################################################################\n\n\n return scores",
"def run_experiment(m1,m2,m3,N):\r\n bandits = [Bandit(m1),Bandit(m2),Bandit(m3)]\r\n \r\n #for PLOTTING ONLY\r\n #this is not needed for functioning\r\n data = np.empty(N)\r\n #play game N times\r\n for i in range(N):\r\n j = np.argmax([b.mean for b in bandits])\r\n #pull the one we chose\r\n x = bandits[j].pull()\r\n #whether explore or exploit, we update our knowledge\r\n bandits[j].update(x)\r\n #for PLOTTING\r\n data[i] = x\r\n #this is basically the rate your agent sees at the win rate per bandit\r\n #once the experiment is over\r\n cumulative_average = np.cumsum(data)/(np.arange(N)+1)\r\n plt.plot(cumulative_average)\r\n #just lines, like grid lines\r\n for b in bandits:\r\n plt.plot(b.signal)\r\n plt.title(\"Sin Bandits\")\r\n #plt.xscale('log')\r\n plt.show()\r\n return cumulative_average",
"def test_Gaussian_NB_estimators():",
"def multivariate_gauss_prob(observed, mean, covariance):\n\n return None",
"def sample_values(self, positions, gibbs):\n \"\"\"Sample important values\"\"\"\n\n if gibbs:\n self.local_energy = self.h.local_energy_gibbs(positions)\n self.accumulate_energy += self.h.local_energy_gibbs(positions)\n self.accumulate_energy_sq += self.local_energy*self.local_energy\n gradient_wf_a = 0.5*self.w.gradient_wavefunction_a(positions)\n gradient_wf_b = 0.5*self.w.gradient_wavefunction_b(positions)\n gradient_wf_W = 0.5*self.w.gradient_wavefunction_W(positions)\n else:\n self.local_energy = self.h.local_energy(positions)\n self.accumulate_energy += self.h.local_energy(positions)\n self.accumulate_energy_sq += self.local_energy*self.local_energy\n gradient_wf_a = self.w.gradient_wavefunction_a(positions)\n gradient_wf_b = self.w.gradient_wavefunction_b(positions)\n gradient_wf_W = self.w.gradient_wavefunction_W(positions)\n # self.local_energy = self.h.local_energy_numerical(positions)\n # self.accumulate_energy += self.h.local_energy_numerical(positions)\n # gradient_wf_a = np.zeros(self.w.M)\n # gradient_wf_b = np.zeros(self.w.N)\n # gradient_wf_W = np.zeros((self.w.M, self.w.N))\n\n self.accumulate_psi_term_a += gradient_wf_a\n self.accumulate_psi_term_b += gradient_wf_b\n self.accumulate_psi_term_W += gradient_wf_W\n self.accumulate_both_a += gradient_wf_a*self.local_energy\n self.accumulate_both_b += gradient_wf_b*self.local_energy\n self.accumulate_both_W += gradient_wf_W*self.local_energy",
"def sampling(args):\n t_mean, t_log_var = args\n # YOUR CODE HERE\n epsilon = K.random_normal(t_mean.shape)\n z = epsilon * K.exp(0.5 * t_log_var) + t_mean\n return z",
"def generate_x(number_dimensions, T_train, T_test, mu, feature_model):\n number_training_obeservations = T_train.shape[0]\n number_testing_obeservations = T_test.shape[0]\n\n X_train = np.zeros((number_training_obeservations,number_dimensions))\n X_test = np.zeros((number_testing_obeservations,number_dimensions))\n\n mixture_indicator_train = generate_mixture_indicator(number_training_obeservations)\n mixture_indicator_test = generate_mixture_indicator(number_testing_obeservations)\n\n G = np.random.normal(0,1,(number_dimensions,number_dimensions))\n q, r = np.linalg.qr(G)\n\n mu1 = mu*np.ones(number_dimensions)\n mu2 = -mu*np.ones(number_dimensions)\n\n if feature_model == \"A\":\n eigenvalues1 = np.random.uniform(0,1,(number_dimensions,1))\n eigenvalues1 = np.sort(eigenvalues1, axis = 0)/np.sum(eigenvalues1)\n lambda1 = np.identity(number_dimensions)\n np.fill_diagonal(lambda1,eigenvalues1)\n cov1 = q@[email protected]\n\n for i in range(number_training_obeservations):\n if T_train[i] == 0:\n X_train[i,:] = np.random.multivariate_normal(mu1,cov1,1)\n else:\n X_train[i,:] = np.random.multivariate_normal(mu2,cov1,1)\n \n for i in range(number_testing_obeservations):\n if T_test[i] == 0:\n X_test[i,:] = np.random.multivariate_normal(mu1,cov1,1)\n else:\n X_test[i,:] = np.random.multivariate_normal(mu2,cov1,1)\n\n\n elif feature_model == \"B\":\n eigenvalues1 = np.random.uniform(0,1,(number_dimensions,1))\n eigenvalues1 = np.sort(eigenvalues1, axis = 0)/np.sum(eigenvalues1)\n lambda1 = np.identity(number_dimensions)\n np.fill_diagonal(lambda1,eigenvalues1)\n cov1 = q@[email protected]\n\n eigenvalues2 = np.random.uniform(0,1,(number_dimensions,1))\n eigenvalues2 = np.sort(eigenvalues2, axis = 0)[::-1]/np.sum(eigenvalues2)\n lambda2 = np.identity(number_dimensions)\n np.fill_diagonal(lambda2,eigenvalues2)\n cov2 = q@[email protected]\n\n\n for i in range(number_training_obeservations):\n if T_train[i] == 0:\n if mixture_indicator_train[i] == 0:\n X_train[i,:] = np.random.multivariate_normal(mu1,cov1,1)\n else:\n X_train[i,:] = np.random.multivariate_normal(mu1,cov2,1)\n else:\n if mixture_indicator_train[i] == 0:\n X_train[i,:] = np.random.multivariate_normal(mu2,cov1,1)\n else:\n X_train[i,:] = np.random.multivariate_normal(mu2,cov2,1)\n \n for i in range(number_testing_obeservations):\n if T_test[i] == 0:\n if mixture_indicator_test[i] == 0:\n X_test[i,:] = np.random.multivariate_normal(mu1,cov1,1)\n else:\n X_test[i,:] = np.random.multivariate_normal(mu1,cov2,1)\n else:\n if mixture_indicator_test[i] == 0:\n X_test[i,:] = np.random.multivariate_normal(mu2,cov1,1)\n else:\n X_test[i,:] = np.random.multivariate_normal(mu2,cov2,1)\n\n train_mean = np.mean(X_train, axis = 0)\n train_std = np.std(X_train, axis = 0)\n X_train = (X_train - train_mean)/train_std\n X_test = (X_test - train_mean)/train_std\n \n return X_train, X_test",
"def Script3():\n # In non-script code, use getLogger(__name__) at module scope instead.\n logger = logging.getLogger(\"Script3\") \n gal_flux = 1.e5 # ADU\n gal_n = 3.5 #\n gal_re = 3.7 # pixels\n g1 = -0.23 #\n g2 = 0.15 #\n atmos_a_sigma=2.1 # pixels\n atmos_a_g1 = -0.13 # (shear for \"a\")\n atmos_a_g2 = -0.09 #\n atmos_fa=0.2 # (fraction of flux in \"a\")\n atmos_b_sigma=0.9 # pixels\n atmos_b_g1 = 0.02 # (shear for \"b\")\n atmos_b_g2 = -0.04 #\n opt_defocus=0.53 # wavelengths\n opt_a1=-0.29 # wavelengths\n opt_a2=0.12 # wavelengths\n opt_c1=0.64 # wavelengths\n opt_c2=-0.33 # wavelengths\n opt_padFactor=6 # multiples of Airy padding required to avoid folding for aberrated PSFs\n lam = 800 # nm NB: don't use lambda - that's a reserved word.\n tel_diam = 4. # meters \n pixel_scale = 0.23 # arcsec / pixel\n wcs_g1 = -0.02 #\n wcs_g2 = 0.01 #\n sky_level = 1.e3 # ADU / pixel\n gain = 1.7 # ADU / e-\n read_noise = 0.3 # ADU / pixel\n\n logger.info('Starting script 3 using:')\n logger.info(' - sheared (%.2f,%.2f) Sersic galaxy (flux = %.1e, n = %.1f, re = %.2f),', \n g1, g2, gal_flux, gal_n, gal_re)\n logger.info(' - sheared double-Gaussian atmospheric PSF')\n logger.info(' First component: sigma = %.2f, shear = (%.2f,%.2f), frac = %.2f',\n atmos_a_sigma, atmos_a_g1, atmos_a_g2, atmos_fa)\n logger.info(' Second component: sigma = %.2f, shear = (%.2f,%.2f), frac = %.2f',\n atmos_b_sigma, atmos_b_g1, atmos_b_g2, 1-atmos_fa)\n logger.info(' - optical PSF with defocus = %.2f, astigmatism = (%.2f,%.2f),',\n opt_defocus, opt_a1, opt_a2)\n logger.info(' coma = (%.2f,%.2f), lambda = %.0f nm, D = %.1f m', \n opt_c1, opt_c2, lam, tel_diam)\n logger.info(' - pixel scale = %.2f,',pixel_scale)\n logger.info(' - WCS distortion = (%.2f,%.2f),',wcs_g1,wcs_g2)\n logger.info(' - Poisson noise (sky level = %.1e, gain = %.1f).',sky_level, gain)\n logger.info(' - Gaussian read noise (sigma = %.2f).',read_noise)\n\n \n # Define the galaxy profile.\n gal = galsim.Sersic(gal_n, flux=gal_flux, re=gal_re)\n\n # Shear the galaxy by some value.\n gal.applyShear(g1, g2)\n logger.info('Made galaxy profile')\n\n # Define the atmospheric part of the PSF.\n atmos_a = galsim.Gaussian(flux=atmos_fa, sigma=atmos_a_sigma)\n atmos_a.applyShear(atmos_a_g1 , atmos_a_g2)\n atmos_b = galsim.Gaussian(flux=1-atmos_fa, sigma=atmos_b_sigma)\n atmos_b.applyShear(atmos_b_g1 , atmos_b_g2)\n atmos = galsim.Add([atmos_a, atmos_b])\n logger.info('Made atmospheric PSF profile')\n\n # Define the optical part of the PSF.\n # The first argument of OpticalPSF below is lambda/D,\n # which needs to be in pixel units, so do the calculation:\n lam_over_D = lam * 1.e-9 / tel_diam # radians\n lam_over_D *= 206265 # arcsec\n lam_over_D *= pixel_scale # pixels\n logger.info('Calculated lambda over D = %f pixels', lam_over_D)\n # The rest of the values here should be given in units of the \n # wavelength of the incident light. 
padFactor is used to here to reduce 'folding' for these\n # quite strong aberration values\n optics = galsim.OpticalPSF(lam_over_D, \n defocus=opt_defocus, coma1=opt_c1, coma2=opt_c2, astig1=opt_a1,\n astig2=opt_a2, padFactor=opt_padFactor)\n logger.info('Made optical PSF profile')\n\n # Start with square pixels\n pix = galsim.Pixel(xw=pixel_scale, yw=pixel_scale)\n # Then shear them slightly by the negative of the wcs shear.\n # This way the later distortion of the full image will bring them back to square.\n pix.applyShear(-wcs_g1, -wcs_g2)\n logger.info('Made pixel profile')\n\n # Final profile is the convolution of these.\n final = galsim.Convolve([gal, atmos, optics, pix])\n final_epsf = galsim.Convolve([atmos, optics, pix])\n logger.info('Convolved components into final profile')\n\n # Now apply the wcs shear to the final image.\n final.applyShear(wcs_g1, wcs_g2)\n final_epsf.applyShear(wcs_g1, wcs_g2)\n logger.info('Applied WCS distortion')\n\n # Draw the image with a particular pixel scale.\n image = final.draw(dx=pixel_scale)\n image_epsf = final_epsf.draw(dx=pixel_scale)\n # Draw the optical PSF component at its Nyquist sample rate\n image_opticalpsf = optics.draw(dx=lam_over_D/2.)\n logger.info('Made image of the profile')\n\n # Add a constant sky level to the image.\n sky_image = galsim.ImageF(bounds=image.getBounds(), initValue=sky_level)\n image += sky_image\n\n # Add Poisson noise to the image.\n rng = galsim.UniformDeviate(1314662)\n galsim.noise.addPoisson(image, rng, gain=gain)\n\n # Also add (Gaussian) read noise.\n galsim.noise.addGaussian(image, rng, sigma=read_noise)\n\n # Subtract off the sky.\n image -= sky_image\n logger.info('Added Gaussian and Poisson noise')\n\n # Write the image to a file\n if not os.path.isdir('output'):\n os.mkdir('output')\n file_name = os.path.join('output', 'demo3.fits')\n file_name_opticalpsf = os.path.join('output','demo3_opticalpsf.fits')\n file_name_epsf = os.path.join('output','demo3_epsf.fits')\n \n image.write(file_name, clobber=True)\n image_opticalpsf.write(file_name_opticalpsf, clobber=True)\n image_epsf.write(file_name_epsf, clobber=True)\n logger.info('Wrote image to %r', file_name)\n logger.info('Wrote optics-only PSF image (Nyquist sampled) to %r', file_name_opticalpsf)\n logger.info('Wrote effective PSF image to %r', file_name_epsf)\n\n moments = HSM_Moments(file_name)\n moments_corr = HSM_Regauss(file_name, file_name_epsf, image.array.shape)\n\n logger.info('HSM reports that the image has measured moments:')\n logger.info(' Mxx = %.3f, Myy = %.3f, Mxy = %.3f', moments.mxx, moments.myy, moments.mxy)\n logger.info('When carrying out Regaussianization PSF correction, HSM reports')\n logger.info(' g1,g2 = %f,%f', moments_corr.g1, moments_corr.g2)\n logger.info('Expected values in the limit that noise and non-Gaussianity are negligible:')\n logger.info(' g1,g2 = %f,%f', g1+wcs_g1,g2+wcs_g2)\n print",
"def run_example(num_points_to_sample=1000, verbose=True, **kwargs):\n\n exp = Experiment([[1, 52], [0, 6], [1, 52]]) # 2D experiment, we build a tensor product domain\n # Bootstrap with some known or already sampled point(s)\n exp.historical_data.append_sample_points([\n SamplePoint([26, 2, 46], get_fitness([26, 2, 35]), 0.5), # Iterables of the form [point, f_val, f_var] are also allowed\n ])\n # Sample num_points_to_sample points\n for i in range(num_points_to_sample):\n # Use MOE to determine what is the point with highest Expected Improvement to use next\n next_point_to_sample = map(round, gp_next_points(exp, **kwargs)[0]) # in [A, X, B] form, rounded integers\n value_of_next_point = get_fitness(next_point_to_sample)\n\n if verbose:\n if in_results(next_point_to_sample):\n print '***', \"Sampled f({0:s}) = {1:.18E}\".format(str(next_point_to_sample), value_of_next_point), '***'\n else:\n print \"Sampled f({0:s}) = {1:.18E}\".format(str(next_point_to_sample), value_of_next_point)\n\n bank[i,0:3] = next_point_to_sample\n bank[i,3] = value_of_next_point\n # Add the information about the point to the experiment historical data to inform the GP\n exp.historical_data.append_sample_points([SamplePoint(next_point_to_sample, value_of_next_point, 0.01)]) # We can add some noise",
"def test_gaussian_basis_hon(self):\n def row_generator():\n return [random.gauss(0, 1) for i in range(self.d)]\n\n self._test_sample_basis_hon(row_generator)",
"def __init__(self, quantity, dist_weights, gauss_params, upper_bound, lower_bound):\n self.dist_weights = dist_weights\n self.lower_bound = lower_bound\n self.upper_bound = upper_bound\n if len(self.dist_weights) != len(gauss_params):\n print(\n \"Number of distribution weights do not match number of distributions!\"\n )\n diff = len(gauss_params) - len(dist_weights)\n if diff < 0:\n print(\"Ignoring trailing distribution weights\")\n self.dist_weights = self.dist_weights[: len(dist_weights) + diff]\n else:\n print(\"Assuming default weights of 1\")\n self.dist_weights.extend([1] * diff)\n # normalize weights\n self.dist_weights = np.array(\n [float(i) / sum(self.dist_weights) for i in self.dist_weights]\n )\n # create samples\n self.samples = []\n self.gauss_params = gauss_params\n sample_size = quantity\n self.sample_min, self.sample_max = [float(\"inf\"), -float(\"inf\")]\n while True:\n # determine the gaussian to sample from for each sample\n mixture_idx = np.random.choice(\n len(self.dist_weights),\n size=sample_size,\n replace=True,\n p=self.dist_weights,\n )\n # create the samples from the respective gaussian\n temp = np.fromiter(\n (ss.norm.rvs(*(gauss_params[i])) for i in mixture_idx), dtype=np.float64\n )\n # remember mixed sampled extremas for plotting\n self.sample_min = min(self.sample_min, temp.min())\n self.sample_max = max(self.sample_max, temp.max())\n # add those samples that are within the bounds\n self.samples = np.concatenate(\n [\n self.samples,\n np.fromiter(\n [x for x in temp if x <= upper_bound and x >= lower_bound],\n dtype=np.float64,\n ),\n ]\n )\n sample_size = quantity - len(self.samples)\n if sample_size == 0:\n break",
"def model_gauss(xsigma, nx, ny=1, nz=1, ysigma=None, zsigma=None, xcenter=None, ycenter=None, zcenter=None):\n\te = EMData()\n\te.set_size(nx, ny, nz)\n\tif( ysigma == None ) : ysigma = xsigma\n\tif( zsigma == None ) : zsigma = xsigma\n\tif( xcenter == None ) : xcenter = nx//2\n\tif( ycenter == None ) : ycenter = ny//2\n\tif( zcenter == None ) : zcenter = nz//2\n\te.process_inplace(\"testimage.puregaussian\", {\"x_sigma\":xsigma,\"y_sigma\":ysigma,\"z_sigma\":zsigma,\"x_center\":xcenter,\"y_center\":ycenter,\"z_center\":zcenter} )\n\treturn e"
] |
[
"0.58995914",
"0.5848525",
"0.5785762",
"0.56935716",
"0.5683083",
"0.5672991",
"0.5658776",
"0.5653742",
"0.5651552",
"0.5644565",
"0.5640411",
"0.5552061",
"0.54944736",
"0.5456843",
"0.54314345",
"0.5376272",
"0.5372925",
"0.536842",
"0.5357922",
"0.53472793",
"0.5326309",
"0.5309391",
"0.5305133",
"0.53045255",
"0.5299053",
"0.5298955",
"0.52885556",
"0.5278383",
"0.5271083",
"0.52648515"
] |
0.6641757
|
0
|
Gets the horizontal and vertical fairlead force in a 2D plane along the straight line. Must ensure update_states() is called before accessing this function; otherwise the forces are not solved for the new vessel position and the fairlead forces are not updated.
|
def get_fairlead_force_2d(self, index):
    H_ref = c_double(-999.9)
    V_ref = c_double(-999.9)
    Map.lib.map_get_fairlead_force_2d(pointer(H_ref), pointer(V_ref), self.f_type_d, index, self.status, pointer(self.ierr))
    return H_ref.value, V_ref.value
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def _compute_forces(self):\n # get new coeffs\n self._get_coeffs()\n\n # instead of writing many time\n awa = self.awa / 180.0 * np.pi\n\n # lift and drag\n self.lift = 0.5 * self.rho * self.aws ** 2 * self.area * self.cl\n self.drag = 0.5 * self.rho * self.aws ** 2 * self.area * self.cd + self._get_Rw(awa)\n\n # project into yacht coordinate system\n self.Fx = self.lift * np.sin(awa) - self.drag * np.cos(awa)\n self.Fy = self.lift * np.cos(awa) + self.drag * np.sin(awa)\n\n # heeling moment\n self.Mx = self.Fy * self._vce() * np.cos(self.phi / 180.0 * np.pi)\n\n # side-force is horizontal component of Fh\n self.Fy *= np.cos(np.deg2rad(self.phi))",
"def determine_doubler_plate(self, connection_type, steel, left_beam, right_beam, bottom_column, top_column):\r\n if connection_type == 'top exterior':\r\n # Connection has one left beam and one bottom column\r\n h_bot = bottom_column.unbraced_length['x']*12.0 # Be cautious: convert the unit from ft to inch\r\n db = left_beam.section['d']\r\n tf = left_beam.section['tf']\r\n self.shear_force['Vc'] = (self.moment['Mf1']+0) / (h_bot/2+0)\r\n self.shear_force['Ru'] = (self.moment['Mf1']+0)/(db-tf) - self.shear_force['Vc']\r\n elif connection_type == 'typical exterior':\r\n # Connection has one left beam and two columns\r\n h_bot = bottom_column.unbraced_length['x']*12.0 # Be cautious: convert the unit from ft to inch\r\n h_top = top_column.unbraced_length['x']*12.0 # Be cautious: convert the unit from ft to inch\r\n db = left_beam.section['d']\r\n tf = left_beam.section['tf']\r\n self.shear_force['Vc'] = (self.moment['Mf1']+0) / (h_bot/2+h_top/2)\r\n self.shear_force['Ru'] = (self.moment['Mf1']+0)/(db-tf) - self.shear_force['Vc']\r\n elif connection_type == 'top interior':\r\n # Connection has two beams and one bottom column\r\n h_bot = bottom_column.unbraced_length['x']*12.0 # Be cautious: convert the unit from ft to inch\r\n # Actually left and right beams have the identical sizes\r\n db = (left_beam.section['d'] + right_beam.section['d'])/2\r\n tf = (left_beam.section['tf'] + right_beam.section['tf'])/2\r\n self.shear_force['Vc'] = (self.moment['Mf1']+self.moment['Mf2']) / (h_bot/2)\r\n self.shear_force['Ru'] = (self.moment['Mf1']+self.moment['Mf2'])/(db-tf) - self.shear_force['Vc']\r\n elif connection_type == 'typical interior':\r\n # Connection has two beams and two columns\r\n h_bot = bottom_column.unbraced_length['x']*12.0 # Be cautious: convert the unit from ft to inch\r\n h_top = top_column.unbraced_length['x']*12.0 # Be cautious: convert the unit from ft to inch\r\n db = (left_beam.section['d'] + right_beam.section['d']) / 2\r\n tf = (left_beam.section['tf'] + right_beam.section['tf']) / 2\r\n self.shear_force['Vc'] = (self.moment['Mf1']+self.moment['Mf2']) / (h_bot/2+h_top/2)\r\n self.shear_force['Ru'] = (self.moment['Mf1']+self.moment['Mf2'])/(db-tf) - self.shear_force['Vc']\r\n else:\r\n sys.stderr.write('Error: wrong type of connection specified!\\nNo such keyword for connection exists!\\n')\r\n sys.exit(2)\r\n # Compute the shear strength of the panel zone\r\n phi = 1.0\r\n dc = bottom_column.section['d']\r\n tw = bottom_column.section['tw']\r\n bcf = bottom_column.section['bf']\r\n tcf = bottom_column.section['tf']\r\n db = left_beam.section['d']\r\n self.shear_force['Rn'] = 0.60 * steel.Fy * dc * tw * (1+(3*bcf*tcf**2)/(db*dc*tw))\r\n # Compute the doubler plate thickness\r\n if phi*self.shear_force['Rn'] >= self.shear_force['Ru']:\r\n # Panel zone shear strength is sufficient ==> no need for doubler plate\r\n self.doubler_plate_thickness = 0\r\n else:\r\n # Panel zone shear strength is not sufficient ==> need doubler plate\r\n required_tp = (self.shear_force['Ru'] - 0.60*steel.Fy*(3*bcf*tcf**2)/db) / (0.60*steel.Fy*dc)\r\n tp = 0.25 # Assumed doubler plate thickness\r\n while tp < required_tp:\r\n tp += 0.25 # Update the thickness at an increment of 0.25 until it reaches the requirement\r\n self.doubler_plate_thickness = tp",
"def vlinecomp(self):\n m_h, c_h = self.fitline(0,2) # Computes the equation for a line joining the points on the outside of the gear on opposites sides of the edm cut\n\n m_v_avg = self.average_grad() # Computes the average gradient of the constructed vertical line\n\n m_v_avg, c_v = self.line_through_point(m_v_avg,4) # Equation of line with average gradient though crack start point\n\n x_intersect,y_intersect = self.intersect_point(m_h, c_h, m_v_avg, c_v)\n\n coord_top = [x_intersect,y_intersect]\n coord_bot = [self.points[4, 0], self.points[4, 1]]\n\n distance = self.distance(coord_bot,coord_top)\n\n return coord_top, coord_bot, distance",
"def get_force(self):\n # @todo: Probably need to check the state of the landing gear for this (e.g. are they on the track?).\n # Note: you can get the state of the landing gear by going through self.sim \n return 0.0",
"def CalcForce_aeroframe_DEP(V, CoefMatrix, x, rho, g):\r\n\r\n #Compute aero forces\r\n # here x must be of the form (alpha, beta, p, q, r, da, dr, de) (last one punctualy used)\r\n # set non dim for p,q,r\r\n nonDim=np.ones(7)\r\n nonDim[2]=g.b/(2*V)\r\n nonDim[3]=g.c/(2*V)\r\n nonDim[4]=g.b/(2*V)\r\n # F=np.dot(CoefMatrix,x[0:7]) # commented form, modification to account for symmetric drag increase of side slip\r\n F=np.zeros((3))\r\n M=np.zeros((3))\r\n xsym=np.copy(x[0:-1])\r\n xsym[1]=abs(xsym[1]) # make beta always positive since derivatives have already correct sign for drag and lift only\r\n xsym[-3]=abs(xsym[-3]) # make ailerons deflection always positive for drag increase and lift decrease\r\n xsym[-1]=abs(xsym[-1]) # make rudder deflection always positive for drag increase and lift decrease\r\n F[0]=np.dot(CoefMatrix[0],xsym)\r\n F[1]=np.dot(CoefMatrix[1],x[0:-1]) #side force\r\n F[2]=np.dot(CoefMatrix[2],xsym)\r\n M=np.dot(CoefMatrix[3:6,:],x[0:-1])\r\n# print(\"Printing moment coeff\")\r\n# print(M)\r\n\r\n \r\n #No need to project\r\n# alpha=x[0]\r\n# beta=x[1]\r\n# H=np.array([[math.cos(alpha)*math.sin(beta), -math.cos(alpha)*math.sin(beta), -math.sin(alpha)],[math.sin(beta), math.cos(beta), 0],[math.sin(alpha)*math.cos(beta), -math.sin(alpha)*math.sin(beta), math.cos(alpha)]])\r\n if V<=71 :\r\n Fbody=np.array([-F[0]-g.Cd0_fl,F[1],-F[2]-g.CL0_fl]) # add alpha=0 coefficients\r\n Moment=M+np.array([0,x[-1]*g.Cm_de+g.Cm0_fl,0])\r\n else:\r\n Fbody=np.array([-F[0]-g.Cd0,F[1],-F[2]-g.CL0]) # add alpha=0 coefficients\r\n Moment=M+np.array([0,x[-1]*g.Cm_de+g.Cm0,0])\r\n \r\n\r\n Fbody=0.5*V**2.0*rho*g.S*Fbody\r\n Moment=0.5*V**2.0*rho*g.S*g.b*Moment\r\n \r\n return np.append(Fbody, Moment)",
"def car_dynamics(self,x, t, u, p):\n # f = vehicle_dynamics_ks(x, u, p)\n f = vehicle_dynamics_st(x, u, p)\n # f = vehicle_dynamics_std(x, u, p)\n # f = vehicle_dynamics_mb(x, u, p)\n return f",
"def straight_line(vel, init_pos, final_pos):\n\n acc = np.zeros (3)\n yaw = 1.0\n yawdot = 1.0\n\n p = (final_pos[2] - init_pos[2])/vel [2]\n #acc = (final_pos[2] - init_pos[2])/dt ** 2\n\n # constant velocity\n pos = init_pos + np.array([0, 0, p])\n FinalState = namedtuple('FinalState', 'pos vel acc yaw yawdot')\n return FinalState(pos, vel, acc, yaw, yawdot)",
"def sectional_force(self,strain):\r\n\t\treturn self.steel_total_force(strain) + \\\r\n\t\t\tself.concrete_total_force(strain)",
"def get3DCoord(self, colour, startCoord, rowDirection, columnDirection, points, isLine = 1, arrangement = \"LowestY\"):\r\n #We create a list to store the coordinates results\r\n\r\n meshFrontLocation = self.getFrontCoord(colour, startCoord, rowDirection, columnDirection, arrangement, isLine, points)\r\n print \"mesh front location has %s points\" % len(meshFrontLocation)\r\n\r\n #We check if any colour is detected in the front view first before we scan side view\r\n if len(meshFrontLocation) == 0:\r\n return\r\n\r\n #Reset the startCoord\r\n meshSideLocation = self.getSideCoord(colour, startCoord, rowDirection, columnDirection, arrangement, isLine)\r\n\r\n\r\n #We check if any colour is detected in the side view. If there is no colour, there is an error\r\n if len(meshSideLocation) == 0:\r\n print \"Detected colour [%s, %s, %s] in front but not side. Please check\" % (colour[0], colour[1], colour[2])\r\n return\r\n\r\n print \"mesh side location has %s points\" % len(meshSideLocation)\r\n\r\n #We now fix the offset between the front and the side location\r\n #We get the highest and lowest y value from the front location\r\n TempFront = meshFrontLocation\r\n FrontLowestY = self.rearrange(TempFront, \"LowestY\")[0][1]\r\n FrontHighestY = self.rearrange(TempFront, \"HighestY\")[0][1]\r\n TempSide = meshSideLocation\r\n SideLowestY = self.rearrange(TempSide, \"LowestY\")[0][1]\r\n SideHighestY = self.rearrange(TempSide, \"HighestY\")[0][1]\r\n\r\n #We then get the middle y for the 2 views and get the offset\r\n #We keep it to int as pixels doesn't exist as floats.\r\n FrontMiddleY = (FrontLowestY + FrontHighestY) / 2\r\n SideMiddleY = (SideLowestY + SideHighestY) / 2\r\n MidOffset = FrontMiddleY - SideMiddleY\r\n\r\n #We add the offset to the meshSideLocation\r\n for i in range(0, len(meshSideLocation)):\r\n meshSideLocation[i][1] += MidOffset\r\n\r\n #We check if the front y range is bigger then the side y range\r\n FrontRangeY = FrontHighestY - FrontLowestY\r\n SideRangeY = SideHighestY - SideLowestY\r\n if FrontRangeY > SideRangeY:\r\n cmds.warning(\"The side image for colour [%s, %s, %s] range is smaller then front image. Not enough data to calculate 3D values\" % (colour[0], colour[1], colour[2]))\r\n cmds.warning(\"The range values are FrontRange: %s, SideRange: %s\" % (FrontRangeY, SideRangeY))\r\n cmds.warning(\"The frontHighest Y is %s and the frontLowestY is %s\" % (FrontHighestY, FrontLowestY))\r\n cmds.warning(\"The sideHighest Y is %s and the sideLowestY is %s\" % (SideHighestY, SideLowestY))\r\n return\r\n\r\n #We then get the matching Y coordinates from the meshFrontLocation and the meshSideLocation\r\n mesh3DCoord = []\r\n\r\n for i in range(0, len(meshFrontLocation)):\r\n match = 0\r\n for j in range(0, len(meshSideLocation)):\r\n if meshFrontLocation[i][1] == meshSideLocation[j][1]:\r\n mesh3DCoord.append([meshFrontLocation[i][0], meshFrontLocation[i][1], meshSideLocation[j][0]])\r\n match = 1\r\n break\r\n if match==0:\r\n print \"There is no match found for meshFrontLocation[i][1] = %s\" % meshFrontLocation[i][1]\r\n print \"Ensure your side view has more pixels then front\"\r\n\r\n\r\n return mesh3DCoord",
"def get_fairlead_force_3d(self, index):\n fx = c_double(-999.9)\n fy = c_double(-999.9)\n fz = c_double(-999.9)\n Map.lib.map_get_fairlead_force_3d( pointer(fx), pointer(fy), pointer(fz), self.f_type_d, index, self.status, pointer(self.ierr))\n return fx.value, fy.value, fz.value",
"def test_force(self):\n group = hoomd.group.all()\n\n # compute forces\n f = azplugins.restrain.plane(group=group, point=(0,0,0), normal=(1,0,0), k=2.0)\n hoomd.run(1)\n np.testing.assert_array_almost_equal(f.forces[0].force, (-2.,0,0))\n np.testing.assert_array_almost_equal(f.forces[1].force, ( 2.,0,0))\n np.testing.assert_array_almost_equal(f.forces[2].force, ( 6.,0,0))\n self.assertAlmostEqual(f.forces[0].energy, 1.)\n self.assertAlmostEqual(f.forces[1].energy, 1.)\n self.assertAlmostEqual(f.forces[2].energy, 9.)\n np.testing.assert_array_almost_equal(f.forces[0].virial, (-2.,0,0,0,0,0))\n np.testing.assert_array_almost_equal(f.forces[1].virial, (-2.,0,4.,0,0,0))\n np.testing.assert_array_almost_equal(f.forces[2].virial, (12.,0,0,0,0,0))\n\n # change the spring constant\n f.set_params(k=1.0)\n hoomd.run(1)\n np.testing.assert_array_almost_equal(f.forces[0].force, (-1.,0,0))\n np.testing.assert_array_almost_equal(f.forces[1].force, ( 1.,0,0))\n np.testing.assert_array_almost_equal(f.forces[2].force, ( 3.,0,0))\n self.assertAlmostEqual(f.forces[0].energy, 0.5)\n self.assertAlmostEqual(f.forces[1].energy, 0.5)\n self.assertAlmostEqual(f.forces[2].energy, 4.5)\n\n # shift the plane down\n f.set_params(point=(-1,0,0))\n hoomd.run(1)\n np.testing.assert_array_almost_equal(f.forces[0].force, (-2.,0,0))\n np.testing.assert_array_almost_equal(f.forces[1].force, ( 0.,0,0))\n np.testing.assert_array_almost_equal(f.forces[2].force, ( 2.,0,0))\n self.assertAlmostEqual(f.forces[0].energy, 2.0)\n self.assertAlmostEqual(f.forces[1].energy, 0.0)\n self.assertAlmostEqual(f.forces[2].energy, 2.0)\n\n # rotate the plane so that only particle 1 is off the line\n f.set_params(point=(0,0,0), normal=(0,0,1))\n hoomd.run(1)\n np.testing.assert_array_almost_equal(f.forces[0].force, (0,0,0))\n np.testing.assert_array_almost_equal(f.forces[1].force, (0,0,-2))\n np.testing.assert_array_almost_equal(f.forces[2].force, (0,0,0))\n self.assertAlmostEqual(f.forces[0].energy, 0.0)\n self.assertAlmostEqual(f.forces[1].energy, 2.0)\n self.assertAlmostEqual(f.forces[2].energy, 0.0)",
"def _calc_side(self):\n\n # Calculation of the side of the car with respect to the trajectory\n next_index = self.index + 1\n\n if next_index == len(self.x_trajectory):\n next_index = self.index\n\n trajectory_vector = ((self.x_trajectory[next_index]\n - self.x_trajectory[self.index]),\n (self.y_trajectory[next_index]\n - self.y_trajectory[self.index]))\n\n x_diff = self.x - self.x_trajectory[self.index]\n y_diff = self.y - self.y_trajectory[self.index]\n\n ugv_vector = (x_diff, y_diff)\n\n vector_z = ugv_vector[0] * trajectory_vector[1] \\\n - ugv_vector[1] * trajectory_vector[0]\n\n if vector_z >= 0:\n\n # It is in the right side\n self.sign = 1\n\n else:\n\n # It is in the left side\n self.sign = -1\n\n return self.sign",
"def vertical_landing(\n conn: Client,\n landing_speed: float = 5.0,\n auto_stage: bool = True,\n stop_stage: int = 0,\n target_lat: Optional[float] = None,\n target_lon: Optional[float] = None,\n deploy_legs_on_entry: bool = True,\n retract_palens_on_entry: bool = True,\n use_rcs_on_entry: bool = False,\n entry_attitude: str = \"Retrograde\",\n entry_attitude_func: Callable[[Vessel], None] = None,\n deploy_legs_on_decent: bool = True,\n retract_palens_on_decent: bool = True,\n use_rcs_on_landing: bool = False,\n use_parachute: bool = True,\n) -> None:\n vessel = conn.space_center.active_vessel\n body = vessel.orbit.body\n\n # check retrograde and radial hold capability\n use_sas = False\n try:\n vessel.control.sas = True\n vessel.control.sas_mode = vessel.control.sas_mode.retrograde\n vessel.control.sas_mode = vessel.control.sas_mode.radial\n use_sas = True\n except Exception:\n pass\n vessel.control.sas = False\n\n # Set up dialog and stream\n dialog = StatusDialog(conn)\n surface_gravity = body.surface_gravity\n equatorial_radius = body.equatorial_radius\n has_atmosphere = body.has_atmosphere\n atmosphere_depth = body.atmosphere_depth\n\n ref_frame = conn.space_center.ReferenceFrame.create_hybrid(\n position=body.reference_frame, rotation=vessel.surface_reference_frame\n )\n flight = vessel.flight(ref_frame)\n\n ut = conn.add_stream(getattr, conn.space_center, \"ut\")\n mass = conn.add_stream(getattr, vessel, \"mass\")\n available_thrust = conn.add_stream(getattr, vessel, \"available_thrust\")\n radius = conn.add_stream(getattr, vessel.orbit, \"radius\")\n altitude = conn.add_stream(getattr, flight, \"surface_altitude\")\n mean_altitude = conn.add_stream(getattr, flight, \"mean_altitude\")\n speed = conn.add_stream(getattr, flight, \"speed\")\n vertical_speed = conn.add_stream(getattr, flight, \"vertical_speed\")\n horizontal_speed = conn.add_stream(getattr, flight, \"horizontal_speed\")\n\n vessel.control.sas = True\n vessel.control.speed_mode = vessel.control.speed_mode.surface\n\n # set staging\n if auto_stage:\n set_autostaging(conn, stop_stage=stop_stage)\n\n # check unguided or guided\n guided_landing = True\n if target_lat is None or target_lon is None:\n guided_landing = False\n\n if not guided_landing:\n kill_horizontal_velocity(conn, use_sas)\n\n ####\n # pre-entry phase\n vessel.control.rcs = use_rcs_on_entry\n\n # pre-entry guidance\n if guided_landing:\n vessel.auto_pilot.reference_frame = ref_frame\n vessel.auto_pilot.engage()\n\n last_ut = ut()\n bearing, distance, landing_position_error = landing_target_steering(\n vessel, target_lat, target_lon, ut()\n )\n last_landing_position_error = landing_position_error\n last_throttle = 0\n\n while True:\n a100 = available_thrust() / mass()\n bounding_box = vessel.bounding_box(vessel.surface_reference_frame)\n lower_bound = bounding_box[0][0]\n\n landing_radius = equatorial_radius + lower_bound\n if guided_landing:\n landing_radius = max(\n landing_radius,\n landing_radius\n + body.surface_height(target_lat, target_lon),\n )\n\n bearing, distance, landing_position_error = landing_target_steering(\n vessel, target_lat, target_lon, ut()\n )\n\n if has_atmosphere:\n atmosphere_radius = equatorial_radius + atmosphere_depth\n if atmosphere_depth > altitude() and vertical_speed() < 0:\n break\n\n entry_ut, entry_speed = time_to_radius(\n vessel.orbit, atmosphere_radius, ut()\n )\n if entry_ut is None:\n break\n entry_lead_time = entry_ut - ut()\n\n if landing_position_error / distance < 0.05:\n if entry_lead_time > 120:\n 
conn.space_center.warp_to(entry_ut - 60)\n else:\n break\n else:\n impact_ut, terminal_speed = time_to_radius(\n vessel.orbit, landing_radius, ut()\n )\n burn_time = burn_prediction(terminal_speed, a100)\n burn_ut = impact_ut - burn_time\n burn_lead_time = burn_ut - ut()\n if burn_lead_time < 30:\n break\n if landing_position_error / distance < 0.05:\n if burn_lead_time > 10:\n conn.space_center.warp_to(burn_ut - 60)\n else:\n break\n\n vessel.auto_pilot.target_pitch_and_heading(0, bearing)\n\n if vessel.auto_pilot.heading_error < 1:\n try:\n landing_pos_corrected = (\n last_landing_position_error - landing_position_error\n )\n dt = ut() - last_ut\n instant_rate_per_throttle = (\n landing_pos_corrected / dt / last_throttle\n )\n instant_rate_per_throttle = max(\n 1.0, instant_rate_per_throttle\n )\n vessel.control.throttle = min(\n 1.0,\n max(\n 0.05,\n landing_position_error / instant_rate_per_throttle,\n ),\n )\n except Exception:\n vessel.control.throttle = 0.05\n else:\n vessel.control.throttle = 0\n\n dialog.status_update(\n f\"landing_position error: {landing_position_error: 5.3f}, bearing: {bearing: 5.3f}\"\n )\n\n last_ut = ut()\n last_landing_position_error = landing_position_error\n last_throttle = vessel.control.throttle\n\n vessel.control.throttle = 0\n vessel.auto_pilot.disengage()\n\n ####\n # entry\n vessel.control.sas = True\n vessel.control.sas_mode = vessel.control.sas_mode.retrograde\n\n # on entry: deploy leg, retract panel\n if deploy_legs_on_entry:\n deploy_legs(conn)\n if retract_palens_on_entry:\n retract_panels(conn)\n\n # wait for entry\n if has_atmosphere and atmosphere_depth < altitude():\n warp_to_radius = atmosphere_depth + equatorial_radius\n entry_ut, terminal_speed = time_to_radius(\n vessel.orbit, warp_to_radius, ut()\n )\n sec_until_entry = entry_ut - ut()\n if sec_until_entry > 30:\n dialog.status_update(\n f\"Warp for entry - 5sec: {sec_until_entry: 5.3f}\"\n )\n conn.space_center.warp_to(entry_ut - 5)\n time.sleep(5)\n\n ####\n # landing phase\n vessel.control.rcs = use_rcs_on_landing\n\n # warp for burn\n last_ut = ut()\n while True:\n a100 = available_thrust() / mass()\n lower_bound = vessel.bounding_box(vessel.surface_reference_frame)[0][0]\n\n landing_radius = equatorial_radius + lower_bound\n landing_altitude = altitude() + lower_bound\n if guided_landing:\n landing_radius = max(\n landing_radius,\n landing_radius + body.surface_height(target_lat, target_lon),\n )\n landing_altitude = max(\n landing_altitude,\n landing_altitude + body.surface_height(target_lat, target_lon),\n )\n\n impact_ut, terminal_speed = impact_prediction(\n radius(),\n landing_altitude,\n vertical_speed(),\n horizontal_speed(),\n surface_gravity,\n ut(),\n )\n burn_time = burn_prediction(terminal_speed, a100)\n burn_lead_time = impact_ut - burn_time - ut()\n\n if burn_lead_time and burn_lead_time > (ut() - last_ut) * 1.5 + 2:\n if not has_atmosphere and burn_lead_time > 30:\n dialog.status_update(\n f\"Warp for decereration burn - 30sec: {burn_lead_time: 5.3f}\"\n )\n conn.space_center.warp_to(ut() + burn_lead_time - 30)\n time.sleep(5)\n else:\n dialog.status_update(\n f\"Wait for decereration burn: {burn_lead_time: 5.3f} sec; ut: {ut(): 5.3f}\"\n )\n else:\n break\n last_ut = ut()\n time.sleep(0.1)\n\n # on decent: deploy leg, retract panel\n if deploy_legs_on_decent:\n deploy_legs(conn)\n if retract_palens_on_decent:\n retract_panels(conn)\n\n if not guided_landing:\n # kill horizontal velocity again\n kill_horizontal_velocity(conn, use_sas)\n\n # Main decent loop\n 
last_sas_mode = vessel.control.sas_mode\n while True:\n a100 = available_thrust() / mass()\n bounding_box = vessel.bounding_box(vessel.surface_reference_frame)\n lower_bound = bounding_box[0][0]\n\n landing_radius = mean_altitude() + lower_bound\n landing_altitude = altitude() + lower_bound\n impact_ut, terminal_speed = impact_prediction(\n radius(),\n landing_altitude,\n vertical_speed(),\n horizontal_speed(),\n surface_gravity,\n ut(),\n )\n burn_time = burn_prediction(terminal_speed, a100)\n burn_lead_time = impact_ut - burn_time - ut()\n\n dialog.status_update(\n f\"Alt: {altitude(): 5.3f}, Speed {speed(): 5.3f} m/s (H: {horizontal_speed(): 5.3f}, V: {vertical_speed(): 5.3f}), \"\n f\"a: {a100: 5.3f}, g: {surface_gravity: 5.3f}, \"\n f\"landing in: {impact_ut - ut(): 5.3f} sec, burn lead time: {burn_lead_time: 5.3f} sec\"\n )\n\n if use_sas:\n if horizontal_speed() > 0.5 and speed() > 1.0:\n if last_sas_mode != vessel.control.sas_mode.retrograde:\n vessel.control.sas_mode = vessel.control.sas_mode.retrograde\n last_sas_mode = vessel.control.sas_mode.retrograde\n elif last_sas_mode != vessel.control.sas_mode.radial:\n vessel.control.sas_mode = vessel.control.sas_mode.radial\n last_sas_mode = vessel.control.sas_mode.radial\n else:\n # TODO: auto-pilot\n pass\n\n throttle = max(\n 0,\n min(\n 1.0,\n (-vertical_speed() + surface_gravity - landing_speed) / a100,\n ),\n )\n vessel.control.throttle = throttle\n\n if is_grounded(vessel):\n vessel.control.sas_mode.radial\n vessel.control.throttle = 0\n break\n\n last_ut = ut()\n\n dialog.status_update(\"Landed\")\n\n # keep sas on for a bit to maintain landing stability\n time.sleep(5)\n\n if use_sas:\n vessel.control.sas = False\n else:\n vessel.auto_pilot.disengage()\n\n if auto_stage:\n unset_autostaging()\n\n return",
"def _curvature(self):\n y_eval = self.left_fitx.shape[0] - 10\n left_curverad = (((1 + (2 * self.left_fit[0] * y_eval + self.left_fit[1]) ** 2) ** 1.5) /\n np.absolute(2 * self.left_fit[0]))\n right_curverad = (((1 + (2 * self.right_fit[0] * y_eval + self.right_fit[1]) ** 2) ** 1.5) /\n np.absolute(2 * self.right_fit[0]))\n return left_curverad, right_curverad",
"def force ( box, strain, r ):\n\n import numpy as np\n from itertools import product\n import math\n \n # It is assumed that positions are in units where box = 1\n # Forces are calculated in units where sigma = 1 and epsilon = 1\n # Lees-Edwards boundaries, in sliding brick arrangement\n # Flow/gradient/vorticity directions are x/y/z == 0/1/2\n # Uses neighbour lists\n\n n = r.shape[0]\n\n # Set up vectors to half the cells in neighbourhood of 3x3x3 cells in cubic lattice\n # The cells are chosen so that if (d0,d1,d2) appears, then (-d0,-d1,-d2) does not.\n # The last three cells are extra ones, to cope with the sheared system\n d = np.array ( [ [ 0, 0, 0], [ 1, 0, 0], [ 1, 0, 1], [-1, 0, 1], [ 0, 0, 1], # 5 cells with d1=0\n [ 1, 1, -1], [ 1, 1, 0], [ 1, 1, 1], # 3 cells with d0= 1, d1=1\n [ 0, 1, -1], [ 0, 1, 0], [ 0, 1, 1], # 3 cells with d0= 0, d1=1\n [-1, 1, -1], [-1, 1, 0], [-1, 1, 1], # 3 cells with d0=-1, d1=1\n [-2, 1, -1], [-2, 1, 0], [-2, 1, 1] ] ) # 3 cells with d0=-2, d1=1\n\n r[:,0] = r[:,0] - np.rint(r[:,1])*strain # Extra correction in box=1 units\n r = r - np.rint(r) # Ensure all atoms in periodic box\n \n sr2_ovr = 1.77 # Overlap threshold (pot > 100)\n r_cut_box = r_cut / box\n r_cut_box_sq = r_cut_box ** 2\n box_sq = box ** 2\n\n # Initialize\n f = np.zeros_like(r)\n total = PotentialType ( pot=0.0, vir=0.0, pyx=0.0, lap=0.0, ovr=False )\n\n # Calculate cell index triplets\n sc = math.floor(box/r_cut) # Number of cells along box edge\n assert sc >= 3, 'System is too small for cells' # Guard against box being too small\n c = np.floor((r+0.5)*sc).astype(np.int_) # N*3 array of cell indices for all atoms\n assert np.all(c>=0) and np.all(c<sc), 'Index error' # Simplistic \"guard\" against roundoff\n\n shift = math.floor(strain*sc) # Strain measured in cell lengths\n\n if fast:\n \n # Build list of arrays, each array holding positions of atoms in a cell\n # At the same time, define a matching set of force arrays in each cell\n # i and j number the atoms in each cell; we do not refer explicitly to indices in r\n rc, fc = [], [] # Initially empty lists of positions and forces\n for ci in product(range(sc),repeat=3): # Triple loop over cells\n mask = np.all(c==ci,axis=1) # Mask identifies atoms in this cell\n rc.append(r[mask,:]) # Copy atom coordinates into array, add to list\n fc.append(np.zeros_like(rc[-1])) # Zero corresponding forces, add to list\n\n for ci1, rci in enumerate(rc): # Loop over i-cells, getting all atoms in each i-cell as an array\n ci = np.unravel_index(ci1,(sc,sc,sc)) # Get i-cell triple-indices\n if rci.size==0: # Handle empty cell\n continue\n\n # Set up correct neighbour cell indices\n if ci[1]==sc-1: # i-cell is in the top layer\n dd = d.copy() # Standard list copied, including extra 3 cells\n dd[5:,0] = d[5:,0] - shift # All those looking up need adjustment in the x direction\n else: # i-cell is not in top layer\n dd = d[:-3,:].copy() # Last three extra cells are not needed; shift is not needed\n \n for dj in dd: # Loop over neighbouring j-cells\n cj = ci + dj # Compute neighbour j-cell triple-indices\n cj1 = np.ravel_multi_index(cj,(sc,sc,sc),mode='wrap') # Convert j-cell to single-index\n rcj = rc[cj1] # Get atoms in j-cell as an array\n if rcj.size==0: # Handle empty cell\n continue\n\n rij = rci[:,np.newaxis,:]-rcj[np.newaxis,:,:] # Separation vectors for all i and j\n rij[:,:,0] = rij[:,:,0] - np.rint(rij[:,:,1])*strain # Extra correction in box=1 units\n rij = rij - np.rint(rij) # PBCs in box=1 units\n rij_sq = np.sum(rij**2,axis=2) # Squared 
separations\n in_range = rij_sq < r_cut_box_sq # Set flags for within cutoff\n\n if ci1==cj1:\n np.fill_diagonal(in_range,False) # Eliminate i==j when i-cell==j-cell\n np.fill_diagonal(rij_sq,1.0) # Avoid divide-by-zero below\n\n rij_sq = rij_sq * box_sq # Now in sigma=1 units\n rij = rij * box # Now in sigma=1 units\n sr2 = np.where ( in_range, 1.0/rij_sq, 0.0 ) # (sigma/rij)**2, only if in range\n ovr = sr2 > sr2_ovr # Overlap if too close\n sr6 = sr2 ** 3\n sr12 = sr6 ** 2\n pot = sr12 - sr6 # LJ potential (cut but not shifted)\n vir = pot + sr12 # LJ virial\n pot = np.where ( in_range, pot+0.25, 0.0 ) # WCA LJ pair potential (cut-and-shifted)\n lap = ( 22.0*sr12 - 5.0*sr6 ) * sr2 # LJ Laplacian\n fij = vir * sr2 # LJ scalar part of forces\n fij = rij * fij[:,:,np.newaxis] # LJ pair forces\n pyx = rij[:,:,1]*fij[:,:,0] # Off-diagonal element of pressure tensor\n\n if ci1==cj1: # Correct for double-counting ij and ji when i-cell==j-cell\n fij = fij / 2\n total = total + PotentialType ( pot=np.sum(pot)/2, vir=np.sum(vir)/2, \n pyx=np.sum(pyx)/2, lap=np.sum(lap)/2, ovr=np.any(ovr) )\n else:\n total = total + PotentialType ( pot=np.sum(pot), vir=np.sum(vir), \n pyx=np.sum(pyx), lap=np.sum(lap), ovr=np.any(ovr) )\n\n fc[ci1][:,:] = fc[ci1][:,:] + np.sum(fij,axis=1) # Aggregate force on atoms in i-cell\n fc[cj1][:,:] = fc[cj1][:,:] - np.sum(fij,axis=0) # Aggregate force on atoms in j-cell\n\n # Copy forces from list of cell arrays to main force array\n for ci in product(range(sc),repeat=3): # Triple loop over cells\n mask = np.all(c==ci,axis=1) # Mask identifies atoms in this cell\n ci1 = np.ravel_multi_index(ci,(sc,sc,sc),mode='wrap') # Single-index\n f[mask,:] = fc[ci1] # Copy atom forces from correct cell\n\n else:\n \n # Build list of arrays, each array holding indices of atoms in a cell\n # ki and kj are atom indices in the r array; i and j number the atoms in each cell\n k_array = np.arange(n) # Atom indices 0..N-1\n kc = [] # Initially empty list of indices\n for ci in product(range(sc),repeat=3): # Triple loop over cells\n mask = np.all(c==ci,axis=1) # Mask identifies atoms in this cell\n kc.append(k_array[mask]) # Copy atom indices into array, add to list\n\n for ci1, kci in enumerate(kc): # Loop over i-cells, getting atom indices as an array\n ci = np.unravel_index(ci1,(sc,sc,sc)) # Get i-cell triple-indices\n\n # Set up correct neighbour cell indices\n if ci[1]==sc-1: # i-cell is in the top layer\n dd = d # Standard list copied, including extra 3 cells\n dd[5:,0] = dd[5:,0] - shift # All those looking up need adjustment in the x direction\n else:\n dd = d[:-3,:] # Last three extra cells are not needed; shift is not needed\n\n for dj in dd: # Loop over neighbouring j-cells\n cj = ci + dj # Compute neighbour j-cell triple-indices\n cj1 = np.ravel_multi_index(cj,(sc,sc,sc),mode='wrap') # Convert to single-index\n kcj = kc[cj1] # Get indices of atoms in j-cell as an array\n\n for i, ki in enumerate(kci): # Loop over individual atoms in i-cell\n j0 = i+1 if cj1==ci1 else 0 # Only look upwards if i-cell==j-cell\n if j0 >= kcj.size: # Handles (redundantly) empty j-cell and the case \n continue # where j-cell==i-cell and i is last atom\n\n for kj in kcj[j0:]: # Loop over individual atoms in j-cell\n rij = r[ki,:]-r[kj,:] # Separation vector\n rij[0] = rij[0] - np.rint(rij[1])*strain # Extra correction in box=1 units\n rij = rij - np.rint(rij) # Periodic boundary conditions in box=1 units\n rij_sq = np.sum(rij**2) # Squared separation\n\n if rij_sq < r_cut_box_sq: # Check within cutoff\n 
rij_sq = rij_sq * box_sq # Now in sigma=1 units\n rij = rij * box # Now in sigma=1 units\n sr2 = 1.0 / rij_sq # (sigma/rij)**2\n ovr = sr2 > sr2_ovr # Overlap if too close\n sr6 = sr2 ** 3\n sr12 = sr6 ** 2\n pot = sr12 - sr6 # LJ potential (cut but not shifted)\n vir = pot + sr12 # LJ virial\n pot = pot + 0.25 # WCA LJ potential (cut-and-shifted)\n lap = ( 22.0*sr12 - 5.0*sr6 ) * sr2 # LJ Laplacian\n fij = rij * vir * sr2 # LJ forces\n pyx = rij[1]*fij[0] # Off-diagonal element of pressure tensor\n total = total + PotentialType ( pot=pot, vir=vir, pyx=pyx, lap=lap, ovr=ovr )\n f[ki,:] = f[ki,:] + fij\n f[kj,:] = f[kj,:] - fij\n\n # Multiply results by numerical factors\n f = f * 24.0 # 24*epsilon\n total.pot = total.pot * 4.0 # 4*epsilon\n total.vir = total.vir * 24.0 / 3.0 # 24*epsilon and divide virial by 3\n total.pyx = total.pyx * 24.0 # 24*epsilon\n total.lap = total.lap * 24.0 * 2.0 # 24*epsilon and factor 2 for ij and ji\n \n return total, f",
"def get_right_and_left_lanelet(self): \n if self.scenario is not None:\n possible_lanelet_ids = self.scenario.lanelet_network.find_lanelet_by_position([np.array(list(self.current_pos))])[0]\n self.current_lanelet = None\n self.right_lanelet = None\n self.left_lanelet = None \n for lane_id in possible_lanelet_ids: \n self.current_lanelet = self.scenario.lanelet_network.find_lanelet_by_id(lane_id) \n if self.current_lanelet is not None:\n if self.current_lanelet.adj_left is not None:\n self.left_lanelet = self.scenario.lanelet_network.find_lanelet_by_id(self.current_lanelet.adj_left)\n if self.current_lanelet.adj_right is not None:\n self.right_lanelet = self.scenario.lanelet_network.find_lanelet_by_id(self.current_lanelet.adj_right)",
"def calculate_forces(v0, mu, density_m, CD, diameter_b, \\\n area_b, volume_b, density_b, \\\n dt, T):\n \n # Gravitational const. m/s^2\n g = 9.81 \n # Proportionality constant for\n # Reynolds number\n Re_const = diameter_b*density_m/mu\n \n a_s = 3*math.pi*diameter_b*mu/(density_b*volume_b)\n a_q = 0.5*CD*density_m*area_b/(density_b*volume_b)\n b = g*(density_m/density_b - 1.0)\n \n # Numerical solution gives velocity as \n # a function of time.\n v, t = vm.solver(v0, a_s, a_q, b, Re_const, T, dt) \n\n # Initialize vectors\n Fg = zeros(len(v))\n Fb = zeros(len(v))\n Fd = zeros(len(v))\n\n # Loop over time steps\n for n in range(0, len(v)):\n # Evaluate Reynolds number\n Re = Re_const*v[n] \n \n # Gravity force\n Fg[n] = -density_b*volume_b*g\n # Bouyancy force\n Fb[n] = density_m*g*volume_b\n \n # Drag force\n if abs(Re) < 1:\n # If Re < 1, use Stokes' drag force \n Fd[n] = -3.0*math.pi*diameter_b*mu*v[n]\n else:\n # If Re >= 1, use the quadratic\n # drag force\n Fd[n] = -0.5*CD*density_m*area_b*abs(v[n])*v[n]\n\n \n return Fg, Fb, Fd, t",
"def _GetHorizonAnglesLegacy(its_elev, height_cbsd, height_rx, refractivity):\n num_points = int(its_elev[0])\n step = its_elev[1]\n dist = num_points * step\n\n # Find the refractivity at the average terrain height\n start_avg = int(3.0 + 0.1 * num_points)\n end_avg = num_points - start_avg + 6\n zsys = np.mean(its_elev[start_avg-1:end_avg])\n refractivity *= np.exp(-zsys/9460.0)\n\n # Find the ray down-curvature per meter\n gma = 157e-9\n gme = gma*(1.0 - 0.04665 * np.exp(refractivity/179.3))\n\n alt_cbsd = its_elev[2] + height_cbsd\n alt_rx = its_elev[num_points+2] + height_rx\n qc = 0.5 * gme\n q = qc * dist\n # theta0 and theta1 the slopes, dl0 and dl1 the horizon distances\n theta1 = (alt_rx - alt_cbsd) / dist\n theta0 = theta1 - q\n theta1 = -theta1 - q\n dl0 = dist\n dl1 = dist\n\n if num_points >= 2:\n sa = 0.0\n sb = dist\n wq = True\n for i in range(1, num_points):\n sa += step\n sb -= step\n q = its_elev[i+2] - (qc*sa + theta0) * sa - alt_cbsd\n if q > 0.0:\n theta0 += q/sa\n dl0 = sa\n wq = False\n if not wq:\n q = its_elev[i+2] - (qc*sb + theta1) * sb - alt_rx\n if q > 0.0:\n theta1 += q/sb\n dl1 = sb\n\n return (np.arctan(theta0) * 180/np.pi,\n np.arctan(theta1) * 180/np.pi,\n dl0,\n dl1)",
"def _compute_solar_torque(self, curr_date):\n if self._to_add[2]:\n inertial2Sat = self.spacecraft_state.getAttitude().getRotation()\n\n ratio = self.SolarModel.getLightingRatio(self.satPos_i,\n self.in_frame,\n curr_date)\n\n sunPos = inertial2Sat.applyTo(\n self.sun.getPVCoordinates(curr_date,\n self.in_frame).getPosition())\n self._sTorque = Vector3D.ZERO\n\n iterator = itertools.izip(self.meshDA['CoM'],\n self.meshDA['Normal'],\n self.meshDA['Area'],\n self.meshDA['Coefs'])\n\n for CoM, normal, area, coefs in iterator:\n position = self.satPos_s.add(CoM)\n\n # compute flux in inertial frame\n sunSatVector = \\\n position.subtract(sunPos)\n r2 = sunSatVector.getNormSq()\n\n rawP = ratio * self.K_REF / r2\n flux = Vector3D(rawP / sqrt(r2), sunSatVector)\n\n # compute Radiation Pressure Force:\n if flux.getNormSq() > Precision.SAFE_MIN:\n # illumination (we are not in umbra)\n # rotate flux to spacecraft frame:\n dot = self.V3_dot(normal, flux)\n\n if dot > 0:\n # the solar array is illuminated backward,\n # fix signs to compute contribution correctly\n dot = -dot\n normal = normal.negate()\n absorbCoeff = coefs[0]\n specularReflCoeff = coefs[1]\n diffuseReflCoeff = 1 - (absorbCoeff + specularReflCoeff)\n try:\n assert(diffuseReflCoeff >= 0)\n except AssertionError:\n raise AssertionError(\n \"Negative diffuse reflection coefficient not possible!\")\n psr = flux.getNorm()\n # Vallado's equation uses different parameters which are\n # related to our parameters as:\n # cos (phi) = - dot / (psr*area)\n # n = N (n...unit vector)\n # s = -fluxSat / psr (s...unit vector)\n cN = 2 * area * dot * (diffuseReflCoeff / 3 -\n specularReflCoeff * dot / psr)\n cS = (area * dot / psr) * (specularReflCoeff - 1)\n Force = Vector3D(float(cN), normal, float(cS), flux)\n # Force already in spacecraft frame. No need to convert\n self._sTorque = self._sTorque.add(self.V3_cross(CoM, Force))\n\n else:\n self._sTorque = Vector3D.ZERO",
"def compute_forces_mesh(self):\n f = self.ptclgrid.grid[:self.size,:self.size]*self.grad_phi_mesh()\n return f",
"def get_edfdv_sl(x, v):\n\n xm, vm = np.meshgrid(x, v, indexing=\"ij\")\n xm = xm.flatten()\n vm = vm.flatten()\n\n v_pad = _get_padded_grid_(v)\n f_pad = np.zeros((x.size, v.size + 2))\n\n def update_velocity_adv_sl(f, e, dt):\n \"\"\"\n evolution of df/dt = e df/dv according to the Backward Semi-Lagrangian technique popularized by [1]\n\n [1] - Cheng, C. ., & Knorr, G. (1976). The integration of the vlasov equation in configuration space.\n Journal of Computational Physics, 22(3), 330–351. https://doi.org/10.1016/0021-9991(76)90053-X\n\n :param f: distribution function. (numpy array of shape (nx, nv))\n :param e: electric field (numpy array of shape (nx,))\n :param dt: timestep (single float value)\n :return:\n \"\"\"\n\n f_pad[:, 1:-1] = f\n f_pad[:, 0] = f[:, -1]\n f_pad[:, -1] = f[:, 0]\n\n e_fit = interpolate.interp1d(x, e, kind=\"cubic\")\n\n em = e_fit(xm)\n\n f_interpolator = interpolate.RectBivariateSpline(x, v_pad, f_pad)\n f_out = f_interpolator(xm, vm - em * dt, grid=False).reshape((x.size, v.size))\n\n return f_out\n\n return update_velocity_adv_sl",
"def lofted_car(self):\n return LoftedSolid(profiles=self.chamfered_curve,\n mesh_deflection=1e-4)",
"def neighbour_force(self, point):\n left_vector = np.array([\n self.neighbour1.x - point[0],\n self.neighbour1.y - point[1],\n ])\n right_vector = np.array([\n self.neighbour2.x - point[0],\n self.neighbour2.y - point[1],\n ])\n return 0.1 * (left_vector + right_vector)",
"def update_vehicle_state(self):\n #vel = self.v + self.commands['throttle']/self.m/self.simulation_rate\n\n vel = self.commands['speed']\n steer = self.commands['steering_angle']\n\n if steer > 0.5:\n steer_cmd = 25\n elif steer < -0.5:\n steer_cmd = 185\n else:\n steer_cmd = 100 - 160*steer ##linear\n #steer_cmd = 100 - 640*steer**3 ##cubic\n\n #rospy.logwarn('Velocity command is '+ str(vel))\n # 130 is the lowest vel_cmd that makes the truck move.\n if vel > 12:\n vel_cmd = 161\n elif vel < 0:\n vel_cmd = 0\n else:\n vel_cmd = 3.77*vel + 117\n # rospy.logerr('throttle: ' + str(throttle))\n hw_port.set_command(vel_cmd,steer_cmd,self.vehicle_id)",
"def get_force(self):\n displ = self.get_displ()\n equil = displ / np.linalg.norm(displ) * self.L0\n return self.k * (displ - equil)",
"def compute_dual_line(P):\n return Line(P.x, -P.y)",
"def update(self):\n\n next_x = self.__pos[0] + self.__speed[0]\n next_y = self.__pos[1] + self.__speed[1]\n\n boundary_left = 0\n boundary_right = LEVEL_WIDTH - SCREEN_WIDTH\n boundary_top = 0\n boundary_bottom = LEVEL_HEIGHT - SCREEN_HEIGHT\n\n if next_x < boundary_left:\n next_x = boundary_left\n if next_x > boundary_right:\n next_x = boundary_right\n if next_y < boundary_top:\n next_y = boundary_top\n if next_y > boundary_bottom:\n next_y = boundary_top\n\n return (next_x, next_y)",
"def step_two_wheeled_env(curr_x, u, dt, method=\"Oylar\"):\n B = np.array([[np.cos(curr_x[-1]), 0.],\n [np.sin(curr_x[-1]), 0.],\n [0., 1.]])\n \n x_dot = np.matmul(B, u[:, np.newaxis])\n\n next_x = x_dot.flatten() * dt + curr_x\n\n return next_x",
"def update_vehicle_state(self):\n sim_timestep = 1. / self.simulation_rate\n # Decompose v into x and y component.\n if self.v != self.commands['speed']:\n self.v = self.commands['speed']\n vx = numpy.cos(self.yaw) * self.v\n vy = numpy.sin(self.yaw) * self.v\n # Update vehicles position\n self.x += vx * sim_timestep\n self.y += vy * sim_timestep\n self.yaw += ((self.v / self.axles_distance) *\n numpy.tan(self.commands['steering_angle']) *\n sim_timestep)\n # Make sure self.yaw is never negative.\n # self.yaw 0..2pi\n if self.yaw > 2*numpy.pi:\n self.yaw = 0.\n elif self.yaw < 0.:\n self.yaw += 2*numpy.pi",
"def lane_emden_step(x,y,dx,n,w):\n _solver.rk4(x,y[0],y[1],dx,n,w)\n out = _solver.rk4out\n return np.array([out.z0,out.z1])"
] |
[
"0.59395546",
"0.5330673",
"0.5312082",
"0.5219329",
"0.5200467",
"0.5179556",
"0.51686174",
"0.51159114",
"0.51100034",
"0.5095978",
"0.503672",
"0.50160944",
"0.5001314",
"0.49821773",
"0.4980897",
"0.4960312",
"0.49429363",
"0.49155977",
"0.490984",
"0.48997304",
"0.48746848",
"0.48666477",
"0.48646134",
"0.4864057",
"0.4863744",
"0.4862745",
"0.48607856",
"0.48525453",
"0.48482928",
"0.48298448"
] |
0.5961753
|
0
|
Gets the horizontal and vertical fairlead force in a 3D frame along the global reference axes. Ensure update_states() is called before accessing this function: the function will not itself solve the forces for a new vessel position, so if the vessel has been displaced and update_states() is not called first, the fairlead forces returned are not updated with the new position.
|
def get_fairlead_force_3d(self, index):
fx = c_double(-999.9)
fy = c_double(-999.9)
fz = c_double(-999.9)
Map.lib.map_get_fairlead_force_3d( pointer(fx), pointer(fy), pointer(fz), self.f_type_d, index, self.status, pointer(self.ierr))
return fx.value, fy.value, fz.value
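
A minimal usage sketch (not part of the dataset record): it assumes `moorings` is an already-initialized MAP model object exposing the update_states() and get_fairlead_force_3d() methods named in the query/document pair above; the displace_vessel() call, the update_states() argument list, and the line count are illustrative assumptions only.

    # Hypothetical driver: after moving the vessel, re-solve the statics
    # before reading fairlead forces, as the query above requires.
    moorings.displace_vessel(5.0, 0.0, 0.0, 0.0, 0.0, 0.0)  # assumed 5 m surge offset
    moorings.update_states(0.0, 0)                          # assumed signature: (time, coupling interval)

    for line_index in range(3):                              # assumed number of mooring lines
        fx, fy, fz = moorings.get_fairlead_force_3d(line_index)
        print("line %d fairlead force: fx=%.2f, fy=%.2f, fz=%.2f" % (line_index, fx, fy, fz))
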
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def get_force(self):\n # @todo: Probably need to check the state of the landing gear for this (e.g. are they on the track?).\n # Note: you can get the state of the landing gear by going through self.sim \n return 0.0",
"def _compute_forces(self):\n # get new coeffs\n self._get_coeffs()\n\n # instead of writing many time\n awa = self.awa / 180.0 * np.pi\n\n # lift and drag\n self.lift = 0.5 * self.rho * self.aws ** 2 * self.area * self.cl\n self.drag = 0.5 * self.rho * self.aws ** 2 * self.area * self.cd + self._get_Rw(awa)\n\n # project into yacht coordinate system\n self.Fx = self.lift * np.sin(awa) - self.drag * np.cos(awa)\n self.Fy = self.lift * np.cos(awa) + self.drag * np.sin(awa)\n\n # heeling moment\n self.Mx = self.Fy * self._vce() * np.cos(self.phi / 180.0 * np.pi)\n\n # side-force is horizontal component of Fh\n self.Fy *= np.cos(np.deg2rad(self.phi))",
"def CalcForce_aeroframe_DEP(V, CoefMatrix, x, rho, g):\r\n\r\n #Compute aero forces\r\n # here x must be of the form (alpha, beta, p, q, r, da, dr, de) (last one punctualy used)\r\n # set non dim for p,q,r\r\n nonDim=np.ones(7)\r\n nonDim[2]=g.b/(2*V)\r\n nonDim[3]=g.c/(2*V)\r\n nonDim[4]=g.b/(2*V)\r\n # F=np.dot(CoefMatrix,x[0:7]) # commented form, modification to account for symmetric drag increase of side slip\r\n F=np.zeros((3))\r\n M=np.zeros((3))\r\n xsym=np.copy(x[0:-1])\r\n xsym[1]=abs(xsym[1]) # make beta always positive since derivatives have already correct sign for drag and lift only\r\n xsym[-3]=abs(xsym[-3]) # make ailerons deflection always positive for drag increase and lift decrease\r\n xsym[-1]=abs(xsym[-1]) # make rudder deflection always positive for drag increase and lift decrease\r\n F[0]=np.dot(CoefMatrix[0],xsym)\r\n F[1]=np.dot(CoefMatrix[1],x[0:-1]) #side force\r\n F[2]=np.dot(CoefMatrix[2],xsym)\r\n M=np.dot(CoefMatrix[3:6,:],x[0:-1])\r\n# print(\"Printing moment coeff\")\r\n# print(M)\r\n\r\n \r\n #No need to project\r\n# alpha=x[0]\r\n# beta=x[1]\r\n# H=np.array([[math.cos(alpha)*math.sin(beta), -math.cos(alpha)*math.sin(beta), -math.sin(alpha)],[math.sin(beta), math.cos(beta), 0],[math.sin(alpha)*math.cos(beta), -math.sin(alpha)*math.sin(beta), math.cos(alpha)]])\r\n if V<=71 :\r\n Fbody=np.array([-F[0]-g.Cd0_fl,F[1],-F[2]-g.CL0_fl]) # add alpha=0 coefficients\r\n Moment=M+np.array([0,x[-1]*g.Cm_de+g.Cm0_fl,0])\r\n else:\r\n Fbody=np.array([-F[0]-g.Cd0,F[1],-F[2]-g.CL0]) # add alpha=0 coefficients\r\n Moment=M+np.array([0,x[-1]*g.Cm_de+g.Cm0,0])\r\n \r\n\r\n Fbody=0.5*V**2.0*rho*g.S*Fbody\r\n Moment=0.5*V**2.0*rho*g.S*g.b*Moment\r\n \r\n return np.append(Fbody, Moment)",
"def compute_forces_mesh(self):\n f = self.ptclgrid.grid[:self.size,:self.size]*self.grad_phi_mesh()\n return f",
"def get_bforce_wm_ws_Gx_surf(self):\n\n [Ly,N] = self.b.shape\n z_u_w = self.grid_dict['z_u_w'] \n \n self.Gm1 = np.zeros([Ly])\n self.dGm1_dS = np.zeros([Ly]) \n self.Gt1 = np.zeros([Ly])\n self.dGt1_dS = np.zeros([Ly]) \n self.Bfsfc_bl = np.zeros([Ly])\n self.Av_bl = np.zeros([Ly])\n self.dAv_bl = np.zeros([Ly])\n \n #debugging\n self.wm_surf = np.zeros([Ly])\n self.ws_surf = np.zeros([Ly]) \n\n #---> j-loop\n for j in range(Ly): \n k_w = self.kbl[j] # KBL is \"new bl index after calling find_new_kbl()\n z_bl = z_u_w[j,N] - self.hbls[j]\n zscale = self.hbls[j] \n \n if self.swr_frac[j,k_w-1] > 0:\n Bfsfc = self.Bo[j] + self.Bosol[j] * ( 1. - self.swr_frac[j,k_w-1]\\\n * self.swr_frac[j,k_w] * ( z_u_w[j,k_w] - z_u_w[j,k_w-1] )\\\n / (self.swr_frac[j,k_w] * (z_u_w[j,k_w] - z_bl)\\\n + self.swr_frac[j,k_w-1] * (z_bl - z_u_w[j,k_w-1]) ))\n \n else:\n Bfsfc = self.Bo[j] + self.Bosol[j]\n \n # CALCUALTE TURBULENT VELOCITY SCALES\n wm,ws = self.lmd_wscale_wm_and_ws(Bfsfc,zscale,self.ustar[j],self.hbls[j])\n self.wm_surf[j] = wm\n self.ws_surf[j] = ws \n\n if self.LIMIT_UNSTABLE_ONLY:\n f1 = 5. * np.max([0,Bfsfc]) * self.vonKar / (self.ustar[j]**4+self.eps)\n else:\n f1 = 0\n\n \n cff = 1. / (z_u_w[j,k_w] - z_u_w[j,k_w-1])\n cff_up = cff * (z_bl - z_u_w[j,k_w])\n cff_dn = cff * (z_u_w[j,k_w] - z_bl)\n\n #MOMENTUM \n Av_bl = cff_up * self.Kv_old[j,k_w] + cff_dn * self.Kv_old[j,k_w-1]\n dAv_bl = cff * (self.Kv_old[j,k_w] - self.Kv_old[j,k_w-1])\n self.Av_bl[j] = Av_bl\n self.dAv_bl[j] = dAv_bl\n self.Gm1[j] = Av_bl / (self.hbls[j] * wm + self.eps)\n self.dGm1_dS[j] = np.min([0.,Av_bl*f1-dAv_bl/(wm+self.eps)]) \n\n #TEMPERATURE(BUOYANCY)\n At_bl = cff_up * self.Kt_old[j,k_w] + cff_dn * self.Kt_old[j,k_w-1]\n dAt_bl = cff * (self.Kt_old[j,k_w] - self.Kt_old[j,k_w-1])\n self.Gt1[j] = At_bl / (self.hbls[j] * ws + self.eps)\n self.dGt1_dS[j] = np.min([0.,At_bl*f1-dAt_bl/(ws+self.eps)]) \n\n self.Bfsfc_bl[j] = Bfsfc",
"def _compute_solar_torque(self, curr_date):\n if self._to_add[2]:\n inertial2Sat = self.spacecraft_state.getAttitude().getRotation()\n\n ratio = self.SolarModel.getLightingRatio(self.satPos_i,\n self.in_frame,\n curr_date)\n\n sunPos = inertial2Sat.applyTo(\n self.sun.getPVCoordinates(curr_date,\n self.in_frame).getPosition())\n self._sTorque = Vector3D.ZERO\n\n iterator = itertools.izip(self.meshDA['CoM'],\n self.meshDA['Normal'],\n self.meshDA['Area'],\n self.meshDA['Coefs'])\n\n for CoM, normal, area, coefs in iterator:\n position = self.satPos_s.add(CoM)\n\n # compute flux in inertial frame\n sunSatVector = \\\n position.subtract(sunPos)\n r2 = sunSatVector.getNormSq()\n\n rawP = ratio * self.K_REF / r2\n flux = Vector3D(rawP / sqrt(r2), sunSatVector)\n\n # compute Radiation Pressure Force:\n if flux.getNormSq() > Precision.SAFE_MIN:\n # illumination (we are not in umbra)\n # rotate flux to spacecraft frame:\n dot = self.V3_dot(normal, flux)\n\n if dot > 0:\n # the solar array is illuminated backward,\n # fix signs to compute contribution correctly\n dot = -dot\n normal = normal.negate()\n absorbCoeff = coefs[0]\n specularReflCoeff = coefs[1]\n diffuseReflCoeff = 1 - (absorbCoeff + specularReflCoeff)\n try:\n assert(diffuseReflCoeff >= 0)\n except AssertionError:\n raise AssertionError(\n \"Negative diffuse reflection coefficient not possible!\")\n psr = flux.getNorm()\n # Vallado's equation uses different parameters which are\n # related to our parameters as:\n # cos (phi) = - dot / (psr*area)\n # n = N (n...unit vector)\n # s = -fluxSat / psr (s...unit vector)\n cN = 2 * area * dot * (diffuseReflCoeff / 3 -\n specularReflCoeff * dot / psr)\n cS = (area * dot / psr) * (specularReflCoeff - 1)\n Force = Vector3D(float(cN), normal, float(cS), flux)\n # Force already in spacecraft frame. No need to convert\n self._sTorque = self._sTorque.add(self.V3_cross(CoM, Force))\n\n else:\n self._sTorque = Vector3D.ZERO",
"def get_force(self):\n displ = self.get_displ()\n equil = displ / np.linalg.norm(displ) * self.L0\n return self.k * (displ - equil)",
"def get_fairlead_force_2d(self, index):\n H_ref = c_double(-999.9)\n V_ref = c_double(-999.9)\n Map.lib.map_get_fairlead_force_2d( pointer(H_ref), pointer(V_ref),self.f_type_d, index, self.status, pointer(self.ierr))\n return H_ref.value, V_ref.value",
"def update_forces(self):\r\n # update all the functions\r\n self.compute_gravity()\r\n self.compute_tides()\r\n self.compute_centrifugal()\r\n self.compute_coriolis()\r\n\r\n # add together the forces into the summation function\r\n self.forcing.assign(self.ftides+self.gravity +\r\n self.centrifugal+self.coriolis)",
"def force ( box, strain, r ):\n\n import numpy as np\n from itertools import product\n import math\n \n # It is assumed that positions are in units where box = 1\n # Forces are calculated in units where sigma = 1 and epsilon = 1\n # Lees-Edwards boundaries, in sliding brick arrangement\n # Flow/gradient/vorticity directions are x/y/z == 0/1/2\n # Uses neighbour lists\n\n n = r.shape[0]\n\n # Set up vectors to half the cells in neighbourhood of 3x3x3 cells in cubic lattice\n # The cells are chosen so that if (d0,d1,d2) appears, then (-d0,-d1,-d2) does not.\n # The last three cells are extra ones, to cope with the sheared system\n d = np.array ( [ [ 0, 0, 0], [ 1, 0, 0], [ 1, 0, 1], [-1, 0, 1], [ 0, 0, 1], # 5 cells with d1=0\n [ 1, 1, -1], [ 1, 1, 0], [ 1, 1, 1], # 3 cells with d0= 1, d1=1\n [ 0, 1, -1], [ 0, 1, 0], [ 0, 1, 1], # 3 cells with d0= 0, d1=1\n [-1, 1, -1], [-1, 1, 0], [-1, 1, 1], # 3 cells with d0=-1, d1=1\n [-2, 1, -1], [-2, 1, 0], [-2, 1, 1] ] ) # 3 cells with d0=-2, d1=1\n\n r[:,0] = r[:,0] - np.rint(r[:,1])*strain # Extra correction in box=1 units\n r = r - np.rint(r) # Ensure all atoms in periodic box\n \n sr2_ovr = 1.77 # Overlap threshold (pot > 100)\n r_cut_box = r_cut / box\n r_cut_box_sq = r_cut_box ** 2\n box_sq = box ** 2\n\n # Initialize\n f = np.zeros_like(r)\n total = PotentialType ( pot=0.0, vir=0.0, pyx=0.0, lap=0.0, ovr=False )\n\n # Calculate cell index triplets\n sc = math.floor(box/r_cut) # Number of cells along box edge\n assert sc >= 3, 'System is too small for cells' # Guard against box being too small\n c = np.floor((r+0.5)*sc).astype(np.int_) # N*3 array of cell indices for all atoms\n assert np.all(c>=0) and np.all(c<sc), 'Index error' # Simplistic \"guard\" against roundoff\n\n shift = math.floor(strain*sc) # Strain measured in cell lengths\n\n if fast:\n \n # Build list of arrays, each array holding positions of atoms in a cell\n # At the same time, define a matching set of force arrays in each cell\n # i and j number the atoms in each cell; we do not refer explicitly to indices in r\n rc, fc = [], [] # Initially empty lists of positions and forces\n for ci in product(range(sc),repeat=3): # Triple loop over cells\n mask = np.all(c==ci,axis=1) # Mask identifies atoms in this cell\n rc.append(r[mask,:]) # Copy atom coordinates into array, add to list\n fc.append(np.zeros_like(rc[-1])) # Zero corresponding forces, add to list\n\n for ci1, rci in enumerate(rc): # Loop over i-cells, getting all atoms in each i-cell as an array\n ci = np.unravel_index(ci1,(sc,sc,sc)) # Get i-cell triple-indices\n if rci.size==0: # Handle empty cell\n continue\n\n # Set up correct neighbour cell indices\n if ci[1]==sc-1: # i-cell is in the top layer\n dd = d.copy() # Standard list copied, including extra 3 cells\n dd[5:,0] = d[5:,0] - shift # All those looking up need adjustment in the x direction\n else: # i-cell is not in top layer\n dd = d[:-3,:].copy() # Last three extra cells are not needed; shift is not needed\n \n for dj in dd: # Loop over neighbouring j-cells\n cj = ci + dj # Compute neighbour j-cell triple-indices\n cj1 = np.ravel_multi_index(cj,(sc,sc,sc),mode='wrap') # Convert j-cell to single-index\n rcj = rc[cj1] # Get atoms in j-cell as an array\n if rcj.size==0: # Handle empty cell\n continue\n\n rij = rci[:,np.newaxis,:]-rcj[np.newaxis,:,:] # Separation vectors for all i and j\n rij[:,:,0] = rij[:,:,0] - np.rint(rij[:,:,1])*strain # Extra correction in box=1 units\n rij = rij - np.rint(rij) # PBCs in box=1 units\n rij_sq = np.sum(rij**2,axis=2) # Squared 
separations\n in_range = rij_sq < r_cut_box_sq # Set flags for within cutoff\n\n if ci1==cj1:\n np.fill_diagonal(in_range,False) # Eliminate i==j when i-cell==j-cell\n np.fill_diagonal(rij_sq,1.0) # Avoid divide-by-zero below\n\n rij_sq = rij_sq * box_sq # Now in sigma=1 units\n rij = rij * box # Now in sigma=1 units\n sr2 = np.where ( in_range, 1.0/rij_sq, 0.0 ) # (sigma/rij)**2, only if in range\n ovr = sr2 > sr2_ovr # Overlap if too close\n sr6 = sr2 ** 3\n sr12 = sr6 ** 2\n pot = sr12 - sr6 # LJ potential (cut but not shifted)\n vir = pot + sr12 # LJ virial\n pot = np.where ( in_range, pot+0.25, 0.0 ) # WCA LJ pair potential (cut-and-shifted)\n lap = ( 22.0*sr12 - 5.0*sr6 ) * sr2 # LJ Laplacian\n fij = vir * sr2 # LJ scalar part of forces\n fij = rij * fij[:,:,np.newaxis] # LJ pair forces\n pyx = rij[:,:,1]*fij[:,:,0] # Off-diagonal element of pressure tensor\n\n if ci1==cj1: # Correct for double-counting ij and ji when i-cell==j-cell\n fij = fij / 2\n total = total + PotentialType ( pot=np.sum(pot)/2, vir=np.sum(vir)/2, \n pyx=np.sum(pyx)/2, lap=np.sum(lap)/2, ovr=np.any(ovr) )\n else:\n total = total + PotentialType ( pot=np.sum(pot), vir=np.sum(vir), \n pyx=np.sum(pyx), lap=np.sum(lap), ovr=np.any(ovr) )\n\n fc[ci1][:,:] = fc[ci1][:,:] + np.sum(fij,axis=1) # Aggregate force on atoms in i-cell\n fc[cj1][:,:] = fc[cj1][:,:] - np.sum(fij,axis=0) # Aggregate force on atoms in j-cell\n\n # Copy forces from list of cell arrays to main force array\n for ci in product(range(sc),repeat=3): # Triple loop over cells\n mask = np.all(c==ci,axis=1) # Mask identifies atoms in this cell\n ci1 = np.ravel_multi_index(ci,(sc,sc,sc),mode='wrap') # Single-index\n f[mask,:] = fc[ci1] # Copy atom forces from correct cell\n\n else:\n \n # Build list of arrays, each array holding indices of atoms in a cell\n # ki and kj are atom indices in the r array; i and j number the atoms in each cell\n k_array = np.arange(n) # Atom indices 0..N-1\n kc = [] # Initially empty list of indices\n for ci in product(range(sc),repeat=3): # Triple loop over cells\n mask = np.all(c==ci,axis=1) # Mask identifies atoms in this cell\n kc.append(k_array[mask]) # Copy atom indices into array, add to list\n\n for ci1, kci in enumerate(kc): # Loop over i-cells, getting atom indices as an array\n ci = np.unravel_index(ci1,(sc,sc,sc)) # Get i-cell triple-indices\n\n # Set up correct neighbour cell indices\n if ci[1]==sc-1: # i-cell is in the top layer\n dd = d # Standard list copied, including extra 3 cells\n dd[5:,0] = dd[5:,0] - shift # All those looking up need adjustment in the x direction\n else:\n dd = d[:-3,:] # Last three extra cells are not needed; shift is not needed\n\n for dj in dd: # Loop over neighbouring j-cells\n cj = ci + dj # Compute neighbour j-cell triple-indices\n cj1 = np.ravel_multi_index(cj,(sc,sc,sc),mode='wrap') # Convert to single-index\n kcj = kc[cj1] # Get indices of atoms in j-cell as an array\n\n for i, ki in enumerate(kci): # Loop over individual atoms in i-cell\n j0 = i+1 if cj1==ci1 else 0 # Only look upwards if i-cell==j-cell\n if j0 >= kcj.size: # Handles (redundantly) empty j-cell and the case \n continue # where j-cell==i-cell and i is last atom\n\n for kj in kcj[j0:]: # Loop over individual atoms in j-cell\n rij = r[ki,:]-r[kj,:] # Separation vector\n rij[0] = rij[0] - np.rint(rij[1])*strain # Extra correction in box=1 units\n rij = rij - np.rint(rij) # Periodic boundary conditions in box=1 units\n rij_sq = np.sum(rij**2) # Squared separation\n\n if rij_sq < r_cut_box_sq: # Check within cutoff\n 
rij_sq = rij_sq * box_sq # Now in sigma=1 units\n rij = rij * box # Now in sigma=1 units\n sr2 = 1.0 / rij_sq # (sigma/rij)**2\n ovr = sr2 > sr2_ovr # Overlap if too close\n sr6 = sr2 ** 3\n sr12 = sr6 ** 2\n pot = sr12 - sr6 # LJ potential (cut but not shifted)\n vir = pot + sr12 # LJ virial\n pot = pot + 0.25 # WCA LJ potential (cut-and-shifted)\n lap = ( 22.0*sr12 - 5.0*sr6 ) * sr2 # LJ Laplacian\n fij = rij * vir * sr2 # LJ forces\n pyx = rij[1]*fij[0] # Off-diagonal element of pressure tensor\n total = total + PotentialType ( pot=pot, vir=vir, pyx=pyx, lap=lap, ovr=ovr )\n f[ki,:] = f[ki,:] + fij\n f[kj,:] = f[kj,:] - fij\n\n # Multiply results by numerical factors\n f = f * 24.0 # 24*epsilon\n total.pot = total.pot * 4.0 # 4*epsilon\n total.vir = total.vir * 24.0 / 3.0 # 24*epsilon and divide virial by 3\n total.pyx = total.pyx * 24.0 # 24*epsilon\n total.lap = total.lap * 24.0 * 2.0 # 24*epsilon and factor 2 for ij and ji\n \n return total, f",
"def sectional_force(self,strain):\r\n\t\treturn self.steel_total_force(strain) + \\\r\n\t\t\tself.concrete_total_force(strain)",
"def test_force(self):\n group = hoomd.group.all()\n\n # compute forces\n f = azplugins.restrain.plane(group=group, point=(0,0,0), normal=(1,0,0), k=2.0)\n hoomd.run(1)\n np.testing.assert_array_almost_equal(f.forces[0].force, (-2.,0,0))\n np.testing.assert_array_almost_equal(f.forces[1].force, ( 2.,0,0))\n np.testing.assert_array_almost_equal(f.forces[2].force, ( 6.,0,0))\n self.assertAlmostEqual(f.forces[0].energy, 1.)\n self.assertAlmostEqual(f.forces[1].energy, 1.)\n self.assertAlmostEqual(f.forces[2].energy, 9.)\n np.testing.assert_array_almost_equal(f.forces[0].virial, (-2.,0,0,0,0,0))\n np.testing.assert_array_almost_equal(f.forces[1].virial, (-2.,0,4.,0,0,0))\n np.testing.assert_array_almost_equal(f.forces[2].virial, (12.,0,0,0,0,0))\n\n # change the spring constant\n f.set_params(k=1.0)\n hoomd.run(1)\n np.testing.assert_array_almost_equal(f.forces[0].force, (-1.,0,0))\n np.testing.assert_array_almost_equal(f.forces[1].force, ( 1.,0,0))\n np.testing.assert_array_almost_equal(f.forces[2].force, ( 3.,0,0))\n self.assertAlmostEqual(f.forces[0].energy, 0.5)\n self.assertAlmostEqual(f.forces[1].energy, 0.5)\n self.assertAlmostEqual(f.forces[2].energy, 4.5)\n\n # shift the plane down\n f.set_params(point=(-1,0,0))\n hoomd.run(1)\n np.testing.assert_array_almost_equal(f.forces[0].force, (-2.,0,0))\n np.testing.assert_array_almost_equal(f.forces[1].force, ( 0.,0,0))\n np.testing.assert_array_almost_equal(f.forces[2].force, ( 2.,0,0))\n self.assertAlmostEqual(f.forces[0].energy, 2.0)\n self.assertAlmostEqual(f.forces[1].energy, 0.0)\n self.assertAlmostEqual(f.forces[2].energy, 2.0)\n\n # rotate the plane so that only particle 1 is off the line\n f.set_params(point=(0,0,0), normal=(0,0,1))\n hoomd.run(1)\n np.testing.assert_array_almost_equal(f.forces[0].force, (0,0,0))\n np.testing.assert_array_almost_equal(f.forces[1].force, (0,0,-2))\n np.testing.assert_array_almost_equal(f.forces[2].force, (0,0,0))\n self.assertAlmostEqual(f.forces[0].energy, 0.0)\n self.assertAlmostEqual(f.forces[1].energy, 2.0)\n self.assertAlmostEqual(f.forces[2].energy, 0.0)",
"def Force_on_aircraft_in_body_reference_frame(m, V_B, V_dot_B, omega_B):\n return m * (V_dot_B + omega_B.cross(V_B))",
"def calculate_forces(v0, mu, density_m, CD, diameter_b, \\\n area_b, volume_b, density_b, \\\n dt, T):\n \n # Gravitational const. m/s^2\n g = 9.81 \n # Proportionality constant for\n # Reynolds number\n Re_const = diameter_b*density_m/mu\n \n a_s = 3*math.pi*diameter_b*mu/(density_b*volume_b)\n a_q = 0.5*CD*density_m*area_b/(density_b*volume_b)\n b = g*(density_m/density_b - 1.0)\n \n # Numerical solution gives velocity as \n # a function of time.\n v, t = vm.solver(v0, a_s, a_q, b, Re_const, T, dt) \n\n # Initialize vectors\n Fg = zeros(len(v))\n Fb = zeros(len(v))\n Fd = zeros(len(v))\n\n # Loop over time steps\n for n in range(0, len(v)):\n # Evaluate Reynolds number\n Re = Re_const*v[n] \n \n # Gravity force\n Fg[n] = -density_b*volume_b*g\n # Bouyancy force\n Fb[n] = density_m*g*volume_b\n \n # Drag force\n if abs(Re) < 1:\n # If Re < 1, use Stokes' drag force \n Fd[n] = -3.0*math.pi*diameter_b*mu*v[n]\n else:\n # If Re >= 1, use the quadratic\n # drag force\n Fd[n] = -0.5*CD*density_m*area_b*abs(v[n])*v[n]\n\n \n return Fg, Fb, Fd, t",
"def _compute_solar_torque(self, curr_date):\n if self._to_add[2]:\n ratio = self.SolarModel.getLightingRatio(self.satPos_i,\n self.in_frame,\n curr_date)\n\n sunPos = self.inertial2Sat.applyTo(\n self.sun.getPVCoordinates(curr_date,\n self.in_frame).getPosition())\n sunPos = np.array([sunPos.x, sunPos.y, sunPos.z], dtype='float64')\n\n CoM = self.meshDA['CoM_np']\n normal = self.meshDA['Normal_np']\n area = self.meshDA['Area_np']\n coefs = self.meshDA['Coefs_np']\n\n sunSatVector = self.satPos_s + CoM - sunPos\n r = np.linalg.norm(sunSatVector, axis=1)\n rawP = ratio * self.K_REF / (r**2)\n flux = (rawP / r)[:, None] * sunSatVector\n # eliminate arrays where zero flux\n fluxNorm = np.linalg.norm(flux, axis=1)\n Condflux = fluxNorm**2 > Precision.SAFE_MIN\n flux = flux[Condflux]\n normal = normal[Condflux]\n\n # dot product for multidimensional arrays:\n dot = np.einsum('ij,ij->i', flux, normal)\n dot[dot > 0] = dot[dot > 0] * (-1.0)\n if dot.size > 0:\n normal[dot > 0] = normal[dot > 0] * (-1.0)\n\n cN = 2 * area * dot * (coefs[:, 2] / 3 - coefs[:, 1] * dot / fluxNorm)\n cS = (area * dot / fluxNorm) * (coefs[:, 1] - 1)\n force = cN[:, None] * normal + cS[:, None] * flux\n\n sT = np.sum(np.cross(CoM, force), axis=0)\n\n self._sTorque = Vector3D(float(sT[0]), float(sT[1]), float(sT[2]))\n\n else:\n self._sTorque = Vector3D.ZERO",
"def define_ufl_body_force(self):\n\n if hasattr(self, 'ufl_body_force'):\n return None\n\n # Set to None and exit if key is not in config dictionary.\n if self.config['formulation']['body_force'] is None:\n self.ufl_body_force = 0\n self.ufl_body_force0 = 0\n return None\n\n rho = self.config['material']['density']\n b = self.config['formulation']['body_force']\n xi = self.test_vector\n\n self.ufl_body_force = dlf.dot(xi, rho*b)*dlf.dx\n\n # Create a copy of the body force term to use at a different time step.\n if self.config['formulation']['time']['unsteady'] and hasattr(b,'t'):\n try:\n cppcode = b.cppcode\n except AttributeError:\n cppcode = b._cppcode\n b0 = dlf.Expression(cppcode, t=0.0,\n element=self.vectorSpace.ufl_element())\n self.ufl_body_force0 = dlf.dot(xi, rho*b0)*dlf.dx\n else:\n self.ufl_body_force0 = 0\n\n return None",
"def car_dynamics(self,x, t, u, p):\n # f = vehicle_dynamics_ks(x, u, p)\n f = vehicle_dynamics_st(x, u, p)\n # f = vehicle_dynamics_std(x, u, p)\n # f = vehicle_dynamics_mb(x, u, p)\n return f",
"def _compute_gravity_torque(self, curr_date):\n if self._to_add[0]:\n # return gravity gradient torque in satellite frame\n body2inertial = self.earth.getBodyFrame().getTransformTo(self.in_frame, curr_date)\n body2sat = self.inertial2Sat.applyTo(body2inertial.getRotation())\n body2satRot = PyRotation(body2sat.q0,\n body2sat.q1,\n body2sat.q2,\n body2sat.q3)\n sat2bodyRot = body2satRot.revert()\n body2sat = body2satRot.getMatrix()\n sat2body = sat2bodyRot.getMatrix()\n\n satM = self.spacecraft_state.getMass()\n mCub = self.inCub['dm'] * satM\n # add booms\n if \"dm_boom\" in self.inCub:\n mCub = np.concatenate((mCub, self.inCub['dm_boom']), axis=0) # boom store with mass\n CoM = self.inCub['CoM_np']\n\n dmPos_s = CoM + self.satPos_s\n\n gNewton = (-self.muGM / np.linalg.norm(dmPos_s,\n axis=1,\n keepdims=True)**3) * dmPos_s\n\n # rotate vectors:\n dmPos_b = np.einsum('ij,kj->ki', sat2body, dmPos_s)\n\n gDist = np.empty(dmPos_b.shape)\n for i in xrange(0, dmPos_b.shape[0]):\n gDist[i, :] = np.asarray(\n self.GravityModel.gradient(curr_date,\n Vector3D(float(dmPos_b[i, 0]),\n float(dmPos_b[i, 1]),\n float(dmPos_b[i, 2])),\n self.muGM))\n\n gDist_s = np.einsum('ij,kj->ki', body2sat, gDist)\n\n gT = np.sum(np.cross(CoM, mCub*(gNewton + gDist_s)), axis=0)\n\n self._gTorque = Vector3D(float(gT[0]), float(gT[1]), float(gT[2]))\n\n else:\n self._gTorque = Vector3D.ZERO",
"def fvm(states: States, grid: Gridlines, topo: Topography, config: Config, runtime: DummyDict):\n # pylint: disable=invalid-name\n\n # calculate source term contributed from topography gradients\n states = topography_gradient(states, topo, config.params.gravity)\n\n # calculate slopes of piecewise linear approximation\n states = minmod_slope(states, grid, config.params.theta, runtime.tol)\n\n # interpolate to get discontinuous conservative quantities at cell faces\n states = get_discontinuous_cnsrv_q(states, grid)\n\n # fix non-physical negative depth\n states = correct_negative_depth(states, topo)\n\n # get non-conservative variables at cell faces\n states = decompose_variables(states, topo, runtime.epsilon)\n\n # get local speed at cell faces\n states = get_local_speed(states, config.params.gravity)\n\n # get discontinuous PDE flux at cell faces\n states = get_discontinuous_flux(states, topo, config.params.gravity)\n\n # get common/continuous numerical flux at cell faces\n states = central_scheme(states, runtime.tol)\n\n # get final right hand side\n states.rhs.w = \\\n (states.face.x.num_flux.w[:, :-1] - states.face.x.num_flux.w[:, 1:]) / grid.x.delta + \\\n (states.face.y.num_flux.w[:-1, :] - states.face.y.num_flux.w[1:, :]) / grid.y.delta + \\\n states.src.w\n\n states.rhs.hu = \\\n (states.face.x.num_flux.hu[:, :-1] - states.face.x.num_flux.hu[:, 1:]) / grid.x.delta + \\\n (states.face.y.num_flux.hu[:-1, :] - states.face.y.num_flux.hu[1:, :]) / grid.y.delta + \\\n states.src.hu\n\n states.rhs.hv = \\\n (states.face.x.num_flux.hv[:, :-1] - states.face.x.num_flux.hv[:, 1:]) / grid.x.delta + \\\n (states.face.y.num_flux.hv[:-1, :] - states.face.y.num_flux.hv[1:, :]) / grid.y.delta + \\\n states.src.hv\n\n # remove rounding errors\n states.rhs = remove_rounding_errors(states.rhs, runtime.tol)\n\n # obtain the maximum safe dt\n amax = nplike.max(nplike.maximum(states.face.x.plus.a, -states.face.x.minus.a))\n bmax = nplike.max(nplike.maximum(states.face.y.plus.a, -states.face.y.minus.a))\n max_dt = min(0.25*grid.x.delta/amax, 0.25*grid.y.delta/bmax)\n\n return states, max_dt",
"def ha(env, cstate=0):\n T1 = 10\n T2 = 10\n thM = 20\n thm = 5\n vr = 10.5\n v1 = -1.3\n v2 = -2.7\n assert(T1 == T2)\n\n delta = None # None to cause failure\n # The continous variables used in this ha\n x = T1 # clock1 variable\n y = T2 # clock2 variable\n th = 11.5 # The reactor temperature\n\n # You need vtol here, because of floating point error.\n loc0_ode_x = ODE(env, S.sympify('diff(x(t))'), S.sympify('1.0'),\n ttol=10**-3, iterations=100)\n loc0_ode_y = ODE(env, S.sympify('diff(y(t))'), S.sympify('1.0'),\n ttol=10**-3, iterations=100)\n loc0_ode_th = ODE(env, S.sympify('diff(th(t))'), S.sympify(vr),\n ttol=10**-3, iterations=100, vtol=10**-10)\n loc0_FT = False\n\n loc1_ode_x = ODE(env, S.sympify('diff(x(t))'), S.sympify('1.0'),\n ttol=10**-3, iterations=100)\n loc1_ode_y = ODE(env, S.sympify('diff(y(t))'), S.sympify('1.0'),\n ttol=10**-3, iterations=100)\n loc1_ode_th = ODE(env, S.sympify('diff(th(t))'), S.sympify(v1),\n ttol=10**-3, iterations=100, vtol=10**-10)\n loc1_FT = False\n\n loc2_ode_x = ODE(env, S.sympify('diff(x(t))'), S.sympify('1.0'),\n ttol=10**-3, iterations=100)\n loc2_ode_y = ODE(env, S.sympify('diff(y(t))'), S.sympify('1.0'),\n ttol=10**-3, iterations=100)\n loc2_ode_th = ODE(env, S.sympify('diff(th(t))'), S.sympify(v2),\n ttol=10**-3, iterations=100, vtol=10**-10)\n loc2_FT = False\n\n # Location 3 is reactor shutdown\n loc3_FT = False\n\n # Location 0\n def location0(x, y, th, loc0_FT, loc1_FT, loc2_FT, loc3_FT, prev_time):\n vals = {S.sympify('x(t)'): x,\n S.sympify('y(t)'): y,\n S.sympify('th(t)'): th}\n curr_time = env.now\n # The edge guard takes preference\n if th == thM and x >= T1:\n # print('%7.4f %7.4f %7.4f %7.4f' % (curr_time, x, y, th))\n return 1, 0, x, y, th, None, True, None, None, curr_time\n elif th == thM and y >= T2:\n # print('%7.4f %7.4f %7.4f %7.4f' % (curr_time, x, y, th))\n return 2, 0, x, y, th, None, None, True, None, curr_time\n elif th == thM and x < T1 and y < T2:\n # print('%7.4f %7.4f %7.4f %7.4f' % (curr_time, x, y, th))\n return 3, 0, x, y, th, None, None, None, True, curr_time\n # The invariant\n elif th <= thM:\n if not loc0_FT:\n x = loc0_ode_x.compute(vals, curr_time-prev_time)\n y = loc0_ode_y.compute(vals, curr_time-prev_time)\n th = loc0_ode_th.compute(vals, curr_time-prev_time)\n loc0_FT = True\n # print('%7.4f %7.4f %7.4f %7.4f' % (curr_time, x, y, th))\n if abs(th-thM) > loc0_ode_th.vtol:\n deltath = loc0_ode_th.delta(vals, quanta=(thM-th))\n else:\n th = thM\n deltath = 0\n return 0, deltath, x, y, th, False, None, None, None, curr_time\n else:\n # print('th:', th)\n raise RuntimeError('Reached unreachable branch'\n ' in location 0')\n\n def location1(x, y, th, loc0_FT, loc1_FT, loc2_FT, loc3_FT, prev_time):\n vals = {S.sympify('x(t)'): x,\n S.sympify('y(t)'): y,\n S.sympify('th(t)'): th}\n curr_time = env.now\n # The edge guard takes preference\n if th == thm:\n x = 0 # Reset\n # print('%7.4f %7.4f %7.4f %7.4f' % (curr_time, x, y, th))\n return 0, 0, x, y, th, True, None, None, None, curr_time\n # The invariant\n elif th >= thm:\n if not loc1_FT:\n x = loc1_ode_x.compute(vals, curr_time-prev_time)\n y = loc1_ode_y.compute(vals, curr_time-prev_time)\n th = loc1_ode_th.compute(vals, curr_time-prev_time)\n loc1_FT = True\n # print('%7.4f %7.4f %7.4f %7.4f' % (curr_time, x, y, th))\n if abs(th-thm) > loc1_ode_th.vtol:\n deltath = loc1_ode_th.delta(vals, quanta=(thm-th))\n else:\n th = thm\n deltath = 0\n return 1, deltath, x, y, th, False, None, None, None, curr_time\n else:\n raise RuntimeError('Reached unreachable 
branch'\n ' in location 1')\n\n def location2(x, y, th, loc0_FT, loc1_FT, loc2_FT, loc3_FT, prev_time):\n vals = {S.sympify('x(t)'): x,\n S.sympify('y(t)'): y,\n S.sympify('th(t)'): th}\n curr_time = env.now\n # The edge guard takes preference\n if th == thm:\n y = 0 # Reset\n # print('%7.4f %7.4f %7.4f %7.4f' % (curr_time, x, y, th))\n return 0, 0, x, y, th, True, None, None, None, curr_time\n # The invariant\n elif th >= thm:\n if not loc2_FT:\n x = loc2_ode_x.compute(vals, curr_time-prev_time)\n y = loc2_ode_y.compute(vals, curr_time-prev_time)\n th = loc2_ode_th.compute(vals, curr_time-prev_time)\n loc2_FT = True\n # print('%7.4f %7.4f %7.4f %7.4f' % (curr_time, x, y, th))\n if abs(th-thm) > loc2_ode_th.vtol:\n deltath = loc2_ode_th.delta(vals, quanta=(thm-th))\n else:\n th = thm\n deltath = 0\n return 2, deltath, x, y, th, False, None, None, None, curr_time\n else:\n raise RuntimeError('Reached unreachable branch'\n ' in location 2')\n\n def location3(x, y, th, loc0_FT, loc1_FT, loc2_FT, loc3_FT, prev_time):\n global step\n # print('total steps: ', step)\n # Done\n print(time.time()-start)\n sys.exit(1)\n\n # The dictionary for the switch statement.\n switch_case = {\n 0: location0,\n 1: location1,\n 2: location2,\n 3: location3\n }\n\n prev_time = env.now\n while(True):\n (cstate, delta, x, y, th,\n loc0_FT, loc1_FT, loc2_FT, loc3_FT,\n prev_time) = switch_case[cstate](x, y, th,\n loc0_FT,\n loc1_FT,\n loc2_FT,\n loc3_FT,\n prev_time)\n # This should always be the final statement in this function\n global step\n step += 1\n yield env.timeout(delta)",
"def calculate_forces(self, loaded_bus_mass, acce, vels): \n\n\n # Physical parameters\n air_density = 1.2 # air density in kg/m3; consant for now,\n # eventaully input from weather API\n v_wind = 0.0 # wind speed in km per hour; figure out component,\n # and also will come from weather API\n \n\n # List of Bus Parameters for 40 foot bus\n # if self.mass_array is None:\n # loaded_bus_mass = self.unloaded_bus_mass # Mass of bus in kg\n # else:\n \n width = 2.6 # in m\n height = 3.3 # in m\n bus_front_area = width * height\n drag_coeff = 0.6\n #rw = 0.5 # radius of wheel in m\n factor = 1.1\n \n\n # Calculate the aerodynamic drag\n aero_drag = (\n drag_coeff\n *\n bus_front_area\n *\n (air_density/2)\n *\n (vels-v_wind)**2\n )\n\n # Calculate the inertial force\n inertia = factor*loaded_bus_mass * acce\n\n return (aero_drag, inertia)",
"def update_forces(self):\n\n pass",
"def fglidingHST_exact(xy, v, NL, KL, BM, Mm, params):\n I1 = params['I1']\n I3 = params['I3']\n l = params['l']\n g = params['g']\n k = params['k']\n\n try:\n NP, NN = np.shape(NL)\n except:\n '''There is only one particle'''\n NP = 1\n NN = 0\n\n # print 'xy = ', xy\n # print 'v = ', v\n\n x = xy[:, 0].ravel() # .reshape(NP,1);\n y = xy[:, 1].ravel() # .reshape(NP,1);\n theta = xy[:, 2].ravel() # .reshape(NP,1);\n phi = xy[:, 3].ravel() # .reshape(NP,1);\n psi = xy[:, 4].ravel() # .reshape(NP,1);\n vx = v[:, 0].ravel() # .reshape(NP,1);\n vy = v[:, 1].ravel() # .reshape(NP,1);\n vtheta = v[:, 2].ravel() # .reshape(NP,1);\n vphi = v[:, 3].ravel() # .reshape(NP,1);\n vpsi = v[:, 4].ravel() # .reshape(NP,1)\n\n # if theta is very nearly pi, push it back\n close_pi = 3.1415\n # xout[xy[:,2] > close_pi,2] = close_pi\n theta[theta > close_pi] = close_pi\n\n # w3 = vpsi + vphi*np.cos(theta)\n w3 = params['w3']\n # if not isinstance(w3,np.ndarray):\n # print 'w3 --> ndarray'\n # w3 = np.array(w3)\n\n # SPRING FORCE\n vecx = np.array([[KL[i, j] * (xy[i, 0] - xy[NL[i, j], 0]) for j in range(NN)] for i in range(NP)])\n vecy = np.array([[KL[i, j] * (xy[i, 1] - xy[NL[i, j], 1]) for j in range(NN)] for i in range(NP)])\n mag = np.sqrt(vecx ** 2 + vecy ** 2)\n # KLnoz = KL.copy() #no zeros\n # KLnoz[KLnoz ==0] = 1. #same value as mag[mag==0], so that stretch=0 for those\n stretch = mag - BM\n mag[mag == 0.] = 1. # avoid divide by zero error\n # print(stretch)\n springx = k * np.sum(stretch * vecx / mag, axis=-1)\n springy = k * np.sum(stretch * vecy / mag, axis=-1)\n # print 'stretch = ', stretch\n\n # add them up\n FX = - springx.ravel() # .reshape(NP,1)\n FY = - springy.ravel() # .reshape(NP,1)\n\n # Set force on fixed particles to zero\n if 'BIND' in params:\n if len(params['BIND']) > 0:\n FX[params['BIND']] = 0.\n FY[params['BIND']] = 0.\n\n # Transform into A frame\n Fx = FX * np.cos(phi) + FY * np.sin(phi)\n Fy = -FX * np.sin(phi) + FY * np.cos(phi)\n\n # print '\\n Fx =', Fx\n\n # VERTICAL REACTION FORCE\n # print 'T1 = ', Mm*g*I1\n # print 'T2 =', - Mm*l*(I1*np.cos(theta)*(vtheta**2 + vphi**2*np.sin(theta)**2))\n # print 'T3a = ', I3*w3\n # print 'T3b = ', vphi*np.sin(theta)**2\n # print 'T3 = ', I3*w3*vphi*np.sin(theta)**2\n # print 'T4 = ', - l* np.sin(theta)*np.cos(theta)*Fx\n # print 'denom = ', I1 + Mm*l**2*np.sin(theta)**2\n gn = (Mm * g * I1 - Mm * l * (I1 * np.cos(theta) * (vtheta ** 2 + vphi ** 2 * np.sin(theta) ** 2) - \\\n I3 * w3 * vphi * (np.sin(theta) ** 2) - l * np.sin(theta) * np.cos(theta) * Fx)) / (\n I1 + Mm * l ** 2 * np.sin(theta) ** 2)\n\n # print 'gn_ term 1 = ', Mm*g*I1\n # print 'gn_ denominator = ', (I1 + Mm*l**2*np.sin(theta)**2)\n # print 'gn_ denom term 2 = ', Mm*l**2\n print 'gn_exact = ', gn\n # print 'gn = ', gn\n\n # EULER EQUATIONS\n # print 'denominator = ',I1*np.sin(theta)\n dvphi = (I3 * w3 * vtheta - 2 * I1 * vphi * vtheta * np.cos(theta) - l * Fy) / (I1 * np.sin(theta))\n # print 'dvtheta -- term 1:', l*gn[4]*np.sin(theta[4])\n # print 'dvtheta -- term 2:', -l*Fx[4]*np.cos(theta[4])\n # print 'dvtheta -- term 3:', I1*vphi[4]**2*np.sin(theta[4])*np.cos(theta[4])\n # print 'dvtheta -- term 4:', I3*w3[4]*vphi[4]*np.sin(theta[4])\n dvtheta = (1. 
/ I1) * (l * gn * np.sin(theta) - l * Fx * np.cos(theta) + I1 * vphi ** 2 * np.sin(theta) * np.cos(\n theta) - I3 * w3 * vphi * np.sin(theta))\n dvpsi = - dvphi * np.cos(theta) + vphi * np.sin(theta) * vtheta\n\n # print 'shape(dvphi)=', np.shape(dvphi)\n # print 'shape(Fx)=', np.shape(Fx)\n\n # SPRING EQUATIONS\n # print 'dvtheta =', dvtheta\n wx = l * (dvtheta * np.cos(theta) - vtheta ** 2 * np.sin(theta) - vphi ** 2 * np.sin(theta))\n wy = l * (dvphi * np.sin(theta) + 2 * vphi * vtheta * np.cos(theta))\n wX = wx * np.cos(phi) - wy * np.sin(phi)\n wY = wx * np.sin(phi) + wy * np.cos(phi)\n dvX = (FX / Mm) - wX\n dvY = (FY / Mm) - wY\n\n # print 'shapes = ', np.shape(dvX), np.shape(dvY),np.shape(dvtheta),np.shape(dvphi),np.shape(dvpsi)\n ftx = np.dstack((dvX, dvY, dvtheta, dvphi, dvpsi))[0]\n # print 'Resulting second derivative: ', ftx[1,:]\n\n if 'BIND' in params:\n if len(params['BIND']) > 0:\n ftx[params['BIND'], 0:2] = [0., 0.]\n # ftx[params['BIND']] = [0.,0.,0.,0.,0.]\n\n # print 'ftx = ', ftx[4,:]\n # gn_check = Mm*g - Mm*l*(dvtheta*np.sin(theta) + vtheta**2*np.cos(theta))\n # print 'gn_check = ', gn_check-gn\n # if sum(abs(gn_check -gn)) > 1e-8:\n # print 'gn vertical reaction force does not match up!'\n # print 'gn_check - gn = ', gn_check-gn\n print 'ftx = ', ftx\n\n return ftx",
"def determine_doubler_plate(self, connection_type, steel, left_beam, right_beam, bottom_column, top_column):\r\n if connection_type == 'top exterior':\r\n # Connection has one left beam and one bottom column\r\n h_bot = bottom_column.unbraced_length['x']*12.0 # Be cautious: convert the unit from ft to inch\r\n db = left_beam.section['d']\r\n tf = left_beam.section['tf']\r\n self.shear_force['Vc'] = (self.moment['Mf1']+0) / (h_bot/2+0)\r\n self.shear_force['Ru'] = (self.moment['Mf1']+0)/(db-tf) - self.shear_force['Vc']\r\n elif connection_type == 'typical exterior':\r\n # Connection has one left beam and two columns\r\n h_bot = bottom_column.unbraced_length['x']*12.0 # Be cautious: convert the unit from ft to inch\r\n h_top = top_column.unbraced_length['x']*12.0 # Be cautious: convert the unit from ft to inch\r\n db = left_beam.section['d']\r\n tf = left_beam.section['tf']\r\n self.shear_force['Vc'] = (self.moment['Mf1']+0) / (h_bot/2+h_top/2)\r\n self.shear_force['Ru'] = (self.moment['Mf1']+0)/(db-tf) - self.shear_force['Vc']\r\n elif connection_type == 'top interior':\r\n # Connection has two beams and one bottom column\r\n h_bot = bottom_column.unbraced_length['x']*12.0 # Be cautious: convert the unit from ft to inch\r\n # Actually left and right beams have the identical sizes\r\n db = (left_beam.section['d'] + right_beam.section['d'])/2\r\n tf = (left_beam.section['tf'] + right_beam.section['tf'])/2\r\n self.shear_force['Vc'] = (self.moment['Mf1']+self.moment['Mf2']) / (h_bot/2)\r\n self.shear_force['Ru'] = (self.moment['Mf1']+self.moment['Mf2'])/(db-tf) - self.shear_force['Vc']\r\n elif connection_type == 'typical interior':\r\n # Connection has two beams and two columns\r\n h_bot = bottom_column.unbraced_length['x']*12.0 # Be cautious: convert the unit from ft to inch\r\n h_top = top_column.unbraced_length['x']*12.0 # Be cautious: convert the unit from ft to inch\r\n db = (left_beam.section['d'] + right_beam.section['d']) / 2\r\n tf = (left_beam.section['tf'] + right_beam.section['tf']) / 2\r\n self.shear_force['Vc'] = (self.moment['Mf1']+self.moment['Mf2']) / (h_bot/2+h_top/2)\r\n self.shear_force['Ru'] = (self.moment['Mf1']+self.moment['Mf2'])/(db-tf) - self.shear_force['Vc']\r\n else:\r\n sys.stderr.write('Error: wrong type of connection specified!\\nNo such keyword for connection exists!\\n')\r\n sys.exit(2)\r\n # Compute the shear strength of the panel zone\r\n phi = 1.0\r\n dc = bottom_column.section['d']\r\n tw = bottom_column.section['tw']\r\n bcf = bottom_column.section['bf']\r\n tcf = bottom_column.section['tf']\r\n db = left_beam.section['d']\r\n self.shear_force['Rn'] = 0.60 * steel.Fy * dc * tw * (1+(3*bcf*tcf**2)/(db*dc*tw))\r\n # Compute the doubler plate thickness\r\n if phi*self.shear_force['Rn'] >= self.shear_force['Ru']:\r\n # Panel zone shear strength is sufficient ==> no need for doubler plate\r\n self.doubler_plate_thickness = 0\r\n else:\r\n # Panel zone shear strength is not sufficient ==> need doubler plate\r\n required_tp = (self.shear_force['Ru'] - 0.60*steel.Fy*(3*bcf*tcf**2)/db) / (0.60*steel.Fy*dc)\r\n tp = 0.25 # Assumed doubler plate thickness\r\n while tp < required_tp:\r\n tp += 0.25 # Update the thickness at an increment of 0.25 until it reaches the requirement\r\n self.doubler_plate_thickness = tp",
"def local_forces(elements, mats, nodes, neq, DME_mat , UC):\r\n IELCON = np.zeros([2], dtype=np.integer)\r\n nels = elements.shape[0]\r\n nnodes = 2\r\n#\r\n for el in range(nels):\r\n iet = np.int(elements[el , 1])\r\n if iet == 0:\r\n ndof = 6\r\n FG = np.zeros((nels, 6))\r\n ul = np.zeros(6)\r\n fl = np.zeros(6)\r\n elif iet == 1:\r\n ndof = 4\r\n FG = np.zeros((nels, 4))\r\n ul = np.zeros(4)\r\n fl = np.zeros(4) \r\n#\r\n for el in range(nels):\r\n#\r\n iet = np.int(elements[el , 1]) \r\n#\r\n elcoor = np.zeros([nnodes, 2])\r\n im = np.int(elements[el , 2])\r\n par0 = mats[im , 0] # Iz\r\n par1 = mats[im , 1] # Emod\r\n par2 = mats[im , 2] # A\r\n for j in range(nnodes):\r\n IELCON[j] = elements[el , j+3]\r\n elcoor[j, 0] = nodes[IELCON[j] , 1]\r\n elcoor[j, 1] = nodes[IELCON[j] , 2] \r\n for j in range(ndof):\r\n ig = DME_mat[el, j]\r\n ul[j] = UC[ig] \r\n if iet == 0: \r\n fl = reac_beam2D(elcoor , par0, par1 , par2 , ul)\r\n elif iet == 1: \r\n fl = reac_beam2DU(elcoor , par0, par1 , ul)\r\n FG[el , :] = fl[:]\r\n \r\n return FG",
"def driftRHS_3D(field,drift_velocity,t,x):\n f = field.getValue(x)\n fs = np.sqrt(f[0]**2 + f[1]**2 + f[2]**2)\n f = f/fs\n return -f*drift_velocity(fs)",
"def get_virtual_force(self, sigma_list):\n f_k = []\n for sigma in sigma_list:\n if not sigma.is_robot:\n f = self.compute_fk(\n sigma.range, self.sigma_des_obj, self.C_obj, self.c_obs)\n f_k.append(f)\n else:\n # c_robot is a bias used to keep robots at a distance\n f = self.compute_fk(\n sigma.range, self.sigma_des_robot, self.C_robot, self.c_robot)\n f_k.append(f)\n sum_x, sum_y = 0, 0\n for k, f in enumerate(f_k):\n phi = (math.pi / 4) * k\n sum_x += f * math.cos(phi)\n sum_y += f * math.sin(phi)\n # current heading of the robot in rect. coords.\n a_c = self.heading_vector\n # get the heading angle of the robot\n angle_a_c = np.arctan2(a_c[1], a_c[0])\n # get the angle of the resultant force in robot fixed frame\n angle_p = np.arctan2(sum_y, sum_x)\n # get the mag. of the resultant force\n mag_p = linalg.norm([sum_x, sum_y])\n # transform vector to fixed frame\n angle_p_bff = angle_p + angle_a_c\n # resolve resultant force in fixed frame preserve the mag.\n p = [mag_p * math.cos(angle_p_bff), mag_p * math.sin(angle_p_bff)]\n return p",
"def calc_repelling_forces(self):\n # TODO WIe sorgge ich dafuer das die dispalcement lsite hier garantiert lange genug ist\n # Diese MEthode berechnet die abstossenden Kraefte\n # BUG Ist diese(displacement list) Liste hier moeglicherweise\n # mit nicht nullwerete gefuellt was den algo. kaputmacht\n for node in self.graph_visuals.graph_nodes:\n for nodes in self.graph_visuals.graph_nodes:\n if node.id != nodes.id:\n diff = node.position - nodes.position\n diff_length = diff.abs()\n scaled_x = diff.to_unit().x * self.fr(diff_length)\n scaled_y = diff.to_unit().y * self.fr(diff_length)\n scaled_unit = Vector(scaled_x, scaled_y)\n new_force = self.displacement_list[node.id] + scaled_unit\n self.displacement_list[node.id] = new_force",
"def nonlinear_electroelastodynamics(optimise=True):\n\n mesh = Mesh()\n mesh.Parallelepiped(upper_right_front_point=(1,1,0.001),nx=10,ny=10,nz=1, element_type=\"hex\")\n\n mu = 5.0e4\n mu1 = mu\n mu2 = mu\n eps_2 = 4.0*8.8541e-12\n v = 0.4\n lamb = 2.*mu*v/(1-2.*v)\n material = IsotropicElectroMechanics_108(3, mu1=mu1, mu2=mu2, lamb=lamb, eps_2=eps_2, rho=1200.)\n\n formulation = DisplacementPotentialFormulation(mesh)\n\n\n def dirichlet_function(mesh):\n\n boundary_data = np.zeros((mesh.points.shape[0],4))+np.NAN\n\n Z_0 = np.logical_and(np.isclose(mesh.points[:,0],0.),np.isclose(mesh.points[:,2],0.))\n boundary_data[Z_0,:3] = 0.\n Z_0 = np.logical_and(np.isclose(mesh.points[:,1],0.),np.isclose(mesh.points[:,2],0.))\n boundary_data[Z_0,:3] = 0.\n Z_0 = np.logical_and(np.isclose(mesh.points[:,0],1),np.isclose(mesh.points[:,2],0.))\n boundary_data[Z_0,:3] = 0.\n Z_0 = np.logical_and(np.isclose(mesh.points[:,1],1),np.isclose(mesh.points[:,2],0.))\n boundary_data[Z_0,:3] = 0.\n\n Z_0 = np.isclose(mesh.points[:,2],0.)\n boundary_data[Z_0,3] = 0.\n\n Z_0 = np.isclose(mesh.points[:,2],.001)\n boundary_data[Z_0,3] = 9e3\n\n return boundary_data\n\n boundary_condition = BoundaryCondition()\n boundary_condition.SetDirichletCriteria(dirichlet_function, mesh)\n\n nonlinear_static_solver = FEMSolver(total_time=60.,\n number_of_load_increments=25,\n analysis_nature=\"nonlinear\",\n analysis_type=\"static\",\n newton_raphson_tolerance=1e-5,\n newton_raphson_solution_tolerance=1e-11,\n optimise=optimise,\n print_incremental_log=True,\n )\n\n nonlinear_static_results = nonlinear_static_solver.Solve(formulation=formulation, mesh=mesh,\n material=material, boundary_condition=boundary_condition)\n\n\n nonlinear_dynamic_solver = FEMSolver(total_time=60.,\n number_of_load_increments=250,\n analysis_nature=\"nonlinear\",\n analysis_type=\"dynamic\",\n newton_raphson_tolerance=1e-5,\n newton_raphson_solution_tolerance=1e-11,\n optimise=optimise,\n print_incremental_log=True,\n compute_energy_dissipation=True,\n compute_linear_momentum_dissipation=True,\n )\n\n nonlinear_dynamic_results = nonlinear_dynamic_solver.Solve(formulation=formulation, mesh=mesh,\n material=material, boundary_condition=boundary_condition)\n\n\n # boundary_condition.__reset_state__()\n # boundary_condition.SetDirichletCriteria(dirichlet_function, mesh)\n\n # nonlinear_dynamic_solver_exp = FEMSolver(total_time=6.,\n # number_of_load_increments=200000,\n # save_frequency=200000,\n # analysis_nature=\"nonlinear\",\n # analysis_type=\"dynamic\",\n # analysis_subtype=\"explicit\",\n # newton_raphson_tolerance=1e-5,\n # newton_raphson_solution_tolerance=1e-11,\n # optimise=optimise,\n # print_incremental_log=True,\n # )\n\n # nonlinear_dynamic_results_exp = nonlinear_dynamic_solver_exp.Solve(formulation=formulation, mesh=mesh,\n # material=material, boundary_condition=boundary_condition)\n\n\n boundary_condition.__reset_state__()\n boundary_condition.SetDirichletCriteria(dirichlet_function, mesh)\n\n linear_static_solver = FEMSolver(total_time=60.,\n number_of_load_increments=250,\n analysis_nature=\"linear\",\n analysis_type=\"static\",\n newton_raphson_tolerance=1e-5,\n newton_raphson_solution_tolerance=1e-11,\n optimise=optimise,\n print_incremental_log=True,\n )\n\n linear_static_results = linear_static_solver.Solve(formulation=formulation, mesh=mesh,\n material=material, boundary_condition=boundary_condition)\n\n\n boundary_condition.__reset_state__()\n boundary_condition.SetDirichletCriteria(dirichlet_function, mesh)\n\n 
linear_dynamic_solver = FEMSolver(total_time=60.,\n number_of_load_increments=1000,\n analysis_nature=\"linear\",\n analysis_type=\"dynamic\",\n newton_raphson_tolerance=1e-5,\n newton_raphson_solution_tolerance=1e-11,\n optimise=optimise,\n print_incremental_log=True,\n break_at_increment=100,\n )\n\n linear_dynamic_results = linear_dynamic_solver.Solve(formulation=formulation, mesh=mesh,\n material=material, boundary_condition=boundary_condition)\n\n\n s1 = nonlinear_static_results.GetSolutionVectors()\n s2 = nonlinear_dynamic_results.GetSolutionVectors()\n # s3 = nonlinear_dynamic_results_exp.GetSolutionVectors()\n s4 = linear_static_results.GetSolutionVectors()\n s5 = linear_dynamic_results.GetSolutionVectors()\n\n norm = lambda x: np.linalg.norm(x[:,2,-1])\n assert norm(s1) > 0.13 and norm(s1) < 0.15\n assert norm(s2) > 0.13 and norm(s2) < 0.15\n assert norm(s4) > 0.13 and norm(s4) < 0.15",
"def global_forces(elements, mats, nodes, neq, DME_mat , UC):\r\n IELCON = np.zeros([2], dtype=np.integer)\r\n nels = elements.shape[0]\r\n nnodes = 2\r\n#\r\n for el in range(nels):\r\n iet = np.int(elements[el , 1])\r\n if iet == 0:\r\n ndof = 6\r\n FG = np.zeros((nels, 6))\r\n ul = np.zeros(6)\r\n fl = np.zeros(6)\r\n elif iet == 1:\r\n ndof = 4\r\n FG = np.zeros((nels, 4))\r\n ul = np.zeros(4)\r\n fl = np.zeros(4) \r\n#\r\n for el in range(nels):\r\n#\r\n iet = np.int(elements[el , 1]) \r\n#\r\n elcoor = np.zeros([nnodes, 2])\r\n im = np.int(elements[el , 2])\r\n par0 = mats[im , 0] # Iz\r\n par1 = mats[im , 1] # Emod\r\n par2 = mats[im , 2] # A\r\n for j in range(nnodes):\r\n IELCON[j] = elements[el , j+3]\r\n elcoor[j, 0] = nodes[IELCON[j] , 1]\r\n elcoor[j, 1] = nodes[IELCON[j] , 2] \r\n for j in range(ndof):\r\n ig = DME_mat[el, j]\r\n ul[j] = UC[ig] \r\n if iet == 0: \r\n fl = reac_beam2D_global(elcoor , par0, par1 , par2 , ul)\r\n elif iet == 1: \r\n fl = reac_beam2DU_global(elcoor , par0, par1 , ul)\r\n FG[el , :] = fl[:]\r\n \r\n return FG"
] |
[
"0.6209693",
"0.61231375",
"0.60808545",
"0.5834242",
"0.5751801",
"0.5747734",
"0.5688679",
"0.5622616",
"0.55450886",
"0.55335766",
"0.55251175",
"0.54527116",
"0.5413672",
"0.54064393",
"0.5396999",
"0.5393756",
"0.53756493",
"0.5375087",
"0.53007513",
"0.5273948",
"0.5251551",
"0.52446896",
"0.52252394",
"0.5210335",
"0.5199494",
"0.51987946",
"0.51805645",
"0.5167889",
"0.5138989",
"0.51383495"
] |
0.62398463
|
0
|
US02 Birth should occur before the marriage of an individual
|
def birthBeforeMarriage(individual):
birthDate = individual.get_birth_data()[0]
marriageDates = gedcom_parser.get_marriages(individual)
if marriageDates and birthDate:
earliestMarriageDate = (min(convertGedcomDate(
date[0]) for date in marriageDates))
birthDate = convertGedcomDate(birthDate)
if earliestMarriageDate < birthDate:
print(
f"Error US02: Marriage of {individual.get_name()[0]} {individual.get_name()[1]} ({individual.get_pointer()}) occurs before their birth")
return False
else:
return True
return None
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def marriageBeforeDeath(individual):\n deathDate = individual.get_death_data()[0]\n marriageDates = gedcom_parser.get_marriages(individual)\n\n if marriageDates and deathDate:\n latestMarriageDate = (max(convertGedcomDate(\n date[0]) for date in marriageDates))\n deathDate = convertGedcomDate(deathDate)\n if latestMarriageDate > deathDate:\n print(\n f\"Error US05: Marriage of {individual.get_name()[0]} {individual.get_name()[1]} ({individual.get_pointer()}) occurs after their death\")\n return False\n else:\n return True",
"def birthBeforeDeath(individual):\n birthdate = individual.get_birth_data()[0]\n deathdate = individual.get_death_data()[0]\n if birthdate and deathdate:\n birthdate = convertGedcomDate(birthdate)\n deathdate = convertGedcomDate(deathdate)\n if deathdate < birthdate:\n print(\n f\"Error US03: Death of {individual.get_name()[0]} {individual.get_name()[1]} ({individual.get_pointer()}) occurs before their birth\")\n return False\n else:\n return True\n return None",
"def user_story_3(self):\n for person in self.individuals.values():\n if person.birthday != 'NA' and person.death != 'NA':\n if person.birthday > person.death:\n print(f'US03 - {person.name} birthday after death date on line {person._birthday_line}')",
"def check_bday(self):\n for fam in self.families.values():\n if fam.children != 'NA':\n # fam.children is either a set or 'NA' string\n for child in fam.children:\n bday = self.individuals[child].birthday\n marr = fam.married\n div = fam.divorced\n\n # if child is born before marriage date, and not yet divorced\n if marr != 'NA' and bday < marr and div == 'NA':\n print(f'US08 - {self.individuals[child].name} birthday before marriage on line {self.individuals[child]._birthday_line}')\n # if child is born more than 9 months after divorce\n if div != 'NA' and bday > div + relativedelta(months=9):\n print(f'US08 - {self.individuals[child].name} birthday before marriage on line {self.individuals[child]._birthday_line}')\n\n if fam.husb_id and fam.wife_id:\n dad = self.individuals[fam.husb_id]\n mom = self.individuals[fam.wife_id]\n # if child is born any time after mother dies\n if not mom.alive and mom.death < bday:\n print(f'US09 - {self.individuals[child].name} birthday after mom death date on line {self.individuals[child]._birthday_line}')\n # if child dies later than nine months after father dies\n if not dad.alive and dad.death + relativedelta(months=9) < bday:\n print(f'US09 - {self.individuals[child].name} birthday after dads death date on line {self.individuals[child]._birthday_line}')",
"def noBigamy(individual):\n\n\n families = gedcom_parser.get_families(individual)\n\n marriageDateRanges = []\n for family in families:\n marriageDate = None\n divorceDate = None\n for element in family.get_child_elements():\n if element.get_tag() == \"MARR\":\n marriageDate = convertGedcomDate(element.get_child_elements()[0].get_value())\n\n if element.get_tag() == \"DIV\":\n divorceDate = convertGedcomDate(element.get_child_elements()[0].get_value())\n\n if divorceDate == None:\n divorceDate = dt.now()\n\n marriageDateRanges.append((marriageDate, divorceDate))\n \n marriageDateIntervals = pandas.arrays.IntervalArray.from_tuples(marriageDateRanges)\n\n\n if marriageDateIntervals.is_non_overlapping_monotonic:\n return True\n else:\n print(\n f\"Error US11: Marriage of {individual.get_name()[0]} {individual.get_name()[1]} ({individual.get_pointer()}) occurs during another marriage\")\n return False",
"def user_story_13(self):\n for family in self.families.values():\n if family.children != 'NA':\n bday_dict = dict() # { iid1: bday1, iid2: bday1, iid3: bday2 }\n for child in family.children:\n bday_dict[child] = self.individuals[child].birthday\n for i1, i2 in itertools.combinations(bday_dict, 2):\n older = bday_dict[i1] if bday_dict[i1] < bday_dict[i2] else bday_dict[i2]\n younger = bday_dict[i1] if bday_dict[i1] >= bday_dict[i2] else bday_dict[i2]\n if older + relativedelta(days=1) < younger and younger < older + relativedelta(months=8):\n print(f'US13 - {min(self.individuals[i1].name, self.individuals[i2].name)} and {max(self.individuals[i1].name, self.individuals[i2].name)} have birthdays that are too close together on lines {min(self.individuals[i1]._birthday_line, self.individuals[i2]._birthday_line)} and {max(self.individuals[i1]._birthday_line, self.individuals[i2]._birthday_line)}')",
"def birth_check(self):\r\n if random.random() < 0.00017: # 0.0121, or 1.21%, is the yearly birth rate.\r\n birth_flag_list.append(1)\r\n # This makes the birth rate for every 5 days (73 'checks' a year) 0.00017%,\r\n # because 1 - 0.0121 = 0.9879; 98.79% is the chance of not giving birth that year.\r\n # 0.99983 ^73 = 0.9879 are the 5-day chances compounded 73 times, and 1 - 0.99983 = 0.00017.\r\n # or you could use the yearly birth rate and have birth_check only occur randomly\r\n # around once a year.\r\n if birth_flag_list != [] and self.gender == 2 and self.marriage == 1 and self.age < 55:\r\n if self.last_birth_time >= 2: # 2 years is the set birth interval; can modify\r\n self.last_birth_time = 0 # reset counter\r\n birth_flag_list.remove(1)\r\n last = self.model.number_of_humans\r\n # build more attributes\r\n age = 0\r\n gender = random.choice([1, 2])\r\n education = 0\r\n work_status = 0\r\n marriage = 0\r\n if gender == 1:\r\n age_category = 0\r\n elif gender == 2:\r\n age_category = 1\r\n ind = Human(last + 1, self.model, self.current_position, self.hh_id, age, self.resource_check,\r\n self.home_position, self.resource_position, self.resource_frequency, gender,\r\n education, work_status, marriage, self.past_hh_id, self.mig_years,\r\n self.migration_status, self.gtgp_part, self.non_gtgp_area,\r\n self.migration_network, self.mig_remittances, self.income_local_off_farm,\r\n self.last_birth_time, self.death_rate, age_category)\r\n self.model.schedule.add(ind)\r\n self.model.number_of_humans += 1\r\n hh_size_list[self.hh_id] += 1\r\n human_birth_list.append(last + 1)\r\n if ind.gender == 1:\r\n human_demographic_structure_list[0] += 1\r\n elif ind.gender == 2:\r\n human_demographic_structure_list[10] += 1",
"def birth_date_or_min_year(individual):\n year = fuzzy_date_year(individual.birth_date)\n if year:\n return year\n return 0",
"def user_story_01(self):\n td=datetime.today()\n for person in self.individuals.values():\n pb=person.birthday\n pd=person.death\n if pb !=\"NA\" and pb>td:\n print(f'US01 - {person.name} birthday after today on line {person._birthday_line}')\n if pd !=\"NA\" and pd>td:\n print(f'US01 - {person.name} death after today on line {person._death_line}')\n for family in self.families.values():\n fm=family.married \n fd=family.divorced\n if fm !=\"NA\" and fm>td:\n print(f'US01 - {self.individuals[family.wife_id].name} marriage after today on line {family._married_line}')\n if fd !=\"NA\" and fd>td:\n print(f'US01 - {self.individuals[family.husb_id].name} divorce after today on line {family._divorced_line}')",
"def eligiblePresident(age,bornInHomeland):\n return (age>=35) and bornInHomeland",
"def user_story_4(self):\n for family in self.families.values():\n if family.married != 'NA':\n if family.wife_id != 'NA' and family.husb_id != 'NA' and family.divorced != 'NA':\n if family.divorced < family.married:\n print(\n f'US04 - {self.individuals[family.wife_id].name} and {self.individuals[family.husb_id].name} married after divorce on line {family._married_line}')",
"def valid_age(line):\n dob = line.o_DOB\n if not _is_21(dob):\n rule = 'Allowed age'\n new_row = Error(e_name=rule, order_key=line.primary_key)\n line.errors.append(new_row)\n return False\n return True",
"def test_birth_validation(self):",
"def DeathBeforeLife(User):\n if User.date_of_death is None: return\n if User.date_of_death < User.date_of_birth:\n raise interface.Invalid(\n _(u\"One cannot die before being born\"),\n \"date_of_death\", \n \"date_of_birth\")",
"def compare_bachelors_1980(data):\n d = data[(data['Year'] == 1980) & (data['Min degree'] == 'bachelor\\'s')]\\\n .groupby(['Sex'])['Total'].sum().loc[['M', 'F']]\n return (d[0], d[1])",
"def datesBeforeCurrentDate(individual):\n birthdate = individual.get_birth_data()[0]\n deathdate = individual.get_death_data()[0]\n marriageDates = gedcom_parser.get_marriages(individual)\n\n fams = gedcom_parser.get_families(individual)\n childElements = [(fam.get_child_elements()) for fam in fams]\n\n divorceDates = []\n for elements in childElements:\n for element in elements:\n if element.get_tag() == \"DIV\":\n divorceDates.append(element.get_child_elements()[0].get_value())\n\n\n latestDivorceDate = max(convertGedcomDate(date)\n for date in divorceDates) if divorceDates else None\n latestMarriageDate = max(convertGedcomDate(\n date[0]) for date in marriageDates) if marriageDates else None\n birthdate = convertGedcomDate(birthdate) if birthdate else None\n deathdate = convertGedcomDate(deathdate) if deathdate else None\n\n comparisonDates = [birthdate, deathdate,\n latestMarriageDate, latestDivorceDate]\n\n if any(day > dt.now() for day in comparisonDates if day):\n print(\n f\"Error US05: Date associated with {individual.get_name()[0]} {individual.get_name()[1]} ({individual.get_pointer()}) occurs after current date\")\n return False\n else:\n return True",
"def user_story_5(self):\n for family in self.families.values():\n if family.married != 'NA':\n if family.wife_id != 'NA':\n if self.individuals[family.wife_id].death != 'NA':\n if self.individuals[family.wife_id].death < family.married:\n print(\n f'US05 - {self.individuals[family.wife_id].name} married after individual death date on line {family._married_line}')\n\n if family.husb_id != 'NA':\n if self.individuals[family.husb_id].death != 'NA':\n if self.individuals[family.husb_id].death < family.married:\n print(\n f'US05 - {self.individuals[family.husb_id].name} married after individual death date on line {family._married_line}')",
"def test_date_of_birth_good_values(self):\n for input_val, output_val in self.known_values:\n self.line._parse_date_of_birth(input_val)\n self.assertEqual(output_val, self.line.date_of_birth)",
"def test_last_name(self, unromanized, romanized, expected):\n with mute_signals(post_save):\n profile = ExamProfileFactory(\n profile__last_name=unromanized,\n profile__romanized_last_name=romanized,\n )\n assert CDDWriter.last_name(profile) == expected",
"def test_patient_one_date_of_birth(self):\r\n self.assertEqual(self.test_patient.dateOfBirth, datetime.date(2000, 2, 13))",
"def test_ave_age_range(step):\n diff = step[\"ave_birth\"] - step[\"birth\"]\n assert 0 < diff < 15E6",
"def test_date_by_gt_yr_mo(self):\n spi_search = \"find date > 1978-10-21\"\n inv_search = 'year:1978-10-21->9999'\n self._compare_searches(inv_search, spi_search)",
"def test_first_last_middle_name(self):\n\t\tformatted_name = get_formatted_name('Wolfgang','mozart','amadues')\n\t\tself.assertEqual(formatted_name,'Wolfgang Amadues Mozart')",
"def test_patient_date_of_birth(self):\r\n self.assertEqual(self.test_patient.dateOfBirth, '2000-01-01')",
"def test_date_by_lt_yr_mo(self):\n spi_search = \"find date < 1978-10-21\"\n inv_search = 'year:0->1978-10-21'\n self._compare_searches(inv_search, spi_search)",
"def test_last_millenium(self):\n term, rmd = util.parse_date(\"old paper 9505\")\n ym = util.parse_date_partial(term)\n self.assertEqual(ym, \"1995-05\")\n self.assertEqual(rmd, \"old paper\", \"Should have a remainder\")",
"def test_init_invalid_order(self):\n with self.assertRaises(ValueError):\n Person(\n self.initial_year, self.name, self.birth_date,\n retirement_date=self.birth_date - relativedelta(days=1))",
"def test_first_last_name(self):\n formatted_name = get_formatted_name('janis', 'joplin')\n # Asserting that formatted_name equals 'Janis Joplin'\n self.assertEqual(formatted_name, 'Janis Joplin')",
"def test_first_last_middle_name(self):\n formatted_name = get_formatted_name('marie', 'curie', 'francis')\n self.assertEqual(formatted_name, 'Marie Francis Curie')",
"def validate_birth_year(passport: map) -> bool:\n if passport.get('byr'):\n if int(passport['byr']) >= 1920 and int(passport['byr']) <= 2002:\n return True\n\n return False"
] |
[
"0.7354835",
"0.69403464",
"0.6383716",
"0.606869",
"0.5849964",
"0.5793744",
"0.57224643",
"0.5720761",
"0.57011163",
"0.569727",
"0.56274605",
"0.5543742",
"0.55336505",
"0.545788",
"0.54456013",
"0.5355954",
"0.53553337",
"0.5348341",
"0.5312725",
"0.5301524",
"0.5298418",
"0.529182",
"0.52893156",
"0.5253411",
"0.5239463",
"0.52154267",
"0.52075946",
"0.5203826",
"0.5203248",
"0.5202828"
] |
0.8175209
|
0
|
US03 Birth should occur before death of an individual
|
def birthBeforeDeath(individual):
birthdate = individual.get_birth_data()[0]
deathdate = individual.get_death_data()[0]
if birthdate and deathdate:
birthdate = convertGedcomDate(birthdate)
deathdate = convertGedcomDate(deathdate)
if deathdate < birthdate:
print(
f"Error US03: Death of {individual.get_name()[0]} {individual.get_name()[1]} ({individual.get_pointer()}) occurs before their birth")
return False
else:
return True
return None
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def user_story_3(self):\n for person in self.individuals.values():\n if person.birthday != 'NA' and person.death != 'NA':\n if person.birthday > person.death:\n print(f'US03 - {person.name} birthday after death date on line {person._birthday_line}')",
"def marriageBeforeDeath(individual):\n deathDate = individual.get_death_data()[0]\n marriageDates = gedcom_parser.get_marriages(individual)\n\n if marriageDates and deathDate:\n latestMarriageDate = (max(convertGedcomDate(\n date[0]) for date in marriageDates))\n deathDate = convertGedcomDate(deathDate)\n if latestMarriageDate > deathDate:\n print(\n f\"Error US05: Marriage of {individual.get_name()[0]} {individual.get_name()[1]} ({individual.get_pointer()}) occurs after their death\")\n return False\n else:\n return True",
"def DeathBeforeLife(User):\n if User.date_of_death is None: return\n if User.date_of_death < User.date_of_birth:\n raise interface.Invalid(\n _(u\"One cannot die before being born\"),\n \"date_of_death\", \n \"date_of_birth\")",
"def birthBeforeMarriage(individual):\n birthDate = individual.get_birth_data()[0]\n marriageDates = gedcom_parser.get_marriages(individual)\n\n if marriageDates and birthDate:\n earliestMarriageDate = (min(convertGedcomDate(\n date[0]) for date in marriageDates))\n birthDate = convertGedcomDate(birthDate)\n if earliestMarriageDate < birthDate:\n print(\n f\"Error US02: Marriage of {individual.get_name()[0]} {individual.get_name()[1]} ({individual.get_pointer()}) occurs before their birth\")\n return False\n else:\n return True\n return None",
"def check_bday(self):\n for fam in self.families.values():\n if fam.children != 'NA':\n # fam.children is either a set or 'NA' string\n for child in fam.children:\n bday = self.individuals[child].birthday\n marr = fam.married\n div = fam.divorced\n\n # if child is born before marriage date, and not yet divorced\n if marr != 'NA' and bday < marr and div == 'NA':\n print(f'US08 - {self.individuals[child].name} birthday before marriage on line {self.individuals[child]._birthday_line}')\n # if child is born more than 9 months after divorce\n if div != 'NA' and bday > div + relativedelta(months=9):\n print(f'US08 - {self.individuals[child].name} birthday before marriage on line {self.individuals[child]._birthday_line}')\n\n if fam.husb_id and fam.wife_id:\n dad = self.individuals[fam.husb_id]\n mom = self.individuals[fam.wife_id]\n # if child is born any time after mother dies\n if not mom.alive and mom.death < bday:\n print(f'US09 - {self.individuals[child].name} birthday after mom death date on line {self.individuals[child]._birthday_line}')\n # if child dies later than nine months after father dies\n if not dad.alive and dad.death + relativedelta(months=9) < bday:\n print(f'US09 - {self.individuals[child].name} birthday after dads death date on line {self.individuals[child]._birthday_line}')",
"def birth_check(self):\r\n if random.random() < 0.00017: # 0.0121, or 1.21%, is the yearly birth rate.\r\n birth_flag_list.append(1)\r\n # This makes the birth rate for every 5 days (73 'checks' a year) 0.00017%,\r\n # because 1 - 0.0121 = 0.9879; 98.79% is the chance of not giving birth that year.\r\n # 0.99983 ^73 = 0.9879 are the 5-day chances compounded 73 times, and 1 - 0.99983 = 0.00017.\r\n # or you could use the yearly birth rate and have birth_check only occur randomly\r\n # around once a year.\r\n if birth_flag_list != [] and self.gender == 2 and self.marriage == 1 and self.age < 55:\r\n if self.last_birth_time >= 2: # 2 years is the set birth interval; can modify\r\n self.last_birth_time = 0 # reset counter\r\n birth_flag_list.remove(1)\r\n last = self.model.number_of_humans\r\n # build more attributes\r\n age = 0\r\n gender = random.choice([1, 2])\r\n education = 0\r\n work_status = 0\r\n marriage = 0\r\n if gender == 1:\r\n age_category = 0\r\n elif gender == 2:\r\n age_category = 1\r\n ind = Human(last + 1, self.model, self.current_position, self.hh_id, age, self.resource_check,\r\n self.home_position, self.resource_position, self.resource_frequency, gender,\r\n education, work_status, marriage, self.past_hh_id, self.mig_years,\r\n self.migration_status, self.gtgp_part, self.non_gtgp_area,\r\n self.migration_network, self.mig_remittances, self.income_local_off_farm,\r\n self.last_birth_time, self.death_rate, age_category)\r\n self.model.schedule.add(ind)\r\n self.model.number_of_humans += 1\r\n hh_size_list[self.hh_id] += 1\r\n human_birth_list.append(last + 1)\r\n if ind.gender == 1:\r\n human_demographic_structure_list[0] += 1\r\n elif ind.gender == 2:\r\n human_demographic_structure_list[10] += 1",
"def user_story_13(self):\n for family in self.families.values():\n if family.children != 'NA':\n bday_dict = dict() # { iid1: bday1, iid2: bday1, iid3: bday2 }\n for child in family.children:\n bday_dict[child] = self.individuals[child].birthday\n for i1, i2 in itertools.combinations(bday_dict, 2):\n older = bday_dict[i1] if bday_dict[i1] < bday_dict[i2] else bday_dict[i2]\n younger = bday_dict[i1] if bday_dict[i1] >= bday_dict[i2] else bday_dict[i2]\n if older + relativedelta(days=1) < younger and younger < older + relativedelta(months=8):\n print(f'US13 - {min(self.individuals[i1].name, self.individuals[i2].name)} and {max(self.individuals[i1].name, self.individuals[i2].name)} have birthdays that are too close together on lines {min(self.individuals[i1]._birthday_line, self.individuals[i2]._birthday_line)} and {max(self.individuals[i1]._birthday_line, self.individuals[i2]._birthday_line)}')",
"def user_story_01(self):\n td=datetime.today()\n for person in self.individuals.values():\n pb=person.birthday\n pd=person.death\n if pb !=\"NA\" and pb>td:\n print(f'US01 - {person.name} birthday after today on line {person._birthday_line}')\n if pd !=\"NA\" and pd>td:\n print(f'US01 - {person.name} death after today on line {person._death_line}')\n for family in self.families.values():\n fm=family.married \n fd=family.divorced\n if fm !=\"NA\" and fm>td:\n print(f'US01 - {self.individuals[family.wife_id].name} marriage after today on line {family._married_line}')\n if fd !=\"NA\" and fd>td:\n print(f'US01 - {self.individuals[family.husb_id].name} divorce after today on line {family._divorced_line}')",
"def age_check(self):\r\n # check working status\r\n if 15 <= float(self.age) < 59:\r\n if self.work_status == 0:\r\n self.work_status = 1\r\n num_labor_list[self.hh_id] += 1\r\n labor_list.append(self.unique_id)\r\n if self.work_status == 1 and self.unique_id not in labor_list:\r\n labor_list.append(self.unique_id)\r\n else:\r\n self.work_status = 0\r\n\r\n # check education status; measured in years of education\r\n if 7 <= int(self.age) <= 19:\r\n if random.random() > 0.1:\r\n self.education += 1\r\n # most adults in the FNNR did not get a full 12-13 years of education\r\n elif 19 < float(self.age) < 23 and self.migration_status == 1:\r\n if random.random() < 0.5:\r\n self.education += 1 # went to college and got further education\r\n # this is rare; in the household list, a few received beyond 12 years of education\r\n\r\n # check age-based death rates\r\n if self.age > 65:\r\n self.death_rate = 0.001443 # 5-day death rate\r\n # The average death rate in China is 7.3 per 1,000 people/year, or 0.0073 (Google).\r\n # However, death rates should be higher for the elderly, or else the population structure will skew.\r\n # I set death rates for those over age 65 to be 10% per year--0.9 yearly survival rate.\r\n # The survival rate for each 5-day step is compounded 73 times, so x^73 = 0.85.\r\n # 0.998557 is the 5-day survival rate, and 1 - x is the 5-day death rate.\r\n else:\r\n self.death_rate = 0.00000425\r\n # I wanted people to have a 98% chance of reaching age 65 (death rate is lower if not elderly).\r\n # If a 'check' is every 5 days, 73 checks/year * 65 years = 4,745 checks.\r\n # x^4745 = 0.98; the 5-day survival rate is 0.99999575, and 1 - x is the 5-day death rate.\r\n\r\n # These rates are changeable later.\r",
"def label_birth_death(tree):\n\n _bump_zero_distance_children(tree)\n\n # all the speciciation nodes have pre-defined times\n _label_starter_nodes(tree)\n\n # now we need to label all remaining nodes\n # best achieved in topological order\n for node in [n for n in nx.topological_sort(tree) if 't_death' not in tree.node[n]]:\n _determine_t_death(tree, node)\n\n _add_t_births_and_lengths(tree)\n\n return",
"def test_patient_one_date_of_birth(self):\r\n self.assertEqual(self.test_patient.dateOfBirth, datetime.date(2000, 2, 13))",
"def test_date_of_birth_good_values(self):\n for input_val, output_val in self.known_values:\n self.line._parse_date_of_birth(input_val)\n self.assertEqual(output_val, self.line.date_of_birth)",
"def test_birth_validation(self):",
"def test_patient_date_of_birth(self):\r\n self.assertEqual(self.test_patient.dateOfBirth, '2000-01-01')",
"def eligiblePresident(age,bornInHomeland):\n return (age>=35) and bornInHomeland",
"def update_death_birth(self, t, ind, mother):\n\n if ind:\n orphans = self.P.death(t, ind)\n self.P.process_orphans(t, orphans, float(self.params['adult_age']), self.rng)\n\n if mother:\n sex = self.rng.randint(0, 1)\n new_ind = self.P.birth(t, self.rng, mother, mother.partner, sex)\n return new_ind\n\n return None",
"def test_date_of_birth(self):\n entries = {'uid=test,ou=people,dc=esmgquadrivium,dc=nl': {\n 'uid': ['test'],\n 'qDateOfBirth': [19951226],\n }}\n clone(entries)\n self.assertEqual(date(1995, 12, 26), Person.objects.first().date_of_birth)",
"def _check_birthday(self):\n for employee in self:\n if employee.birthday and employee.gender:\n diff = relativedelta(datetime.today(), datetime.strptime(employee.birthday, DEFAULT_SERVER_DATE_FORMAT))\n if employee.gender == \"male\" and abs(diff.years) < 18:\n raise ValidationError(_(\"Male employee's age must be greater than 18\"))\n elif employee.gender == 'female' and abs(diff.years) < 21:\n raise ValidationError(_(\"Female Employee's age must be greater than 21.\"))",
"def test_ave_age_range(step):\n diff = step[\"ave_birth\"] - step[\"birth\"]\n assert 0 < diff < 15E6",
"def death_check(self):\r\n chance = random.random()\r\n if decimal.Decimal(chance) < decimal.Decimal(self.death_rate):\r\n if self.unique_id in head_of_household_list:\r\n try:\r\n head_of_household_list[self.hh_id] = 0\r\n except TypeError: # head of household migrated\r\n head_of_household_list[self.past_hh_id] = 0\r\n self.model.number_of_humans -= 1\r\n if self.unique_id in labor_list:\r\n labor_list.remove(self.unique_id)\r\n if self.work_status == 1:\r\n try:\r\n num_labor_list[self.hh_id] -= 1\r\n except TypeError:\r\n num_labor_list[self.past_hh_id] -= 1\r\n if self.unique_id in former_hoh_list:\r\n try:\r\n former_hoh_list[self.hh_id] = 0\r\n except:\r\n former_hoh_list[self.past_hh_id] = 0\r\n if [self.unique_id, self.hh_id] in single_male_list:\r\n single_male_list.remove([self.unique_id, self.hh_id])\r\n if self.unique_id in married_male_list:\r\n married_male_list.remove(self.unique_id)\r\n human_death_list.append(self.unique_id)\r\n try:\r\n hh_size_list[self.hh_id] -= 1\r\n except:\r\n hh_size_list[self.past_hh_id] -= 1\r\n human_demographic_structure_list[self.age_category] -= 1\r\n\r\n self.model.schedule.remove(self)\r\n if self in self.model.grid:\r\n self.model.grid.remove_agent(self)",
"def user_story_6(self):\n for family in self.families.values():\n if family.divorced != 'NA':\n if family.wife_id != 'NA':\n if self.individuals[family.wife_id].death != 'NA':\n if self.individuals[family.wife_id].death < family.divorced:\n print(f'US06 - {self.individuals[family.wife_id].name} divorce after individual death date on line {family._divorced_line}')\n\n if family.husb_id != 'NA':\n if self.individuals[family.husb_id].death != 'NA':\n if self.individuals[family.husb_id].death < family.divorced:\n print(f'US06 - {self.individuals[family.husb_id].name} divorce after individual death date on line {family._divorced_line}')",
"def test_init_invalid_order(self):\n with self.assertRaises(ValueError):\n Person(\n self.initial_year, self.name, self.birth_date,\n retirement_date=self.birth_date - relativedelta(days=1))",
"def noBigamy(individual):\n\n\n families = gedcom_parser.get_families(individual)\n\n marriageDateRanges = []\n for family in families:\n marriageDate = None\n divorceDate = None\n for element in family.get_child_elements():\n if element.get_tag() == \"MARR\":\n marriageDate = convertGedcomDate(element.get_child_elements()[0].get_value())\n\n if element.get_tag() == \"DIV\":\n divorceDate = convertGedcomDate(element.get_child_elements()[0].get_value())\n\n if divorceDate == None:\n divorceDate = dt.now()\n\n marriageDateRanges.append((marriageDate, divorceDate))\n \n marriageDateIntervals = pandas.arrays.IntervalArray.from_tuples(marriageDateRanges)\n\n\n if marriageDateIntervals.is_non_overlapping_monotonic:\n return True\n else:\n print(\n f\"Error US11: Marriage of {individual.get_name()[0]} {individual.get_name()[1]} ({individual.get_pointer()}) occurs during another marriage\")\n return False",
"def clean_birth_date(self):\n\n sent_date = self.cleaned_data['birth_date']\n current_date = localtime(now()).date()\n\n if (sent_date - current_date) > datetime.timedelta(seconds=1):\n raise ValidationError(INVALID_BIRTH_DATE_VALUE)\n else:\n return sent_date",
"def user_story_4(self):\n for family in self.families.values():\n if family.married != 'NA':\n if family.wife_id != 'NA' and family.husb_id != 'NA' and family.divorced != 'NA':\n if family.divorced < family.married:\n print(\n f'US04 - {self.individuals[family.wife_id].name} and {self.individuals[family.husb_id].name} married after divorce on line {family._married_line}')",
"def test_DeadPersonTrue (self) :\n\t\t\n\t\tself.person1.updateHealth ()\n\t\tself.assertEqual(self.person1.getHealth(), 0)\n\t\tself.assertTrue(self.person1.DeadPerson())",
"def birthdeath(lgca):\n birth = npr.random(lgca.nodes.shape) < lgca.r_b * lgca.cell_density[..., None] / lgca.K\n death = npr.random(lgca.nodes.shape) < lgca.r_d\n ds = (1 - lgca.nodes) * birth - lgca.nodes * death\n np.add(lgca.nodes, ds, out=lgca.nodes, casting='unsafe')\n random_walk(lgca)",
"def get_age(self):\n today = datetime.now()\n return today.year \\\n - self.date_of_birth.year \\\n - ((today.month, self.date_of_birth.day) \\\n < (self.date_of_birth.month, self.date_of_birth.day))",
"def user_story_5(self):\n for family in self.families.values():\n if family.married != 'NA':\n if family.wife_id != 'NA':\n if self.individuals[family.wife_id].death != 'NA':\n if self.individuals[family.wife_id].death < family.married:\n print(\n f'US05 - {self.individuals[family.wife_id].name} married after individual death date on line {family._married_line}')\n\n if family.husb_id != 'NA':\n if self.individuals[family.husb_id].death != 'NA':\n if self.individuals[family.husb_id].death < family.married:\n print(\n f'US05 - {self.individuals[family.husb_id].name} married after individual death date on line {family._married_line}')",
"def test_init_invalid_birth_date(self):\n # Whether this raises a ValueError or TypeError is an\n # implementation detail delegated to `datetime`\n with self.assertRaises((ValueError, TypeError)):\n Person(\n self.initial_year, self.name, 'invalid',\n retirement_date=self.retirement_date)"
] |
[
"0.73368835",
"0.7128599",
"0.7022836",
"0.6697646",
"0.658086",
"0.64116395",
"0.6272369",
"0.615616",
"0.6134359",
"0.6054821",
"0.60294044",
"0.5999488",
"0.59691304",
"0.5946575",
"0.5878821",
"0.5743965",
"0.5718721",
"0.5715728",
"0.56893015",
"0.56744665",
"0.5630451",
"0.5621765",
"0.5599069",
"0.55660284",
"0.55622",
"0.55104274",
"0.54945755",
"0.548884",
"0.5480117",
"0.5478454"
] |
0.7989253
|
0
|
US01 Dates (birth, marriage, divorce, death) should not be after the current date
|
def datesBeforeCurrentDate(individual):
birthdate = individual.get_birth_data()[0]
deathdate = individual.get_death_data()[0]
marriageDates = gedcom_parser.get_marriages(individual)
fams = gedcom_parser.get_families(individual)
childElements = [(fam.get_child_elements()) for fam in fams]
divorceDates = []
for elements in childElements:
for element in elements:
if element.get_tag() == "DIV":
divorceDates.append(element.get_child_elements()[0].get_value())
latestDivorceDate = max(convertGedcomDate(date)
for date in divorceDates) if divorceDates else None
latestMarriageDate = max(convertGedcomDate(
date[0]) for date in marriageDates) if marriageDates else None
birthdate = convertGedcomDate(birthdate) if birthdate else None
deathdate = convertGedcomDate(deathdate) if deathdate else None
comparisonDates = [birthdate, deathdate,
latestMarriageDate, latestDivorceDate]
if any(day > dt.now() for day in comparisonDates if day):
print(
f"Error US05: Date associated with {individual.get_name()[0]} {individual.get_name()[1]} ({individual.get_pointer()}) occurs after current date")
return False
else:
return True
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def clean_birth_date(self):\n\n sent_date = self.cleaned_data['birth_date']\n current_date = localtime(now()).date()\n\n if (sent_date - current_date) > datetime.timedelta(seconds=1):\n raise ValidationError(INVALID_BIRTH_DATE_VALUE)\n else:\n return sent_date",
"def DeathBeforeLife(User):\n if User.date_of_death is None: return\n if User.date_of_death < User.date_of_birth:\n raise interface.Invalid(\n _(u\"One cannot die before being born\"),\n \"date_of_death\", \n \"date_of_birth\")",
"def test_date_of_birth_good_values(self):\n for input_val, output_val in self.known_values:\n self.line._parse_date_of_birth(input_val)\n self.assertEqual(output_val, self.line.date_of_birth)",
"def test_future_birth_date_import():\n _curr_date = datetime.utcnow()\n _later_date = _curr_date + relativedelta(days=1)\n later_date = _later_date.strftime(\"%d.%m.%Y\")\n\n citizen_with_birth_date_later_than_current = deepcopy(CITIZEN_EXAMPLE)\n citizen_with_birth_date_later_than_current[\"birth_date\"] = later_date\n with TestClient(app) as client:\n response = client.post(\n \"/imports\",\n json={\n \"citizens\": [\n citizen_with_birth_date_later_than_current\n ]}\n )\n\n assert response.status_code == 400",
"def knowledge_date_valid(record):\n today = datetime.now(timezone.utc).date().strftime(\"%Y-%m-%d\")\n gen_date = record['knowledge_date'].strftime(\"%Y-%m-%d\")\n assert gen_date == today",
"def test_patient_one_date_of_birth(self):\r\n self.assertEqual(self.test_patient.dateOfBirth, datetime.date(2000, 2, 13))",
"def birthBeforeDeath(individual):\n birthdate = individual.get_birth_data()[0]\n deathdate = individual.get_death_data()[0]\n if birthdate and deathdate:\n birthdate = convertGedcomDate(birthdate)\n deathdate = convertGedcomDate(deathdate)\n if deathdate < birthdate:\n print(\n f\"Error US03: Death of {individual.get_name()[0]} {individual.get_name()[1]} ({individual.get_pointer()}) occurs before their birth\")\n return False\n else:\n return True\n return None",
"def test_form_date_validation(self):\n\n form = My_add_data_form(data={'date': date(1800, 05, 03)})\n self.assertEqual(form.errors['date'], ['You already dead now'])\n form = My_add_data_form(data={'date': date(2200, 05, 03)})\n self.assertEqual(form.errors['date'], ['You not born yet'])",
"def marriageBeforeDeath(individual):\n deathDate = individual.get_death_data()[0]\n marriageDates = gedcom_parser.get_marriages(individual)\n\n if marriageDates and deathDate:\n latestMarriageDate = (max(convertGedcomDate(\n date[0]) for date in marriageDates))\n deathDate = convertGedcomDate(deathDate)\n if latestMarriageDate > deathDate:\n print(\n f\"Error US05: Marriage of {individual.get_name()[0]} {individual.get_name()[1]} ({individual.get_pointer()}) occurs after their death\")\n return False\n else:\n return True",
"def _check_dates(self):\n for record in self:\n if record.end_date and record.end_date < record.start_date:\n raise exceptions.Warning(\n _('Agreement end date must be greater than start date'))",
"def test_date_of_birth_bad_value(self):\n for val in self.bad_values:\n self.assertRaises(line_format_errors.FieldParseError,\n lambda: self.line._parse_date_of_birth(val))",
"def birthdate_validator(birthdate):\n max_year = birthdate.replace(year=(birthdate.year + 100))\n min_year = birthdate.replace(year=(birthdate.year + 10))\n today = datetime.date.today()\n if today > max_year or today < min_year:\n raise ValidationError(\n _('%(birthdate)s is not a valid birthdate'),\n params={'birthdate': birthdate},\n )",
"def clean_date(self):\r\n from datetime import datetime\r\n\r\n date = self.cleaned_data[\"date\"]\r\n if date < datetime.now():\r\n self.add_error(\"date\", \"You cannot add a date for the past.\")\r\n return date",
"def test_bad_date_1(self):\n result = self.client.get(\"/search?origin=ORD%2C+Chicago+IL&destination=DFW%2C+Dallas+TX&date=2017-01-01\")\n self.assertNotIn('<meter value=\"70\"', result.data)\n self.assertIn('enter a valid date', result.data)",
"def test_07_no_future_records(self):\n bad_date = timezone.now() + timedelta(days=1)\n record = SwimRecord(record_date=bad_date)\n try:\n record.full_clean()\n except ValidationError as e:\n self.assertTrue(\"Can't set record in the future.\" in e.message_dict['record_date'])",
"def check(self):\n validity_year = int(self.date[0:4])\n validity_month = int(self.date[5:7])\n validity_day = int(self.date[8:10])\n if datetime.today().year > validity_year:\n self.flag = False\n elif datetime.today().year == validity_year:\n if datetime.today().month > validity_month:\n self.flag = False\n elif datetime.today().month == validity_month:\n if datetime.today().day > validity_day:\n self.flag = False\n else:\n self.flag = True\n else:\n self.flag = True\n else:\n self.flag = True",
"def test_bad_date_3(self):\n result = self.client.get(\"/search?origin=ORD%2C+Chicago+IL&destination=DFW%2C+Dallas+TX&date=\")\n self.assertNotIn('<meter value=\"70\"', result.data)\n self.assertIn('enter a valid date', result.data)",
"def _check_birthday(self):\n for employee in self:\n if employee.birthday and employee.gender:\n diff = relativedelta(datetime.today(), datetime.strptime(employee.birthday, DEFAULT_SERVER_DATE_FORMAT))\n if employee.gender == \"male\" and abs(diff.years) < 18:\n raise ValidationError(_(\"Male employee's age must be greater than 18\"))\n elif employee.gender == 'female' and abs(diff.years) < 21:\n raise ValidationError(_(\"Female Employee's age must be greater than 21.\"))",
"def normalize_dates(end_date, start_date, today_date):\n if start_date < today_date or end_date < today_date:\n return {'status': False, 'message': 'Sorry, you cannot enter a past date'}\n elif end_date < start_date:\n return {'status': False, 'message': 'Sorry, end date must be after start date'}\n else:\n return {'status': True, 'message': 'Validation successful'}",
"def test_date2_lower_date1(self):\n date1 = datetime.date(2019, 5, 2)\n date2 = datetime.date(2019, 5, 1)\n\n self.assertFalse(self.expander.is_same_date_month_ahead(date1, date2))",
"def test_bad_date_2(self):\n result = self.client.get(\"/search?origin=ORD%2C+Chicago+IL&destination=DFW%2C+Dallas+TX&date=2020-01-01\")\n self.assertNotIn('<meter value=\"70\"', result.data)\n self.assertIn('enter a valid date', result.data)",
"def validate_future(value: date):\n if value < date.today():\n err = f\"{value} est déjà passé\"\n raise ValidationError(err)",
"def clean_date(self):\n input_day = self.cleaned_data.get('day')\n input_date = self.cleaned_data.get('date')\n if input_date < datetime.date.today():\n raise forms.ValidationError(\"Can not create a lesson in the past.\")\n elif input_date.strftime(\"%A\").lower() != input_day:\n raise forms.ValidationError(input_date.strftime(\"%d-%m-%Y\")+\" does not fall on a \"+input_day.title()+\".\")\n return input_date",
"def test_patient_date_of_birth(self):\r\n self.assertEqual(self.test_patient.dateOfBirth, '2000-01-01')",
"def test_holidays():\n\n assert not datetime.datetime(2003, 12, 25) in TRADING_DATES\n assert not datetime.datetime(2003, 5, 26) in TRADING_DATES # memorial day",
"def settlement_position_effective_date_valid(record):\n knowledge_date = record['knowledge_date']\n effective_date = record['effective_date']\n assert effective_date == knowledge_date",
"def test_form_invalid_date_in_past(self):\n login = self.client.login(\n username='testuser2',\n password='2HJ1vRV0Z&3iD')\n past_date = datetime.date.today() - datetime.timedelta(days=1)\n response = self.client.post(\n reverse('librarian-renew-book',\n kwargs={'pk': self.test_bookinstance2.pk}),\n {'due_back': past_date})\n self.assertEqual(response.status_code, 200)\n self.assertFormError(response,\n 'form',\n 'due_back',\n 'Invalid date - renewal in the past')",
"def test_date_accept_date_minus_many_days(self):\n spi_search = \"find date 2011-02-24 - 946\"\n inv_search = \"year:2008-07-23\"\n self._compare_searches(inv_search, spi_search)",
"def test_date_of_birth(self):\n entries = {'uid=test,ou=people,dc=esmgquadrivium,dc=nl': {\n 'uid': ['test'],\n 'qDateOfBirth': [19951226],\n }}\n clone(entries)\n self.assertEqual(date(1995, 12, 26), Person.objects.first().date_of_birth)",
"def test_non_holidays(self):\n # January 2nd was not public holiday between 2012 and 2017\n self.assertNotIn(date(2013, 1, 2), self.holidays)\n self.assertNotIn(date(2014, 1, 2), self.holidays)\n self.assertNotIn(date(2015, 1, 2), self.holidays)\n self.assertNotIn(date(2016, 1, 2), self.holidays)"
] |
[
"0.6859254",
"0.6827661",
"0.67296493",
"0.6612987",
"0.65961987",
"0.6590323",
"0.65531206",
"0.6381926",
"0.6303823",
"0.626078",
"0.6253482",
"0.62472546",
"0.6229059",
"0.6218926",
"0.62039614",
"0.6194757",
"0.6192037",
"0.6190664",
"0.61717904",
"0.61649835",
"0.61579776",
"0.6151315",
"0.61498165",
"0.6143896",
"0.61418295",
"0.6105096",
"0.6051667",
"0.6021952",
"0.5981323",
"0.5977477"
] |
0.6945236
|
0
|
US05 Marriage should occur before death of either spouse
|
def marriageBeforeDeath(individual):
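    # US05: an error is reported when the individual's latest marriage date falls after their death date.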
deathDate = individual.get_death_data()[0]
marriageDates = gedcom_parser.get_marriages(individual)
if marriageDates and deathDate:
latestMarriageDate = (max(convertGedcomDate(
date[0]) for date in marriageDates))
deathDate = convertGedcomDate(deathDate)
if latestMarriageDate > deathDate:
print(
f"Error US05: Marriage of {individual.get_name()[0]} {individual.get_name()[1]} ({individual.get_pointer()}) occurs after their death")
return False
else:
return True
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def birthBeforeMarriage(individual):\n birthDate = individual.get_birth_data()[0]\n marriageDates = gedcom_parser.get_marriages(individual)\n\n if marriageDates and birthDate:\n earliestMarriageDate = (min(convertGedcomDate(\n date[0]) for date in marriageDates))\n birthDate = convertGedcomDate(birthDate)\n if earliestMarriageDate < birthDate:\n print(\n f\"Error US02: Marriage of {individual.get_name()[0]} {individual.get_name()[1]} ({individual.get_pointer()}) occurs before their birth\")\n return False\n else:\n return True\n return None",
"def birthBeforeDeath(individual):\n birthdate = individual.get_birth_data()[0]\n deathdate = individual.get_death_data()[0]\n if birthdate and deathdate:\n birthdate = convertGedcomDate(birthdate)\n deathdate = convertGedcomDate(deathdate)\n if deathdate < birthdate:\n print(\n f\"Error US03: Death of {individual.get_name()[0]} {individual.get_name()[1]} ({individual.get_pointer()}) occurs before their birth\")\n return False\n else:\n return True\n return None",
"def user_story_3(self):\n for person in self.individuals.values():\n if person.birthday != 'NA' and person.death != 'NA':\n if person.birthday > person.death:\n print(f'US03 - {person.name} birthday after death date on line {person._birthday_line}')",
"def user_story_4(self):\n for family in self.families.values():\n if family.married != 'NA':\n if family.wife_id != 'NA' and family.husb_id != 'NA' and family.divorced != 'NA':\n if family.divorced < family.married:\n print(\n f'US04 - {self.individuals[family.wife_id].name} and {self.individuals[family.husb_id].name} married after divorce on line {family._married_line}')",
"def check_bday(self):\n for fam in self.families.values():\n if fam.children != 'NA':\n # fam.children is either a set or 'NA' string\n for child in fam.children:\n bday = self.individuals[child].birthday\n marr = fam.married\n div = fam.divorced\n\n # if child is born before marriage date, and not yet divorced\n if marr != 'NA' and bday < marr and div == 'NA':\n print(f'US08 - {self.individuals[child].name} birthday before marriage on line {self.individuals[child]._birthday_line}')\n # if child is born more than 9 months after divorce\n if div != 'NA' and bday > div + relativedelta(months=9):\n print(f'US08 - {self.individuals[child].name} birthday before marriage on line {self.individuals[child]._birthday_line}')\n\n if fam.husb_id and fam.wife_id:\n dad = self.individuals[fam.husb_id]\n mom = self.individuals[fam.wife_id]\n # if child is born any time after mother dies\n if not mom.alive and mom.death < bday:\n print(f'US09 - {self.individuals[child].name} birthday after mom death date on line {self.individuals[child]._birthday_line}')\n # if child dies later than nine months after father dies\n if not dad.alive and dad.death + relativedelta(months=9) < bday:\n print(f'US09 - {self.individuals[child].name} birthday after dads death date on line {self.individuals[child]._birthday_line}')",
"def user_story_5(self):\n for family in self.families.values():\n if family.married != 'NA':\n if family.wife_id != 'NA':\n if self.individuals[family.wife_id].death != 'NA':\n if self.individuals[family.wife_id].death < family.married:\n print(\n f'US05 - {self.individuals[family.wife_id].name} married after individual death date on line {family._married_line}')\n\n if family.husb_id != 'NA':\n if self.individuals[family.husb_id].death != 'NA':\n if self.individuals[family.husb_id].death < family.married:\n print(\n f'US05 - {self.individuals[family.husb_id].name} married after individual death date on line {family._married_line}')",
"def noBigamy(individual):\n\n\n families = gedcom_parser.get_families(individual)\n\n marriageDateRanges = []\n for family in families:\n marriageDate = None\n divorceDate = None\n for element in family.get_child_elements():\n if element.get_tag() == \"MARR\":\n marriageDate = convertGedcomDate(element.get_child_elements()[0].get_value())\n\n if element.get_tag() == \"DIV\":\n divorceDate = convertGedcomDate(element.get_child_elements()[0].get_value())\n\n if divorceDate == None:\n divorceDate = dt.now()\n\n marriageDateRanges.append((marriageDate, divorceDate))\n \n marriageDateIntervals = pandas.arrays.IntervalArray.from_tuples(marriageDateRanges)\n\n\n if marriageDateIntervals.is_non_overlapping_monotonic:\n return True\n else:\n print(\n f\"Error US11: Marriage of {individual.get_name()[0]} {individual.get_name()[1]} ({individual.get_pointer()}) occurs during another marriage\")\n return False",
"def user_story_13(self):\n for family in self.families.values():\n if family.children != 'NA':\n bday_dict = dict() # { iid1: bday1, iid2: bday1, iid3: bday2 }\n for child in family.children:\n bday_dict[child] = self.individuals[child].birthday\n for i1, i2 in itertools.combinations(bday_dict, 2):\n older = bday_dict[i1] if bday_dict[i1] < bday_dict[i2] else bday_dict[i2]\n younger = bday_dict[i1] if bday_dict[i1] >= bday_dict[i2] else bday_dict[i2]\n if older + relativedelta(days=1) < younger and younger < older + relativedelta(months=8):\n print(f'US13 - {min(self.individuals[i1].name, self.individuals[i2].name)} and {max(self.individuals[i1].name, self.individuals[i2].name)} have birthdays that are too close together on lines {min(self.individuals[i1]._birthday_line, self.individuals[i2]._birthday_line)} and {max(self.individuals[i1]._birthday_line, self.individuals[i2]._birthday_line)}')",
"def death_check(self):\r\n chance = random.random()\r\n if decimal.Decimal(chance) < decimal.Decimal(self.death_rate):\r\n if self.unique_id in head_of_household_list:\r\n try:\r\n head_of_household_list[self.hh_id] = 0\r\n except TypeError: # head of household migrated\r\n head_of_household_list[self.past_hh_id] = 0\r\n self.model.number_of_humans -= 1\r\n if self.unique_id in labor_list:\r\n labor_list.remove(self.unique_id)\r\n if self.work_status == 1:\r\n try:\r\n num_labor_list[self.hh_id] -= 1\r\n except TypeError:\r\n num_labor_list[self.past_hh_id] -= 1\r\n if self.unique_id in former_hoh_list:\r\n try:\r\n former_hoh_list[self.hh_id] = 0\r\n except:\r\n former_hoh_list[self.past_hh_id] = 0\r\n if [self.unique_id, self.hh_id] in single_male_list:\r\n single_male_list.remove([self.unique_id, self.hh_id])\r\n if self.unique_id in married_male_list:\r\n married_male_list.remove(self.unique_id)\r\n human_death_list.append(self.unique_id)\r\n try:\r\n hh_size_list[self.hh_id] -= 1\r\n except:\r\n hh_size_list[self.past_hh_id] -= 1\r\n human_demographic_structure_list[self.age_category] -= 1\r\n\r\n self.model.schedule.remove(self)\r\n if self in self.model.grid:\r\n self.model.grid.remove_agent(self)",
"def test_fantasy_status_nov_1(league):\n nov_1 = datetime.datetime(2019,11,1)\n players = league.as_of(nov_1).all_players()\n # make sure sammy blais is not a free agent, he was picked up oct 31\n assert(players.loc[6544, 'fantasy_status'] != 'FA')",
"def test_does_not_die_carn(self):\n self.carn.fitness = 1\n nt.assert_false(self.carn.death())",
"def user_story_01(self):\n td=datetime.today()\n for person in self.individuals.values():\n pb=person.birthday\n pd=person.death\n if pb !=\"NA\" and pb>td:\n print(f'US01 - {person.name} birthday after today on line {person._birthday_line}')\n if pd !=\"NA\" and pd>td:\n print(f'US01 - {person.name} death after today on line {person._death_line}')\n for family in self.families.values():\n fm=family.married \n fd=family.divorced\n if fm !=\"NA\" and fm>td:\n print(f'US01 - {self.individuals[family.wife_id].name} marriage after today on line {family._married_line}')\n if fd !=\"NA\" and fd>td:\n print(f'US01 - {self.individuals[family.husb_id].name} divorce after today on line {family._divorced_line}')",
"def user_story_6(self):\n for family in self.families.values():\n if family.divorced != 'NA':\n if family.wife_id != 'NA':\n if self.individuals[family.wife_id].death != 'NA':\n if self.individuals[family.wife_id].death < family.divorced:\n print(f'US06 - {self.individuals[family.wife_id].name} divorce after individual death date on line {family._divorced_line}')\n\n if family.husb_id != 'NA':\n if self.individuals[family.husb_id].death != 'NA':\n if self.individuals[family.husb_id].death < family.divorced:\n print(f'US06 - {self.individuals[family.husb_id].name} divorce after individual death date on line {family._divorced_line}')",
"def check_nonHappiness(content):\n baseline = 0\n happiness = [\"Smile\"]\n AU12baseline = 20\n label = 50\n # print content\n emotion_time = content[0][1]\n # print 'emotion_time',emotion_time\n for c in content:\n for h in happiness:\n # print h\n # the hapiness value is exactly 0 and AU12 is below AU12 baseline, then a person is non happy\n if c[0] == h and c[1] == baseline and content[12][1] <= AU12baseline and content[13][1] <= AU12baseline:\n print 'emotion & label',emotion_time, label\n return emotion_time, label",
"def testProtractedNSESanityChecks(self):\n self.assertGreater(self.c3.get_species_richness(1), self.c2.get_species_richness(1))\n self.assertLess(self.c4.get_species_richness(1), self.c3.get_species_richness(1))",
"def event_m20_11_x73(z52=_):\n \"\"\"State 0,1: Defeat determination\"\"\"\n IsChrDead(0, z52)\n assert ConditionGroup(0)\n \"\"\"State 2: End state\"\"\"\n return 0",
"def risk_assess(s):",
"def test_orangered_victory(self):\n self.assertEqual(None, self.sapphire.owner)\n sess = self.sess\n self.battle.create_skirmish(self.alice, 5)\n\n self.battle.ends = self.battle.begins\n sess.commit()\n updates = Battle.update_all(sess)\n sess.commit()\n\n self.assertNotEqual(len(updates['ended']), 0)\n self.assertEqual(updates[\"ended\"][0], self.battle)\n self.assertEqual(0, self.sapphire.owner)",
"def good_standing(self):\n # Date of last AR or founding date if they haven't yet filed one\n last_ar_date = self.last_ar_date or self.founding_date\n # Good standing is if last AR was filed within the past 1 year, 2 months and 1 day\n return last_ar_date + datedelta.datedelta(years=1, months=2, days=1) > datetime.utcnow()",
"def age_check(self):\r\n # check working status\r\n if 15 <= float(self.age) < 59:\r\n if self.work_status == 0:\r\n self.work_status = 1\r\n num_labor_list[self.hh_id] += 1\r\n labor_list.append(self.unique_id)\r\n if self.work_status == 1 and self.unique_id not in labor_list:\r\n labor_list.append(self.unique_id)\r\n else:\r\n self.work_status = 0\r\n\r\n # check education status; measured in years of education\r\n if 7 <= int(self.age) <= 19:\r\n if random.random() > 0.1:\r\n self.education += 1\r\n # most adults in the FNNR did not get a full 12-13 years of education\r\n elif 19 < float(self.age) < 23 and self.migration_status == 1:\r\n if random.random() < 0.5:\r\n self.education += 1 # went to college and got further education\r\n # this is rare; in the household list, a few received beyond 12 years of education\r\n\r\n # check age-based death rates\r\n if self.age > 65:\r\n self.death_rate = 0.001443 # 5-day death rate\r\n # The average death rate in China is 7.3 per 1,000 people/year, or 0.0073 (Google).\r\n # However, death rates should be higher for the elderly, or else the population structure will skew.\r\n # I set death rates for those over age 65 to be 10% per year--0.9 yearly survival rate.\r\n # The survival rate for each 5-day step is compounded 73 times, so x^73 = 0.85.\r\n # 0.998557 is the 5-day survival rate, and 1 - x is the 5-day death rate.\r\n else:\r\n self.death_rate = 0.00000425\r\n # I wanted people to have a 98% chance of reaching age 65 (death rate is lower if not elderly).\r\n # If a 'check' is every 5 days, 73 checks/year * 65 years = 4,745 checks.\r\n # x^4745 = 0.98; the 5-day survival rate is 0.99999575, and 1 - x is the 5-day death rate.\r\n\r\n # These rates are changeable later.\r",
"def test_reward_after_battle(self):\n self.assertEqual(self.alice.loyalists, 100)\n self.assertEqual(self.bob.loyalists, 100)\n\n s1 = self.battle.create_skirmish(self.alice, 50)\n s1.react(self.bob, 50, troop_type=\"cavalry\")\n\n self.end_battle(self.battle, self.conf)\n\n # Bob wins the fight and the war\n self.assertEqual(self.battle.victor, self.bob.team)\n\n # Alice should have gotten a 10% reward (5 troops)\n self.assertEqual(self.alice.loyalists, 105)\n # Bob gets 15% (7 troops)\n self.assertEqual(self.bob.loyalists, 107)",
"def test_does_die_carn(self):\n self.carn.fitness = 0\n self.carn.params[\"omega\"] = 1\n nt.assert_true(self.carn.death())",
"def event_m20_11_x99(z30=20115070, z31=20115500):\n \"\"\"State 0,1: Got King Soul?\"\"\"\n WasObjItemAcquired(0, z30, 1)\n WasObjItemAcquired(0, z31, 1)\n assert ConditionGroup(0)\n \"\"\"State 2: End state\"\"\"\n return 0",
"def event_m20_11_6000():\n \"\"\"State 0,2: [Preset] Door of the living person_SubState\"\"\"\n assert event_m20_11_x77(z42=20110480, z43=600000)\n \"\"\"State 1: Finish\"\"\"\n EndMachine()",
"def birth_check(self):\r\n if random.random() < 0.00017: # 0.0121, or 1.21%, is the yearly birth rate.\r\n birth_flag_list.append(1)\r\n # This makes the birth rate for every 5 days (73 'checks' a year) 0.00017%,\r\n # because 1 - 0.0121 = 0.9879; 98.79% is the chance of not giving birth that year.\r\n # 0.99983 ^73 = 0.9879 are the 5-day chances compounded 73 times, and 1 - 0.99983 = 0.00017.\r\n # or you could use the yearly birth rate and have birth_check only occur randomly\r\n # around once a year.\r\n if birth_flag_list != [] and self.gender == 2 and self.marriage == 1 and self.age < 55:\r\n if self.last_birth_time >= 2: # 2 years is the set birth interval; can modify\r\n self.last_birth_time = 0 # reset counter\r\n birth_flag_list.remove(1)\r\n last = self.model.number_of_humans\r\n # build more attributes\r\n age = 0\r\n gender = random.choice([1, 2])\r\n education = 0\r\n work_status = 0\r\n marriage = 0\r\n if gender == 1:\r\n age_category = 0\r\n elif gender == 2:\r\n age_category = 1\r\n ind = Human(last + 1, self.model, self.current_position, self.hh_id, age, self.resource_check,\r\n self.home_position, self.resource_position, self.resource_frequency, gender,\r\n education, work_status, marriage, self.past_hh_id, self.mig_years,\r\n self.migration_status, self.gtgp_part, self.non_gtgp_area,\r\n self.migration_network, self.mig_remittances, self.income_local_off_farm,\r\n self.last_birth_time, self.death_rate, age_category)\r\n self.model.schedule.add(ind)\r\n self.model.number_of_humans += 1\r\n hh_size_list[self.hh_id] += 1\r\n human_birth_list.append(last + 1)\r\n if ind.gender == 1:\r\n human_demographic_structure_list[0] += 1\r\n elif ind.gender == 2:\r\n human_demographic_structure_list[10] += 1",
"def test_half_life():\n assert np.isclose(\n half_life(\"tritium\").to(u.s).value, (12.32 * u.yr).to(u.s).value, rtol=2e-4\n ), \"Incorrect half-life for tritium.\"",
"def starve_checker(hunger):\n death_chance = -30\n hunger -= 1\n\n if (death_chance * (hunger-1)) > random.randint(1,100):\n death = True\n else:\n color.write(\"Somehow, through divine intervention, you manage to survive though the pain, although you know that the end is near. You should definitely eat something.\\n\",\"ERROR\")\n death = False\n return death",
"def test_does_not_die(self):\n self.herb.fitness = 1\n nt.assert_false(self.herb.death())",
"def on_or_before(self, ordinal):\n a5 = self.pancawara - 1\n a6 = self.sadwara - 1\n b7 = self.saptawara - 1\n b35 = (a5 + 14 + (15 * (b7 - a5))) % 35\n days = a6 + (36 * (b35 - a6))\n cap_Delta = self.day_fromordinal(0)\n return ordinal - (ordinal + cap_Delta - days) % 210",
"def event11512041():\n header(11512041, 1)\n if_event_flag_on(0, EVENT.DarkAnorLondo)\n if_entity_dead(1, 1510195)\n end_if_condition_true(1)\n # Change lone Darkwraith near Pale Demons to 'charm', which is basically 'battle friend'.\n chr.set_team_type(1510195, TeamType.charm)\n if_entity_dead(2, 1510110)\n if_entity_dead(2, 1510111)\n if_entity_dead(2, 1510112)\n if_condition_true(0, 2)\n # When all three Pale Demons are dead, switch back to enemy.\n chr.set_team_type(1510195, TeamType.enemy)"
] |
[
"0.6608855",
"0.6395794",
"0.62768626",
"0.6059355",
"0.5927261",
"0.59164596",
"0.5904102",
"0.5811521",
"0.5769977",
"0.57258",
"0.5644478",
"0.5615973",
"0.5540922",
"0.55232954",
"0.5492104",
"0.5450482",
"0.543772",
"0.5406157",
"0.53945804",
"0.53908473",
"0.53890926",
"0.5380795",
"0.53759634",
"0.5358123",
"0.5314803",
"0.5307066",
"0.52905655",
"0.52902585",
"0.5289148",
"0.5285805"
] |
0.74599016
|
0
|
US11 Marriage should not occur during marriage to another spouse
|
def noBigamy(individual):
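    # US11: build a (marriage, divorce) interval for each of the individual's families and flag any overlap.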
families = gedcom_parser.get_families(individual)
marriageDateRanges = []
for family in families:
marriageDate = None
divorceDate = None
for element in family.get_child_elements():
if element.get_tag() == "MARR":
marriageDate = convertGedcomDate(element.get_child_elements()[0].get_value())
if element.get_tag() == "DIV":
divorceDate = convertGedcomDate(element.get_child_elements()[0].get_value())
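        # A marriage with no DIV record is treated as still active, so its interval runs up to the present.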
if divorceDate == None:
divorceDate = dt.now()
marriageDateRanges.append((marriageDate, divorceDate))
marriageDateIntervals = pandas.arrays.IntervalArray.from_tuples(marriageDateRanges)
if marriageDateIntervals.is_non_overlapping_monotonic:
return True
else:
print(
f"Error US11: Marriage of {individual.get_name()[0]} {individual.get_name()[1]} ({individual.get_pointer()}) occurs during another marriage")
return False
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def marriageBeforeDeath(individual):\n deathDate = individual.get_death_data()[0]\n marriageDates = gedcom_parser.get_marriages(individual)\n\n if marriageDates and deathDate:\n latestMarriageDate = (max(convertGedcomDate(\n date[0]) for date in marriageDates))\n deathDate = convertGedcomDate(deathDate)\n if latestMarriageDate > deathDate:\n print(\n f\"Error US05: Marriage of {individual.get_name()[0]} {individual.get_name()[1]} ({individual.get_pointer()}) occurs after their death\")\n return False\n else:\n return True",
"def birthBeforeMarriage(individual):\n birthDate = individual.get_birth_data()[0]\n marriageDates = gedcom_parser.get_marriages(individual)\n\n if marriageDates and birthDate:\n earliestMarriageDate = (min(convertGedcomDate(\n date[0]) for date in marriageDates))\n birthDate = convertGedcomDate(birthDate)\n if earliestMarriageDate < birthDate:\n print(\n f\"Error US02: Marriage of {individual.get_name()[0]} {individual.get_name()[1]} ({individual.get_pointer()}) occurs before their birth\")\n return False\n else:\n return True\n return None",
"def _remove_noise_in_o2m():\n if line.reconcile_partial_id:\n if currency_id == line.currency_id.id:\n if line.amount_residual_currency <= 0:\n return True\n else:\n if line.amount_residual <= 0:\n return True\n return False",
"def _remove_noise_in_o2m():\n if line.reconcile_partial_id:\n if currency_id == line.currency_id.id:\n if line.amount_residual_currency <= 0:\n return True\n else:\n if line.amount_residual <= 0:\n return True\n return False",
"def _remove_noise_in_o2m():\n if line.reconcile_partial_id:\n sign = 1\n if currency_id == line.currency_id.id:\n if line.amount_residual_currency * sign <= 0:\n return True\n else:\n if line.amount_residual * sign <= 0:\n return True\n return False",
"def violated(self) -> bool:\n ...",
"def test_disallow_retreat(self):\n self.battle.create_skirmish(self.alice, 1)\n londo = self.get_region(\"Orange Londo\")\n\n with self.assertRaises(db.InProgressException):\n self.alice.move(100, londo, 0)\n\n n = (self.sess.query(db.MarchingOrder).\n filter_by(leader=self.alice)).count()\n self.assertEqual(n, 0)",
"def non_negative_transition_revenue_rule(_m):\r\n\r\n return sum(m.YEAR_SCHEME_REVENUE[y] for y in m.Y if y <= self.transition_year) >= 0",
"def nonphysicalxs_remotion(a2_data,res_nufi_removal):\n for i in a2_data['I'].keys():\n if i=='MACR' and res_nufi_removal==True:\n if 'nufi' in a2_data['I'][i]['R'].keys():\n a2_data['I'][i]['R'].pop('nufi')\n for r in a2_data['I'][i]['R'].keys():\n if any(x in r for x in ['111', '112', '122', '212', '222', '211', '322',\n '321', '312', '311', '221', '121']):\n a2_data['I'][i]['R'].pop(r)\n return a2_data",
"def test_08_no_break_record_before_set_record(self):\n record = SwimRecord(first_name='j',last_name='j',team_name='k',relay=True,stroke='butterfly',distance=100,record_date=timezone.now(),record_broken_date=(timezone.now() - timedelta(days=1)))\n record.save()\n try:\n record.full_clean()\n except ValidationError as e:\n self.assertTrue(\"Can't break record before record was set.\" in e.message_dict['record_broken_date'])",
"def test_shift_ruptures_no_shift(midday):\n shift_mask, shift_amounts = time.shifts_ruptures(\n midday, midday\n )\n assert not shift_mask.any()\n assert_series_equal(\n shift_amounts,\n pd.Series(0, index=midday.index, dtype='int64'),\n check_names=False\n )",
"def test_can_mispair(self):\n assert not self.RNA(\"\").can_mispair(\"\")\n assert self.RNA(\"N\").can_mispair(\"N\")\n assert self.RNA(\"R\").can_mispair(\"Y\")\n assert self.RNA(\"N\").can_mispair(\"r\")\n assert self.RNA(\"CGUACGCAN\").can_mispair(\"NUHCHUACH\")\n assert self.RNA(\"U\").can_mispair(\"C\")\n assert self.RNA(\"U\").can_mispair(\"R\")\n assert self.RNA(\"UUU\").can_mispair(\"AAR\")\n assert self.RNA(\"UUU\").can_mispair(\"GAG\")\n assert not self.RNA(\"UUU\").can_mispair(\"AAA\")\n assert not self.RNA(\"UCAG\").can_mispair(\"CUGA\")\n assert self.RNA(\"U--\").can_mispair(\"--U\")\n\n assert self.DNA(\"TCCAAAGRYY\").can_mispair(\"RRYCTTTGGA\")",
"def user_story_4(self):\n for family in self.families.values():\n if family.married != 'NA':\n if family.wife_id != 'NA' and family.husb_id != 'NA' and family.divorced != 'NA':\n if family.divorced < family.married:\n print(\n f'US04 - {self.individuals[family.wife_id].name} and {self.individuals[family.husb_id].name} married after divorce on line {family._married_line}')",
"def bust(person):\n if person.total > GOAL_TOTAL() and person.aceCount == 0:\n return True\n elif person.total > GOAL_TOTAL() and person.aceCount > 0:\n adjust_ace(person)\n return person.total > GOAL_TOTAL()\n else: # person.total <= GOAL_TOTAL()\n return False",
"def test_block_bad_consensus(self):\n pass",
"def second_vespers_suppressed(self):\n return self._has_rubric('officium terminatur post nonam')",
"def test_avalanche_warning_by_region_obs(self):\n pass",
"def event_m20_11_x99(z30=20115070, z31=20115500):\n \"\"\"State 0,1: Got King Soul?\"\"\"\n WasObjItemAcquired(0, z30, 1)\n WasObjItemAcquired(0, z31, 1)\n assert ConditionGroup(0)\n \"\"\"State 2: End state\"\"\"\n return 0",
"def check_nonHappiness(content):\n baseline = 0\n happiness = [\"Smile\"]\n AU12baseline = 20\n label = 50\n # print content\n emotion_time = content[0][1]\n # print 'emotion_time',emotion_time\n for c in content:\n for h in happiness:\n # print h\n # the hapiness value is exactly 0 and AU12 is below AU12 baseline, then a person is non happy\n if c[0] == h and c[1] == baseline and content[12][1] <= AU12baseline and content[13][1] <= AU12baseline:\n print 'emotion & label',emotion_time, label\n return emotion_time, label",
"def test_contains_month_false(self):\n ary = self.ar = awstats_reader.AwstatsReader(test_file_dir, 'jjncj.com')[2009]\n self.assertFalse(10 in ary)",
"def clue(self):\n if self.item == \"receipt\":\n print(\"The receipt reads that Jay bought 'diltiazem' medication 4 days ago.\")\n print(\"Diltiazem: medication for high blood pressure, when \"\n \"consumed by an individual in large quantities without high blood\"\n \"pressure, can cause heart failure.\")\n else:\n print(\"That is the wrong item!\")",
"def isspeech(phone):\n return phone not in OTHERS",
"def risk_assess(s):",
"def if_not_matched(disease):\n\t\tprint(\"\")\n\t\tid_disease = disease\n\t\tdisease_details = get_details(id_disease)\n\t\ttreatments = get_treatments(id_disease)\n\t\tprint(\"\")\n\t\tprint(\"The most probable disease that you have is %s\\n\" %(id_disease))\n\t\tprint(\"A short description of the disease is given below :\\n\")\n\t\tprint(disease_details+\"\\n\")\n\t\tprint(\"The common medications and procedures suggested by other real doctors are: \\n\")\n\t\tprint(treatments+\"\\n\")",
"def MR_rate_clean(self, mech_rates):\n for rate_tuple in self['MR_rate']:\n \n if rate_tuple not in mech_rates.keys():\n self['MR_rate'].remove(rate_tuple)\n print (\"Removed \" + str(rate_tuple) + \" from MR_rate\")\n \n #check for rate to change in MR params\n for _rtc in self['rate_to_change']:\n rtc_tuple = r_tuple_from_r_name(mech_rates, _rtc)\n \n if rtc_tuple not in self['MR_avoid'] and not self['MR_avoid_preserve']:\n #this blanket hack will remove any special info in MR_avoid\n #flag can be used to make MR_avoid invulnerable\n \n self['MR_avoid'].append(rtc_tuple)\n print (\"Adding \"+str(rtc_tuple)+\" to MR_avoid (now: \"+ str(self['MR_avoid'])+\" )\\n\")\n \n #take the rate to change out of MR use \n if rtc_tuple in self['MR_rate']:\n self['MR_rate'].remove(rtc_tuple)",
"def is_inequality(self): \n return False",
"def should_pay_attention(self):\n return random.randint(1,100) > self.wander",
"def test_initial_records_are_preserved(self):\n input_ = [\n self.indicator_record(date=datetime.date(1998, 5, 1), value=0.50),\n self.indicator_record(date=datetime.date(1998, 6, 1), value=0.02),\n self.indicator_record(date=datetime.date(1998, 7, 1), value=-0.12),\n ]\n records = self.expander._ipca_from_15_expander(input_)\n\n same_date_values = [record.date == records[index_].date and\n record.value == records[index_].value\n for index_, record in enumerate(input_)]\n\n self.assertTrue(all(same_date_values))",
"def check_note_for_history(self):\r\n testrun_notes = [\r\n \"multiple loci suspected\",\r\n \"suspected multicopy, poor performance\",\r\n \"fixed allele 1\",\r\n \"very poor amplification\",\r\n \"very poor amplification, high off target percent\",\r\n \"poor amplification, maybe redesign\",\r\n \"mono-allele 1?\",\r\n \"redesign primer\",\r\n \"most of target\",\r\n \"poor performance\",\r\n \"poor performance, primers off target\",\r\n \"off target amp\",\r\n \"mono-allele 1\",\r\n \"mono-allele 2 and off target\",\r\n \"Nate said it is a mess\",\r\n \"off target amp\",\r\n \"mono-allele 1 and off target\"\r\n ]\r\n if self.note == \"No primers made by primer3\":\r\n self.add_history(\"2018-2-12\",\"Nate\",\"primers were not made for this sequence variation\")\r\n self.note = \"sequence variant selected by GBS-SNP-selection\"\r\n elif self.note == \"Removed by nate, close to other SNP\":\r\n self.add_history(\"2018-2-19\",\"Nate\",\"Primers designed for this SNP were taken out, were to close to other SNP\")\r\n self.note = \"sequence variant selected by GBS-SNP-selection\"\r\n elif self.note == \"Predicted to form hetrodymer\":\r\n self.add_history(\"2018-2-19\",\"Nate\",\"Predicted to form hetrodymer\")\r\n self.note = \"sequence variant selected by GBS-SNP-selection\"\r\n elif self.note == \"no valid primer pair could be made for this position\":\r\n self.note = \"sequence variant selected by GBS-SNP-selection\"\r\n elif self.note in testrun_notes:\r\n self.add_history(\"2018-2-23\",\"Thomas\",self.note)\r\n self.note = \"sequence variant selected by GBS-SNP-selection\"\r\n #check if any were missed.\r\n if self.active and self.note != \"sequence variant selected by GBS-SNP-selection\":\r\n pass #print(self.note)\r",
"def test_phonebook_with_duplicate_entries_is_inconsostent(self):\n self.phonebook.add(\"Bob\", \"12345\")\n self.phonebook.add(\"Mary\", \"12345\")\n self.assertFalse(self.phonebook.is_consistent())"
] |
[
"0.61908466",
"0.5837422",
"0.57115346",
"0.57115346",
"0.5583499",
"0.551999",
"0.5438785",
"0.52849215",
"0.52452075",
"0.5241142",
"0.5138036",
"0.5130192",
"0.5087213",
"0.50658804",
"0.50608194",
"0.50542825",
"0.5035876",
"0.50106514",
"0.49922475",
"0.49858934",
"0.49668103",
"0.49449557",
"0.49165478",
"0.4914226",
"0.49139237",
"0.49127957",
"0.4901989",
"0.48916999",
"0.4891461",
"0.48910365"
] |
0.61542344
|
1
|
US14 No more than five siblings should be born at the same time
|
def multipleBirths(family):
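    # US14: gather the children's birth dates and report an error when too many fall on the same date.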
children = gedcom_parser.get_family_members(family, 'FAMILY_MEMBERS_TYPE_CHILDREN')
birthdays = []
for child in children:
birthdays.append(convertGedcomDate(child.get_birth_data()[0]))
if len(birthdays) < 5:
return True
else:
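        # Tally siblings per exact birth date; more than five on a single date is an error.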
birthdayCounts = dict((i, birthdays.count(i)) for i in birthdays)
        if len({k:v for (k,v) in birthdayCounts.items() if v > 5}) > 0:
print(
f"Error US14: More than 5 siblings born at once in {family.get_value()} family ({family.get_pointer()})")
return False
else:
return True
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def user_story_13(self):\n for family in self.families.values():\n if family.children != 'NA':\n bday_dict = dict() # { iid1: bday1, iid2: bday1, iid3: bday2 }\n for child in family.children:\n bday_dict[child] = self.individuals[child].birthday\n for i1, i2 in itertools.combinations(bday_dict, 2):\n older = bday_dict[i1] if bday_dict[i1] < bday_dict[i2] else bday_dict[i2]\n younger = bday_dict[i1] if bday_dict[i1] >= bday_dict[i2] else bday_dict[i2]\n if older + relativedelta(days=1) < younger and younger < older + relativedelta(months=8):\n print(f'US13 - {min(self.individuals[i1].name, self.individuals[i2].name)} and {max(self.individuals[i1].name, self.individuals[i2].name)} have birthdays that are too close together on lines {min(self.individuals[i1]._birthday_line, self.individuals[i2]._birthday_line)} and {max(self.individuals[i1]._birthday_line, self.individuals[i2]._birthday_line)}')",
"def check_bday(self):\n for fam in self.families.values():\n if fam.children != 'NA':\n # fam.children is either a set or 'NA' string\n for child in fam.children:\n bday = self.individuals[child].birthday\n marr = fam.married\n div = fam.divorced\n\n # if child is born before marriage date, and not yet divorced\n if marr != 'NA' and bday < marr and div == 'NA':\n print(f'US08 - {self.individuals[child].name} birthday before marriage on line {self.individuals[child]._birthday_line}')\n # if child is born more than 9 months after divorce\n if div != 'NA' and bday > div + relativedelta(months=9):\n print(f'US08 - {self.individuals[child].name} birthday before marriage on line {self.individuals[child]._birthday_line}')\n\n if fam.husb_id and fam.wife_id:\n dad = self.individuals[fam.husb_id]\n mom = self.individuals[fam.wife_id]\n # if child is born any time after mother dies\n if not mom.alive and mom.death < bday:\n print(f'US09 - {self.individuals[child].name} birthday after mom death date on line {self.individuals[child]._birthday_line}')\n # if child dies later than nine months after father dies\n if not dad.alive and dad.death + relativedelta(months=9) < bday:\n print(f'US09 - {self.individuals[child].name} birthday after dads death date on line {self.individuals[child]._birthday_line}')",
"def us14_multiple_births(repo):\n error = []\n\n for fam in repo.families.values():\n children = fam['CHIL']['detail']\n if children != 'NA' and len(children) > 5:\n date = {}\n for child in list(children):\n child_birth = repo.individuals[child]['BIRT']['detail']\n if child_birth not in date.keys() and not date:\n date[child_birth] = 1\n elif child_birth in date.keys():\n date[child_birth] += 1\n else:\n for day in date.keys():\n if abs(child_birth-day).days <= 1:\n date[day] += 1\n\n for num in date.values():\n if num > 5:\n error_message = f'Family{fam.fam_id} multiple birth more than 5!'\n error.append(('ERROR', 'FAMILY', 'US14', fam.id_line, fam.fam_id, error_message))\n\n return error",
"def test_family_reservation_min_amount_of_childs(self):\n start_date = datetime.now()\n\n reservation_list = [\n BikeReservationPerHour(start_date) \n ,BikeReservationPerDay(start_date)\n ]\n\n with self.assertRaises(InvalidAmountOfBikeReservationsOnFamiliyError):\n family_reservation = FamilyBikeReservation(reservation_list)",
"def test_family_reservation_max_amount_of_childs(self):\n start_date = datetime.now()\n\n reservation_list = [\n BikeReservationPerHour(start_date) \n ,BikeReservationPerDay(start_date)\n ,BikeReservationPerDay(start_date)\n ,BikeReservationPerWeek(start_date)\n ,BikeReservationPerWeek(start_date)\n ,BikeReservationPerWeek(start_date)\n ]\n\n with self.assertRaises(InvalidAmountOfBikeReservationsOnFamiliyError):\n family_reservation = FamilyBikeReservation(reservation_list)",
"def user_story_18(self):\n for f1 in self.families.values():\n for f2 in self.families.values():\n if f2.husb_id in f1.children and f2.wife_id in f1.children:\n try:\n print(f\"US18 - {self.individuals[f2.husb_id].name} and {self.individuals[f2.wife_id].name} are siblings and are married on line {f2._married_line}\")\n except KeyError:\n print(f'US18 - Siblings married each other.')",
"def birth_check(self):\r\n if random.random() < 0.00017: # 0.0121, or 1.21%, is the yearly birth rate.\r\n birth_flag_list.append(1)\r\n # This makes the birth rate for every 5 days (73 'checks' a year) 0.00017%,\r\n # because 1 - 0.0121 = 0.9879; 98.79% is the chance of not giving birth that year.\r\n # 0.99983 ^73 = 0.9879 are the 5-day chances compounded 73 times, and 1 - 0.99983 = 0.00017.\r\n # or you could use the yearly birth rate and have birth_check only occur randomly\r\n # around once a year.\r\n if birth_flag_list != [] and self.gender == 2 and self.marriage == 1 and self.age < 55:\r\n if self.last_birth_time >= 2: # 2 years is the set birth interval; can modify\r\n self.last_birth_time = 0 # reset counter\r\n birth_flag_list.remove(1)\r\n last = self.model.number_of_humans\r\n # build more attributes\r\n age = 0\r\n gender = random.choice([1, 2])\r\n education = 0\r\n work_status = 0\r\n marriage = 0\r\n if gender == 1:\r\n age_category = 0\r\n elif gender == 2:\r\n age_category = 1\r\n ind = Human(last + 1, self.model, self.current_position, self.hh_id, age, self.resource_check,\r\n self.home_position, self.resource_position, self.resource_frequency, gender,\r\n education, work_status, marriage, self.past_hh_id, self.mig_years,\r\n self.migration_status, self.gtgp_part, self.non_gtgp_area,\r\n self.migration_network, self.mig_remittances, self.income_local_off_farm,\r\n self.last_birth_time, self.death_rate, age_category)\r\n self.model.schedule.add(ind)\r\n self.model.number_of_humans += 1\r\n hh_size_list[self.hh_id] += 1\r\n human_birth_list.append(last + 1)\r\n if ind.gender == 1:\r\n human_demographic_structure_list[0] += 1\r\n elif ind.gender == 2:\r\n human_demographic_structure_list[10] += 1",
"def age_check(self):\r\n # check working status\r\n if 15 <= float(self.age) < 59:\r\n if self.work_status == 0:\r\n self.work_status = 1\r\n num_labor_list[self.hh_id] += 1\r\n labor_list.append(self.unique_id)\r\n if self.work_status == 1 and self.unique_id not in labor_list:\r\n labor_list.append(self.unique_id)\r\n else:\r\n self.work_status = 0\r\n\r\n # check education status; measured in years of education\r\n if 7 <= int(self.age) <= 19:\r\n if random.random() > 0.1:\r\n self.education += 1\r\n # most adults in the FNNR did not get a full 12-13 years of education\r\n elif 19 < float(self.age) < 23 and self.migration_status == 1:\r\n if random.random() < 0.5:\r\n self.education += 1 # went to college and got further education\r\n # this is rare; in the household list, a few received beyond 12 years of education\r\n\r\n # check age-based death rates\r\n if self.age > 65:\r\n self.death_rate = 0.001443 # 5-day death rate\r\n # The average death rate in China is 7.3 per 1,000 people/year, or 0.0073 (Google).\r\n # However, death rates should be higher for the elderly, or else the population structure will skew.\r\n # I set death rates for those over age 65 to be 10% per year--0.9 yearly survival rate.\r\n # The survival rate for each 5-day step is compounded 73 times, so x^73 = 0.85.\r\n # 0.998557 is the 5-day survival rate, and 1 - x is the 5-day death rate.\r\n else:\r\n self.death_rate = 0.00000425\r\n # I wanted people to have a 98% chance of reaching age 65 (death rate is lower if not elderly).\r\n # If a 'check' is every 5 days, 73 checks/year * 65 years = 4,745 checks.\r\n # x^4745 = 0.98; the 5-day survival rate is 0.99999575, and 1 - x is the 5-day death rate.\r\n\r\n # These rates are changeable later.\r",
"def rabbit_growth_logic(n ,pairs_per_mate, count = 0, repro_pop = 0, non_repro_pop = 1,\n non_born = 0):\n if n == count:\n pass\n else:\n #appending population numbers to results list\n reproduction_age.append(repro_pop)\n non_reproductive_age.append(non_repro_pop)\n unborn.append(non_born)\n total.append(repro_pop + non_repro_pop)\n #changing populations numbers for next month through mating and growing up\n\n #reproductive bunnies mate and are added to non_born population\n non_born = repro_pop * pairs_per_mate\n\n\n repro_pop = non_repro_pop + repro_pop\n #bunnies grow up\n\n non_repro_pop = repro_pop * pairs_per_mate",
"def test_get_limit_5_dependants(self):\n self.assertEqual(\n gross_income.get_limit(dependant_children=5),\n gross_income.BASE_LIMIT + gross_income.EXTRA_CHILD_MODIFIER\n )",
"def children_impurity(self):\n\t\tpass",
"def testNumberIndividuals(self):\n self.assertEqual(self.tree1.get_number_individuals(), self.tree2.get_number_individuals())\n self.assertEqual(472518, self.tree1.get_number_individuals())",
"def num_instances(diff, flag=False):\r\n daughter = 0\r\n count = 0\r\n while True:\r\n mother = daughter + diff\r\n\r\n # assuming that mother and daughter don't have the same birthday,\r\n # they have two chances per year to have palindromic ages.\r\n if are_reversed(daughter, mother) or are_reversed(daughter, mother+1):\r\n count = count + 1\r\n if flag:\r\n print(daughter, mother)\r\n if mother > 120:\r\n break\r\n daughter = daughter + 1\r\n return count",
"def user_story_3(self):\n for person in self.individuals.values():\n if person.birthday != 'NA' and person.death != 'NA':\n if person.birthday > person.death:\n print(f'US03 - {person.name} birthday after death date on line {person._birthday_line}')",
"def user_story_4(self):\n for family in self.families.values():\n if family.married != 'NA':\n if family.wife_id != 'NA' and family.husb_id != 'NA' and family.divorced != 'NA':\n if family.divorced < family.married:\n print(\n f'US04 - {self.individuals[family.wife_id].name} and {self.individuals[family.husb_id].name} married after divorce on line {family._married_line}')",
"def test_get_limit_6_dependants(self):\n self.assertEqual(\n gross_income.get_limit(dependant_children=6),\n gross_income.BASE_LIMIT + (gross_income.EXTRA_CHILD_MODIFIER*2)\n )",
"def under_three_opens(name, state):\n counter = 0\n for item in state['items']:\n if item['seller'] == name:\n counter += 1\n return counter < 3",
"def test_validate_valid_person(self):\r\n assert self.person_tree != 0",
"def test_21_29(self):\r\n husband = Person()\r\n husband.id = \"I01\"\r\n husband.name = \"Jotaro /Kujo/\"\r\n husband.gender = \"F\"\r\n husband.birthDate = \"29 SEP 1918\"\r\n husband.age = 101\r\n husband.alive = True\r\n husband.death = \"N/A\"\r\n husband.child = [\"I03\"]\r\n husband.spouse = [\"I02\"]\r\n\r\n wife = Person()\r\n wife.id = \"I02\"\r\n wife.name = \"Marry Sue\"\r\n wife.gender = \"M\"\r\n wife.birthDate = \"12 Jan 1988\"\r\n wife.age = 80\r\n wife.alive = False\r\n wife.death = \"23 JAN 2020\"\r\n wife.child = []\r\n wife.spouse = [\"I01\"]\r\n\r\n test_family = Family()\r\n test_family.id = \"F01\"\r\n test_family.married = \"29 SEP 1993\"\r\n test_family.divorce = \"25 JAN 2020\"\r\n test_family.husbandID = \"I01\"\r\n test_family.husbandName = \"Morgan Sue\"\r\n test_family.wifeID = \"I02\"\r\n test_family.wifeName = \"Marry Sue\"\r\n test_family.chidren = [\"I03\"]\r\n\r\n personList = [husband, wife]\r\n familiList = [test_family]\r\n\r\n self.assertEqual(us21(personList, familiList), \"Husband in family is not male or wife in family is not female\")\r\n self.assertEqual(us29(personList), ['I02'])",
"def children_per_woman(self):\n return self.birthrate",
"def test_ave_age_range(step):\n diff = step[\"ave_birth\"] - step[\"birth\"]\n assert 0 < diff < 15E6",
"def _expectedChildCount(self):\n return 0",
"def bust(person):\n if person.total > GOAL_TOTAL() and person.aceCount == 0:\n return True\n elif person.total > GOAL_TOTAL() and person.aceCount > 0:\n adjust_ace(person)\n return person.total > GOAL_TOTAL()\n else: # person.total <= GOAL_TOTAL()\n return False",
"def user_story_5(self):\n for family in self.families.values():\n if family.married != 'NA':\n if family.wife_id != 'NA':\n if self.individuals[family.wife_id].death != 'NA':\n if self.individuals[family.wife_id].death < family.married:\n print(\n f'US05 - {self.individuals[family.wife_id].name} married after individual death date on line {family._married_line}')\n\n if family.husb_id != 'NA':\n if self.individuals[family.husb_id].death != 'NA':\n if self.individuals[family.husb_id].death < family.married:\n print(\n f'US05 - {self.individuals[family.husb_id].name} married after individual death date on line {family._married_line}')",
"def test_US_14(self):\n indi_repo: Repository = Repository(\"../GedcomFiles/US_14.ged\")\n\n exp = [\"US14: @F1@ has more than 5 children born on same date 2005-01-01 in line number 180\"]\n\n self.assertEqual(US_14(indi_repo._individual, indi_repo._family), exp)",
"def test_obvious_auction(self):\n bids = [Cost(ITEM1, ACTOR1, 1000),\n Cost(ITEM2, ACTOR1, 1000),\n Cost(ITEM3, ACTOR1, 1000),\n Cost(ITEM4, ACTOR1, 5000),\n\n Cost(ITEM1, ACTOR2, 1000),\n Cost(ITEM2, ACTOR2, 1000),\n Cost(ITEM3, ACTOR2, 5000),\n Cost(ITEM4, ACTOR2, 1000),\n\n Cost(ITEM1, ACTOR3, 1000),\n Cost(ITEM2, ACTOR3, 5000),\n Cost(ITEM3, ACTOR3, 1000),\n Cost(ITEM4, ACTOR3, 1000),\n\n Cost(ITEM1, ACTOR4, 5000),\n Cost(ITEM2, ACTOR4, 1000),\n Cost(ITEM3, ACTOR4, 1000),\n Cost(ITEM4, ACTOR4, 1000)]\n\n result = self.splitter.split(ITEMS[:4], ACTORS[:4], bids)\n expected = [(ITEM1, ACTOR4, None),\n (ITEM2, ACTOR3, None),\n (ITEM3, ACTOR2, None),\n (ITEM4, ACTOR1, None)]\n item_assignments_present(self, result, expected)",
"def hasSiblings():",
"def test_tax_withheld(self):\n self.assertEqual(\n self.forecast.tax_withheld,\n self.person1.tax_withheld + self.person2.tax_withheld)",
"def noBigamy(individual):\n\n\n families = gedcom_parser.get_families(individual)\n\n marriageDateRanges = []\n for family in families:\n marriageDate = None\n divorceDate = None\n for element in family.get_child_elements():\n if element.get_tag() == \"MARR\":\n marriageDate = convertGedcomDate(element.get_child_elements()[0].get_value())\n\n if element.get_tag() == \"DIV\":\n divorceDate = convertGedcomDate(element.get_child_elements()[0].get_value())\n\n if divorceDate == None:\n divorceDate = dt.now()\n\n marriageDateRanges.append((marriageDate, divorceDate))\n \n marriageDateIntervals = pandas.arrays.IntervalArray.from_tuples(marriageDateRanges)\n\n\n if marriageDateIntervals.is_non_overlapping_monotonic:\n return True\n else:\n print(\n f\"Error US11: Marriage of {individual.get_name()[0]} {individual.get_name()[1]} ({individual.get_pointer()}) occurs during another marriage\")\n return False",
"def test_it_ages():\n blake = Hobbit(\"Blake\")\n assert blake.age == 0\n\n for _ in range(5):\n blake.celebrate_birthday()\n\n assert blake.age == 5\n assert blake.is_adult() is False\n\n for _ in range(28):\n blake.celebrate_birthday()\n\n assert blake.age == 33\n assert blake.is_adult() is True\n\n blake.celebrate_birthday()\n\n assert blake.age == 34\n assert blake.is_adult() is True"
] |
[
"0.6394935",
"0.589646",
"0.58748806",
"0.57955617",
"0.5716806",
"0.56660855",
"0.55908614",
"0.54490757",
"0.53771",
"0.5303966",
"0.5289085",
"0.52771676",
"0.5269098",
"0.5228261",
"0.52020466",
"0.5188563",
"0.5182026",
"0.51784223",
"0.5177228",
"0.5161102",
"0.51388174",
"0.512702",
"0.51091254",
"0.5104896",
"0.5087042",
"0.5069331",
"0.50650525",
"0.505152",
"0.50285816",
"0.5021061"
] |
0.66829455
|
0
|
US53 Return True if the given individual has any of the individual errors listed in this file
|
def listErrors(individual): #TODO: add other errors implemented elsewhere
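    # Each check returns True (passed), False (an error was reported), or None (not enough data to evaluate).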
results = [birthBeforeMarriage(individual), birthBeforeDeath(individual), marriageBeforeDeath(individual),
datesBeforeCurrentDate(individual), noBigamy(individual)]
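    # Discard checks that could not be evaluated before aggregating the results.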
results = [x for x in results if x is not None]
return not all(results)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def has_errors( self ) :\n for e in self._errlist :\n if e.svr in (starobj.Error.CRIT, starobj.Error.ERR) :\n return True\n return False",
"def checkForErrors(file_to_check, elm_html_dir, protein):\n\n have_error = False\n found_summary_line = False\n tidy_file = elm_html_dir + protein + '.elm_tidy.html'\n\n try:\n # is the file there?\n elm_file = open(file_to_check)\n elm_file.close()\n # if the file is there, try to clean it\n os.system('tidy -f errs -o '\n + tidy_file + ' '\n + file_to_check)\n # if tidy is empty it will not find the summary\n f_open = open(file_to_check)\n for line in f_open:\n if line.find('error message') != -1:\n have_error = True\n break\n elif line.find('Summary of features') != -1:\n found_summary_line = True\n f_open.close() \n except:\n have_error = True\n \n if have_error or not found_summary_line:\n #os.system('rm -f '\n # + file_to_check)\n os.system('rm -f '\n + elm_html_dir + protein\n + '.elm_tidy.html')\n return True\n else:\n return False",
"def has_errors(self) -> bool:",
"def HasErrors(self):\n for name in self._GetStreamNames():\n if name.startswith('error_data.'):\n return True\n\n return False",
"def has_errors(self):\n return len(self.get_errors()) > 0",
"def check_errors(self, data):\n for entry in data:\n if entry.find('ERROR') != -1:\n return entry\n return False",
"def hasErrors(self):\n return False",
"def has_error(self):\n return len(self.unmapped) or len(self.author_denied) \\\n or len(self.pusher_denied) or len(self.foruser_denied) \\\n or len(self.fusion_denied)",
"def has_errors(self) -> bool:\n return len(self.errors) > 0",
"def __contains__(self, errtype):\n return errtype in self._state",
"def all_valid(self):\n return all([is_valid_name(_) for _ in self.files])",
"def has_any(self) -> bool:\n if len(self.failures) == 1:\n return self.failures[0] != \"\"\n\n return len(self.failures) > 1",
"def has_error(self):\n return self.error_found",
"def _has_error(self):\n # Public interface is given by get_status instead.\n # TODO: Cache result if reached execution with no error\n try:\n # If there is something in the error file:\n if path.getsize(path.join(self.run_dir, \"err.txt\")) > 0:\n return True\n except FileNotFoundError:\n pass\n try:\n with open(path.join(self.run_dir, \"out.txt\"), \"r\") as f:\n text = f.read()\n # TODO: Depending on the file size, the following might be better. Investigate this.\n # text = \"\".join(head(path.join(self.run_dir, \"out.txt\"), 300)+tail(path.join(self.run_dir, \"out.txt\"), 10))\n\n # TODO: The commented option is slower (even if compiled) than this one. Investigate.\n if \"(*error*)\" in text or re.search(\"Error reading .* parameters\", text) or re.search(\n \"MPI_ABORT was invoked\", text):\n # if re.search(\"\\(\\*error\\*\\)|Error reading .* parameters|MPI_ABORT was invoked\",text):\n return True\n else:\n return False\n except FileNotFoundError:\n return False",
"def has_errors(self):\n\n return True if len(self.errors) > 0 else False",
"def has_errors(self):\n return self.exc_info != None",
"def should_report_error(self, error):\n # Always report errors that aren't for this file or do not have a line\n # number.\n if error.filename != self._filename or error.lineno is None:\n return True\n # Treat lineno=0 as below the file, so we can filter it.\n lineno = error.lineno or sys.maxsize\n # Report the error if it isn't subject to any ignore or disable.\n return (lineno not in self._ignore and\n lineno not in self._disables[_ALL_ERRORS] and\n lineno not in self._disables[error.name])",
"def has_errors_fatal(self) -> bool:",
"def _sanity_check(trios):\n children = [trio.child for trio in trios]\n if not children:\n return\n id, count = Counter(children).most_common()[0]\n if count > 1:\n raise ParseError(\"Individual {!r} occurs more than once in PED file\".format(id))",
"def has_error(self, response):\n return response.find(' Matched') == -1 and response.find(' Failed') == -1",
"def has_failures_or_errors(self):\r\n return (self._num_failures() > 0) or (self._num_script_errors() > 0)",
"def has_errors(self) -> bool:\n if self.errors:\n return True\n return False",
"def check_usability(hdulist):\n status = True\n\n if 'INSTRUME' in hdulist[0].header:\n if change_style(hdulist[0].header['INSTRUME']):\n pass\n else:\n print (\"Not a valid value for INSTRUME: {}\".format(hdulist[0].header['INSTRUME']))\n status = False\n else:\n print (\"Missing INSTRUME header in file \")\n status = False\n if 'REFTYPE' in hdulist[0].header:\n pass\n else:\n print (\"Missing REFTYPE header in file \")\n status = False\n\n return status",
"def isError(self, errno):\n return self.__errors[errno]",
"def hasError(self) -> bool:\n return self.errorCode is not None and len(self.errorCode) > 0",
"def has_errors_fatal(self) -> bool:\n return len(self.errors_fatal) > 0",
"def _errors_exist(self, doc_type, login, response_payload):\n if \"errors\" in response_payload:\n for _, error in enumerate(response_payload[\"errors\"]):\n message = ':'.join([doc_type, login, str(error)])\n self.logger.error(message)\n return True\n return False",
"def storefront_check_errors():\n\n\tcurrentView = uidoc.ActiveView\n\tfamTypeDict = GetFamilyTypeDict(\"Fabrication-Error-Symbol\")\n\n\t# Clear existing error notations\n\terrorNotations = list(GetElementsInView(BuiltInCategory.OST_GenericAnnotation, Autodesk.Revit.DB.FamilyInstance, currentView.Id))\n\terrorNotations = FilterElementsByName(doc, errorNotations,[\"Fabrication\",\"Error-Symbol\"], False)\n\tif errorNotations:\n\t\twith rpw.db.Transaction(\"Place Errors\"):\n\t\t\tfor error in errorNotations:\n\t\t\t\tdoc.Delete(error)\n\n\n\tdef PointsAndErrors(mullions_list, errorName, cat_or_ids):\n\t\t\"\"\"adds to lists of points and errors\"\"\"\n\t\terrorsToFlag = []\n\t\tcompList =[]\n\t\tfor m in mullions_list:\n\t\t\tmElem = doc.GetElement(m)\n\t\t\tif m not in compList:\n\t\t\t\tintersectingMulls = FindIntersectingMullions(mElem, cat_or_ids)\n\t\t\t\tif list(intersectingMulls):\n\t\t\t\t\tmullPt = mElem.Location.Point\n\t\t\t\t\terrorsToFlag.append([mullPt, errorName])\n\t\t\t\t\tfor mm in list(intersectingMulls):\n\t\t\t\t\t\tcompList.append(mm.Id)\n\t\treturn errorsToFlag\n\n\tdef MullionClash():\n\n\t\terrorsToFlag = []\n\n\t\tselectedLevel = __revit__.ActiveUIDocument.ActiveView.GenLevel.Id\n\n\t\tallMullions = GetAllElements(doc, BuiltInCategory.OST_CurtainWallMullions, Autodesk.Revit.DB.FamilyInstance, currentView=True)\n\t\tallWalls = GetAllElements(doc, BuiltInCategory.OST_Walls, Autodesk.Revit.DB.Wall, currentView=True)\n\n\t\tallWalls = FilterElementsByName(doc, allWalls, [\"Storefront\",\"Storefront\"], True)\n\n\t\terrorsToFlag += PointsAndErrors(allMullions, \"Mullion-Mullion Intersects\", BuiltInCategory.OST_CurtainWallMullions)\n\t\terrorsToFlag += PointsAndErrors(allMullions, \"Mullion-Panel Intersects\", BuiltInCategory.OST_CurtainWallPanels)\n\t\tif allWalls:\n\t\t\terrorsToFlag += PointsAndErrors(allMullions, \"Mullion-Wall Intersects\", allWalls)\n\n\t\treturn errorsToFlag\n\n\tdef PanelClash():\n\n\n\t\terrorsToFlag = []\n\t\t\n\t\tallPanels = GetAllElements(doc, BuiltInCategory.OST_Windows, Autodesk.Revit.DB.FamilyInstance, currentView=True)\n\t\tallPanels = FilterDemolishedElements(doc, allPanels)\n\n\t\tpanelMinWidth = 0.45\n\t\tpanelMaxWidth = 5.0\n\t\tpanelMaxHeight = 8.14\n\n\t\t### ITERATE OVER PANEL LIST ###\n\t\tfor p in allPanels:\n\t\t\tfamInst = doc.GetElement(p)\n\n\t\t\tpan_height = famInst.Parameter[BuiltInParameter.FAMILY_HEIGHT_PARAM].AsDouble()\n\t\t\tpan_width = famInst.Parameter[BuiltInParameter.FAMILY_WIDTH_PARAM].AsDouble()\n\n\t\t\tif \"empty\" not in famInst.Name.lower():\n\t\t\t\tif pan_width < panelMinWidth:\n\t\t\t\t\terrorsToFlag.append([famInst.GetTransform().Origin, \"Small Panel\"])\n\t\t\t\telif pan_width > panelMaxWidth:\n\t\t\t\t\terrorsToFlag.append([famInst.GetTransform().Origin, \"Wide Panel\"])\n\t\t\t\telif pan_height > panelMaxHeight:\n\t\t\t\t\terrorsToFlag.append([famInst.GetTransform().Origin, \"Tall Panel\"])\n\t\t\telse:\n\t\t\t\tpass\n\t\t\n\t\treturn errorsToFlag\n\n\tdef ECWallClash():\n\n\t\terrorsToFlag = []\n\t\tcolumnsLinesEdgesEC = []\n\t\twallsLinesEdgesEC = []\n\n\n\t\tdocLoaded = RevitLoadECDocument(quiet=True)\n\t\tif docLoaded[0]:\n\t\t\tdocEC = docLoaded[0]\n\t\t\tecTransform = docLoaded[1]\n\n\t\t\tselectedLevel = __revit__.ActiveUIDocument.ActiveView.GenLevel.Id\n\n\t\t\tselectedLevelInst = doc.GetElement(selectedLevel)\n\t\t\tlevelElevationEC = None \n\t\t\tfor p in selectedLevelInst.Parameters:\n\t\t\t\tif p.Definition.Name == \"Elevation\":\n\t\t\t\t\tlevelElevationEC = 
p.AsDouble()\n\n\t\t\tallWallsEC = GetAllElements(docEC, BuiltInCategory.OST_Walls, Autodesk.Revit.DB.Wall)\n\t\t\tallColumnsEC = GetAllElements(docEC, BuiltInCategory.OST_Columns, Autodesk.Revit.DB.FamilyInstance)\n\t\t\tallColumnsEC += GetAllElements(docEC, BuiltInCategory.OST_StructuralColumns, Autodesk.Revit.DB.FamilyInstance)\n\n\t\t\tselectedWallsEC = FilterElementsByLevel(docEC, allWallsEC, levelElevationEC)\n\t\t\tselectedColumnsEC = FilterElementsByLevel(docEC, allColumnsEC, levelElevationEC)\n\n\t\t\twallsLinesEdgesEC = GetWallEdgeCurves(docEC, selectedWallsEC, ecTransform)\n\t\t\tcolumnsLinesEdgesEC = GetColumnEdgeCurves(docEC, selectedColumnsEC, ecTransform)\n\n\t\tallWalls = GetAllElements(doc, BuiltInCategory.OST_Walls, Autodesk.Revit.DB.Wall, currentView=True)\n\t\tstorefrontWalls = FilterElementsByName(doc, allWalls,[\"Storefront\",\"Storefront\"], False)\n\t\tstorefrontWalls = FilterWallsByKind(doc, storefrontWalls, \"Basic\")\n\n\t\tobstructionEdges = columnsLinesEdgesEC\n\t\tobstructionEdges += wallsLinesEdgesEC\n\n\t\tif obstructionEdges:\n\t\t\tfor sfWallId in storefrontWalls:\n\t\t\t\tsfWall = doc.GetElement(sfWallId)\n\t\t\t\tlocLine = sfWall.Location.Curve\n\t\t\t\tlocLineStart = locLine.GetEndPoint(0)\n\t\t\t\tlocLineEnd = locLine.GetEndPoint(1)\n\n\t\t\t\tfor obstructionLine in obstructionEdges:\n\t\t\t\t\tobstLineElevation = obstructionLine.GetEndPoint(0).Z\n\t\t\t\t\tlocLineStart = XYZ(locLineStart.X, locLineStart.Y, obstLineElevation)\n\t\t\t\t\tlocLineEnd = XYZ(locLineEnd.X, locLineEnd.Y, obstLineElevation)\n\t\t\t\t\tlocLineFlat = Line.CreateBound(locLineStart, locLineEnd)\n\t\t\t\t\tintersection = RevitCurveCurveIntersection(locLineFlat,obstructionLine)\n\n\t\t\t\t\tif intersection:\n\t\t\t\t\t\t#ERROR: Hit Existing Condition\n\t\t\t\t\t\terrorsToFlag.append([intersection, \"Hit EC\"])\n\n\t\treturn errorsToFlag\n\n\tallErrors = []\n\tallErrors += ECWallClash()\n\tallErrors += MullionClash()\n\tallErrors += PanelClash()\n\n\terrorSymbolId = famTypeDict[\"Fabrication-Error-Symbol\"]\n\n\tif allErrors:\n\t\twith rpw.db.Transaction(\"Error Check\"):\n\t\t\tRevitPlaceErrorsInView(currentView, allErrors, errorSymbolId)",
"def __call__(self, read, info: ModificationInfo):\n return expected_errors(read.qualities) > self.max_errors",
"def check_pe_resource(self, pe):\n suspicious = False\n if hasattr(pe, 'DIRECTORY_ENTRY_RESOURCE'):\n for r in pe.DIRECTORY_ENTRY_RESOURCE.entries:\n suspicious |= self.resource(pe, r, [])\n return suspicious"
] |
[
"0.70126104",
"0.6666234",
"0.6639376",
"0.6567932",
"0.6328549",
"0.6295345",
"0.61865944",
"0.61504483",
"0.61230916",
"0.6091424",
"0.60665655",
"0.6066381",
"0.606468",
"0.5951048",
"0.5904429",
"0.59001625",
"0.5894452",
"0.58898234",
"0.58656865",
"0.5850387",
"0.58202225",
"0.58110124",
"0.57995903",
"0.57815284",
"0.5773901",
"0.5755062",
"0.5745855",
"0.57272166",
"0.5719706",
"0.57156175"
] |
0.67767346
|
1
|
Delete characters from (upperRow, upperCol) up to (lowerRow, lowerCol) using the current selection mode.
|
def doDelete(self, upperRow, upperCol, lowerRow, lowerCol):
if app.config.strict_debug:
assert isinstance(upperRow, int)
assert isinstance(upperCol, int)
assert isinstance(lowerRow, int)
assert isinstance(lowerCol, int)
assert upperRow <= lowerRow
assert upperRow != lowerRow or upperCol <= lowerCol
if self.selectionMode == kSelectionBlock:
self.parser.deleteBlock(upperRow, upperCol, lowerRow, lowerCol)
elif (self.selectionMode == kSelectionNone or
self.selectionMode == kSelectionAll or
self.selectionMode == kSelectionCharacter or
self.selectionMode == kSelectionLine or
self.selectionMode == kSelectionWord):
self.parser.deleteRange(upperRow, upperCol, lowerRow, lowerCol)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def doDeleteSelection(self):\n upperRow, upperCol, lowerRow, lowerCol = self.startAndEnd()\n self.doDelete(upperRow, upperCol, lowerRow, lowerCol)",
"def handle_backspace(self, peer, row, col):\n\n # If the peer has selected text, delete that\n \n if peer.hasSelection():\n \n peer.deleteSelection()\n\n # Treat as if 1 char was deleted\n\n if peer is self.marker:\n \n self.root.last_col += 1\n\n else:\n\n # Move the cursor left one for a backspace\n\n if row > 0 and col > 0:\n\n index = \"{}.{}\".format(row, col-1)\n\n self.delete(index)\n\n elif row > 1 and col == 0:\n\n index = \"{}.end\".format(row-1,)\n\n self.delete(index)\n\n col = int(self.index(index).split('.')[1])\n\n # peer.move(row-1, col)\n\n return",
"def test_delete_char(self):\n before_b = \"\"\"\\\n first line\n line 1\n line a\n line b\n line c\n last line\n \"\"\"\n after_b = \"\"\"\\\n firstline\n line 1\n line a\n line b\n line c\n last line\n \"\"\"\n self.run_test(\n before_b=before_b,\n after_b=after_b,\n before_sel=(\"1.5\", \"1.5\"),\n after_sel=(\"1.5\", \"1.5\"),\n command_name=\"delete-char\",\n )",
"def test_backward_delete_char__middle_of_line(self):\n before_b = \"\"\"\\\n first line\n last line\n \"\"\"\n after_b = \"\"\"\\\n firstline\n last line\n \"\"\"\n self.run_test(\n before_b=before_b,\n after_b=after_b,\n before_sel=(\"1.6\", \"1.6\"),\n after_sel=(\"1.5\", \"1.5\"),\n command_name=\"backward-delete-char\",\n )",
"def clean_selection(row):\n if row['correctfractallocation'] == '(1355, 540)':\n # correct fractal was on right\n return int(float(row['selection'])) - 4\n elif row['correctfractallocation'] == '(565, 540)':\n # correct fractal was on left\n return -int(float(row['selection'])) + 5",
"def test_backward_delete_char(self):\n before_b = \"\"\"\\\n first line\n line 1\n line a\n line b\n line c\n last line\n \"\"\"\n after_b = \"\"\"\\\n first lie\n line 1\n line a\n line b\n line c\n last line\n \"\"\"\n self.run_test(\n before_b=before_b,\n after_b=after_b,\n before_sel=(\"1.9\", \"1.9\"),\n after_sel=(\"1.8\", \"1.8\"),\n command_name=\"backward-delete-char\",\n )",
"def delete(self):\n del self.characters[self.cursor.position]",
"def test_backward_delete_char_last_char(self):\n before_b = \"\"\"\\\n first line\n last line\n \"\"\"\n after_b = \"\"\"\\\n first line\n last lin\n \"\"\"\n self.run_test(\n before_b=before_b,\n after_b=after_b,\n before_sel=(\"2.9\", \"2.9\"),\n after_sel=(\"2.8\", \"2.8\"),\n command_name=\"backward-delete-char\",\n )",
"def test_backward_delete_word_selection(self):\n before_b = \"\"\"\\\n aaaa bbbb cccc dddd\n \"\"\"\n after_b = \"\"\"\\\n aaaa bbcc dddd\n \"\"\"\n self.run_test(\n before_b=before_b,\n after_b=after_b,\n before_sel=(\"1.7\", \"1.12\"),\n after_sel=(\"1.7\", \"1.7\"),\n command_name=\"backward-delete-word\",\n )",
"def test_clear_selected_text(self):\n before_b = \"\"\"\\\n first line\n line 1\n line a\n line b\n line c\n last line\n \"\"\"\n after_b = \"\"\"\\\n first line\n line line b\n line c\n last line\n \"\"\"\n self.run_test(\n before_b=before_b,\n after_b=after_b,\n before_sel=(\"2.4\", \"4.4\"),\n after_sel=(\"2.4\", \"2.4\"),\n command_name=\"clear-selected-text\",\n )",
"def unindent(self):\n\n self.beginEditBlock()\n\n for cursor in self.cursors:\n sel_start, sel_end, _ = self.get_sel_start_end_reverse(cursor)\n\n # retrieve start/end blocks to get the iteration range\n cursor.setPosition(sel_end, cursor.MoveAnchor)\n end_block = cursor.blockNumber()\n # also go to the firstiteration line\n cursor.setPosition(sel_start, cursor.MoveAnchor)\n start_block = cursor.blockNumber()\n\n # go to the start of line (as cursor.NextBlock does) to be sure that\n # cursor.deleteChar() operates on the starting characters of the line\n cursor.movePosition(cursor.StartOfLine, cursor.MoveAnchor)\n\n for _ in range(end_block -start_block +1):\n line = cursor.block().text()\n\n # go to the next line if line is empty\n if not line:\n cursor.movePosition(cursor.NextBlock, cursor.MoveAnchor)\n continue\n\n if line[0] == '\\t':\n cursor.deleteChar()\n cursor.movePosition(cursor.NextBlock, cursor.MoveAnchor)\n continue\n\n if len(line) < 3:\n cursor.movePosition(cursor.NextBlock, cursor.MoveAnchor)\n continue\n\n # perform line un-indent\n if line[:4] == ' ':\n for i in range(4):\n cursor.deleteChar()\n\n # go to the next line\n cursor.movePosition(cursor.NextBlock, cursor.MoveAnchor)\n\n self.endEditBlock()",
"def delete_forward():\r\n point().delete_right_char()",
"def test_backward_delete_word_no_selection(self):\n before_b = \"\"\"\\\n aaaa bbbb cccc dddd\n \"\"\"\n after_b = \"\"\"\\\n aaaa cccc dddd\n \"\"\"\n self.run_test(\n before_b=before_b,\n after_b=after_b,\n before_sel=(\"1.10\", \"1.10\"),\n after_sel=(\"1.5\", \"1.5\"),\n command_name=\"backward-delete-word\",\n )",
"def delete(self):\n if not self.selection.isSelection(): return False\n\n # Save the current text\n self.saveText()\n\n sm1, sm2 = self.selection.order(self.selection.selectionMark,\n self.selection.selectionMark2)\n w1 = sm1[0]\n w2 = sm2[0]\n cx = sm1[1]\n self.edCursor.setPos(w1, cx)\n # Join words before and after selection\n w1.setString(w1.string[:cx] + w2.string[sm2[1]:])\n # Delete all intervening words, and w2\n tl1 = w1.tline\n wx1 = tl1.twords.index(w1)\n tl2 = w2.tline\n wx2 = tl2.twords.index(w2)\n if (tl1 == tl2): # only delete from 1 line\n # delete words from wx1+1 to wx2 (incl.)\n for w in tl1.twords[wx1+1:wx2+1]:\n w.delete()\n del(tl1.twords[wx1+1:wx2+1])\n\n else: # deletion block covers >1 line\n # delete words from wx1+1 to end of paragraph\n for w in tl1.twords[wx1+1:]:\n w.delete()\n del(tl1.twords[wx1+1:])\n # delete all the intervening lines\n while True:\n tl = self.rsubject.nextLine(tl1)\n if (tl == tl2): break\n self.rsubject.deleteTLine(tl)\n\n # Move remaining words after w2 in tl2 to end of tl1\n for w in tl2.twords[wx2+1:]:\n tl1.insert(w)\n del(tl2.twords[wx2+1:])\n # Delete tl2\n self.rsubject.deleteTLine(tl2)\n\n self.selection.clearSelection()\n\n self.rsubject.renderShortened(w1)\n\n self.edCursor.setPos(w1, cx)\n return True",
"def remove(self, xcord, ycord, g_board):\n for i in range(xcord, xcord + 2):\n for j in range(ycord, ycord + self.size):\n g_board[i][j] = ' '",
"def _delChar(self, pos):\n nonGlyph = countInSet(self.text[:pos], self.NO_GLYPH_CHARS)\n\n self.allVertices = self.allVertices[:(pos - nonGlyph) * 4]\n self.allIndices = self.allIndices[:pos - nonGlyph]\n self.colors.pop(pos)\n self._string_metric = self._string_metric[:pos]\n self.text = self.text[:pos] + self.text[pos + 1:]\n self._updateGlyphs(pos)",
"def delete_chr(text):\n \"\"\" if the user try to delete an empty line it will not allowed him:)\"\"\"\n if len(text.getText())<1:\n text.setText(\"\")\n return text\n else:\n text.setText(text.getText()[:-10]) # 10 is the length of the word \"Backspace\" + 1 letter i delete\n return text",
"def BackTab(self):\n sel = self.GetSelection()\n if sel[0] == sel[1]:\n # There is no selection\n cpos = self.GetCurrentPos()\n cline = self.GetCurrentLine()\n cipos = self.GetLineIndentPosition(cline)\n if cpos <= cipos:\n # In indentation so simply backtab\n super(EditraBaseStc, self).BackTab()\n else:\n # In middle of line somewhere\n text = self.GetLine(cline)\n column = max(0, self.GetColumn(cpos) - 1)\n if len(text) > column and text[column].isspace():\n\n # Find the end of the whitespace\n end = column\n while end < len(text) and \\\n text[end].isspace() and \\\n text[end] not in '\\r\\n':\n end += 1\n\n # Find the start of the whitespace\n end -= 1\n start = end\n while end > 0 and text[start].isspace():\n start -= 1\n\n diff = end - start\n if diff > 1:\n # There is space to compress\n isize = self.GetIndent()\n if isize < diff:\n # More space than indent to remove\n repeat = isize\n else:\n # Less than one indent width to remove\n repeat = end - (start + 1)\n\n # Update the control\n self.BeginUndoAction()\n self.SetCurrentPos(cpos + (end - column))\n for x in range(repeat):\n self.DeleteBack()\n self.EndUndoAction()\n\n else:\n # There is a selection\n super(EditraBaseStc, self).BackTab()",
"def __extendWords(self, upperRow, upperCol, lowerRow, lowerCol):\n line = self.parser.rowText(upperRow)\n for segment in re.finditer(app.regex.kReWordBoundary, line):\n if segment.start() <= upperCol < segment.end():\n upperCol = segment.start()\n break\n line = self.parser.rowText(lowerRow)\n for segment in re.finditer(app.regex.kReWordBoundary, line):\n if segment.start() < lowerCol < segment.end():\n lowerCol = segment.end()\n break\n return upperCol, lowerCol",
"def unselect_piece(self, row, col):\n piece = self.squares[row][col]\n self.overwrite_board_square(row, col)\n self._put_chr_at(piece, row, col, self.not_select_color)",
"def deleteSelectedRows(self):\n # Get unique row number (user can select multiple cells in one row)\n uniqRows = set([idx.row() for idx in self.view.selectedIndexes()])\n # It's necessary to remove rows from the end, otherwise indexes become\n # outdated and useless.\n revRovs = sorted(list(uniqRows), reverse=True)\n for row in revRovs:\n self.model.removeRow(row)",
"def delete_selection(self):\n # TODO: Add undo, redo etc functionality\n # If whole column is selected remove that column completely\n # If whole row is selected remove that row completely\n # Else make the selected cells blank\n\n # TODO: Remove the deleted column from the visibility modal also\n\n selected_columns = sorted(self.selected_columns, reverse=True)\n selected_rows = sorted(self.selected_rows, reverse=True)\n\n fileChanged = False\n if len(selected_rows) > 0 or len(selected_columns) > 0:\n self.file_changed = True\n self.set_save_enabled(True)\n\n # delete any fully selected column\n for col in selected_columns:\n # Remove it from the show/hide modal too\n header_value = self.csv_data_table.horizontalHeaderItem(col).text()\n if header_value in self.column_headers_all:\n self.column_headers_all.remove(header_value)\n if header_value in self.column_headers:\n self.column_headers.remove(header_value)\n try:\n self.column_visibility_dialog_reference.remove_header(header_value)\n except:\n pass\n self.csv_data_table.removeColumn(col)\n\n self.selected_columns.clear()\n\n # delete any fully selected row\n for row in selected_rows:\n self.csv_data_table.removeRow(row)\n\n self.selected_rows.clear()\n\n # Now check if any individual cells are to be deleted\n\n cells = self.csv_data_table.selectionModel().selectedIndexes()\n\n if len(cells) > 0:\n self.file_changed = True\n self.set_save_enabled(True)\n\n for cell in sorted(cells):\n r = cell.row()\n c = cell.column()\n self.csv_data_table.item(r, c).setText('')\n\n # update the bottom toolbar to reflect the changes\n self.set_bottom_toolbar_info()",
"def handle_delete(self, peer, row, col):\n if peer.hasSelection():\n \n peer.deleteSelection()\n \n else:\n\n self.delete(\"{}.{}\".format(row, col))\n \n # peer.move(row, col)\n\n return",
"def delete_backward():\r\n point().delete_left_char()\r\n set_point(point().offset(-1))",
"def unmark(self, position):\n\n if self.selected_text_file is None:\n return\n if len(self.case_text) == 0:\n return\n cursor = self.ui.textBrowser.cursorForPosition(position)\n self.ui.textBrowser.setTextCursor(cursor)\n\n location = self.ui.textBrowser.textCursor().selectionStart()\n unmarked = None\n for item in self.case_text:\n if item['pos0'] <= location <= item['pos1']:\n unmarked = item\n if unmarked is None:\n return\n\n # Delete from database, remove from case_text and update gui\n cur = self.app.conn.cursor()\n cur.execute(\"delete from case_text where fid=? and caseid=? and pos0=? and pos1=?\",\n (unmarked['fid'], unmarked['caseid'], unmarked['pos0'], unmarked['pos1']))\n self.app.conn.commit()\n if unmarked in self.case_text:\n self.case_text.remove(unmarked)\n self.unlight()\n self.highlight()\n # The file may be assigned Yes in the table widget but should be empty\n self.get_files()\n self.fill_table()\n self.app.delete_backup = False",
"def edKey(self, key):\n # Any key press except cursor keys (?) causes the selection to be deleted!\n if isinstance(key, types.IntType):\n rawkey = (key & (SHIFT-1))\n if (rawkey in (1,2,3,4)):\n # First check for shift+cursor-key, because these alter the selection\n if (key & SHIFT): # SHIFT + cursor key\n # if there is not already a selection, set the first marker before\n # moving the cursor\n if not self.selection.selectionMark2:\n w, cx = self.edCursor.getPos()\n # Hack to handle empty words which are about to be deleted\n if (w.string == u\"\") and (len(w.tline.twords) > 1):\n # Don't start selection, just move cursor\n self.cursorKey(rawkey)\n return\n self.selection.selectionMark = (w, cx)\n self.cursorKey(rawkey)\n self.selection.setMark2(self.edCursor.getPos())\n\n else: # cursor key without SHIFT\n # clear selection and move cursor.\n self.selection.clearSelection()\n self.cursorKey(rawkey)\n return\n\n # All other keys are ignored if this widget is read-only\n if not self.editable: return\n\n self.cursorX = None\n # If there is a selection this must be deleted\n if self.delete() and (rawkey in (8,9)): return\n\n # Get cursor position\n word, cx = self.edCursor.getPos()\n tline = word.tline\n\n if (rawkey == 10): # space\n if (key & SHIFT):\n self.insertChar(FixedSpace)\n return\n s1 = word.string[:cx]\n s2 = word.string[cx:]\n word.setString(s1)\n # Create a new TWord with the second half of the split:\n nw = TWord(s2)\n wx = tline.twords.index(word)\n tline.insert(nw, wx+1)\n nw.setCanvas(self.canvas)\n\n # Re-render from this word, noting that it became shorter:\n self.rsubject.renderShortened(word)\n self.edCursor.setPos(nw, 0)\n return\n\n if (rawkey == 7): # line break\n s1 = word.string[:cx]\n s2 = word.string[cx:]\n word.setString(s1)\n # Create a new TWord with the second half of the split:\n nw = TWord(s2)\n # And a new Paragraph, copying the properties of the old one:\n para = Paragraph(tline.para)\n # And a new TextLine:\n ntl = TextLine(para, [nw])\n lx = self.rsubject.tlines.index(tline) + 1\n self.rsubject.tlines.insert(lx, ntl)\n nw.setCanvas(self.canvas)\n # Move words following the split:\n wx = tline.twords.index(word)\n for w in tline.twords[wx+1:]:\n ntl.insert(w)\n del(tline.twords[wx+1:])\n # Now move subsequent lines to new paragraph\n while True:\n lx += 1\n if (len(self.rsubject.tlines) <= lx) or \\\n (self.rsubject.tlines[lx].para != tline.para):\n break\n self.rsubject.tlines[lx].para = para\n\n # Re-render from this word, noting that it became shorter:\n self.rsubject.renderShortened(word)\n # Set cursor to start of new word.\n self.edCursor.setPos(nw, 0)\n return\n\n if (rawkey == 8) or (rawkey == 9): # delete / backspace\n if (rawkey == 9):\n # backspace: take one step back and then do as delete.\n if (cx == 0): # at start of word\n para0 = tline.para\n # if stepping back works ...\n if not self.edCursor.step(False): return\n # Get new cursor position\n word, cx = self.edCursor.getPos()\n tline = word.tline\n para = tline.para # needed for deletion test below\n else:\n cx -= 1\n s = word.string\n if (len(s) == cx): # at end of word\n # Join words\n wx = tline.twords.index(word) + 1\n if (wx >= len(tline.twords)): # at end of line\n # If we arrived at the end of a paragraph with\n # backspace, and the step backwards didn't skip\n # to the previous paragraph, do nothing!\n # That is necessary because of the\n # automatic deletion of words which become empty\n # when the cursor leaves them.\n if (rawkey == 9) and (para == para0): return\n # If at end 
of paragraph, join paragraphs\n nl = self.rsubject.nextLine(tline)\n if nl:\n para0 = tline.para\n para = nl.para\n if (para != para0):\n nl2 = nl\n while True:\n nl2.setPara(para0)\n nl2 = self.rsubject.nextLine(nl2)\n if (not nl2) or (nl2.para != para): break\n # Next line is (now) in same paragraph.\n # Move first word of next line to current line:\n tline.insert(nl.twords[0])\n del(nl.twords[0])\n if not nl.twords:\n # Line now empty, delete it\n self.rsubject.deleteTLine(nl)\n else:\n nl.y = None # to ensure re-rendering\n else:\n # Nothing to delete\n return\n\n nw = tline.twords[wx]\n del(tline.twords[wx])\n word.setString(s + nw.string)\n # The removed word must be 'freed'\n nw.delete()\n # Re-render from tline:\n self.rsubject.linify(tline)\n else:\n # Not at end of word, the word will be shortened.\n s = s[:cx] + s[cx+1:]\n word.setString(s)\n # Re-render from this word, noting that it became shorter:\n self.rsubject.renderShortened(word)\n # Reset cursor to start of new word/paragraph.\n self.edCursor.setPos(word, cx)\n\n self.deleteCount +=1\n if (self.deleteCount >= DELETECOUNT):\n self.saveText()\n return\n\n # Anything else is ignored\n return\n\n # All other keys are ignored if this widget is read-only\n if not self.editable: return\n\n # character key\n self.cursorX = None\n # If there is a selection this must be deleted\n # This must also reset the cursor appropriately\n self.delete()\n self.insertChar(key)",
"def extendSelection(self):\n if self.selectionMode == kSelectionNone:\n return (0, 0, -self.markerRow, -self.markerCol, 0)\n elif self.selectionMode == kSelectionAll:\n lowerRow = self.parser.rowCount() - 1\n lowerCol = self.parser.rowWidth(-1)\n return (lowerRow - self.penRow,\n lowerCol - self.penCol, -self.markerRow,\n -self.markerCol, 0)\n elif self.selectionMode == kSelectionLine:\n return (0, -self.penCol, 0, -self.markerCol, 0)\n elif self.selectionMode == kSelectionWord:\n if self.penRow > self.markerRow or (self.penRow == self.markerRow\n and\n self.penCol > self.markerCol):\n upperCol, lowerCol = self.__extendWords(\n self.markerRow, self.markerCol, self.penRow, self.penCol)\n return (0, lowerCol - self.penCol, 0, upperCol - self.markerCol,\n 0)\n else:\n upperCol, lowerCol = self.__extendWords(\n self.penRow, self.penCol, self.markerRow, self.markerCol)\n return (0, upperCol - self.penCol, 0, lowerCol - self.markerCol,\n 0)\n return (0, 0, 0, 0, 0)",
"def test_rectangle_clear(self):\n before_b = \"\"\"\\\n before\n aaaxxxbbb\n aaaxxxbbb\n aaaxxxbbb\n aaaxxxbbb\n after\n \"\"\"\n after_b = \"\"\"\\\n before\n aaa bbb\n aaa bbb\n aaa bbb\n aaa bbb\n after\n \"\"\"\n self.run_test(\n before_b=before_b,\n after_b=after_b,\n before_sel=(\"2.3\", \"5.6\"),\n after_sel=(\"2.3\", \"5.6\"),\n command_name=\"rectangle-clear\",\n )",
"def delete_selected_rows(self):\n self._export_mode = 'delete'\n self._counter_update_data += 1",
"def test_delete_spaces(self):\n before_b = \"\"\"\\\n first line\n line 1\n line a\n line b\n line c\n last line\n \"\"\"\n after_b = \"\"\"\\\n first line\n line 1\n line a\n line b\n line c\n last line\n \"\"\"\n self.run_test(\n before_b=before_b,\n after_b=after_b,\n before_sel=(\"3.2\", \"3.2\"),\n after_sel=(\"3.0\", \"3.0\"),\n command_name=\"delete-spaces\",\n )"
] |
[
"0.6620042",
"0.57208854",
"0.5691439",
"0.55859524",
"0.55157864",
"0.5507719",
"0.5377565",
"0.5346679",
"0.53402644",
"0.532339",
"0.5315138",
"0.53036845",
"0.5277891",
"0.5264728",
"0.51874375",
"0.5165313",
"0.5142384",
"0.5119983",
"0.510315",
"0.5101102",
"0.5100013",
"0.5078234",
"0.5048553",
"0.5034066",
"0.50126684",
"0.50092924",
"0.49943328",
"0.4992572",
"0.4976639",
"0.4976284"
] |
0.7226521
|
0
|
Get the marker and pen pair ordered as the earlier of the two, then the later of the two. The result accounts for the current selection mode.
|
def startAndEnd(self):
upperRow = 0
upperCol = 0
lowerRow = 0
lowerCol = 0
if self.selectionMode == kSelectionNone:
upperRow = self.penRow
upperCol = self.penCol
lowerRow = self.penRow
lowerCol = self.penCol
elif self.selectionMode == kSelectionAll:
upperRow = 0
upperCol = 0
lowerRow = self.parser.rowCount() - 1
lowerCol = self.parser.rowWidth(-1)
elif self.selectionMode == kSelectionBlock:
upperRow = min(self.markerRow, self.penRow)
upperCol = min(self.markerCol, self.penCol)
lowerRow = max(self.markerRow, self.penRow)
lowerCol = max(self.markerCol, self.penCol)
elif (self.selectionMode == kSelectionCharacter or
self.selectionMode == kSelectionLine or
self.selectionMode == kSelectionWord):
upperRow = self.markerRow
upperCol = self.markerCol
lowerRow = self.penRow
lowerCol = self.penCol
if upperRow == lowerRow and upperCol > lowerCol:
upperCol, lowerCol = lowerCol, upperCol
elif upperRow > lowerRow:
upperRow, lowerRow = lowerRow, upperRow
upperCol, lowerCol = lowerCol, upperCol
#app.log.detail('start and end', upperRow, upperCol, lowerRow, lowerCol)
return (upperRow, upperCol, lowerRow, lowerCol)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def extendSelection(self):\n if self.selectionMode == kSelectionNone:\n return (0, 0, -self.markerRow, -self.markerCol, 0)\n elif self.selectionMode == kSelectionAll:\n lowerRow = self.parser.rowCount() - 1\n lowerCol = self.parser.rowWidth(-1)\n return (lowerRow - self.penRow,\n lowerCol - self.penCol, -self.markerRow,\n -self.markerCol, 0)\n elif self.selectionMode == kSelectionLine:\n return (0, -self.penCol, 0, -self.markerCol, 0)\n elif self.selectionMode == kSelectionWord:\n if self.penRow > self.markerRow or (self.penRow == self.markerRow\n and\n self.penCol > self.markerCol):\n upperCol, lowerCol = self.__extendWords(\n self.markerRow, self.markerCol, self.penRow, self.penCol)\n return (0, lowerCol - self.penCol, 0, upperCol - self.markerCol,\n 0)\n else:\n upperCol, lowerCol = self.__extendWords(\n self.penRow, self.penCol, self.markerRow, self.markerCol)\n return (0, upperCol - self.penCol, 0, lowerCol - self.markerCol,\n 0)\n return (0, 0, 0, 0, 0)",
"def get_from_to(self):\n pfrom = self.cursors['y1']\n pto = self.cursors['y2']\n\n return pfrom, pto",
"def __one_both_open(x, y, c = None, l = None):\n return x - 1, y - 1",
"def previous_line():\r\n set_point(point().previous_line())",
"def getPrev(crossing):\n return",
"def __get_previous_tags__(self, tags):\n if len(self.tags) == 0:\n return None, None\n if self.index == 1:\n return BEGIN, tags[self.index-1]\n elif self.index == 0:\n return BEGIN, BEGIN\n else:\n return tags[self.index-2], tags[self.index-1]",
"def _getPrevKey(self, item):\n return (1, item)",
"def get_pick_position(self):\n x0 = int(self.GetPickX1())\n x1 = int(self.GetPickX2())\n y0 = int(self.GetPickY1())\n y1 = int(self.GetPickY2())\n return x0, y0, x1, y1",
"def get_previous_position(self) -> Tuple[int, int]:\n return self.__previous_row_position, self.__previous_col_position",
"def __one_both_closed(x, y, c = None, l = None):\n return x - 1, y",
"def p2(self):\n return tuple(self.rect[2:])",
"def previousRange(self):\r\n if (self.selectedmap > 0):\r\n self.pickMap(self.selectedmap-1)",
"def get_player2_mark(p1_mark):\r\n if p1_mark == 2:\r\n return markers[0]\r\n else:\r\n return markers[1]",
"def predecessor_pair(basepair, start, stop):\n\tx , y = basepair\n\tif (x - 1 < start) or (y + 1 > stop):\n\t\treturn (-1,-1)\n\telse:\n\t\treturn ( x - 1 , y + 1 )",
"def previous(self):\n if self.cursor.pref:\n self.cursor = self.cursor.pref\n return self.cursor\n return None",
"def selection_pos(self):\n buff = self._vim.current.buffer\n beg = buff.mark('<')\n end = buff.mark('>')\n return beg, end",
"def next_marks(self):\n if self.n != 0:\n pmax = max(self.marks)\n else:\n pmax = 0\n \n p1 = pmax + 1\n p2 = pmax + 2\n \n return p1, p2",
"def previous_symbol(self):\r\n if self.position == 0:\r\n return None\r\n return self.rule.rightside[self.position-1]",
"def GetOldSelection(self):\r\n\r\n return self.old_selection",
"def reorderReadPair(read1, read2):\n\n if (isCisInteraction(read1, read2) and read1.left_pos > read2.left_pos):\n r1_reorder = read2\n r2_reorder = read1\n else:\n r1_reorder = read1\n r2_reorder = read2\n return r1_reorder, r2_reorder",
"def select(self):\n\n return self.p[0], self.p[1]",
"def get_list_before_paired(line):\n if find_pair(line):\n index = get_index(line)\n return line[ :index + 2]\n return []",
"def getMarked(self):\n if not self.selection.isSelection():\n return u\"\"\n sm1, sm2 = self.selection.order(self.selection.selectionMark,\n self.selection.selectionMark2)\n w1 = sm1[0]\n w2 = sm2[0]\n cx1 = sm1[1]\n cx2 = sm2[1]\n if (w1 == w2):\n return w1.string[cx1:cx2]\n # Get the word fragments at the beginning and end of the selection\n snip1 = w1.string[cx1:]\n snip2 = w2.string[:cx2]\n tl1 = w1.tline\n wx1 = tl1.twords.index(w1)\n tl2 = w2.tline\n wx2 = tl2.twords.index(w2)\n # Start the text string with the format of the first line\n text = tl1.para.getFormat() + snip1\n # then get all intervening words\n if (tl1 == tl2): # only 1 line is involved\n # get words from wx1+1 to wx2-1 (incl.)\n for w in tl1.twords[wx1+1:wx2]:\n text += u\" \" + w.string\n ch = u\" \"\n\n else: # deletion block covers >1 line\n # get words from wx1+1 to end of paragraph\n for w in tl1.twords[wx1+1:]:\n text += u\" \" + w.string\n # get all the intervening lines\n while True:\n para = tl1.para\n tl1 = self.rsubject.nextLine(tl1)\n if (tl1.para == para):\n text += u\" \"\n else:\n text += u\"\\n\" + tl1.para.getFormat()\n if (tl1 == tl2): break\n text += tl1.getText()\n\n ch = u\"\"\n # Add the remaining words in tl2 up to w2-1\n for w in tl2.twords[:wx2]:\n text += ch + w.string\n ch = u\" \"\n\n # Add the fragment of the last marked word\n return text + ch + snip2",
"def getKerningPairsRef(self, font):\n kerningRef = font.kerning.keys()[:]\n\n for k in kerningRef:\n left, right = k\n\n if left in font.groups:\n groupGlyphs = font.groups[left]\n groupGlyphs.sort()\n # get first glyphname in the group\n leftRef = groupGlyphs[0]\n else:\n leftRef = left\n\n if right in font.groups:\n groupGlyphs = font.groups[right]\n groupGlyphs.sort()\n # get first glyphname in the group\n rightRef = groupGlyphs[0]\n else:\n rightRef = right\n\n i = kerningRef.index(k)\n kerningRef[i] = (leftRef, rightRef), (left, right)\n\n kerningRef.sort()\n return kerningRef",
"def get_marker_elements(self):\r\n # Fixme: Currently only arrowheads\r\n return [item[1] for item in self.arrows.values()]",
"def get_prev(self, pos):\n if pos <= 0:\n return None, None\n return self._get_at(pos - 1)",
"def onPick(self, event):\n\n modifiers = QtWidgets.QApplication.keyboardModifiers()\n isShift = modifiers == QtCore.Qt.ShiftModifier\n\n logger.info(f'isShift:{isShift}')\n line = event.artist\n\n # filter out clicks on 'Annotation' used by mplcursors\n try:\n # when Scatter, line is 'PathCollection', a list of (x,y)\n offsets = line.get_offsets()\n except (AttributeError) as e:\n return\n\n ind = event.ind # ind is a list []\n if len(ind)==0:\n return\n ind = ind[0]\n\n # ind is the ith element in (x,y) list of offsets\n # ind 10 (0 based) is index 11 (1 based) in table list\n logger.info(f' selected from plot ind:{ind}, offsets values are {offsets[ind]}')\n selectDict = self.getAnnotation(ind)\n\n # to do, just put copy of state dict ???\n selectDict['plotType'] = self.stateDict['plotType']\n selectDict['dataType'] = self.stateDict['dataType']\n\n selectDict['isShift'] = isShift\n\n #\n # emit\n logger.info(f' -->> signalSelectFromPlot.emit()')\n for _k, _v in selectDict.items():\n logger.info(f' {_k}: {_v}')\n self.signalSelectFromPlot.emit(selectDict)",
"def get_offset(a, b):\r\n\tdiff_y = b[0] - a[0]\r\n\tdiff_x = b[1] - a[1]\r\n\toffset_x = 0\r\n\toffset_y = 0\r\n\r\n\tif diff_y > 0:\r\n\t\toffset_y = 1\r\n\telif diff_y < 0:\r\n\t\toffset_y = -1\r\n\r\n\tif diff_x > 0:\r\n\t\toffset_x = 1\r\n\telif diff_x < 0:\r\n\t\toffset_x = -1\r\n\r\n\treturn (offset_x, offset_y)",
"def getMarkerArrow(point,k,n_s,r,g,b,orientation):\n marker = Marker()\n color = ColorRGBA(*[r,g,b,1])\n marker.header.frame_id = \"map\"\n marker.header.stamp = rospy.get_rostime()\n marker.ns = n_s\n marker.id = k\n marker.type = 0\n marker.action = 0\n marker.pose.position=point\n marker.pose.orientation = orientation\n marker.scale.x = 0.2\n marker.scale.y = 0.02\n marker.scale.z = 0.02\n marker.color = color\n return marker",
"def successor_pair(basepair, start, stop):\n\tx , y = basepair\n\tif (x + 1 > stop) or (y - 1 < start):\n\t\treturn (-1,-1)\n\telse:\n\t\treturn ( x + 1 , y - 1 )"
] |
[
"0.6011843",
"0.5677164",
"0.5634399",
"0.54283017",
"0.5415784",
"0.5395966",
"0.5371319",
"0.53632814",
"0.52929765",
"0.5262781",
"0.5216849",
"0.52008176",
"0.5173189",
"0.510564",
"0.50992477",
"0.50596124",
"0.50539047",
"0.50408655",
"0.50330716",
"0.502399",
"0.49885204",
"0.4965858",
"0.4930599",
"0.49293298",
"0.49100155",
"0.48987344",
"0.48706773",
"0.48600295",
"0.48356956",
"0.48306224"
] |
0.5908329
|
1
|
Create a new instance of the consumer class, passing in the AMQP URL used to connect to RabbitMQ.
|
def __init__(self, amqp_url, *handlers):
self._consumer_tags = []
RabbitMQ.__init__(self, amqp_url)
    # save our handlers for future use
self._handlers = {}
for handle in handlers:
for k, v in handle.handlers().items():
self._handlers[k] = v
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def __init__(self, amqp_url):\n self._connection = None\n self._channel = None\n self._url = amqp_url",
"def __init__(self, bot_id, exchange, callback_func, rabbit_user, rabbit_pw, rabbit_host,\n rabbit_port, consumer_id = 0, internal_error_queue = None, statsd = None):\n\n super(RabbitConsumer, self).__init__()\n\n self.rabbit_port = rabbit_port\n self.rabbit_host = rabbit_host\n self.rabbit_pw = rabbit_pw\n self.rabbit_user = rabbit_user\n self.bot_id = bot_id\n self.exchange = exchange\n self.callback_func = callback_func\n self._closing = False\n self.stopped = False\n self._connection = None\n self._channel = None\n self._closing = False\n self._consumer_tag = None\n\n self.queue_name = self.exchange + \"-\" + self.bot_id\n self.error_queue_name = 'error-' + self.bot_id + \"-\" + self.exchange\n self.consumer_id = consumer_id\n self.internal_error_queue = internal_error_queue\n\n self.statsd = statsd\n\n self.statsd_prefix = self.exchange + \".\"\n\n self.invocations = 0\n self.total_execution_time = 0",
"def __init__(self, consumer):\n self._consumer = consumer",
"def __init__(self, routing_key):\n self.routing_key = routing_key\n self.connection = pika.BlockingConnection(pika.ConnectionParameters(host=rabbitmq_hostname))\n self.channel = self.connection.channel()\n result = self.channel.queue_declare(queue='', exclusive=True, durable=True)\n self.callback_queue = result.method.queue\n\n self.channel.basic_consume(\n queue=self.callback_queue,\n on_message_callback=self.on_response,\n auto_ack=True\n )",
"def __init__(self, username, password, consumer):\n\t\t\n\t\tif not callable(consumer):\n\t\t\traise Exception('Consumer must be callable')\n\t\t\n\t\tself.consumer = consumer\n\n\t\tself.curl = pycurl.Curl()\n\n\t\tself.curl.setopt(pycurl.USERPWD, '%s:%s' % (username, password))\n\t\tself.curl.setopt(pycurl.WRITEFUNCTION, self._receive)\n\n\t\tself.reset()",
"def __init__(self, options):\n self._options = options\n host = self._get_option('rabbit_host', 'str', 'localhost')\n port = self._get_option('rabbit_port', 'int', 5672)\n use_ssl = self._get_option('rabbit_use_ssl', 'bool', False)\n userid = self._get_option('rabbit_userid', 'str', 'guest')\n password = self._get_option('rabbit_password', 'str', 'guest')\n virtual_host = self._get_option('rabbit_virtual_host', 'str', '/')\n\n self.connection = kombu.connection.BrokerConnection(\n hostname=host,\n userid=userid,\n password=password,\n virtual_host=virtual_host,\n ssl=use_ssl)\n\n self.topic = self._get_option('rabbit_notification_topic',\n 'str',\n 'glance_notifications')",
"def __init__(self, url, routing_key, log_file='/dev/null', exchange='yacamc_exchange', exchange_type='direct',\n queue=None, acked=True, sender=False, otq = False, log_level=logging.FATAL):\n\n if queue is None:\n queue = routing_key\n self.exchange = exchange\n self.exchange_type = exchange_type\n self.queue = queue\n self.routing_key = routing_key\n self._url = url\n self.acked = acked\n self.otq = otq\n\n self.cb = None\n\n self._connection = None\n self._channel = None\n self._closing = False\n\n log_format = '%(levelname) -10s %(asctime)s %(name) -30s %(funcName) -35s %(lineno) -5d: %(message)s'\n handler = logging.FileHandler(log_file)\n logging.basicConfig(level=log_level, format=log_format)\n self.logger = logging.getLogger(__name__)\n self.logger.addHandler(handler)\n\n # used only for sending\n self._deliveries = []\n self._acked = 0\n self._nacked = 0\n self._message_number = 0\n self._stopping = False\n self._done_sending = False\n self.message = \"\"\n self.sender = sender\n\n # self.run()\n # self._connection = self.connect()",
"def create_amqp_connection():\n # type: () -> amqp.Connection\n cget = partial(config.CFG.get, \"rabbitmq\")\n return amqp.Connection(\n host=cget(\"host\"),\n userid=cget(\"user\"),\n password=cget(\"password\"),\n virtual_host=cget(\"vhost\"),\n )",
"def __init__(self, address, queue_name):\n self.connection = pika.BlockingConnection(\n pika.ConnectionParameters(address))\n self.queue_name = queue_name\n\n # create the channel\n self.channel = self.connection.channel()\n\n # declare the queue\n self.channel.queue_declare(queue=queue_name, durable=True)\n\n logging.info(\"Message Broker connected to {0}\".format(address))",
"def __init__(self):\n self.connection = pika.BlockingConnection(\n pika.ConnectionParameters(host='localhost'))\n self.channel = self.connection.channel()",
"def client_rabbit(url, username, password):\n client = Client(url, username, password)\n return client",
"def new(\n cls,\n baseplate: Baseplate,\n exchange: kombu.Exchange,\n connection: kombu.Connection,\n queue_name: str,\n routing_keys: Sequence[str],\n handler_fn: Handler,\n error_handler_fn: Optional[ErrorHandler] = None,\n health_check_fn: Optional[HealthcheckCallback] = None,\n serializer: Optional[KombuSerializer] = None,\n worker_kwargs: Optional[Dict[str, Any]] = None,\n retry_mode: RetryMode = RetryMode.REQUEUE,\n retry_limit: Optional[int] = None,\n ) -> \"KombuQueueConsumerFactory\":\n queues = []\n for routing_key in routing_keys:\n queues.append(kombu.Queue(name=queue_name, exchange=exchange, routing_key=routing_key))\n return cls(\n baseplate=baseplate,\n name=queue_name,\n connection=connection,\n queues=queues,\n handler_fn=handler_fn,\n error_handler_fn=error_handler_fn,\n health_check_fn=health_check_fn,\n serializer=serializer,\n worker_kwargs=worker_kwargs,\n retry_mode=retry_mode,\n retry_limit=retry_limit,\n )",
"def consumer(self):\n return Consumer(app=self.app, client=self.client)",
"def _create_connection(self, host, port):\n return pika.BlockingConnection(pika.ConnectionParameters(host=host,\n port=port))",
"def __init__(self, config=None, broker=None):\n pass",
"def __init__(self, pyrps, queue, consumer_id):\n self.pyrps = pyrps\n self.queue = queue\n self.consumer_id = consumer_id",
"def get_consumer(conf_settings, address=Config.INSIGHTS_KAFKA_ADDRESS): # pragma: no cover\n conf = _get_consumer_config(address, conf_settings)\n LOG.info(f\"Consumer config {conf}\")\n return Consumer(conf, logger=LOG)",
"def __init__(self, consumer_group):\n self.consumer_group = consumer_group",
"def create_connection(self):\n if self.conn:\n raise Exception(\"Connection already open\")\n cfg = self.config[self.MODULE_NAME]['amqp']['connection']\n log.debug(str(cfg))\n self.conn = pika.AsyncoreConnection(pika.ConnectionParameters(\n credentials = pika.PlainCredentials(cfg['login'],cfg['password']), \n heartbeat=cfg['heartbeat'],\n virtual_host=cfg['vhost'],\n port=cfg['port'],\n host=cfg['host']))\n self.channel = self.conn.channel()\n\n self._setup_tubes()",
"def create_receiver(self):\n receiver = kafka.KafkaConsumer(bootstrap_servers=['%s:%s' % (self._host, self._port)])\n return receiver",
"def get_client() -> RabbitmqClient:\n # Replace the parameters with proper values for host, port, login and password\n # Change the value of exchange if needed.\n #\n # For any parameter that is not given here, the client tries to use a value from an environment variable\n # and most of the parameters also have a default value that is used if neither the constructor parameter\n # nor the environmental variable exist.\n # See tools/clients.py for details about the environmental variables and the default values.\n return RabbitmqClient(\n host=\"\",\n port=0,\n login=\"\",\n password=\"\",\n exchange=\"procem.examples_testing\",\n ssl=True,\n ssl_version=\"PROTOCOL_TLS\",\n exchange_autodelete=True,\n exchange_durable=False\n )",
"def linkRabbit(self):\n\n print(\"Listening for RabbitMQ messages\")\n\n # RabbitMQ setup\n connection = pika.BlockingConnection(\n pika.ConnectionParameters(host='localhost'))\n channel = connection.channel()\n\n #channel.exchange_declare(exchange='freqSweep', exchange_type='fanout')\n channel.exchange_declare(exchange='pwrSweep', exchange_type='fanout')\n\n result = channel.queue_declare(queue='', exclusive=True)\n queue_name = result.method.queue\n\n # channel.queue_bind(exchange='freqSweep', queue=queue_name)\n channel.queue_bind(exchange='pwrSweep', queue=queue_name)\n channel.basic_consume(queue=queue_name, on_message_callback=self.rabbitCallback, auto_ack=True)\n channel.start_consuming()",
"def connect(self):\n try:\n self.conn = amqp.Connection(insist=True, **self.config)\n self.chan = self.conn.channel()\n self.chan.queue_declare(queue=self.RQU, durable=False, exclusive=False, auto_delete=True)\n self.chan.exchange_declare(exchange=self.EXCH, type=\"topic\", durable=True, auto_delete=False,)\n self.chan.queue_bind(queue=self.RQU, exchange=self.EXCH, routing_key=self.RKEY)\n self.chan.basic_consume(queue=self.RQU, no_ack=True, callback=self.amqpCallback, consumer_tag=\"ctag\")\n except:\n self.closeConn()",
"def __init__(self, broker_address, handle_json_message_data=True):\n self.broker_address = broker_address\n self.producer = Producer({'bootstrap.servers': self.broker_address})\n self.handle_json_message_data = handle_json_message_data",
"def connect(_url, _port, _user, _passwd, _exchange, _queue):\n connection = None\n channel = None\n exchange = None\n try:\n connection = pika.BlockingConnection(pika.ConnectionParameters(\n port=_port,\n host=_url,\n credentials=pika.PlainCredentials(_user, _passwd))\n )\n channel = connection.channel()\n exchange = channel.exchange_declare(\n exchange=_exchange,\n passive=False,\n durable=False,\n exchange_type='fanout'\n )\n\n queue = channel.queue_declare(\n queue=_queue,\n durable=False,\n exclusive=False,\n auto_delete=True\n ).method.queue\n channel.queue_bind(exchange=_exchange, queue=queue, routing_key='')\n\n return connection, channel, exchange, queue\n\n except Exception as e:\n logger.error(e)\n if not (channel is None):\n channel.close()\n channel = None\n if not (connection is None):\n connection.close()\n connection = None\n return None, None",
"def create_consumer(self, topic_id: str, callback: Callable, gcp_subscription_id:str=None):\n backend = None\n if self.vendor == 'kafka':\n backend = KafkaClient(topic_id, self.configs['kafka_servers'])\n Consumer(backend, callback)\n else:\n project_id = os.getenv(\"GOOGLE_CLOUD_PROJECT\")\n subscription_id = gcp_subscription_id\n backend = GooglePubSubClient(project_id=project_id, topic=topic_id,\n subscription_id=subscription_id, gcp_configs=self.configs, callback=callback)\n runner_thread = Thread(target=runner)\n runner_thread.start()",
"def _setup_connection(self, parameters):\n logger.info('Connecting to %s', parameters)\n return pika.BlockingConnection(parameters = parameters)",
"def __init__(__self__,\n resource_name: str,\n args: ConsumerChannelArgs,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...",
"def connect(self):\n # @TODO: implement SSL\n connection = pika.BlockingConnection(self.params)\n channel = connection.channel()\n channel.queue_declare(queue=self.queue_name, durable=True)\n\n self.connection = connection\n self.channel = channel",
"def __init__(self, connection, exchange, queue, routing_key,\n sub_worker, thread_support=True, no_declare=True):\n # Reduce logging from amqp module\n setup_logging(loglevel='INFO', loggers=['amqp'])\n logger.debug(f\"Initializating a new listener for exchange/queue: {exchange}/{queue}...\")\n self.connection = connection\n self.exchange = Exchange(exchange, 'direct',\n durable=True, no_declare=no_declare)\n self.queue = Queue(queue, exchange=self.exchange,\n routing_key=routing_key, no_declare=no_declare)\n self.queues = [self.queue]\n self.no_declare = no_declare\n logger.info(f\"New listener initialized for exchange/queue: {exchange}/{queue}...\")\n logger.debug(f\"Importing sub_worker module: {sub_worker}...\")\n self.sub_worker = sub_worker\n self.thread_support = thread_support\n mod_name = '.'.join(self.sub_worker.split(\".\")[:-1])\n try:\n self.sub_worker_mod = importlib.import_module(mod_name)\n except ModuleNotFoundError as e:\n logger.error(f\"Cannot import the sub worker module named {mod_name}: ModuleNotFoundError\")\n sys.exit(-1)\n except Exception as e:\n logger.error(f\"Cannot import the sub worker module named {mod_name}: \" + str(e))\n sys.exit(-1)"
] |
[
"0.7262911",
"0.69813114",
"0.6856555",
"0.6677581",
"0.6617419",
"0.6605369",
"0.6598709",
"0.6594421",
"0.6551421",
"0.6519944",
"0.64506525",
"0.6445172",
"0.6418586",
"0.6371481",
"0.63316727",
"0.6271886",
"0.6116819",
"0.610504",
"0.60880953",
"0.6048735",
"0.6037678",
"0.59702814",
"0.59268737",
"0.59165996",
"0.5897666",
"0.58874",
"0.58759",
"0.5847834",
"0.584047",
"0.5750259"
] |
0.7203772
|
1
|
This method is used to pass an encoded body to a handler and return its value. Errors are handled and returned via the "error" field.
|
def _wrap_handler(self, handler, body):
try:
decoded_body = json.loads(body)
result = yield handler(decoded_body)
return result
except Exception as e:
return {"error": str(e)}
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def get_body_encoded(self):\r\n return self.encode(self.get_body())",
"def exec_handler(self, body = None):\n h = self.MyTestHandler()\n h.request = Request.blank('/test_rpc/')\n h.response = Response()\n h.request.body = body\n h.post()\n return (h.response._Response__status[0], h.response.out.getvalue())",
"def body_value(self, body, **kwargs):\n return body",
"def justhandle(self, rawdata):\r\n\r\n return self.__handler(rawdata)",
"def process(self, payload, status_code=0):",
"def get_body(self):\r\n fp = self._environ['wsgi.input']\r\n return fp.read()",
"def get_body(self):\n fp = self._environ['wsgi.input']\n return fp.read()",
"def post_response(self, body, **kwargs):\n data = json.loads(body)\n if \"errors\" in data:\n self.handle_error(data)",
"def HandlePost(self): # pylint: disable=R0201\n return BaseHandler.CreateError('Not implemented yet!', 501)",
"def handler(self):\n\t\treturn self.handle_request",
"def __call__(self, *args, **kwargs):\n\t\treturn self.handler()(self.request(kwargs))",
"def payload(self):",
"def callback():\n signature = request.headers['X-Line-Signature']\n body = request.get_data(as_text=True)\n logger.info('Request body: %s', body)\n try:\n handler.handle(body, signature)\n except InvalidSignatureError:\n logger.exception(\n 'Invalid signature. Please check your channel access token/channel secret.')\n abort(400)\n\n return 'OK'",
"def post(self):\n deferred.run(self.request.body)",
"def handle(self, rawdata):\r\n\r\n return self.__filter(self.__handler(rawdata))",
"def body(self):\n return self._body # None if nothing from ApiGateway",
"def handle(self, body):\n event_type = body['event_type']\n method_name = event_type.replace('.', '_')\n try:\n method = getattr(self, method_name)\n method(body)\n except AttributeError:\n LOG.debug('%s needs a method called `%s` to handle %s' %\n (self.__class__.__name__, method_name, event_type))",
"def decode_body(body):\n try:\n return base64.b64decode(body)\n except (TypeError, ValueError, binascii.Error) as e:\n logger.warn(f\"Failed to decode body for message {mid}: {str(e)}\")\n return None",
"def get_payload(self):\n return {'message': 'bar'}",
"def ingest_plain_body(request):\n try:\n content = str(request.body, encoding='utf-8')\n except Exception as e:\n log.error(log.exc(e))\n return None\n return content",
"def __get_post_body(self):\n content_len = int(self.headers.getheader('content-length', 0))\n return self.rfile.read(content_len)",
"def mock_get_object_response(raw_body: str) -> Dict[str, Any]:\n\n encoded_message = raw_body.encode(\"utf-8\")\n raw_stream = StreamingBody(io.BytesIO(encoded_message), len(encoded_message))\n\n return {\"Body\": raw_stream}",
"def lambda_handler(event, context):\n\n operations = {\n 'POST': main,\n }\n\n if event.get('httpMethod', False):\n operation = event['httpMethod']\n else:\n operation = \"not available\"\n\n payload = base64.b64decode(event['body'])\n try:\n payload = json.loads(payload)\n except TypeError:\n pass\n\n if operation in operations:\n return respond(None, operations[operation](payload))\n else:\n return respond(ValueError(f'Unsupported method {operation}'))",
"def get( self ):\n return self.__to_message_function( self.__raw_payload )",
"def post(self, *args, **kwargs):\n return self.handle_post_request()",
"def format_body(self, body: 'BodyValue', action: Callable[['BodyParser'], None]) -> 'SimpleJsonFormatter':",
"def callback(ch, method, properties, body):\n record = json.loads(body.decode()) # decode binary string to dict\n pprint(record)",
"def func_wrapper(event, context):\n req = Request(event, context)\n\n try:\n resp = func(req)\n\n if not isinstance(resp, Response):\n message = (\n 'Invalid return value from handler. '\n 'It should be either Response or Exception'\n )\n raise TypeError(message)\n except ServerlessError as e:\n status_code = e.status_code\n message = e.message if e.message else e.__class__.__name__\n\n resp = to_error_response(message, e.errors, status_code)\n except Exception as e: # pylint: disable=W0703\n logger.exception(e)\n status_code = 500\n message = 'InternalServerError'\n errors = tuple()\n\n resp = to_error_response(message, errors, status_code)\n return resp.to_lambda_output()",
"def result(self): \n return self.body",
"def _dispatch(self, body):\n pass"
] |
[
"0.6089419",
"0.5972001",
"0.5903383",
"0.58262664",
"0.5725833",
"0.56453234",
"0.55684733",
"0.5544198",
"0.54686564",
"0.5420627",
"0.53850895",
"0.5339725",
"0.5323851",
"0.5286423",
"0.52700764",
"0.5258086",
"0.52574533",
"0.5253574",
"0.5234289",
"0.5206247",
"0.5197935",
"0.5180782",
"0.5176291",
"0.51597166",
"0.51435167",
"0.51336706",
"0.51324743",
"0.5126236",
"0.51252276",
"0.5091369"
] |
0.7676827
|
0
|
Get the duty score of each category. We don't calculate a score for each post; we assume that what a person likes can be described at the category level.
|
def get_duty_cate_score(chosen_duty_list: list) -> pmag.MagicDict:
res = pmag.MagicDict()
for w, cate in chosen_duty_list:
freq = MODEL[cate]['duty'][w]['freq']
prob = MODEL[cate]['duty'][w]['prob']
score = prob # freq * prob / DUTY_NF[cate]
if cate in res:
res[cate] += score
else:
res[cate] = score
return res
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def get_category_scores(category: Category):\r\n solutions = Solution.objects.filter(challenge__category=category).select_related(\"user\").select_related(\"challenge\")\r\n d = dict()\r\n\r\n for sol in solutions:\r\n d[sol.user] = d.get(sol.user, 0) + sol.get_score()\r\n \r\n return d",
"def calc_score(user_chosen_dict: dict) -> list:\n res = pmag.MagicDict()\n duty_list = user_chosen_dict['like']\n require_list = user_chosen_dict['cando']\n\n duty_cate_score = get_duty_cate_score(duty_list)\n require_post_score = get_require_post_score(require_list)\n demand_post_score = get_demand_post_score(require_list)\n\n for cate, _posts in require_post_score.items():\n cate_s = duty_cate_score.get(cate, 0)\n for post, post_s in _posts.items():\n demand_s = demand_post_score[cate][post]\n score = cate_s * 0.5 + post_s * 0.3 + demand_s * 0.2\n res[cate+\"-\"+post] = score\n sorted_res = sorted(res.items(), key=lambda x: x[1], reverse=True)\n return sorted_res",
"def calc_priors(categories, data):\n counts = np.zeros(categories)\n for val in range(categories):\n counts[val] = np.count_nonzero(data.labels == val)\n return counts / len(data.labels)",
"def get_pertinence (cats):\n sorted_cats = sorted(cats, key=cats.__getitem__, reverse=True)\n score_to_test = cats[sorted_cats[0]]\n all_values = [cats[key] for key in sorted_cats]\n average = sum(all_values) / len(all_values)\n logged_rest = [log(abs(average - val) + 1) for val in all_values[1:]]\n \n rest_average = sum(logged_rest) / len(logged_rest)\n logged_main = log(abs(average - all_values[0])+1)\n \n importance = max(logged_main - rest_average, 0)\n \n return importance",
"def _predict(self, probabilities):\n child_categories = []\n for i in range(0, self.category_level):\n child_categories.append({})\n for category_label in self.classifiers[i].classes_:\n main_category = self._get_categories(category_label)[0]\n if main_category not in child_categories[i]:\n child_categories[i][main_category] = []\n child_categories[i][main_category].append(category_label)\n\n # find the primary category\n max_score = -1\n primary_category_label = None\n\n for i in range(0, self.category_level):\n for category_label in self.classifiers[i].classes_:\n if probabilities[category_label] < 1e-9:\n continue\n total_score = 0\n main_category = self._get_categories(category_label)[0]\n candidates = child_categories[i][main_category]\n for actual_label in candidates:\n probability = probabilities[actual_label]\n if probability < 1e-9:\n continue\n score = self._cal_score(category_label, None, actual_label, i)\n total_score += score * probability\n if total_score > max_score:\n max_score = total_score\n primary_category_label = category_label\n\n # find the secondary category\n max_score = -1\n secondary_category_label = None\n for i in range(0, self.category_level):\n for category_label in self.classifiers[i].classes_:\n if probabilities[category_label] < 1e-9 and secondary_category_label:\n continue\n if category_label == primary_category_label:\n continue\n total_score = 0\n main_category = self._get_categories(category_label)[0]\n main_category2 = self._get_categories(primary_category_label)[0]\n candidates = list(set(child_categories[i][main_category] + child_categories[i][main_category2]))\n for actual_label in candidates:\n probability = probabilities[actual_label]\n if probability < 1e-9:\n continue\n score = self._cal_score(primary_category_label, category_label, actual_label, i)\n total_score += score * probability\n if total_score > max_score:\n max_score = total_score\n secondary_category_label = category_label\n\n return [self._get_categories(primary_category_label), self._get_categories(secondary_category_label)]",
"def test_get_cat_score(self):\n classes = ['blue skin', 'pointy ears']\n negated_classes = []\n categories = ['ear feature', 'skin feature']\n\n categorical_score = self.annot_scorer._get_categorical_score(\n classes, negated_classes, categories,\n self.negation_weight, self.mock_ic_values\n )\n\n assert categorical_score == 0.7002519289078384",
"def PrintCategoryScore(Cat):\r\n print()\r\n print(\"########## Individual Category Results ##########\")\r\n for i in range(len(Cat)): # prints out the results per category \r\n print()\r\n print(Cat[i])\r\n print(CategoryScore(Cat[i]))\r\n print()\r\n return print(\"----- End of Individuals Category Results -----\")",
"def davies_bouldin_score(self):\r\n print(colored(\"The davies bouldin score of the clustering is %0.002f\\n\" %(davies_bouldin_score(self.X, self.labels)),color = 'red', attrs=['bold']))\r\n print()\r\n print(colored(\"The points in each cluster are : \",color = 'yellow', attrs=['bold']))\r\n print(collections.Counter(self.labels))",
"def _get_category_scores(self, data) -> pd.DataFrame:\n return data[self._get_columns(self.version, self.author)]",
"def prob(self, doc, cat):\n catprob = self.category_count(cat) / self.total_count() # Pr(Category)\n docprob = self.doc_prob(doc, cat) # Pr(Document | Category)\n return docprob*Decimal(str(catprob)) # Pr(Category | Document)",
"def score(self, doc, c):\n # >>> YOUR ANSWER HERE\n # the inner loop in the TEST NAIVE BAYES, sum up the logprior of the class and all words' loglikelihood\n sum = self.logprior[c]\n words = doc.split()\n for w in words:\n if w in self.vocabulary:\n sum += self.loglikelihood[(w, c)]\n return sum\n # >>> END YOUR ANSWER",
"def get_average(values_per_category):\n sum = 0\n num_categories = 0\n for category in values_per_category:\n num_categories += 1\n sum += values_per_category[category]\n return sum / num_categories",
"def calculate_metrics(self):\n \n for cv in self.cat_vals:\n cat_inds = np.where(self.category_values == cv)[0]\n weighted_difference = (self.z[cat_inds]-self.mz[cat_inds])/self.weight_values[cat_inds]\n resid = np.sqrt(np.sum(np.square(weighted_difference))/(cat_inds.size))\n self.metric[str(cv)] = resid\n \n return self.metric",
"def score(pid, cid=None, decay=True):\n return cls.decay(cls.papers())[pid]['score']",
"def get_category_count(self, category):\r\n if category in self.category_count:\r\n return float(self.category_count[category])\r\n else:\r\n return 0.0",
"def _cal_score(self, primary_category_label, secondary_category_label, actual_category_label, max_level):\n primary_categories = self._get_categories(primary_category_label)\n if not secondary_category_label:\n secondary_categories = None\n else:\n secondary_categories = self._get_categories(secondary_category_label)\n actual_categories = self._get_categories(actual_category_label)\n total_score = 0\n ignored_primary = False\n ignored_secondary = False\n for i in range(0, max_level + 1):\n if primary_categories and i < len(primary_categories):\n pv = primary_categories[i]\n else:\n pv = None\n if actual_categories and i < len(actual_categories):\n av = actual_categories[i]\n else:\n av = None\n if secondary_categories and i < len(secondary_categories):\n sv = secondary_categories[i]\n else:\n sv = None\n primary_equal = pv == av\n secondary_equal = (secondary_categories is not None) and sv == av\n if not primary_equal:\n ignored_primary = True\n if not secondary_equal:\n ignored_secondary = True\n\n if not ignored_primary:\n total_score += self.level_scores[i]\n\n if ignored_primary and (not ignored_secondary):\n total_score += 0.5 * self.level_scores[i]\n\n return total_score",
"def returns_by_category(self):\n cate_weights = self.weights_by_category\n cate_returns = {}\n for cate in self.unique_category:\n if cate_weights[cate] == 0:\n cate_returns[cate] = 0\n else:\n cate_returns[cate] = (self.returns[self.category == cate] *\n self.weights[self.category == cate]).sum()/cate_weights[cate]\n return pd.Series(cate_returns, index=self.unique_category)",
"def probability_categorical(feature, label):\n assert feature.nunique()>2, 'feature category nums must be greater than 2.'\n t = pd.DataFrame({'feature':feature, 'label':label})\n cat = label.unique()\n cat = [(cat[i], cat[i+1]) for i in range(len(cat)-1)]\n prob = label.value_counts(1).to_dict()\n slope = [prob.get(i[0], 0)-prob.get(i[1], 0) for i in cat]\n \n slope_dict = t.feature.value_counts(1).to_dict()\n prob = t.groupby([ 'feature']).label.value_counts(1).to_dict()\n slope_dict = {i:{'category_rate':slope_dict[i], 'slope':[prob.get((i,j[0]), 0)-prob.get((i,j[1]), 0) for j in cat]} for i in slope_dict}\n for i in slope_dict:\n slope_dict[i]['slope_diff'] = sum([abs(slope[j]-slope_dict[i]['slope'][j]) for j in range(len(slope))])\n value1 = sorted([[[i], slope_dict[i]['slope_diff'], slope_dict[i]['category_rate']] for i in slope_dict], key=lambda x:x[1], reverse=1)\n distance = sorted([value1[i][1]-value1[i+1][1] for i in range(len(value1)-1)])\n std = pd.Series([i[1] for i in value1]).std()\n coupe = value1\n dis = distance[0]\n for k in distance:\n value = value1\n while 1:\n for i in range(len(value)-1):\n if value[i][1]-k<value[i+1][1]:\n value[i+1][0] = value[i][0]+value[i+1][0]\n value[i+1][1] = value[i][1]*value[i][2]/(value[i][2]+value[i+1][2])+value[i+1][1]*value[i+1][2]/(value[i][2]+value[i+1][2])\n value[i+1][2] = value[i][2]+value[i+1][2]\n value.remove(value[i])\n break\n if i==len(value)-2:\n break\n if pd.Series([i[1] for i in value]).std()>std:\n coupe = value\n std = pd.Series([i[1] for i in value]).std()\n dis = k\n return {'group':{k:i for i,j in enumerate(coupe) for k in j[0]}, 'data':coupe, \n 'distance':dis, 'distance_index':f'{distance.index(dis)+1}/{len(distance)}', 'std':std}",
"def classProbs(observation, tree, classes):\n res = classify(observation, tree) #res = results\n total = sum(res.values())\n probs = []\n for c in classes:\n if c in res.keys():\n probs.append(float(res[c])/total)\n else:\n probs.append(0)\n return probs",
"def getCounter(self):\n word_count, noun_word_count = Counter(), Counter()\n word_rating, noun_word_rating = defaultdict(list), defaultdict(list)\n docs = self.nlp.pipe(\n self.docs, n_process=1, disable=self.disablelayers)\n \n\n for index, doc in enumerate(docs):\n for token in doc:\n if not token.is_stop and not token.is_punct and token.pos_ in self.pos:\n if token.pos_ == 'PROPN':\n word_count[token.lemma_] += 1\n word_rating[token.lemma_].append(self.ratings[index])\n else:\n noun_word_count[token.lemma_] += 1\n noun_word_rating[token.lemma_].append(self.ratings[index])\n\n # if 0<=proper nouns<=5 found, add regular nouns\n if not word_count or len(word_count) <= 5:\n word_count += noun_word_count\n word_rating = {**word_rating, **noun_word_rating}\n \n word_color = {word: self.getColor(\n ratings)[1] for word, ratings in word_rating.items()}\n word_sentiment = {word: self.getColor(\n ratings)[0] for word, ratings in word_rating.items()}\n\n return word_count, word_color, word_sentiment",
"def weights_by_category(self):\n cate_weights = {}\n for cate in self.unique_category:\n cate_weights[cate] = self.weights[self.category == cate].sum()\n return pd.Series(cate_weights, index=self.unique_category)",
"def get_score(self):\n files_flare = self.generate_flare_set()\n files_non_flare = self.generate_non_flare_set()\n timeseries = []\n y = []\n scores = {}\n column_mapping = self.__get_column_mapping()\n for col in tqdm(range(1, 25)):\n for file in tqdm(files_flare):\n s = Sample(\"FL\", file).get_data().iloc[:, col].values\n y.append(self.mapping[file[0]])\n timeseries.append(s)\n\n for file in tqdm(files_non_flare):\n s = Sample(\"NF\", file).get_data().iloc[:, col].values\n y.append(self.mapping[file[0]])\n timeseries.append(s)\n embed = self.get_embed_vector(timeseries)\n\n embed_y = KMeans(n_clusters=5).fit_predict(embed)\n y = np.array(y).flatten()\n scores[column_mapping[col]] = self.relevance_score(embed_y, y)\n timeseries = []\n y = []\n scores_data = pd.DataFrame.from_dict(scores, orient='index', columns=['Relevance Score']).sort_values(\n by='Relevance Score', ascending=False)\n return scores_data",
"def categorize_attributes():\n global attr_categories, seeds\n print \"Generating seeds...\"\n seeds = get_seeds()\n\n print \"Categorizing attributes...\"\n categorized = categorize(seeds)\n \n category_distances = {}\n attr_categories = {}\n for c in categorized:\n for (attr, score) in categorized[c]:\n attr_categories[attr] = c\n category_distances[attr] = score",
"def calc_score(n, prob_vals, cat_cols, list_cols):\n score = 0\n for c in cat_cols:\n if n[c] is not None:\n score += prob_vals[n[c]]\n for c in list_cols:\n for d in n[c]:\n score += prob_vals[d]\n score *= n[\"Nc\"]\n score *= n[\"Ec\"]\n return score",
"def pie_chart_score(self, grouped):\n picked_scenario = self.scenario_dict[\"%d\" % (self.scenario_num-1)]\n distinct_enum_X = self.data_dict[picked_scenario[\"X\"]]['distinct_enum']\n score = 0\n if min(grouped) < 0:\n score = 0\n elif distinct_enum_X == 1:\n score = 0\n elif picked_scenario[\"Agg_func_Y\"] == \"avg\":\n score = 0\n elif distinct_enum_X >= 2 and distinct_enum_X <= 8:\n score += self.calculate_entropy(self.data_dict[picked_scenario[\"Y\"]]) / 8\n elif distinct_enum_X > 8:\n score += 4 * (self.calculate_entropy(self.data_dict[picked_scenario[\"Y\"]])) / distinct_enum_X\n if score > 3:\n score = 3\n return score",
"def DCC(self):\n return self.get_class_average(self.DCC_class_level)",
"def categorical_accuracy(preds, y):\n max_preds = preds.argmax(\n dim=1, keepdim=True\n ) # get the index of the max probability\n correct = max_preds.squeeze(1).eq(y)\n return correct.sum() / torch.FloatTensor([y.shape[0]]).to(device)",
"def analyse_reddit(self):\r\n currency_codes = ['ARS', 'BHD', 'BWP', 'BRL', 'BND', 'BGN', 'CLP', 'CNY', 'COP', 'HRK', 'CZK', 'DKK', 'HKD',\r\n 'ISK', 'IDR', 'IRR', 'ILS', 'KZT', 'KRW', 'KWD', 'LYD', 'MYR', 'MUR', 'MXN', 'NPR', 'NZD',\r\n 'NOK', 'OMR', 'PKR', 'PHP', 'PLN', 'QAR', 'RON', 'RUB', 'SAR', 'ZAR', 'LKR', 'SEK', 'CHF',\r\n 'TWD', 'THB', 'TTD', 'TRY', 'AED', 'VEF', 'AUD', 'CAD', 'EUR', 'HUF', 'INR', 'JPY', 'GBP',\r\n 'USD']\r\n\r\n # Word to determine the post\r\n word = ['all time high', 'positive', 'high', 'back up', 'peak', 'bounding off', 'playing well', 'drop',\r\n 'skyrocket']\r\n counter = 0\r\n total = 0\r\n today = datetime.date.today()\r\n first = today.replace(day=1)\r\n thisMonth = today.strftime(\"%b\")\r\n lastMonth = (first - datetime.timedelta(days=1)).strftime(\"%b\")\r\n # Reddit posts from this month and last month are accounted for analysis\r\n if self.choice in currency_codes:\r\n with open('forex.csv', \"rt\", encoding='utf-8') as f:\r\n reader = csv.DictReader(f, delimiter=\",\")\r\n for row in reader:\r\n if self.choice in row['Currency']:\r\n if thisMonth or lastMonth in row['Date']:\r\n # Count the total number of post scraped\r\n total += 1\r\n for i in word:\r\n # If the post contain the word, increase the counter\r\n if i in row[\"Title\"]:\r\n counter += 1\r\n # Calculate the percentage\r\n if counter != 0:\r\n percentage = (counter / total) * 100\r\n percentage = round(percentage, 2)\r\n return percentage\r\n else:\r\n return 0",
"def get_category_metrics(self, category):\n slug_list = self._category_slugs(category)\n return self.get_metrics(slug_list)",
"def concept_categorization(self):\n dataset = pd.read_csv(\"data/Categorization data set.csv\", sep=\";\", header=None)\n dataset.columns = ['concept','word']\n\n cti = {}\n for i,c in enumerate(np.unique(dataset.concept.values)):\n cti[c] = i\n y_true = dataset.concept.apply(lambda x: cti[x]).values\n vs = []\n preds = [''] * dataset.shape[0]\n for ind,w in enumerate(dataset.word.values):\n try:\n vs.append(self.embeddings_index[w])\n except:\n preds[ind] = 0 \n km = KMeans(n_clusters=22, random_state=0)\n km.fit(np.array(vs).astype(np.float32))\n for ind,w in enumerate(dataset.word.values):\n if preds[ind] == '':\n preds[ind] = km.predict(np.array([self.embeddings_index[w]]))[0]\n contingency_matrix = metrics.cluster.contingency_matrix(y_true, preds)\n #purity score\n return np.sum(np.amax(contingency_matrix, axis=0)) / np.sum(contingency_matrix)"
] |
[
"0.613709",
"0.58618087",
"0.5727891",
"0.56878406",
"0.56152403",
"0.56031907",
"0.5602886",
"0.5589381",
"0.55766207",
"0.5559616",
"0.55558634",
"0.551678",
"0.5496905",
"0.5457578",
"0.5345839",
"0.5217769",
"0.5215651",
"0.5173264",
"0.5075885",
"0.50440013",
"0.5034618",
"0.5033379",
"0.50281197",
"0.5021522",
"0.5018752",
"0.5014211",
"0.5013277",
"0.5008846",
"0.50063634",
"0.5005281"
] |
0.66597116
|
0
|
Get the require score of each post under all categories.
|
def get_require_post_score(chosen_require_list: list) -> pmag.MagicDict:
res = pmag.MagicDict()
for w, cate in chosen_require_list:
posts = MODEL[cate]['posts']
for post in [*posts]:
if w in posts[post]['require']:
freq = posts[post]['require'][w]['freq']
prob = posts[post]['require'][w]['prob']
score = prob # freq * prob / REQUIRE_NF[cate][post]
else:
continue
if post in res[cate]:
res[cate][post] += score
else:
res[cate][post] = score
return res
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def get_category_scores(category: Category):\r\n solutions = Solution.objects.filter(challenge__category=category).select_related(\"user\").select_related(\"challenge\")\r\n d = dict()\r\n\r\n for sol in solutions:\r\n d[sol.user] = d.get(sol.user, 0) + sol.get_score()\r\n \r\n return d",
"def calc_score(user_chosen_dict: dict) -> list:\n res = pmag.MagicDict()\n duty_list = user_chosen_dict['like']\n require_list = user_chosen_dict['cando']\n\n duty_cate_score = get_duty_cate_score(duty_list)\n require_post_score = get_require_post_score(require_list)\n demand_post_score = get_demand_post_score(require_list)\n\n for cate, _posts in require_post_score.items():\n cate_s = duty_cate_score.get(cate, 0)\n for post, post_s in _posts.items():\n demand_s = demand_post_score[cate][post]\n score = cate_s * 0.5 + post_s * 0.3 + demand_s * 0.2\n res[cate+\"-\"+post] = score\n sorted_res = sorted(res.items(), key=lambda x: x[1], reverse=True)\n return sorted_res",
"def get_vote_score(self):\n q = PostVote.objects.filter(post=self).aggregate(Sum('score'))\n return q['score__sum'] if q['score__sum'] else 0",
"def get_demand_post_score(chosen_require_list: list) -> pmag.MagicDict:\n res = pmag.MagicDict()\n for w, cate in chosen_require_list:\n posts = MODEL[cate]['posts']\n for post in [*posts]:\n if w in posts[post]['require']:\n demand = posts[post]['demand']\n score = ((demand['continuous_freq'] + demand['interval_freq'])\n / (2*DEMAND_NF) * demand['publish_freq'])\n else:\n continue\n if post in res[cate]:\n continue\n else:\n res[cate][post] = score\n return res",
"def get_pertinence (cats):\n sorted_cats = sorted(cats, key=cats.__getitem__, reverse=True)\n score_to_test = cats[sorted_cats[0]]\n all_values = [cats[key] for key in sorted_cats]\n average = sum(all_values) / len(all_values)\n logged_rest = [log(abs(average - val) + 1) for val in all_values[1:]]\n \n rest_average = sum(logged_rest) / len(logged_rest)\n logged_main = log(abs(average - all_values[0])+1)\n \n importance = max(logged_main - rest_average, 0)\n \n return importance",
"def calc_priors(categories, data):\n counts = np.zeros(categories)\n for val in range(categories):\n counts[val] = np.count_nonzero(data.labels == val)\n return counts / len(data.labels)",
"def get_demand_post_score_from_require_res(\n require_post_score: dict) -> pmag.MagicDict:\n res = pmag.MagicDict()\n for cate, posts in require_post_score.items():\n for post, _ in posts.items():\n demand = MODEL[cate]['posts'][post]['demand']\n score = ((demand['continuous_freq'] + demand['interval_freq'])\n / (2*DEMAND_NF) * demand['publish_freq'])\n res[cate][post] = score\n return res",
"def getScore(self):\n\t\tself.scoreList = [submissionsss.score for submissionsss in self.subreddit.top(time_filter = 'day', limit = self.limits)]\n\t\treturn self.scoreList",
"def apply_all_evaluation(self, tag_bundle_set):\n categories_list = []\n for num, category in enumerate(self.categories):\n (is_matching_category, op_result_meta_info) = category.apply(tag_bundle_set)\n if is_matching_category:\n categories_list.append(\n (num, category.name, op_result_meta_info) if self.debug else (num, category.name))\n return categories_list if categories_list else [(-1, None, []) if self.debug else (-1, None)]",
"def score(self):\n return self.aggregate(Sum('score')).values()[0] or 0",
"def get_scores(self):\n return self.score",
"def coherence_scores(topic_words):\n\tscores = {}\n\tfor score_type in score_types:\n\t\turl = _palmetto_url.format(score=score_type)\n\t\tr = requests.get(url, {'words': ' '.join(topic_words)})\n\t\tscores[score_type] = float(r.text)\n\n\treturn scores",
"def calc_score_for_lang(self, lang_tree):\n scores = []\n lang_tree.tree.find_all(self.text, lambda index, score: scores.append(score))\n self.scores[lang_tree.lang] = sum(scores)",
"def test_get_cat_score(self):\n classes = ['blue skin', 'pointy ears']\n negated_classes = []\n categories = ['ear feature', 'skin feature']\n\n categorical_score = self.annot_scorer._get_categorical_score(\n classes, negated_classes, categories,\n self.negation_weight, self.mock_ic_values\n )\n\n assert categorical_score == 0.7002519289078384",
"def evaluateObjective(posts, threshold):\n partialSum = 0\n for post in posts:\n partialSum += max(np.sign(post[\"similarity\"] - threshold) * post[\"score\"], 0)\n return partialSum",
"def get_score(self):\n for response in self.response_list:\n self.score += response.get_score",
"def calc_score(n, prob_vals, cat_cols, list_cols):\n score = 0\n for c in cat_cols:\n if n[c] is not None:\n score += prob_vals[n[c]]\n for c in list_cols:\n for d in n[c]:\n score += prob_vals[d]\n score *= n[\"Nc\"]\n score *= n[\"Ec\"]\n return score",
"def score(self, doc, c):\n # >>> YOUR ANSWER HERE\n # the inner loop in the TEST NAIVE BAYES, sum up the logprior of the class and all words' loglikelihood\n sum = self.logprior[c]\n words = doc.split()\n for w in words:\n if w in self.vocabulary:\n sum += self.loglikelihood[(w, c)]\n return sum\n # >>> END YOUR ANSWER",
"def getScore(self):\n return sum(self.field)",
"def scores_(self):\n return self.predictor.scores_",
"def percent_using_relevant_words_by_context_and_question(self):\n total_student_count = self.get_number_of_unique_students()\n\n question_context_count_list = self.students_using_relevant_words_by_context_and_question()\n\n question_context_percent_list = []\n for item in question_context_count_list:\n question_context_percent_list.append((item[0], item[1], item[2] / total_student_count))\n\n return question_context_percent_list",
"def score(self, params):\n\n if self.use_sqrt:\n return self.score_sqrt(params)\n else:\n return self.score_full(params)",
"def calculate_score(self):\n try:\n self.score = self.__total_comment_score / float(self.num_comments)\n except ZeroDivisionError:\n self.score = float(0)",
"def scores(self) -> List[float]:\n if not self.prediction:\n return []\n return [sentence.score for sentence in self.prediction.sentences]",
"def score_partial(self, X):\n score = self.model.decision_function([X])[0]\n\n return score",
"def get_score(self):\n files_flare = self.generate_flare_set()\n files_non_flare = self.generate_non_flare_set()\n timeseries = []\n y = []\n scores = {}\n column_mapping = self.__get_column_mapping()\n for col in tqdm(range(1, 25)):\n for file in tqdm(files_flare):\n s = Sample(\"FL\", file).get_data().iloc[:, col].values\n y.append(self.mapping[file[0]])\n timeseries.append(s)\n\n for file in tqdm(files_non_flare):\n s = Sample(\"NF\", file).get_data().iloc[:, col].values\n y.append(self.mapping[file[0]])\n timeseries.append(s)\n embed = self.get_embed_vector(timeseries)\n\n embed_y = KMeans(n_clusters=5).fit_predict(embed)\n y = np.array(y).flatten()\n scores[column_mapping[col]] = self.relevance_score(embed_y, y)\n timeseries = []\n y = []\n scores_data = pd.DataFrame.from_dict(scores, orient='index', columns=['Relevance Score']).sort_values(\n by='Relevance Score', ascending=False)\n return scores_data",
"def _get_category_scores(self, data) -> pd.DataFrame:\n return data[self._get_columns(self.version, self.author)]",
"def get_score(self):\r\n correct = 0\r\n for key in self.correct_map:\r\n try:\r\n correct += self.correct_map.get_npoints(key)\r\n except Exception:\r\n log.error('key=%s, correct_map = %s', key, self.correct_map)\r\n raise\r\n\r\n if (not self.student_answers) or len(self.student_answers) == 0:\r\n return {'score': 0,\r\n 'total': self.get_max_score()}\r\n else:\r\n return {'score': correct,\r\n 'total': self.get_max_score()}",
"def get_agreeableness(self, cat_scores) -> float:\n return self._get_significance_matrix()[:, 3].dot(cat_scores)",
"def score(self):\n return len([req for req in list(set(self.knowledges)) if req in \n self.key_requirements or req in self.other_requirements])"
] |
[
"0.61079204",
"0.5922314",
"0.58088666",
"0.5803367",
"0.57733124",
"0.56357044",
"0.55619234",
"0.5498224",
"0.54960984",
"0.5487963",
"0.5476413",
"0.5473844",
"0.54599303",
"0.5441705",
"0.54069304",
"0.5355828",
"0.53226984",
"0.53197926",
"0.5309574",
"0.5264326",
"0.5246117",
"0.5244421",
"0.52265143",
"0.5206994",
"0.51924014",
"0.5172833",
"0.5164948",
"0.5150741",
"0.51484984",
"0.51444685"
] |
0.66950214
|
0
|
Another method to get the demand score for each post. It returns the same result as `get_demand_post_score`.
|
def get_demand_post_score_from_require_res(
require_post_score: dict) -> pmag.MagicDict:
res = pmag.MagicDict()
for cate, posts in require_post_score.items():
for post, _ in posts.items():
demand = MODEL[cate]['posts'][post]['demand']
score = ((demand['continuous_freq'] + demand['interval_freq'])
/ (2*DEMAND_NF) * demand['publish_freq'])
res[cate][post] = score
return res
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def get_demand_post_score(chosen_require_list: list) -> pmag.MagicDict:\n res = pmag.MagicDict()\n for w, cate in chosen_require_list:\n posts = MODEL[cate]['posts']\n for post in [*posts]:\n if w in posts[post]['require']:\n demand = posts[post]['demand']\n score = ((demand['continuous_freq'] + demand['interval_freq'])\n / (2*DEMAND_NF) * demand['publish_freq'])\n else:\n continue\n if post in res[cate]:\n continue\n else:\n res[cate][post] = score\n return res",
"def get_require_post_score(chosen_require_list: list) -> pmag.MagicDict:\n res = pmag.MagicDict()\n for w, cate in chosen_require_list:\n posts = MODEL[cate]['posts']\n for post in [*posts]:\n if w in posts[post]['require']:\n freq = posts[post]['require'][w]['freq']\n prob = posts[post]['require'][w]['prob']\n score = prob # freq * prob / REQUIRE_NF[cate][post]\n else:\n continue\n if post in res[cate]:\n res[cate][post] += score\n else:\n res[cate][post] = score\n return res",
"def calc_score(user_chosen_dict: dict) -> list:\n res = pmag.MagicDict()\n duty_list = user_chosen_dict['like']\n require_list = user_chosen_dict['cando']\n\n duty_cate_score = get_duty_cate_score(duty_list)\n require_post_score = get_require_post_score(require_list)\n demand_post_score = get_demand_post_score(require_list)\n\n for cate, _posts in require_post_score.items():\n cate_s = duty_cate_score.get(cate, 0)\n for post, post_s in _posts.items():\n demand_s = demand_post_score[cate][post]\n score = cate_s * 0.5 + post_s * 0.3 + demand_s * 0.2\n res[cate+\"-\"+post] = score\n sorted_res = sorted(res.items(), key=lambda x: x[1], reverse=True)\n return sorted_res",
"def evaluateObjective(posts, threshold):\n partialSum = 0\n for post in posts:\n partialSum += max(np.sign(post[\"similarity\"] - threshold) * post[\"score\"], 0)\n return partialSum",
"def get_vote_score(self):\n q = PostVote.objects.filter(post=self).aggregate(Sum('score'))\n return q['score__sum'] if q['score__sum'] else 0",
"def get_scores(self):\n return self.score",
"def getScore(self):\n\t\tself.scoreList = [submissionsss.score for submissionsss in self.subreddit.top(time_filter = 'day', limit = self.limits)]\n\t\treturn self.scoreList",
"def get_score(self, student_answers):\r\n pass",
"def postage_needed(self):\n return self.weight * self.postage_coefficient",
"def __avg_user_interested_post_weight(self, post_objects):\n total_posts = post_objects.count()\n post_weight = 0\n for each in post_objects:\n post_weight += each.weight\n average_post_weight = post_weight // total_posts\n return average_post_weight",
"def getSubmissionScore(submission):\r\n return submission.score",
"def score(self):\n return self.client.call('GET', self.name + 'score')",
"def get_score(self):\n return self.score",
"def get_target_per_score(self):\n pass",
"def do_score(self, hosts, vm, args):\n try:\n hostScores = []\n # use hosts IDs and VM ID to call the Rest API and make a decision\n for hostID in hosts:\n # Do work\n hostScores.append((hostID, 50))\n print(hostScores)\n except Exception as ex:\n print(ex, file=sys.stderr)",
"def getScore(data):\n return score",
"def get_score(self):\n return self.score",
"def get_score(self):\n return self.score",
"def get_score(self):\n return self.score",
"def compute(cls, real_data, synthetic_data, metadata):\n score_breakdowns = cls.compute_breakdown(real_data, synthetic_data, metadata)\n if 'score' in score_breakdowns:\n return score_breakdowns['score']\n\n all_scores = [breakdown['score'] for _, breakdown in score_breakdowns.items()]\n\n return sum(all_scores) / len(all_scores)",
"def get_score(self):\n return self.__score",
"def getScore(self):\n return sum(self.field)",
"def score(self):",
"def get_demand(self):\n return self.df_demand",
"def get_score(self):\n for response in self.response_list:\n self.score += response.get_score",
"def get_score(self):\r\n return None",
"def calculate_risk_tol(*args):\n global total_score\n risk_tol_start = 0.0\n\n for risk_per_pg in risk_tol_per_qs.iterkeys():\n try:\n risk_tol_start = risk_tol_start + risk_tol_per_qs[risk_per_pg][-1] # this is the last item in the list of each information in the page\n except IndexError:\n pass\n total_score = risk_tol_start",
"def get_score(self):\n return self._score",
"def get_score(self):\n return self._score",
"def get_score(self):\n return self._score"
] |
[
"0.71937186",
"0.62748903",
"0.5551882",
"0.5537667",
"0.55115795",
"0.54713887",
"0.5433023",
"0.53453386",
"0.53175926",
"0.53070366",
"0.5296628",
"0.5283368",
"0.5276864",
"0.5266636",
"0.5229753",
"0.52116245",
"0.5197169",
"0.5197169",
"0.5197169",
"0.5185938",
"0.51754034",
"0.5163988",
"0.51428866",
"0.51364225",
"0.51179504",
"0.5115931",
"0.5104889",
"0.51038355",
"0.51038355",
"0.51038355"
] |
0.72410864
|
0
|
Returns true if num1 and num2 are within a sliver of a floating point. m gives the multiplier on the floating point precision. >>> np.exp(np.log(.1)) == .1 False >>> approx(np.exp(np.log(.1)), .1) True
|
def approx(num1, num2, m=4, precision=None):
if precision:
return abs(num1-num2) <= 10**precision
else:
return abs(num1-num2) <= m * np.spacing(1)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def isGE(self, a : float, b : float) -> bool:\n return (a >= b - self.tol * max(abs(a), abs(b), 1.0)) #and (a >= b - 0.1)",
"def approx_eq(a, b):\n return abs(a-b) < approx_eq.eps",
"def fp_eq(x: float, y: float) -> bool:\n return fabs(x-y) < 10**-12",
"def feq(x, y, precision=0.0000005):\n x = np.asanyarray(x)\n y = np.asanyarray(y)\n boolean = abs(x-y) <= (abs(x+y)*precision)\n return boolean",
"def is_close(num1, num2, prec=dec.Decimal('1E-9')):\r\n if not isinstance(num1, dec.Decimal):\r\n num1 = dec.Decimal(num1)\r\n if not isinstance(num2, dec.Decimal):\r\n num2 = dec.Decimal(num2)\r\n err = abs(num1 - num2)\r\n if num1 == 0:\r\n if num2 == 0:\r\n return True\r\n return err < prec\r\n if num2 == 0:\r\n return err < prec\r\n return 2 * err / (num1 + num2) < prec",
"def almost_eq(e1,e2) :\n\treturn round(e1-e2,4) == 0.0",
"def _same(p1,p2,prec=0.0001):\n #return num.all(num.equal(p1,p2))\n t1 = num.fabs(p1[0]-p2[0]) < prec\n t2 = num.fabs(p1[1]-p2[1]) < prec\n if t1 and t2:\n #print \"same\", p1,p2\n return True",
"def arecloseenough(x1, x2):\n\n if abs(x1 - x2) <= VERYSMALL:\n return True\n \n return False",
"def approx_eq(x, y, tolerance = 0.000001):\n\treturn abs(x - y) < tolerance",
"def is_equal_approx(x, y, epsilon=1e-6):\r\n # Check absolute precision.\r\n if -epsilon <= x - y <= epsilon:\r\n return True\r\n\r\n # Is x or y too close to zero?\r\n if -epsilon <= x <= epsilon or -epsilon <= y <= epsilon:\r\n return False\r\n\r\n # Check relative precision.\r\n return (-epsilon <= (x - y) / x <= epsilon\r\n or -epsilon <= (x - y) / y <= epsilon)",
"def all_approx(l1, l2, m=4):\n\tsame = (len(l1) == len(l2))\n\ti = 0\n\twhile same and i < len(l1):\n\t\tsame = approx(l1[i], l2[i], m)\n\t\ti += 1\n\treturn same",
"def fequal(pos_act, pos_exp, eps=1e-5):\n return abs(pos_act - pos_exp) < eps",
"def approx_eq(x, y, tolerance=1e-15):\n return abs(x - y) < tolerance",
"def test_approximation(self):\n ell = np.log(self.data.var() ** 0.5)\n mu = self.data.mean()\n eps = 0.00001\n assert laplace_approx(mu, ell, self.data) > laplace_approx(\n mu + eps, ell + eps, self.data\n )\n assert laplace_approx(mu, ell, self.data) > laplace_approx(\n mu - eps, ell - eps, self.data\n )\n assert true_log_posterior(mu, ell, self.data) > true_log_posterior(\n mu + eps, ell + eps, self.data\n )\n assert true_log_posterior(mu, ell, self.data) > true_log_posterior(\n mu - eps, ell - eps, self.data\n )",
"def __ge__(self, other):\n extremes = self._numerator * other.denominator()\n means = other.numerator() * self._denominator\n return extremes >= means",
"def is_on_line(p0, p1, p2, threshold = 0.01):\n p0, p1, p2 = map(lambda tup : np.array(tup[:2]), [p0, p1, p2])\n p1 -= p0\n p2 -= p0\n return abs((p1[0] / p1[1]) - (p2[0] / p2[1])) < threshold",
"def floats_are_equal(a, b, tol=1e-5):\r\n return abs(a - b) <= tol * (abs(a) + abs(b))",
"def __gt__(self,f2):\n return self.__num * f2.den > self.__den * f2.num",
"def is_bound(pos1, el1, pos2, el2):\n threshold = 0.1\n if el1 == 'H' or el2 == 'H':\n threshold = 0.2\n if np.linalg.norm(np.array(pos1) - np.array(pos2)) < covalence_radius[el1] + covalence_radius[el2] + threshold:\n return True\n return False",
"def is_float_close(a, b, rel_tol=1e-06, abs_tol=0.0):\n return abs(a - b) <= max(rel_tol * max(abs(a), abs(b)), abs_tol)",
"def are_close(coord1, coord2, tolerance=10):\n return vincenty(coord1, coord2).meters < tolerance",
"def almosteq(a, b, rel_eps=1e-6, abs_eps=1e-8):\n if type(a) in float_int and type(b) in float_int:\n return math.isclose(a, b, rel_tol=rel_eps, abs_tol=abs_eps)\n else:\n return np.isclose(a, b, rtol=rel_eps, atol=abs_eps)",
"def nearly_equal(a, b, sig_fig=5):\n return a == b or int(a*10**sig_fig) == int(b*10**sig_fig)",
"def isWithinGT(self, a, b):\n return ((a[0]-b[0])**2 + (a[1]-b[1])**2)**0.5 < self.thresh",
"def IsApproximatelyEqual(x, y, epsilon = 1e-6):\n # Check absolute precision.\n if -epsilon <= x - y <= epsilon:\n return True\n\n # Is x or y too close to zero?\n if -epsilon <= x <= epsilon or -epsilon <= y <= epsilon:\n return False\n\n # Check relative precision.\n return (-epsilon <= (x - y) / x <= epsilon or -epsilon <= (x - y) / y <= epsilon)",
"def almost_equal_values(x, y, precision):\n return round(x - y, precision) == 0",
"def is_close(x, y, thresh=1e-8):\n\n diff = x - y\n return diff > (-thresh) and diff < thresh",
"def are_close(num1: float, num2: float, error: float) -> bool:\n\n if abs(num1-num2) < error:\n return True\n return False",
"def fp_gt(x: float, y: float) -> bool:\n return not fp_eq(x, y) and x > y",
"def checarPs(self,p1,p2):\n return abs(p1-p2) < 0.00001"
] |
[
"0.6546254",
"0.63741624",
"0.628203",
"0.61943084",
"0.6191803",
"0.6149317",
"0.60316664",
"0.60307664",
"0.5971287",
"0.59679186",
"0.5940457",
"0.59363294",
"0.5883913",
"0.5864648",
"0.58641005",
"0.58613974",
"0.5849694",
"0.5804423",
"0.5786198",
"0.57757586",
"0.5751323",
"0.5695931",
"0.5663681",
"0.56611323",
"0.5656245",
"0.56540644",
"0.56345415",
"0.5632101",
"0.56270367",
"0.5603849"
] |
0.7322738
|
0
|
Returns true if every element of l1 and l2 are approximately equivalent >>> l1 = [.1, .2, .3] >>> l2 = [np.exp(np.log(l)) for l in l1] >>> all_approx(l1, l2) True >>> l1[0]==l2[0] False >>> l2 = l2+[.4] >>> all_approx(l1, l2) False >>> all_approx(l2, l1) False >>> l1 = l1+[.4] >>> all_approx(l1, l2) True
|
def all_approx(l1, l2, m=4):
same = (len(l1) == len(l2))
i = 0
while same and i < len(l1):
same = approx(l1[i], l2[i], m)
i += 1
return same
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def is_equal_approx(x, y, epsilon=1e-6):\r\n # Check absolute precision.\r\n if -epsilon <= x - y <= epsilon:\r\n return True\r\n\r\n # Is x or y too close to zero?\r\n if -epsilon <= x <= epsilon or -epsilon <= y <= epsilon:\r\n return False\r\n\r\n # Check relative precision.\r\n return (-epsilon <= (x - y) / x <= epsilon\r\n or -epsilon <= (x - y) / y <= epsilon)",
"def approx_eq(a, b):\n return abs(a-b) < approx_eq.eps",
"def all_equal(x, y, eps=None):\n if eps:\n return all([abs(i - j) <= eps\n for i, j in zip(x, y)\n if i is not None and j is not None])\n return all([i == j for i, j in zip(x, y)])",
"def values_eq_approx(a, b):\r\n assert a.ndim == 4\r\n atol = None\r\n if a.shape[-1] * a.shape[-2] > 100:\r\n #For float32 the default atol is 1e-5\r\n atol = 3e-5\r\n return CudaNdarrayType.values_eq_approx(a, b, atol=atol)",
"def test_approximation(self):\n ell = np.log(self.data.var() ** 0.5)\n mu = self.data.mean()\n eps = 0.00001\n assert laplace_approx(mu, ell, self.data) > laplace_approx(\n mu + eps, ell + eps, self.data\n )\n assert laplace_approx(mu, ell, self.data) > laplace_approx(\n mu - eps, ell - eps, self.data\n )\n assert true_log_posterior(mu, ell, self.data) > true_log_posterior(\n mu + eps, ell + eps, self.data\n )\n assert true_log_posterior(mu, ell, self.data) > true_log_posterior(\n mu - eps, ell - eps, self.data\n )",
"def approx_equal(x, y, *args, **kwargs):\n if not (type(x) is type(y) is float):\n # Skip checking for __approx_equal__ in the common case of two floats.\n methodname = '__approx_equal__'\n # Allow the objects to specify what they consider \"approximately equal\",\n # giving precedence to x. If either object has the appropriate method, we\n # pass on any optional arguments untouched.\n for a,b in ((x, y), (y, x)):\n try:\n method = getattr(a, methodname)\n except AttributeError:\n continue\n else:\n result = method(b, *args, **kwargs)\n if result is NotImplemented:\n continue\n return bool(result)\n # If we get here without returning, then neither x nor y knows how to do an\n # approximate equal comparison (or are both floats). Fall back to a numeric\n # comparison.\n return _float_approx_equal(x, y, *args, **kwargs)",
"def almostEqualList(self, l1:List[float], l2:List[float], margin:float):\r\n ret = False\r\n for i in range(0,len(l1)):\r\n diff = abs(l1[i] - l2[i])\r\n if diff < margin:\r\n ret = True\r\n else:\r\n return False\r\n return ret",
"def almost_equal_values(x, y, precision):\n return round(x - y, precision) == 0",
"def _same(p1,p2,prec=0.0001):\n #return num.all(num.equal(p1,p2))\n t1 = num.fabs(p1[0]-p2[0]) < prec\n t2 = num.fabs(p1[1]-p2[1]) < prec\n if t1 and t2:\n #print \"same\", p1,p2\n return True",
"def values_eq_approx(a, b):\r\n assert a.ndim == 4\r\n atol = None\r\n if a.shape[-1] * a.shape[-2] > 100:\r\n #For float32 the default atol is 1e-5\r\n atol = 3e-5\r\n return GpuArrayType.values_eq_approx(a, b, atol=atol)",
"def test_likelihoods_equal_priors(self):\r\n equal = [0.25, 0.25, 0.25, 0.25]\r\n unequal = [0.5, 0.25, 0.125, 0.125]\r\n equal_answer = [1, 1, 1, 1]\r\n unequal_answer = [2, 1, 0.5, 0.5]\r\n for obs, exp in zip(likelihoods(equal, equal), equal_answer):\r\n self.assertFloatEqual(obs, exp)\r\n\r\n for obs, exp in zip(likelihoods(unequal, equal), unequal_answer):\r\n self.assertFloatEqual(obs, exp)",
"def IsApproximatelyEqual(x, y, epsilon = 1e-6):\n # Check absolute precision.\n if -epsilon <= x - y <= epsilon:\n return True\n\n # Is x or y too close to zero?\n if -epsilon <= x <= epsilon or -epsilon <= y <= epsilon:\n return False\n\n # Check relative precision.\n return (-epsilon <= (x - y) / x <= epsilon or -epsilon <= (x - y) / y <= epsilon)",
"def check_sequences_close(\n first: Sequence[Sequence[float]],\n second: Sequence[Sequence[float]],\n) -> None:\n assert len(first) == len(second)\n for f, s in zip(first, second):\n assert f == pytest.approx(s)",
"def almost_equal(self, other, precision):\n if isinstance(other, self.__class__):\n # Check that all elements in both arrays are almost\n # equal within given precision\n almost_equal = all(map(lambda x, y:\n self.almost_equal_values(x, y, precision),\n self.items, other.items))\n return (self.index == other.index) and \\\n (self.inUse == other.inUse) and \\\n (self.type == other.type) and \\\n (self.previousBlock == other.previousBlock) and \\\n (self.amount == other.amount) and \\\n (self.nextBlock == other.nextBlock) and almost_equal\n else:\n return False",
"def all_equal(list_a, list_b):\n if len(list_a) != len(list_b):\n return False\n a, b = np.array(list_a), np.array(list_b)\n return all(a == b)",
"def approx_eq(x, y, tolerance = 0.000001):\n\treturn abs(x - y) < tolerance",
"def test_likelihoods_equal_evidence(self):\r\n equal = [0.25, 0.25, 0.25, 0.25]\r\n unequal = [0.5, 0.25, 0.125, 0.125]\r\n equal_answer = [1, 1, 1, 1]\r\n unequal_answer = [2, 1, 0.5, 0.5]\r\n not_unity = [0.7, 0.7, 0.7, 0.7]\r\n\r\n for obs, exp in zip(likelihoods(equal, unequal), equal_answer):\r\n self.assertFloatEqual(obs, exp)\r\n\r\n # should be the same if evidences don't sum to 1\r\n for obs, exp in zip(likelihoods(not_unity, unequal), equal_answer):\r\n self.assertFloatEqual(obs, exp)",
"def almost_eq(e1,e2) :\n\treturn round(e1-e2,4) == 0.0",
"def approx(num1, num2, m=4, precision=None):\n\tif precision:\n\t\treturn abs(num1-num2) <= 10**precision\n\telse:\n\t\treturn abs(num1-num2) <= m * np.spacing(1)",
"def _compare_vector(arr1, arr2):\n\n length = len(arr1)\n if len(arr2) != length:\n return False\n\n for i in range(length):\n element_1 = float(arr1[i])\n element_2 = float(arr2[i])\n\n\n diff = abs(abs(element_1) - abs(element_2))\n if diff != 0.0:\n rel = diff / min(abs(element_1), abs(element_2))\n \n # For a basis set, a relatively coarse comparison\n # should be acceptible\n if rel > 1.0e-10:\n return False\n\n return True",
"def almost_equals(self, other, decimal=...): # -> bool:\n ...",
"def _allclose(x, y, rtol=1e-7, atol=1e-14):\n for a, b in zip(x, y):\n if np.abs(a - b) > (atol + rtol * np.abs(b)):\n return False\n return True",
"def equal_angles(a1, a2, angular_accuracy=1e-12): # pragma: no cover\n return np.abs(np.fmod(a1 - a2, two_pi)) < angular_accuracy",
"def IsApproximatelyEqual(x, y, epsilon):\n # Check absolute precision.\n if -epsilon <= x - y <= epsilon:\n return True\n\n # Is x or y too close to zero?0.\n if -epsilon <= x <= epsilon or -epsilon <= y <= epsilon:\n return False\n\n # Check relative precision.\n return (-epsilon <= (x - y) / x <= epsilon\n or -epsilon <= (x - y) / y <= epsilon)",
"def compare_addresses(s1_1, s1_2, s2_1, s2_2):\n\n return ((s1_1 == s2_1) | (s1_2 == s2_2) | (s1_1 == s2_2) | (s1_2 == s2_1)).astype(float)",
"def has_converged(x, y, epsilon=EPSILON):\n for a, b in itertools.izip(x, y):\n if abs(a - b) > epsilon:\n return False\n return True",
"def has_converged(x, y, epsilon=EPSILON):\n for a, b in itertools.izip(x, y):\n if abs(a - b) > epsilon:\n return False\n return True",
"def almosteq(a, b, rel_eps=1e-6, abs_eps=1e-8):\n if type(a) in float_int and type(b) in float_int:\n return math.isclose(a, b, rel_tol=rel_eps, abs_tol=abs_eps)\n else:\n return np.isclose(a, b, rtol=rel_eps, atol=abs_eps)",
"def approx_eq(x, y, tolerance=1e-15):\n return abs(x - y) < tolerance",
"def AreListsEqualToWithinEpsilon(List0, List1, epsilon=1e-06):\n \n import numpy as np\n \n # Get the maximum value of their absolute differences:\n AbsDiffs = abs(np.array(List0) - np.array(List1))\n MaxAbsDiff = max(AbsDiffs)\n \n if MaxAbsDiff < epsilon:\n IsEqual = True\n else:\n IsEqual = False\n \n return IsEqual"
] |
[
"0.63644165",
"0.6298559",
"0.6149365",
"0.60207367",
"0.6004111",
"0.5972103",
"0.5880449",
"0.5873127",
"0.5866577",
"0.5848705",
"0.58028466",
"0.5766068",
"0.5696672",
"0.5637069",
"0.5629465",
"0.56260514",
"0.56233066",
"0.5610046",
"0.5607236",
"0.5603783",
"0.5598328",
"0.5569508",
"0.55582625",
"0.5554236",
"0.5546971",
"0.55288374",
"0.55288374",
"0.5527986",
"0.5523805",
"0.55171764"
] |
0.79060626
|
0
|
Returns the index of the bin that feature belongs in. bin_bounds contains the upper bound on each bin's value, sorted.
|
def get_bin_index(signal, bin_bounds):
    # could do this binary search-like at least
    index = 0
    for v in bin_bounds:
        if signal <= v:
            return index
        index += 1
    return index - 1
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def bin_index(self, xnorm):\n return _bin_index(xnorm, self.nbins, self.padding)",
"def getBinIndex(self, x):\n\t\tb = -1\n\t\tif x == self._max_val: # final bin is [low, high], where others are [low,high)\n\t\t\tb = len(self._bins)-1\n\t\telse:\n\t\t\tb = math.floor((x-self._min_val)/self._bin_width)\n\t\treturn int(b)",
"def compute_bin_indices(X_part, bin_limits=None, n_bins=20):\n if bin_limits is None:\n bin_limits = []\n for variable_data in range(X_part.shape[1]):\n bin_limits.append(numpy.linspace(numpy.min(variable_data), numpy.max(variable_data), n_bins + 1)[1: -1])\n\n bin_indices = numpy.zeros(len(X_part), dtype=numpy.int)\n for axis, bin_limits_axis in enumerate(bin_limits):\n bin_indices *= (len(bin_limits_axis) + 1)\n bin_indices += numpy.searchsorted(bin_limits_axis, X_part[:, axis])\n\n return bin_indices",
"def get_bin_index(self, filter_bin):\n\n left_index = self.left_filter.get_bin_index(filter_bin[0])\n right_index = self.right_filter.get_bin_index(filter_bin[0])\n filter_index = left_index * self.right_filter.num_bins + right_index\n return filter_index",
"def get_bin_index(self, filter_bin):\n\n left_index = self.left_filter.get_bin_index(filter_bin[0])\n right_index = self.right_filter.get_bin_index(filter_bin[0])\n filter_index = left_index * self.right_filter.num_bins + right_index\n return filter_index",
"def get_bin(bins, val):\n\n for i in xrange(len(bins)):\n if val < bins[i]:\n return i-1\n \n return i-1",
"def affect(self, bin_boundaries, element):\n\n # bin_boundaries\n assert type(bin_boundaries) is np.ndarray\n\n # element\n assert isinstance(element, (int, float, np.number)), \\\n \"element = {} should be of a numeric type, not {}.\".format(element, type(element))\n assert bin_boundaries[0] <= element <= bin_boundaries[-1]\n\n # For all bins, in increasing order\n for m in range(1, len(bin_boundaries)):\n\n # If the element is too small to get into the mth bin\n if element < bin_boundaries[m]:\n # Returning the index of the previous one\n return m - 1\n\n # Boundary case : element belongs to the last bin.\n return len(bin_boundaries) - 2",
"def get_bin_index(self, filter_bin):\n\n if filter_bin not in self.bins:\n msg = 'Unable to get the bin index for AggregateFilter since ' \\\n '\"{}\" is not one of the bins'.format(filter_bin)\n raise ValueError(msg)\n else:\n return self.bins.index(filter_bin)",
"def get_bucket_boundaries(feature):\n return np.unique(np.percentile(feature, range(0, 100))).tolist()",
"def fft_bin_to_index(self, bins):\n idx = bins.copy()\n return idx",
"def II_eq_counts(tobin_series, num_bins):\n num_pbin = int(len(tobin_series) / num_bins)\n obs_list = tobin_series.sort_values().tolist()\n upper_bounds = [obs_list[(i + 1) * num_pbin] for i in range(num_bins)]\n lower_bounds = [0]\n lower_bounds += upper_bounds[:-1]\n return pd.IntervalIndex.from_arrays(lower_bounds, upper_bounds), upper_bounds",
"def _bin(self, X):\n H = np.linspace(0, 1, self.Nbin)\n return np.maximum(1 - (abs(X[..., None] - H)) / (H[1] - H[0]) , 0)",
"def get_bounds(f, lb=0, ub=None):\r\n lb_idx = np.searchsorted(f, lb, 'left')\r\n if ub == None:\r\n ub_idx = len(f)\r\n else:\r\n ub_idx = np.searchsorted(f, ub, 'right')\r\n\r\n return lb_idx, ub_idx",
"def bins(self):\n\n if self.hist_x_min is None or self.hist_x_max is None or self.hist_n_bin is None:\n return None\n\n if self.x_log:\n return np.logspace(np.log10(self.hist_x_min),\n np.log10(self.hist_x_max),\n self.hist_n_bin + 1)\n elif isinstance(self.hist_x_min, np.datetime64):\n x_min = self.hist_x_min.astype(int)\n x_max = self.hist_x_max.astype(self.hist_x_min.dtype).astype(int)\n return np.linspace(x_min, x_max, self.hist_n_bin + 1).astype(self.hist_x_min.dtype)\n else:\n return np.linspace(self.hist_x_min, self.hist_x_max,\n self.hist_n_bin + 1)",
"def bins(self):\n\n if self.hist_x_min is None or self.hist_x_max is None or self.hist_n_bin is None:\n return None\n\n if self.x_log:\n return np.logspace(np.log10(self.hist_x_min),\n np.log10(self.hist_x_max),\n self.hist_n_bin + 1)\n else:\n return np.linspace(self.hist_x_min, self.hist_x_max,\n self.hist_n_bin + 1)",
"def binning_to_index(binning: BinningBase, name: Optional[str] = None) -> pandas.IntervalIndex:\n # TODO: Check closedness\n return pandas.IntervalIndex.from_arrays(\n left=binning.bins[:, 0], right=binning.bins[:, 1], closed=\"left\", name=name\n )",
"def bins (self):\n return self._bins",
"def bins (self):\n return self._bins",
"def find_bin(self, x):\n return (x - self.bin_edges[0]) // self.bin_width",
"def bins(self):\n return self._bins",
"def value_to_class_index(bin_arr, val_arr):\n# return pd.cut(val_arr,bin_arr,labels=False)\n return np.digitize(val_arr,bin_arr,right=True)-1",
"def value_to_class_index(bin_arr, val_arr):\n# return pd.cut(val_arr,bin_arr,labels=False)\n return np.digitize(val_arr,bin_arr,right=True)-1",
"def bounds(self):\n return self._bboxes[0][0] #TODO: merge all coverages",
"def bin_by(bin_quantity, bins):\n\n hist_item, bin_edges = np.histogram(bin_quantity[~np.isnan(bin_quantity)], bins=bins)\n \n bin_indx = np.zeros(bin_quantity.shape, np.float_)\n\n # Step through bins\n for i in range(len(bin_edges) - 1):\n \n # Index each bin by bin quantify value\n bin_indx[np.where((bin_quantity > bin_edges[i]) & (bin_quantity < bin_edges[i + 1]))] = i + 1\n \n return bin_indx, bin_edges",
"def createBinsByGiniIndex(self, data, structure, colIndex, numOfBins):\n splits = self.miningCalculator.getListWithBestValueSplitsOfDataByGini(data, structure, colIndex, numOfBins - 1)\n splits.sort()\n bins = {\"value<=\" + str(splits[0]): lambda x: x <= splits[0]}\n if len(splits) > 1:\n for i in range(1, numOfBins - 1):\n bins[str(splits[i - 1]) + '<value<=' + str(splits[i])] = (lambda x: splits[i - 1] < x <= splits[i])\n bins[\"value>\" + str(splits[len(splits) - 1])] = (lambda x: x > splits[len(splits) - 1])\n return bins",
"def _get_i_bnds(self, wave_bounds=None):\n\n wave_grid = self.wave_grid\n i_bounds = self.i_bounds\n\n # Check if wave_bounds given\n if wave_bounds is None:\n wave_bounds = []\n for i in range(self.n_orders):\n wave = self.wave_map[i][~self.mask_ord[i]]\n wave_bounds.append([wave.min(), wave.max()])\n\n # What we need is the boundary position\n # on the wavelength grid.\n i_bnds_new = []\n for bounds, i_bnds in zip(wave_bounds, i_bounds):\n\n a = np.min(np.where(wave_grid >= bounds[0])[0])\n b = np.max(np.where(wave_grid <= bounds[1])[0]) + 1\n\n # Take the most restrictive bound\n a = np.maximum(a, i_bnds[0])\n b = np.minimum(b, i_bnds[1])\n\n # Keep value\n i_bnds_new.append([a, b])\n\n return i_bnds_new",
"def arraybin(array, bins):\n bin_it = lambda value: (i for i in range(len(array)) if array[i] >= value)\n splits = [next(bin_it(value), len(array)) for value in bins]\n return [list(range(start_idx, stop_idx)) for (start_idx, stop_idx)\n in zip([0] + splits, splits + [len(array)])]",
"def getBinIndices(self, linear_index):\n return linear_index / self.magic_array % self.nbins_across_dims",
"def findInterval(intervals,interval):\n low,ind = algorithms.binsearch(intervals,interval.start-1,lambda a,b: cmp(a.start,b))\n return (low,ind)",
"def bin(self, index):\n\n return self._ax.bin(index)"
] |
[
"0.70552397",
"0.70514095",
"0.700788",
"0.6601049",
"0.6601049",
"0.658574",
"0.64486784",
"0.6374188",
"0.6364163",
"0.63276494",
"0.61857146",
"0.6172362",
"0.615024",
"0.6147497",
"0.61349773",
"0.60643",
"0.5958335",
"0.5958335",
"0.5933699",
"0.59336483",
"0.5922114",
"0.5922114",
"0.59187996",
"0.58966124",
"0.587612",
"0.58722836",
"0.5851205",
"0.58202815",
"0.58154625",
"0.5809587"
] |
0.7421128
|
0
|
Find the index of the most significant change bit, the bit that will change when t is incremented >>> mscb(0) 0 >>> mscb(1) 1 >>> mscb(7) 3 >>> mscb(8) 0
|
def mscb(t):
return int(np.log2(t ^ (t + 1)))
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def index_of_least_significant_zero_bit(self, value):\n\n index = 1\n while (value & 1) != 0:\n value >>= 1\n index += 1\n return index",
"def _get_bit(byte, ii):\n return (byte >> (7 - ii)) & 1",
"def get_bit_position(x, k):\n\n return x & (1 << k)",
"def get_current_s(self):\n return 1 if self.ff_states[0] else 0",
"def ffb_c (cls):\r\n b=(bin(c0+jmax)[-(cls)])\r\n return (2*int(b)-1) # or -(2*int(b)-1), depending on the final direction of the hierarchy\r",
"def msb(n: int) -> int:\n ndx = 0\n while ( 1 < n ):\n n = ( n >> 1 )\n ndx += 1\n return ndx",
"def state(towers):\n ret = 0\n for i, row in enumerate(towers):\n for val in row:\n ret += i * 4**(val-1)\n return ret",
"def map_binary_values(x) -> int:\n return _bm.get(x, -1)",
"def at(board, pos):\n return (board >> pos-1) & 1",
"def _highNib(self, b):\n return int(self.hex_symbols[(b >> 4) & 0x0f])",
"def get_lowest_set_bit(x):\n\n return x & -x",
"def bitscan_forward(bitboard):\n i = 1\n while not (bitboard >> np.uint64(i)) % 2:\n i += 1\n return i",
"def _lowNib(self, b):\n return int(self.hex_symbols[b & 0x0f])",
"def get_lowest_unset_bit(x):\n\n return ~x & (x + 1)",
"def BitsRemaining(self):\n return self.NumBits() - (8*self.idx_byte + self.idx_boff) - 1",
"def select_MAP_control(Pi, t):\n\n u = Pi.idxmax()[t]\n return u",
"def pcb_temp():\n def r(x):\n return (x & 0xFFF if x & 0x800 else x & 0xFFF-0x1000) >> 4\n return r, None",
"def calcMaxIDX(fls, noct):\n freq_l = fls[-1] / (2.0 ** (1 / (2.0 * noct)))\n max_idx = np.array(abs(fls - freq_l)).argmin()\n return max_idx",
"def bit_pos(self):\n\n return self.byte_ptr * 8 + self.bit_ptr",
"def main_cc(self):\n if self.E > 0:\n cc = self.cc()\n pop = np.array([np.sum(cc == k) for k in np.unique(cc)])\n idx = np.nonzero(cc == pop.argmax())[0]\n else:\n idx = 0\n return idx",
"def next_state(s, w, offset=-0.5):\n return np.sign(w.dot(s) + offset).astype(np.int8)",
"def count_ones(byte):\n for i in range(8):\n if byte >> (7 - i) == 0b11111111 >> (7 - i) & ~1:\n return i\n return 8",
"def bv2int(bv: BitVector) -> int:\n nbits = len(bv)\n index = 0\n for i in range(nbits):\n if bv[i]:\n index += 2**(nbits - i - 1)\n return index",
"def _get_lback_index(self, model, last) -> int:\n assert last > 0\n # last state cannot be loop-back.\n assert model.get_value(self.totime(self._in_loop, last)).is_true()\n assert model.get_value(self.totime(self._in_loop, 0)).is_false()\n idx = last - 1\n while model.get_value(self.totime(self._in_loop, idx)).is_true():\n idx -= 1\n assert idx >= 0\n assert model.get_value(self.totime(self._in_loop, idx + 1)).is_true()\n assert model.get_value(self.totime(self._in_loop, idx)).is_false()\n assert model.get_value(self.totime(self.start_loop, idx)).is_true()\n return idx",
"def get_bit(num, position):\n\treturn (num >> position) & 0b1",
"def getFrq(self):\n fr = [0 for i in range(26)]\n for i in self.state[0]:\n for j in self.state[1]:\n fr[ord(self.table[i][j]) - ord('a')] += 1\n return fr",
"def hit_bin(self, n):\n # TODO: fix this monkey code!\n\n if n < 4:\n return n\n elif n << 3 == 0:\n return 4\n elif n << 4 == 0:\n return 5\n elif n << 5 == 0:\n return 6\n elif n >= 32 and n <= 127:\n return 7\n else:\n return 8",
"def codage(nbr):\n\tmask=1\n\tresult=0\n\tfor index in range(len(G)):\n\t\tif ((mask<<index)&nbr) != 0:\n\t\t\tresult^=G[len(G)-index-1]\n\treturn result",
"def MOSQ_LSB(A):\n return (A & 0x00FF)",
"def get_bit(num, i):\r\n return 1 if num & 1 << i else 0"
] |
[
"0.631035",
"0.59553164",
"0.5750957",
"0.57216984",
"0.5569518",
"0.5563024",
"0.5544709",
"0.5520244",
"0.55109185",
"0.55054873",
"0.55012137",
"0.5486113",
"0.5483539",
"0.54807216",
"0.54805136",
"0.5454347",
"0.54524463",
"0.54152685",
"0.5410381",
"0.540032",
"0.53825086",
"0.53678143",
"0.5354948",
"0.53541183",
"0.5339273",
"0.5336088",
"0.5323716",
"0.53188473",
"0.5301",
"0.52794325"
] |
0.73426956
|
0
|
Return the index of the largest value, randomly breaking ties. >>> argmax([0, 1]) 1 >>> argmax([1, 0]) 0 >>> a = argmax([0, 1, 1]) >>> a in (1, 2) True
|
def argmax(values):
values = np.array(values)
mx = np.max(values)
val = np.where(values==mx)[0]
return np.random.choice(val)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def random_argmax(x: np.ndarray) -> int:\n indices = all_argmax(x)\n return np.random.choice(indices)",
"def __argmax(l: list, key):\n max = float('-inf')\n max_i = -1\n for i in range(len(l)):\n if key(l[i]) > max:\n max = key(l[i])\n max_i = i\n return max_i",
"def argmax(sequence):\r\n\r\n import operator\r\n index, value = max(enumerate(sequence), key=operator.itemgetter(1))\r\n\r\n return index",
"def get_max_index(a):\n return a.argmax()",
"def get_max_index(a):\n return a.argmax()",
"def get_max_index(a):\n return a.argmax()",
"def custom_argmax(arr):\n return np.random.choice(np.flatnonzero(arr == arr.max()))",
"def index_largest(seq):\n assert len(seq) > 0\n x, greatest, index = len(seq), seq[0], 0\n for elem in range(1, x):\n if seq[elem] > greatest:\n greatest = seq[elem]\n index = elem\n return index",
"def argmax_random_tie(seq, key=identity):\n return argmax(shuffled(seq), key=key)",
"def maximo(arr):\n maxVal = float('-inf')\n maxIdx = -1\n\n for i in range(len(arr)):\n if arr[i] > maxVal:\n maxVal = arr[i]\n maxIdx = i\n\n return maxVal, maxIdx",
"def indexOfMax(list):\n max = -np.Infinity\n index = 0\n i = 0\n for value in list:\n if value > max:\n max = value\n index = i\n i += 1\n return index",
"def get_max_index_of_list(a_list):\n if isinstance(a_list, np.ndarray):\n idx = np.argmax(a_list)\n elif isinstance(a_list, list):\n idx=a_list.index(max(a_list))\n return idx",
"def rargmax(vector):\n m = np.amax(vector)\n indices = np.nonzero(vector ==m)[0]\n return pr.choice(indices)",
"def max(input: list[int]) -> int:\n if len(input) == 0:\n raise ValueError(\"max() arg is an empty List\")\n most: int = input[0]\n n: int = 1\n while n < len(input):\n if input[n] > most:\n most = input[n]\n n += 1 \n return most",
"def argmax(table):\n return max((v,k) for k,v in table.iteritems())[1]",
"def check_argmax(array):\n # Check which movements are the best, return it as a list where 1 = max of the list.\n res = [1 if i == max(array) else 0 for i in array]\n return list(compress([\"V\", \"H\", \"D\", \"X\"], res))",
"def my_max(in_list):\n biggest = in_list[0]\n for l in in_list:\n if l > biggest:\n biggest = l\n return biggest",
"def find_max(list):\n return find_value_at(list, 0)",
"def argMax(self):\n if len(self.keys()) == 0: return None\n all = list(self.items())\n values = [x[1] for x in all]\n maxIndex = values.index(max(values))\n return all[maxIndex][0]",
"def argmax(x):\n def op(a, b):\n comp = (a[1] > b[1])\n return comp.if_else(a[0], b[0]), comp.if_else(a[1], b[1])\n return tree_reduce(op, enumerate(x))[0]",
"def argmax(x):\n def op(a, b):\n comp = (a[1] > b[1])\n return comp.if_else(a[0], b[0]), comp.if_else(a[1], b[1])\n return tree_reduce(op, enumerate(x))[0]",
"def max(input: list[int]) -> int:\n if len(input) == 0:\n raise ValueError(\"max() arg is an empty List\")\n else:\n input.sort()\n return input[-1]",
"def argMax(self):\n if len(list(self.keys())) == 0:\n return None\n all = list(self.items())\n values = [x[1] for x in all]\n maxIndex = values.index(max(values))\n return all[maxIndex][0]",
"def get_max_key(dico):\n our_max = 0\n argmax = None\n for key, val in dico.items():\n if val > our_max:\n argmax = key\n our_max = val\n return argmax",
"def rand_val(max):\n order = math.ceil(math.log10(max)) #Determine the num of digits in size\n index = math.floor(random.SystemRandom().random() * (10 ** order))\n\n # Yea, this is quite inefficient\n while (index >= max):\n index = math.floor(random.SystemRandom().random() * (10 ** order))\n\n return index",
"def argmax(vec):\n _, idx = torch.max(vec, -1)\n return to_scalar(idx)",
"def argmax(fn,over):\n return max([(arg,fn(arg)) for arg in over],key=lambda v: v[1])[0]",
"def max(self):\n max = 0\n a = self.array_form\n for i in xrange(len(a)):\n if a[i] != i and a[i] > max:\n max = a[i]\n return max",
"def find_greatest_number(incoming_list):\n retval = max(incoming_list)\n return retval",
"def nextMax(value,lista):\n for i in lista:\n if i>value:\n return i\n raise NameError('No value')"
] |
[
"0.7526515",
"0.7374521",
"0.73375267",
"0.7292933",
"0.7292933",
"0.7292933",
"0.7266164",
"0.7108389",
"0.7082183",
"0.69236606",
"0.6903086",
"0.67438745",
"0.66566026",
"0.6644861",
"0.66012126",
"0.65932435",
"0.64963543",
"0.64745444",
"0.6468216",
"0.6466282",
"0.6466282",
"0.64656556",
"0.64636827",
"0.6441002",
"0.6401284",
"0.6391749",
"0.63842845",
"0.63763374",
"0.63726217",
"0.6355868"
] |
0.77623355
|
0
|
Calculate the numerator such that at t=0, a/(decay+t)=alpha
|
def calc_alpha_init(alpha, decay):
if not decay or decay <= 0:
return alpha
else:
return float(alpha * decay)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def linear_decay(x0, alpha, T, t):\n if t <= T:\n return x0 - (1 - alpha) * x0 * t / T\n else:\n return alpha * x0",
"def alpha0(self, t):\n return (self.alpha_0 * pow((self.Light(t) / self.I0), self.p));",
"def get_rate(self, t):\n return self.l_0 + \\\n self.alpha * sum(np.exp([self.beta * -1.0 * (t - s)\n for s in self.prev_excitations\n if s <= t]))",
"def learning_rate_decay(alpha, decay_rate, global_step, decay_step):\n epoc_number = int(global_step / decay_step)\n alpha /= (1 + decay_rate * epoc_number)\n return alpha",
"def phase_Neptune(alpha):\n phase = 10.**(-0.4*(7.944e-3*alpha + 9.617e-5*alpha**2.))\n return phase",
"def model_growth_rate(t, a_0, omega):\n a = a_0 * np.exp(omega * t)\n return a",
"def time_based(t, eta_init, last_eta, d = 0.01):\n return last_eta/(1+d*t)",
"def alpha_0(r): # r in um\n r1 = r / (1e6) # meters\n epsilonr = 3.\n return 3. * epsilon0 * ((epsilonr - 1)/(epsilonr + 2)) * (4. * np.pi / 3.) * (r1 ** 3)",
"def learning_rate(self, t):\n # return self.init_learning_rate * (1 - t)\n return self.init_learning_rate / (1 + t)\n # return self.init_learning_rate * exp(-t)\n # return self.init_learning_rate * (.005 / self.init_learning_rate) ** t",
"def evaluate(x, amplitude, x_0, alpha):\n\n xx = x / x_0\n return amplitude * xx ** (-alpha)",
"def evaluate(self, _t):\n\n temp = self.init_temp*(self.decay**_t)\n\n if temp < self.min_temp:\n temp = self.min_temp\n\n return temp",
"def asymptotic_decay(learning_rate, t, max_iter):\n return learning_rate / (1+t/(max_iter/2))",
"def _get_alpha(self, m_t, v_t):\n return max(0, ((-m_t * self._psi \n + math.sqrt((m_t ** 2 * self._phi ** 4) \n / 4 + v_t * self._phi ** 2 * self._xi)) \n / (v_t * self._xi)))",
"def evaluate(self, _t):\n\n temp = self.init_temp - (self.decay*_t)\n\n if temp < self.min_temp:\n temp = self.min_temp\n\n return temp",
"def initialwave(alpha, x0):\n return 12*alpha**2*(1/np.cosh(alpha*(x-x0)))**2",
"def update_pulse_rate(t,r_0 = 0.2,gamma=0.05):\n return r_0 * (1 - exp(-gamma*t))",
"def __call__(self, epoch):\n decay = (1 - (epoch / float(self.maxEpochs))) ** self.power\n alpha = self.initAlpha * decay\n \n # return alpha\n return float(alpha)",
"def rateFcn(a0,a1,a2,a3,a4,a5,a6,T):\n return np.exp(a0+a1/T+a2/T**(1/3)+a3*T**(1/3)+a4*T+a5*T**(5/3)+a6*np.log(T))",
"def phase_Saturn_2(alpha):\n phase = 10.**(-0.4*(- 3.7e-04*alpha +6.16e-04*alpha**2.))\n return phase",
"def calc_alpha(epsilon): \n return float(0.5 * np.log((1-epsilon)/epsilon))",
"def compute_alpha(\n T, S, z, rn_lambda1=rn_lambda1, rn_a0=rn_a0, rn_mu1=rn_mu1, rn_nu=rn_nu, **_\n):\n Ta = T - T0\n Sa = S - S0\n alpha = 1 / rho0 * (rn_a0 * (1 + rn_lambda1 * Ta + rn_mu1 * z) - rn_nu * Sa)\n return alpha",
"def acc(x: float, v: float, t: float) -> float:\n return -k*v - np.sin(x) + c*np.cos(omega*t)",
"def Eg_fct_T(Eg0,alpha,beta,T) :\n return Eg0-((T*T*alpha*1e-3)/(beta+T))",
"def lr_decay(step):\n return(alpha / (1 + decay_rate * step))",
"def input_f(t,decay=0.5,freq=1.5):\n u_t = 1*(t>0)\n return np.cos(freq*t)*np.exp(-decay*t) * u_t",
"def __call__(self, epoch):\n exp = np.floor((1 + epoch) / self.dropEvery)\n alpha = initAlpha * (self.factor ** exp)\n \n # return alpha \n return float(alpha)",
"def final_amt(p,r,n,t):\n\n a = p * (1 + r/n) ** (n*t)\n return a #This is what makes the function \"fruitful\"",
"def decay(time_, max_time, coeff):\n threshold = max_time - time_\n if threshold < 0:\n threshold = 0\n return 1 + threshold * coeff / max_time",
"def learning_rate_decay(alpha, decay_rate, global_step, decay_step):\n \"\"\"if staircase == True\n decayed_learning_rate = learning_rate /\n (1 + decay_rate * floor(global_step / decay_step)))\"\"\"\n return tf.train.inverse_time_decay(\n alpha, global_step, decay_step, decay_rate, staircase=True)",
"def succ(self, a):\n#das self.control Dictionary wird unter \"analyse\" aufgebaut; wird hier eingelesen\n n, absucc = self.control[a.name]\n if n == 0: return 0.0\n else: return float(absucc) / float(n)"
] |
[
"0.69432795",
"0.6660636",
"0.6574897",
"0.63983226",
"0.6329011",
"0.62791234",
"0.6273906",
"0.6231108",
"0.6197118",
"0.61908907",
"0.6139079",
"0.6103482",
"0.60895485",
"0.6089476",
"0.6075572",
"0.6073953",
"0.6066894",
"0.6041462",
"0.6040465",
"0.6033664",
"0.59855026",
"0.5965293",
"0.5945855",
"0.59429306",
"0.5935603",
"0.5919179",
"0.5885307",
"0.58809984",
"0.587344",
"0.58641815"
] |
0.6787552
|
1
|
Take a vector of values and an integer window size. Return the vector of values that are the mean over n steps. Note that (right now at least) the returned vector will be n-1 elements smaller. >>> running_mean([1, 2, 2, 4, 1, 1], 2) array([ 1.5, 2. , 3. , 2.5, 1. ]) >>> running_mean([1, 1, 1, 1, 2, 2, 1, 1, 2, 2, 2, 2], 4) array([ 1. , 1.25, 1.5 , 1.5 , 1.5 , 1.5 , 1.5 , 1.75, 2. ])
|
def running_mean(data, n):
    # Full-mode convolution has len(data) + n - 1 points; trimming n - 1 from
    # each end leaves the n-point window means, n - 1 fewer than the input.
    return np.convolve(data, np.ones((n,)) / n)[(n - 1):-(n - 1)]
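For comparison, a cumulative-sum formulation reproduces the same doctest values without building the full convolution. This is a sketch, assuming numpy is imported as np; the name running_mean_cumsum is illustrative and not part of the source.

import numpy as np

def running_mean_cumsum(data, n):
    # Windowed mean via a cumulative sum: difference of prefix sums n apart.
    csum = np.cumsum(np.insert(np.asarray(data, dtype=float), 0, 0.0))
    return (csum[n:] - csum[:-n]) / float(n)

print(running_mean_cumsum([1, 2, 2, 4, 1, 1], 2))                  # [1.5 2.  3.  2.5 1. ]
print(running_mean_cumsum([1, 1, 1, 1, 2, 2, 1, 1, 2, 2, 2, 2], 4))  # [1.   1.25 1.5  ... 2.  ]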
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def running_mean(x, N):\n return np.convolve(x, np.ones((N,))/N, mode='valid')",
"def running_mean(x, N):\n cumsum = np.cumsum(np.insert(x, 0, 0)) \n return (cumsum[N:] - cumsum[:-N]) / float(N)",
"def running_mean(x, N):\n cumsum = np.cumsum(np.insert(x, 0, 0))\n return (cumsum[N:] - cumsum[:-N]) / float(N)",
"def moving_average(data, window_size):\n window= np.ones(int(window_size))/float(window_size)\n return np.convolve(data, window, 'same')",
"def moving_average(x, window_len = 11):\n\n one_over_size = 1./float(window_len)\n window = np.ones(window_len) * one_over_size\n result = np.convolve(x, window, 'valid')\n # Cut off the clutter at the boundaries\n #return result[ (window_len-1)/2 : -(window_len-1)/2]\n return result",
"def running_avg (mylist, N):\n import numpy as np\n \n cumsum = np.cumsum(np.insert(mylist, 0, 0))\n return (cumsum[N:] - cumsum[:-N]) / float(N)",
"def moving_avg(a, halfwindow, mask=None):\r\n\r\n\r\n if mask is None:\r\n mask = np.ones_like(a, dtype='bool')\r\n\r\n zeros = np.zeros(a.shape[:-1] + (halfwindow,))\r\n falses = zeros.astype('bool')\r\n\r\n a_padded = np.concatenate((zeros, np.where(mask, a, 0), zeros), axis=-1)\r\n mask_padded = np.concatenate((falses, mask, falses), axis=-1)\r\n\r\n\r\n npt = 2 * halfwindow + 1 # total size of the averaging window\r\n rolling_a = as_strided(a_padded,\r\n shape=a.shape + (npt,),\r\n strides=a_padded.strides + (a.strides[-1],))\r\n rolling_mask = as_strided(mask_padded,\r\n shape=mask.shape + (npt,),\r\n strides=mask_padded.strides + (mask.strides[-1],))\r\n\r\n # moving average\r\n n = rolling_mask.sum(axis=-1)\r\n return np.where(n > 0, rolling_a.sum(axis=-1).astype('float') / n, np.nan)",
"def get_rolling_mean(values, window = 20):\n\treturn values.rolling(window, center=False).mean()",
"def moving_average(a, n=5):\n ret = np.cumsum(a, dtype=float)\n ret[n:] = ret[n:] - ret[:-n]\n ret[n-1:] *= 1 / n\n ret[:n-1] *= 1 / np.arange(1, n)\n return ret",
"def ExpMovingAverage(values, window):\n weights = np.exp(np.linspace(-1.0, 0.0, window))\n weights /= weights.sum()\n a = np.convolve(values, weights, mode=\"full\")[: len(values)]\n a[:window] = a[window]\n return a",
"def moving_average(sig, n=100):\n window = deque(maxlen=n) # last n scores\n sig_ma = []\n for i in range(len(sig)):\n window.append(sig[i])\n sig_ma.append(np.mean(window))\n return sig_ma",
"def moving_average(data, size):\n out = np.ma.zeros(data.shape) # Create the output array\n assert type(data) == np.ma.masked_array, 'Input data is not a masked array, use an alternative function or ' \\\n 'convert input to masked array before use'\n for ran in range(data.shape[1]): # for each range gate\n # Normal condition\n if ran >= size:\n window = data[:, ran - size:ran + size]\n \t out[:, ran] = np.ma.average(window,\n axis=1)\n out[:, ran] = np.where(out[:, ran].mask == True,\n np.nan,\n out[:, ran])\n\n # Shortened window at start of the array\n else:\n out[:,ran] = np.nan\n return out",
"def get_rolling_mean(values, window):\r\n return pd.rolling_mean(values, window=window)",
"def rolling_mean(\n self,\n window: tuple[int | int] = (0, 0),\n where: tuple[float | float] = (-inf, inf),\n ) -> pd.Series:\n where = _replace_none_with_infs(where)\n assert len(window) == 2, \"Window should be a listlike object of length 2.\"\n left_delta, right_delta = window\n lower, upper = where\n clipped = self.clip(lower, upper)\n if clipped._data is None:\n return pd.Series([clipped.initial_value] * 2, index=where)\n step_points = clipped._data.index\n sample_points = pd.Index.union(\n step_points - left_delta,\n step_points - right_delta,\n )\n ii = pd.IntervalIndex.from_arrays(\n sample_points + left_delta, sample_points + right_delta\n )\n s = pd.Series(\n clipped.slice(ii).mean().values,\n index=sample_points,\n )\n if lower != -inf:\n s = s.loc[s.index >= lower - left_delta]\n if upper != inf:\n s = s.loc[s.index <= upper - right_delta]\n return s",
"def moving_average(data, temporal_window=100):\n window = np.ones(temporal_window) / temporal_window\n return np.convolve(data, window, 'valid')",
"def rolling_mean_nb(a, window, minp=None):\n out = np.empty_like(a, dtype=np.float_)\n for col in range(a.shape[1]):\n out[:, col] = rolling_mean_1d_nb(a[:, col], window, minp=minp)\n return out",
"def running_mean(sequence: list):\n if not sequence:\n return []\n\n mean = []\n \"\"\"\n [1] = 1 / 1\n [1,2] = 3 / 2 \n [1,2,3] = 6 / 3\n \"\"\"\n for idx, num in enumerate(sequence):\n\n sum_total = sum(sequence[:(idx + 1)])\n result = sum_total / (idx + 1)\n\n mean.append(round(result, 2))\n\n return mean",
"def runningMeanFast(x, N=N):\n x1 = np.pad(x,N,mode='reflect')\n output =np.convolve(x1, np.ones((N,))/N)[(N-1):]\n return output[N:-N]",
"def get_running_mean(data,time_window):\n \n print('--> Starting to calculate running mean') \n timer_start = dt.now()\n filt = [1./float(time_window)]*int(time_window)\n running_mean = np.apply_along_axis(lambda m: np.convolve(m, filt, mode='valid'), axis=0, arr=data)\n running_mean = np.append(np.ones([len(data)-len(running_mean),*data.shape[1:]])*np.nan,running_mean,axis=0)\n print('--> Completed calculating running mean (%.1f seconds)' \\\n % (dt.now()-timer_start).total_seconds())\n return running_mean",
"def moving_average(a, n: int = 3) -> np.array:\n ret = np.cumsum(a, dtype=float)\n ret[n:] = ret[n:] - ret[:-n]\n return ret[n - 1:] / n",
"def _moving_average(self, series=None, window_length=None, train_subwindow_len=None):\n import numpy as np\n\n moving_averages = []\n iter_length = len(series) - window_length\n for i in range(0, iter_length):\n ma_current = np.mean(series[i:i + window_length])\n moving_averages.append(ma_current)\n\n # Moving average shrinkes w.r.t the actual series based on the moving average window. Hence, to keep the\n # length of the moving average equal to the series, we append proxy padding which are the moving averages\n # from the closest representative training sub-window.\n moving_averages_padded = moving_averages[(train_subwindow_len - (\n window_length // 2)):train_subwindow_len] + moving_averages + moving_averages[-train_subwindow_len:-(\n train_subwindow_len - (window_length // 2))]\n\n return moving_averages_padded",
"def exponential_running_mean(data, factor_new, init_block_size=None,\n start_mean=None, axis=None):\n assert not (start_mean is None and init_block_size is None), (\n \"Need either an init block or a start mean\")\n assert start_mean is None or init_block_size is None, (\n \"Can only use start mean \"\n \"or init block size\")\n assert factor_new <= 1.0\n assert factor_new >= 0.0\n if isinstance(axis, int):\n axis = (axis,)\n factor_old = 1 - factor_new\n\n # first preallocate the shape for the running means\n # shape depends on which axes will be removed\n running_mean_shape = list(data.shape)\n if axis is not None:\n for ax in axis:\n # keep dim as empty dim\n running_mean_shape[ax] = 1\n\n running_means = (np.ones(running_mean_shape) * np.nan).astype(np.float32)\n\n if start_mean is None:\n start_data = data[0:init_block_size]\n if axis is not None:\n axes_for_start_mean = (0,) + axis # also average across init trials\n else:\n axes_for_start_mean = 0\n # possibly temporarily upcast to float32 to avoid overflows in sum\n # that is computed to compute mean\n current_mean = np.mean(start_data.astype(np.float32),\n keepdims=True,\n axis=axes_for_start_mean).astype(\n start_data.dtype)\n # repeat mean for running means\n running_means[:init_block_size] = current_mean\n i_start = init_block_size\n else:\n current_mean = start_mean\n i_start = 0\n\n for i in range(i_start, len(data)):\n if axis is not None:\n datapoint_mean = np.mean(data[i:i + 1], axis=axis, keepdims=True)\n else:\n datapoint_mean = data[i:i + 1]\n next_mean = factor_new * datapoint_mean + factor_old * current_mean\n running_means[i] = next_mean\n current_mean = next_mean\n\n assert not np.any(np.isnan(running_means)), (\n \"RUnning mean has NaNs :\\n{:s}\".format(str(running_means)))\n assert not np.any(np.isinf(running_means)), (\n \"RUnning mean has Infs :\\n{:s}\".format(str(running_means)))\n return running_means",
"def moving_average(a, n=3) :\n ret = np.cumsum(a, dtype=float)\n ret[n:] = ret[n:] - ret[:-n]\n return ret[n - 1:] / n",
"def sliding_window_average(data, weights=None, window_size=500, shift_size=1):\n num_chunks = len(_sliding_window_chunkoffsets(data, window_size, shift_size))\n if num_chunks == 0:\n return np.asarray([])\n if weights is None:\n return _numba_sliding_window_average(data, num_chunks, window_size, shift_size)\n else:\n return _numba_sliding_window_average_with_weights(data, num_chunks, weights, window_size, shift_size)",
"def get_rolling_mean(values, window):\n return pd.rolling_mean(values, window=window)",
"def moving_average_forecast(series, window_size):\n\tforecast= []\n\tfor time in range(len(series)- window_size):\n\t\tforecast.append(series[time:time + window_size].mean())\n\treturn np.array(forecast)",
"def moving_average(a, n=3) :\r\n a = a.ravel()\r\n a = np.concatenate(([a[0]]*(n-1),a)) # repeating first values\r\n ret = np.cumsum(a, dtype = float)\r\n ret[n:] = ret[n:] - ret[:-n]\r\n ret=ret[n - 1:] / n\r\n return ret",
"def expanding_mean(arr):\n total_len = arr.shape[0]\n return ((arr / total_len).cumsum() / np.arange(1, total_len + 1)) * total_len",
"def __ExpMovingAverage(self, values, window):\n weights = np.exp(np.linspace(-1., 0., window))\n weights /= weights.sum()\n a = np.convolve(values, weights, mode='full')[:len(values)]\n a[:window] = a[window]\n return a",
"def lin_trim_mean(a: np.ndarray, start: float = 0.5, end: float = 0.1,\n start_v: float = 0, end_v: float = 0.5) -> float:\n start_w = np.linspace(start_v, 1, start * len(a), endpoint=False)\n end_w = np.linspace(end_v, 1, end * len(a), endpoint=False)[::-1]\n mid_w = np.ones(len(a) - len(start_w) - len(end_w))\n weights = np.concatenate((start_w, mid_w, end_w))\n return ((a * weights).sum() / weights.sum()).item()"
] |
[
"0.7720666",
"0.7643432",
"0.7536631",
"0.7211045",
"0.7008733",
"0.6889429",
"0.6789585",
"0.67637384",
"0.67565584",
"0.67500424",
"0.6691265",
"0.66497403",
"0.66207325",
"0.6608271",
"0.6591761",
"0.6547892",
"0.6546986",
"0.6544084",
"0.65184224",
"0.6493564",
"0.6491099",
"0.6477054",
"0.64691335",
"0.6467758",
"0.64362556",
"0.6421074",
"0.63934606",
"0.63837725",
"0.63643193",
"0.6339241"
] |
0.7728464
|
0
|
Converts a list of items into a string with special characters removed. Underscores are left. >>> join_items(['a','b','c']) 'a-b-c' >>> join_items(['a[0]','a[1]']) 'a0-a1' >>> join_items(['true_value',8]) 'true_value-8' >>> join_items(['true_values',.1,.2]) 'true_values-0.1-0.2' >>> join_items(['elbow_joint','wrist_joint','shoulder_joint'], True) 'elbow_joint-shoulder_joint-wrist_joint' >>> join_items('fred') 'fred' >>> join_items(.9) '0.9' >>> join_items(None) 'None' >>> join_items('') '' >>> join_items(['a_return', 'a', 'b', 'a_trace'], sort=True) 'a-a_return-a_trace-b'
|
def join_items(values, sort=False):
    # clean_string (defined elsewhere in the module) strips special characters
    # from an item while keeping underscores.
    if isinstance(values, str):
        return clean_string(values)
    try:
        val = []
        for v in values:
            val.append(clean_string(v))
        if sort:
            val.sort()
        return "-".join(val)
    except TypeError:
        # values is not iterable (e.g. a number or None)
        return str(values)
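A minimal usage sketch, assuming the join_items definition above is in scope. clean_string is not shown in the source, so the version below is a hypothetical stand-in that keeps letters, digits, underscores and dots.

import re

def clean_string(value):
    # Hypothetical helper: drop everything except letters, digits, underscores
    # and dots, after converting the value to a string.
    return re.sub(r'[^0-9A-Za-z_.]', '', str(value))

print(join_items(['a[0]', 'a[1]']))                                       # 'a0-a1'
print(join_items(['elbow_joint', 'wrist_joint', 'shoulder_joint'], True))
# 'elbow_joint-shoulder_joint-wrist_joint'
print(join_items(None))                                                   # 'None'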
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def join_list(items: Iterable[str]) -> str:\n\n return ITEM_SEPARATOR.join(items)",
"def list_join(the_list):\n return ' '.join(the_list)",
"def implode(delim, items):\n return delim.join(items)",
"def space_join(*items):\n valid_items = []\n for item in items:\n if item is None:\n continue\n if isinstance(item, tuple):\n if item[0] is None:\n continue\n stripped = strip_if_not_blank(item[0])\n if not is_null(stripped):\n if len(item) == 2:\n if not is_null(item[1]):\n valid_items.append(\"%s%s\" % (item[1], stripped))\n else:\n valid_items.append(stripped)\n elif len(item) >= 3:\n if not is_null(item[1]) and not is_null(item[2]):\n valid_items.append(\"%s%s%s\" % (\n item[1], stripped, item[2]))\n elif not is_null(item[1]):\n valid_items.append(\"%s%s\" % (item[1], stripped))\n elif not is_null(item[2]):\n valid_items.append(\"%s%s\" % (stripped, item[2]))\n else:\n stripped = strip_if_not_blank(item)\n if stripped != \"\":\n valid_items.append(stripped)\n return \" \".join(valid_items)",
"def _format_item_list(items, pad=\"'\", sep=', ', end_sep=' and '):\n result = ''\n items = [pad + item + pad for item in items]\n if items:\n if len(items) != 1:\n result = sep.join(items[:-1]) + end_sep + items[-1]\n else:\n result = items[0]\n return result",
"def list_string(join_list):\n joined_list = '[{}]'.format(join_list, join_list)\n return joined_list",
"def join_and_sanitize(list_):\n if isinstance(list_, str):\n return list_\n\n new_list = []\n for item in list_:\n if isinstance(item, str):\n new_list.append(item)\n elif isinstance(item, int):\n new_list.append(str(item))\n elif isinstance(item, float):\n new_list.append(str(item))\n elif isinstance(item, unicode):\n new_list.append(str(item))\n else:\n raise Exception('Invalid type when attempting to join and sanitize')\n\n return ' '.join(new_list)",
"def join_and_sanitize(list_):\n if isinstance(list_, str):\n return list_\n\n new_list = []\n for item in list_:\n if isinstance(item, str):\n new_list.append(item)\n elif isinstance(item, int):\n new_list.append(str(item))\n elif isinstance(item, float):\n new_list.append(str(item))\n else:\n raise Exception('Invalid type when attempting to join and sanitize')\n\n return ' '.join(new_list)",
"def concatenate_items(items, conjunction='and'):\n text = ''\n if not items:\n text = ''\n elif len(items) == 1:\n text = items[0]\n elif len(items) == 2:\n text = '{} {} {}'.format(items[0], conjunction, items[1])\n else:\n text = ', '.join(items[:-1])\n text += ', {} {}'.format(conjunction, items[-1])\n return text",
"def join(sep, xs):\n return str(sep).join(xs)",
"def underscore_join(iterable):\n iterable_as_str = [str(x) for x in iterable]\n return \"__\".join(iterable_as_str)",
"def rejoin(textList):\n return ','.join(textList)",
"def join_list(jlist, joiner=', '):\n if len(jlist) == 0:\n jlist = '[]'\n else:\n jlist = joiner.join(jlist)\n return jlist",
"def flatten_list(items: List[str]) -> str:\n if len(items) == 1:\n return f'\"{items[0]}\"'\n\n try:\n last = items[-1]\n except IndexError:\n # Empty list\n raise ValueError('Empty list of values received')\n\n return ', '.join(f'\"{item}\"' for item in items[:-1]) + f' or \"{last}\"'",
"def join(self, iterable) -> String:\n pass",
"def _join(lst, key, sep=\";\"):\n return sep.join([d[key] for d in lst if d[key]])",
"def __join_if_list(text_or_list: Union[List[str], str]) -> str:\n\n if isinstance(text_or_list, list):\n return ' '.join(text_or_list)\n return text_or_list",
"def join_params(**params):\n\tparam_list = get_sorted_keys(params)\n\tvalues = []\n\tfor k in param_list:\n\t\tvalues.append(k+'-'+join_items(params[k]))\n\treturn \"_\".join(values)",
"def join_strings(words):\n joined_string = ''\n for word in words:\n joined_string += word\n\n return joined_string",
"def join_str(lst, new_line=False):\n if new_line:\n j_str = \"/n\".join([str(i) for i in lst])\n else:\n j_str = \"\".join([str(i) for i in lst])\n return j_str",
"def list_str(lis):\r\n as_str = \"\"\r\n for item in lis:\r\n as_str += \" \" + str(item) + \",\"\r\n return as_str[:-1]",
"def JoinList(LIST):\r\n if type(LIST) == list:\r\n out = ', '.join(LIST)\r\n elif type(LIST) == str:\r\n out = LIST\r\n return out",
"def prefixCombiner(prefix, itemlist, glue=''):\n result = []\n for item in itemlist:\n result.append(prefix + glue + item)\n return result",
"def abridged_str_from_list(self,\r\n entrylist,\r\n trim_length=0,\r\n override=False):\r\n\r\n if override:\r\n trim_length = KEYLENGTH\r\n if trim_length == 0:\r\n trim_length = self.default_dict['keytrim']\r\n\r\n returntext = EMPTYCHAR\r\n for term in entrylist:\r\n lastlength = len(returntext)\r\n returntext += term+', '\r\n if len(returntext) > trim_length:\r\n if lastlength > trim_length-10:\r\n return returntext[0 : lastlength-2]\r\n return returntext[:trim_length]\r\n return returntext[:-2]",
"def join_list(\n object_list: list, delimiter: str = \", \", last_delimiter: str = \" & \"\n) -> str:\n if not object_list:\n return \"\"\n list_copy = list(object_list)\n last = list_copy.pop()\n if list_copy:\n return f\"{delimiter.join(list_copy)}{last_delimiter}{last}\"\n return f\"{last}\"",
"def list_to_string(inputlist):\n outstring = \"\"\n numusers = len(inputlist)\n if numusers == 1: # foo\n outstring += inputlist[0]\n if numusers == 2: # foo and bar\n outstring += (inputlist[0] + \" and \" + inputlist[1])\n if numusers >= 3: # foo, bar and baz\n for x in range(numusers-2):\n outstring += inputlist[x] + \", \"\n outstring += (inputlist[-2] + \" and \" + inputlist[-1])\n return outstring",
"def word_join(self, words):\n return \" \".join(words)",
"def _join_list_of_list(lst):\n\n int_list = [list(map(int, each)) for each in lst]\n # print(*int_list, sep=\"\\n\")\n str_list = [\",\".join(map(str, each)) for each in int_list]\n # print(*int_list, sep=\"\\n\")\n # print(str_list)\n final_str = \" \".join(str_list)\n # print(final_str)\n return final_str",
"def comma_code(items):\n item_len = len(items)\n \n if item_len == 0:\n return ''\n elif item_len == 1:\n return items[0]\n\n return ', '.join(items[:-1]) + ', and ' + items[-1]",
"def list_to_string(in_list):\n if not in_list:\n return \"[]\"\n else:\n return \"\\n- \" + \"\\n- \".join(in_list)"
] |
[
"0.7977471",
"0.693271",
"0.669698",
"0.66417265",
"0.6443994",
"0.6382685",
"0.6340171",
"0.63081473",
"0.6274466",
"0.61351764",
"0.60828316",
"0.6071877",
"0.5988854",
"0.5981916",
"0.5970071",
"0.59247893",
"0.58747697",
"0.580866",
"0.5806932",
"0.5757411",
"0.569503",
"0.56851745",
"0.56705266",
"0.5669214",
"0.56636566",
"0.5650543",
"0.56373733",
"0.5609777",
"0.5586606",
"0.5562107"
] |
0.70138764
|
1
|
Splits a string of dash-separated items into its component parts. >>> split_items('true_values-0.1-0.2') ['true_values', 0.1, 0.2] >>> split_items('a-b-c') ['a', 'b', 'c'] >>> split_items('true_value-8') ['true_value', 8] >>> split_items('elbow_joint-shoulder_joint-wrist_joint') ['elbow_joint', 'shoulder_joint', 'wrist_joint'] >>> split_items('fred') ['fred'] >>> split_items('None') [None] >>> split_items('alpha-0.1_gamma-0.9') ['alpha', '0.1_gamma', 0.9]
|
def split_items(item_string):
    parts = item_string.split('-')
    items = []
    # now clean up the types
    for v in parts:
        if v.isnumeric():
            items.append(int(v))
        elif v == 'None':
            items.append(None)
        else:
            try:
                items.append(float(v))
            except ValueError:
                items.append(v)
    return items
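A small round-trip check (a sketch assuming join_items and split_items above, plus the hypothetical clean_string helper, are in scope):

joined = join_items(['true_values', 0.1, 0.2])    # 'true_values-0.1-0.2'
print(split_items(joined))                        # ['true_values', 0.1, 0.2]
print(split_items('true_value-8'))                # ['true_value', 8]
print(split_items('None'))                        # [None]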
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def splitItems(items):\n if isinstance(items, str):\n usable = items.split()\n else:\n usable = items\n\n numItems = len(usable)\n\n vals = [usable[ii] for ii in range(0, numItems, 2)]\n uncs = [usable[ii] for ii in range(1, numItems, 2)]\n\n return vals, uncs",
"def split(items):\n return {\n \"class\": \"split\",\n \"items\": items\n }",
"def split_cmdline_filter_items(string):\n filter_items = string.split(',')\n return filter_items",
"def split(self, s):\r\n l = [self._split(x) for x in _SPLIT_RE.split(s)]\r\n return [item for sublist in l for item in sublist]",
"def split_name_values(param_items):\n return_list = list()\n for single_item in param_items:\n temp_list = [single_item[1]]\n temp_list.extend(clear_useless_end(single_item[2]).split(\",\"))\n return_list.append(temp_list)\n\n return return_list",
"def smart_split(x):\n return R_SPLIT_DELIM.split(x)",
"def test_split_string(self):\n self.assertEqual(('1-4', 14), split_string('1-4/14'))",
"def test_get_items_from_string() -> None:\n assert [\"i\", \"p\"] == common_util.get_items_from_string(\"i, ,p\")\n assert [\"i\", \"p\"] == common_util.get_items_from_string(\"i- -p\", separator=\"-\")\n assert [\"i\", \" \", \" p\"] == common_util.get_items_from_string(\"i, , p\", remove_blanks=False)\n assert [\"i\", \"p\"] == common_util.get_items_from_string(\"i, , p\")\n assert [] == common_util.get_items_from_string(\"\")",
"def split_list(items, pred):\n\n thisresult = []\n results = [thisresult]\n for i in items:\n thisresult.append(i)\n if pred(i):\n thisresult = []\n results.append(thisresult)\n return results",
"def split(\n string: str,\n splitters: Union[str, List[str]],\n count: Optional[int] = None,\n removeEmpty: int = 0,\n) -> List[str]:\n\n if count and count < 0:\n raise ValueError(\"Count cannot be less than zero\")\n\n if count == 0:\n return []\n\n if isinstance(splitters, str):\n if not removeEmpty:\n return string.split(splitters, count - 1 if count else -1)\n\n splitters = [splitters]\n\n splitters = [escape(x) for x in splitters] or [\" \"]\n\n i = 0\n splits: List[str] = []\n matches = re.finditer(\"|\".join(splitters), string)\n for m in matches:\n if count is not None and count <= 1:\n break\n\n split = string[i : m.start()]\n if split or not removeEmpty:\n splits.append(split)\n\n count = count - 1 if count is not None else count\n\n i = m.end()\n\n if (count is None or count and count > 0) and len(string) - i > -1:\n split = string[i:]\n if split or not removeEmpty:\n splits.append(split)\n\n return splits",
"def split(self, string, maxsplit=MAX_INT, include_separators=False):\n return self._split(\n string, maxsplit=maxsplit, include_separators=include_separators\n )",
"def _compute_split_boundaries(split_probs, n_items):\n if len(split_probs) > n_items:\n raise ValueError(\n 'Not enough items for the splits. There are {splits} '\n 'splits while there are only {items} items'.format(\n splits=len(split_probs), items=n_items\n )\n )\n total_probs = sum(p for name, p in split_probs)\n if abs(1 - total_probs) > 1e-8:\n raise ValueError('Probs should sum up to 1. probs={}'.format(split_probs))\n split_boundaries = []\n sum_p = 0.0\n for name, p in split_probs:\n prev = sum_p\n sum_p += p\n split_boundaries.append((name, int(prev * n_items), int(sum_p * n_items)))\n\n # Guard against rounding errors.\n split_boundaries[-1] = (\n split_boundaries[-1][0],\n split_boundaries[-1][1],\n n_items,\n )\n\n return split_boundaries",
"def split(string, separator, keep_separator):\n\t\t\tparts = string.split(separator)\n\t\t\tif keep_separator:\n\t\t\t\t*parts, last_part = parts\n\t\t\t\tparts = [part + separator for part in parts]\n\t\t\t\tif last_part:\n\t\t\t\t\treturn parts + [last_part]\n\t\t\treturn parts",
"def my_splitter(to_split, separator=None):\n if separator is None:\n split_list_regex = re.compile(r'[^\\s]+')\n return split_list_regex.findall(to_split)\n\n split_list = []\n\n while separator in to_split:\n separators_location = to_split.find(separator, 0)\n separated_word = to_split[:separators_location]\n split_list.append(separated_word)\n to_split = to_split[separators_location + len(separator):]\n\n split_list.append(to_split)\n\n return split_list",
"def inner_split(s):\n\n return s.split(split_string)",
"def split_and_extend(items):\n if not items:\n return items\n\n output = set()\n\n for item in items:\n current = []\n\n for split_item in item.split(\".\"):\n current = current + [split_item]\n output.add(\".\".join(current))\n\n return output",
"def split(value, delimiter):\n return value.split(delimiter)",
"def split_str(cmdline_str, has_options):\n return Splitter.split_list(shlex.split(cmdline_str), has_options)",
"def split_chunks(item_list, num_items_in_list):\n for item in range(0, len(item_list), num_items_in_list):\n # Create an index range for item_list of num_items_in_list items:\n yield item_list[item:item + num_items_in_list]",
"def split_bibitems(bibliography):\n \n refs = []\n for filename, bib in bibliography.iteritems():\n split_ind = []\n for ind, item in enumerate(bib):\n if item.startswith(r\"\\bibitem\"):\n split_ind.append(ind)\n \n for ref in partition(bib, split_ind):\n if ref:\n refs.append(RefObj.RefObj(filename, refstr='\\n'.join(ref)))\n return refs",
"def SplitBehavior(behavior):\n return [x for x in re.split('[ ()\"-.,]', behavior) if len(x) > 0]",
"def split_str(str):\n \n logger = logging.getLogger(__name__)\n \n logger.debug('{0}'.format(str))\n \n match = re.match(r\"([0-9]+.?\\d{0,32}?)(d|m|s)\", str)\n \n if match:\n items = match.groups()\n \n return items[0], items[1]",
"def _split_input_list(str_list):\r\n\r\n new_list = re.split(r'[\\n\\r\\s,]', str_list)\r\n new_list = [s.strip() for s in new_list]\r\n new_list = [s for s in new_list if s != '']\r\n\r\n return new_list",
"def split(self, splits, catchall=False):\r\n raise NotImplementedError()",
"def split(value, key):\n return str(value).split(key)",
"def isplit(iterable, splitters):\n return [list(g) for k,g in itertools.groupby(iterable,lambda x:x in splitters) if not k]",
"def testSplit(self):\n\n s = StrObject(u\"first second\")\n result = s.call(u\"split\", [StrObject(u\" \")])\n pieces = [obj._s for obj in unwrapList(result)]\n self.assertEqual(pieces, [u\"first\", u\"second\"])",
"def test_splitPartiesString(self):\n s = \"Appellant: Lucy Johnston - Respondent: Mary-Jane Lawrence\"\n expected = [\"Appellant: Lucy Johnston \",\" Respondent: Mary-Jane Lawrence\"]\n self.assertEqual(expected, split_parties.splitPartiesString(s))",
"def list(self, item, default=None, spliter=\",\", strip=True, mod=None):\n try:\n item = self.__getattr__(item)\n except AttributeError as err:\n if default is not None:\n return default\n raise err\n if strip:\n item = item.lstrip(\"[\").rstrip(\"]\")\n out = [x.strip() if strip else x for x in item.split(spliter)]\n if mod:\n return list(map(mod, out))\n return out",
"def split(\n items: typing.List[typing.Any],\n sizes: typing.List[float],\n random_state: int = 42,\n stratify: typing.Sequence[typing.Hashable] = None,\n group: typing.Sequence[typing.Hashable] = None,\n preserve: typing.Sequence[typing.Optional[int]] = None,\n) -> typing.Sequence[typing.Any]:\n splits: typing.List[typing.List[typing.Any]] = [[] for _ in range(len(sizes))]\n if group is None:\n group = list(range(len(items)))\n if stratify is None:\n stratify = [0] * len(items)\n if preserve is not None:\n assert len(items) == len(\n preserve\n ), \"When preserve is provided, it must be the same length as items.\"\n for item, preserveIdx in zip(items, preserve):\n if preserveIdx is not None:\n splits[preserveIdx].append(item)\n ideal_counts = [s * len(items) for s in sizes]\n items, stratify, group = [\n [\n entry\n for entry, preserveIdx in zip(current_list, preserve)\n if preserveIdx is None\n ]\n for current_list in [items, stratify, group]\n ]\n if len(items) == 0:\n # There's nothing left to split.\n return splits\n # Rebalance sizes so that we shuffle the remaining\n # items into the splits to try and match the originally\n # desired sizes.\n offsets = [\n max(target - len(split), 0) for split, target in zip(splits, ideal_counts)\n ]\n sizes = [offset / sum(offsets) for offset in offsets]\n assert (\n 0.99 < sum(sizes) < 1.01\n ), f\"The sizes must add up to 1.0 (they added up to {sum(sizes)}).\"\n assert len(group) == len(items), \"group must be the same length as the collection.\"\n assert len(stratify) == len(\n items\n ), \"stratify must be the same length as the collection.\"\n rng = np.random.default_rng(seed=random_state)\n grouped = [\n {**dict(zip([\"idxs\", \"stratifiers\"], zip(*grouper))), \"group\": g}\n for g, grouper in groupby_unsorted(\n list(zip(range(len(stratify)), stratify)),\n key=lambda v: typing.cast(typing.Sequence[typing.Hashable], group)[v[0]],\n )\n ]\n hashes = {\n h: list(g)\n for h, g in groupby_unsorted(\n grouped, key=lambda g: hash(tuple(set(g[\"stratifiers\"])))\n )\n }\n for subgroups in hashes.values():\n for a, u in zip(\n rng.choice(len(sizes), size=len(subgroups), p=sizes),\n subgroups,\n ):\n splits[a].extend(items[idx] for idx in u[\"idxs\"])\n return splits"
] |
[
"0.6730633",
"0.6239765",
"0.59711105",
"0.5655868",
"0.5633598",
"0.5582001",
"0.55317825",
"0.5411825",
"0.54079276",
"0.5397846",
"0.5320743",
"0.52738994",
"0.5258152",
"0.5238759",
"0.5187398",
"0.5168217",
"0.5111953",
"0.50975215",
"0.50514513",
"0.5047044",
"0.50419885",
"0.5031108",
"0.5002969",
"0.5002158",
"0.499202",
"0.49889654",
"0.49877962",
"0.4986804",
"0.49844554",
"0.49738425"
] |
0.71201855
|
0
|
Looks for files in the given directory that have filenames that match required_features (using the feature1_feature2.txt naming convention). If no files exist, return the standard filename. Otherwise return the file that includes all the required features with the fewest extra features.
|
def get_best_file_match(required_features, base_dir):
    # Helpers (remove_modifiers, file_exists, get_all_text_files, split_ext,
    # match_items) are defined elsewhere in the module.
    best_match = join_items(remove_modifiers(required_features), sort=True)
    required = set(split_items(best_match))
    best_match = best_match + ".txt"
    fe = file_exists(os.path.join(base_dir, best_match), check_zip=True)
    if fe:
        return os.path.split(fe)[1]
    options = get_all_text_files(base_dir, keep_extension=True)
    if not options:
        return best_match
    # sentinel: larger than any plausible count of extra features
    extra_features = len(required_features) * 10
    for o in options:
        opt = split_ext(o)[0]
        match = match_items(required, opt)
        if match > 0 and match < extra_features:
            extra_features = match
            best_match = o
    return best_match
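Since those helpers are not shown, the following standalone sketch illustrates only the selection rule described above (cover every required feature, prefer the fewest extras). The function name and the direct os.listdir scan are illustrative assumptions, not the original implementation.

import os

def pick_best_feature_file(required_features, base_dir):
    required = set(required_features)
    best_name = "-".join(sorted(required)) + ".txt"   # the "standard" filename
    best_extra = None
    for name in sorted(os.listdir(base_dir)):
        if not name.endswith(".txt"):
            continue
        features = set(name[:-len(".txt")].split("-"))
        if required <= features:            # all required features are present
            extra = len(features - required)
            if best_extra is None or extra < best_extra:
                best_extra = extra
                best_name = name
    return best_name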
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def find_best_features(year, features, sex, age, heavy):\r\n print 'find_best_features(year=%d,features=%s,sex=%s,age=%s,heavy=%s)' % (year, features, sex,\r\n age, heavy)\r\n X, y, keys = getXy_by_features(year, features, sex, age)\r\n title = 'features=%s,sex=%s,age=%s,year=%d' % (features,sex,age,year) \r\n results, n_samples = select_features.get_most_predictive_feature_set(title, X, y, keys, heavy)\r\n return results, n_samples, keys",
"def list_example_files():\n candidate_fns = os.listdir(data_dir())\n exts = ('.bed', '.gff', '.gtf', '.bed.gz', '.bam', '.gff.gz')\n valid_fns = [f for f in candidate_fns if f.endswith(exts)]\n return sorted(valid_fns)",
"def get_result_filenames(self,directory):\n return [os.path.join(directory,name) for name in os.listdir(directory)\n if os.path.isfile(os.path.join(directory,name)) and\n os.path.splitext(name)[1].lower() == '.trf']",
"def features_from_folder(label_folder, audio_folder, output_folder):\n print('Listing label files from folder.')\n #scan labels folder\n labels_list = os.listdir(label_folder)\n label_files = []\n for filename in labels_list:\n #get its extension\n file_extension = filename.split('.')[-1]\n if file_extension != 'txt':\n continue\n #save to without its extension\n label_files.append(filename[:-4])\n\n print('Listing audio files from folder.')\n #scan audio folder\n audios_list = os.listdir(audio_folder)\n audio_files = []\n for filename in audios_list:\n #get its extension\n file_extension = filename.split('.')[-1]\n if file_extension != 'wav':\n continue\n #save to without its extension\n audio_files.append(filename[:-4])\n\n print('Removing files without matches')\n #use only the files with matching audio/label\n files_to_process = []\n for label_file in label_files:\n if label_file in audio_files:\n files_to_process.append(label_file)\n\n print('Processing each file...')\n i = 1\n class_count = {}\n total_f = len(files_to_process)\n #for each file\n for processing in files_to_process:\n print('File', str(i) + '/' + str(total_f))\n i += 1\n\n #\n label_file = os.path.join(label_folder, processing + \".txt\")\n audio_file = os.path.join(audio_folder, processing + \".wav\")\n\n #get the segments from the corresponding label file\n segments = get_segments(label_file)\n\n #\n total_s = len(segments)\n j = 1\n #for each segment\n for segment in segments:\n print('\\tSegment', str(j) + '/' + str(total_s), segment['class'])\n j += 1\n\n if class_count.get(segment['class']) is None:\n class_count[segment['class']] = 1\n else:\n class_count[segment['class']] += 1\n output_filename = segment['class']\n output_filename += '-' + format(class_count[segment['class']], '04d')\n output_filename = os.path.join(output_folder, output_filename)\n\n #get its features\n segment_features = features_from_label(audio_file, segment)\n\n #save it to a file\n fe.write_as_bin(output_filename, segment_features)",
"def get_best_kml_file(directory_name: str) -> fastkml.kml.KML:\n\t\n\treturn min(\n\t\tos.listdir(directory_name),\n\t\tkey=lambda filename: total_cost(get_kml_coordinates(get_kml_object(os.path.join(directory_name, filename)))),\n\t)",
"def _filter_files(file_dir: Union[str, Path], is_viya4: Optional[bool] = False) -> list:\n file_names = []\n file_names.extend(sorted(Path(file_dir).glob(\"*.json\")))\n if is_viya4:\n file_names.extend(sorted(Path(file_dir).glob(\"score_*.py\")))\n file_names.extend(sorted(Path(file_dir).glob(\"*.pickle\")))\n # Include H2O.ai MOJO files\n file_names.extend(sorted(Path(file_dir).glob(\"*.mojo\")))\n if file_names:\n return file_names\n else:\n raise FileNotFoundError(\n \"No valid model files were found in the provided file directory.\"\n )",
"def test_features_found_by_relative_path(self):\n # Given I have Romaine's core\n from tests.common import romaine\n core = romaine.Core()\n\n # When I locate features in tests/features\n results = core.locate_features('tests/features')\n\n # Then I see the list:\n # | path |\n # | tests/features/feature1 |\n # | tests/features/feature2 |\n # | tests/features/subdir/feature3 |\n self.assertEqual(\n sorted(results),\n [\n 'tests/features/feature1',\n 'tests/features/feature2',\n 'tests/features/subdir/feature3',\n ]\n )",
"def parse_feature_files(paths: list[str], **kwargs: Any) -> tuple[list[Feature], list[ScenarioTemplate], list[Step]]:\n features = get_features(paths, **kwargs)\n scenarios = sorted(\n itertools.chain.from_iterable(feature.scenarios.values() for feature in features),\n key=lambda scenario: (scenario.feature.name or scenario.feature.filename, scenario.name),\n )\n steps = sorted((step for scenario in scenarios for step in scenario.steps), key=lambda step: step.name)\n return features, scenarios, steps",
"def get_file_via_steps(cls, src_path, steps, file_extention, regex):\n res = None\n func = cls.get_pattern\n files_iterator = glob.iglob(\n os.path.join(src_path, f'*.{file_extention}'))\n sorted_file_names = sorted(\n [(func(file, regex, 2), file) for file in files_iterator])\n for samples_seen, file in sorted_file_names:\n if samples_seen >= steps:\n res = file\n steps = samples_seen\n break\n logger.warning(\"couldnt find files for the specified number of steps,\"\n \"loading the latest files instead\")\n\n return res, steps",
"def get_feature_labels_files(dataset):\n features = []\n audio_labels = []\n focal_labels = []\n files = []\n for frame in dataset:\n files.append(frame[0])\n features.append(frame[1][0].T)\n if frame[1][1] is not None:\n audio_labels.append(frame[1][1][0].T)\n focal_labels.append(frame[1][1][1].T)\n else:\n audio_labels.append(None)\n focal_labels.append(None)\n features = np.expand_dims(np.asarray(features), 4)\n audio_labels = np.asarray(audio_labels)\n focal_labels = np.asarray(focal_labels)\n return [features, audio_labels,focal_labels, files]",
"def gather_required_files(filename):\n # open the file, while ignoring encoding errors (usually comments)\n encoding = open_guess_encoding(filename)\n with open(filename, encoding=encoding, errors='surrogateescape') as fp:\n config = MugenParser()\n config.read_string(fp.read())\n\n # go through each section and store any options that look like filenames\n required = set()\n for section in config.sections():\n section = config[section]\n options = set(find_asset(normpath(v)) for k, v in section.items()\n if filename_regex.match(v))\n required.update(options)\n\n # check other def files, then search them and add the results\n root = dirname(filename)\n for child_file in required.copy():\n name, ext = os.path.splitext(child_file)\n if ext.lower() == '.def':\n path = join(root, child_file)\n required.update(gather_required_files(path))\n\n # TODO: this is not implemented\n # mugen does checking against many paths, so we need\n # to emulate that the if we want to check for missing files\n # finally, go through the potential files and verify they exist\n # for child_file in required.copy():\n # path = join(root, child_file)\n # if not os.path.exists(path):\n # required.remove(child_file)\n\n return required",
"def find_local_file(files, traj_num, train_or_val):\n least_num = np.Inf\n for f in files:\n name_list = re.split(\"[-.]\", f)\n if train_or_val in name_list:\n for tmp in name_list:\n if tmp.isdigit():\n num = int(tmp)\n if traj_num <= num < least_num:\n least_num = num\n return least_num",
"def clfFeature(feature, mode):\r\n \r\n feature_path = 'C:\\\\Users\\\\Tom\\\\Documents\\\\Informatiekunde\\\\Thesis\\\\features\\\\' + feature + '.txt'\r\n classlist = ['negative', 'positive']\r\n features = pd.DataFrame()\r\n\r\n for label in classlist:\r\n path = 'C:\\\\Users\\\\Tom\\\\Documents\\\\Informatiekunde\\\\Thesis\\\\data\\\\' + mode + '\\\\' + label + '\\\\'\r\n allFiles = glob.glob(path + \"*.txt\")\r\n for review in allFiles:\r\n title = review.strip('.txt').split('\\\\')[-1]\r\n file = open(review, 'r', encoding='utf8').read().lower()\r\n wordlist = []\r\n featreader = csv.reader(open(feature_path, 'r'), delimiter= '\\n')\r\n for word in featreader:\r\n if word[0] in file:\r\n wordlist.append(word[0])\r\n df = pd.DataFrame({'File': [title], feature.capitalize(): [', '.join(wordlist)]}).set_index('File')\r\n features = features.append(df)\r\n \r\n return features",
"def get_fnames_predict(path):\n\n SUPPORTED_EXTENSIONS = ['png', 'PNG', 'jpg', 'JPG', 'jpeg', 'JPEG']\n\n fnames = []\n for se in SUPPORTED_EXTENSIONS:\n fnames.extend(sorted(glob.glob(join(path, '*.' + se), recursive=True)))\n return fnames",
"def find_many_files(\n top_directory_name, first_date_string, last_date_string,\n prefer_zipped=True, allow_other_format=True, radar_number=None,\n raise_error_if_all_missing=True, raise_error_if_any_missing=False,\n test_mode=False):\n\n error_checking.assert_is_boolean(raise_error_if_any_missing)\n error_checking.assert_is_boolean(raise_error_if_all_missing)\n error_checking.assert_is_boolean(test_mode)\n\n valid_date_strings = time_conversion.get_spc_dates_in_range(\n first_date_string, last_date_string\n )\n\n prediction_file_names = []\n\n for this_date_string in valid_date_strings:\n this_file_name = find_file(\n top_directory_name=top_directory_name,\n valid_date_string=this_date_string,\n prefer_zipped=prefer_zipped, allow_other_format=allow_other_format,\n radar_number=radar_number,\n raise_error_if_missing=raise_error_if_any_missing\n )\n\n if test_mode or os.path.isfile(this_file_name):\n prediction_file_names.append(this_file_name)\n\n if raise_error_if_all_missing and len(prediction_file_names) == 0:\n error_string = (\n 'Cannot find any file in directory \"{0:s}\" from dates {1:s} to '\n '{2:s}.'\n ).format(\n top_directory_name, first_date_string, last_date_string\n )\n raise ValueError(error_string)\n\n return prediction_file_names",
"def get_feature_paths(start_dir, extensions = ['dcm']):\n if start_dir is None:\n start_dir = os.getcwd()\n img_paths = []\n for roots,dirs,files in os.walk(start_dir):\n for name in files:\n for e in extensions:\n if name.endswith('.' + e):\n img_paths.append(roots + '/' + name)\n img_paths.sort()\n return img_paths",
"def load_feature(feature_name, caf_dose, features_path):\n # gets the paths to the folders where the specified feature is stored\n subject_paths = glob.glob(os.path.join(features_path, \"*\", feature_name))\n\n feature = {}\n for path in subject_paths:\n # extract the subject id from the current path (second to last element in the path)\n subject_id = path.split(os.sep)[-2]\n\n # get all stages for the current subject\n stages = set(\n [\n p.split(os.sep)[-1].split(\"_\")[-1].split(\".\")[0]\n for p in glob.glob(os.path.join(path, \"*.npy\"))\n ]\n )\n if len(stages) == 0:\n print(\n f\"The following directory doesn't contain features: {path}. \"\n \"This will likely cause an error down the line\"\n )\n for stage in stages:\n if stage not in feature:\n feature[stage] = {}\n # load the file containing the data for the current stage and subject\n feature[stage][subject_id] = np.load(\n os.path.join(path, f\"{feature_name}_{stage}.npy\"), allow_pickle=True\n )\n return feature",
"def get_input_files(dir_path):\n return [os.path.join(dir_path,f) for f in os.listdir(dir_path)\n if os.path.isfile(os.path.join(dir_path,f))]",
"def get_all_filenames_from_dir(directory,suffex, filename_allowed_list = None):\n\n files_list = list()\n if filename_allowed_list == None:\n for item in glob.glob(directory+'*'+suffex): # Example /datasets/Stock_dataset/Stocks/*.txt\n files_list.append(item) \n else:\n filename_allowed_list = [v.lower() for v in filename_allowed_list] # To avoid case sensitve\n for item in glob.glob(directory+'*'+suffex):\n if item.split(\"/\")[-1].split('.')[0].lower() in filename_allowed_list: # Since linux is case sensitive, then so is this function, make sure the names match correctly\n files_list.append(item)\n if not len(files_list) == len(filename_allowed_list):\n print 'Some Stocks files are missing'\n return files_list",
"def generate_features(headerfile_path, zipfile_path, features_to_use,\n custom_script_path, is_test, already_featurized,\n in_docker_container):\n if already_featurized:\n # Read in features from CSV file\n objects = parse_prefeaturized_csv_data(headerfile_path)\n else:\n # Parse header file\n (features_to_use, fname_class_dict, fname_class_science_features_dict,\n fname_metadata_dict) = parse_headerfile(headerfile_path,\n features_to_use)\n input_params_list = generate_featurize_input_params_list(\n features_to_use, fname_class_dict,\n fname_class_science_features_dict, fname_metadata_dict,\n zipfile_path, custom_script_path, is_test)\n # TO-DO: Determine number of cores in cluster:\n res = featurize_celery_task.chunks(input_params_list,\n cfg.N_CORES).delay()\n res_list = res.get(timeout=100)\n objects = []\n for line in res_list:\n for el in line:\n short_fname, new_feats = el\n if short_fname in fname_metadata_dict:\n all_features = dict(\n list(new_feats.items()) +\n list(fname_metadata_dict[short_fname].items()))\n else:\n all_features = new_feats\n objects.append(all_features)\n return objects",
"def featurefile(request):\n featurename = request.param[0]\n return os.path.join(__FEATURE_FILES_DIR__, featurename + \".feature\")",
"def _find_bam(bam_files, sample):\n score = 0\n candidate = None\n for fn in bam_files:\n sc = sum(a == b for a, b in zip(op.basename(sample), op.basename(fn)))\n if sc > score:\n score = sc\n candidate = fn\n return candidate",
"def categorize_classifier_files(out_dir):\n\n #sort all of the classifier files into a dictionary\n class_files = glob.glob(\"feature_extraction_m*\")\n class_file_dict = {\"positive\":[], \"negative\":[]}\n class_cand_dict = {\"m1\":class_file_dict, \"m2\":class_file_dict, \"m3\":class_file_dict, \"m4\":class_file_dict, \"m5\":class_file_dict}\n\n for filename in class_files:\n split_name = filename.split(\"_\")[-1].split(\".\")\n model_num = split_name[0]\n det = split_name[-1]\n class_cand_dict[model_num][det].append(filename)\n\n #get all of the pfd files into a list\n class_file_m1 = glob.glob(\"feature_extraction_m1*\")\n pfd_files = []\n for afile in class_file_m1:\n f = open(afile, \"r\")\n for line in f.readlines():\n pfd_files.append(line)\n f.close()\n\n #fill a dictionary with pfds and a value for how many positive IDs each pfd has\n pulsar_pfds={}\n for key in pfd_files:\n pulsar_pfds[key]=0\n for model_num in class_cand_dict.keys():\n if class_cand_dict[model_num][\"positive\"]:\n print(class_cand_dict[model_num][\"positive\"])\n f = open(class_cand_dict[model_num][\"positive\"][0], \"r\")\n for line in f.readlines():\n pulsar_pfds[line]+=1\n f.close()\n\n #For each pfd with >=3 positive IDs, write that pfd to 'positive' file, else write to 'negative' file\n pos_f = open(os.path.join(out_dir, \"LOTAAS_positive_detections.txt\"), \"w+\")\n neg_f = open(os.path.join(out_dir, \"LOTAAS_negative_detections.txt\"), \"w+\")\n for pfd_key in pulsar_pfds.keys():\n if pulsar_pfds[pfd_key]>=3:\n print(\"detected pulsar: {}\".format(pfd_key))\n pos_f.write(pfd_key.split(\"/\")[-1])\n else:\n neg_f.write(pfd_key.split(\"/\")[-1])\n pos_f.close()\n neg_f.close()",
"def find_feature_titles_in_file(feature_index, feature_names, file):\n\n dict_of_features_in_this_file = {}\n for feature_name, feature_titles in feature_names.items():\n try:\n features_found = [feature for feature in feature_titles if feature in feature_index]\n if len(features_found) == 1:\n dict_of_features_in_this_file[feature_name] = features_found[0]\n else:\n raise FeatureNotFoundError\n\n except FeatureNotFoundError:\n sys.exit(\n 'ERROR: Finding zero or more than one occurrence of feature {} in the header of input file'\n 'file {}! Please check variable feature_names in the function main().'\n 'Running the code is terminated.'.format(feature_titles, file))\n return dict_of_features_in_this_file",
"def _findFiles(self, inputfolder):\n protofile, caffemodel = None, None\n files = os.listdir(inputfolder)\n for f in files:\n name, ext = splitext(f)\n if ext == '.caffemodel':\n caffemodel = join(inputfolder, f)\n elif f == 'deploy.prototxt':\n protofile = join(inputfolder, f)\n return protofile, caffemodel",
"def get_html_files_for_candidates(f_name):\n candidates = ['Joe Biden','Kamala Harris','Elizabeth Warren','Bernie Sanders']\n candidates_files = ['{}.html'.format(candidate) for candidate in candidates]\n results = []\n files = sorted([os.path.join(f_name, f) for f in os.listdir(f_name) if os.path.isfile(os.path.join(f_name, f))]) #extra check if is file unnecessary\n for file in files:\n for c in candidates_files:\n if c in file:\n results.append(file)\n print(results)\n return results",
"def _find_files(directory, dirs_to_look_in, files_to_search_for, \n current_dir, see_files):\n full_name = True\n if see_files:\n full_name = False\n files_to_load = search_directory(directory, \n look_in=dirs_to_look_in,\n search_for=files_to_search_for,\n file_type='files',\n current_dir=current_dir,\n full_name=full_name)\n if not files_to_load:\n raise UserWarning('No files were found matching the search for %s'\\\n ' in the directory(s) %s%s' \\\n % (files_to_search_for, directory, \n dirs_to_look_in))\n return files_to_load",
"def write_feature_weight2(weights=None, features=None, lambda1s=None, accuracy=None, uniqueness=False, tol=1e-4, filename='selected_features.txt',many_features=False):\n weights=np.asarray(weights,dtype=float)\n lambda1s=np.asarray(lambda1s,dtype=float)\n num_selected=np.zeros(len(lambda1s),dtype=int) # for each lambda, save the number of selected features\n features_selected=np.zeros(len(lambda1s),dtype=object) \n # get the numbers of selected features\n for i in range(len(lambda1s)):\n w=weights[i]\n w_max=np.max(abs(w))\n w_min=np.min(abs(w))\n if tol*w_max<=w_min: # there is no element that is much larger: either none selected, or select all\n continue\n selected=(abs(w)>tol*w_max)\n #selected=(abs(w)>tol)\n num_selected[i]=selected.sum()\n feat_selected=features[selected]\n w_selected=w[selected]\n ind=np.argsort(abs(w_selected))\n ind=ind[::-1]\n feat_selected=feat_selected[ind]\n features_selected[i]=','.join(feat_selected)\n \n # take the first non-zeros\n if uniqueness:\n if accuracy is not None:\n _,_,take=take_max(num_selected,accuracy)\n else:\n take=take_first(num_selected)\n else:\n take=np.ones(len(num_selected),dtype=bool)\n weights_take=weights[take]\n lambda1s_take=lambda1s[take]\n lambda1s_take.resize((lambda1s_take.shape[0],1))\n lambda1s_take.round(decimals=6)\n features_take=features_selected[take]\n features_take.resize((features_take.shape[0],1))\n num_take=num_selected[take]\n # if no subset is selected\n if num_take.shape[0]==0:\n return None \n # if the last one is zero, then it means that all features are selected\n if num_take.shape[0]>1 and num_take[-1]==0 and num_take[-2]>0:\n num_take[-1]=len(features)\n features_take[-1]=','.join(features) \n num_take.resize((num_take.shape[0],1))\n \n if accuracy is not None:\n accuracy=np.asarray(accuracy,dtype=float)\n accuracy_take=accuracy[take]\n accuracy_take.resize((accuracy_take.shape[0],1))\n accuracy_take.round(decimals=4)\n features=np.insert(features,0,['lambda','accuracy','num_selected','feature_subset'])\n features.resize((1,features.shape[0]))\n if not many_features:\n data=np.hstack((lambda1s_take,accuracy_take, num_take,features_take,weights_take))\n data=np.vstack((features,data))\n else:\n header=np.array(['lambda','accuracy','num_selected'])\n header.resize((1,header.shape[0]))\n data=np.hstack((lambda1s_take,accuracy_take, num_take))\n data=np.vstack((header,data))\n else:\n if not many_features:\n features=np.insert(features,0,['lambda','num_selected','feature_subset'])\n features.resize((1,features.shape[0]))\n data=np.hstack((lambda1s_take,num_take,features_take,weights_take))\n data=np.vstack((features,data))\n else:\n header=np.array(['lambda','num_selected'])\n header.resize((1,header.shape[0]))\n data=np.hstack((lambda1s_take, num_take))\n data=np.vstack((header,data))\n\n np.savetxt(filename,data,fmt='%s',delimiter='\\t')",
"def get_testcases(input, output):\n input_files = set(os.listdir(input))\n output_files = set(os.listdir(output))\n common_files = sorted(\n list(input_files & output_files), key=lambda x: os.path.basename(x)\n )\n return common_files",
"def get_youngest_input():\n\n input_dirs = [project.polygons_folder, project.raw_elevation_folder]\n input_files = [project.urs_order_file,\n os.path.join(project.boundaries_folder,\n '%s.sts' % project.sts_filestem),\n project.landward_boundary_file]\n\n youngest = 0.0\t# time at epoch start\n\n # check all files in given directories\n for d in input_dirs:\n with os.popen('ls -l %s' % d) as fd:\n lines = fd.readlines()\n\n for fname in glob.glob(os.path.join(d, '*')):\n mtime = os.path.getmtime(fname)\n youngest = max(mtime, youngest)\n\n # check individual files\n for fname in input_files:\n mtime = os.path.getmtime(fname)\n youngest = max(mtime, youngest)\n\n return youngest"
] |
[
"0.5663445",
"0.55437386",
"0.54259026",
"0.5389287",
"0.5342964",
"0.5334897",
"0.5313537",
"0.53124535",
"0.5311146",
"0.5280167",
"0.5265178",
"0.52467936",
"0.52369076",
"0.52361584",
"0.5224449",
"0.522216",
"0.5216392",
"0.52127874",
"0.5179615",
"0.5176138",
"0.5149295",
"0.51409334",
"0.5126694",
"0.5116908",
"0.51017207",
"0.50965655",
"0.5095995",
"0.50922936",
"0.5090958",
"0.50862694"
] |
0.8017204
|
0
|