Column            Type                    Notes
query             string                  lengths 9 to 3.4k
document          string                  lengths 9 to 87.4k
metadata          dict
negatives         sequence of strings     4 to 101 items per row
negative_scores   sequence of floats      4 to 101 items per row
document_score    string                  lengths 3 to 10
document_rank     string (categorical)    102 distinct values
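For reference, a minimal sketch of loading one row with the Hugging Face datasets library is shown below; the repository id is a placeholder assumption, not this dataset's actual path, and the field names follow the schema above.

from datasets import load_dataset

# Placeholder repository id -- substitute the real dataset path.
ds = load_dataset("org/code-retrieval-triplets", split="train")

row = ds[0]
print(row["query"])            # natural-language query
print(row["document"][:200])   # positive code document, truncated for display
print(len(row["negatives"]))   # 4 to 101 hard negatives per row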
Reinvite an already invited user.
def reinvite_user(self, user, email):
    if self.is_moderator and self.has_perm('accounts.invite_user'):
        # Reset email, set a new token and update decision datetime
        user.email = email
        user.auth_token = generate_unique_id()
        user.decision_datetime = timezone.now()
        user.save()
        return user
    else:
        raise PermissionDenied
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def invite_user(request):\r\n params = request.params\r\n\r\n email = params.get('email', None)\r\n user = request.user\r\n\r\n if not email:\r\n # try to get it from the json body\r\n email = request.json_body.get('email', None)\r\n\r\n if not email:\r\n # if still no email, I give up!\r\n request.response.status_int = 406\r\n return _api_response(request, {\r\n 'username': user.username,\r\n 'error': \"Please submit an email address\"\r\n })\r\n\r\n email = email.lower()\r\n # first see if the user is already in the system\r\n exists = UserMgr.get(email=email.lower())\r\n if exists:\r\n request.response.status_int = 406\r\n return _api_response(request, {\r\n 'username': exists.username,\r\n 'error': \"This user is already a Bookie user!\"\r\n })\r\n\r\n new_user = user.invite(email.lower())\r\n if new_user:\r\n LOG.debug(new_user.username)\r\n # then this user is able to invite someone\r\n # log it\r\n AuthLog.reactivate(new_user.username)\r\n\r\n # and then send an email notification\r\n # @todo the email side of things\r\n settings = request.registry.settings\r\n msg = ActivationMsg(new_user.email,\r\n \"Enable your Bookie account\",\r\n settings)\r\n\r\n msg.send(\r\n request.route_url(\r\n 'reset',\r\n username=new_user.username,\r\n reset_key=new_user.activation.code))\r\n return _api_response(request, {\r\n 'message': 'You have invited: ' + new_user.email\r\n })\r\n else:\r\n # you have no invites\r\n request.response.status_int = 406\r\n return _api_response(request, {\r\n 'username': user.username,\r\n 'error': \"You have no invites left at this time.\"\r\n })", "def invite(self,roomName,user):\n\n self.sendCommand(roomName +\" /invite\",user)", "def invite_user(request):\n moderator = request.user\n site = get_current_site(request)\n\n invitation_form = InviteMemberForm(request.POST)\n\n if invitation_form.is_valid():\n\n # Invite user\n full_name = invitation_form.cleaned_data['full_name']\n email = invitation_form.cleaned_data['email']\n new_user = moderator.invite_new_user(email, full_name)\n\n # Log moderation event\n msg_type = ModerationLogMsg.INVITATION\n log_comment = _('{} invited {}'.format(moderator.get_full_name(),\n new_user.get_full_name()))\n log_moderator_event(msg_type=msg_type,\n user=new_user,\n moderator=moderator,\n comment=log_comment)\n\n # Send email\n subject = _('Welcome to {}'.format(site.name))\n template = 'moderation/emails/invite_new_user.html'\n token = new_user.auth_token\n url = request.build_absolute_uri(\n reverse('accounts:activate-account', args=[token]))\n send_connect_email(subject=subject,\n template=template,\n recipient=new_user,\n sender=moderator,\n site=site,\n url=url)\n\n messages.success(request, _('{} has been invited to {}.'.format(\n new_user.get_full_name(), site.name)))\n\n return redirect('moderation:moderators')\n\n else:\n return moderation_home(request, invitation_form=invitation_form)", "def invite_user(session, invitee):\n session.invite_event.clear()\n key = b64encode(messaging.common.pkc_encrypt(\n session.get_channel_key(), session.get_encryption_cert(invitee))).decode()\n msg = {\n kk.typ: kk.add_user,\n kk.inviter: session.user,\n kk.invitee: invitee,\n kk.chid: session.chan,\n kk.chkey: key,\n }\n msg[kk.signature] = b64encode(\n messaging.common.create_msg_sig(session, msg)).decode()\n messaging.common.send_msg(session.sock, msg, key=session.symkey)", "def test_user_invite_cant_edit_users_existing_user(self):\n project = fake_clients.FakeProject(name=\"test_project\")\n\n user = 
fake_clients.FakeUser(name=\"[email protected]\")\n\n setup_identity_cache(projects=[project], users=[user])\n\n url = \"/v1/actions/InviteUser\"\n headers = {\n \"project_name\": \"test_project\",\n \"project_id\": project.id,\n \"roles\": \"project_admin,member,project_mod\",\n \"username\": \"user\",\n \"user_id\": \"test_user_id\",\n \"authenticated\": True,\n }\n data = {\n \"username\": \"new_user\",\n \"email\": \"[email protected]\",\n \"roles\": [\"member\"],\n \"project_id\": project.id,\n }\n response = self.client.post(url, data, format=\"json\", headers=headers)\n self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)\n self.assertEqual(response.json(), {\"notes\": [\"task created\"]})", "def invite(self, invite):\n\n self._invite = invite", "def invite_user():\n\n form = InviteUserForm()\n if form.validate_on_submit():\n invited_by = db.session.query(User).filter_by(id=current_user.id).first()\n user = User(\n invited_by=invited_by.full_name,\n first_name=form.first_name.data,\n last_name=form.last_name.data,\n email=form.email.data)\n db.session.add(user)\n db.session.commit()\n token = user.generate_confirmation_token()\n invite_link = url_for(\n 'account.join_from_invite',\n user_id=user.id,\n token=token,\n _external=True)\n\n get_queue().enqueue(\n send_email,\n recipient=user.email,\n subject='You Are Invited To Join',\n template='account/email/invite',\n user=user.id,\n invited_by=invited_by,\n invite_link=invite_link,\n invite_by=invited_by\n )\n flash('User {} successfully invited'.format(user.full_name),\n 'form-success')\n return redirect(url_for('invite.index'))\n return render_template('invite/new_user.html', form=form)", "def invite(self):\n pass", "def revoke_invitation(request):\n site = get_current_site(request)\n\n revocation_form = RevokeInvitationForm(request.POST)\n\n if revocation_form.is_valid():\n\n user_id = revocation_form.cleaned_data['user_id']\n user = get_object_or_404(User, id=user_id)\n\n if user.is_invited_pending_activation \\\n and user.moderator == request.user:\n messages.success(request, _(\n '{} has been uninvited from {}.'.format(\n user.get_full_name(), site.name)))\n\n # Delete the user rather than deactivate it.\n # Removing the email address from the system altogether means\n # that the same email can later be used to create a new account\n # (e.g. 
if the user applies or is invited by another moderator).\n # Logs related to this user are also removed,\n # resulting in less junk to filter in that view.\n user.delete()\n else:\n raise PermissionDenied\n\n return redirect('moderation:moderators')\n\n else:\n return moderation_home(request, revocation_form=revocation_form)", "def test_user_invite_cant_edit_users(self):\n project = fake_clients.FakeProject(name=\"test_project\")\n\n setup_identity_cache(projects=[project])\n\n url = \"/v1/actions/InviteUser\"\n headers = {\n \"project_name\": \"test_project\",\n \"project_id\": project.id,\n \"roles\": \"project_admin,member,project_mod\",\n \"username\": \"user\",\n \"user_id\": \"test_user_id\",\n \"authenticated\": True,\n }\n data = {\n \"username\": \"new_user\",\n \"email\": \"[email protected]\",\n \"roles\": [\"member\"],\n \"project_id\": project.id,\n }\n response = self.client.post(url, data, format=\"json\", headers=headers)\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(response.json(), {\"errors\": [\"actions invalid\"]})", "def test_invited(self) -> None:\n\n self._perform_background_initial_update()\n\n u1 = self.register_user(\"u1\", \"pass\")\n u1token = self.login(\"u1\", \"pass\")\n r1 = self.helper.create_room_as(u1, tok=u1token)\n\n u2 = self.register_user(\"u2\", \"pass\")\n\n r1stats_ante = self._get_current_stats(\"room\", r1)\n assert r1stats_ante is not None\n\n self.helper.invite(r1, u1, u2, tok=u1token)\n\n r1stats_post = self._get_current_stats(\"room\", r1)\n assert r1stats_post is not None\n\n self.assertEqual(\n r1stats_post[\"current_state_events\"] - r1stats_ante[\"current_state_events\"],\n 1,\n )\n self.assertEqual(\n r1stats_post[\"invited_members\"] - r1stats_ante[\"invited_members\"], +1\n )", "def remove_invite(self, redditor: str | praw.models.Redditor):\n data = {\"name\": str(redditor), \"type\": \"moderator_invite\"}\n url = API_PATH[\"unfriend\"].format(subreddit=self.subreddit)\n self.subreddit._reddit.post(url, data=data)", "def _send_existing_agent_user_invite(self):\n standard_invite = self.instance\n try:\n agent_invite = AgentUserInvite.objects.get(\n agent=self._agent_user, organisation=standard_invite.organisation\n )\n except AgentUserInvite.DoesNotExist:\n agent_invite = AgentUserInvite(\n agent=self._agent_user, organisation=standard_invite.organisation\n )\n\n agent_invite.inviter = standard_invite.inviter\n agent_invite.status = AgentUserInvite.PENDING\n agent_invite.save()\n agent_invite.send_confirmation()\n return standard_invite", "def resend_email(self, userdict):\n return self.post('resend', userdict)", "def accept_invite(self):\n url = API_PATH[\"accept_mod_invite\"].format(subreddit=self.subreddit)\n self.subreddit._reddit.post(url)", "def decline_invitation(self, user, group):\n if group.is_invited(user):\n group.remove_invitation(user)", "async def invite(self, ctx):\n await ctx.send(f\"**{ctx.author.name}**, use this URL to invite me\\n<{discord.utils.oauth_url(self.bot.user.id)}>\")", "async def invite(self, ctx):\n await ctx.send(f'🐱You can invite me to your server using the following url:\\n{self.invite_url}'\n '\\n\\nYou will need the **Manage Server** permission to add me to a server. 
'\n f'Run `{self.heleus.command_prefix[0]}help` to see what you can customise!')", "def invite_users(self, roomName, users, client):\n for room in self.rooms:\n if room.get_name() == roomName:\n if room.verify_owner(client.get_socket()):\n for user in users:\n room.invite_member(user)\n if client.has_name(client):\n name = client.get_name()\n else:\n name = client.get_ip()\n self.send_message('Has sido invidado a la sala {} por '\n 'parte de {}.'.format(roomName, name), user)\n self.send_message('Todos han sido invitados.', client.get_socket())\n else:\n self.send_message('No eres dueno de la sala.', client.get_socket())", "def put(self, id):\n payload = marshal(api.payload, invite_user)\n taskroom_service.invite_user(id, payload['email'])\n return {'Message': \"User Added to the Task Room\"}", "def test_resend_delegate(self):\n self.invite.role = self.role_delegate\n self.invite.save()\n url = reverse(\n 'projectroles:api_invite_resend',\n kwargs={'projectinvite': self.invite.sodar_uuid},\n )\n response = self.request_knox(url, method='POST')\n self.assertEqual(response.status_code, 200, msg=response.content)\n self.assertEqual(len(mail.outbox), 1)", "def test_revoke_delegate(self):\n self.invite.role = self.role_delegate\n self.invite.save()\n url = reverse(\n 'projectroles:api_invite_revoke',\n kwargs={'projectinvite': self.invite.sodar_uuid},\n )\n response = self.request_knox(url, method='POST')\n self.assertEqual(response.status_code, 200, msg=response.content)\n self.invite.refresh_from_db()\n self.assertEqual(self.invite.active, False)", "def acceptInvite(self, user):\n invite = user if isinstance(user, MyPlexInvite) else self.pendingInvite(user, includeSent=False)\n params = {\n 'friend': int(invite.friend),\n 'home': int(invite.home),\n 'server': int(invite.server)\n }\n url = MyPlexInvite.REQUESTS + f'/{invite.id}' + utils.joinArgs(params)\n return self.query(url, self._session.put)", "async def invite(self, ctx):\n perms = discord.Permissions.text()\n perms.update(read_messages=True, manage_messages=True,\n mention_everyone=False, send_tts_messages=False)\n await ctx.send(f'Invite me here:\\n<{discord.utils.oauth_url(self.bot.user.id, perms)}>')", "def remind_users(self, request, pk=None):\n retreat = self.get_object()\n if not retreat.is_active:\n response_data = {\n 'detail': \"Retreat need to be activate to send emails.\"\n }\n return Response(response_data, status=status.HTTP_200_OK)\n\n # This is a hard-coded limitation to allow anonymous users to call\n # the function.\n time_limit = retreat.start_time - timedelta(days=8)\n if timezone.now() < time_limit:\n response_data = {\n 'detail': \"Retreat takes place in more than 8 days.\"\n }\n return Response(response_data, status=status.HTTP_200_OK)\n\n # Notify a user for every reserved seat\n emails = []\n for reservation in retreat.reservations.filter(\n is_active=True, pre_event_send=False):\n send_retreat_reminder_email(reservation.user, retreat)\n reservation.pre_event_send = True\n reservation.save()\n emails.append(reservation.user.email)\n\n response_data = {\n 'stop': True,\n 'emails': emails\n }\n return Response(response_data, status=status.HTTP_200_OK)", "def invitation(id):\n invitation = get_required(Invitation, id)\n if g.user == invitation.inviter.user:\n flash(\"You can't send an invitation to yourself.\")\n return redirect(url_for('front'))\n if invitation.acceptor_member_id:\n flash(\"This invitation has already been used.\")\n return redirect(url_for('front'))\n clicked_invitation(invitation)\n 
db.session.commit()\n return redirect(invitation.circle.url)", "async def rep_user(self, ctx, *, user: discord.Member = None):\n if user and user.bot:\n return await ctx.send_line(\"😔 Sorry but I just can't do that.\")\n if user and user.id == ctx.author.id:\n return await ctx.send_line(\"🙂 Nice try but wouldn't that be unfair?\")\n author_profile = await self.cache.get_profile(ctx.author.id)\n if user is None:\n if author_profile.can_rep:\n res = \"👌 You can rep someone now.\"\n else:\n res = f\"⏳ You can rep again {author_profile.next_rep.humanize()}.\"\n return await ctx.send_line(res)\n\n if author_profile.can_rep:\n target_profile = await self.cache.get_profile(user.id)\n if not target_profile:\n res = self.plugin.data.responses.no_profile.format(user_name=user.name)\n return await ctx.send_line(res)\n await target_profile.rep(author_profile)\n res = f\"You added one reputation point to {user.name}.\"\n await ctx.send_line(res, ctx.author.avatar_url)\n else:\n res = f\"⏳ You can rep again {author_profile.next_rep.humanize()}.\"\n await ctx.send_line(res)", "async def invite(self, ctx):\n invite = f\"https://discordapp.com/api/oauth2/authorize?client_id={self.bot.user.id}&permissions=67584&scope=bot\"\n await ctx.send(embed=discord.Embed(\n color=discord.colour.Colour.teal(),\n description=f\":mailbox_with_mail: [Invite]({invite}) me to your server!\"))", "def upgradeFixInvitations():\n from base import get_group_database, get_user_database\n \n user_db = get_user_database()\n group_db = get_group_database()\n \n for user in user_db.root.values():\n for g in group_db.users_groups(user):\n g.remove_invitation(user)\n group_db.invitations.remove_all_user_invitations(user, g)", "async def resign(self, ctx):\n currency = await bank.get_currency_name(ctx.guild)\n await self.config.user(ctx.author).gameRole.set(\"User\")\n await ctx.send(\n f\"{ctx.author} has spent 10,000 {currency}- to resign from their current job.\"\n )", "def irc_INVITE(self, prefix, (user, channel)):\n self.join(channel)", "def cancelInvite(self, user):\n invite = user if isinstance(user, MyPlexInvite) else self.pendingInvite(user, includeReceived=False)\n params = {\n 'friend': int(invite.friend),\n 'home': int(invite.home),\n 'server': int(invite.server)\n }\n url = MyPlexInvite.REQUESTED + f'/{invite.id}' + utils.joinArgs(params)\n return self.query(url, self._session.delete)", "async def invite(self, ctx):\r\n myInvite = discord.utils.oauth_url(self.bot.user.id, permissions=discord.Permissions(permissions=8))\r\n await ctx.channel.send('Invite me to *your* server with this link: \\n\\n<{}>'.format(myInvite))", "def testInviteCreatesUser(self):\r\n me = User()\r\n me.username = u'me'\r\n me.email = u'me.com'\r\n me.invite_ct = 2\r\n you = me.invite(u'you.com')\r\n\r\n self.assertEqual(\r\n 'you.com',\r\n you.username,\r\n 'The email should be the username')\r\n self.assertEqual(\r\n 'you.com',\r\n you.email,\r\n 'The email should be the email')\r\n self.assertTrue(\r\n len(you.api_key),\r\n 'The api key should be generated for the user')\r\n self.assertFalse(\r\n you.activated,\r\n 'The new user should not be activated')\r\n self.assertEqual(\r\n 1,\r\n me.invite_ct,\r\n 'My invite count should be deprecated')", "def invite_new_user(self, email, full_name):\n User = get_user_model()\n\n if self.is_moderator and self.has_perm('accounts.invite_user'):\n try:\n User.objects.get(email=email)\n except User.DoesNotExist:\n new_user = create_inactive_user(email, full_name)\n new_user.registration_method = 
new_user.INVITED\n new_user.moderator = self\n new_user.moderator_decision = new_user.PRE_APPROVED\n new_user.decision_datetime = timezone.now()\n new_user.auth_token = generate_unique_id()\n new_user.save()\n return new_user\n else:\n return None\n else:\n raise PermissionDenied", "def accept_invite_requests(invite_requests):\n for invite_request in invite_requests:\n accepting_user = invite_request.to_facebook_user.profile.user\n graph = accepting_user.profile.get_offline_graph()\n facebook = FacebookUserConverter(graph)\n # Delete the request\n facebook.delete_request(invite_request)\n logger.info('Invite request deleted')", "def accept_invitation(self, invitation_id):\n\n url = self.__url('/user/repository_invitations/', invitation_id)\n return self.__patch(url)", "def accounts_invites_add(request):\r\n rdict = request.matchdict\r\n username = rdict.get('username', None)\r\n if username:\r\n username = username.lower()\r\n count = rdict.get('count', None)\r\n\r\n if username is not None and count is not None:\r\n user = UserMgr.get(username=username)\r\n\r\n if user:\r\n user.invite_ct = count\r\n return _api_response(request, dict(user))\r\n else:\r\n request.response.status_int = 404\r\n ret = {'error': \"Invalid user account.\"}\r\n return _api_response(request, ret)\r\n else:\r\n request.response.status_int = 400\r\n ret = {'error': \"Bad request, missing parameters\"}\r\n return _api_response(request, ret)", "def invite(self, eventid, aid, uid):\n\n u_id = EventId()\n u_id.setHashed(uid)\n\n a_id = EventId()\n a_id.setHashed(aid)\n\n e_id = EventId()\n e_id.setHashed(eventid)\n\n event = Event.getById(e_id)\n admin = User(id=a_id)\n\n if not event.authorized(admin):\n raise EventError(EventError.NO_ADMIN)\n\n user = User.getById(u_id)\n\n invitation = Invitation(user=user, event=event)\n invitation.create()\n\n return", "def post(self, request, format=None, user=None, token=None):\n logger.info(\"Creating invitation\")\n # Validate Invitation request data\n serializer = CreateInvitationSerializer(data=request.data)\n if not serializer.is_valid():\n logger.warning(f\"Unable to validate invitation request : {serializer.errors}\")\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n # Retrieve invited user from IAM\n invited_user = ExternalUsers.get_by_email(\n token,\n serializer.validated_data['email']\n )\n if not invited_user:\n logger.warning(\"Unable to retrieve invited user\")\n return Response(f\"Unable to retrieve invited user ({serializer.validated_data['email']})\", status=status.HTTP_404_NOT_FOUND)\n\n # User cannot invite himself\n if user.id == invited_user.id:\n logger.warning(f\"{user.id} sent an invitation to itself\")\n return Response(\"You can't invite yourself\", status=status.HTTP_400_BAD_REQUEST)\n\n # Save the invitation\n serializer = InvitationSerializer(data={\n 'workspace': serializer.validated_data[\"workspace\"].id,\n 'sender': user.email,\n 'user_id': invited_user.id,\n 'status': InvitationStatus.PENDING.name\n })\n if not serializer.is_valid():\n logger.warning(f\"Unable to save invitation : {serializer.errors}\")\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n invitation = serializer.save()\n\n # Send email to the invited user\n ExternalMail.send(\n to=invited_user.email,\n template_id=\"d-45db8f85eeaf43e9944db49a5777d9f7\",\n template_data={ 'url': 'https://app.worko.tech/#workspace' }\n )\n\n # Build data that will be send\n result = InvitationSerializer(invitation).data\n\n # Notify user that it 
has been invited\n ExternalNotify.send(\n f\"user {invited_user.id}\",\n 'invitation recieved',\n result\n )\n return Response(result, status=status.HTTP_201_CREATED)", "def testUsernameAlreadyThere(self):\r\n email = '[email protected]'\r\n new_user = UserMgr.signup_user(email, u'invite')\r\n DBSession.add(new_user)\r\n\r\n transaction.commit()\r\n\r\n user = DBSession.query(User).filter(User.username == email).one()\r\n\r\n url = quote('/{0}/reset/{1}'.format(\r\n user.email,\r\n user.activation.code\r\n ))\r\n\r\n res = self.app.post(\r\n url,\r\n params={\r\n 'password': u'testing',\r\n 'username': user.username,\r\n 'code': user.activation.code,\r\n 'new_username': u'admin',\r\n })\r\n self.assertIn('Username already', res.body)", "def test_resend_inactive(self):\n self.invite.active = False\n self.invite.save()\n url = reverse(\n 'projectroles:api_invite_resend',\n kwargs={'projectinvite': self.invite.sodar_uuid},\n )\n response = self.request_knox(url, method='POST')\n self.assertEqual(response.status_code, 400, msg=response.content)\n self.assertEqual(len(mail.outbox), 0)", "def send_user_invitation(self) -> pulumi.Output[Optional[bool]]:\n return pulumi.get(self, \"send_user_invitation\")", "def test_update_invitation_as_retried(session): # pylint:disable=unused-argument\n invitation = factory_invitation_model(session=session, status='FAILED')\n session.add(invitation)\n session.commit()\n invitation.update_invitation_as_retried()\n assert invitation\n assert invitation.invitation_status_code == 'PENDING'", "async def invite(self, ctx):\n embed = discord.Embed(title=\"Invite\", description=f\"**{ctx.author.name}**, use this URL to invite me\\n[link](https://discord.com/oauth2/authorize?client_id=749629426777456691&permissions=8&scope=bot)\", color=0xeff0f1)\n await ctx.send(embed=embed)", "def invite(self, mess, args):\n user = self.get_sender_username(mess)\n if user in self.users:\n self.send(args, '%s invited you to join %s. Say \",help\" to see how to join.' % (user, CHANNEL))\n self.invited['%s@%s' %(xmpp.JID(args).getNode(), xmpp.JID(args).getDomain())] = ''\n self.log.info( '%s invited %s.' 
% (user, args))\n self.save_state()\n self.message_queue.append('_%s invited %s_' % (self.users[user], args))", "def invite(self, target, channel):\n self.send_line('INVITE %s %s' % (target, channel))", "def invite(id, adminId, userId):\n db = core.connect();\n permission.create({\n \"streamId\": id,\n \"createdBy\": adminId,\n \"userId\": userId,\n \"level\": 0\n })\n event.create({\n \"createdBy\": userId,\n \"streamId\": user.messageStream(userId),\n \"displayString\": \"%s has invited you to the %s %s\" % (user.nameForId(adminId), meta(id), displayName(id)),\n \"unread\": True\n })", "def reject(self):\n self.skype.conn(\"PUT\", \"{0}/users/{1}/invites/8:{2}/decline\"\n .format(SkypeConnection.API_CONTACTS, self.skype.userId, self.userId),\n auth=SkypeConnection.Auth.SkypeToken)", "async def invite(self, ctx):\n lang = getLang(ctx.message.guild.id)\n\n with open(f\"embeds/{lang}/inviting.json\", \"r\") as f:\n inviting = json.load(f)\n\n await ctx.reply(embed=discord.Embed.from_dict(inviting[0]), components=[\n ActionRow(\n Button(label=inviting[1],\n url=\"https://discord.com/api/oauth2/authorize?client_id=878533674042294292&permissions=537259248&scope=bot\",\n style=ButtonStyle.url\n ),\n Button(label=inviting[2],\n url=\"https://discord.com/api/oauth2/authorize?client_id=878533674042294292&permissions=8&scope=bot\",\n style=ButtonStyle.url\n )\n )\n ], mention_author=False, delete_after=20)", "def accounts_invites(request):\r\n user_list = UserMgr.get_list()\r\n ret = {\r\n 'users': [(u.username, u.invite_ct) for u in user_list],\r\n }\r\n return _api_response(request, ret)", "async def invite(self, context: Context) -> None:\n embed = discord.Embed(\n description=f\"Invite me by clicking [here](https://discordapp.com/oauth2/authorize?&client_id={self.bot.config['application_id']}&scope=bot+applications.commands&permissions={self.bot.config['permissions']}).\",\n color=0xD75BF4,\n )\n try:\n # To know what permissions to give to your bot, please see here: https://discordapi.com/permissions.html and remember to not give Administrator permissions.\n await context.author.send(embed=embed)\n await context.send(\"I sent you a private message!\")\n except discord.Forbidden:\n await context.send(embed=embed)", "def test_invite_user_to_project_email_not_username(self):\n project = fake_clients.FakeProject(name=\"test_project\")\n\n setup_identity_cache(projects=[project])\n\n url = \"/v1/actions/InviteUser\"\n headers = {\n \"project_name\": \"test_project\",\n \"project_id\": project.id,\n \"roles\": \"project_admin,member,project_mod\",\n \"username\": \"user\",\n \"user_id\": \"test_user_id\",\n \"authenticated\": True,\n }\n data = {\n \"username\": \"new_user\",\n \"email\": \"[email protected]\",\n \"roles\": [\"member\"],\n \"project_id\": project.id,\n }\n response = self.client.post(url, data, format=\"json\", headers=headers)\n self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)\n self.assertEqual(response.json(), {\"notes\": [\"task created\"]})\n\n self.assertEqual(len(mail.outbox), 1)\n self.assertEqual(mail.outbox[0].subject, \"invite_user_to_project\")\n self.assertEqual(mail.outbox[0].to[0], \"[email protected]\")\n\n new_token = Token.objects.all()[0]\n url = \"/v1/tokens/\" + new_token.token\n data = {\"password\": \"testpassword\"}\n response = self.client.post(url, data, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(len(mail.outbox), 2)\n\n self.assertEqual(fake_clients.identity_cache[\"new_users\"][0].name, 
\"new_user\")", "def test_revoke_inactive(self):\n self.invite.active = False\n self.invite.save()\n url = reverse(\n 'projectroles:api_invite_revoke',\n kwargs={'projectinvite': self.invite.sodar_uuid},\n )\n response = self.request_knox(url, method='POST')\n self.assertEqual(response.status_code, 400, msg=response.content)", "def test_resend_delegate_no_perms(self):\n self.invite.role = self.role_delegate\n self.invite.save()\n delegate = self.make_user('delegate')\n self.make_assignment(self.project, delegate, self.role_delegate)\n\n url = reverse(\n 'projectroles:api_invite_resend',\n kwargs={'projectinvite': self.invite.sodar_uuid},\n )\n response = self.request_knox(\n url, method='POST', token=self.get_token(delegate)\n )\n self.assertEqual(response.status_code, 403, msg=response.content)\n self.assertEqual(len(mail.outbox), 0)", "def update_invite(\n self,\n redditor: str | praw.models.Redditor,\n *,\n permissions: list[str] | None = None,\n ):\n url = API_PATH[\"setpermissions\"].format(subreddit=self.subreddit)\n data = self._handle_permissions(\n other_settings={\"name\": str(redditor), \"type\": \"moderator_invite\"},\n permissions=permissions,\n )\n self.subreddit._reddit.post(url, data=data)", "def reset(request):\r\n rdict = request.matchdict\r\n params = request.params\r\n\r\n # This is an initial request to show the activation form.\r\n username = rdict.get('username', None)\r\n activation_key = rdict.get('reset_key', None)\r\n user = ActivationMgr.get_user(username, activation_key)\r\n new_username = None\r\n\r\n if user is None:\r\n # just 404 if we don't have an activation code for this user\r\n raise HTTPNotFound()\r\n\r\n if 'code' in params:\r\n # This is a posted form with the activation, attempt to unlock the\r\n # user's account.\r\n username = params.get('username', None)\r\n activation = params.get('code', None)\r\n password = params.get('new_password', None)\r\n new_username = params.get('new_username', None)\r\n error = None\r\n\r\n if new_username:\r\n new_username = new_username.lower()\r\n\r\n # Check whether username exists or not. During signup request , a\r\n # record of current user is created with username as his email id\r\n # which is already checked for uniqueness. 
So when new_username is\r\n # equal to username ie the email id then no need to check for\r\n # uniqueness , but if new_username is something else it has to be\r\n # verified\r\n\r\n if username != new_username and \\\r\n UserMgr.get(username=new_username) is not None:\r\n # Set an error message to the template.\r\n error = \"Username already exists.\"\r\n elif not UserMgr.acceptable_password(password):\r\n # Set an error message to the template.\r\n error = \"Come on, pick a real password please.\"\r\n else:\r\n res = ActivationMgr.activate_user(username, activation, password)\r\n if res:\r\n # success so respond nicely\r\n AuthLog.reactivate(username, success=True, code=activation)\r\n\r\n # if there's a new username and it's not the same as our\r\n # current username, update it\r\n if new_username and new_username != username:\r\n try:\r\n user = UserMgr.get(username=username)\r\n user.username = new_username\r\n except IntegrityError:\r\n error = 'There was an issue setting your new username'\r\n else:\r\n AuthLog.reactivate(username, success=False, code=activation)\r\n error = ('There was an issue attempting to activate'\r\n 'this account.')\r\n\r\n if error:\r\n return {\r\n 'message': error,\r\n 'user': user\r\n }\r\n else:\r\n # Log the user in and move along.\r\n headers = remember(request, user.id, max_age=60 * 60 * 24 * 30)\r\n user.last_login = datetime.utcnow()\r\n\r\n # log the successful login\r\n AuthLog.login(user.username, True)\r\n\r\n # we're always going to return a user to their own /recent after a\r\n # login\r\n return HTTPFound(\r\n location=request.route_url(\r\n 'user_bmark_recent',\r\n username=user.username),\r\n headers=headers)\r\n\r\n else:\r\n LOG.error(\"CHECKING\")\r\n LOG.error(username)\r\n\r\n if user is None:\r\n # just 404 if we don't have an activation code for this user\r\n raise HTTPNotFound()\r\n\r\n LOG.error(user.username)\r\n LOG.error(user.email)\r\n return {\r\n 'user': user,\r\n }", "async def invite(ctx):\n permissions = 2134207679\n url = discord.utils.oauth_url(client_id=bot.user.id, permissions=discord.Permissions(permissions=permissions),\n scopes=(\"bot\", \"applications.commands\"))\n view = discord.ui.View()\n view.add_item(discord.ui.Button(label=\"Invite\", url=url))\n await ctx.respond(\"I'm glad you want to add me to your server, here's a link!\", view=view)", "async def remind(id, reminder):\n \n guild = BOT_GLOBAL.get_guild(BOT_GLOBAL.settings.guild_id)\n if guild is None:\n return\n member = guild.get_member(id)\n if member is None:\n return\n\n embed = discord.Embed(title=\"Reminder!\", description=f\"*You wanted me to remind you something... What was it... 
Oh right*:\\n\\n{reminder}\", color=discord.Color.random())\n try:\n await member.send(embed=embed)\n except Exception:\n channel = guild.get_channel(BOT_GLOBAL.settings.guild().channel_botspam)\n await channel.send(member.mention, embed=embed)", "def create_retirement_request_and_deactivate_account(user):\n # Add user to retirement queue.\n UserRetirementStatus.create_retirement(user)\n\n # Unlink LMS social auth accounts\n UserSocialAuth.objects.filter(user_id=user.id).delete()\n\n # Change LMS password & email\n user.email = get_retired_email_by_email(user.email)\n user.set_unusable_password()\n user.save()\n\n # TODO: Unlink social accounts & change password on each IDA.\n # Remove the activation keys sent by email to the user for account activation.\n Registration.objects.filter(user=user).delete()\n\n # Delete OAuth tokens associated with the user.\n retire_dot_oauth2_models(user)\n AccountRecovery.retire_recovery_email(user.id)", "def update_user():", "def suspend_acct(request):\r\n params = request.params\r\n user = request.user\r\n\r\n # we need to get the user from the email\r\n email = params.get('email', None)\r\n\r\n if email is None and hasattr(request, 'json_body'):\r\n # try the json body\r\n email = request.json_body.get('email', None)\r\n\r\n if user is None and email is None:\r\n request.response.status_int = 406\r\n return _api_response(request, {\r\n 'error': \"Please submit an email address\",\r\n })\r\n\r\n if user is None and email is not None:\r\n user = UserMgr.get(email=email)\r\n\r\n if user is None:\r\n request.response.status_int = 404\r\n return _api_response(request, {\r\n 'error': \"Please submit a valid address\",\r\n 'email': email\r\n })\r\n\r\n # check if we've already gotten an activation for this user\r\n if user.activation is not None:\r\n request.response.status_int = 406\r\n return _api_response(request, {\r\n 'error': \"\"\"You've already marked your account for reactivation.\r\nPlease check your email for the reactivation link. Make sure to\r\ncheck your spam folder.\"\"\",\r\n 'username': user.username,\r\n })\r\n\r\n # mark them for reactivation\r\n user.reactivate(u\"FORGOTTEN\")\r\n\r\n # log it\r\n AuthLog.reactivate(user.username)\r\n\r\n # and then send an email notification\r\n # @todo the email side of things\r\n settings = request.registry.settings\r\n msg = ReactivateMsg(user.email,\r\n \"Activate your Bookie account\",\r\n settings)\r\n\r\n msg.send({\r\n 'url': request.route_url(\r\n 'reset',\r\n username=user.username,\r\n reset_key=user.activation.code),\r\n 'username': user.username\r\n })\r\n\r\n return _api_response(request, {\r\n 'message': \"\"\"Your account has been marked for reactivation. 
Please\r\n check your email for instructions to reset your\r\n password\"\"\",\r\n })", "def invite(request):\n form = DepartmentInvitationForm()\n if request.method == 'POST':\n form = DepartmentInvitationForm(request.POST)\n if form.is_valid():\n email = form.cleaned_data['email']\n existing_user = User.objects.filter(email=email)\n existing_email = EmailAddress.objects.get_users_for(email=email)\n if existing_user or existing_email:\n msg = 'User {} already exists.'.format(email)\n messages.error(request, msg)\n else:\n DepartmentInvitation.objects.create(\n email=email,\n invited_by=request.user,\n department=form.cleaned_data['department'],\n account_category=form.cleaned_data['account_category']\n )\n msg = 'Invitation has been send to {}'.format(email)\n messages.add_message(request, messages.SUCCESS, msg)\n return redirect(reverse('eggplant:dashboard:home'))\n ctx = {\n 'form': form,\n 'title': \"send invitation\",\n }\n return render(request, 'eggplant/membership/invite.html', ctx)", "def expireme(message):\n users = hf.get_users()\n requester = message._get_user_id()\n for user in users:\n if user[\"id\"] == requester:\n name = user[\"name\"]\n break\n\n hf.expire_user(name)", "def restart_user(self, subid):\n p = self.revoke_token(subid)\n p = self.refresh_token(subid)\n return p", "def render_and_send_invite_email(invite_id):\n from open_connect.accounts.models import Invite\n invite = Invite.objects.get(pk=invite_id)\n\n if invite.notified:\n return\n\n context = {\n 'invite': invite,\n 'email': invite.email,\n 'origin': settings.ORIGIN\n }\n html = render_to_string(\n 'account/email/new_user_invite.html', context)\n text = render_to_string(\n 'account/email/new_user_invite.txt', context)\n\n send_email(\n email=invite.email,\n from_email=settings.DEFAULT_FROM_EMAIL,\n subject=u\"You're invited to Connect\",\n text=text,\n html=html\n )\n\n invite.notified = now()\n invite.save()", "def send_user_invitation(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"send_user_invitation\")", "def send_user_invitation(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"send_user_invitation\")", "def test_mod_email(self, mapp, existing_user_id, url_of_liveserver):\n mapp.logoff()\n mapp.login(user=existing_user_id, password=\"1234\")\n email_address = existing_user_id + '_' + str(id(self)) + \"@devpi.net\"\n mapp.modify_user(user=existing_user_id, email=email_address)\n # Verify that the email was indeed changed.\n json = mapp.getjson(url_of_liveserver)\n assert json['result'][existing_user_id]['email'] == email_address", "def test_invite(self):\n self.client.invite(\"foo\", \"#bar\")\n self.assertEqual(self.client.lines, [\"INVITE foo #bar\"])", "def notify_email_confirmed(self, user, email):\n \n # make sure user isn't still invited to groups he owns or is a member of\n for g in self.users_groups(user):\n g.remove_invitation(user)", "def save(self, *args, **kwargs):\n if not self.require_confirm_email:\n User.objects.filter(is_active=False, deactivation_reason=\"pending\").update(\n is_active=True, deactivation_reason=None\n )\n if not self.invite_question_text:\n self.invite_question_text = \"What is your favourite book?\"\n super().save(*args, **kwargs)", "def save(self, **kwargs):\n super().save(**kwargs)\n self.instance.send_invite()\n return self.instance", "def set_invincible(self, status: bool):\n self._invincible = status\n if self._invincible: # if become invincible\n self._invincible_time = time.time() # record the invincible time", "def 
do_get_invites_controlled_by_user(user_profile: UserProfile) -> List[Dict[str, Any]]:\n if user_profile.is_realm_admin:\n prereg_users = filter_to_valid_prereg_users(\n PreregistrationUser.objects.filter(referred_by__realm=user_profile.realm)\n )\n else:\n prereg_users = filter_to_valid_prereg_users(\n PreregistrationUser.objects.filter(referred_by=user_profile)\n )\n\n invites = []\n\n for invitee in prereg_users:\n assert invitee.referred_by is not None\n invites.append(\n dict(\n email=invitee.email,\n invited_by_user_id=invitee.referred_by.id,\n invited=datetime_to_timestamp(invitee.invited_at),\n expiry_date=get_invitation_expiry_date(invitee.confirmation.get()),\n id=invitee.id,\n invited_as=invitee.invited_as,\n is_multiuse=False,\n )\n )\n\n if not user_profile.is_realm_admin:\n # We do not return multiuse invites to non-admin users.\n return invites\n\n multiuse_confirmation_objs = Confirmation.objects.filter(\n realm=user_profile.realm, type=Confirmation.MULTIUSE_INVITE\n ).filter(Q(expiry_date__gte=timezone_now()) | Q(expiry_date=None))\n for confirmation_obj in multiuse_confirmation_objs:\n invite = confirmation_obj.content_object\n assert invite is not None\n\n # This should be impossible, because revoking a multiuse invite\n # deletes the Confirmation object, so it couldn't have been fetched above.\n assert invite.status != confirmation_settings.STATUS_REVOKED\n invites.append(\n dict(\n invited_by_user_id=invite.referred_by.id,\n invited=datetime_to_timestamp(confirmation_obj.date_sent),\n expiry_date=get_invitation_expiry_date(confirmation_obj),\n id=invite.id,\n link_url=confirmation_url(\n confirmation_obj.confirmation_key,\n user_profile.realm,\n Confirmation.MULTIUSE_INVITE,\n ),\n invited_as=invite.invited_as,\n is_multiuse=True,\n )\n )\n return invites", "async def botinvite_command(self, ctx):\n invite = f\"https://discord.com/api/oauth2/authorize?client_id={self.client.user.id}&permissions=1374809815&scope=bot\"\n await ctx.send(invite)", "def approve(user):\n if user.approved:\n logging.warn('noop - User %d already approved', user.id)\n return user\n user.approved = True\n for message in user.messages:\n if message.text == config.MSG_WELCOME:\n session.delete(message)\n session.add(user)\n session.commit()\n return user", "async def invite(self, ctx):\n link = \"https://discordapp.com/oauth2/authorize?client_id=282765243862614016&scope=bot&permissions=19456\"\n await ctx.send(\"Invite me to your server with this link!\\n\" + link)", "async def invite(self):\n link = \"https://discordapp.com/oauth2/authorize?client_id=282765243862614016&scope=bot&permissions=19456\"\n await self.bot.say(\"Invite me to your server with this link!\\n\" + link)", "def cancel_invite(client, invite_id):\n query_str = \"\"\"mutation CancelInvitePyApi($where: WhereUniqueIdInput!) 
{\n cancelInvite(where: $where) {id}}\"\"\"\n client.execute(query_str, {'where': {'id': invite_id}}, experimental=True)", "async def invite(self, ctx):\n embed = discord.Embed(title='Invite links for NOVA',\n description='[<:news:730866149109137520> Required Permissions](https://discord.com/api/'\n 'oauth2/authorize?client_id=709922850953494598&permissions=1573252215&scope='\n 'bot)\\n'\n '[<:news:730866149109137520> No Permissions]'\n '(https://discord.com/api/oauth2/authorize?client_id=709922850953494598&permi'\n 'ssions=0&scope=bot)\\n[<:news:730866149109137520> All Permissions (admin)]'\n '(https://discord.com/api/oauth2/authorize?client_id=709922850953494598&perm'\n 'issions=8&scope=bot)', color=0x5643fd)\n embed.set_footer(text='Developed by YeetVegetabales', icon_url='https://cdn.discordapp.com/avatars'\n '/569374429218603019'\n '/a_6dac6946906e498650f6c2466aa82200.gif?size'\n '=256&f=.gif')\n embed.set_thumbnail(url='https://images-ext-2.discordapp.net/external/54Mim4lahztGCP4hgmpy4lOdEUc4'\n '-dOeNA_x6hVHMlc/%3Fsize%3D4096/https/cdn.discordapp.com/avatars/709922850953494598'\n '/f78ed19924e8c95abc30f406d47670d7.png')\n await ctx.send(embed=embed)", "async def _invites(self, ctx):\n waiting = await ctx.send(\"`Loading server invites...`\")\n guild = ctx.guild\n guild_invites = await guild.invites()\n invitecodes = []\n uses = []\n channel = []\n inviter = []\n for invite in guild_invites:\n invitecodes.append(invite.code)\n uses.append(str(invite.uses))\n channel.append(invite.channel.mention)\n inviter.append(invite.inviter.mention)\n\n invitecodes = \"\\n\".join(invitecodes)\n uses = \"\\n\".join(uses)\n channel = \"\\n\".join(channel)\n inviter = \"\\n\".join(inviter)\n\n e = discord.Embed(color=ctx.guild.me.top_role.color)\n e.set_author(name=f\"{guild.name}'s invites\")\n e.set_thumbnail(url=guild.icon_url)\n e.add_field(name=\"Invites\", value=invitecodes)\n e.add_field(name=\"Uses\", value=uses)\n e.add_field(name=\"Channel\", value=channel)\n e.add_field(name=\"Inviter\", value=inviter)\n await waiting.edit(content=None, embed=e)", "def save(self, commit=True):\n user = super(InvitationCompleteForm, self).save(commit)\n\n def save_invited_user():\n invited_user = self.invited_user\n invited_user.created_user = user\n invited_user.status = InvitedUser.STATUS_REGISTERED\n invited_user.save()\n if commit:\n save_invited_user()\n else:\n self.save_invited_user = save_invited_user\n return user", "def members_invited(self):\r\n return MembersInvited(self)", "def members_invited(self):\r\n return MembersInvited(self)", "def test_reactivation_for_unregistered_user(self, email_user):\r\n response_data = self.reactivation_email(self.unregisteredUser)\r\n\r\n self.assertFalse(response_data['success'])", "def test_resend_activation_email_expired_user(self):\n new_user = self.registration_profile.objects.create_inactive_user(\n site=Site.objects.get_current(), send_email=False, **self.user_info)\n new_user.date_joined -= datetime.timedelta(\n days=settings.ACCOUNT_ACTIVATION_DAYS + 1)\n new_user.save()\n\n profile = self.registration_profile.objects.get(user=new_user)\n self.assertTrue(profile.activation_key_expired())\n\n self.assertFalse(self.registration_profile.objects.resend_activation_mail(\n email=self.user_info['email'],\n site=Site.objects.get_current(),\n ))\n self.assertEqual(len(mail.outbox), 0)", "async def reacrole(self, ctx: commands.Context):\n pass", "def validate_member_invite(self, invite):\n queryset = TeamMember.objects.filter(\n Q(\n team=self.team,\n 
invite__team=self.team,\n invite__email=invite.email,\n ),\n )\n if queryset.exists():\n raise forms.ValidationError(\n _('An invitation was already sent to this email'),\n )\n return invite", "async def _invite(self, ctx: Context):\n\n perm_int = discord.Permissions(268494928)\n\n data = await self.bot.application_info()\n invite_url = discord.utils.oauth_url(data.id, permissions=perm_int)\n\n value = (\n f\"Invite TvM Assistant to your bot by [clicking here]({invite_url}).\"\n \"\\n\\nInviting the bot will give it some management permissions. You can\"\n \" review them when you use the link.\"\n )\n\n embed = discord.Embed(color=await ctx.embed_colour(), description=value)\n embed.set_author(name=f\"Invite TvM Assistant\", icon_url=ctx.me.avatar_url)\n\n try:\n await ctx.send(embed=embed)\n except discord.Forbidden:\n return await ctx.send(\n f\"{invite_url}\\n\\nInviting the bot will give it some management permissions.\"\n \" You can review them when you use the link.\"\n )", "def invites(self, account_id):\n from pureport_client.commands.accounts.invites import Command\n return Command(self.client, account_id)", "def update_user(id):\n pass", "def test_ack_invitation(self):\n (approver_user_id,\n joining_user_id,\n _,\n invite_id) = self.setup_invites()\n uri = '/invite_ack/{}/{}'.format(approver_user_id, joining_user_id)\n rsp = self.client.post(uri, data={'approves': True})\n rsp_json = json.loads(rsp.data)\n\n invite = model.Invitation.query.get(invite_id)\n self.assertEqual(rsp_json['success'], True)\n self.assertEqual(rsp.status_code, 200)\n self.assertEqual(invite.invite_id, invite_id)", "async def invite(self, ctx, plain_url: bool = False):\n\n if not plain_url:\n try:\n await ctx.embed(\n 'Click to invite me to your server!',\n title_url=self.bot.invite_url,\n colour='blue',\n icon=\"https://i.imgur.com/DtPWJPG.png\"\n )\n except discord.errors.Forbidden:\n pass\n else:\n return\n\n await ctx.send(f\"Invite URL: <{self.bot.invite_url}>\")", "def get_people_invited(self, users):\n invited = []\n for user in users:\n if Room.verify_if_is_invited(user):\n invited.append(user)\n return invited", "def remove_RSVP(eid, gid):\n check_admin()\n\n guestList = GuestList.query.filter_by(event_id=eid).all()\n for guest in guestList:\n print(\"guest.guest_id: \" + str(guest.guest_id))\n print(\"gid: \" + str(gid))\n if guest.guest_id == gid:\n guest.is_attending=False\n db.session.commit()\n \n flash('You have successfully set a user as not attending.')\n\n # redirect to the events page\n return redirect(url_for('admin.event_RSVPlist', id=eid))\n\n return render_template(title=\"Removed RSVP\")", "def reactivate(self):\r\n self.require_item()\r\n\r\n url = '{0}/reactivate'.format(self.get_url())\r\n request = http.Request('PUT', url)\r\n\r\n return request, parsers.parse_empty", "async def inviteme(self):\r\n\r\n #Your code will go here\r\n await self.bot.say(\"Here is a link to Invite Me: http://bit.ly/TacoBot\")", "def remove_all_user_invitations(self, user, group):\n if isinstance(user, HasEmail):\n for email in user.email_list():\n self.remove_invitation(email, group)\n else:\n self.remove_invitation(user, group)", "def updateUser(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')" ]
[ "0.6742321", "0.65622705", "0.6341691", "0.618192", "0.6178779", "0.6154837", "0.61355495", "0.60448194", "0.5970474", "0.59555095", "0.592783", "0.59243935", "0.59088224", "0.590702", "0.5906705", "0.5898827", "0.577978", "0.5737973", "0.5707441", "0.57018846", "0.56371075", "0.5636754", "0.5630471", "0.5615821", "0.55907476", "0.55854684", "0.55674404", "0.5539243", "0.55353385", "0.5527742", "0.5526495", "0.55091053", "0.5505409", "0.54911727", "0.5490471", "0.5466162", "0.54659927", "0.54309386", "0.54167825", "0.5416506", "0.541551", "0.5409636", "0.5402998", "0.5402377", "0.53901976", "0.5364371", "0.53559685", "0.5349237", "0.53319854", "0.53297687", "0.5327648", "0.5315224", "0.53116447", "0.5305682", "0.52888703", "0.52882767", "0.5285526", "0.5283102", "0.5272098", "0.52616", "0.52513444", "0.5237143", "0.52121", "0.5211731", "0.5209771", "0.5205286", "0.5198728", "0.5198728", "0.5198474", "0.519303", "0.5182506", "0.5170801", "0.5168835", "0.51590157", "0.5158682", "0.51583254", "0.51525176", "0.5145724", "0.5142811", "0.5132092", "0.51267743", "0.5105176", "0.5099675", "0.50968546", "0.50968546", "0.5093525", "0.50922924", "0.5072175", "0.5069694", "0.5061718", "0.5057049", "0.5055851", "0.5055154", "0.50483483", "0.5044173", "0.5039199", "0.50354373", "0.50252545", "0.5022508", "0.50219655" ]
0.7661665
0
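The row's metadata marks it for a (query, document, negatives) triplet objective, with a score per negative and a score for the positive document. Below is a minimal sketch of turning one such row into training triplets; the field names come from the schema above, while the helper name, the score-based filtering, and the max_negatives cutoff are illustrative assumptions rather than part of the dataset.

def row_to_triplets(row, max_negatives=5):
    """Build (anchor, positive, negative) triplets from one dataset row.

    Keeps only negatives scored strictly below the positive document's
    score, then takes the hardest (highest-scoring) ones first.
    """
    positive_score = float(row["document_score"])
    scored = [
        (float(score), neg)
        for neg, score in zip(row["negatives"], row["negative_scores"])
        if float(score) < positive_score
    ]
    # Hardest negatives first: closest to, but still below, the positive score.
    scored.sort(key=lambda pair: pair[0], reverse=True)
    return [
        (row["query"], row["document"], neg)
        for _, neg in scored[:max_negatives]
    ]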
Approve a user's application
def approve_user_application(self, user):
    if self.is_moderator and \
            self.has_perm('accounts.approve_user_application'):
        user.moderator = self
        user.moderator_decision = user.APPROVED
        user.decision_datetime = timezone.now()
        user.auth_token = generate_unique_id()
        user.save()
        return user
    else:
        raise PermissionDenied
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def approve(self):\n self._check_if_open()\n data = {\"approved\": True}\n return self.post(\"approve\", data)", "def can_approve(self, user, **data):\n raise Return(False)", "def approve_me(message):\n users = hf.get_users()\n for user in users:\n if user[\"id\"] == message._get_user_id():\n if user[\"approval_level\"] == \"unapproved\": # Unknown\n message.reply(Strings['APPROVER_REQUEST'])\n admins = hf.get_admins()\n names = []\n for admin in admins:\n names.append(admin[\"name\"])\n\n approval_message = Strings[\n 'APPROVER_REQUEST_DETAIL'].format(\">, <@\".join(names), user[\"name\"])\n\n #message._client.send_message(config.AUTH_CHANNEL, approval_message)\n message._client.send_message(public_channel, approval_message)\n else:\n message.reply(\":x: Your approval level is already: \" + str(user[\"approval_level\"]))", "def approve_me(message):\n load_users(message._client.users)\n sender_id = message._get_user_id()\n target = user_list[sender_id].details['name']\n if (user_list[sender_id].is_unknown):\n message.reply(Strings['APPROVER_REQUEST'])\n names = list_to_names(user_list.admin_list)\n approval_message = Strings[\n 'APPROVER_REQUEST_DETAIL'].format(\">, <@\".join(names), target)\n message._client.send_message(config.AUTH_CHANNEL, approval_message)\n else:\n message.reply(\n \"Your status is already: \" + user_list[sender_id].level.name)", "def action_approve(self):\n if not self.date_approve:\n self.date_approve = fields.Datetime.now()\n\n config = self.env['ka_hr_payroll.config'].default_config()\n if check_rapel_status(self, config):\n self.action_rapel()\n else:\n self.action_done()", "def test_approve(self):\n\n username,userpass = self.testdata.find_account_for('toolsubmitter')\n\n self.utils.account.login_as(username,userpass)\n\n self.contribtool.approve(TOOLNAME,TOOLLICENSEDATA)", "def review_applications(request):\n moderator = request.user\n site = get_current_site(request)\n\n pending = User.objects.filter(registration_method='REQ',\n decision_datetime=None,\n is_active=False)\n\n form = ModerateApplicationForm()\n\n if request.method == 'POST':\n\n form = ModerateApplicationForm(request.POST)\n user = get_object_or_404(User, id=request.POST['user_id'])\n\n if form.is_valid():\n decision = form.cleaned_data['decision']\n comments = form.cleaned_data['comments']\n\n if decision == 'APP':\n confirmation_message = _(\"{}'s account application \"\n \"has been approved.\".format(\n user.get_full_name().title()))\n\n moderator.approve_user_application(user)\n\n # Set log and email settings\n msg_type = ModerationLogMsg.APPROVAL\n url = request.build_absolute_uri(\n reverse('accounts:activate-account',\n args=[user.auth_token]))\n subject = _('Welcome to {}'.format(site.name))\n template = 'moderation/emails/approve_user.html'\n\n elif decision == 'REJ':\n confirmation_message = _(\"{}'s account application \"\n \"has been rejected.\".format(\n user.get_full_name().title()))\n\n moderator.reject_user_application(user)\n\n # Set log and email settings\n msg_type = ModerationLogMsg.REJECTION\n url = ''\n subject = _(('Unfortunately, your application to {} '\n 'was not successful').format(site.name))\n template = 'moderation/emails/reject_user.html'\n\n # Log moderation event\n log_comment = '{}'.format(comments)\n log_moderator_event(msg_type=msg_type,\n user=user,\n moderator=moderator,\n comment=log_comment)\n\n # Send moderation email\n send_connect_email(subject=subject,\n template=template,\n recipient=user,\n sender=moderator,\n site=site,\n url=url)\n\n 
messages.success(request, confirmation_message)\n\n return redirect('moderation:review-applications')\n\n context = {\n 'pending': pending,\n 'form': form,\n }\n\n return render(request, 'moderation/review_applications.html', context)", "def approve(user):\n if user.approved:\n logging.warn('noop - User %d already approved', user.id)\n return user\n user.approved = True\n for message in user.messages:\n if message.text == config.MSG_WELCOME:\n session.delete(message)\n session.add(user)\n session.commit()\n return user", "def Approve(self, request, global_params=None):\n config = self.GetMethodConfig('Approve')\n return self._RunMethod(\n config, request, global_params=global_params)", "def userapprove_admin(user_id):\n # take the supplied user_id and use that to access a given user.\n\n # User objects list which includes list of all users which can be broken down into editors and sponsors\n # get individual user\n user = db.session.query(User).filter(User.id==user_id).first()\n # update status to approved\n user.user_status = 'approved'\n # commit to database\n db.session.commit()\n\n return redirect(url_for('admin_bp.usersview_admin'))", "def approve(self):\n self.approved = True\n self.quest_node['approved'] = True\n graph.push(self.quest_node)\n self.payout()", "def approve_project(cls, project):\n project.status = Project.APPROVED\n project.save()", "def approve_person(message, target):\n load_users(message._client.users)\n if target == 'me':\n return\n user = user_list.find_user(target)\n\n approver = message._get_user_id()\n if user_list[approver].is_admin:\n if user is not None:\n target_name = user.details['name']\n if user.is_unknown:\n message.reply(\"Approving user: '{}'\".format(target_name))\n user_list[user.details['id']].level = Level.Approved\n user_list.save()\n elif user.is_denied:\n message.reply(Strings['MARKED_DENIED'])\n else:\n message.reply(\"{} is already: {}.\".format(target_name,\n user.level.name))\n else:\n message.reply(Strings['USER_NOT_FOUND'].format(target))\n else:\n message.reply(Strings['CANT_APPROVE'])", "def approve(self,toolname,data):\n\n self.logger.info(\"approving the tool '%s'\" % (toolname))\n\n po = self.catalog.load_pageobject('ToolsStatusInstalledPage',toolname)\n po.goto_page()\n\n # click the approve link\n po.flip_status_to_approved()\n\n\n po = self.catalog.load_pageobject('ToolsStatusApproveConfirmVersionPage',toolname)\n\n # check for error on page\n err = po.get_error_info()\n if err:\n # update the version information\n old_version = po.version_form.version.value\n new_version = str(float(old_version) + 0.01)\n po.version_form.submit_form({'version':new_version})\n\n # check for error on page\n err = po.get_error_info()\n if err:\n raise RuntimeError('error found on page: %s' % (err))\n\n # check for the success message\n ok = po.get_success_info()\n if not ok:\n raise RuntimeError('missing success message after updating version')\n\n # click the approve link again ?!?\n po = self.catalog.load_pageobject('ToolsStatusInstalledPage',toolname)\n po.flip_status_to_approved()\n\n # confirm the version\n po = self.catalog.load_pageobject('ToolsStatusApproveConfirmVersionPage',toolname)\n po.version_form.submit_form()\n\n # confirm the license\n po = self.catalog.load_pageobject('ToolsStatusApproveConfirmLicensePage',toolname)\n po.submit_form(data)\n\n # check for error on page\n err = po.get_error_info()\n if err:\n raise RuntimeError('error found on page: %s' % (err))\n\n # confirm the tool info\n po = 
self.catalog.load_pageobject('ToolsStatusApproveConfirmToolInfoPage',toolname)\n po.approve_tool()\n\n # check for the success message\n po = self.catalog.load_pageobject('ToolsStatusApprovedPage',toolname)\n ok = po.get_success_info()\n if not ok:\n raise RuntimeError('missing success message after approving tool info')", "def approve_person(message, target):\n users = hf.get_users()\n if target == 'me':\n return\n for user in users:\n if user[\"name\"] == target:\n approver = message._get_user_id()\n admins = hf.get_admins()\n for admin in admins:\n if admin[\"id\"] == approver:\n if user is not None:\n if user[\"approval_level\"] == \"unapproved\":\n message.reply(\"Approved user: <@{}>\".format(target))\n user[\"approval_level\"] = \"approved\"\n hf.save_users(users)\n return\n elif user[\"approval_level\"] == \"denied\":\n message.reply(Strings['MARKED_DENIED'])\n return\n else:\n message.reply(\":x: {} is already: {}.\".format(target,\n user[\"approval_level\"]))\n return\n else:\n message.reply(Strings['USER_NOT_FOUND'].format(target))\n return\n\n message.reply(Strings['CANT_APPROVE'])", "def jao_approve(self):\n print \"JAO approved this form. Current state:\", self.state", "def test_admin_approval_already_approved(self):\n new_user = self.registration_profile.objects.create_inactive_user(\n site=Site.objects.get_current(), **self.user_info)\n profile = self.registration_profile.objects.get(user=new_user)\n user, activated = self.registration_profile.objects.activate_user(\n profile.activation_key, Site.objects.get_current())\n\n self.assertIsInstance(user, UserModel())\n self.assertTrue(activated)\n\n user = self.registration_profile.objects.admin_approve_user(\n profile.id, Site.objects.get_current())\n self.assertIsInstance(user, UserModel())\n self.assertIs(user.is_active, True)", "def update_application(request):\n\n record = RegApplication.query.filter_by(email=request.form['application-email']).first()\n\n record.application_processed = True\n record.application_granted = False if request.form['application-action'] == 'reject' else True\n record.processed_date = datetime.datetime.now()\n db.session.commit()\n\n if not record.application_granted:\n\n send_message(subject='OpenAPS Access Refused',\n email=request.form['application-email'],\n content=f\"\"\"Your application for access to the OpenAPS data portal was rejected for the following reason:\n <br><br>\n '{request.form['reject-reason']}'\"\"\")\n\n return record.project_requests", "def approve(self, approver: str, to: str, amount, key: bytes):\n raw_tx = self.approve_build_transaction(approver, to, amount)\n signed_tx = self._sign(raw_tx, key)\n self.send_and_wait(signed_tx)", "def submit(request):\n if not request.user.is_authenticated():\n return proceed(request)\n # If dev has already agreed, continue to next step.\n user = UserProfile.objects.get(pk=request.user.id)\n if not user.read_dev_agreement:\n return redirect('submit.app.terms')\n return manifest(request)", "def handle_application(sender, instance, **kwargs):\n if instance.accepted is not None:\n if instance.accepted:\n instance.user.userprofile.change_status_developer()\n else:\n instance.user.userprofile.change_status_player()", "def approve(self, approved_by=\"system\"):\n\n self.confirm_state(completed=False, cancelled=False)\n\n self.is_valid(\"task invalid before approval\")\n\n # We approve the task before running actions,\n # that way if something goes wrong we know if it was approved,\n # when it was approved, and who approved it.\n 
self.task.approved = True\n self.task.approved_on = timezone.now()\n self.task.approved_by = approved_by\n self.task.save()\n\n # approve all actions\n for action in self.actions:\n try:\n action.approve()\n except Exception as e:\n handle_task_error(e, self.task, error_text=\"while approving task\")\n\n self.is_valid(\"task invalid after approval\")\n\n need_token = any([act.need_token for act in self.actions])\n if need_token:\n self._create_token()\n else:\n self.submit()", "def post(self, request, *args, **kwargs):\n application = self.get_object()\n app_complete = Application.objects.filter(\n pk=self.kwargs['app_complete']\n ).first()\n if is_application_owner(self.request.user, application) and (\n application.questionnaire.status != 'complete'\n ) and app_complete is not None and (\n app_complete.authorized_email is not None\n ) and app_complete.questionnaire.completed_by_candidate and (\n app_complete.questionnaire.status == 'complete'\n ):\n\n \"\"\"Attach authorized email & questionnaire to application\"\"\"\n application.authorized_email = app_complete.authorized_email\n application.questionnaire = app_complete.questionnaire\n application.save()\n\n \"\"\"Submit application if nomination is complete too\"\"\"\n if application.nomination.status == 'complete':\n submit_application(application)\n\n return redirect(self.get_success_url())\n else:\n raise Http404(_(\"No application found matching the query\"))", "def approve(self, approver: PrivateKey):\n sig = crypto.get_signature_for_deploy_approval(\n self.hash, approver.private_key, approver.key_algo\n )\n self._append_approval(DeployApproval(approver.account_key, sig))", "def approve(self, request, object_id, extra_context=None):\n obj = self.get_object(request, unquote(object_id))\n title = self._approve_title(obj)\n\n AdminAddApprovalForm = self._approve_approval_form(request)\n\n form = AdminAddApprovalForm(initial={'prescription': obj})\n if request.method == 'POST':\n url = reverse('admin:prescription_prescription_detail',\n args=[str(obj.id)])\n if obj.approval_status == obj.APPROVAL_DRAFT and obj.can_approve:\n # create an approval\n obj.approval_status = obj.APPROVAL_SUBMITTED\n obj.approval_status_modified = timezone.now()\n obj.save()\n self.message_user(\n request, \"Successfully submitted for approval.\")\n return HttpResponseRedirect(url)\n elif obj.approval_status == obj.APPROVAL_SUBMITTED:\n if request.POST.get('_cancel'):\n obj.clear_approvals()\n msg = 'Delete: Clearing Approvals/Endorsements', 'Burn ID: {}, Deleted by: {}'. format(obj.burn_id, request.user.get_full_name())\n logger.warning(msg)\n support_email('Delete: Clearing Approvals/Endorsements', msg)\n\n self.message_user(\n request, \"Approval rejected. 
ePFP is now draft.\")\n return HttpResponseRedirect(url)\n\n form = AdminAddApprovalForm(request.POST,\n initial={'prescription': obj})\n if form.is_valid():\n approval = form.save(commit=False)\n approval.prescription = obj\n approval.creator = request.user\n approval.modifier = request.user\n approval.save()\n obj.approval_status = obj.APPROVAL_APPROVED\n obj.approval_status_modified = timezone.now()\n obj.save()\n self.message_user(\n request, \"Successfully approved.\")\n return HttpResponseRedirect(url)\n elif obj.is_approved:\n if obj.is_closed:\n self.message_user(\n request, \"You can't extend an approval after the \"\n \"prescribed fire plan has been closed.\")\n return HttpResponseRedirect(url)\n if request.POST.get('_cancel'):\n self.message_user(\n request, \"Didn't extend approval.\")\n return HttpResponseRedirect(url)\n else:\n approval = obj.current_approval\n if approval and approval.extension_count < 3:\n approval.extension_count = approval.extension_count + 1\n approval.valid_to = approval.next_valid_to\n approval.save()\n self.message_user(\n request, \"Successfully extended approval.\")\n else:\n self.message_user(request, \"You can't extend an \"\n \"approval more than 3 times.\")\n return HttpResponseRedirect(url)\n\n admin_form, media = self._approve_form(request, obj, form)\n\n context = {\n 'title': title,\n 'current': obj,\n 'form': admin_form,\n 'media': media,\n 'errors': None,\n }\n return TemplateResponse(request, \"admin/prescription/prescription/\"\n \"approval.html\", context,\n current_app=self.admin_site.name)", "def hod_approve(self):\n print \"HOD approved this form. Current state:\", self.state", "def change_approval(self, status):\r\n if status == 'approve':\r\n return self.approve()\r\n elif status == 'disapprove':\r\n return self.disapprove()", "def authorise(data, ind):\n global approved\n global pending_sheet\n approved.append_row(data)\n ind += 1\n pending_sheet.delete_rows(ind)\n print(colored('\\nApplication authorised.\\n', 'cyan', attrs=['bold']))", "def approved(message):\n hf.query_users(message, hf.get_users(), \"approved\")", "def approve (self, response) :\n if 'event' in response and 'moderator' in response :\n eventId = response ['event']\n userId = response ['moderator']\n else :\n raise ModerationError (response)\n\n mod_status = 'OK'\n if 'status' in response :\n mod_status = response ['status']\n \n event = Event.object.get (id = eventId)\n approval = Approval (approved = event, moderatorId = userId, status = mod_status)\n approval.save ()\n self.editValues (event.answer, response)", "def approve(self, message):\n boto_connection = connection.get_connection()\n boto_connection.approve_assignment(self.assignment_id, message)", "def approve_me_group(message):\n users = hf.get_users()\n sender_id = message._get_user_id()\n\n for user in users:\n if user[\"id\"] == sender_id:\n if (user[\"approval_level\"] == \"unapproved\"):\n message.reply(Strings['APPROVE_ME_REQUEST'])\n else:\n self_name = user[\"approval_level\"]\n message.reply(\":x: Your status is already: {}\".format(self_name))", "def approve_loan(current_user):\n\n if not current_user.admin:\n return jsonify({\"message\": \"Cannot perform the action.\"})\n\n try:\n loan_id = request.get_json()[\"loan_id\"]\n user_id = request.get_json()[\"user_id\"]\n except:\n return jsonify({\"message\": \"Invalid input.\"}), 400\n\n loan = Loan.query.filter_by(id=loan_id, user_id=user_id).first()\n\n if not loan:\n return jsonify({\"message\": \"Invalid loan ID.\"})\n\n if loan.loan_state 
== \"NEW\":\n loan.loan_state = \"APPROVED\"\n loan.start_date = datetime.utcnow()\n\n db.session.commit()\n message = \"Loan \" + str(loan.id) + \" approved.\"\n\n return jsonify({\"message\": message})\n\n elif loan.loan_state == \"APPROVED\":\n return jsonify({\"message\": \"Loan already approved.\"})\n\n return jsonify({\"message\": \"Cannot approve loan.\"})", "def can_accept(self, user):\n if user.has_perm('funding.make_application_decisions'):\n # Funding manager can override / update decisions, if required\n # But we still need to have had a offer made\n if self.status in ['G', 'A', 'N']:\n return True\n # Applicants can only decide on granted applications\n if self.status == 'G':\n if self.applicant == user:\n return True\n return False", "def send_approval_mail(event_id, user_id):\n event = Event.objects.get(id=event_id)\n user = User.objects.get(id=user_id)\n\n sender = getattr(settings, 'EMAIL_HOST_USER', [])\n subject = \"Your attending request approved\"\n recipients = [user.email]\n context = Context({'name': user.username,\n 'event_title': event.title,\n 'event_id': event.id,\n 'site': Site.objects.get_current()})\n send_html_email(subject,\n sender,\n recipients,\n context,\n \"event/mail/approval\")\n return True", "def approve(pengusulan_id, status):\n user = Staff.is_login()\n if user is None:\n return redirect(url_for('auth.login'))\n\n if user.get_unit_role() == 'staff':\n flash(f\"Anda tidak memiliki akses untuk melakukan approval pengusulan\", flash_code.WARNING)\n return redirect(url_for('pengusulan.table'))\n\n pengusulan_approve = Pengusulan.approve(\n pengusulan_id=pengusulan_id,\n status=status,\n petugas_id=user.id\n )\n if pengusulan_approve:\n flash(f\"Status Pengusulan Buku telah berhasil diperbarui\", flash_code.SUCCESS)\n else:\n flash(f\"Status Pengusulan Buku gagal diperbarui\", flash_code.DANGER)\n return redirect(url_for('pengusulan.manage'))", "def reject_user_application(self, user):\n if self.is_moderator \\\n and self.has_perm('accounts.reject_user_application'):\n user.moderator = self\n user.moderator_decision = user.REJECTED\n user.decision_datetime = timezone.now()\n user.save()\n\n return user\n\n else:\n raise PermissionDenied", "def approve(_spender: address, _amount: uint256) -> bool:\n\n self.allowed[msg.sender][_spender] = _amount\n log.Approval(msg.sender, _spender, _amount)\n return True", "def approve(self):\n if (self.status == self.APPROVED):\n pass\n\n print ('starting approval process by adding events to the primary cal')\n\n primary_calendar = self.course.calendar_courses.get(primary=True)\n # print ('primary = ' + primary_calendar)\n for event in self.events.all():\n d = event.date\n start = datetime.datetime(d.year, d.month, d.day)\n start = timezone.make_aware(start, timezone.get_current_timezone())\n start = start + datetime.timedelta(hours=8)\n end = start + datetime.timedelta(hours=1)\n\n params = {\n 'calendar': primary_calendar,\n 'title': event.title,\n 'start': start,\n 'end': end\n }\n CalendarEvent.objects.create(**params)\n event.approved = True\n event.save()\n\n print ('trying to set syllabus to approved')\n\n try:\n syllabus = self.syllabus.all()[0]\n syllabus.approved = True\n syllabus.course = self.course\n syllabus.save()\n except:\n print ('dang, that failed, but continuing nonetheless.')\n pass\n\n\n print ('creating students from roster-students')\n\n\n for student in self.students.all():\n email = student.email\n if email:\n user = utils.get_or_create_user(email, student.first_name, student.last_name)\n 
school = self.course.domain\n user_student = utils.get_or_create_student(school, user)\n\n self.course.enroll_by_roster(user_student, self)\n\n student.approved = True\n student.save()\n\n print ('instructors')\n\n for instructor in self.instructors.all():\n instructor.approved = True\n instructor.save()\n\n print ('approving done')\n\n\n self.status = self.APPROVED\n self.save()\n\n add_notification(\n self.created_by.user,\n 'Your class set for {}, is approved and published!'.format(self.course)\n )", "def approve_request(self, request_name):\n self.logger.info(\"Approving request '%s' ...\", request_name)\n\n json_args = json.dumps({\"RequestStatus\": \"assignment-approved\"})\n urn = self.urn_prefix + \"/request/%s\" % request_name\n status, data = self.http_request(\"PUT\", urn, data=json_args,\n headers=self.headersBody)\n\n if status != 200:\n self.logger.error(\"Failed to approve request with status: %s, data: %s\", status, data)\n sys.exit(1)\n self.logger.info(\"Approve succeeded.\")", "def corporate_approve(self, request, object_id, extra_context=None):\n obj = self.get_object(request, unquote(object_id))\n if request.method == 'POST':\n url = reverse('admin:prescription_prescription_detail',\n args=[str(obj.id)])\n if request.POST.get('_cancel'):\n return HttpResponseRedirect(url)\n if request.POST.get('_save'):\n if (obj.planning_status == obj.PLANNING_DRAFT and obj.can_corporate_approve):\n obj.planning_status = obj.PLANNING_SUBMITTED\n obj.planning_status_modified = timezone.now()\n obj.save()\n self.message_user(\n request, \"Successfully submitted for corporate approval.\")\n return HttpResponseRedirect(url)\n if obj.planning_status == obj.PLANNING_SUBMITTED:\n # Only ePFP Application Administrator can apply corporate approval\n if ((not request.user.has_perm(\n 'prescription.can_corporate_approve'))):\n raise PermissionDenied\n\n obj.planning_status = obj.PLANNING_APPROVED\n obj.planning_status_modified = timezone.now()\n obj.save()\n self.message_user(\n request, \"Corporate approval successful.\")\n return HttpResponseRedirect(url)\n elif request.POST.get('_delete'):\n if (obj.planning_status == obj.PLANNING_APPROVED and request.user.has_perm('prescription.can_admin')):\n obj.planning_status = obj.PLANNING_DRAFT\n obj.planning_status_modified = timezone.now()\n obj.save()\n self.message_user(\n request, \"Successfully deleted for corporate approval.\")\n return HttpResponseRedirect(url)\n\n context = {\n 'current': obj,\n }\n return TemplateResponse(request, self.corporate_approval_template,\n context, current_app=self.admin_site.name)", "def approve_requests(self, request, queryset):\n for req in queryset:\n req.approved = True\n req.save()\n self.message_user(\n request,\n '%d requests have been approved.' 
% queryset.count(),\n level=messages.INFO)", "def approve(self, feedback=None):\n self.hit.generate_connection()\n self.hit.connection.approve_assignment(self.mturk_id, feedback=feedback)\n self.update()", "def handle_nr_approve(self, nr, svc) -> Request:\n return self.approve_nr(nr, svc)", "def switch_to_app_user(self, user):\n try:\n admin_tasks.change_user(user)\n except admin_tasks.AdminTasksError, e:\n self.log.error(str(e))\n sys.exit(1)", "def test_valid_admin_approval(self):\n\n new_user = self.registration_profile.objects.create_inactive_user(\n site=Site.objects.get_current(), **self.user_info)\n profile = self.registration_profile.objects.get(user=new_user)\n user, activated = self.registration_profile.objects.activate_user(\n profile.activation_key, Site.objects.get_current())\n\n self.assertIsInstance(user, UserModel())\n\n user = self.registration_profile.objects.admin_approve_user(\n profile.id, Site.objects.get_current())\n self.assertIsInstance(user, UserModel())\n self.assertIs(user.is_active, True)", "def ApproveApprovalRequest(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details(\"Method not implemented!\")\n raise NotImplementedError(\"Method not implemented!\")", "async def approve(self, ctx, user: discord.Member):\n server = ctx.message.server\n if user.id in self.norole[server.id]:\n if self.norole[server.id][user.id]['Role'] == True:\n self.norole[server.id][user.id] = {'Role': False}\n dataIO.save_json(self.warninglist, self.norole)\n nobnl = discord.utils.get(server.roles, name = \"NoBNL\")\n await self.bot.remove_roles(user,nobnl)\n msg = await self.bot.say (\"Role removed!\")\n await asyncio.sleep(8)\n await self.bot.delete_message(msg) \n await self.bot.delete_message(ctx.message)\n else:\n msg = await self.bot.say(\"There is no role to remove!\")\n await asyncio.sleep(8)\n await self.bot.delete_message(msg)\n await self.bot.delete_message(ctx.message)", "def start_approval_process(self, request=None):\r\n # done here to avoid circular import\r\n from cbhooks.models import HookPoint\r\n\r\n hook_point = HookPoint.objects.filter(name=\"order_approval\").first()\r\n orch_actions = cbhooks._get_orchestration_actions_to_run(hook_point)\r\n if orch_actions:\r\n #the orchestration action NEEDs to be first in order to allow a hook\r\n # to model the approval process correctly and not have something\r\n # auto-approve before the hook is run\r\n logger.debug(\"Order Approval orchestration actions exist, so bypassing built-in approver emails.\")\r\n try:\r\n cbhooks.run_hooks(\"order_approval\", order=self)\r\n except cbhooks.exceptions.HookFailureException as e:\r\n msg = _(\"Failed to run hook for order approval. 
Status: {status},\"\r\n \" Output: {output}, Errors: {errors}\").format(status=e.status, output=e.output, errors=e.errors)\r\n raise CloudBoltException(msg)\r\n return \"\"\r\n\r\n #now that the hooks have run, check if it should be auto-approved\r\n profile = request.get_user_profile()\r\n if self.is_multilevel_approval():\r\n self.approve_my_grms(profile)\r\n\r\n if self.should_auto_approve():\r\n logger.debug(\"Order can be automatically approved, attempting approval by {}\".format(self.owner))\r\n jobs, msg = self.approve(self.owner)\r\n if jobs:\r\n msg = render_to_string(\r\n 'orders/approved_msg.html', {\r\n 'order': self,\r\n 'autoapproved': True,\r\n 'num_jobs': len(jobs),\r\n 'extramsg': msg,\r\n })\r\n return msg\r\n else:\r\n # No auto approval and no approval hooks, so go with\r\n # the default process of emailing a set of approvers, unless the\r\n # owner is an approver.\r\n msg = _(\"Order #{order_id} has been submitted for approval. \").format(order_id=self.id)\r\n msg += orders.mail.email_approvers(self, request)\r\n logger.debug(msg)\r\n return msg", "def approve_or_reject(org_id: int, is_approved: bool, user: UserModel):\n current_app.logger.debug('<find_affidavit_by_org_id ')\n affidavit: AffidavitModel = AffidavitModel.find_by_org_id(org_id)\n affidavit.decision_made_by = user.username\n affidavit.decision_made_on = datetime.now()\n affidavit.status_code = AffidavitStatus.APPROVED.value if is_approved else AffidavitStatus.REJECTED.value\n\n current_app.logger.debug('>find_affidavit_by_org_id ')\n return Affidavit(affidavit)", "def dr_approve(self):\n print \"DR approved this form. Current state:\", self.state", "def test_approve_agreement(self):\n pass", "def post(self, request, *args, **kwargs):\n application = self.get_object()\n app_complete = Application.objects.filter(\n pk=self.kwargs['app_complete']\n ).first()\n email = self.request.user.email\n if app_complete is not None and can_candidate_access(\n application,\n email,\n ) and can_candidate_access(\n app_complete,\n email,\n ) and application.questionnaire.status != 'complete' and (\n app_complete.questionnaire.status == 'complete'\n ):\n\n \"\"\"Attach completed questionnaire to the current application\"\"\"\n application.questionnaire = app_complete.questionnaire\n application.save()\n\n \"\"\"Submit application if nomination is complete too\"\"\"\n if application.nomination.status == 'complete':\n submit_application(application)\n\n return redirect(self.get_success_url())\n else:\n return redirect(\n 'nominations-candidate-questionnaire-select',\n application.id,\n )", "def approve_broadcast(broadcast_id):\n broadcast = Broadcast.objects.get(pk=broadcast_id)\n messages = broadcast.get_messages()\n batch = broadcast.batch\n batch.add_messages(messages)\n batch.status = Batch.APPROVED\n batch.save()", "def KLP_User_Activate(request, user_id):\n\n # get logged in user\n\n user = request.user\n if user.id:\n\n # check logged in user permissions to delete user\n\n KLP_user_Perm(request.user, 'Users', None)\n userObj = User.objects.get(pk=user_id)\n userObj.is_active = 1 # activate user\n userObj.save() # save user object\n return render_to_response('viewtemplates/userAction_done.html',\n {\n 'user': request.user,\n 'selUser': userObj,\n 'message': 'User Activated Successfully',\n 'legend': 'Karnataka Learning Partnership',\n 'entry': 'Add',\n }, context_instance=RequestContext(request))\n else:\n\n # if user is not logged in redirect to login page\n\n return HttpResponseRedirect('/login/')", "def approveList 
(self, list) : \n for request in list :\n self.approve (request)", "def activateWebAppUser( self, username, activation_code ):\n try:\n con = self.getMetadataDatabaseConnection()\n user_data = con.cursor()\n\n con.cursor().callproc('verify_user_activation_code', [username, activation_code, user_data])\n row = user_data.fetchone()\n if row:\n con.cursor().callproc('activate_user_account', [username])\n return True\n else:\n return False\n except Exception, e:\n print 'Exception caught: %s.\\nThe error is: %s' % (type(e), e)\n return False", "def approve(request, img_id):\n if not request.user.is_staff:\n return HttpResponseRedirect(reverse('wainz.views.composite'))\n else:\n image = Image.objects.get(pk=img_id)\n image.is_approved=True\n image.save()\n return HttpResponseRedirect(reverse('wainz.views.approve_images'))", "def approve(self, approval_name, notes):\n new_approval = USBDeviceApproval.create_from_usb_device(self)\n new_approval.approval_name = approval_name\n new_approval.notes = notes\n new_approval.save()\n self._refresh()\n return new_approval", "def approve_me_group(message):\n load_users(message._client.users)\n sender_id = message._get_user_id()\n\n if (user_list[sender_id].is_unknown):\n message.reply(Strings['APPROVE_ME_REQUEST'])\n else:\n self_name = user_list[sender_id].level.name\n message.reply(\"Your status is already: {}\".format(self_name))", "def approve(to_address: str, amount: int) -> bool:\n raise NotImplementedError()", "def comment_approve(request, pk):\n comment = get_object_or_404(Comment, pk=pk)\n comment.approve()\n return redirect('post_detail', pk=comment.post.pk)", "def approve_token(token_id, user):\n token = BlacklistedToken.query.filter_by(id=token_id, user_identity=user).first()\n if token is not None:\n db.session.remove(token)\n prune_if_necessary()\n db.session.commit()", "async def appcheck(self, ctx: commands.Context, user_id: discord.Member):\n return await ctx.send(\n \"This command is currently being reworked, follow updates in The Kompound\"\n )", "def handle_req(reqid):\n req = hl.retrieveRequest(reqid)\n \n if request.method == 'POST':\n if request.form['reqOption'] == 'Approve':\n hl.acceptRequest(req)\n elif request.form['reqOption'] == 'Decline':\n hl.declineRequest(req)\n \n return redirect('/users')", "def confirm_apply(request, pk):\n prop = get_object_or_404(Project, pk=pk)\n if Application.objects.filter(Q(Project=prop) & Q(Student=request.user)).exists():\n return render(request, \"base.html\", context={\n \"Message\": \"You already applied to this project.\",\n \"return\": 'students:list_applications',\n })\n return render(request, \"students/apply.html\", context={\n \"project\": get_object_or_404(Project, pk=pk),\n })", "def withdraw_application(application_id, user_id):\n if not application_id:\n return {\"status\": \"No application ID provided.\"}\n\n application = Application.query.filter_by(id=application_id, user_id=user_id).first()\n\n if application.is_inhouse_posting:\n specific_application = Inhouse.query.filter_by(application_id=application_id).first()\n else:\n specific_application = External.query.filter_by(application_id=application_id).first()\n\n if not application:\n return {\"status\": \"No application found.\"}\n\n db.session.delete(specific_application)\n db.session.delete(application)\n db.session.commit()\n return {\"status\": \"success\"}", "def view_approved():\n global approved\n global appr_ind\n appr = approved.get_all_values()\n headings = appr[0]\n first_appl = appr[appr_ind]\n for head, app in 
zip(headings, first_appl):\n head = head.ljust(15, ' ')\n print(f'{head} {app}')\n keep_viewing = True\n while keep_viewing:\n view_next = input('\\nPress V to view next, Q to quit, M for main '\n 'menu.\\n')\n if view_next.lower() == 'q':\n logout()\n elif view_next.lower() == 'v':\n appr_ind += 1\n if appr_ind < len(appr):\n print('Next approved application: \\n')\n view_approved()\n else:\n print('\\nNo more approved applications to view \\n')\n keep_viewing = False\n next_action()\n elif view_next.lower() == 'm':\n keep_viewing = False\n hr_main()\n break\n else:\n is_invalid()", "def auto_approve_purchase_order(self, auto_approve_purchase_order):\n\n self._auto_approve_purchase_order = auto_approve_purchase_order", "def can_edit(self, user):\n if user.has_perm('funding.make_application_decisions'):\n # Funding manager can update things later, if required\n return True\n # Applicants can only edit the application before the final review step\n if self.status in ('S', 'U'):\n if self.applicant == user:\n return True\n return False", "def submit_application(application):\n\n \"\"\"Check if questionnaire and nomination are complete\"\"\"\n if application.questionnaire.status == 'complete' and (\n application.nomination.status == 'complete'\n ):\n\n \"\"\"Update status to submitted if needed\"\"\"\n if application.is_editable() and application.status != 'submitted':\n application.status = 'submitted'\n application.save()\n\n \"\"\"Set to Basic Support if type is missing\"\"\"\n if application.application_type is None:\n application.application_type = ApplicationType.basic.value[0]\n application.save()\n\n \"\"\"Send notification for submitted status\"\"\"\n send_application_submitted_notification(application)\n\n return application", "def purchase_indent_approve(request, request_id):\n purchase_indent_request = get_object_or_404(PurchaseIndentRequest, pk=request_id)\n current_employee = request.user.employee_set.all()[0]\n\n if purchase_indent_request.state == 'Submitted':\n if purchase_indent_request.indenter.department.hod_id != current_employee.id:\n raise PermissionDenied\n return render(request, 'purchase/purchase_indent/show_hod.html',\n {'purchase_indent_request': purchase_indent_request})\n\n elif purchase_indent_request.state == 'Approved by Head of Department':\n if not request.user.groups.filter(name='JrAO_AccountsDepartment').exists():\n raise PermissionDenied\n form = PurchaseIndentBudgetDetailsForm()\n\n return render(request, 'purchase/purchase_indent/show_jao.html',\n {'purchase_indent_request': purchase_indent_request, 'form': form})\n\n elif purchase_indent_request.state == 'Approved by Junior Accounts Officer':\n if not request.user.groups.filter(name='DR_AccountsDepartment').exists():\n raise PermissionDenied\n return render(request, 'purchase/purchase_indent/show_dr.html',\n {'purchase_indent_request': purchase_indent_request})\n\n else:\n return PermissionDenied", "def auto_approve_cod(self, auto_approve_cod):\n\n self._auto_approve_cod = auto_approve_cod", "def on_access_approved(self, handler):\n print \"User with {0} has been GRANTED access.\".format(\n handler.client_address[0]\n )", "def confirm(text, app, version, modules=None, default_yes=False):\n print(text)\n print(' Directory: %s' % os.path.basename(app.app_dir))\n print(' App ID: %s' % app.app_id)\n print(' Version: %s' % version)\n print(' Modules: %s' % ', '.join(modules or app.modules))\n if default_yes:\n return raw_input('Continue? [Y/n] ') not in ('n', 'N')\n else:\n return raw_input('Continue? 
[y/N] ') in ('y', 'Y')", "def AppUpdateApp(self, request, context):\n context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)", "async def approve_event(self, message_id: int) -> None:\n if message_id not in self.approval_events:\n return\n\n async with self.lock:\n approved_event: BaseEvent = self.approval_events[message_id]\n event_embed = await approved_event.get_calendar_embed()\n try:\n await self.update_new_event(approved_event)\n del self.approval_events[message_id]\n except (HTTPException, Forbidden) as e:\n raise OpheliaCommandError(\n \"events_approval_error\",\n approved_event.title,\n self.calendar_channel.mention\n ) from e\n\n try:\n # Edit approval message\n approved_message: Message = (\n await self.approval_channel.fetch_message(message_id)\n )\n await self.approval_message_edit(\n approved_message,\n disp_str(\"events_add_approved\")\n )\n\n await send_message(\n channel=approved_event.organizer,\n text=approved_event.format_vars(self.accept_template),\n embed=event_embed\n )\n except (HTTPException, Forbidden):\n await send_simple_embed(\n self.approval_channel,\n \"events_approval_dm_error\",\n approved_event.title,\n approved_event.organizer.mention,\n colour=Colour(settings.embed_color_warning)\n )", "def should_auto_approve(self):\r\n if self.group and self.group.allow_auto_approval:\r\n return True\r\n\r\n # some orders (like those duplicated by CIT) will not have owners\r\n if self.is_multilevel_approval():\r\n if self.has_all_approver_roles(self.owner, self.group):\r\n return True\r\n return False\r\n\r\n else:\r\n if self.owner and self.owner.has_permission('order.approve', self.group):\r\n return True\r\n\r\n return False", "def approve(self, employee: Employee, comments: str = None) -> None:\n from .exceptions import OperationForbiddenError, OrderEmptyError\n\n # If order is not in the \"PENDING\" state, raise an\n # OperationForbiddenError\n if not self.is_pending:\n raise OperationForbiddenError(\n self.STATE_CHANGE_FORBIDDEN_ERROR_MSG % {\n 'current_state': Order.OrderState.get_choice_display(\n self.state\n ),\n 'new_state': Order.OrderState.APPROVED.choice_display\n }\n )\n\n # If the order's item list is empty, raise an OrderEmptyError\n if not self.orderitem_set.exists():\n raise OrderEmptyError(\n self,\n 'An order with no associated OrderItems cannot be '\n 'approved.'\n )\n\n # Perform db mutations in a transaction\n with transaction.atomic():\n # Adjust the stock of each item in the order's item list\n order_item: OrderItem\n for order_item in self.orderitem_set.all():\n item: Inventory = order_item.item\n item.deduct(employee.user, order_item.quantity)\n\n # Mark this order as approved\n self.update(\n employee.user,\n comments=comments,\n handler=employee,\n review_date=now(),\n state=Order.OrderState.APPROVED.choice_value\n )", "def apply(request, pk):\n prop = get_object_or_404(Project, pk=pk)\n if request.user.applications.count() >= settings.MAX_NUM_APPLICATIONS:\n return render(request, \"base.html\", context={\n \"Message\": \"already at max amount of applied projects<br>\"\n \"retract one first before continuing\",\n \"return\": 'students:list_applications',\n })\n if Application.objects.filter(Q(Project=prop) & Q(Student=request.user)).exists():\n return render(request, \"base.html\", context={\n \"Message\": \"You already applied to this project.\",\n \"return\": 'students:list_applications',\n })\n\n track = ApplicationTracking()\n track.Project = prop\n track.Student = request.user\n track.Type = 'a'\n track.save()\n\n appl = 
Application()\n appl.Project = prop\n # highestprio = request.user.applications.aggregate(Max('Priority'))['Priority__max']\n appl.Student = request.user\n # if highestprio is None:\n # appl.Priority = 1\n # else:\n # appl.Priority = highestprio + 1\n appl.save()\n return render(request, \"base.html\", context={\n \"Message\": \"Application saved!\",\n \"return\": 'students:list_applications',\n })", "def unapprove(self):\n self._check_if_open()\n return super(BitbucketCloudBase, self).delete(\"approve\")", "def view_pending():\n global pending_sheet\n global pending\n global pend_app_ind\n global num_pending\n global skipped_apps\n headings = pending[0]\n appl_viewed = pending[pend_app_ind]\n for head, app in zip(headings, appl_viewed):\n head = head.ljust(15, ' ')\n print(f'{head} {app}')\n while True:\n print('\\nDo you wish to approve or reject this application?')\n print('Please enter A to approve, R to reject, V to view next')\n approve = input('or M to return to the main menu:\\n')\n if approve.lower() == 'a':\n authorise(appl_viewed, pend_app_ind)\n pending = pending_sheet.get_all_values()\n num_pending = len(pending)-1\n break\n elif approve.lower() == 'r':\n reject_appl(appl_viewed, pend_app_ind)\n pending = pending_sheet.get_all_values()\n num_pending = len(pending)-1\n break\n elif approve.lower() == 'm':\n hr_main()\n return\n elif approve.lower() == 'v':\n print(colored('\\nApplication not yet processed.\\n', 'cyan',\n attrs=['bold']))\n skipped_apps += 1\n pend_app_ind += 1\n break\n else:\n is_invalid()\n if (num_pending - skipped_apps) > 0:\n print('Next application pending approval:\\n')\n view_pending()\n else:\n print('No more pending applications')\n next_action()", "def user_requested_access(user):\r\n user = CourseCreator.objects.get(user=user)\r\n if user.state != CourseCreator.GRANTED:\r\n user.state = CourseCreator.PENDING\r\n user.save()", "def lifecycle_approve_for_my_org(self, orderer_url, orderer_tls_rootcert, channel_name, cc_name,\n chaincode_version, policy, sequence=1):\n res, installed = self.lifecycle_query_installed(\"3s\")\n cc_label = cc_name+\"_\"+chaincode_version\n package_id = \"\"\n for each in installed['installed_chaincodes']:\n if each['label'] == cc_label:\n package_id = each['package_id']\n break\n if package_id == \"\":\n return 1, \"not exist the chaincode, please check chaincode_name and chaincode_version\"\n\n if os.getenv(\"CORE_PEER_TLS_ENABLED\") == \"false\" or os.getenv(\"CORE_PEER_TLS_ENABLED\") is None:\n if self.version in BasicEnv.binary_versions_v2:\n res = os.system(\"./../bin/{}/bin/peer lifecycle chaincode approveformyorg -o {} \"\n \" --channelID {} --name {} --version {} --init-required --package-id {} --sequence {}\"\n \" --signature-policy {} > ./approve.txt\"\n .format(self.version, orderer_url, channel_name, cc_name,\n chaincode_version, package_id, sequence, policy))\n else:\n if self.version in BasicEnv.binary_versions_v2:\n res = subprocess.Popen(\"./../bin/{}/bin/peer lifecycle chaincode approveformyorg -o {} --tls \"\n \"--cafile {} --channelID {} --name {} --version {} --init-required --package-id \"\n \"{} --sequence {} --signature-policy {}\"\n .format(self.version, orderer_url, orderer_tls_rootcert, channel_name,\n cc_name, chaincode_version, package_id, sequence, policy), shell=True,\n stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n stdout, stderr = res.communicate()\n return_code = res.returncode\n\n if return_code == 0:\n content = str(stdout, encoding=\"utf-8\")\n else:\n stderr = str(stderr, 
encoding=\"utf-8\")\n return return_code, stderr\n return return_code, content", "def upgrade(message, target, num):\n return\n users = hf.get_users()\n\n for user in users:\n if user[\"name\"] != target:\n continue\n try:\n user[\"approval_level\"] = int(num)\n except Exception:\n message.reply(\":x: That's not a number, ya dingus. :)\")\n return\n\n hf.save_users(users)\n\n message.reply(\"Successfully upgraded user {} to approval level \"\n \"{}.\".format(target, num))", "def do_subscription_approval(sender, **kwargs):\r\n req_payment = sender.get_product_class().get_requires_payment_details()\r\n if not req_payment or has_valid_billing_details(sender.billing_account):\r\n status = 'approved'\r\n else:\r\n status = 'declined'\r\n sender.set_current_approval_status(status)\r\n return status", "def customise_auth_user_resource(r, tablename):\n\n auth = current.auth\n\n def approve_user(r, **args):\n\n from gluon import redirect\n\n db = current.db\n user = db(db.auth_user.id == r.id).select(limitby = (0, 1)\n ).first()\n org_group_id = user.org_group_id\n if org_group_id:\n # Check if this is a COVID-19 Test Station\n ogtable = current.s3db.org_group\n org_group = db(ogtable.id == org_group_id).select(ogtable.name,\n limitby = (0, 1)\n ).first()\n if org_group and org_group.name == TESTSTATIONS:\n # Custom Approval process\n redirect(URL(c= \"default\", f=\"index\", args=[\"approve\", r.id]))\n\n # Default Approval\n auth.s3_approve_user(user)\n current.session.confirmation = T(\"User Account has been Approved\")\n redirect(URL(args=[r.id, \"roles\"]))\n\n current.s3db.configure(\"auth_user\",\n approve_user = approve_user,\n )", "def customise_auth_user_resource(r, tablename):\n\n auth = current.auth\n\n def approve_user(r, **args):\n\n from gluon import redirect\n\n db = current.db\n user = db(db.auth_user.id == r.id).select(limitby = (0, 1)\n ).first()\n org_group_id = user.org_group_id\n if org_group_id:\n # Check if this is a COVID-19 Test Station\n ogtable = current.s3db.org_group\n org_group = db(ogtable.id == org_group_id).select(ogtable.name,\n limitby = (0, 1)\n ).first()\n if org_group and org_group.name == TESTSTATIONS:\n # Custom Approval process\n redirect(URL(c= \"default\", f=\"index\", args=[\"approve\", r.id]))\n\n # Default Approval\n auth.s3_approve_user(user)\n current.session.confirmation = T(\"User Account has been Approved\")\n redirect(URL(args=[r.id, \"roles\"]))\n\n current.s3db.configure(\"auth_user\",\n approve_user = approve_user,\n )", "def test_approve(self):\r\n request = RequestFactory()\r\n post = request.post(self.url, {'field': 'mod_queue',\r\n 'op': 'approve',\r\n 1: [self.problem_id.to_deprecated_string(), '2.0', '2']})\r\n view.approve(post, self.course_id, 'mod_queue')\r\n problem_hints = XModuleUserStateSummaryField.objects.get(field_name='mod_queue', usage_id=self.problem_id).value\r\n self.assertTrue('2.0' not in json.loads(problem_hints) or len(json.loads(problem_hints)['2.0']) == 0)\r\n problem_hints = XModuleUserStateSummaryField.objects.get(field_name='hints', usage_id=self.problem_id).value\r\n self.assertTrue(json.loads(problem_hints)['2.0']['2'] == ['Hint 2', 1])\r\n self.assertTrue(len(json.loads(problem_hints)['2.0']) == 2)", "def purchase_indent_jao_approve(request, request_id):\n # Check if logged in user is JAO\n if not request.user.groups.filter(name='JrAO_AccountsDepartment').exists():\n raise PermissionDenied\n\n current_employee = request.user.employee_set.all()[0]\n purchase_indent_request = 
get_object_or_404(PurchaseIndentRequest, pk=request_id)\n form = PurchaseIndentBudgetDetailsForm(request.POST, instance=purchase_indent_request)\n\n if form.is_valid():\n if request.POST.get('Approve'):\n if not can_proceed(purchase_indent_request.jao_approve):\n raise PermissionDenied\n\n purchase_indent_request.jao_approve()\n purchase_indent_request.save()\n\n remark = request.POST.get('remark')\n transition_record = TransitionHistory(\n approver=current_employee,\n form=purchase_indent_request,\n from_state=STATE.APPROVED_BY_HOD,\n to_state=STATE.APPROVED_BY_JAO,\n remark=remark\n )\n transition_record.save()\n messages.success(request, 'The Purchase Indent form was Approved')\n\n elif request.POST.get('Reject'):\n if not can_proceed(purchase_indent_request.reject):\n raise PermissionDenied\n\n purchase_indent_request.reject()\n purchase_indent_request.save()\n\n remark = request.POST.get('remark')\n transition_record = TransitionHistory(\n approver=current_employee,\n form=purchase_indent_request,\n from_state=STATE.APPROVED_BY_HOD,\n to_state=STATE.REJECT,\n remark=remark\n )\n transition_record.save()\n messages.warning(request, 'The Purchase Indent form was Rejected')\n\n return redirect('purchase:purchase-requests-pending')\n else:\n return render(request, 'purchase/purchase_indent/show_jao.html',\n {'purchase_indent_request': purchase_indent_request}, {'form': form})", "def save():\n user = users.get_current_user()\n if user:\n new_app = Applic(parent=base_key)\n new_app.user = user.user_id()\n new_app.username = user.nickname()\n new_app.content = request.forms.get('content')\n new_app.title = request.forms.get('title') \n new_app.put()\n redirect('/')\n else:\n redirect('/')", "def confirm(id):\n #: get resources\n user = User.query.get_or_404(id)\n service = SignUpService(user)\n input_token = request.args['token']\n\n #: active current account\n try:\n service.active(input_token)\n except TokenUsedError:\n message = _(u\"The account had been actived.\")\n return render_template(\"confirm-failed.html\", message=message), 403\n except TokenWrongError:\n message = _(u\"The active token is invalid.\")\n return render_template(\"confirm-failed.html\", message=message), 403\n\n #: automatic sign in\n session_login(user)\n #: output a success message\n message = _(u\"The account has been actived successfully.\")\n return render_template(\"confirm-success.html\", message=message)", "def confirm_email(self, request, email_address):\n email_address.verified = True\n email_address.set_as_primary(conditional=True)\n email_address.save()\n\n u = get_user_model().objects.get(pk=email_address.user.id)\n u.is_active = True\n u.save()", "def update_user(user_id, data):\n logging.debug(\"Uptating user: user_id={}\".format(user_id))\n return ask('appusers/{0}'.format(user_id), data, 'put')", "def confirm_meal(request, e_id):\n enrolment = Enrolment.objects.get(pk=e_id)\n total_meal = enrolment.day_meal_count + enrolment.night_meal_count\n price = enrolment.plan.price\n extended_user = ExtendedUser.objects.get(user=request.user)\n extended_user.balance -= price * total_meal\n if extended_user.balance >= 0:\n extended_user.save()\n owner = enrolment.plan.store.owner\n owner = ExtendedUser.objects.get(user=owner)\n owner.balance += price * total_meal\n owner.save()\n return view_enrolments(request)", "def post(self):\n\n action = self.request.get('action')\n if not action:\n raise ErrorMessage(404, 'missing action (requested_action) params')\n\n self.require_action_permitted('grant')\n\n account = 
model.Account.get(self.request.get('key'))\n if not account:\n raise ErrorMessage(404, 'bad key given')\n\n #TODO(eyalf): define account.display_name() or something\n name = account.email\n if not action in account.requested_actions:\n #i18n: Error message\n raise ErrorMessage(404, _('No pending request for '\n '%(account_action)s by %(user)s')\n % (action, name))\n account.requested_actions.remove(action)\n grant = self.request.get('grant', 'deny')\n if grant == 'approve':\n account.actions.append(action)\n account.put()\n logging.info('%s request for %s was %s' % (account.email,\n action,\n grant))\n\n if self.params.embed:\n if grant == 'approve':\n self.write(\n #i18n: Application for the given permission action approved\n _('Request for becoming %(action)s was approved.') % action)\n else:\n self.write(\n #i18n: Application for the given permission action denied\n _('Request for becoming %(action)s was denied.') % action)\n else:\n raise Redirect(self.get_url('/grant_access'))", "def test_admin_approval_not_activated(self):\n new_user = self.registration_profile.objects.create_inactive_user(\n site=Site.objects.get_current(), **self.user_info)\n profile = self.registration_profile.objects.get(user=new_user)\n\n user = self.registration_profile.objects.admin_approve_user(\n profile.id, Site.objects.get_current())\n self.assertIs(user, False)\n self.assertIs(profile.user.is_active, False)", "async def update_new_approval_event(\n self,\n event: BaseEvent\n ) -> None:\n message = await send_message(\n channel=self.approval_channel,\n text=None,\n embed=await event.get_approval_embed()\n )\n\n await message.add_reaction(APPROVE_EMOTE)\n await message.add_reaction(REJECT_EMOTE)\n self.approval_events[message.id] = event", "def auto_approve_purchase_order(self):\n return self._auto_approve_purchase_order", "async def activate_application_token(self, apptoken, temptoken) -> bool:\n await self.raw_request(\n self.URL_ACTIVATE.format(apptoken=apptoken, temptoken=temptoken)\n )\n return True" ]
[ "0.7413602", "0.71029663", "0.67888886", "0.6747213", "0.67318124", "0.67286676", "0.6719018", "0.66766196", "0.6670902", "0.65097535", "0.64031065", "0.63882875", "0.63717735", "0.63500285", "0.634869", "0.63414145", "0.63078153", "0.6297645", "0.62773865", "0.6240185", "0.6192649", "0.61336243", "0.611977", "0.6088279", "0.60658", "0.6056476", "0.6044347", "0.6042677", "0.60321546", "0.60213864", "0.5958916", "0.5943743", "0.5928491", "0.59089124", "0.5896483", "0.5893422", "0.58912873", "0.5856053", "0.5848953", "0.5817068", "0.58123386", "0.5793034", "0.57763827", "0.5771706", "0.57697225", "0.57583106", "0.5758173", "0.573315", "0.57305264", "0.5723728", "0.5716319", "0.56781095", "0.56490177", "0.56192863", "0.56187916", "0.56125385", "0.5604722", "0.5592419", "0.5580644", "0.55722874", "0.55687326", "0.556416", "0.5561347", "0.555324", "0.5546984", "0.55459183", "0.5545046", "0.55406356", "0.5539836", "0.55395305", "0.5505703", "0.5502333", "0.55005765", "0.54970443", "0.5486285", "0.54719657", "0.54497474", "0.5414099", "0.54128265", "0.5396337", "0.5380663", "0.53697723", "0.53583914", "0.5353769", "0.5332513", "0.5327544", "0.53241897", "0.53241897", "0.5314607", "0.52939", "0.52872545", "0.5282453", "0.5280774", "0.5278186", "0.5274075", "0.52686256", "0.52682567", "0.5266121", "0.52538836", "0.5251788" ]
0.79233325
0
Reject a user's application
def reject_user_application(self, user): if self.is_moderator \ and self.has_perm('accounts.reject_user_application'): user.moderator = self user.moderator_decision = user.REJECTED user.decision_datetime = timezone.now() user.save() return user else: raise PermissionDenied
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def admin_reject(user):\n if user.comments in (None or \"\"):\n return\n\n subject = \"ECE/CIS Account - Account Application rejected for %s\" % user.username\n application = \"https://www.eecis.udel.edu/NewAccount/\"\n helprequest = \"https://www.eecis.udel.edu/service\"\n sponsor = \"%[email protected]\" % user.sponsor\n \n message = \"Your ECE/CIS Account has been rejected by ECE/CIS faculty adminstrators.\\n\" % user.sponsor\n message += \"The reason given for rejection was:\\n\\n%s\\n\\n\" % user.comments\n message += \"You may re-apply with corrected information at %s\\n\" % application\n message += \"Please don't reply to this email. If have any questions, please \\n\"\n message += \"please post a ticket as an outsider at %s\" % helprequest\n message += \"-- ECE\\CIS Labstaff\"\n\n\n send('[email protected]', 'ECE/CIS Account System', \\\n [user.email, sponsor], subject, message, MAILHOST)", "def serverReject(self):\n self.handshake_deferred.errback(ConnectionDeny(code=403, reason=\"Access denied\"))\n self.cleanup()\n logger.debug(\"WebSocket %s rejected by application\", self.reply_channel)\n self.factory.log_action(\"websocket\", \"rejected\", {\n \"path\": self.request.path,\n \"client\": \"%s:%s\" % tuple(self.client_addr) if self.client_addr else None,\n })", "def _reject(self, reason):\n log.error('Rejected: %s' % reason)\n\n self._remove_changes()\n self._remove_files()\n\n if self.user is not None:\n email = Email('importer_reject_maintainer')\n package = self.changes.get('Source', '')\n\n self.send_email(email, [self.user.email], package=package, message=reason)\n sys.exit(1)", "async def deny(self, ctx: commands.Context, target: discord.Member):\n try:\n accepter = get(ctx.guild.roles, id=await self.config.guild(ctx.guild).accepter_id())\n except TypeError:\n accepter = None\n if not accepter:\n if not ctx.author.guild_permissions.administrator:\n return await ctx.send(\"Uh oh, you cannot use this command.\")\n else:\n if accepter not in ctx.author.roles:\n return await ctx.send(\"Uh oh, you cannot use this command.\")\n try:\n applicant = get(ctx.guild.roles, id=await self.config.guild(ctx.guild).applicant_id())\n except TypeError:\n applicant = None\n if not applicant:\n applicant = get(ctx.guild.roles, name=\"Staff Applicant\")\n if not applicant:\n return await ctx.send(\n \"Uh oh, the configuration is not correct. Ask the Admins to set it.\"\n )\n if applicant in target.roles:\n await ctx.send(\"Would you like to specify a reason? (yes/no)\")\n pred = MessagePredicate.yes_or_no(ctx)\n try:\n await self.bot.wait_for(\"message\", timeout=30, check=pred)\n except asyncio.TimeoutError:\n return await ctx.send(\"You took too long. Try again, please.\")\n if pred.result:\n await ctx.send(\"Please, specify your reason now.\")\n\n def check(m):\n return m.author == ctx.author\n\n try:\n reason = await self.bot.wait_for(\"message\", timeout=120, check=check)\n except asyncio.TimeoutError:\n return await ctx.send(\"You took too long. Try again, please.\")\n await target.send(\n f\"Your application in {ctx.guild.name} has been denied.\\n*Reason:* {reason.content}\"\n )\n else:\n await target.send(f\"Your application in {ctx.guild.name} has been denied.\")\n await target.remove_roles(applicant)\n await ctx.send(f\"Denied {target.mention}'s application.\")\n else:\n await ctx.send(f\"Uh oh. 
Looks like {target.mention} hasn't applied for anything.\")", "async def deny(self, ctx: commands.Context, target: discord.Member):\n try:\n accepter = get(ctx.guild.roles, id = await self.config.guild(ctx.guild).accepter_id())\n except TypeError:\n accepter = None\n if not accepter:\n if not ctx.author.guild_permissions.administrator:\n return await ctx.send(\"Uh oh, you cannot use this command.\")\n else:\n if accepter not in ctx.author.roles:\n return await ctx.send(\"Uh oh, you cannot use this command.\")\n try:\n applicant = get(ctx.guild.roles, id = await self.config.guild(ctx.guild).applicant_id())\n except TypeError:\n applicant = None\n if not applicant:\n applicant = get(ctx.guild.roles, name=\"Staff Applicant\")\n if not applicant:\n return await ctx.send(\"Uh oh, the configuration is not correct. Ask the Admins to set it.\")\n if applicant in target.roles:\n await ctx.send(\"Would you like to specify a reason? (yes/no)\")\n pred = MessagePredicate.yes_or_no(ctx)\n try:\n await self.bot.wait_for(\"message\", timeout=30, check=pred)\n except asyncio.TimeoutError:\n return await ctx.send(\"You took too long. Try again, please.\")\n if pred.result:\n await ctx.send(\"Please, specify your reason now.\")\n\n def check(m):\n return m.author == ctx.author\n\n try:\n reason = await self.bot.wait_for(\n \"message\", timeout=120, check=check\n )\n except asyncio.TimeoutError:\n return await ctx.send(\"You took too long. Try again, please.\")\n await target.send(\n f\"Your application in {ctx.guild.name} has been denied.\\n*Reason:* {reason.content}\"\n )\n else:\n await target.send(\n f\"Your application in {ctx.guild.name} has been denied.\"\n )\n await target.remove_roles(applicant)\n await ctx.send(f\"Denied {target.mention}'s application.\")\n else:\n await ctx.send(\n f\"Uh oh. 
Looks like {target.mention} hasn't applied for anything.\"\n )", "def reject_appl(data, ind):\n global rejected\n global pending_sheet\n rejected.append_row(data)\n ind += 1\n pending_sheet.delete_rows(ind)\n print(colored('\\nApplication rejected.\\n', 'cyan', attrs=['bold']))", "def reject(self):\n self.skype.conn(\"PUT\", \"{0}/users/{1}/invites/8:{2}/decline\"\n .format(SkypeConnection.API_CONTACTS, self.skype.userId, self.userId),\n auth=SkypeConnection.Auth.SkypeToken)", "def reject(self):\n pass", "async def reject_challenge(self, user_id, *, delay=0, lifespan=math.inf):\n await self.user_command(\n \"\", \"reject\", user_id, delay=delay, lifespan=lifespan\n )", "def review_applications(request):\n moderator = request.user\n site = get_current_site(request)\n\n pending = User.objects.filter(registration_method='REQ',\n decision_datetime=None,\n is_active=False)\n\n form = ModerateApplicationForm()\n\n if request.method == 'POST':\n\n form = ModerateApplicationForm(request.POST)\n user = get_object_or_404(User, id=request.POST['user_id'])\n\n if form.is_valid():\n decision = form.cleaned_data['decision']\n comments = form.cleaned_data['comments']\n\n if decision == 'APP':\n confirmation_message = _(\"{}'s account application \"\n \"has been approved.\".format(\n user.get_full_name().title()))\n\n moderator.approve_user_application(user)\n\n # Set log and email settings\n msg_type = ModerationLogMsg.APPROVAL\n url = request.build_absolute_uri(\n reverse('accounts:activate-account',\n args=[user.auth_token]))\n subject = _('Welcome to {}'.format(site.name))\n template = 'moderation/emails/approve_user.html'\n\n elif decision == 'REJ':\n confirmation_message = _(\"{}'s account application \"\n \"has been rejected.\".format(\n user.get_full_name().title()))\n\n moderator.reject_user_application(user)\n\n # Set log and email settings\n msg_type = ModerationLogMsg.REJECTION\n url = ''\n subject = _(('Unfortunately, your application to {} '\n 'was not successful').format(site.name))\n template = 'moderation/emails/reject_user.html'\n\n # Log moderation event\n log_comment = '{}'.format(comments)\n log_moderator_event(msg_type=msg_type,\n user=user,\n moderator=moderator,\n comment=log_comment)\n\n # Send moderation email\n send_connect_email(subject=subject,\n template=template,\n recipient=user,\n sender=moderator,\n site=site,\n url=url)\n\n messages.success(request, confirmation_message)\n\n return redirect('moderation:review-applications')\n\n context = {\n 'pending': pending,\n 'form': form,\n }\n\n return render(request, 'moderation/review_applications.html', context)", "def reject(self, responder):\n self._apply_decision(self.Status.REJECTED, responder)", "def get_everyone_denied(self):", "def on_buttonBox_rejected(self):\n self.reject()", "async def appcheck(self, ctx: commands.Context, user_id: discord.Member):\n return await ctx.send(\n \"This command is currently being reworked, follow updates in The Kompound\"\n )", "def reject(self):\r\n QtGui.QDialog.reject(self)", "def no_reason(message, db):\n message.reply(Strings['GRANT_EXAMPLE'].format(db))", "def app_permission_denied(self, request, message=None):\n if not request.successful_authenticator and not message:\n raise exceptions.NotAuthenticated()\n if message:\n raise exceptions.PermissionDenied(detail=message)\n raise exceptions.PermissionDenied(detail=message)", "def Reject(self, request, global_params=None):\n config = self.GetMethodConfig('Reject')\n return self._RunMethod(\n config, request, 
global_params=global_params)", "def reject(self):\n print \"This form has been rejected. Current state:\", self.state", "def sponsor_reject(user):\n if user.comments in (None or \"\"):\n return\n\n subject = \"ECE/CIS Account - Account Application rejected for %s\" % user.username\n application = \"https://www.eecis.udel.edu/NewAccount/\"\n \n message = \"Your ECE/CIS Account has been rejected by the faculty sponsor you selected (%s).\\n\" % user.sponsor\n message += \"The reason given for rejection was:\\n\\n%s\\n\\n\" % user.comments\n message += \"You may re-apply with corrected information at %s\\n\" % application\n message += \"Please don't reply to this email. If have any questions, please contact\\n\"\n message += \"the faculty member you listed as sponsor or labstaff.\\n\\n\"\n message += \"-- ECE\\CIS Labstaff\"\n \n send('[email protected]', 'ECE/CIS Account System', \\\n [user.email], subject, message, MAILHOST)", "def on_reject(self):\n self.state = REJECTED\n self._reject()", "def update_application(request):\n\n record = RegApplication.query.filter_by(email=request.form['application-email']).first()\n\n record.application_processed = True\n record.application_granted = False if request.form['application-action'] == 'reject' else True\n record.processed_date = datetime.datetime.now()\n db.session.commit()\n\n if not record.application_granted:\n\n send_message(subject='OpenAPS Access Refused',\n email=request.form['application-email'],\n content=f\"\"\"Your application for access to the OpenAPS data portal was rejected for the following reason:\n <br><br>\n '{request.form['reject-reason']}'\"\"\")\n\n return record.project_requests", "def denied(message):\n hf.query_users(message, hf.get_users(), \"denied\")", "def reject_proposal(self, widget):\n print(\"Rejecting project\")\n proposal_id = subprocess.check_output(\n ['python3', os.path.dirname(os.path.dirname(os.path.realpath(__file__))) + '/Sawtooth/bin/code_smell.py',\n 'list',\n '--type', 'proposal', '--active', '1', '--url', 'http://127.0.0.1:' + self.api])\n proposal_id = proposal_id.decode('utf-8').split(' ')[0]\n try:\n clocker = open('votelock.txt', 'r').read()\n if proposal_id in clocker:\n ErrorDialog(self, \"You already voted!\")\n return\n except:\n pass\n print(['python3', os.path.dirname(os.path.dirname(os.path.realpath(__file__))) + '/Sawtooth/bin/code_smell.py',\n 'vote',\n '--id', proposal_id, '--vote', 'no', '--url', 'http://127.0.0.1:' + self.api])\n subprocess.Popen(\n ['python3', os.path.dirname(os.path.dirname(os.path.realpath(__file__))) + '/Sawtooth/bin/code_smell.py',\n 'vote',\n '--id', proposal_id, '--vote', 'no', '--url', 'http://127.0.0.1:' + self.api])\n locker = open('votelock.txt', 'w')\n locker.write(proposal_id)\n locker.close()\n try:\n vote = int(self.lbl_reject.get_text().split(\":\")[1][1:])+1\n self.lbl_accept.set_text(self.lbl_reject.get_text().split(\":\")[0]+\" \"+str(vote))\n except:\n pass", "def handle_application(sender, instance, **kwargs):\n if instance.accepted is not None:\n if instance.accepted:\n instance.user.userprofile.change_status_developer()\n else:\n instance.user.userprofile.change_status_player()", "def unaccept_offer(self, pname, matchid):\n msg = '%s declined the match' % (pname)\n self._rem_offer(matchid, msg)\n msg = '%s canceled the game' % (pname)\n self._rem_game(matchid, msg)", "def ignore(self):\n self.accepted = False", "def ignore(self):\n self.accepted = False", "def reject(request, pk=None):\n # Check request is still valid or not\n friend_request = 
get_or_none(FriendRequest, pk=pk)\n # if request is not valid\n if friend_request is None:\n return Response({'status': '400', 'code': 'E_REQUEST_NOT_FOUND',\n 'detail': code['E_REQUEST_NOT_FOUND']}, status=400)\n # Delete request\n friend_request.delete()\n return Response({'status': '201', 'code': 'OK_REJECT_FRIEND_REQUEST',\n 'detail': code['OK_REJECT_FRIEND_REQUEST']}, status=201)", "def no_reason(message, db):\n #message.reply(Strings['GRANT_EXAMPLE'].format(db))\n try:\n hf.grant(message, db.lower(), \"[EXTENDING ACCESS TIME]\", False)\n except Exception as e:\n message._client.send_message(errors_channel, \"```{}```\".format(e))", "def can_accept(self, user):\n if user.has_perm('funding.make_application_decisions'):\n # Funding manager can override / update decisions, if required\n # But we still need to have had a offer made\n if self.status in ['G', 'A', 'N']:\n return True\n # Applicants can only decide on granted applications\n if self.status == 'G':\n if self.applicant == user:\n return True\n return False", "def no_reason(message, db):\n #message.reply(Strings['GRANT_EXAMPLE'].format(db))\n try:\n hf.grant(message, db.lower(), \"[EXTENDING ACCESS TIME]\", True)\n except Exception as e:\n message._client.send_message(errors_channel, \"```{}```\".format(e))", "def test_no_program_user_response(self, *args): # pylint: disable=unused-argument\n with mute_signals(post_save):\n no_permissions_profile = ProfileFactory.create()\n self.client.force_login(no_permissions_profile.user)\n resp_post = self.client.post(self.mail_url, data=self.request_data, format='json')\n assert resp_post.status_code == HTTP_403_FORBIDDEN", "def unapproved(message):\n hf.query_users(message, hf.get_users(), \"unapproved\")", "def approve_user_application(self, user):\n if self.is_moderator and \\\n self.has_perm('accounts.approve_user_application'):\n user.moderator = self\n user.moderator_decision = user.APPROVED\n user.decision_datetime = timezone.now()\n user.auth_token = generate_unique_id()\n user.save()\n\n return user\n\n else:\n raise PermissionDenied", "def reject(self, request, queryset):\n\n rejected_count = self.reject_stories(queryset)\n self.message_user_results(request, rejected_count, 0, \"rejected\")", "def auto_reject(self, reason):\n self.auto_rejection_reason = reason\n self.reject(responder=None)", "def alert_cancel(self):\n self._alert_accept_cancel(False)", "def exit_app(self):\n\n if self.wid.changed is True:\n but = QMessageBox().question(self, 'Message', \"Вы точно хотите выйти и не сохранить?\", QMessageBox.Yes | QMessageBox.No, QMessageBox.No)\n if but == QMessageBox.Yes:\n qApp.quit()\n else:\n qApp.quit()", "def reject_waiting_call(self) -> None:", "def accept_app(self, uid):\r\n print self.queue_applicants\r\n if self.bind_uid == uid:\r\n return 1\r\n if uid in self.queue_applicants:\r\n return 2\r\n if len(self.queue_applicants) < self.max_queue:\r\n if self.bind_uid == '':\r\n self.bind_uid = uid\r\n return 1\r\n else:\r\n self.queue_applicants.append(uid)\r\n return 2\r\n return 0", "def on_station_user_invite_rejected(self, func):\n self._set_event_handler(\"stations\")\n self._events.on_station_user_invite_rejected(func)", "def __onConfirmNo(self):\n self.__confDlg.reject()", "def leave_request_accept(self, token, **kwargs):\n cr, uid, context = self._get_cr_uid_context()\n res = self._check_leave_request(\n cr, uid, request, token, context=context\n )\n if isinstance(res, http.Response):\n return res\n if res:\n res.signal_workflow('validate')\n if res.state == 
'validate':\n return request.website.render(\n \"tk_hr_approve_request.leave_request_accepted\"\n )", "async def team_ignore(self, ctx: commands.Context):\n await self.config.user(ctx.author).do_not_message.set(True)\n await ctx.send('Okay, I won\\'t DM about this anymore.')", "def reject_connection(self, user_id):\n logging.debug('ConnectionsClient/reject_connection()')\n url = '/pod/v1/connection/reject'\n data = {'userId': user_id}\n return self.bot_client.execute_rest_call('POST', url, json=data)", "def force_stop_app(self,param,ignore_error_handle = False):\n message = {}\n package = str(param.get('package',None));\n step = 'force stop app ' + package;\n try:\n self.driver.force_stop_app(package);\n message = self.feedback.feedback_action_ok(step);\n except BaseException,e:\n message = self.feedback.feedback_action_fail(step,str(e),ignore_error_handle);\n finally:\n return message;", "def deny():\n raise InterruptEvent", "def test_rejected(self):\n actions = signoff_actions(appversions={\"code\": \"fx1.0\"},\n locales={\"code\": \"fr\"})\n actions = list(actions)\n eq_(len(actions), 1)\n eq_(actions[0][1], Action.REJECTED)\n so = Signoff.objects.get(action=actions[0][0])\n eq_(so.push.tip.shortrev, \"l10n fr 0003\")\n eq_(so.locale.code, \"fr\")\n eq_(so.action_set.count(), 2)", "def kill(self):\r\n # get current application\r\n currentApplication = self.phone.uiState.getCurrentApplication()\r\n\r\n if not currentApplication in self.__appsThatCantBeKilled:\r\n self.phone.comment('exit.kill()')\r\n self.phone.sx(self.__killCommand)\r\n self.phone.delay(300, False)\r\n self.phone.uiState.getCurrentState(True)\r\n else:\r\n self.phone.warn('Not allowed to kill \"%s\" application using SX' % currentApplication)", "def userreject_admin(user_id):\n\n # User objects list which includes list of all users which can be broken down into editors and sponsors\n # get individual user\n user = db.session.query(User).filter(User.id==user_id).first()\n # update status to approved\n user.user_status = 'rejected'\n # commit to database\n db.session.commit()\n\n return redirect(url_for('admin_bp.usersview_admin'))", "def on_reject(self, update, _context):\n self.send_message(update.message.chat_id, c.MSG_THANKS_NOTHANKS)", "def test_reject_agreement(self):\n pass", "def reject_reason(self, reject_reason):\n allowed_values = [\"INTERNAL_SERVER_ERROR\", \"INSTRUMENT_PRICE_UNKNOWN\", \"ACCOUNT_NOT_ACTIVE\", \"ACCOUNT_LOCKED\", \"ACCOUNT_ORDER_CREATION_LOCKED\", \"ACCOUNT_CONFIGURATION_LOCKED\", \"ACCOUNT_DEPOSIT_LOCKED\", \"ACCOUNT_WITHDRAWAL_LOCKED\", \"ACCOUNT_ORDER_CANCEL_LOCKED\", \"INSTRUMENT_NOT_TRADEABLE\", \"PENDING_ORDERS_ALLOWED_EXCEEDED\", \"ORDER_ID_UNSPECIFIED\", \"ORDER_DOESNT_EXIST\", \"ORDER_IDENTIFIER_INCONSISTENCY\", \"TRADE_ID_UNSPECIFIED\", \"TRADE_DOESNT_EXIST\", \"TRADE_IDENTIFIER_INCONSISTENCY\", \"INSUFFICIENT_MARGIN\", \"INSTRUMENT_MISSING\", \"INSTRUMENT_UNKNOWN\", \"UNITS_MISSING\", \"UNITS_INVALID\", \"UNITS_PRECISION_EXCEEDED\", \"UNITS_LIMIT_EXCEEDED\", \"UNITS_MIMIMUM_NOT_MET\", \"PRICE_MISSING\", \"PRICE_INVALID\", \"PRICE_PRECISION_EXCEEDED\", \"PRICE_DISTANCE_MISSING\", \"PRICE_DISTANCE_INVALID\", \"PRICE_DISTANCE_PRECISION_EXCEEDED\", \"PRICE_DISTANCE_MAXIMUM_EXCEEDED\", \"PRICE_DISTANCE_MINIMUM_NOT_MET\", \"TIME_IN_FORCE_MISSING\", \"TIME_IN_FORCE_INVALID\", \"TIME_IN_FORCE_GTD_TIMESTAMP_MISSING\", \"TIME_IN_FORCE_GTD_TIMESTAMP_IN_PAST\", \"PRICE_BOUND_INVALID\", \"PRICE_BOUND_PRECISION_EXCEEDED\", \"ORDERS_ON_FILL_DUPLICATE_CLIENT_ORDER_IDS\", 
\"TRADE_ON_FILL_CLIENT_EXTENSIONS_NOT_SUPPORTED\", \"CLIENT_ORDER_ID_INVALID\", \"CLIENT_ORDER_ID_ALREADY_EXISTS\", \"CLIENT_ORDER_TAG_INVALID\", \"CLIENT_ORDER_COMMENT_INVALID\", \"CLIENT_TRADE_ID_INVALID\", \"CLIENT_TRADE_ID_ALREADY_EXISTS\", \"CLIENT_TRADE_TAG_INVALID\", \"CLIENT_TRADE_COMMENT_INVALID\", \"ORDER_FILL_POSITION_ACTION_MISSING\", \"ORDER_FILL_POSITION_ACTION_INVALID\", \"TRIGGER_CONDITION_MISSING\", \"TRIGGER_CONDITION_INVALID\", \"ORDER_PARTIAL_FILL_OPTION_MISSING\", \"ORDER_PARTIAL_FILL_OPTION_INVALID\", \"INVALID_REISSUE_IMMEDIATE_PARTIAL_FILL\", \"TAKE_PROFIT_ORDER_ALREADY_EXISTS\", \"TAKE_PROFIT_ON_FILL_PRICE_MISSING\", \"TAKE_PROFIT_ON_FILL_PRICE_INVALID\", \"TAKE_PROFIT_ON_FILL_PRICE_PRECISION_EXCEEDED\", \"TAKE_PROFIT_ON_FILL_TIME_IN_FORCE_MISSING\", \"TAKE_PROFIT_ON_FILL_TIME_IN_FORCE_INVALID\", \"TAKE_PROFIT_ON_FILL_GTD_TIMESTAMP_MISSING\", \"TAKE_PROFIT_ON_FILL_GTD_TIMESTAMP_IN_PAST\", \"TAKE_PROFIT_ON_FILL_CLIENT_ORDER_ID_INVALID\", \"TAKE_PROFIT_ON_FILL_CLIENT_ORDER_TAG_INVALID\", \"TAKE_PROFIT_ON_FILL_CLIENT_ORDER_COMMENT_INVALID\", \"TAKE_PROFIT_ON_FILL_TRIGGER_CONDITION_MISSING\", \"TAKE_PROFIT_ON_FILL_TRIGGER_CONDITION_INVALID\", \"STOP_LOSS_ORDER_ALREADY_EXISTS\", \"STOP_LOSS_ORDER_GUARANTEED_REQUIRED\", \"STOP_LOSS_ORDER_GUARANTEED_PRICE_WITHIN_SPREAD\", \"STOP_LOSS_ORDER_GUARANTEED_NOT_ALLOWED\", \"STOP_LOSS_ORDER_GUARANTEED_HALTED_CREATE_VIOLATION\", \"STOP_LOSS_ORDER_GUARANTEED_HALTED_TIGHTEN_VIOLATION\", \"STOP_LOSS_ORDER_GUARANTEED_HEDGING_NOT_ALLOWED\", \"STOP_LOSS_ORDER_GUARANTEED_MINIMUM_DISTANCE_NOT_MET\", \"STOP_LOSS_ORDER_NOT_CANCELABLE\", \"STOP_LOSS_ORDER_NOT_REPLACEABLE\", \"STOP_LOSS_ORDER_GUARANTEED_LEVEL_RESTRICTION_EXCEEDED\", \"STOP_LOSS_ORDER_PRICE_AND_DISTANCE_BOTH_SPECIFIED\", \"STOP_LOSS_ORDER_PRICE_AND_DISTANCE_BOTH_MISSING\", \"STOP_LOSS_ON_FILL_REQUIRED_FOR_PENDING_ORDER\", \"STOP_LOSS_ON_FILL_GUARANTEED_NOT_ALLOWED\", \"STOP_LOSS_ON_FILL_GUARANTEED_REQUIRED\", \"STOP_LOSS_ON_FILL_PRICE_MISSING\", \"STOP_LOSS_ON_FILL_PRICE_INVALID\", \"STOP_LOSS_ON_FILL_PRICE_PRECISION_EXCEEDED\", \"STOP_LOSS_ON_FILL_GUARANTEED_MINIMUM_DISTANCE_NOT_MET\", \"STOP_LOSS_ON_FILL_GUARANTEED_LEVEL_RESTRICTION_EXCEEDED\", \"STOP_LOSS_ON_FILL_DISTANCE_INVALID\", \"STOP_LOSS_ON_FILL_PRICE_DISTANCE_MAXIMUM_EXCEEDED\", \"STOP_LOSS_ON_FILL_DISTANCE_PRECISION_EXCEEDED\", \"STOP_LOSS_ON_FILL_PRICE_AND_DISTANCE_BOTH_SPECIFIED\", \"STOP_LOSS_ON_FILL_PRICE_AND_DISTANCE_BOTH_MISSING\", \"STOP_LOSS_ON_FILL_TIME_IN_FORCE_MISSING\", \"STOP_LOSS_ON_FILL_TIME_IN_FORCE_INVALID\", \"STOP_LOSS_ON_FILL_GTD_TIMESTAMP_MISSING\", \"STOP_LOSS_ON_FILL_GTD_TIMESTAMP_IN_PAST\", \"STOP_LOSS_ON_FILL_CLIENT_ORDER_ID_INVALID\", \"STOP_LOSS_ON_FILL_CLIENT_ORDER_TAG_INVALID\", \"STOP_LOSS_ON_FILL_CLIENT_ORDER_COMMENT_INVALID\", \"STOP_LOSS_ON_FILL_TRIGGER_CONDITION_MISSING\", \"STOP_LOSS_ON_FILL_TRIGGER_CONDITION_INVALID\", \"TRAILING_STOP_LOSS_ORDER_ALREADY_EXISTS\", \"TRAILING_STOP_LOSS_ON_FILL_PRICE_DISTANCE_MISSING\", \"TRAILING_STOP_LOSS_ON_FILL_PRICE_DISTANCE_INVALID\", \"TRAILING_STOP_LOSS_ON_FILL_PRICE_DISTANCE_PRECISION_EXCEEDED\", \"TRAILING_STOP_LOSS_ON_FILL_PRICE_DISTANCE_MAXIMUM_EXCEEDED\", \"TRAILING_STOP_LOSS_ON_FILL_PRICE_DISTANCE_MINIMUM_NOT_MET\", \"TRAILING_STOP_LOSS_ON_FILL_TIME_IN_FORCE_MISSING\", \"TRAILING_STOP_LOSS_ON_FILL_TIME_IN_FORCE_INVALID\", \"TRAILING_STOP_LOSS_ON_FILL_GTD_TIMESTAMP_MISSING\", \"TRAILING_STOP_LOSS_ON_FILL_GTD_TIMESTAMP_IN_PAST\", \"TRAILING_STOP_LOSS_ON_FILL_CLIENT_ORDER_ID_INVALID\", 
\"TRAILING_STOP_LOSS_ON_FILL_CLIENT_ORDER_TAG_INVALID\", \"TRAILING_STOP_LOSS_ON_FILL_CLIENT_ORDER_COMMENT_INVALID\", \"TRAILING_STOP_LOSS_ORDERS_NOT_SUPPORTED\", \"TRAILING_STOP_LOSS_ON_FILL_TRIGGER_CONDITION_MISSING\", \"TRAILING_STOP_LOSS_ON_FILL_TRIGGER_CONDITION_INVALID\", \"CLOSE_TRADE_TYPE_MISSING\", \"CLOSE_TRADE_PARTIAL_UNITS_MISSING\", \"CLOSE_TRADE_UNITS_EXCEED_TRADE_SIZE\", \"CLOSEOUT_POSITION_DOESNT_EXIST\", \"CLOSEOUT_POSITION_INCOMPLETE_SPECIFICATION\", \"CLOSEOUT_POSITION_UNITS_EXCEED_POSITION_SIZE\", \"CLOSEOUT_POSITION_REJECT\", \"CLOSEOUT_POSITION_PARTIAL_UNITS_MISSING\", \"MARKUP_GROUP_ID_INVALID\", \"POSITION_AGGREGATION_MODE_INVALID\", \"ADMIN_CONFIGURE_DATA_MISSING\", \"MARGIN_RATE_INVALID\", \"MARGIN_RATE_WOULD_TRIGGER_CLOSEOUT\", \"ALIAS_INVALID\", \"CLIENT_CONFIGURE_DATA_MISSING\", \"MARGIN_RATE_WOULD_TRIGGER_MARGIN_CALL\", \"AMOUNT_INVALID\", \"INSUFFICIENT_FUNDS\", \"AMOUNT_MISSING\", \"FUNDING_REASON_MISSING\", \"CLIENT_EXTENSIONS_DATA_MISSING\", \"REPLACING_ORDER_INVALID\", \"REPLACING_TRADE_ID_INVALID\"] # noqa: E501\n if reject_reason not in allowed_values:\n raise ValueError(\n \"Invalid value for `reject_reason` ({0}), must be one of {1}\" # noqa: E501\n .format(reject_reason, allowed_values)\n )\n\n self._reject_reason = reject_reason", "async def blacklist_global(self, ctx, user: discord.User, *, reason):\n await self.bot.db.execute(\n \"INSERT IGNORE blacklisted_user VALUES (%s, %s)\", user.id, reason\n )\n self.bot.cache.blacklist[\"global\"][\"user\"].add(user.id)\n await util.send_success(ctx, f\"**{user}** can no longer use Miso Bot!\")", "def reject(self, message):\n boto_connection = connection.get_connection()\n boto_connection.reject_assignment(self.assignment_id, message)", "def add_user_with_status_unrequested(user):\r\n _add_user(user, CourseCreator.UNREQUESTED)", "def denyRequest(self, json):\n uID = json.get('uID')\n if not RequestsDAO().getRequestByuID(uID):\n return jsonify(Error=\"User speak request not found\"), 404\n else:\n approval = RequestsDAO().denyTurn(uID)\n mapped_result = self.buildGrantDenyToDict(uID, approval[0])\n return jsonify(TURN=mapped_result), 200", "def can_approve(self, user, **data):\n raise Return(False)", "def reject(self, text):\n\n RejectProposalCommand(self).execute()\n proposal = self.load_model()\n proposal.reject(text)\n\n with elevated_privileges():\n api.content.delete(self)", "def abort_not_request_owner(reqID, user):\n\n req = get_ride_request(reqID)\n if req.user_id != user:\n msg = \"You are not authorized to view this requests\"\n abort(HTTPStatus.UNAUTHORIZED, message=msg)", "def anti_bot(self, message):\n msg_list = self.ts.get_human_readable_message(message).lower().split(' ')\n bot_creation_date = self._get_creation_date(msg_list[1])\n viewers = self.ts.fetch_chatters_from_API()['viewers']\n mod_list = self.ts.get_mods()\n with codecs.open('whitelist.json', 'r', 'utf-8') as f:\n whitelist = json.load(f)\n for viewer in viewers:\n if self._get_creation_date(viewer) == bot_creation_date and viewer not in whitelist:\n self.ts.send_message('/ban {}'.format(viewer))\n mod_str = ', '.join(mod_list)\n self._add_to_whisper_queue(viewer, 'We\\'re currently experiencing a bot attack. 
If you\\'re a human and were accidentally banned, please whisper a mod: {}'.format(mod_str))", "def reject_incoming(project_info):\n user = user_collection.find_one({\"_id\": project_info[\"USER_ID\"]})\n bucket = user[\"notification_bucket\"]\n bucket.append(\n {\n \"project_id\": project_info[\"PROJECT_ID\"],\n \"status\": \"Rejected\",\n }\n )\n user_outgoing = user[\"outgoing\"]\n user_outgoing.remove(project_info[\"PROJECT_ID\"])\n user_collection.find_one_and_update(\n {\"_id\": project_info[\"USER_ID\"]},\n {\n \"$set\": {\n \"outgoing\": user_outgoing,\n \"notification_bucket\": bucket,\n }\n },\n )\n owner = user_collection.find_one({\"_id\": project_info[\"OWNER_ID\"]})\n incoming_list = owner[\"incoming\"]\n incoming_list.remove(\n {\n \"user_id\": project_info[\"USER_ID\"],\n \"project_id\": project_info[\"PROJECT_ID\"],\n }\n )\n user_collection.find_one_and_update(\n {\"_id\": project_info[\"OWNER_ID\"]},\n {\n \"$set\": {\n \"incoming\": incoming_list,\n }\n },\n upsert=False,\n )\n\n contribution_info = {\n \"USER_ID\": project_info[\"USER_ID\"],\n \"PROJECT_ID\": project_info[\"PROJECT_ID\"],\n }\n NotificationsHandler.remove_contribution(contribution_info=contribution_info)", "def cancel(self):\n with self.handle_alert(confirm=False):\n self.q(css='button#confirm').first.click()", "async def team_unignore(self, ctx: commands.Context):\n await self.config.user(ctx.author).do_not_message.set(False)\n await ctx.send('Okay, I\\'ll include you back in team-wide DMs.')", "async def unban(self, ctx, name: str):\n try:\n bans = await self.bot.get_bans(ctx.message.server)\n user = discord.utils.get(bans, name=name)\n if user is not None:\n await self.bot.unban(ctx.message.server, user)\n except discord.Forbidden:\n await self.bot.say('I do not have the proper permissions')\n except discord.HTTPException:\n await self.bot.say('Unbanning failed')\n else:\n await self.bot.say('\\N{OK HAND SIGN}')", "def friendship_reject(request, friendship_request_id):\n #if request.method == 'POST':\n #f_request = get_object_or_404(request.user.friendship_requests_received,id=friendship_request_id)\n f_request = FriendshipRequest.objects.get(from_user=friendship_request_id, to_user = request.user)\n from_user = request.user\n f_request.reject()\n return render(request , 'reload_page.html')\n #return render(request,'friendship/template_ags/friend_requests.html', {'from_user':from_user})", "def cant(user, action):\n\n return not can(user, action)", "def on_access_deny(self, handler):\n print \"User with {0} has been DENIED access.\".format(\n handler.client_address[0]\n )\n time.sleep(2) # lets annoy user if it is denied access", "def func_denied(self, message):\n log_th.log_info('{} - {} wrong entry : \"{}\"'.format(self.message_id, self.client_ip, message))\n self.func_sender(message)", "def DismissApprovalRequest(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details(\"Method not implemented!\")\n raise NotImplementedError(\"Method not implemented!\")", "def reject(self):\n pars = self.params\n if filters.startracker_lost(pars):\n return \"Alert occurred while Swift star-tracker had lost lock.\"\n\n # if not filters.grb_identified(pars):\n # if filters.tgt_in_ground_cat(pars) or filters.tgt_in_flight_cat(pars):\n # return \"Not a GRB - target associated with known catalog source\"\n # else:\n # return \"\"\"Not identified as GRB, but not a known source.\n # See packet for further details.\"\"\"\n return None", "def deny_access():\n flash('You must login 
first.')\n return redirect(url_for('home'))", "def test_accepted_devices_are_not_rejected(self):\n self.assertEqual(\n self._request(self._make_dummy_notification([DEVICE_ACCEPTED])),\n {\"rejected\": []},\n )", "def unapprove(self):\n self._check_if_open()\n return super(BitbucketCloudBase, self).delete(\"approve\")", "def reject(self):\n self.__shutdown()\n super(BookmarksDialog, self).reject()", "def _validate_running_as_fvalidation_exempt_user():\n if acm.UserName() not in FValidation_settings.SUPERUSERS:\n # Ensure that tool is run as a user exempt from FValidation\n # in order to avoid GUI pop-ups when touching entities.\n raise ValueError(\"This tool must be run by a user that is exempt from FValidation.\")", "def reject_follow(made_request_id, approver_id):\n\n if not g.user.id == approver_id:\n flash(\"Access unauthorized.\", \"danger\")\n return redirect(\"/\"), 403\n\n wanted_to_follow_user = User.query.get_or_404(made_request_id)\n g.user.from_users.remove(wanted_to_follow_user)\n db.session.commit()\n flash(f\"Follow request from {wanted_to_follow_user.username} rejected.\", \"success\")\n\n return redirect(f\"/users/{g.user.id}\")", "def on_station_admin_invite_rejected(self, func):\n self._set_event_handler(\"stations\")\n self._events.on_station_admin_invite_rejected(func)", "def alert_accept(self):\n self._alert_accept_cancel(True)", "def user_reject_friend_request(self):\n try:\n assert request.is_json\n except AssertionError:\n self.logger.debug(messages.REQUEST_IS_NOT_JSON)\n return messages.ERROR_JSON % messages.REQUEST_IS_NOT_JSON, 400\n content = request.get_json()\n if not FRIEND_REQUEST_MANDATORY_FIELDS.issubset(content.keys()):\n self.logger.debug(messages.MISSING_FIELDS_ERROR % (FRIEND_REQUEST_MANDATORY_FIELDS - set(content.keys())))\n return messages.ERROR_JSON % messages.MISSING_FIELDS_ERROR % (\n FRIEND_REQUEST_MANDATORY_FIELDS - set(content.keys())), 400\n email_token = auth.current_user()[0]\n try:\n self.friend_database.reject_friend_request(content[\"other_user_email\"], email_token)\n except UnexistentFriendRequest:\n self.logger.debug(messages.UNEXISTENT_FRIEND_REQUEST % (content[\"other_user_email\"], email_token))\n return messages.ERROR_JSON % (messages.UNEXISTENT_FRIEND_REQUEST %\n (content[\"other_user_email\"], email_token)), 404\n return messages.SUCCESS_JSON, 200", "def denied_response(self, req):\n if req.remote_user:\n return HTTPForbidden(request=req)\n else:\n return HTTPUnauthorized(request=req)", "def cancelInvite(self, user):\n invite = user if isinstance(user, MyPlexInvite) else self.pendingInvite(user, includeReceived=False)\n params = {\n 'friend': int(invite.friend),\n 'home': int(invite.home),\n 'server': int(invite.server)\n }\n url = MyPlexInvite.REQUESTED + f'/{invite.id}' + utils.joinArgs(params)\n return self.query(url, self._session.delete)", "def on_denied(self, request, ssn):\n if not ssn:\n logger.error(f'Received denial on unwanted subscription with '\n f'topic {request.GET[\"hub.topic\"]}!')\n return Response('Unwanted subscription')\n\n logger.error(f'Hub denied subscription {ssn.pk}!')\n tasks.save.delay(pk=ssn.pk, subscribe_status='denied')\n return Response('')", "def application(application):\n\n service = application.service\n metric = service.metrics.list()[0]\n method = metric.methods.create(rawobj.Method(\"disabled_method\"))\n\n proxy = service.proxy.list()\n proxy.mapping_rules.create(rawobj.Mapping(method, pattern=\"/anything/disabled_method\"))\n\n service.app_plans.list()[0].limits(method).create({\n 
\"metric_id\": method[\"id\"], \"period\": \"eternity\", \"value\": 0})\n\n proxy.deploy()\n\n return application", "def accept_cancel(self):\n self.ok = False\n self.destroy()", "def stop_application_mode(self) -> None:\n # Nothing to do", "def on_station_user_request_rejected(\n self, func,\n ):\n self._set_event_handler(\"stations\")\n self._events.on_station_user_request_rejected(func)", "def test_with_bad_app(self):\n mutation = DeleteApplication()\n\n message = (\n 'Cannot delete the application \"badapp\". The application could '\n 'not be found in the signature.'\n )\n\n with self.assertRaisesMessage(SimulationFailure, message):\n mutation.run_simulation(app_label='badapp',\n project_sig=ProjectSignature(),\n database_state=None)", "def disable(self):\n self._installed_apps_remove()", "def disallow_handler(update, _):\n global TEMPORARILY_ALLOWED\n user_id = update.message.chat.id\n if user_id == ADMIN_ID:\n TEMPORARILY_ALLOWED = False\n update.message.reply_text(\"Temprarily allowed disabled!\")", "async def cancel_game(self) -> None:\r\n # Checks if the client is already authenticated\r\n if self.is_auth is True and self.is_waiting is True and self.is_in_game is False:\r\n packaged_leave_game_queue_document = self.pkg_doc_manager(\"[CANCEL GAME]\", self.user_data[0])\r\n self.send(packaged_leave_game_queue_document)", "def abort_unauthorized(description):\n raise Unauthorized(description=description)", "def cmd_disable(self, app_name=None):\n rc = self.socket_command_with_project('disable', app_name)\n return rc", "def remove_app(self):\n \n pass", "def test_only_rejected_devices_are_rejected(self):\n self.assertEqual(\n self._request(\n self._make_dummy_notification([DEVICE_REJECTED, DEVICE_ACCEPTED])\n ),\n {\"rejected\": [DEVICE_REJECTED[\"pushkey\"]]},\n )", "def reject(self):\n if (self.status == self.REJECTED):\n pass\n\n if (self.status == self.APPROVED):\n # remove existing events etc\n\n primary_calendar = self.course.calendar_courses.get(primary=True)\n # print ('primary = ' + primary_calendar)\n for event in self.events.all():\n params = {\n 'calendar': primary_calendar,\n 'title': event.title\n }\n events = CalendarEvent.objects.get(**params)\n for event in events:\n event.approved = False\n event.save()\n\n syllabus = self.syllabus.all()[0]\n syllabus.approved = False\n syllabus.course = None\n syllabus.save()\n\n\n for student in self.students.all():\n enroll = Enrollment.objects.filter(\n student=student,\n created_by_roster=self\n )\n\n if enroll.exists():\n enroll.delete()\n\n # Unsubscribing from all calendars\n subscriptions = Subscription.objects.filter(student=self.student, calendar__course=self.course)\n subscriptions.delete()\n\n # Removing student calendars (events will cascade delete too)\n calendars = ClassCalendar.objects.filter(owner=self.student, course=self.course)\n calendars.delete()\n\n student.approved = False\n student.save()\n\n for instructor in self.instructors.all():\n instructor.approved = False\n instructor.save()\n\n\n self.status = self.REJECTED\n self.save()\n\n add_notification(\n self.created_by.user,\n 'Your class set for {} has been rejected'.format(self.course)\n )", "def denyMethod(self, verb, resource):\n self._addMethod(\"Deny\", verb, resource, [])", "def notify_thesis_rejected(thesis: 'Thesis'):\n recipients = []\n if thesis.advisor:\n recipients.append(thesis.advisor.user.email)\n if thesis.supporting_advisor:\n recipients.append(thesis.supporting_advisor.user.email)\n formatted_body = REJECTED_BODY.format(thesis.title, 
thesis.rejection_reason)\n rejecter = get_master_rejecter()\n msg = EmailMessage(\n REJECTED_SUBJECT,\n formatted_body,\n settings.MASS_MAIL_FROM,\n recipients,\n cc=[rejecter.user.email] if rejecter else []\n )\n msg.send()", "async def approve(self, ctx, user: discord.Member):\n server = ctx.message.server\n if user.id in self.norole[server.id]:\n if self.norole[server.id][user.id]['Role'] == True:\n self.norole[server.id][user.id] = {'Role': False}\n dataIO.save_json(self.warninglist, self.norole)\n nobnl = discord.utils.get(server.roles, name = \"NoBNL\")\n await self.bot.remove_roles(user,nobnl)\n msg = await self.bot.say (\"Role removed!\")\n await asyncio.sleep(8)\n await self.bot.delete_message(msg) \n await self.bot.delete_message(ctx.message)\n else:\n msg = await self.bot.say(\"There is no role to remove!\")\n await asyncio.sleep(8)\n await self.bot.delete_message(msg)\n await self.bot.delete_message(ctx.message)" ]
[ "0.6770561", "0.6585959", "0.65521705", "0.6491148", "0.6487616", "0.646831", "0.63601846", "0.63346696", "0.6136011", "0.61090565", "0.607409", "0.6064712", "0.60478044", "0.59528744", "0.5923107", "0.5874209", "0.5872581", "0.5852314", "0.58413756", "0.58047056", "0.5798305", "0.5784694", "0.57806647", "0.5743636", "0.5707335", "0.56867456", "0.5685694", "0.5685694", "0.5675082", "0.56601423", "0.56398326", "0.5628398", "0.5612266", "0.5607827", "0.55973876", "0.5593775", "0.5561758", "0.5559659", "0.55582696", "0.5544984", "0.5531788", "0.5508362", "0.54915327", "0.5468312", "0.54666036", "0.5464894", "0.54617465", "0.5451674", "0.54503024", "0.5435236", "0.54342705", "0.54304045", "0.54303354", "0.54218197", "0.5417053", "0.54111105", "0.5400225", "0.5396798", "0.53837395", "0.5382139", "0.5372066", "0.53436786", "0.53405786", "0.5338441", "0.53378946", "0.5333262", "0.53192675", "0.5311929", "0.5303727", "0.529601", "0.5292887", "0.52926093", "0.52793884", "0.5272275", "0.5270763", "0.52661234", "0.52643025", "0.5264302", "0.5262997", "0.52603376", "0.5257157", "0.52504873", "0.52470815", "0.5239792", "0.52308327", "0.52290535", "0.52267563", "0.5217406", "0.5212365", "0.51865035", "0.5178474", "0.51763666", "0.51703084", "0.51659185", "0.5163565", "0.5161347", "0.51552516", "0.5129148", "0.5113763", "0.51103806" ]
0.7710392
0
Return a user's proficiency in a particular skill as a percentage, based on the position of the proficiency in PROFICIENCY_CHOICES.
def get_proficiency_percentage(self): choice_values = [choice[0] for choice in self.PROFICIENCY_CHOICES] if '' in choice_values: choice_values.remove('') # Remove the empty proficiency choice choice_values.sort() # Ensure values are in the correct order value = choice_values.index(self.proficiency) + 1 factor = 100 / len(choice_values) percentage = round(value * factor) return percentage
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_opinion_percent(self):\n return (self.get_percent()+100)/2", "def profit_per_item_percentage(self, pk=None):\n total_profit_percentage = 0\n total_cost = self.item_cost + self.shipping_cost + self.listing_fee + self.final_value_fee\n total_paid = self.shipping_paid + self.item_paid\n total_profit_percentage = round(100*((total_paid - total_cost) / total_cost), 2)\n return total_profit_percentage", "def set_effective_field_goal_percentage(self):\n bx = self.get_standard_stats()\n tcInt = float(bx[\"t2p_int\"] + bx[\"t3p_int\"])\n tcConv = float(bx[\"t2p_conv\"] + bx[\"t3p_conv\"])\n result = 0.00\n if tcInt > 0:\n result = ((tcConv + (0.5 * float(bx[\"t3p_conv\"]))) / tcInt) * 100\n self.effective_field_goal_percentage = \"%.2f\" % round(result, 2)", "def fidelity_promo(percent: float) -> Promotion:\n return lambda order: (\n order.total() * percent / 100 if order.customer.fidelity >= 1000 else 0\n )", "def percentage(self) -> Optional[pulumi.Input[float]]:\n return pulumi.get(self, \"percentage\")", "def get_percentage_practices(measure_table):\n with open(OUTPUT_DIR / \"practice_count.json\") as f:\n num_practices = json.load(f)[\"num_practices\"]\n\n num_practices_in_study = get_number_practices(measure_table)\n\n return np.round((num_practices_in_study / num_practices) * 100, 2)", "def profesionalRecommendation(user_preferences: dict, matcher: NodeMatcher):\r\n profesional = user_preferences[\"vida_profesional\"]\r\n equal_styles = list(matcher.match(\"User\", prof = profesional))\r\n return equal_styles", "def GetProportion(self):\r\n\r\n return self.proportion", "def get_percent(self):\n if not (self.votes and self.score):\n return 0\n return 100 * (self.get_rating() / self.field.range)", "def get_percentage(self):\n return self.percentage", "def get_percentage(self):\n return self.percentage", "def percentage(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"percentage\")", "def get_percentage(self):\n return self.PotTax_percentage", "def proportional_strategy(our_hist, their_hist):\n if len(our_hist) == 0 or len(their_hist) == 0:\n return choice(CHOICES)\n freqs = count(their_hist)\n prediction_for_them = choices(CHOICES, weights=freqs)[0]\n return CHOICES[(prediction_for_them + 1) % 3]", "def calculate_penalty(self):\n if AT.PENALTY not in self.attributes:\n return (0, 1)\n return self.attributes[AT.PENALTY].calculate(self)", "def score_professor_conflicts(self):\n prof_conflict_score = 0\n multiplier = 4\n \n for day_num in range(self.num_days):\n \n current_day = self.days[ day_num ]\n num_conflicts = 0\n \n for prof_name in current_day.keys():\n if not self.get_prof_by_name[prof_name].available( day_num ):\n num_conflicts += 1\n \n prof_conflict_score += multiplier * ( num_conflicts ** 2 )\n \n self.prof_conflict_score = prof_conflict_score\n return self.prof_conflict_score", "def administer(self):\n\n score = 0.0\n for question in self.questions:\n if question.ask_and_evaluate() is True:\n score += 1\n return (score / len(self.questions)) * 100", "def envisaged_profit(self):\n profit = round(\n self.calcul_buy_nb_action() * self.take_profit - self.investment_price(),\n 2,\n )\n percent_profit = round(profit * 100 / self.capital, 2)\n return profit, percent_profit", "def adjustment_percentage(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"adjustment_percentage\")", "def adjustment_percentage(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"adjustment_percentage\")", "def 
get_crawlera_incapsula_percent(crawlera_user):\n if crawlera_user:\n return 0\n else:\n return 100", "def factor_in_multiple_professors(self):\n professors = [professor for professor in self.course.professors if professor.lower() != \"none\"]\n number_professors = len(set(professors))\n if number_professors > 1:\n self.score = self.score + number_professors", "def scoreSkills(self, skills, work_hist_skills, req_skills):\n\n if work_hist_skills:\n score = len(set(work_hist_skills).intersection(req_skills))\n else:\n score = len(set(skills).intersection(req_skills))\n\n req_skills_len = len(req_skills)\n\n return score/req_skills_len if score != 0 else 0", "def set_usg_percentage(self):\n bx = self.get_standard_stats()\n team = self.get_team_stats()\n tcInt = bx[\"t2p_int\"] + bx[\"t3p_int\"]\n a = tcInt + (Decimal('0.44')*bx[\"tl_int\"]) + bx[\"turnovers\"]\n b = team[\"minutes\"]/5\n c = (team[\"t2p_int\"] + team[\"t3p_int\"]) + (Decimal('0.44')*team[\"tl_int\"]) + team[\"turnovers\"]\n result = 0.00\n if bx[\"minutes\"] > 0:\n result = ((Decimal(a)*Decimal(b))/(bx[\"minutes\"]*c))*100\n self.usg_percentage = \"%.2f\" % round(result, 2)", "def percentage(my_list, item):\n return 100.0 * frequency(my_list, item)", "def adjusted_pa(personal_allowance, salary):\n\t\tlo, hi = 100000, 120000\n\t\tif salary <= lo:\n\t\t\treturn personal_allowance\n\t\telif salary >= hi:\n\t\t\treturn 0\n\t\telse:\n\t\t\treturn (salary - 100000) / 2", "def as_percent(self):\n if self.dynamic:\n self._update_db_obj()\n return f\"{(self._db_obj.progress / self._db_obj.total) * 100}%\"", "def propabilityLVQ(self):\n self.labels = self.labelingLVQ()\n for i in range(self.labels.shape[0]):\n for j in range(self.labels.shape[1]):\n for k in range(self.labels.shape[2]):\n total = sum(self.labels[i, j, k] for i in range(self.labels.shape[0]))\n if total == 0. 
:\n continue\n else:\n self.propa[i, j, k] = self.labels[i, j, k] / total\n self.propa[i, j, k] = round(self.propa[i, j, k], 2)\n return self.propa", "def get_model_profits(model, cost_benefit, X_test, y_test):\n predicted_probs = model.predict_proba(X_test)[:, 1]\n profits, thresholds = profit_curve(cost_benefit, predicted_probs, y_test)\n\n return profits, thresholds", "def _calcProminenceMetric(self, harmonicComplexity, metricalAccentLevel):\n prominenceScores = []\n profileScores = self.chordProfile.getScores()\n MAXSCORE = float(max(profileScores.values()))\n MIDVALUE = MAXSCORE / 2\n\n # step through candidate triads and calculate prominence score\n for triad in self._candidateTriads:\n code = triad.getCode()\n score = float(profileScores[code])\n prominenceScores.append(score)\n\n # modify scores based on harmonicComplexity\n attractionRate = self._calcHarmonicComplexityImpactOnProfile(\n harmonicComplexity, metricalAccentLevel)\n prominenceScores = RhythmGenerator.compressValues(MIDVALUE, prominenceScores,\n attractionRate)\n\n return prominenceScores", "def get_score_percent(self, value):\n qs_related = RoundData.objects.prefetch_related(\n 'shotdata').select_related('shotdata')\n\n round_holes = int(self.round_type)\n\n if value == 'par':\n return round((qs_related.filter(shotdata__nr_strokes=F('shotdata__hole__par')).count()/round_holes), 2)\n if value == 'birdie_better':\n return round((qs_related.filter(shotdata__nr_strokes__lt=F('shotdata__hole__par')).count()/round_holes), 2)\n if value == 'tbogey_worse':\n return round((qs_related.filter(shotdata__nr_strokes__gte=F('shotdata__hole__par')+3).count()/round_holes), 2)\n if isinstance(value, int):\n return round((qs_related.filter(shotdata__nr_strokes=F('shotdata__hole__par') + value).count()/round_holes), 2)", "def __get_suggested_risk_score(user_risk_profile):\n try:\n return get_risk_profile(user_risk_profile.user)['risk_profile'][\n 'value']\n except ReportWasNotGenerated:\n return user_risk_profile.risk_profile.value", "def participation_rate(self) -> float:\n return self.__participation_rate", "def percent_capital_according_to_probability(self, prob_pos, prob_neg, order_type):\n pos = self.calc_invest(prob_pos)\n neg = self.calc_invest(prob_neg)\n\n # TODO: CAN BE DONE BETTER\n if order_type == Consts.LONG:\n return pos\n # if neg < neutral:\n # return pos\n # else:\n # return pos - abs(neg)\n else:\n return neg\n # if pos < neutral:\n # return neg\n # else:\n # return neg - abs(pos)", "def percent_rating(value):\n value = Decimal(value)\n value = round(value / 3, 2) * 100\n return value", "def percent_community(self):\n total_cost = self.total_cost()\n if total_cost:\n return round(self.community_contribution * 100 / total_cost, 2)\n else:\n return 0", "def get_duty_percentage(self):\n container_line_ids = self\n hbl_customs_obj = self.env['hbl.customs.duty']\n for line in container_line_ids:\n p_line = line.purchase_line\n #Get the supplier from product by using po supplier id.\n product_supplier_id = p_line.product_id.seller_ids.filtered(lambda rec:rec.name.id == p_line.partner_id.id and rec.hts_codes_ids)\n #Get HTS code of the supplier\n hts_codes_ids = product_supplier_id and product_supplier_id[0].hts_codes_ids or False\n if hts_codes_ids:\n percentage = sum(hts_codes_ids.mapped('percentage'))\n line_customs_id = hbl_customs_obj.create({'hbl_line_id' : line.id,\n 'hts_ids': [(6,_, hts_codes_ids.ids)],\n 'duty_percentage': percentage,\n 'quantity' : line.qty_to_load,\n 'unit_price' : p_line.price_unit\n })\n 
line.write({'line_customs_id' : line_customs_id.id})", "def percent_usage(value, total):\n if total:\n return float(value) * 100.0 / (float(total) + float(value))\n else:\n return 100.0", "def precision(confusion):\n # positive predictve value = true pos / true pos + false positive\n # true posistive\n tp = confusion.diagonal()\n ppv = tp / np.sum(confusion, axis=0)\n\n return ppv", "def get_percentage_f_votes(self):\n\n votes_f = self.get_num_f_votes()\n votes_sf = self.get_num_sf_votes()\n\n # avoid dividing by zero\n if votes_f + votes_sf == 0:\n return 0\n else:\n ratio = float(votes_f)/(votes_f + votes_sf)\n return round(ratio * 100, 1)", "def percentage(context, num, total_num):\n\n p = float(num)/float(total_num) * 100\n percent = str(p) + \"%\"\n return percent", "def as_percentages(self):\n if self.e_pct is None:\n self.calculate_percentages()\n\n score_str = 'E/I: ' + str(self.e_pct) + '%/' + str(self.i_pct) + '%; '\n score_str += 'N/S: ' + str(self.n_pct) + '%/' + str(self.s_pct) + '%; '\n score_str += 'F/T: ' + str(self.f_pct) + '%/' + str(self.t_pct) + '%; '\n score_str += 'J/P: ' + str(self.j_pct) + '%/' + str(self.p_pct) + '%'\n return score_str", "def percentage_progress(self):\n\n if self.total_progress != 0:\n return float(self.progress) / self.total_progress\n return self.progress", "def percentage_progress(self):\n\n if self.total_progress != 0:\n return float(self.progress) / self.total_progress\n return self.progress", "def get_real_percent(self):\n if not (self.votes and self.score):\n return 0\n return 100 * (self.get_real_rating() / self.field.range)", "def get_statistic_for_user(self, attr):\n all_payments = Payment.objects.payments(user=self).exclude(project__isnull=True)\n user_impact = 0\n for payment in all_payments:\n project = payment.project\n if project:\n user_financial_contribution = payment.amount\n project_funding_total = (int)(project.funding_goal)\n project_impact = getattr(project.statistics, attr)\n user_impact_for_project = project_impact * user_financial_contribution * 1.0 / project_funding_total\n user_impact += user_impact_for_project\n return user_impact", "def _get_prochirality(self):\n for atom in self.invarioms:\n atom.get_prochirality()\n atom.invariom.get_prochirality()", "def leitner_proportions(df):\n denom = df.shape[0]\n prop_dict = {}\n\n for i in range(1,6):\n df_i = df[df['comfort_level'] == i]\n numer = df_i.shape[0]\n prop_dict[i] = numer / denom\n\n prop_df = pd.DataFrame.from_dict([prop_dict], orient='columns') \n\n prop_df = prop_df.T.rename(columns={0:'proportion'}) \n \n return prop_df", "def get_percentage_sf_votes(self):\n\n votes_f = self.get_num_f_votes()\n votes_sf = self.get_num_sf_votes()\n\n # avoid dividing by zero\n if votes_f + votes_sf == 0:\n return 0\n else:\n ratio = float(votes_sf)/(votes_f + votes_sf)\n return round(ratio * 100, 1)", "def calculate_profit(self):", "def add_percentage(grade):\n\tif type(grade) == float:\n\t\tperc_grade = str(grade) + '%'\n\t\treturn perc_grade\n\telse:\n\t\treturn grade", "def get_percent_interest(self):\n return self.__percentage_interest", "def set_assists_percentage(self):\n bx = self.get_standard_stats()\n team = self.get_team_stats()\n team_tc_conv = team[\"t2p_conv\"] + team[\"t3p_conv\"]\n player_tc_conv = bx[\"t2p_conv\"] + bx[\"t3p_conv\"]\n result = 0.00\n try:\n if bx[\"minutes\"] > 0:\n result = (bx[\"assists\"] / (((bx[\"minutes\"] / (team[\"minutes\"] / 5)) * team_tc_conv) - player_tc_conv))*100\n result = result if result <= 100 and result >= 0 else 0\n except 
ZeroDivisionError:\n print(BCOLORS.WARNING + \"Error: División por cero\" + BCOLORS.ENDC)\n except InvalidOperation:\n print(BCOLORS.WARNING + \"Error: Invalid Operation\" + BCOLORS.ENDC)\n\n self.assists_percentage = \"%.2f\" % round(result, 2)", "def fidelity_promo(order: Order) -> float: # <3>\n return order.total() * 0.05 if order.customer.fidelity >= 1000 else 0", "def profit_curve(cost_benefit, predicted_probs, labels):\n n_obs = float(len(labels))\n # Make sure that 1 is going to be one of our thresholds\n maybe_one = [] if 1 in predicted_probs else [1] \n thresholds = maybe_one + sorted(predicted_probs, reverse=True)\n profits = []\n for threshold in thresholds:\n y_predict = predicted_probs >= threshold\n confusion_matrix = standard_confusion_matrix(labels, y_predict)\n threshold_profit = np.sum(confusion_matrix * cost_benefit) / n_obs\n profits.append(threshold_profit)\n return np.array(profits), np.array(thresholds)", "def value_to_percent(value):\n return ...", "def percentage_complete(self) -> float:\n return self.__percentage_complete", "def total_to_proportion(total_pronoun_dict):\n if total_pronoun_dict['total'] is 0:\n return total_pronoun_dict\n else:\n return{\n 'first_person_singular': total_pronoun_dict['first_person_singular']/total_pronoun_dict['total'],\n 'first_person_plural': total_pronoun_dict['first_person_plural']/total_pronoun_dict['total'],\n 'second_person': total_pronoun_dict['second_person']/total_pronoun_dict['total'],\n 'third_person_singular': total_pronoun_dict['third_person_singular']/total_pronoun_dict['total'],\n 'third_person_plural': total_pronoun_dict['third_person_plural']/total_pronoun_dict['total'],\n 'total': total_pronoun_dict['total']\n }", "def getPercent(self):\n if isinstance(self.score,numbers.Number) and self.getMaximum():\n return (1.0*self.score/self.getMaximum())\n return None", "def get_attendance(self):\n\n if len(self.attendance_list):\n attendance_sum = 0\n for attendance in self.attendance_list:\n attendance_sum += attendance.attendance_state\n return attendance_sum/len(self.attendance_list) * 100\n\n else:\n return 100.0", "def percentageCompletion(url, workflow, dataset):\n inputEvents = reqMgrClient.getInputEvents(url, workflow)\n outputEvents = reqMgrClient.getOutputEvents(url, workflow, dataset)\n if inputEvents == 0:\n return 0\n if not outputEvents:\n return 0\n percentage = outputEvents/float(inputEvents)\n return percentage", "def percentage(self):\n temp = self.cpu_freq_time_spent.copy()\n for i in self.cpu_freq_time_spent:\n total = 0\n for j in self.cpu_freq_time_spent[i]:\n total += self.cpu_freq_time_spent[i][j]\n for j in self.cpu_freq_time_spent[i]:\n if total != 0:\n temp[i][j] = self.cpu_freq_time_spent[i][j] * 100 / total\n else:\n temp[i][j] = 0\n return temp", "def percentage(part, whole):\n return round((100 * float(part)/float(whole)),2)", "def weighted_ppv(y_true, y_pred):\n\n tpw, fpw, _, _ = get_weighted_confusion_matrix(y_true, y_pred)\n \n return tpw / (tpw + fpw)", "def test_get_skill_progress(self):\n self._build_sample_graph()\n self._add_student_and_progress()\n tracker = SkillCompletionTracker()\n result = tracker.get_skills_progress(\n self.student, [self.sa.id, self.sb.id, self.sc.id])\n self.assertEqual(SkillCompletionTracker.COMPLETED,\n result[self.sa.id][0])\n self.assertEqual(SkillCompletionTracker.IN_PROGRESS,\n result[self.sb.id][0])\n self.assertEqual(SkillCompletionTracker.NOT_ATTEMPTED,\n result[self.sc.id][0])", "def percent_waste(waste, resources, intmed_products):\n n_species = 
len(waste) + len(resources) + len(intmed_products)\n p = (len(waste) / n_species) * 100\n\n percent_waste = round(p, 2)\n return percent_waste", "def find_percentage(urls):\n # n is the number of pages that lead to philosophy\n n = 0\n for url in urls:\n if find_philosophy(url, [], 0) != -1:\n n += 1\n percentage = n * 100 / len(urls)\n return percentage", "def percent(value, total):\n if total:\n return float(value) * 100.0 / float(total)\n else:\n return 100.0", "def pct(self):\n\t\treturn self.bottle.pct()", "def spot_percentage(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"spot_percentage\")", "def spot_percentage(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"spot_percentage\")", "def interest_to_principle(self) -> float:\n return float(round(self.total_interest / self.total_principal * 100, 1))", "def prob(self, feature_index, feature_value, class_):\r\n\r\n deviation = self.conditional_prob[class_][feature_index][1]\r\n mean = self.conditional_prob[class_][feature_index][0]\r\n\r\n val1 = math.pow((feature_value - mean), 2)\r\n val1 = val1/math.pow(deviation, 2)\r\n\r\n val2 = 2*math.pi*math.pow(deviation, 2)\r\n val2 = 1/(math.sqrt(val2))\r\n\r\n probability = val2 * math.exp(-val1)\r\n\r\n return probability", "def get_row_proportion(self, index):\n\n assert self.has_row_proportion(index)\n return self._proportions[1][index]", "def percentage(a, b):\n return (a * 100.0) / b", "def _find_male_female_percentage():\r\n count, male = 0, 0\r\n for resource in resources:\r\n patient = resource[\"resource\"]\r\n if \"gender\" in patient:\r\n count += 1\r\n if patient[\"gender\"] == \"male\":\r\n male += 1\r\n if count == 0:\r\n return 0, 0\r\n return male / count, 1 - (male / count)", "def predict_proba_confidence(clf, X, y_true):\n class_labels = clf.classes_\n y_pred_proba = clf.predict_proba(X)[:,1]\n ent = [entropy(i) for i in y_pred_proba]\n\n return sum(ent)/len(ent)", "def completion_percent(self) -> Optional[float]:\n return pulumi.get(self, \"completion_percent\")", "def ProbCorrect(efficacy, difficulty, a=1):\n return 1 / (1 + math.exp(-a * (efficacy - difficulty)))", "def percent_signal_change_pupil(self, dtype = 'bp_filt_pupil'):\r\n\r\n exec('self.{}_psc = ((self.{} - self.{}.mean()) / np.mean(self.baseline_filt_pupil[500:-500])) * 100'.format(dtype, dtype, dtype))", "def get_percent(self):\n return self.percent", "def Probability(rating1, rating2):\n return 1.0 * 1.0 / (1 + 1.0 * math.pow(10, 1.0 * (rating1 - rating2) / 400))", "def get_total_prs_per_user(prs):\n return get_total_contributions_per_user(prs, 'user')", "def calc_profile(self, phases):\n raise NotImplementedError()", "def calc_profile(self, phases):\n self._profile = self._generator(phases)\n self._Amax = self.Amax if hasattr(self, '_Amax') else np.max(self.profile)\n return self.profile / self.Amax", "def get_country_percentage(db, percent):\n # Hint: Use get_total_stateless(db)[0][0] to get the total population\n # total_population = get_total_stateless(db)[0][0]\n # percent = (percent / 100)\n pass", "def ptpresionagua(self,prof_pt): #getter que halla la presion de poros en un punto\r\n p_agua=0.0\r\n if prof_pt<self.n_fret:\r\n p_agua=0.0\r\n pass\r\n else:\r\n p_agua=(prof_pt-self.n_fret)*self.gamma_h20\r\n return p_agua", "def percentage(count, total):\n return count / total * 100", "def as_counts_and_pcts(self):\n if self.e_pct is None:\n self.calculate_percentages()\n\n score_str = 'E: ' + str(self.e_score) + '(' + str(self.e_pct) + '%)/'\n score_str += 'I: ' + 
str(self.i_score) + '(' + str(self.i_pct) + '%) - '\n score_str += 'N: ' + str(self.n_score) + '(' + str(self.n_pct) + '%)/'\n score_str += 'S: ' + str(self.s_score) + '(' + str(self.s_pct) + '%) - '\n score_str += 'F: ' + str(self.f_score) + '(' + str(self.f_pct) + '%)/'\n score_str += 'T: ' + str(self.t_score) + '(' + str(self.t_pct) + '%) - '\n score_str += 'J: ' + str(self.j_score) + '(' + str(self.j_pct) + '%)/'\n score_str += 'P: ' + str(self.p_score) + '(' + str(self.p_pct) + '%)'\n return score_str", "def percent_complete(self) -> int:\n return pulumi.get(self, \"percent_complete\")", "def gpa(self):\n try:\n return sum(self.courses.values()) / len(self.courses)\n except ZeroDivisionError:\n return 0", "def profit_curve(cost_benefit_mat, y_pred_proba, y_true):\n n_obs = float(len(y_true))\n # Make sure that 1 is going to be one of our thresholds\n\n thresholds = np.linspace(0,1,101)\n profits = []\n for threshold in thresholds:\n y_predict = y_pred_proba >= threshold\n confusion_matrix = standard_confusion_matrix(y_true, y_predict)\n threshold_profit = np.sum(confusion_matrix * cost_benefit_mat) * 20 / 1000000\n profits.append(threshold_profit)\n return np.array(profits), np.array(thresholds)", "def SetProportion(self, p):\r\n\r\n self.proportion = p", "def sensitivity(confusion):\n tp = np.array([confusion[i][i] for i in range(len(confusion))])\n return tp / np.sum(confusion, axis=1)", "def percentage_update(self):\n\n self.event_update()\n return self.percentage", "def percent_using_relevant_words_by_context_and_question(self):\n total_student_count = self.get_number_of_unique_students()\n\n question_context_count_list = self.students_using_relevant_words_by_context_and_question()\n\n question_context_percent_list = []\n for item in question_context_count_list:\n question_context_percent_list.append((item[0], item[1], item[2] / total_student_count))\n\n return question_context_percent_list", "def percentage_change(old_value, new_value):\n\n result = float(100 * (new_value - old_value) / old_value)\n\n return result", "def enrichment_factor(y_true, y_score, percentage=..., pos_label=..., kind=...):\n ...", "def heuristics(course, suggestedPlan, user):\n score = course.score\n bonus = 0\n return score + bonus", "def expected_policy_profit(targeting_decision, g, observed_profit, prob_treatment):\n return np.sum(((1-targeting_decision) * (1-g) * observed_profit)/(1-prob_treatment) +\\\n (targeting_decision * g * observed_profit)/(prob_treatment))" ]
[ "0.624777", "0.5969812", "0.58800423", "0.57505316", "0.57186955", "0.57097393", "0.57036775", "0.56423104", "0.5622649", "0.5582169", "0.5582169", "0.55723554", "0.5554754", "0.55056655", "0.5428772", "0.53997266", "0.53909737", "0.53796136", "0.53647095", "0.53647095", "0.5341981", "0.53225017", "0.530566", "0.5294536", "0.5292997", "0.52811795", "0.5279743", "0.5279741", "0.52676153", "0.5254112", "0.5235644", "0.5223993", "0.5217148", "0.5203777", "0.5186397", "0.51856387", "0.5183992", "0.5178738", "0.51775473", "0.517004", "0.51152796", "0.5114778", "0.5113046", "0.5113046", "0.51004255", "0.50896937", "0.50779337", "0.50728714", "0.507218", "0.50716066", "0.50676554", "0.50637513", "0.50623757", "0.50590485", "0.505781", "0.50537616", "0.50493073", "0.50450337", "0.5028693", "0.5022454", "0.50217384", "0.50189614", "0.50142133", "0.50103676", "0.5007902", "0.50057703", "0.5005418", "0.4994313", "0.4982595", "0.49789354", "0.49789354", "0.49747282", "0.49740565", "0.4965366", "0.4961743", "0.49568596", "0.4954276", "0.49483007", "0.49379733", "0.4937583", "0.49344945", "0.49342555", "0.49324706", "0.49241787", "0.49237958", "0.4917862", "0.49074465", "0.49066317", "0.48983386", "0.4896669", "0.48941115", "0.48919302", "0.48903814", "0.48899058", "0.48897013", "0.48817492", "0.48728862", "0.4871972", "0.48705652", "0.48693684" ]
0.8257786
0
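A minimal standalone sketch of the get_proficiency_percentage logic shown in the document field above. The Skill wrapper class and the PROFICIENCY_CHOICES codes ('1' through '4') are hypothetical placeholders, since the dataset row does not include the model definition; the method assumes the non-empty choice codes sort into proficiency order.

    class Skill:
        # Hypothetical choices; the empty code is skipped in the calculation.
        PROFICIENCY_CHOICES = (
            ('', 'Unspecified'),
            ('1', 'Beginner'),
            ('2', 'Intermediate'),
            ('3', 'Advanced'),
            ('4', 'Expert'),
        )

        def __init__(self, proficiency):
            self.proficiency = proficiency

        def get_proficiency_percentage(self):
            choice_values = [choice[0] for choice in self.PROFICIENCY_CHOICES]
            if '' in choice_values:
                choice_values.remove('')   # drop the empty proficiency choice
            choice_values.sort()           # relies on codes sorting by proficiency
            value = choice_values.index(self.proficiency) + 1
            factor = 100 / len(choice_values)
            return round(value * factor)

    if __name__ == '__main__':
        # With four non-empty choices each step is worth 25%.
        for level in ('1', '2', '3', '4'):
            print(level, Skill(level).get_proficiency_percentage())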
If no icon is matched, use the default.
def get_icon(self): try: icon = self.icon.fa_icon except AttributeError: icon = 'fa-globe' return icon
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def icon(self):\n return DEFAULT_ICON", "def icon(self):\n return None", "def icon(self):\n return None", "def icon(self, value: str | None) -> None:\n self._icon = value", "def icon(self) -> typing.Union[str, None]:\n return self._icon", "def getIconString(self, iconName='NoIcon.png', alternateIcon='NoIcon.png'):\r\n try:\r\n if os.path.exists(self.IconPath + '/' + iconName) and os.path.isfile(self.IconPath + '/' + iconName):\r\n return self.IconPath + '/' + iconName\r\n elif os.path.exists(self.IconPath + '/' + alternateIcon) and os.path.isfile(self.IconPath + '/' + alternateIcon):\r\n return self.IconPath + '/' + alternateIcon\r\n elif os.path.exists(self.IconPath + '/' + self.DefaultIcon) and os.path.isfile(self.IconPath + '/' + self.DefaultIcon):\r\n return self.IconPath + '/' + self.DefaultIcon\r\n else:\r\n print (\"Error! No Icon found for: \" + iconName)\r\n return None\r\n except:\r\n print (\"Error! No Icon found for: \" + iconName)\r\n return None", "def get_icon(self):\r\n return get_icon(self.ICON)", "def icon(self):\n return STATUSES.get(self._mower_status, {}).get('icon', DEFAULT_ICON)", "def icon(self):", "def icon(self):\n icons = self._icons.split(\",\")\n return (\n f\"mdi:{icons[0]}\" if self.state != TYPE_RECORD_NEVER else f\"mdi:{icons[1]}\"\n )", "def set_icon(self, icon):\n icon = icon.title()\n if icon in self.rewards:\n self.icon = icon", "def icon(self):\r\n try:\r\n return self.data['icon_url_base']+self.data['icon_url_name']\r\n except KeyError:\r\n return ''", "def icon(self):\n return ICON", "def icon(self):\n return ICON", "def icon(self):\n return ICON", "def icon(self):\n return ICON", "def icon(self):\n return ICON", "def icon(self):\n return ICON", "def icon(self):\n return ICON", "def icon(self):\n return ICON", "def icon(self):\n return ICON", "def icon(self):\n return ICON", "def icon(self):\n if \"icon\" in self._typeconf:\n return self._typeconf[\"icon\"]", "def get_icon(self):\r\n raise NotImplementedError", "def icon(self) -> str | None:\n return self._icon", "def icon(self):\n\n if self._playing_tts or self._announce:\n return ICON_TTS\n\n if self._state in [STATE_PAUSED, STATE_UNAVAILABLE, STATE_IDLE, STATE_UNKNOWN]:\n return ICON_DEFAULT\n\n if self._muted:\n return ICON_MUTED\n\n if self._slave_mode or self._is_master:\n return ICON_MULTIROOM\n\n if self._source == \"Bluetooth\":\n return ICON_BLUETOOTH\n\n if self._source == \"DLNA\" or self._source == \"Airplay\" or self._source == \"Spotify\":\n return ICON_PUSHSTREAM\n\n if self._state == STATE_PLAYING:\n return ICON_PLAYING\n\n return ICON_DEFAULT", "def icon(self):\n if not self.device_class:\n return ICONS.get(self.entity_type)\n return None", "def get_icon(self):\n raise NotImplementedError", "def icon(self) -> str | None:\n if isinstance(self.wemo, CoffeeMaker):\n return \"mdi:coffee\"\n return None", "def getIcon(self): #$NON-NLS-1$\r\n iconXPath = self._getIconXPath()\r\n icon = self._getExtensionText(iconXPath)\r\n if icon:\r\n return icon\r\n else:\r\n return None", "def icon(self) -> Optional[str]:\n if not self.profile_device.icons:\n return None\n\n if not self._icon:\n icon_mime_preference = {\"image/png\": 3, \"image/jpeg\": 2, \"image/gif\": 1}\n icons = [icon for icon in self.profile_device.icons if icon.url]\n icons = sorted(\n icons,\n # Sort by area, then colour depth, then preferred mimetype\n key=lambda icon: (\n icon.width * icon.height,\n icon.depth,\n icon_mime_preference.get(icon.mimetype, 0),\n ),\n reverse=True,\n )\n self._icon = icons[0].url\n\n return 
self._icon", "def icon(self):\n return self.ICON", "def icon(self):\n return self.ICON", "def get_icon(self):\n return self.ICON", "def get_icon_class(self):\r\n return self.icon_class", "def set_icon(self, val):\n self._icon = val", "def getIconImage(self, name: str) -> Any:\n # Return the image from the cache if possible.\n if name in self.iconimages:\n image = self.iconimages.get(name)\n return image\n try:\n iconsDir = g.os_path_join(g.app.loadDir, \"..\", \"Icons\")\n homeIconsDir = g.os_path_join(g.app.homeLeoDir, \"Icons\")\n for theDir in (homeIconsDir, iconsDir):\n fullname = g.finalize_join(theDir, name)\n if g.os_path_exists(fullname):\n if 0: # Not needed: use QTreeWidget.setIconsize.\n pixmap = QtGui.QPixmap()\n pixmap.load(fullname)\n image = QtGui.QIcon(pixmap)\n else:\n image = QtGui.QIcon(fullname)\n self.iconimages[name] = image\n return image\n # No image found.\n return None\n except Exception:\n g.es_print(\"exception loading:\", fullname)\n g.es_exception()\n return None", "def icon_url(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"icon_url\")", "def icon(self) -> str | None:\n value = self.entity_description.icon\n if self.entity_description.key == \"weather\":\n value = self.state\n if value is None:\n value = \"sunny\"\n elif value == \"partlycloudy\":\n value = \"partly-cloudy\"\n value = f\"mdi:weather-{value}\"\n\n return value", "def icon(self):\n return self.__icon", "def icon(self) -> Optional[str]:\n return \"mdi:water\" if self.is_on else \"mdi:water-off\"", "def icon(self) -> str | None:\n if self._pending_state_is_armed is not None:\n return \"mdi:alarm-snooze\"\n elif self._is_armed:\n return \"mdi:alarm\"\n return \"mdi:alarm-off\"", "def _icons(self):", "def icon(self):\n value = SENSOR_TYPES[self._type][3]\n if self._type == \"weather\":\n value = self.state\n if value is None:\n value = \"sunny\"\n elif value == \"partlycloudy\":\n value = \"partly-cloudy\"\n value = f\"mdi:weather-{value}\"\n\n return value", "def icon(self):\n value = SENSOR_TYPES[self._type][3]\n if self._type == \"weather\":\n value = self.state\n if value is None:\n value = \"sunny\"\n elif value == \"partlycloudy\":\n value = \"partly-cloudy\"\n value = f\"mdi:weather-{value}\"\n\n return value", "def icon(self):\r\n return self._icon", "def icon(self, new_icon):\r\n self.set({\"icon\": new_icon})", "def api_get_icon():\n pkg_name = request.args.get('pkg')\n if pkg_name:\n pkg_files = Database().db.get_pkg_files(pkg_name)\n for src in pkg_files:\n if src.startswith(\"/usr/share/icons/hicolor/32x32/apps/\"):\n return send_file(src, as_attachment=False)\n return send_file(\"static/images/null.gif\")\n else:\n src = request.args.get('i')\n if not os.path.isfile(src):\n #abort(404)\n return send_file(\"static/images/null.gif\")\n return send_file(src, as_attachment=False)", "def _detect_icon_path(self, icon_name):\n\n rel_path = \"share/check_mk/web/htdocs/themes/%s/images/icon_%s.png\" % (self._theme,\n icon_name)\n if os.path.exists(cmk.utils.paths.omd_root + \"/\" +\n rel_path) or os.path.exists(cmk.utils.paths.omd_root + \"/local/\" +\n rel_path):\n return \"themes/%s/images/icon_%s.png\" % (self._theme, icon_name)\n\n # TODO: This fallback is odd. 
Find use cases and clean this up\n return \"images/icons/%s.png\" % icon_name", "def icon(self):\n return self._icon", "def icon(self):\n return self._icon", "def icon(self):\n return self._icon", "def icon(self):\n return self._icon", "def icon(self):\n return self._icon", "def icon(self):\n return self._icon", "def icon(self):\n return self._icon", "def icon(self):\n return self._icon", "def icon(self):\n return self._icon", "def icon(self):\n return self._icon", "def icon(self):\n return self._icon", "def icon(self):\n return self._icon", "def icon(self):\n return self._icon", "def icon(self):\n return self._icon", "def icon(self):\n return self._icon", "def icon(self):\n return self._icon", "def icon(self):\n return self._icon", "def icon(self):\n return self._icon", "def icon(self):\n return self._icon", "def icon(self):\n return self._icon", "def icon(self, icon):\n self._icon = icon", "def getIconPath(self):\n try:\n return self.primaryAq().zIcon\n except AttributeError:\n return '/zport/dmd/img/icons/noicon.png'", "def get_icon(self):\n if self.verb == \"C\" or self.verb == \"A\" or self.verb == \"K\":\n return \"fa-comment\"\n\n elif self.verb == \"I\" or self.verb == \"U\" or self.verb == \"O\":\n return \"fa-users\"\n\n elif self.verb == \"L\":\n return \"fa-heart\"\n\n elif self.verb == \"F\":\n return \"fa-star\"\n\n elif self.verb == \"W\":\n return \"fa-check-circle\"\n\n elif self.verb == \"E\":\n return \"fa-pencil\"\n\n elif self.verb == \"V\":\n return \"fa-plus\"\n\n elif self.verb == \"S\":\n return \"fa-share-alt\"\n\n elif self.verb == \"R\":\n return \"fa-reply\"", "def icon(self):\n if self.device_class:\n return None\n\n return ICONS.get(self.tesla_device.type)", "def icon(self):\n ret_icon = self._icon\n if self.player_name == \"lower\":\n ret_icon = self._icon.lower()\n if self.is_promoted:\n ret_icon = \"+\" + ret_icon\n return ret_icon", "def get_icon(self):\n return self._icon", "def _leadingIcons(self):", "def get_default_icon_size():\n return _QSize(ICON_SIZE, ICON_SIZE)", "def icon(self):\n if self._type == 'birth':\n return 'mdi:calendar-star'\n elif self._type == 'wedding':\n return 'mdi:calendar-heart'\n elif self._type == 'memorial':\n return 'mdi:calendar-clock'\n else:\n return 'mdi:calendar-check'", "def icon_app_string(self, lang, for_default=False, build_profile_id=None):\n\n if not for_default and self.icon_by_language(lang, strict=True):\n return self.icon_by_language(lang, strict=True)\n\n if for_default:\n return self.icon_by_language(lang, strict=False, build_profile_id=build_profile_id)", "def icon(self):\n if \"icon\" in self._prop_dict:\n if isinstance(self._prop_dict[\"icon\"], OneDriveObjectBase):\n return self._prop_dict[\"icon\"]\n else :\n self._prop_dict[\"icon\"] = WorkbookIcon(self._prop_dict[\"icon\"])\n return self._prop_dict[\"icon\"]\n\n return None", "def icon(value, size = QSize(16, 16)):\r\n return value", "def _set_icon(self):\n if self.current_status == 0:\n icon = WORK_ICON\n else:\n if self.break_count == 0:\n icon = LONG_REST_ICON\n else:\n icon = REST_ICON\n self.status_icon.set_title(icon.split('/')[-1])\n self.status_icon.set_from_file(icon)", "def icon(self):\n _LOGGER.info(\"icon for {}\".format(self._sensor_type))\n if self._sensor_type == ATTR_STATUS:\n if not self._state:\n return None\n return DEVICE_MAP[self._sensor_type][DEVICE_MAP_INDEX.index('ICON_INDEX')][self._state]\n else:\n return DEVICE_MAP[self._sensor_type][DEVICE_MAP_INDEX.index('ICON_INDEX')]", "def GetIcon(old=False):\r\n\r\n # Imaris icons used 
with permission from Bitplane\r\n    Icon8 = \"eJzsfQd4VNW2...\" [base64-encoded embedded icon data omitted; the blob is truncated in the source]
KCZOpB5pIz9sKpGNXHcVEq+SL48gTqii/FA/9lWrOG2L9FK+xCO6xDF595gmu/8x/qfABn+Ffwtt9J9WAa+GvvwQDrVQFuvzzukyOf6XJCaQ8t0n0/Jz3OfbZ75l3e+Kd+ZZiXeLdRq9acG/JvQbBlGCimYPoS/9X/7kCGPfNhRVxn5cmrRF+ZTGwZxfwMtV8Y+mcTKdz8NYc4J0XqAfs+AO15ddRqDiH5IRjCA4+iACqA729yQNGSRrQ9QLOCWHe1COOoDph8Hahg0C3LYRNpIFNQgei7mEvIPA4YsiA9UiJeGlVdvKLaC5bjuKk5VAlrIQqfpW4RqsoczPHPdKy3ojKzk+2yysosc/Jz7fjPQkTi/Usxlf8/kdbza4P2ifOfGf85Hkfl1ZU1zQ2zZxYLjv8oDr2Z1THUdyn7ztcrn51QVXi0dNVkQ8F981yKd6f5J05Z3Dc8/M5vM5iqgQ+RwspT84kHXQUSTlANeJ3cX1kFvWfafoHEdz7C3j3fhV9/1H+F/zztawvwI9yZ5DNOgTpL4WjRd1M5l+eJq274vqvyPMeFGZnkNZji+j7vIl/9n3O68y3NfFtTXxroY15LfcmpD9D+l+GxHsf+t+MXvR+TKn24zW/vE6Y1w7t2AG8vUCq9SbR/32lQ3r+ygw6B83koyXXkC0/h9jY4wgPPyw04Ef5y8fnAPy9H3mB57B9mrpgH3zJC8J8dog+gXUQ4LoVAdTzBjlvRsAgqhEdNiDUnnKD7SaED9yEqAHb/lCmvLRDlbYMbZXfoDJ5FSqI+4q4H1ER+xPKY3eiTnb4dkfJue8WdFzrUpaG2lTX1rtWVLwQ9jLx0lq97t22CXPmtU+YvaiqvqG1ecyMxaPbp0yta3jh9dbOttmVDaOnNmVd+7kqgmq5+IeC+ydjfqIG7P1Ta6TzoOV9UZd0fGWyxP/sBun3Womrck9pnIbXWqVSrx6hvwo+vZfApEfVX9QApehJ/MX47oafDdVI1He7mbSt6eY/S1rHVTgKyOXx5b7bRe0/inI/xz7HvRXxrcu1FuYCahHzumAN9NPLwXB6X9xbctxPng1sJM9/+yU6D3Kp3+cxH/6c7PsdLfcp9n9Gad4VJCefJf5PIirqGMLCDlEeOITAwIPw999PWpD8gHWgrQ18R+yB1+C9CPbcJfoEX9KB5AXbhBaCnKhGcNiOcIetCLPbTj3iDsaF4pR/X6zLXoWO6nWoSlyHyriNlKO3oTp2L2pjjqEh5hJaU3/l2mRDdXOW6wuzD3Z+9SEwv+vAyrbxna01Tc3tvLdxdVPL5OrGxintE+bvam55bUWD/NofHfSZW+gzNiY+FPUc89+hyfN8ZI9nX3+xQ4pz5pprYa0G+Dm/tmDso/hvo/NWFcQecA+5HleQZnkaUUaU63ovpXzb+BT+JQ08T1z4e2yR4t/wY74mRKyJkyfh0borHv9xOocMw91ijpf5d6K8z3Gv5f/vNGCmw7859Z3hA/aIWo+57yQ9r/5W4p7zGPv+ZPr+dNL19Gb6Xu0D1KpuEvfXkCm7iMTEs4iLO0X8n0BE9DHygaMICTlCXnD4Tzrw85ZqRK/he4UOfKhfDCA/CCK/Cxy8GyGUI0JcCM7kEYN2I8JhN8Lt9iPa9gASXHYdUyV+cbclbyMm1W1BXfJW1MXsQUPsQTRGn0Z9FPXgUXfEe62qqxveWn7+A47/MaprqG2tGF3T2DyReJ/c0NyxiPLBlw1lK3c2pF4Hc99J/E4p4Zrv927+tWDeF7ZJNe9L4yUw17rguOfcP7OJ6oAqyTu4D+A6UkV9QIHnNSiJryizbQjq+zEc+o39i/iX4Ga5FIEDSCuGn8HDcJLoAbLC/riYJtdZdzHsJuRmB8S1/Oz/Ev+PcrykgT97AfPPMKb/7Ua/p/B9KK3ppfc8dRawfLnk7/z+2xOkeR7xeUjTTUW/Q537C/KzriE95RLi45n7M4im8x8RI2kgMvIkIiJOkBccQ2iopAVdTwjyk+pEf589CCYEkh5CqG8M1yCCckUo5Yow6oMjnA4hfNBhRDseQbzDcWR47jhQmbSa+q4dmFy7Gw3J+ynujxHv59AcfVXwP5o8fHThT+UtqpuHx5b/grps0kbh9tPNo9/6YUzb4i3NdSuP1Cn3/VabeB0TiWvmfxJ9tmkqqcfjep/BMTynScK8ZinXazGvTYL2+exmKT64NpikknTDtaO4Pir0oeg5C4deR6TVLuL0Cww2mvqUMaBHOaA/1YgBA9aLnx2hPxP97bJtFAE3PxR7t2jX3voAOTZnkd6PzqPeYuH/T4t7c22d3133FYo6Id76JIo0+z3xXiAz5lFd/zYwa+wj7lsp9nmMlOeay1Pvo5D6G3nydSTGXSLOzxPfpOmoM8T3GeL9dDeiok4JsA44L0REHBN6iCQ9hJMvhJEeQgIOIJzqRUYo6SGCEEV6iCKPiKX6McZrP2JHUNwPO4TkoYeRNPiouL4mL3Tz3qqE9Rgt24825SGMJu9pirqA0VHX0Bx7G00Uw43p5z5pKv75TlP+L6jKPo7qnOOozT2OesUpNGTtQm3UzT9Y1+M0/HP8swY497MHsO9zHHPsM9jTGZzbmWd+3KUB1wPaXoB7Qc4VYzT8s45YU/w/1P63EW9DNZLJSowyWoBn/8L/e4iarAXBVhsF/159FsDSsjgq23vDBIVmLw+x5of+dp7Xr8jqf1Ks4eQcwDwbUWwbiTyv1vi91vOLqUeYi3jLw6KHEHu6FEp7gsyaT3lyjnQ9GdctY+IkCP+iXJMfdRfyqJtIj72KxMjLxCNxH3aO4vssxfcZ4lbSgKSDs6SNs4ihfloLKT9I/hBHiCVNRJEeYkkPMeQP0aSJWNJEDHlEnP9Bqn/o6H0QiaMOIcHrMJJHHkWGF8f/KWQNP4/C8N17qhN/QiPVnU30vxojKPcnXyW93qSccBPVCRdQJ7/xR232L2jIvo5q+SnUp5+gOnHr/cYYPCCIOp91PkEp8dOh8Xvmi/nnOObajzG1QgLX/Lr1oO4YwNiiR9wzRmvyCNdNjDrSVZrHMYRYfo8AgzfR65myv4x/9oZQC+qJ9b/AqD4v8VqxqnzFjIlZgQ+692nT7scl5p4HnUZmz4OIpFrARW8CLMj3ub+zob5/MHl8ZN+1UA65KXjX7gPEez/xOvHJ06RrycR7T5F4b+a5jADp+iJl4K+Qh/6MtLBrxNdlRAVdoNgl7gPPkp+fJV+XwFoIDz9LGjgnfCE29qzID1pwnZCccEaDU+Qhp5BAdWNy9EkkkiYSSRNJERKSSRcpQceQEngcaf4Ev1OQ+51Bjt85KP0vo9D/Ol+jeagqZf1Vzv3MfyNpczT1dHXkBVWUi2ro/9dQbqpNPI6apH2oSd57lz7ffY77jiyp3mNv02qgXQPd+k+L8Zo6cOwT4HOmy7ngXXNknbB3cF3woub8ykedQqC1VNeb9Kj5S/55fGhkvw8R0O9TjKJ+0d6qqiM/66UFyrBvDyrkj/bn092bL9f3VxSM/Bn5g68i1/0q8obdQq73Q2nPNm28K6W9vnj/p7oWigHKbc
1lklex17dSXDRSvKuovyikukXpcwepATeQGHgVccR7uP95yt/nEOh7luq5s/D1faSBoKBz5AXnyAvOCQ1wfoiJuUDcnyfuJaQS0pIuEM5R/XBOHOXUPzDSSR9ZiaeRQZxlkmdkEK+Z5CeMbPqbeeHnCZdREnEN6rCb4HG8quhfL1WnbthRE7cPdeQDgvuIIyiLPIiqqEOiN6hI2ni1LubGRe5hRC0jl/jn3M/+rx3fY9610K3/dDHmLzBaB+M1uWOmZjyI+wTuEblOUJJvBlNfE2L8Gez7tf3lGACPDzn3eAVBfZbDp+erPAawsDDz9SWqqoo2RcCdO9wHavdr0+pAoekNeI8ehaZO1HLOPs+8KyqktaJlxHm1Snq/HAfC70lD1X48t8T7etyDwvsXpPleRZLvZST4X0Q08RzGfBPvfqPOIMDnTLcG/EgTAeQJgYHnhQbCiauIiAvkAxeQGHsRSVQvpCTSMemSqBszZZeRlX6pG8qMi8iho4L6CWXqRShSLiCXdKKk38mNv4i8+MsooPguir8OdcJNVCXcEfMu2vhtybi9oybtwK7y5FUb1QlrdqsTVm+vTjhwtCH++j7i+yHzrQXzzpik4/tC/0+O6/8N30+L9wmafDFZh3vuDZbMksZLXyUNFCdeRYDzfoSZr8JIkxmaGvDp/Fv/swuhvZaL+Hc0b/ygKOOdfxerRnc2ZX+frAh+CK0GuvfnUz4O7f6NzDt7Pdd4fO0W7wlRnyeNbzZTjddOXj86SBqrUA2nXmXEHWR53kCG92WkeF9ErC/leuKYEUo+HOx9WvDPYC0wAnzOCQ34kz+wBoLIK0JDLyKGNBAXdUniP+ESZMmXIU+7QvxfJY1ST5R9FYWKayjKuSqgUl4TKKXvqem1UvoZFfUaFVk3UCa/iZqsO2jM/g1N2ffFOee+RJunuX7n8clpRVK/qkWHDp58znWftvYbX/B0frUer0WLBq2avKAdG+T6gOOea0LuA3ksgMcFeLx06SvAm1RfVyluIWzkfnGdpK/x6+Tzpd0e0Kubf7WYCzLUG42QXsvg1fNlOJk1rSrJfP/jYtWYqe8uXeJYl7JlQVbgQxHn2hgXOtBwLvbzVEo9Ha8P5msGVZrrxtjvmHfO8Y2U46tGEPdDH4ix6vwRN8X1TWleFKte5xFHHEcT37HEexQhwkfinxHi84h/rQYCSSvBpIGAgAtCA5FCA5cQF00eEnOF8v4VZKReJQ1cQw7xWqi4gWLlDZTm3YS6gOq24l9QU3JToK7olkBt4W00EBoLf0VrMfFe/EBceyziTNubNUm1+Zw6qdbiuYoujSa04P6eofu9SU9AcKmp51o11zg/Ce3Yb1fVo/FgBvcBzL3oD5qkMQEeG+L4/9dLdCQPaKD3H0G1bZD9JgQa/UczDlio4b/4Mf6foyOvHRvVcyHzv0WV9eGKkuIJM5csWeK05dDnrnnDNr6V6/Wz2LNZ1/MZWTxGzPs9aThvkEu1zmjy+VbK702U36s9gEqPe2KOiq9Xyxl6GZlUW6ePPIuUkaeR6HUSsYRobwmRvhK0GmAvYA0EP8E/I8jvAungIvV5VDMEX5Y0EHWF6r2rlAeukgauIzeD545+Jv6Z+1uoKLlFOekOGsooxsvuoqniLlorf0NLxT2B9orfqT/5A5NqpHEoPr88FifG4zo04zJjJd9doO3btZrg3k3Ty82uefT4v4Iut/x86n8BXf55fICvk+QxqDfIA14jHYypfIi44OPwd96JMNMv4KTfrhP/j7jX8u/z3BsYKfG/r0KxbLW6dNKsxW8scfzk089GfLGj0zjb7St1jv0xsS8z7/HD1yDx3g88d8ljDzyXxfVcnY90rX6d2x+odrkr5qVLXW6g0P0ylHy9ugfVW0NPI3XYCSQNPy76rQTPYwJxo44R/8cFYnxOCA2wF0QSdDXACNFoQMs/IzTwstBAVBjFfzTxTxpIozyennKD6tKbKKQerST3NiqKf0W96i4ay39Dc9U9jKm9j/b6B5hQ/xAT6XxOJc5ntEnjE3xOOZ8uniGB66vFUzVjsk/ogcfutBDa0IB9gzG7+dE4z2wNd0/r83X7fS202niaD3Tx32mT5sl5zHwhvbdW8o348NPw8diHsP7fwctsDnTXAvTWcC/xXw6PZ+diRM/ZcDRrOl6e88nqctWMua+//rr9suWfDpu/YKF1S3OrYWbsTKs06++O5VsdQaHtORS7XEKF61VUOP8sUO18HRUO9NzhMkoHXSRQ/+x8FgVO1E85nUKG2wlkuB8XvWnKsCNIHH4YSSMOIY4QP/IwaeAI+cDRbh2wBtgPYnwkDURqNMC1YRjVAKHEfxjxzwgn/iMCLiFSo4E40kBi5FXSwXWkxd2guv9nqvtukQ/cphxwh/z/N9Srmf/f0Vb3B8YSB5OIn66x0jrX+XQeX+qSYonzKc9FcW319jwpz749S6OJqRJe09GEVhcLNWO4nJ8XaPShO47HYA1M12jgSd4nP+H7nAt4LGCcBmJcgL/Hr2t00NUq6beeckoS1cSeHgcQYrcO/sZvweC5ikfXAetV4vlnytCT0PufFXB6bjKG9JoBB7OGU5WKz9dWls6d9fLLL9t9+NGyIQsWLLBuaWkxVKsrxf0341wXl6XYfHY3e8BGZJvvRq7RARSYHkWR2QnkWZxAvuVxKKyOIbP/cWRZH0eGzXGxdlVG/pHseAQJTkeQ5Ep8ux9G3JBDYtwtTkcHrAEJxxEl/OBxDURRfx71hAa0/EdQzx4ZcAUxIVdIA9eQQD1cUtQNpMTcREbSTar7b6OYarvyvLuoLrqHutIHpIHH+ec1B1ruFzPfL0h5VYt3dbSwZNYjX9DO0WjnaThfdI/Zj/3zOO6T3Otyrp37754P1NQK4zW1AY8JtBY9qh349Tb+uQppvZYq5wF97isYOfSgmOfiddIO+i3dNUBvXhf8rBp9nysjVMC2x1jiv4v5P12d8/UPFaULX1qw4AWbfy/9j8fc+fMHtLa2GlVUVPQtLi7uk59bKnQQ7vhCRdyg92/I3FYiw3oD5DbrkG6yCTLzrciw3I40qx2QWW9HqvVuJNvuQbLdXiQ67EeS4wHEDzqIWGfin3SQ6HFI6EB4wciDf/IC1sGTXqDVQBRpIJrqwOgnPECrgZjg6+SD10kHkgbS4m5BkXwbBRm/0jn6jXRwDzVU57XwmCpxMIW4mdXxKP55foI18I5GA3zUcq/1AtaALv9/x70u77qc6/Kty3U3x5pe4MkxgCdRq9lTOyf5N6H94eSx3u67+RoneJlP7x73FevCe6jQj2DQsxxWPRvh3mcyHMxrT1Qpv11TXvzy0jmz5w5897333ObOnTuguaXVqKqqql9paWnfwqLivkVFRX3z80v1WQfRw2Yo4oa9cSjR+SPK7V8izfk7JA1Yg0SLH5FCHpE4YBOSB25Bgs1WJNruIB3sRqL9PsRpdJAwSNJBEmkgbrCUE5KeyAmsgTgdL+A+IY40EKfRQEwA4wJiNBqIJQ3EBl5FfBB5AGkgOZzxM9J4bDnmFrKor1em/orCjN9QR
j1eed7vqCt8KPLmhIZHOmAv4JzKOmA/WKTB6zMkcG3ANQLjpclSDp73BOddGr6nNkg+zXXlRA3n43T4btUZ59PltFEpgdffa1GXJYH34alK1+yrnaK5b0I89ewxd4T/DaM8O9Kd+kDbH0UO4DXMHP899Rph2KuEoCKUwax3LZz6jYetRc3+KuWa79QFix68+68phkvefMt51uw5Vhz/xL9+qUrdr6i4pB/5gD6DtGBUXKK2mPZ6rnlR7kxlis+iHQnuS8GekO62Gim23yLRag0SBqwnHWxEkvXmbh0k2O8hP9jbrQP2A17fwTqIH/qoNmANJOp4QbxGA9wvxvhIGogjDfD4gVYDMawB8gBdDSSGkgbCbiIlgnwg6hZk0beRE0+9aNI95MnuQ5X5ABU5D1GdJ3loG3PUIOVUrgVnj5fAHGt5ZswdK9VfM1ulfoH5fpLrCTpca/275SkxrcvzkxwzeB8UsRdOkgSxLyLfOyeGeKcaPDv8ITLC7yM25BbVxFcxjOrqwa6cA7aINZEe5u2C/+f06mFEcW/ctxTGfcrEPgcOBq3Ef9XmGuX6rysL3sWSD7P6vrZo0aDpM2dYUf43qqyqZv71yQP0S0pK9ItLSg0Ixip1mSXfn3HS1Gne81+Z4ZWbPSEh3vvlLYmDlyKD9y8iHST2XyV0kKLRQTz5QaLNdiTZ7pJ0oOMHrAOuDZLIC7g20HpBvKZX0HoB6yCeNJBAGojV0UAc5QIeR4z3u0THJ3wg6AZSQ24iKfQXOt5CesQtyKPuICvmLhSxv1GPe4/08DsKUx8KH+Xxq0aOxyJpHTrnV567mlDzOHQ51uW5VYfnv4pnXY55nyuGLs9ajhlFMRIKoiS+eS/WLOq7MkOoHw/5A6nB9+lz3UV00E3qky8R/yfg6npYXOfH17rymm9es/4c1X8mvRrEfle8z4mxfjlsjBpga1Wxojb3p6+qi5di0deWvV546SX7qdO6LJuam5l/A+Jav6RUZcAgHRiWqFQmZeUV/Ruamp2mTJ3mM23GzMCyiioPVW3+qJTo0Xkxw146nOD+AdKHf40UxzVIsiA/4L3L+m/ozguP6YDqA9aBNidoddDdK+jogDXA4wbxo049poOnaSCRvIDnFpICrwsdpAT/TFogLwiWdJAWehuysDvIiLgDeeSvyIr+DTlxpIOEP8R9SNhb2WP5mli+LrpGw532yHxy7q3RiVstr3/H7ZP8ajlmfhk836JFroZnvn9WRhCQHvgQ2UF/QBb4gB4Tgu4hOfBXJNLniaTP6TfqAkaMOAEXD84Be8U1r7wW2tG8UfSAJv8YA0vj0u79rqxNyQMGlC2uyduyvLJoOeZ99o/ec+bNs500eYpFY9No9n8DtVptQJwz/4bkBcalarVpeWX1ANKHy5Su6T5dM2cFEP9uhSXqwNwCdZC6vjA4OWLs6EjnV6/HD/oAqUNWINH6ayRYUo3Qfy3lBSknpAzcJnJCPNUGrANtjZio0yvw3DzrgKHVQfzI40jS6kCjgQTvs0j00XiB7yMNMOLJExMDrmlwg/Tw85+0IOEOZKG/ITP0PrLIT7PDHwhvVWrijv2W71mmhZY73cdPxuzTeFVoeNWCuc0OenTM1vCcQTzznCxznRbwOx3v0/EeUv1/o+Nd4v4OfZbb4nPEEfdhflfg53We8v8xwf9g1/3wc94s1rzwfIAR1QE8BmBlUCH2PuV963i/MLv+6qnVBdv+XVO0Am2zg42ndU237ujoMKtvaDQsr6g0IL4NmXuVSmXE/JeVlZlXVldbU33oMrVrhs8MiX/XgmKVX35xaVBhqTq6sFgdVTY60zs+aPK8aOe3iM+PED/gc+EFCZbfI7n/OrGfHeuA9zdjL3i8RjwsesZ4tyOP5QTWgXb8SKuDJI0X8Hgy6yDB+zwSSQdJpAPWgtAAnRtGAmkhwf96txY4N+hqISX4toin9KC7IrbkIb8TD7+LY2boAwH2XC0yQx8KZAQ/fOyx4inI0sRuhgbMLSOTuGV+U/3vQ078MseyQIljLZhrGXEtE3zfIt5/ETrm98+5LoY+Y4jPBfh4nhL1H/Pv4XIAvm7bxNqXYJM34WTVIPjv90/eP79U7BfM+186DFTV1BRuX1JbtBr1LQ0OHZ2d/dvb203r6uoMy8vLDYh3Q+H7pcS/Wm2iKqsg/msl/qd1eRP/fuQHLvlFpT4FJRXhzH9BiYrv4RtBiC5QFSfFec7bGOP8Pvn/csSbfYVki9WiV+C9DFM5J1hTfThwl+gZE+z2kw4kL4h3lLyAdcBekDxEO4Z0ROiANZBA+Y7Hk1kHiV6nu71A6IDOia4O4nwv6+hA8gVJD5Iv8HlNp/Mrne9fH+NAC4mbe/8jcMzy72lj90ludfnl/59G7yPFn7wpgHTp/7MAv0etj0nvnT4H1bxc+/K4CI+b+1KdxP2fs8dhaa3X4O1i7QOvfeW9aMnrRR/4aM/jMjjbqwJrSrYtrC9ei9rq2SPGtLdbNjc3G1dW1bL36wvuS0oMi0tLOf5N1GXl5kW13iZtY6YP75zaNoT49yE9OOcWFnsWqcqiKQ9Ekw9E5haVRtAxjrwgraCoaXyO7OWLicPfR7TZh6QBygnm3yDFfC2SLDeIfS2TBmwR+xsKHdjt1ejgULcXJIlxA0kHj8YS6fkIyQsSRp4UOmANpNC5iH9CBwxtbtDqQNQI3edU0oF0zn8hbm51HyVNSODnWp60nOki7Ynn2t/R/j0tr7rcSrje7U2P9HmlO49xbcPvn+scqd45J/ogaYz0BIKoV/KmHDl06EGq/w6K+Gf+ed8PXvsYbLoEbtQLaPavFntE21urxfWfdeqtk+uLN6CualFkfWOjBcW+EcW+flGxivs9A4Ih9YCGecWJBm11n+ePa/gRnW1rMKdr/YapM1vcauoaBikLioYXq8rjKfajKe4jyQ8YceQHGQUFnctKC944mkN+neiwAdFGnyKBepMUs5VIMluDVIv1QgOpVluk/S6tdz4aPyIvSHI4LHmByxGkkBcIHZAXpGo0kDj86GM60HpBMuUErQ4kXOj2hOQnaoR4HT9IFTr4ayT+BXR/JkHDZ4IOr1pu43W4lbR4SeJW6FOHXw3H3PNK4x88DnYCMb7HEUmI8DmGcJ8jCPc+jACv/Rg1cj+GeRwQuX+I2x6qAXfC3/UnBPOaeMsP4GM6Hy4WzcIHmH/ngRViL6Aa1baJdapNqC9fml9RVWNeUVHBMd8vv6Cwb35+fr+CggL9gkKVYVPVh/Et1evQUvPh7bFjXp7XNfXNFxfO+eG96tpSG0V+wRDiP4HjnzQQRbEflVesSswvKc1QlU1dV5Dz+rFseu+8F0bqoK2INPy6WwNppIEU83WQWf6EFMvNSBuwTWggdaCkgVTSgPACHksedFRogMEaYC94TAekgRTKCcIPPP9rHWi1IOGKQLzm+DQ87TXmUBfMpxZS7nkUt1qIMQwNmFuGNMZxkl4/2c1xDHEcTRxH+B4mrg+JOd5wnwMI895PvO+n414BvxHSGpih7rsF754U+z6DtyLQbSOCeC001V9B
5m/B23Qm3C3HwLl/I1wH1qU72Mn1ast2VJMHoLFq+bhSNfl7cYlBXn5B39zc/D7K3Ly+uXn5/UqKG42aytehTrXsSl1LbWxz+/jUzqldWfMWLKqb1Pl2YE5eoXuRuiKR4j1G4wF8TMlTKVOramf9lhqxfHuO/2ko/Y4gc9geyMwPI8pgDRJNv0Cy6ddIM/1eaCDNYqPQQKrVtu6x5CSbvUixlTTAXsAaeNILeL8UKS/Qa+QFWh2kajSgq4N4bZ3IWvDV5AfNkXn67+CxGNWJVS3EOJXfmW5exRim70nNeOYJnXnPx/mN8j5Izw8gctQBRI3i4z5x3bKW57CRe8W17EEj9og18byei9dD81oXT+KbOfcdsoWOm+Hn/hPF/zqJf9uvEGz1b7EntJf59IdD+7ev1a7/rS/fKasv346m6i9fzMsvMaOYN8hR5vbJzsnpnaVQ9BY/U7ZxV3XBV3fK68vDa+qboupHt8S2j5+YMmvuvKzpMxbnyTJTHTT8x5IHMPecC2QF+RM66lunXIn33Hc7K+gIcgKPI4veO++PJzOiz9hvPZJNVnZrIN1svdgzV2ax5XENDNwnNJBqd1DoINmB+HY81q0DrRewDlJJA7pekEJe0O0HnlK/oO0ZRN/g8wgxGug+1oLHH6UxyEecaq9h0PKq5ZZ51XLLHh1FHh1NYH6ZW238avnlYyivVyAEE8fMbzDxG6LhmNew8Lo2X7G2basGm+HvsUmDjQJ+7hsQ4LYege7Eu8uPCOQ9YAetQojdFwiz/gCB/ZfAt//sXz1tm3q52kv3oKsp3+pDGkBr3Zp1MnmUMfFuIM/M6pORmdnLO1rv2brSdS01+RtQVjopv7yqNqy8uja0ur4xsqW9PX76rLnpc+a/kJtXqBpWrC5PIv7jCXHEPWshq1w1Z0WR4sOjifQZkylHZdE5S6EcpaQ6Lov4lBlTnWdAPYDRGshMvyENrBUakJtteqSB/jspH+xGGmkg2WY/6eDQIw3oeEGq+9FuDUh54ZEXxGvA/YIW2vEkPmqh5fBJ6MarLq8ctzGEyFEUu1oQx8wvx2+oDr8cv8wvx3DI8EccM79ajn21axc1HDO3vm6b/sRtgBtx676WOP5BwM9ljUCA83eEVQhmOH2LEMevEGr/OUJtPkKw9Vu8d/4k5tzXelr3vfDqqlaa1FfsIv5/hJ7dP3rIs7L00+XyPn379v1HTfEGdXXBDqjzXmgsKa8KLi2rDKZ+P7Sytj68qaUtpmvGLNlLr76WW15Vn1RUWpFYVFrGGkgkD0gtUOfI6upfuJ3it+9sIp2LRMpR2eSNWZ5HIB9yCJlU08spx8uM9yJJf6vQQLrJaqGBDLONj3zAcvtTNMBeQHnf/ki3FyS7HhUa6M4HGh1wbcCI08wvacFjzDFeEuJGHRHHKDp2w1v3+aHu2NX1ZuY3gnSt5ThEJ4aDyadDNT4d8Fgc734sjnlNHvPMfs0c++hw7O+6ViCIuGUEEr/MbYiG3yCnlQgd9DXxvAJhDl9IXNt/ijC7jxBqtxTBtu/tCrV5Z0GI7aKn7vvdrQGK/4bKnaireCE/WRbaT3yvZNu86oI9KM1ZMqWkTB1Uoq4IJf5DVRVV4WXVdZF1Tc1xk6d1yV59fXFxbcPoNKr/UqgHTCX+U6jvyygqnNRRW/nWxWQep+FzR587k3KnnGJORn2KkvjJJO6yqe/LMNuHVMPdSDH+DhkmP5AXrBMakGs0kKHVwIBdYh/0NJ18kKajgVSnY4/Vh9wv8viRdgxJO8cUp5l7ZkR7HngMzKcWzDNzq5uDw6nODiUthxGYY47lQE0sa3n2pc8a4LGrm2eJ48d51sYyx7GfThwHOzFWI9CR43cl8f01wl0ojp1WINT5M0Q4f4Jwp2WIGLQU4YPePxPm+O7qUMc3XwtzfnVyrOesieTxw30tR/f4O76f/Kot2/ZrrfowGtTf3asrXfteddGu3yrztj0syZ9ZVqxWR1BujyT+o4n/2NKK6iR1VW0S1QHx4zomJr74yquZtaMbIkrKqgpEv1eiSi9Ulyrqal7ZmRmz6UwC+WWyD5/v/UijWMqiHJo2mPgfcQbZFK8KiuMsqvfTzbdLsU/8y43XIctkIzJMNz+mAb7GQFcD2nyg6wXcJ/A4svZ6g2i3Q4j1kBA/lHV4EDHDiGt6HDv8IKJGHOg+RtF7jNZAl2fmmGNZG8+Bg/cSt3u7eZY8e7tOXn6ca4nndSKWmWvOy+zVzHM3187fCJ6jPFYgdtjn9B6XI2bwB4hwff9UhPM7yyOc35oa7vx6cbjr7IBIvxRD5m0e+jyTVeXYJzUj3jhNluWQU6D2+p/wrv2qUW9YVlG0BxWFh1FZsAvq/K+/pZhPI975HrMRxGlQTmHBKHl2skdOTn1QTf3Y5NqGlrKOKTPGvvjakvntY19cmascX0I/m0fIKSwaXdVQ+9YNzq/Mf7wPr7faj/hh+5A56hzSKO6yKB8rhx5HjvNxZNuQDwyg/GCxAwriPMv4JwG+vuRpGpD1J68YsOeJfCDVhtp5RUa00wHEOB0SiHKl524HBCKoBolwPygdCeFD9wvw+lAtgggBHow9AoGPcf24dzN8XB/3b669/agGC9Lwrct1IHl2mPOXhC8Q6f4Zoj0+RJT70ntRg9/6PsJ18fRItxcUCf7jBjE3N4nj7ErnXsmpqfpJSWn6Sckp/ZJSJCSnpBqkpKaZpMpkFvIshUduUaXt/4b/CvXMiOqi1T+UFSxfXUz+nVeS559VkOCRmp7kXlzcnlmU8eqc+uYpLzaN6doyacbsL8ZPnsH7JX84afrcz+e//PrKrjnzviiQv/BhUUlnfWFFkry+5u0T2fHfn5K4P9LNfzj1pzKqudM9jyGD+vac4aeRw/sdO5yUNNB/v7jGLMd0O7JNtkBhsg1y063INNsmkG5Bzy13kgZ2dWtAjBPwvTK044YO+zXzilR7DaJYdtonEOG8XyDSZZ9AmOv+xxDsTj7uSh7vRl5O7zPIbZfYP0BA499+7pu745r59qX+OuAJvrXxrc3VXH8HOHKe/pK4/gJRbsspppciyuW9fdHu78wJc1okNuQurIvxk+clOKSkppsWFhYbUx9mSDW4fqoslXnWZ/6Tk1OZf33BPx1T0tKMCKay9IwBykLV3+b4v/tqLP8+vjz7py8K5V/NLcj4eF5F0X8+qap8eXn7xNlfl2V8ebEk+/WdYyd1fdrU3vlDx7RZn46fMuNdev7VuMnTP+3omrVs8sw5X6vzF1+oLvtwfmPV57dGN7x9nP00gWpjRhzVw+z/ERRPsRRX6VR/p1Nuzh1xAcrBp5HvcQI5tqeg7H8cuZYHkW22BzkmO7qRZSYhg/whnaDVgLgvDtWFQgOaMUPtdSaxDMc9iBtE+ZsQ7bRHIMJFA+fdYj+AEJddAqFuOxDiKu0XEcQgrrn25phmaLn2d90guNZ6uV93fK/prruDBn0jau9AqseiBn+GhFHLED/qvZtR7kteinB6Lf7J859XWNJPocyLkMkybKj+Nlar1SbKvDzD9HQ5cyxxn5pqoKsBfk5
xb0KvmxH/tgUlFd7/W/75i7g+0dj46pnmltfO1ta8sn/C1OnL2ztnf5IXsQbqmtpZYyd3fdEyfspe4nsF4TPCtxOmzvyydeLkDydMm7myXD37mDpv2Z3R9f/6OTV4//4oqoE57uN8uK7aR7G/k/LsfnHuU6n+SqV8nDviEvKG8D3gT4m1pnnWpAWrY8gxP4hc071QmuwRyKbHDIX5XmSQP8gt9iCz2wN2iTECnjvguUSeU46334UYh90CrIFYx12kgd0Id94pwWWHOAa7bNPBFgEeM33E8ePQ5TtAU6dJkDxd22/x/Zei3D9AnNe/NiVGdd5LS6uYlVMW5ZelVDw1P+fkF3pkKnI8U9NklkXFJabl5eWmCoXSMC093YD4NWCf1+Wfj+T7RsS/Gf2ORUam3KVYXe/0f+GfuJxR3zLhQF3TrG3jpkz5gmJ8ZY36ne3Fsn9drGpoXkTx/k3LuGnHifevO2fM+WbSzLkbJ06fvX5i1+wN5AOrx06evLq0cNob6ZFrFyfytbtUK3Hcx3DPSzEv49rf9zxSeY6G6vF06s0Uw0+iYPh55LufRaHzaRTankGB1SnkWRxDgdkh5JnuR67JPqEFLf9yLUgDUh7Y2X1/LNYAX1cQZ7dTINp+B2lgp0CE4w6qobchjOG8tRu8PxDvHShhvcbDJQQ6r9Xg++66vJtvTX2uy3mo3QcIdXjrWMzIF9tS5AE28nx/m/Ssih1pmRV3s3Nzoojj6Oyc3D/dgy+noDCC8rc7xbFZXV2dhUqlMs3Mzn6cf10NcOynyUzSZOkW9DMDlAWq2P8L9xn5FXpTZs3u1dbZdXD0uCkXiOuNo8eN26ZOX/5Ape74srZl3NutHZNXEc8b2yZ2HaUcsGnSjLlrSAfrSA9rWydM/qm2ceam3Ohvzor1Hb7HEUr1MSOO+uh0/6tI872IdO/zlP/PQUa1X6rHEcg9iOehl1E0+DyKXE+jyO4siqzIDyxOIdfsGPJNDwsNZGugeIJ7Lf8yzb3S4gl8fVGs7TbE2m0lDWxHtMNW4n87IgiRg7ZS37SZdLBJQOwN5kT523ndY1zrxnfQoO80x1UaX6eezOHLbs651w62eZ/+3iufxAY3huaNtjFNkcntiRdH4sdZrqj4KDWjHHKF6lXiP5YQo8greD41Q7oVX15RSR9Fbl6ULCPTtqioyLyhsckqv6DQhHK/IXFsSPwbEvcSJP5ZE0b0t80pX1jKMjLssnPLA/8v/PPXnEVNzzS1dbmNn9J1nnRwYkzHpHV5GTOXlFaXj61ubN9d3zphS2P7xK1NbR0bm8d37mnrnHppbOfUVa0dUye3T5wWwX8j3nHT5yleVHtTz897sCX5nkWK4P4yxb7Ev9zrLNKp98scfFT0a7x/SaH7ReL/LEocz6HA+jwKLE+LtQW8zoA1kG96sJv/p2mAuWckDNwmrjWMttlMGtiCGNtNiLLfhEj7jQLhjgzpXgK8P2SA0w/wH/Q94btuBGmg5Vw3znV55zG1IJt3EDLwzTH82bNzswcTh+5p6fJBxI0DHR3puWtGdvlc5l8mLz+Vk5+fSvxHZSmUQdrznltQ7Eme4MWx3NLSMqChocFSqcwzJi9gfxf8M9/dGiBw3k/PkPcnjfWnvzU0U1lr/3/ln7+mzJyjN3bSrF6U66dRTt8zpnPaqeZxkzc0j+tc3tA6fn5j+4T6cVOm506ds8BD9/cmzZ4jjsl26zq4vosddUB4f4r/5cf4533Nmf9MqvszhxwT/OcPuYQij0sodj0PlTNpgPgvtCAPMCc/MD0pacBEygUKs31/4l8b+3wdSRwhXlx7vpk08BNi7DYgghBpv554+xEh9j+IvWF5TkTw7Kjr6Y8gxswdVj3B+wr6/U8RZruM4/1hiM3ia0ED51nqngfiNoA4dxEaIMjSM11kmeopzD8jM6e0NVuZKxMayFG68u8oC4qSyPtdKfatOjs77dTl5RZZCgX18+lGFP9GlOeNNXW+0AAdjelvW8rlWdb0v2yylP/7uv9/81VUpPrL19Kt1w6Pp7oqhuq8FJ8zgn+Z3xXi/jIyfC4I/jM8pTWBWUNOIGPQceS7nYHK/TKKXS6INWSlNudRbHUBBeZnUWx2CoWmx6E0OyI0oDA78JT434Gk/hL/yYL/TYgn7pn/CMH/OgHmniH2BXb4jnSwuhuSt696gveV4h6KT8Z86MB3EWj98q8j7Ub38nJs7/7sqekZejnKfMP0LIUv8eJMHDmxFhKS5SPS5BWC/7TMsm1U6+UQ90mZ2TmxRaUVERT7vunyLJuOjg77cePG2ZD3m5L3m1BsG1OcGwv+dTRAmjCVZ2YNzMjMtpFlyOyylFVe6dml/y+o/299JVCsxXgcQKIX5XjinGM/w/si5f0LYg0wx372sBNizyWZ/Qmq/U6j1O0KylylNYRq2wso6X9ReECJ2RmqA04IDeSYSfznmO9/Kv8c+3xNIfMfLfjfgHDb9Rr+1z7GveD4r9DN/ZcI4pi3+0wT80vZ65l7+NpM8Q0cOOuZp33+9Myy6HR5uivlZWfycGd6PIi4X6HVAHlAZ05eYX6BqqKwWFUulysUw0rV5YNmzprtWlNT01+hVJrS75owzwzi3ERHAyb0miV5vj1pxjZbWRzjmfD5U9/H/19fSTbrXoxypngceZj4Pye4Tx91ieKe6j7K+/LhJ8X97fi+hqm2R1Ay+DrF/1WUuVxCqeOFbv4LzM93e4BS4wGsgafyP2Db4/zbbhT8R9n+2M3/f8k9c24vxTzzzjEfbPtpd33H3IdYvwrfgV3f/VfnICNHXSzLSHcSGiAPSJcn2VEemCnLKv8hPatsGeWAhJrmsV+pyysjZ8yeEzZn7ry4SVOm+OTlF1hQ/2/OfQD5vxl5gJmkgXShA/5eZk6OLXmGU7pcbk91n8//C07/J19xVm8PjKVaK3bwQer1TkImcr60/p+5Z9+Xux8X60Pz3c6hzOMGSl0f8a+yuSjFvzkdzR55gFYDSvODxP/+p8Z/nA7/4d38S7Ev9gF/Gv9/8nop5oNtl0vzpiLu30aQ9WL4W8/FcOtxXoNty/7y8yelFellprQ+J1eq6on/bg0QPOixB+WFoQkJcQ4NrZ37P/rks731TS1x07qmh1fXN4aWqMviM+SZ/elnrUgDFqK/k0ka4F6fOO+vzCtwptzhSLEfmpHT0vv/IbX/7a+UgT98HeO4TVybJRtxqhvpGu55fXCBO+X6wb8I/it0+C+zv0D5n2pCi0vCA5j/PDOpH1SaH0Ym8Z9pceCp8c/8xxHv2tgPf4x/qu3sqW//y5j/6gnuPxbcc74Ptl6C4AEvwcd6yvn/zudPlRfqZSmqTDOV6plp6Sl2rAOKZSfK604Uu4NS05MGTJu+8Hp1wwRMnDL7Rv3oMbW5hcV+xK1nQbEqTaHMcycNDCANWNHvsQbMWQv/X3vXGtzEdYXlOjPu9I+xMbaklSz5BabgiQPBmCAj2cYpuBQwtmTLkiXZlt/GDzABAkRi2pKkQF1ok5KWxhNKUjJpSaZAaEkai+HZ8kqbx7SdBpQBJoFQak
pplMH49Nx7d1e7C4EkmGf3eM5cfV5p5+797nfuufu4W1nlNFe7PdnYD9K9/s67TvvEJqR2aop1z5lnowZLM9+n1+ArHvwAuUcfi55+nK4x2pBzEbm/AI3fPE/5b8g8C83pH1P9E/59oxj/HgX/1Tz/Du27lH/2nnSee/QSwj16Ic8/y/v6Kd+Ef+rpu5hfk3uW41uNr9C4T+6bKDD8DCxcLzxsDPR80XZwuFpjnN62pEp323bkcRxymoXz/gzM79IbmltwftACLm8n1DX2QFv3ss/auhZvxr5RiD4R+0IR5okTSV6IvyXzPDLPJ3O9TNR+psvXVOxtXx17K3m8WZtr2LVxTto79Hz8zPS/07leVc5pum5x04OXqPYbx2I/yD4PjVmfyPgn8V/Qv5J/Qf/zdO+I/DP9s9hfItF+EV3zPXRD7lmet53leuat0bhv2Ey1P5X7KeQbnoIbH7Xc7M5ajd/veaCmrvsw+lanp627vKqpq6yi5TDJByvd86nXNi2EzkUrwq1djx/3N7V7sZ9YkP8pqPXx2AfM5PoOxgMD5vzpdkdFpqd+wU2d679dVsEduOQad5Ke4/M/9Cl9vwThnrzDSOC/eTTyP/ostGScgUazRP/JH1/FP3m/LtV+ynsS/o9K9H8Q+d/L+EfuybvhCs1vyXm/DvdE+yTuE+3bjC8i/5vAot+I+l9PYn/fV2kDu9NFS39rR2dt48IP3HVd4K7tgmpfB3NvB2DfAH/LY9Des/wccnulpm5+n9vjKXB5a6fZK51ZpA8Qx/EjE7fNGlaSbpGV6J+PsepWj7Fzh+gcv278OfDnXKC8U+6z/w0tY/4FTVn/hIaMT6AetU/493NnJPH/FD0PRPI/Uf987Gf8s7GfPD8yA7mfYdhP+WfaD6H234IC85tXc4/OuI/yT7gXcz7jy6L2LdxzMIVbC7mpj436qm1R5a7my/aK1q4lx/1tPYc9/gXg8nXTvuDydYKvYQHUNfUA9hFwuEi/6Drn8jbMddbU5pHzQw5ndbbXP9/hqX/8G1XezmHj6VZbkX6jy4ExunbMSZznn6HjPXGyThTVPsZ+fzrG/rQzovZrk8+K8z+mfcZ/ZcrfZGP/LMr/Iap9yr9xH+Nfov0C8y7q1+Ne0L6F5HxE+4YtyP0mer/sVMz7MPZvGq72sDubEpHzPzS1L3q/sX3JRdS72AdILHB6OqGqpoPkBhfqa59+2zHvCTfG/NF1jUsW1TcH0tx19w73ghXqNywi1+y9qafAn3WaxnviRPeUe177DcYzstxPyj+J/YT/Moz9ZZR/pv0ZPP9i7Kf8R7Uv8n8N7i2mnVdpn8Z+A8v7SOyfgnlfzricrw1XW5Q5amhpd7YUOWvaDhDOBe5JLlDhbIXyqrbBCmf7xQrHgk/Ly5ed9zX2HHZVrs7w1a+6q871fBmz6J5ZbE8+CF5tGOo5nOPhPI/wTnQv5V7I+2js5+f+n6/9I7z+o9ovTGU5X+E1uL+R9q3G34h5X4Ee8379s0T7RXmZS4e9PeaUs7ygyuUzOaqbV+F84e1K13xwuFk/cLjawYnzA1dt93r6vZq2Ya/D7baHjStnkmc9yHlcwi/ROeFccML91fM+pn0h7y+TzfuF2L8fpkv4J+97E7R/be53SrS/LTrfR+1buV/RvK+A+wUZ99fd6jbBeZ34ubljyaSmzsVzWzqW2jEfuCvn9zdjgH8Zplrz9ORNH1UlHqbXdn0jP4S6pFM856d5Pykf9wX+ee0T7mfpjtA1RcRxH51wX2ji3/cn4f1a3FsI96ks9hekvsa0T/I+7kXkvo/E/aN3ur3uR8tKc9NyknZVb+nIV6Eq4QhdU9CT+A/sC8h3Ulg83yud89Nxn9c+eS6I5vyUf6b96cY9IvcW0xvo1+FeMu4T7oW8z4Lan8a9gOW6v5A6juWfkVLt1ljC+GLtlJTe/tKEbWAfcRCcCX8GV+K7UDnyPXAm/ZWOE+TZEDt/399cet8n0z7lnjtAnXA/3bibcm/juRdcybvIvUT7hPsCzPmJ7h8x/OglUrfxaY13unnuezNkFNGSMznz8lLWvlmcuAXKE/rBkXAAKkYeAkfSUZhH7vtOPgpzUo5QL9X+Cb6t+yN8S3+Qcl9s2Eu5txrZO5+l3Ec9GvMt4pgvne+9ROf5+dzqeXe4Sf4vzZDObl/UZs8yTdB+b4Ml+SfwaOJWKB35e/hOUghmj9pDfVbKPpip3Q+l+r3UH+V2Uy9C3ouMb4A19Xeo69fp885Wid7Z/3aImrcaXxXjPbuut773DjeBagrLNrfOnqD7/guTU9aBLWkzFI96BUqSX4Pi5NdhespOKNHtgCI98srtoOsb2AzbMZb/lpaEZ/JZcJuRcU5yvALuZX6M//kJi+7H9/6k6j41Lq1E/Jya6s7O4ZZ2Y87468m6tR/la9fDVO3zYE35JVi0m+naBtN0mLvrt4AN+SVOPlMn+TzO5ZHvS4/on91m0T0z/yHjQrqmcV7aU3fs+FT7cmY2l8lwamq1Kce4sGgCt6IuT79q2WTu6eAk/ZqV+fofBvP1a4KT9T9YPJl70p5rWJ6rGR8rXivNMbff9rqrpppqqqmmmmqqqaaaaqqppppqqqmmmmqqqaba7Ta49y0YkMFB6wkZHrDKv74mXo7zY2RwqEsj219EE+iT7Y78XPKNcCAcgP4o3jOkiQdJDY5FNDFwTPguQF8YG/0Aw/hNsAY1GivDn02IA3gCSTHtYdsnxsLQcsTxu9nPl30IQxHEcUGKg4EQXBlAHEubYEgTyA1cCRMcInhQsyISuBxCHBOysiMZtF4OEtZDJoq7plkv014QJm0woLHFWf9LsU+DNQlrbLH9/6HYzDpLMObEAP2QK3SfExFajBC7E9suLosRCNPi6wr8gICtShyihXi7tUmJafU1MZqgHMfyOJ4vVyqwJsRjHtp8rBTXSc2XY7NGsf1zcPQZvFwFXqLAETmOidhoIW7PV+CEkAJr5HiEAvO7DSqwcrsCa2wKbL4BzlVg5cJD0xTYqqgQT3gURxR4SIEhqMBhBY4o8KAUxmDHV+KgDF9QbA/LcGz/MTnu65PhuL5+WYXj+wIyHNgow/GwUlo/VOF3IdqCcSRcBKNLyhCNh6Q/lmOrHMdRHI7u3ETDVdAn7I3iiGmAP1KGgVesJhZ6ZTgOnlTgfUJA5GszKMdDgywqDgrHttwkxxtY4B0ScDiWj4L80QzwgTjI4wg/OLFDDOAPrRA9RBAD/4DYWJIKSYL6IDu8qAltLViIUSHagNC4vA3JdkdroBhD9sC9av8D+gAhkQ==\"\r\n Icon7 = 
\"eJzsvQd0U8e2Pn7ARdJRsyXZluQiuffee+82YIyxMab33ntNCIGENJJQEkJICCRAAgkBQgKE3iH0TkLv1XQb2/r+e0bk/e677+a+3Nfu+7+F15o1kiydM7Nnl2/P7L2PIDQT7AQHB4F6s9DbVhDaCoJgNlvfL6HPN9JnEREv3vsKQk8nQcjIsL4PyBIEVbIgdO784v8fCkLcBEEIoGs4sOsI1s//6A/A/6rmvi1wnXdZf3R3e2x5vy0wvQQYEPMAhbqj8NL1OPhHvxNHujm7LAj/xOF141nvoLHvj4hotAxPuYPhyXfQM+QxirVHERDW57xmiHsxTVvLfuMnetq6TQ5t6fl5Qrn5h+Q61XjP7+h/zQMFmIdEP8asqkZ0CbyLVsZfkdpsHUI+LETa1WrErCut10z1vSof7HrXtCkJXj+kwHV25PnfxxJstySzjdM19Ik4gb7Rl1DldQWpws/QFybC40QCQi8UI+FMFcJXlkD1rq/FcVGwJXh3/lOXzj4LnN7yXWQYnfmkumgv2iQc4vOu9LiOaNePoPOOh9Nwb7h2pevkFMEYVQjXmRFQv+0Lx/mB9Z4b0+C6KQReA7pjSPw9TMyzoG8EMDoF6BX9FKmGNYjRfo5E16+RHvAjksK/gsePUae1y/zWaSb7nrMd6fCzcWLOlgzFdkuvwPt4uxXQ3vsB+kTexOhUoMb7Pop1J9BSfw5d/a+hg/kYvPuW3w7t2euUcXzaBofZpuMJuuVXyzX30SO4AWPTGzGK7t0poJZo0Yg2HpdRabqBNPkO6s+jbdIh6ObGQvt11HPNXs9L7m06Psy3uUb3eoZ5HYCBsQ14rdCC9yuAQbFNdN/zSFIuRIjwxochme8t0L0dDbsZLiHCp4IyRJgTm2m/826f0GcYFPcEfaNu4JXcBkzIasRUusarOXUIF5Ycd52ZtEXzvt9t50+DfnGcZuzJ1stfGOuTpPj+envzI7zVAviA+O698meo8j2MflG16OT1AKmui34Tp2jHGLaF9Dcuj96im+I3jP3Wx36oGC9feiDdYT1KDEfwVssnGJpQj3ntG9An7CbyNEcR3/wHaPom3Fd8q+7AfmNanvAsYH5al3+2rFmbwNtzkyBYqDVKBCGFhKDxhe5gfR21wfaCUEv/v5AuCJvThX/5XcCL72X8/0zPvGz/u5r+7dDSoGPZ0ExNQIbuAHIUp9He9zzGZT/EyNSHGBD3GGMzH6Nb2FW0cDuOYsNhZGu2IVycBYe4oJuClyD7j95bO9o3M3hf/lntDB8o2vsi1+FXS0+/OkzOsXBd8ElHYG41MC4dmJIPDI5tRGf/B8hQ7kaq6ju4BMT99h+5L/3ZiUPdxnltToNuij/sS7R1zdsLPXPs7m54JRmYVlKHEWTDRqXWYn7npxiZ8hxdg+6jS8A9dPS7jXjxOxQ6/4xg49DDgrNAKEBQ/8n72jjPCemjnxf+m9M34dBO9oNdjHK+osLgzf6fJjkypaW6FtPyGtDK9Tekyn9BpsNutDAeRbnHeVSYrqDa+w6yHLbDKJTD9EEYgjZmwzAlpNY+SbVdkDTrKXNThovhjmEyV0WK06SAVnTP9N/vH7A3d3vAxiwo3/CGcoK51sZTFvOX40uQrJtSormGd0qBz7uS7o97QHr4OSo9L6K1+1m0dD2JIpeDRP9NMIrFyDhZjvQ7HdAGI5F9uQvc5kTCvo8LlK96QTXaE8oRJvisTIXH6xHr1V3dF4SfaQGfPZnPfValwsYszftr+iTbHVjYybMOy/oAO6YAS/sCr9K3hsaDxnCJaH4Uudo9SJX+DLOxDTLuVyHiVAuEHChAR0xCDSYg+VIVzF8l0Bg8YdtZh8ArRQi71xLBP+VCLHPerf7I/7zrjPADf2t9MoWzMypc2To/Qe/Ic3il4AbdvwnjMiwYnwG0druMYpdjyHU8CG+hK5RjjXDYGAbng3EwHU6D+4lUhD5qiQL0o9YbSRfbwunzUKhm+Ta5bIt9lHSrGvr3Qxkj/Ju5206ShPhHjN2QLTtO674b0TZrECR8hmTVamRq16KasExLwkPJitVIUf4Af5thUIXr4TDXF0lPauC+OQEuW6MQf7wMUZtL4TM3E0FvFED+oTdkw92aFLN9HvvdLUThw96QV+v3ELTtYyM0by2E2beVjXf73G6UAwKcZqBGU4/hqXcwMP8aWgcfRp52P9Idfkax8ymU6s8gTrkEfqoRMEtq4Ch6QdneBR4jguHaOghuUYlwVxXATWgBT6EnPISO8D6YCZdlkVDN84N8jje8t6TBf1cWHPt6wtDPH+ZP4uD9dTJMi2PhNbcAbd+ZhyGvbUbrfqvQJecKuvg8Q43PA/QOr0O/iCa08TuHwIJRMId0g7EkD07dIuDYJ4DwWRTcX8uFO81Z/1o8DBMSYRiaBE1PT5CsQ9rfCOWb3lB/EYCoc62Qcr8DgvYXwP2nRBh3x8F5eTji41eikzdhlwRgSAjQL4TJANA/2oJuwQ8J2zVhYDRQZbiHdOVmRChnI9JxNqJU80kfzkSE7CMUexxBccBBRAfPQ2zCx3CZHgK3WZEwrIqB41fBUE4nms30uSNb43dB/p5Xo3ZGAGLPl8A4KgOJwga0Uj3DN8T3DIf1jST8GGbB4DgL4SmQPrJgeBLhM5LaCtM15Gh2IlG+EtkOu5Ch3ow01TokyVcRf+5CnuogKlyJVxt6wXdvFmTTTNek0zweyGsMv0pClO8p+rnPdNoaDe3yAOhGRdTFyVYgyu4z5KiO4LW8RizuaaH5gvTdI1R5XkP34Ad4Nd+CSTnAmDQaW0QDX5cCHc3X5TiKSDarPK/SZ3fo9TFUeB5DvHwF7Ke4XmlWoZ7q2NU5w+901lGfTalQdDFAMcQd0nGunwsThcwozRfnKrQPUOV+n7BuLfpFPsebLZoIR4L0bh35Ao/oHifQO+oc6WJGkwZqz8k/+I104ymUuZ0lnbiT49y2HndozBfxZsunCLdZCukb2ovaxb6n3Oakwm163m9OU7Lm619Pel1a47zUc1fYdH/ziL0F0nMo019FK5cb6BpQj/5RdZiUbcEMwqSM9v2igLakfyq9LnOa9AyrRzuv2yh3Z3rxHDr43kGh01FkajYhTbGL240pOU8QZbMWvj1qkFEwFwWey666KsvHetWUrncpij6tXuD53PRlFuIV3yJdthO56iNo6/aQ8DuzNc/Qg/D01EIQlgeGJTIcTnje/xkGRD+nMQEd/O4RnY8TzW+Sr3CX1vwQctSHkCLfjCLVecLsN5DV5Su4EuN4fV4A52/CG1XLvWE+lgzthhCoVnojyPwmsmyOkN09im4BzzCtAJjREvy+3YIayB9ooHUA5703ShktGjnt+5AcdvC9T3y3B90C65BN/BYsfR3Jsk1X02Q776XbHan3Gz8YPjXdIfvMDPkakv3V/nDaEg23TQmQL3e/oK8oPpshHEIrp9uoNj+jNX+ElUOsOn8BIfQRyU3Ec0/xWoGFfAJgfidgTrV1LD1CnhP/X6I134Ear1oECONgENK+ihCGRrcUHkSnCT+FOX7nc1U2zBniO
56r5W94vitONFXbDHJOFqoET4lUHRTabPrRZNlWtNARv/g8oXk9IX+RfMfIy5hVCbxeBFrje0SDevIrGoj367G4F/BFd9INcQ3IJ782XlyBCJs5Tw0lGUc1VQFP9NGp53z8+ryvH5mwwvWLSIukk/Plv2VnoqQfX0gVt6DU+RzR7xEmZjURrqkn3qpFjd+v3NZ92JZhnjpa+yeYVmyh/z/BsASSyx6Eh0gGkuUbECR5pc5FaBHp15j+nW5bBJSLvaF6yw3u+xOhG+/3OtkZyV/fO1ZctLCF02WUu9RiGPHU67Tm02iu04sbye+2Yq5il1/Rzuc0lvS28sNMon8n3yekfx/ho3Yg+/MzvG36HQkVXg1w2xD6hnlX2i337+Ia3JfGrTasiK5zGOp5hPnzf31vd1mrfnHyr1BmuIByQy0GRTdhEq0n8zPZGrN7/zSG9AvRPFL8BBV+h9E18AHR6B66+NajbyjQQnsevsLoA0IXQePyTfAa53eD19O95G4/xl+UxKlTNAuDD8pTtJP/8r4msVoTKI6ZFydfQrhxDZIdF6MF+bRd/B9jdlUDv2/fiOe01hasHk6Yk9a/2vMC2gcdILt7h+zxTrJ9J5Gt3Itw5UcW595R23WLAo45TvN7+/d7OO+LXURjORfyc95DZRtj5l/eP0L8sGey/CeS8aN03WuIk32HPKc96Bd9h+77K77o9ox46iHJ1CO8W/YEn3Z6jBpTLaaT/1zmdor77ZnKXYgQ5sLYOhcOKz0hH0XC+xf3cDoVN9B9UQxCv8mFrMq51T/br3jZ/te0f/03uRkJWzNYBjdDY60w6VGtIPSjZqLWi7XN/2/vQEItLUMQHtBn5yfTa+ueg5s9vbQIAhqFZtvqBBvUkpq5QC7QZsHm+l/fju1TRFDr/HKf4mV72V62/0PNf03GtdDjhRAXu8GvbAzy7c4RLjyCVMUOtCc/dWzWXfSLvY2B8ffQLfQ+RqXf4697R99ClnYzWrofR57zThTodyNDsw4x9gsRYB4LMU13t7kgqP67xk1/MtdPIhfFXCpv0nzgB+lQZ8T5rUSJ5AFa6+8R1nuACZmNmNkG3N9j2Jvtv40nVMN8ssn02ZD4x+SXPUBH/7socT6NTPUuxEtXwk8+EmK6FvYmm4D/lrH7ClGGOeFH4q5VQDXVG5IcDez7ymGO6Xm/XAbLsCir/zKtGFjYDXiT/JYvCbdvfQUYntiEcekWvFFi3VMcnsA+A/lKl5AoW0v+1CFkunwKjU/AQXcM+y8fu02uulozK7A+6moZ1G/4QlKggdjVALuW0ilRtotTimSPMCICWNKH+f6PMb30EdG7kXzAp/huMPBpR7Yf0US+4HPCpffJP7pPvulT7gfGypYiU7cC3b2fwk/ZZwetcfDfwuD/kabq4p6vnuK9Uf6mJ/xPEv6c6gNJuiPEdvodYqET38tKFH5Q5It37pfJgdmEPsdm1iJRsgd52l+QodqFTIft6BR8kPy2C+Qrn0CB02G0MPzKffYczT4kK1chQDIemuZxEPu7QTXRDPs2unM2/rKv7QWbETKhedQ/Om7bLrp03eygTQHbsqH9KAj6n2KgmxUE+0gV+Qi68r/+foq4eVu29AKqDXX4hHzNIQmPUeJyCUVOx8knIvzv8Av1h9E74iH5aFfROeAu+SdP+Z5Hmmojgpq/Cgcvd2Teq0FeXQ/E/laOgLWZUI8wwy5ZDYlZsV5mVo2QFzkPEnN0gyQSyVhFqcsH+pmhK2wEIfBf8fl0l+5e29MQfKiAsL4HZJM8+HmjJNPx3T+ab7Ls52MF6gvo5PUUH9LsPu/G/Pbn5Mfe4PsYPcMe0XocQKnhJLUTNIcrKHQ+iCL9QfLpfoKfMAK6aDNK6/oi+UY7lDYORE9MRzmGI+JYKVTv0Lr30kM2xBVO80OheysQiqHuUI4xw6Gv6Zl9mvpT4jmN/Xyjh9/BnHsVGIWA9VnQL4mEy8Jw2Ceqv/2jsTvZpcoyZcdvVuktmEn8s3YE8O0ga3urpQWvFTKdY8GIJPLNfC6Snj2MVPJDE8SVNPZ1SFKs4nuOpqFBaNUwBKE7C+C6NAbxJ9ugvGk4zWMaWjwdgIDdOVC87QX7kQYop5EfNckEsbcrYu5XIOKXEhjGBt0gGl8K3pIHl59j6sWlfk+yG3tAN8n/LvMz/2j8AdJxzhmyw0/KnGoxhSRiUY9Gvoe9ZTLwcY3Vj2b7JwNiGtE9hO2bHeN7+Wxfn8lIpsM2wsoT4VUdiyx0R259L4SeKUbgoTxaj/58b7sCI2kthqHgYU9E7SmF/uNwSIcbYVethfupNATdK0FiXXuE/1QASYm2TnAWhmk+DjzhuzkD9pmOC/+erPgKg8Us6ZmbFfqn6BXyHO292J7IQ9JBdzCnuomfx0zMttoBplvZHnmJ/jhau/2GXO1+Gv8WBEunQCPxhvwzM6RbA6DcGgr90QR4/5YN8/F0eNAYvS5lI/ZRJVphKNoSfxTe743oXS3g9WMKfHdnwmF/BMIelaHgVi+L06v+z6VvEu/X6C+Tz2H89+Q9Ubn2UKH6PMpcL5JuOYNY6QoEN19APPIj38+oMJ1H15CLeC2/ER18HqO1+wUkK37g+1fFTmcRJf8IuuZhkLfRQjmU+GKmF0znM6D+PgTiIh+ot4XCeCEFMY2ViKbRR6MN0tEJOc+6QP6BN8S3PEl3udY3m+N60etaDkoxEH4bM2GbpNz898Yt7+KSI47VfxtmerchV3YKqfItSJRuIJu6B4mKlYiQzEOkZDFixC/o9WfIVh1BK8MFLsPpDhsQK36JKHE+gsVXYbKpgjJMA/2CMBjXxqIjXkHChQo4rw6H36F0xOwtQtGaTgj9IAOunWLgkhUNfXwCtItpjuM9oBxualBMMN9W7A1FckNHtMMYqMaZLbZK+6lywS5O1MjNoiDVi4KdUV6hT9fOClwlvmGEY00M0oVf0EbViG7m5+jrW49RoUA3jzoUOx9BjnYX6ZlDSJB/i1zS9208rnK+j5d/gyibjxHcbAq87XvAYJ8Bpb0j1O1doXnbH16vx8CzUwQMqSEwe2TCTSiBUWgFD6GG9NVw+AqDYKbX6tXBULzhCYe5AXD5iea0K463nHtdEX2uDPKBbpAY5JAopU320ao621LHetkQI+STTbDr5winghSEuc5B6+ztmD7xKKa9sRf931qGNpM/R+/8G6hyvYOWxt9Qab5OcnsbHf0e8D3gYs0JhMZMgXtma5g9O8DDtQI6r3AovHSQhyshjZNDmeMKbetAuFTHwVCeBpe8BDjHxcHonQtPbUe4q1oj4HwhQk4WWfnoTTPEaSRDb5jg8HkAQg4XwudgFhwXkA2bEgyfz5IQvq0Isb+U0Xq2RPiuEgQcSYH3qUjkfDsafd7/DpWDN6CozRaUp5wk7PkMvUIbacwPCbPR67A6vo/enyxqz8AmFETsQmibyfCNHwpTXA1ch5DufCsHru/lwPBBKgwzU+A6Mwuen7SEeXEJPBYXwbicPl9NdF4dBufFQXDp7svP/MTXTJANMELW3xXKd72hnOUD88Zk5D7o
jlaNQ5BHei32Zlt478ggnoyCw4YwKL81w7tXLyR77UF7DTDUG+jnSePzB/oEW8c5NMHas3iWSvMtwkdP+FnIUMJvnQnztFReQpZ0D+JsliCi2RyEN38fkcJcRAizqX2IYOE1xAiLkSPbj46mx8jWbUWYbhbCAt5HSMSbhME0sKvQQtLBGeqJXnBZEQntijBovwuDx6YkeO5Kh9OmKMiW+MKmsxZ2r7vCc2cq/PZmQ7PBhCCPiSgWiNdlDZhGGPnrfuD4mZ0XsHOcvhFNGBgLPubRaU3oFvyYcJyF7+Wz84UeIdY4HYYj4hVLESl+RDZtNT/fYX2sfBGixQUk618h32k3KtyuId+BndfvRhXpYJfPI2D+Ih6+P6fDvDcV2u/DoJhFvDSI+H4gYcxxxlrpUp8rLkfj63RT/aGa7IWkw22Reon4dWoCgmWvIk22Ezn2dzA0sgFrR1rx89gMRnML8UwTeoc38X3SKfkWjMuw7sWPSLbOkZ1JdfJ/hFL9KdJLp0hvfYt09WbCeQeRpl5Hsr4P2ZqdZKt/RKr6JyQovuFnRrnanagJOkr2bTwK0ReGjbEQp5shm2GGpI++kfDGBcVo0/eiVJYo8ZQXiTM9dxrPp8J9dTzk4wxwGh+GONk3iLFdhHDZB0iS7EBfsl+LegA/jrL6An0jLfxMpcb3Ptr73OJYaFDcM7JpFn6+zObCzln6RzcSpnvC58HOtTLVO/hcWhjP0Dj30m/voJXraVR53iKbsZvs3zFkOrL1Wga3oylQLvKHpL/+nKyNyzDbt43T9OtjGj0HhG+Q+Csvy5I1jxXjTHBaRrqBvif/3OeCZInL+qCwyQ0ZzQ4j1/EA4Zn1yNccRfeAJnTyu4eZ5Q34oMLKHx3IJ2P3Z2fAbTwuULvC/bCx6exM0MLlg/kyHfzY9+4SZj1K9u4q8nX7OJ3ZfGp8b2EAzbHKfBuZiuN8X79z0K8ItfkCkrGu9+xaa98hjGOU+SgDPGZHTouvbYcQwiC62cEgHwBiT5Lr4a4X7ZIdKpjdChWmT0+33dtU7f6E/A7ws9OuAaRfQuqIN5owMcuC2e3YuSU7s2vk5zZ9Iuppbo+Q5rgGE3Iu8/F3D26gVk/8ZaHx1/KzPna2U+19Ey2J3sw+ZznuQBv3q4RLHiNLdYDm/yuW9aXxB/+KgGYfkh9lPK19xW+T+4qIq/qfgmDYFAaHFT7Xm3/hvFu63mOF8guftYQjNskrDAku75j6OJmTlyRLfkSR+ipqPB+jwOEsKlzvorPfM+LlBi6nDO+8U2Y9C2LvuwXVc5nt4Hcbqar1aEd2YEgci5NkvP+Y+OIGtw1tTdfIxv1COI/5Cdf4OVmp/jTZvhXcj8hSHqfr3cO6MST3oVdJN30F8VsjVMu8YXqnFIFl/eHp3XdPrOTTUYnCvO75wuw+JiGn2qZGfcBvT+YT/6PpCDJOR4btARTpTqFAewzZyqOoNj8lXPOIxvmY1vou8c2zf/GF2RowPmJjZWen7Oy2O62TVa828TNGJh9s7K1cz3Nb157wKTuPzHHciwqPazwOgOmidNV29A1/jPFpT5GvPIGE0GXwze6PgqCl6Ki5gg4uvzwwCEXdHGOC33CdkHBJ3yUaygpPOH4SAP2eELh1ao0UYRPHOamKLchQ7EUb411Uuj/gfNI3spF4oYFk8THJahM/e5yUDa4rRyZbdU530pfsvGRgbD2tVxOXaaZTW7tdIF4/ROO9TvbuHpdVdjaapdpPNDpCtP+Fn5FmyYmvBMLfMeeQ8s5kmLq3RPDbbeE3Px/GBRFNDnN9IM71hOlACpy3RNLaBEC3JRTq18IRa7sEadIdNP5tSJfvQjsT+eGEdUYnW8/GGc0HxLBz4yZub1ks7Vvk07CYTDaPt4mnRqZY+Dly54Cn9D0aOz/zbSC6n0Ua0YRhi56hdYT1DnHcFymbhwzxANLE3UgX9yDH7hyyAw4jeE5fJJm+g3SuKxSrfaDaFgjlz0FQbgiGx3HyH38thGlfChwZhv3SFQHBE5BBOC1bcQKt9bfJjhCfhDfivTKrr8X8lG8GWON/Gb07+T/B6NRGLgcM+zOd805r6xyYfDDdb+WpRvQh+1CgO0x6czuP1Wilv8LtlVnWnuzLpL2J4g9vJojfz00WN65KFbavMyxocVM6zwBdbirR2gvK1UFQbw6D6sdgOBIuMOyIB+lQKOaR3/ux6w2H6siDybY/W7LEk2jpdA1VHo/QybeO1vkO6Zmn3Nf6tJP1jJadFbPx9gp7RnzxDK/kWe0xiyf7uAPwCX2P6SbmVzLZ7uT/kJ83ZznsRopiA3oEMdx6DJ7Nu8LDrsV6e1GI9/QUZGWmk+os8QvtOOGZxPVczBzdzCBIR+ggvu8F+Uyv5/Lp5pPiZI9vpBPcZ9gOcxnQfKBTG7vejrFCsuAQKkz9PlG2xpKjOIV2bs9R5f4IncnWdAt+xM85hyc9wxslz/FZF+DDKvAxM33T1nSL5PQ5x2yjaC0+rLTgq97AEtJ/i3pa16GD70Mus0nyteTTrEUa8WWw5DUY/DLqvE3tb4lOiufhDqNPF9ldeCPO/vMJHpp2IzzXJB5zmRcG+27OEIe7pcgqXTz+ln8SLQjNfCS9fgwVp5Ot/Ropsq3IV59FW3drrEa/KNL7gY+QIu5HPPktnfzvcVozn5HJa7nbVYzLfMjjmruHPKP53OHxwYz+jNcWdCY7RnJTZryCJHEdYZ+PESgbB3eh7SDdkcDVum/8GG0hGeLy1CZC+F71nutp1WxP+G/JhryH8aw0XTPy7/lXAdLxXzOMkCkeQZ7qDKpMt9HVvw5vFDN+biD+qeM2a1pxPQbE3iR9fRjVPte47Z1dxfZA67k+ZXaAyUHfyDqyYc8wgnTm7EoaP/HSIJLhLMVhRMjmIkicAJNt217cr/4196LpSBqkX3hDs5Z8S/KxNJ8HwfdmAXTvBF0me+v498buLRnUIluzAywuLU91Dj38GzCM5HJiJnhM2uRshr2ekf5uwqddrHudTCemK/eRfbLayO+HWm0Yk2kW2z+I9GUrw22a82OsGkbXSbMgR3WS/Pc3ECCOqA9sNqaa3dv8beKaoItFCL5UDNlIt0af3ZmW0BPFcHzfHx7fxTc2q3Jo83fpLo5xI/l/VuB4Bq0Nd1DmfB+9gpowjmzRDNLp77Sy0pPFfLDYgw+Jlp91tfJzC9KDcdI1xF/P+OfMhk0jWX2H9FMXkvly4x3CO/X4pIb4Rn8NQfZvwlXM3+or9PUUTgsS77Wp+1w+CYMwzWWS1/nsC8bJIXWSYUZ/l51xq6UDjbB3li3+e2MXREHuJXbbFStbRrb7BN3jEkp119AtoB5jme9E+o3FLTA9w+bA9Dvbt90zzRo7OTzxKcIkHyFU+Sr3tyrI363yvE56hfCl13MMiAB6hzShhfYmycxaeEk6XmD3tRsuCXZdGn3DaWbwMyFGmsA+c9kXN8+0LJ5t3DdTfxfU3+mjEPJlxeF/NHapKAnyEXvtiRbnI1qyBClO81HufhJtDPdR7VmLnqTnmB5kenx8poXsf4PVzhJ/7Jp
qjcMYl0xYh/g8RDEFNcHHSL/UotDpONnrrSh2Oo2OPrXIVZ5FqnQ/woWZz9Uy3+Hat3zGG7+OhuN0v/NCc8Hn9/GoN4TVuKyLhm2s4rWgS8XLfb9MsdBc3P963GpZoOAr9h9EeqYxWb6ObPV+FLn8wu1fqOwdwjmEYQmTsFidt1o+wIaxVn5n8Vq9CdMw+/ol6cP19PlsWpuunozuZzA5t47Gfw95GsL0yj10HfI7nDYhRrIU0dLPYCzPbnD6OuCp02LyRSd7Lv1rmbRZ6B7mfja10XmMf2P0ltL6gBUZTfQd778ev1nWVRYnLn2eqzyFDPl+wt77ybY+Jj/tDNmUdXzfY0j8QwxOOocEmx2Ylv+YsM0D0n21NC8WW3wfw5JqaQ4PSF7vEia6jnEpz8g2P6N5nEeibD0yVDsIy/yCWMm3iLB5H4aoXKjXm6H+yB92VboP/iYvFwsyl1/ibhs/jkDkwjz4fJuK5mrblH90P/1le9letpftv6mx7s/3jYJgetGrWV+72fr+4mRrP22zIAHLrdosCJMEoS7dGthUN8nax8Haq170yhd95O+95UVgUsIfhixZ/y68iJV60ae/Y+3r0qz99Tprr37xf48XP3P4y2s0Wq/TjPpJ1NtQj8mCYD+MejZGCc2D+pSD1n7cfetPMIjniDU7T/OrpX4TrMOZ9O4/QMcXPYvTyqA2WXgZp/WyvWwv28v2sr1sL9t/fbMbpl8Yd6QMLC5F/ZU/5LPNSPRcg1zZcR6fxM56ksSf0d7vAoanXUe/uKvoHXUNfaJvonv4TQyIv4mR6dbXfWNuotC4C8mqVWjrfRa5ztuQpfsZxcZ9SFavRIx8ISKaz4RbahtIEqSPREHQ/TPnLpTLOoTsyH+QcKsSqoUBsH9NA/+8CSiyv48C5RW0dL6Otu63Ue11C6/lW/i+GDuDZXvZbB+M1YNgZzYsL4udWbIz2NGpdagwXeR7lDW+t3kuWonLKWSotyJevhyx9l/Bx9QPsjy1RSa1y/inzb2NemrQ1lwk3msH+btekKY6QtJDDX1lEXIk59FS/RidvOvRPaiBz3dcpnX/le+7tga+HwZ83d8akzo2zYLX8oBXc635SxPou/0iGzA0Dmjv9RB5jkeQptiGFPkGREkXQmMbBWmeAmKIKv1/et4StdRk20W3JWhnLtIed4RshgnSNEeI7fUQ+xlhVy45k6LYeaGtIzAqBrwWBjvzYo3tKbL9cXauxPKxDrxljSfr6NvAY3HZOQyrMzIoGhgSa41rrfaq5efI8eJy5Gh3oEfwHWTo50FjCPvxf3ruYpRjsv1Alxu+ezKRcLcK4ruekOZoINbo+XmymKX7WpAJymz7s4tbKBrQ0wx8N5BoUMbOCh4RDeqwsCswMLqJ1rmB7+193Rd4t7U1LmFEkgU9QupRaWIxIY8wLLEeVV63Oe8nEO/HKD9Gr4CH6OpZDze78g02guD1P8LrohAodjbMkwwzWlxWRyHqcised8jn3sYFYifDDTFD2/P376eJO18pEevQUiR+J15eSuvO4kFY7lmxnuXYHkWOZj9auZ1Bv9gzmF7yFK3dLpGcn0ULw2mec8vOf1q7X6LXR+i7u5GgWIEQcRq87PrBaNsKskoD7Ku0DZIkhx0Sk2KuTLBvR/rQn5rtf9m8fZsHO47wWuC9KqVBOc8PygV+CD5TCPUsP0hTiOczdXfEti5jRW+Hf7XHGif7OrFAfs3SQm5BpQb4tB0wo0UDCnVneawnswtsTVMVmxAtWY5k+RYex1rldQn5ugM8LqLM7Tw6Bz6k/ldkOexCknIVwsS3YWieAZsQAa474uC/LwfOX4bzPGxJtRMkwUrI1OI5UZC+IwrNff5oXvaCYKcK1/1hDGLzYJmbfLzpfZfPwxuCDuXBf2MWpGNd4boxAbqvwiDJ1TwUgxzeFguc9H/r915iV32S+GNdpvwICuS30NOnCYu7sTMbC9p51qLMlZ3L/0prTnre+yGfc6nhOLcHleab6BX2GN2C76O99z0MYvnhHleRqlqHCHEW9EIu3PsGoJrHkA5H1pMuKHrUB4kXK+H2QxzEce6Q5GsgC1XXiwHqjWKcZqW82vC9vMR5pdQo/1Gml+9TDTRddP0o4qlMsP03+/aCp6BSfuF/y+9INoJPFEAzMwC2FRoe/+UwPwDScudbokL0/Xt8Ey6+F5sibkW+6hxau9zB4EgLP1djZ7k8jirhAVoaLqJ/lLW2EVv7DPVObusYLVq7/8pjLwqdD6HUeAR5ur1IVq7h668VouDazxdtG0Yh72F3JN1vh46YjG6Yig6YiJy73eC+MRHyd7x4bLOstxEK4g/Dymget6waboK8nyvk/d1gH6++ZqeXjRD+Ira++QzDrIAT+bwGReyhVrDt7wTnpeFQzyGeb+lUJ2oVif+e3MTKvmqZLZ5FW2dgJNmwD2nO89qD5xyzs96fx7PY/zqMy2jCq2QbWZ2nQbF1aGu+SDQ4wuUjWfEjjxeLI72fQH2C/BuwM1tFcyMiV2Qj+1FXeG1Lg+aLYCQerkDLukFoicHohFeIN8Yg7nw5j+lj8Yekt6CYZOb1KxwXBUFB9loxxgO+WzOheyMQtmmqw0SDbJtOjt3Dz7ZACQYhna6vXRIG084UZDzqzOIALVKVrN2f0Rtxsm+6ZItn0FLzGKOJWisHA0ffBfZOB+aT/fuqj9XOM16YmG3h+eksdqt/zAPkaQ6h2Pkk8p2YjtjCY4cKnA5Z5V/yNhxkfoi/0Artaa0rHg9H6C7i0UXBMP2UhNw73Wj+k9EVU9CZ+lb1gxB5sgWcl0VA8qobbPs4E0ZxgawT2auWzoitrUDW866IPVYG7Rhf2IbJ4TkzFpq14bCZ7fag+Ty3h1l13RB9pBWa56q2/1m9mSB+PzRLPIUSx1sYHGPNs3mrZT020rrvfxPY9qr1jJHFZ0ym//UMfc7PFVk8Vwv9bzy+iMWQdAl4xOOlWAwM05dRsnlc/sM/SEU0qhD3oAppTzsh9EIJ4i63Qd6T7miLETzGvwj9aB0HoA29LmsYgqwrnRG4PQfa+UGQTnLjcQLagzEwnEnmMdw5lh4I2ZQHuxw1bMLFTTLBLoywzbmoa62hmxEIO6NsyJ+df6K4piZHvIAy3SN0C3xG83tGeu0hrzcwJr2WsE8TPxtm55bsHJvhPYYRWZ2yAYR92pqvkuwfpt9c4/FuLJ7eKv8z4G7TAqJJCcX3/rD73huSH8gubQ6B4Wgi/C7mw+e3HHifzYL3Beov5yD0dgtk1nclao0h3piEqqbRyLrWGUE/ZSNobx5iLpTD/dc0OJ9NQC56o/h2X2hH+NwjmbjA9Ib6Y39IExyOK4Xmyj87/zRhV2CG9EhTK+0DtPd8SDr+MdqartNa7kGU/XdIFNejyOUYhibeJSxYh/Y+V0kO6jlOGBRjQY3PI9IDh3jsR7bjTsIFp9HKcIn8n0/hLm0BuVQN5duecGA1SBb4Q1zjD9XhcE4HyVek977zg/hzIFT7wuByJgHmq5kIul+MmGcVPB+hHCPR6sEgqD72g+7zEOiXRUEkes
p2ByL5SQe0ahoMjw1J3N7LCNuRnUj+R3CDY0u/TnHqZQ0ZsoPIILFhZ6dpys2IkX6JaPEzHlcRI/sS8bLVPNcsTbkF2Zqt6OBzm/ylO2TvrvC4x0zHTdznSVP9zK/D6g34i0PhKPGBPJFokOcCeVsD2Fkvq40SfKAA9m+5w21dPIz7E2G+kkkyMJhkYBQyyEJEPGtBPJIJv2PpCNySAeUsXyje84Z0nBtkpU61kndNOyQ7/RHxsDXaYSxizpRB0skZUsEu/8/Mm+xsjXSc02Zdj0Qk2K1GnvIMCrUnES9dw/P+i3SneSxhvPxrkuX5iBEXWfMAxA8QSe9T5NtonS9z/JdHNMty3EpzXgp29h8hfsjiOaiNJgyUDWWyIzz3pUG/NZbb6d54E2UYCu23ofDenYbA/WS/f0pH1lcVSJiYD88uoXCODoGzSzSc5NFwliZDtzQUiqlmiGPcoejnXi/2MFwVfwqAy2+JKEZ/soET4bQwFPbhqkuiYJchCvZq8a/ODejPRszW5WneCditeN0T9hMc4R3aH5nNDqPE4S7aeTyhNb1Ja3sfvUMb0c2vHi115zmmS1P/TDjoHHJ023ksUoZ6O48LZXF/uZq9PL8gRlxItJmNQHEMvMROMIrZcCT4IWtuC3UrPVRf+EO/Mgq+K9LgPz0Rrp0C4FESDpNPEgxCOlyEfNKZpfAQOsBfGA4/uyHwlfSDp7QGLhvIPk41cZzAcL3zmii4H0iG/lgCQq+Xko0Zh8TLlZBPJz8nTA3RRnZPVMrPioEOu8UEzW7yd3bLexh/c5jpD8cFgZAON0DS35n01PvIFM6iSkY+XRL5eGTrPmnN4sCeYHTlJfRuexIDkp6hjdtVXm+nzPU86QPy8/UneIwjw3sM/zPZj1bNR5DiFQQJkxDc7BUE2oyDqVkFdEIYFALhPEGEPUF+G6EZJALJhGCCY7NIOEty4C6rgknSnnRmG5p/W3gJ3REgjKI2Ah7N2kDxQyCcV0VCOyeQ5MALjl8GQ7cqHIplAVAtCYTfniyk3axB6PEiKD/0gbRUB4mbHPa+ctiVOMKumxNkw1yhGmWGsr87HAaaoBvhD1PPbHgNaInEaSPQ+ZsZGLLuA3T4fjqylw1ByvwRyBo2B73jH6Gz7xOa+288PrIr2Qlm8/pFNvE6QSyetb2ZfF/zXvhMqIbLmDjoShLhEp8Bp6xEaDoFwXEoYdLxdM+Z3tDO84XzV0HQfx8Klx+C4LaR9NpKP2gXmqGdbYJmqju0ND7ntqFwzcmBPj4VgZeLkPKwA1zXxkH2lhkyxgtTPCCb4M5qzUAx14cwdTwiTpXCe38GtDw/Igxey5MQvqMIqefaI/tWV+Q86I7Me52RfKs9Yq63RsjTFATfzkTmmgkY9PpW9B10FDV5F1EVcRtdvBrQxRXoFdLEcwlYLHvnwCfoEviUxy2y/aHf8yNYLGm16R7y3fYhPpH0ZtgsBHm/As+0bjAPL4dpeiu4vVICj6nFMExIh350BoxDcuA2ogTeE2vg+VYVPGe1h3leJTyWF8KD/Bi3fUkwHo2Ay8YAGLvTOg+gOY+n+b5OemC0O2SdDdyHUBBNFR/4QEcYkOUt5t3vjkrLKG5HW4HW8XFHBB4vgHFDHBzXhkG1gWzPl2Y4zoiAf9Z0JLjtQKWEsL4nyxti8Ts05yALr4HVOwLczvN5RoPnI7A6RG1NN9Ap4C73g1gsKotj7h3eiArSCUXKE8iU7UCCZAWimy14kQsyG2HC24gUPqb+LXr/PkKE6fx1gvA9MoSdqNLeQ3v9faRIf0SYfBZC9e8jOOBN+IdPgCSP5KeG/PdRNO9eRkg7uYDpMDZnx8+DoPkyBJ47UhF/vS1Sb3VA6OkSOJHOkH3mDbvxBtiO0UM+yxuaz4MhLvWD5ls/uFako0i4jyKbRyiSPcOAENIBLUj2K6xxb2xebM5s7VlcMaMBi839fa5VnnfRI/Qp/5w1Viuqe/BzdPR/xPUEqw3GbEKUOI/7gyyXhOeWKFYSRlpL/1tMfsJb3GawvAyWX9Kd5Kmtxw2kyjcgUbISGdLNyFXsYDkZsK/Uwb61FopB7nD5MgIem5PgsioKjsuD4UDN8bMgiLNpvgtMsPvaE6pVwVZMlEr+/wIfmM5moLipH+LPVEB/jvROYRwSm5HtEy+hUFaHnl7A1+Tz/DDSGg/I9vvYnPpEMNzbiJ5hjTzmmNVjYzGlLH+jT4Q1Zp99zr7LaMW+z2LIWQ4KyzFOJ9sRIc7m+yIpNG9mK1j+GXvNsGK8fBnRhvCGbCHR5jsU64/xPAvmS5QajmJA/B3ov42C5w/JCN6dR2tbBM/9adCuCoN8gS/EN0gfVJOv0N1A9sGN2cin4uc+tzSHohs9jqdCM86H+5BBG7KRf4ywxckCKCeEIdBxLKIl85Eh34d8+XUUKx/w/F3mA7H4tumlVh7g+TeRjAYNVp1HOmBSjoXnULC4ctbYPiGjw+AXeQk9Q5t4/grbGykju8nmmabewPNXrHktu+izH3jcObOfDD8WOh1Gqmot4Y3PkKPdyf3rLO0mdAw9Qnh4IrfxCfcqoSWfR5xF2PF1E9d9sgrnRnGAq0V8zfxY7KhPFuWiWiYIznadnYZLvvGp87tdAK+VyZD3doWiC/kSk0MI0y1Gku2PCBffJf77FEnEZ2XOt/F2qXX+bL/v9zhilsfDcki6EW929H9Auu8BjxtmeRlvtrDWr2J+E6tTwOgwOo3pDAuXgy5EK5bfkE9+EcPU6aotvMYmW9801QaeZ1Lje4/osY/7UqyuWobDJu5L5Wr2E898iwTVMiTfbw+fMzkQF5JtG+nK8vcfkA68Ln7tB8V48zPbHPU05SCP7aJOsVYMc9ggVrqc1UzxfarfG2+RbydbuTUKiuXkf6w1IyR9HI/FT5D9SDy3mOhtnX978xPS9fUYmfIYX/ezWPd/08HzY1huEsvR4PafMF/PsPu8Pl/X4Hs874D5xCx+ls2d/YbJBovNZ/kpjG9YXiyr5VhCGIKds7QkHmCfMX5g9RwGxjYRjZ+iynwHOapj6BV6n/zNRqLXNiQov4aM/AbJB8TnvQ3XxEr9ILkgt+d7IEs8NsY8bIuI97IfygLVd6QJ1v1t5fs+UC8MhMNHAczuP5YMMHxqM0de6e5duTFBWEU8f4jW+wYq3e4i3/EE+bK/oqt/I/qG1XNcMyS+nsfDsz0Plr/C8iW6BT0lW/8U5e6XeU1O5v93D64lf9maG8LOAZieZPLP6jf2IF+Z4UPmI7CcU5a3xWxovtM+osFOTkdrLb0bRM86XvOr2OkMUmQHMKNFE49PzlTvRrhkLmRj3Z/K2ukXEYZVsj0gwWCb4dDH9HrI1vy7mY3dEHi7GO77U1hsN1QtjJC1duZYUVrtvEgU5HzPM0AYlBVr+1V9rpJwjNtT9A8G2ruRjaO+L61xOxPZ9LRGvv/Pzng+am/1+5kMMJvH9F/XoCc8N6BvhIXH5aboPsabpfc5//eNZHkndXxfnOkMZit/94vZOjN9yHADw45sn5g15i8zW8ryq1obryJZtpn0/0Us7
A5eDy+TeCVU+i6bz0Gx3GWZZrDXVo+PYh54/pAEr1/S4LE3iTCgL+w+84D0M696zZqw+8o3vSD2Mt4XjYp4opXWPkWaqzC6TAiXvfs0R3ESRepr6OT1DBWu18i2/IZqM6v/SLwf+pzHIzM9xviXxSWz8x+WD8E+Y/v+HfweWnOygh/xOgCsDng78ptZjiXTlYzn25P/0M7rFqq8bpIOeMj305ksszq2LF+IyT/zH9jaszqn7LykhGSj0u0BUmX7yJbe5zqI8R/LtQ6z/QTqOf5w+i4c5mPx0O8hG044UT7Q94myMnyfc2nWVEN2fmVAVqsQnSROIZnnFSZ2Miy3G2v4MOJki0fua8LhFlCFNJsdyJQfQInuHKo87iFZup3ns1ST3Fd7PuB7OgOinxLmecJrlrD9DlaLkvVMrzH71ptowObKcoiYbq8y3+JYYHC8VU6Y/u9DcsDmzfR+W1p/Nv9S/VmOnxnfsDw8lpvG+KDa8w7p38+5jLM9tFySqeFJ9VgxgO5dTPIvI/rYroL0W2fYv22EtkMu4pNmosx/A+KEL3fnCgdGthKu9IkSZn4Q4zrkRJxs9B612W2TfQ+2FxqBlMZ20H+WjFi7ZTyPKUXciFKX35BFvnyG/BeUG+5wvcfy11jNUVb/nsk/W0NWl4/tefH5p1tzTdkcGR0YDbqHWGtmstxNlgs5kMkFzZ3pQPZ5pfkG9xdYTX+mM1oZz/N8SpYTzfbJeD662w2S8Z28jmiqcgOyVHtQ43Uf1R4PkCk9hRzHY0gYNh2B0eNR5rMBA11ItnR0X/OBRoUQ2VEwChU2ec3nO75tvqKZ4Q1pmY7XxtAuCoFhXSzkyz3g7T8Qac23IUn8CemEpZLF9UgVd6Ot612UG8nvNT9E7zBrTjDDe2xuTM7ZXhhbd4Z1phRY15bxPtN1Y9OtuKBrYB3P72ax8cwfYPuDwxKZDnxO/vFlfkbE9hEYpmP6jeXGtXG/hgLtCRrLTmSrDiNLcYjnyKXKN9O4tvF5Z0jPoEJoQkXHvYh6vzd88zsjuG81QidVwHMS+Q7Twy3O7wZCxWrrzvdHwIk8eB3OgGwBvf8mCM7roiFfZYK+vAhJAuEs8WekyDciTb6N72+w3A1W17EPYV6WT8NqPTP7zebH8gDZGrN17R/dxPMsmB5guYxMLzBdxz6z4j+mE58Sv9Ry/cdkgf2O9UwGWI0Ktp/UQn+OX5PlW7G8w1QaC8u1jpUt4flBrKWK20k+DyJX/huKmhFmKNuGsHn9kE+0kH5hhOOmALidiYPhSCyc98dAvT4UIuF4h3XkR26JgccvKTz/WrcyHI7kV6qmBSNGshBpkt38unnqU9zWFel+Q4+g5+gfTnMhmz23ndXOzSV9z/Qu8+UYnu9FMs7wCzvzZ/vfTA+yXDA2b0aP99rgRX1gK0bqQzaE8U930v9MNqq973I5y1TtIvmq5XxSpD1L89+FQHEc3x9iss/yCpPFdVw2WZ5hZvMjiAn/Cupf/eFITBeumc33PqSzPSFZTvhnuS/fN1T+FAz9wXhE3WuN2Ott4H8sh++Hq2j9JfOcYY7vjnRhL11zJ887LGc1pnwaaCxPuZ8xh3Db8v7WPW7G46z2KcuN/T03j535sHVl82M0YPaA9cw3YvWdWFwE6xntmH5g8sHsfrfgen4WVuNzl+eVpas2Ywjphu6Bz5CtPkA+zwy4iSXwFDve85X1LwoSJwVFyuZGx4srSuNlK7rGC8t7On2Y9YVqbyCkg/WQjXGC+LE3lN8HQvdLDBx2RPB8RYfN4dBuiYRuM/lAP4RB/MQbsnfMkL7vfk/ewX17rGRxQ5bsBLLlJ1Gqu0L25SEq3clnC2rkZ3fjMu/xHK2fJ1hzb+a0s557vf/C/2O5ojU+D3neLlt7ZgsYj7D8OdZ/2tlaL5r97s3S32sGs3PDu3z/nOl7dm7KcmpqfG6ge0Aj+XxfwySphEGacVbrEBolOAvRf7lH17JatCvUTpIrjnoW+uzNgtjPFQ6vekOc6w050YD5sMzfEV8z3ZW96nFUOtHtZ8kY42eS0cbpssGuQ8RuhixhrSANF2bMSJT9YGH6JF95AR3MhHVM5Fu6kW023yUs94j08SXi23q+9iyfivk8i3ta4x9eK7DydrfgOo5P+kfVc1+f6Qj2fBF2NsRiQlitI5YP+UUPa/5tP9Id7PtM97N1T1Ks4TghS7WP+1k+0j5wk5cgSj71nklWdEYqs0Ge6vv9JZqrm9LsdiwrEPa9mi2s6SAf6zEz+HAB3++Uk88vTvbYSL7/J7K+xs5iZ0O0mK79w9r/etuUSX7iYNIvc7ney1EeQwun68QDN9EtoI5sO7PzTdy+dQ9+TBjkMOGMw/w5Byyu46MaqwwwHcDyoVvoL5C/18D5nPl27Xmu90OSBQuvmcx85RUDgWX9rH4AuzbL285idWTk3yNOXEo+xgKESKdBr8hcrpkbeEE3mjBasRPklQZoYiOva4XQdoooY6V0iO6TZu1l+9wXxiJgXw4/AxPbuNz6s/v53mKPBaHi64ghvZIorgLxAOn77chzOIl2HrU875XZNZaby/K4S50v0Tj3kZ5cyX00do79dpmFryfzZ8ZnWPj8e4XfxMc1Fs7nA2Ke89zYasIKjB+YLWC+0nLCLO/S60n0WZXpLt13J587q9HuLw6Bp6TDPDZGw+mE056nM6H8LhDKbwMhm+UOSbYaiq5GuKyJ4n5LzJU2rFZ5kzTRcaQoSP/Ucx58xD6dWS2KZNKjOfJTXO7L9DdI55HvQrL3Cqup15FhOWaz6tEzpIGwfQP3c0enPSM8Rv6YwwHC6Hd5jQbWGAafnEvrSTzNns/DdP675VY5Z/Rj9UVGJVtzJFnO51tEn/5k+7IVxxEvW4kQWotg8RWYxIpZfA9+UPMIzxOZjX5nc6FY4g/xUx++J6f40Jvv6WlXhcP/biH0X0agmbd95z+77oJEUIaJ714o0p5BiRPhO/lR8u/u8HzHgZHWHPmhhL3ZWe7UAvBzq+7EA0zWGd+uGGRdb4bTMgiDtfO6zGteLx9oPQNm/2ON6UVGAyY7HX3JF3S9jT7k37J4KaYPBxI+LHA4hxjpEpBe543W/o0X5w9OvusyTiY21CD6bjkcFwbBfpwRgecLEXe+DeTvecFzQwqCLxfDbpB+3z8w92Zhspnb8tQn0c0PhB3rUOJ4E939aW1IhkeRLXuXdNzbzE9NYnWuLZjX0cLrmzEZZ/qcxXb9XrOE+aYJsjWE6Z/yeibMRjAdz849Gf+wmtm9aM5t3e+hWHeZ9MZjbJpI9oHsY77DJcRKl8NH7EWt901q7fkYs8RAj+Xxv3qsTWQ8X+d1OQdhewphm6q67rwhembsnba1Tl+FQVpJfmuZ8ylJnubfxHv8UQsWJ49iPhSrr5DreBRFmvNo5cRw/TPy5wnDZ7G8ZQunAZsH86uY3mb6m/E+wzysBjzz45ndL3Y5jST5ar53NyCK2QELJmY1cJw4lWjwKvFB72ALzy3uRTLE6jkyvu/iw2oPbqH1HgZnMWaNThrGnzsm76zP9t+Sec99
ZTxsq7WLhYhm7oqfgub7HsyGskS/ldNnlq5QNt+bxWdBJkj+Zj7l32o6MbTMTxzSECcuQ57mICo9rqJQcxotnQlvmx7zsY0nDN+dbP7rhdaamgy3MBvPdDWLc2R665e3wGuQz2rLsPBThEs+hdGuHHluaziWYXXLmWywmv+sTltX3wZ0ocZ4q4vvc6L3XaRKf+Fnpu5iS1azjteAl3XWD3BfFQ/nT8JgU+zwLzUEbRZ41Pj/mgeXCYGX+PwHqIMMG2LhONEHEpV02J+Zu1YMrPISu1n3VpmuVy9B59CDqHFvRAvCO+29HvB6cf0j6vBmyf/LRR+aYOG1kNg+BstlZ9j24NvWfPD3CNeOjH9AurEWBa7r4SmvIZlp4rUMWawH28PLIt+1WHcWHbyeoKtPE+GLi+SnHiCstx4hdq8+1wohZTR/k8MoryWmDUk8LskmVjH0X8nsm85mwy8JDbqPgmHTvHmh7Ef/DtGXyqEZ5HVH+HdqsEpFWzdXMX8Z0y3piu3kS2xHlITVIXofPnZjkOLwLfl2teRz3SL9dIfL6iq6+9J+1vVnezpsL5OdYfFc/hxrzSKW18xiPSYS7htH/lC56TjCJB/zZyKwfRtW26RQd5zXCUmj+3b0u4lMzU9Ile3iuD1WWAwXl8wryjHOi/Wfhj9nZ9sOb/s12CQqqv6NzjIIto5bw694HEiBPFV3yGl5+Pr0h52h7OV+6u/b926+5DtcZmcJ6YrdJPOEbxz2Eaa/yfcP4hVLeM422zcrd73Ka1v0CL1LftoF/DTCgg3jrbna7PkKnfyeYhjxwnCyZazmwg7yA1j9qY9JY9W4Ek/rrpMtf87tO8uRztccQ7pyB3IdjvBWSHghTv4lYmSLEGk3B25pLaFbGgDX9XHkf4c2yYa5rm0eJP5h/VD5usCV/jcLYJgQBPMrUSjFYKgGetQKL55X+LdapPjxRIZp81S/clzH9m1bu53hdfwLHM6D2T8We5yvPcR9zalF99Ej4ggChNXoE/iQMP1tjE27iyl5tdwnZ8+WYnvO3UPukC28jU863CWsex0V7hfRM+g2ZpTex2t5D0jur9J1t3Asn6HcTn77AbIRP5D/+iXC7N6Eq7EQyq/c4bgiCNIxbrBrrV3y78mvZJHXOOP+BBg+jYB7/2Ck36D1H2mCja2N/5/Vfy/by/ayvWwv28v2sv2fbv/Jv82C0Owv+xdVBSz21veNKmvPCyhQfwFWF2YKL38Ei8RamKFRbe0TTNa+2QJrb/N7n44XBRmsvT698UVf+6Igw+Y/rHHwN//crF2zF3UhlC/6tC0XeG+xsfa1g6zXfTFMQfL77SZbe+nfvLikznp9dZ2aD1NZB/67yAfglx0ssZalqLX2zS6oWX0IQXJ0AZ+der219xi3gP88pWkTLzuR3LSJX0bdmH7B2pv+s6vH6kR0psaH8bJOxMv2sr1sL9vL9rK9bC/by/ayvWwv28v2sr1s/8cbK84pGWW8HrGrCLE7W8BtSQy0n4ZA8aknbBbZIypkAbJtD/M85FztbmRptyBbt4nXqsrW7kSl7xn0iz+LwSm/oXPYaXQIPoO+cefQPfI8ekZfQC9qvWPPY2gae30enUKs/6sOPI0UB1bb6nOUeuxBa68jyHH+GWlauq7zTygwbkGuywbEKhYiRvE5wsV32Hk4fM0DIeuhh2hUNIiCnd8/m37/afp3cPjQvCHZUtLUH36HsuG4KhTyhd5QzPBFQMmryFdeR4H8CgrVl1CivYQywzWUOp3jsc4dfWt5HMb0F/EILI6KxRKx1+wsnsWP9ou0xtmyWDMWc8TisVkMKqs/0SngFgqcjvJcIRaby3LPczR7ee2dloYzKHE5iVTlOh53lyD/DlGyTxAueReO7SMhTVBaREHa5Z9Nv/9okxHbN2vnsNj1+1hUYARCThexnA9rnkuVBqregciyP4oCCdFIcQ2Fqqtorb+DavMjdPN/jmHxDTx2ncUAsWdMsRjmMenW/LUvX9Q7YbFNjNbDEqzxUCxOhq0Lq3XFaj+xZywOiWtEhekKrcU9/hyecvdLPJep0OkYzwlKV7Fc+GU8x4/lzIc1fxO64GhIKx0hquVz/tl0/I/R3t7Dpp1mh+vqOLTEIASeLIBkntlanyTIAWIHA+ynOsCc1x35NjdQKL+Dlo6P0MWnEVWmRzyfnsXesfgzVkeHxZyx2CsWU8piqlmcGXsmBIvDY7G4LNaW5R1NLyb6p1tjVNjvee2NfOt1+kdaa/L0DQfaed5Hke4kMlW7kaHciRT5eiSI3yGS+N9NUgTRSaQxOkP0Ub/2z6blP9rEYHU72y66Wx4bk1CJ0Qgi2tvPMvGcTzGSeKpSz+OEZX31kFVqkKRej2JpPdobLBhGumREvJVev+fzsPh9xsss7o3FM7I4MBb7tprov3Ei8ONoa/xzJ7/nGBrfxPUQ+//ELGtMNIvz7hfBcuXAYz/7hluf21nqcg7pym1c76QqNqPE+QxydDtgsh0IR2kgZCnqG2Kco+mfTc8/2+SC0FzM0c20H26A2+YEzvdhZ0pgP5do355on6BhzyLmcatidyPEAifYpgjDUsUdPxaJpJtdgF7ewEeV1rir117oExZzymLN2LOhWXwt0y3j0oA1w4FfZgCbJ1tzEXqGNPJnlDGeZzmm3D7EWGvudfZr4r9j69cnvJHnI7J8PPbsOJZzHSlZhG4hlzCOyVv8c2S6LIHK1vedfzZN/zTPC4JCbO2yRjLZDcat8Shs6M1rh9jPIdp3MUBM0lppP5Jo35Hep+t+E0MdMthvU2Xb5haId1HhCNTQGrxF9Ns4wVqzkD1jjcV/T85p5M8A4s8aTAePhWYx3Ow5JOyZ1190s64Vs8O9wsDjxJg+6uz/jD/HrpN/Lc8RnZjdRPbagkqet7uV53Sz/PZQyfvI1a/EwEC6Bv0+X7sLjs3CvrMVBId/Nm3/XmN/YoC6jGh7WjrZHepvg5F2rwNCTxZB8iHRvivRPJloX0n8P9AVYhW9T9N+Jark/1JXL1FcMyJfvIVWCrKNKtLTAaTX+1p5vmfYU6LFMeT/f+29d3yUdfY2PBiSzNxTMum99957h5CEkkCk9w7CAiKIihVBsZd1d+1dUbF3QZQmCqiAuAoIIkiVpiAgNbmec507Qd/dZ5/P7vo+7/v7Qz6f+zM3M8lk5ny/33Ou064TsFG5hCbn/oQxaccEv2xSTr1ras5getEpPCu66YNrWI/bqnM3B8UewvCkn7SfkHP62DvYHLpN+UfIT8ceQ/bLc540593lGvcj3TZf8M8zyPd4G/GOKTC6+sGW73PICHY8YVhsgwxL5zyDFPz/A+Susk/2HijyXW9cHQXb/CjYn0pUuad/1R3GQ4JzhovMi0Xn9A3m3EEYfYI3GqX+/f/xffKMx0q6GTtU/s0G0E923A2iB96ZafY59Qn7Xvuq2Q9XLhdnjynHoXOp6I91KLa9g+bIFaJXDmBI/A86w1H57vzXa48+eWDYw8o5xpwlzR4/9i/W+a5RDjRyH2UZdyLZmIVQoxq+F8XAyPSF7Q6Tu8M2ub0
/uVLWI871g2EzXjYs3jMEm/Y0LrIF/3+83y/qXOAYahsXui7quUKErSiG7T72UMUg/tMa5GxthuOJRFhHyz7PFltbH8h+lI3yONQwKwb+6T3lO3tXG5/vIq9oH9n/XIehgcCDgi3fuoz48jR6BG5Trlz2RjcEbDC5AXyW6zpwVmm21+Mo9H4HXXzW6F5XnrFA8muu1b5xcnA2idzJOzk2/Rc9T1wD9lOXOd5UnqZU42qEGFXw7mSB98wQhK0qRtKnXRD2TqHy0DnvT1D+GGu/QNgyBcP52WF42Y4ZnrbXZS26/9+WvUetq6dxWcT6oCezkLOpF7qeG4uAJzPgdW24zsBN/6oR/q9kwjpJ9HuGmzp/r9jYP/07vKHFxmsvdrVvQoNDsLn7Z/QT7TQtDfiz2N6HhrDH8axgxsNa08xeJfYqN/ivk72+AS1yPvqEfac+Fft0+0Z+r77AJdnkANqlPe2cA3xx5HatiTc5MY7J72w3+U9F/+QZj+o8tnCvRhguB2I+Lkf92Qno2zYDNUdGKOdh7pZmJH/eFUFv5OjMXNuMcNh6BcBI9YHh74DhsK8yLrJeImejUtajyrAaVVZPa5XV4lFls1xUIc8PNMKdY21d/K73m5P4RPT83DX/jtxtPQNqjZkRi8mXFbeqComra5GxuTuCHs6E1/BA5dOJXlGOgNeyYb1SdHyh3y75TLOMASH/Vh8Sr1zjwdfLjQ9QaWNP6xfiE+/CoNBzmCM4Z9Es8bcuYb/8ee2Hvjhin/bLkSeB16jkk8ofQq4Ingf2yU8rPKX9mH3CdsrvHFafbVTyMe0lZn07+3fZX9ocslVtsCn/6xHsUQlXjC/6nJ2GCbgFw3Ed6k+PR4OsxWDl2bsefU5fiuLv++l3dj8lOHWu6N+xsud6yFmvFFtXKjq3LkB1rmN6JOyjwkzdFeuEEeOCTX7O7/5URDycB6OLLNz/QS7ek0LyXM+k6GzM7O+akPJpHUIXFcJ+Y4zJhSp4xv1EivJbKddRlf8Ozj3/T85VJ8PiUWS8tKXG+AKc+djTd7f2vk7JbMO9ov9fY6/eJLMPhH7AjJITaArZrtwe7Pe7sp3rmxwd7GGs8flIdPvnanfJU9Iz+CuR9VHFPDwj3YM3oHf4NjQErhP9swJlzjfV/pIDLsAjG0aEDZXbB2NU2xz0w0yUHhqE0qODlJtpmKzHONyESbgNg85fieqDwxG7sgLuJ1Nhvz0WxnVRMMhpM12uG6Ph90IGoldVIExk5vtoKhyz5PVBwWIbQ2DtK+sV6PjcsHjm/CvZeN0U8bLPB5nI3dcHXY+NQfHu/nDfnQSvnv6qC12PJcNxj5xF8aeMWv9TRmfbv3yvf3X5yBeuMFb80M3YhYsDfsGIaJGnYPYba8UH6Gb2yrHHgH2WxESLr2Jf6Vnl2p2Sf079Yfpa7K3nuaAN5nxr9pJT/5MbhjajPmAduorOLzQWoth4tT3m86py9lH+7MFyWIIQ0D0MGd/2QNHOvija3hdxKyuRsq4OzWemYhTmokXWpGfrFOWRHYd5aDk7HQXb+yBqSan2qNnviiP/iLkO10TB/9F0RH9YhogPShDwchacDyaqznBeJ3ZTMIq1zPcXm8OY+4/YqvNlwZOcr6Sg+OBA5T6dgjtRuKUvOk8JgvPxJCSs76L8eLarI7jvuZb/lQ1KMKbEVBqrzna3H0K/wPMYKz7YzXXQmY3PCa58/VKR+WzT/2Wf52e3m7Ef+lYdvjE5HCaKDppW0Kb9/pNyT8m+36J2gpxF1a5lKLQtRInIvdzxLsqdb8u1SGzvW8p/S86rOGM0PMVUZT9Si6Gyz7v/OBFpG+rhL3vYdlc0gh7PQun6/mj6eQqaxLdskFMwCFeLnpqvfH5dfxqNzK96iL0u0Dl7xjzB35dHwDZNsJ9gRNc9CXDdJ7K/Iw72udGwTwpH1Iel8HsiDdZRsg5Zru9tFu9RamunBVb4vpiOulPj0R9XofHsJOWhDHguC50fiESOnIemc1Pg/2KmcucZTmP6f2vX04258VXGJ22NjgPo7XsCo+NbcUuDyZ++RvyrDeLjfnar/H+q2VPPM8AeYs4UZfyN/VhXVrZhVMpp5V4ze8vPap8y9Q25RNjrRrzJnh/Orle+JP9PVf+wNyjXeABJtmkwOrkR914p6jFZ9cyIc9ejZs9w7bMkF5rj0USEvJ6Hyh1DRP9cpeeB3Mxj5JEcrAPbrkDtoRG6boGv5sD5N8FMNwhmmhJm8m3JZZCHbnio9jN3Oz0OXY6ORurn3eD7YAq8RSd5hxhrPdMchxMWVelZi91WC8eqdBhLU+F9fwxcKzPRF7PQ5dBo2G6TNU73Wfx7cFW2cVdNlbG6rZv9OzSK3R0YflLjC/Rn2fP40Rxg/W0mvy9jbpy/TJnTv2VMjnZhdlWb2lVy/U0vPqtxnwEx+0Xmn+nMePIHsLeO/hdtAuXfEr5TsRM5UGl/04158LEkI+fOKvSQ/V1+fhha2mZitMi29/lLUbSrL1LXd0PWlz1Q+G0L6o+PE7nfqHInL3B/XIEhcg4G4kq0tE5H3ZExyPp7D4QvKlL7QP4v6yzBTFyLMSL/piDEfluL4uO0LdPRdH4Ksr7qCdftCfAuEEwV74Lj7jhYH46FTXxb2+yIj73/En0s9MtS1UcpX9bDa0wQDKvtn/rV/pNLsE+l4P+2Bvte9HQfxKiE8xonI2fY8MQTmFnyi/ZKsxeNfYcb7zbvGXNjjIj9eYxJTMk/r3xTvNiXylmo/aJMDi5y74zPOKWcNOSPIscAsX+t4CXKn/NH2Hsd2rkeAREBqNk/EvGHuyH4u1LE7emKtH09kbKjEenf9UD18RHo2fYn9JKrqW0qmtumiS66BDXnRqPizHB0PT9W5cn1GI5rMejslag/Mg45m5sQubgYPk+mwCDvxNQw2D9Kg21NGqK+q0bFL8OU57vuxFjELa8wee9j7MRSnxnDQnO9xffyvjHix5h1VbL2E+BekAprtd9Zw9LpP8I7/3hlGXcn1BjrNAatvK7RJzFefCTG/8elcQ70j7gki/Ilf9tZ7f1lLI46iP2wPAuUP+eAcN8zB0MbQL5j6iP2ApLPifiUvgB50cjlQ75D+m6M/bPnn/wmUUYzbBYbjCmhcHyUrjxm1reTYH0vCfYP0uD+JNvkg97dgLTdPZD6fSOSdtQjZXcjUvZ1R/K+Rr1yfmpB5ZkRaJZzRDtNLDtWbPWQ1tnocfQSZG3qichnC5C1owl5B/oi5Jsy+HyRg8jtVSj5ZbBya9fsGwGH4EzPdPs34jtcIT7V68Q8oUsKlVPWOlT2fqjjid8je15RRl//KmPNUcaAevn8iDGJrRgULb5W5EEMT/gZQ+KOyPWj+FubUG5frvGDcek/YnrRaY1RMwdA7kXa4A6uHcbk+EguujGpZ9o5xNZqvIEcWvSjmfsi/qfPlm9/HInGFEQY3eG0+MOa54J1QbzyA3Emtu3lRNjelHX4IAWuT7PgXpsN54oM2N8X3LlIcOWSNPisyoLf53kI+boUEVsrEb2zBmmHe6HgZH9UnhuBxrZJGC
C6aZzYa9qM5k2TEPZaPvLWN6suz9jXCz7rc+DamIO8Y/1U7118/DKEvJAL7xFBsI0OUWzF82ObIZgn0rmJcc/fK39eZd7vL6s1vkZ35wEMiTqh85fZ7zoy8Rf1pyqc7yvnYqFtAfKtzwqOeQ8VjmXoLnLkrOARyYcVY/aJ+BaXl59Wnpz5YsPHpp7FCNFhg2OZh9yo2IeYh3a4wX8DmoK/Qf+Ig/Jei5Fmm4NI2f8+nSLgzPRF4Ls58H0hHeGvFgreS4brtTQ412Qi4JsiBG8shvFykq6P8UIijDdTYF0smOfjNPhuzEP49grE7a1D4oEGpBzugcyjzcg42oTcExej4vwI1Lddgm5bRisXqvOxJPg9nY6w1wvgtyQbtpWiV1anInpPrXLC0u9jbMf5VDKcDyXqGhg1/m2GpXPl/xuyt3haAlJCZ6+tsn6MGmMDOF+CfhR50hsD/i57fjHybU8p7yJxIrE6+4jJjVoma1JhX4nuAZvl+krk+CEag9ZjfNpxDI46joFRh1TfDE34UTlG6/zXgL3/5NesJhaiHXB9LL7AW8ptGWeMQ6BV/FK3FY5s8WPjxAam+sJWIfezIhC/sRbVZ8coJvcRv9R2TwzClxQjY1tPJHzTFcFflSDtYE/Uib9Mjn5ysleKtSwRhFog0swRS53R1ge58pi3o1l5yl0PCCb9a4L60caVsq/vim2zvZsMr4+SEP59pdoW2pPKA0MVryqfT4nfedn7vyv2J/bE194tYLzjjtDvEzMvVb47cj/V+Xyp8i8x3kCp8Q4qHSuUk4pcnNTXhfZnZB0e1rUgD2uBsUCfK3G+gCKHyctabluBnv47taeceoZr2Tdqp+ihzTrvp8j+gvK40vdi3jdH9H+mcbvY4JsRI3DC4emGo28Q3HMTEHB/OvyfSIfvgnSU7xwk0rhVMQ/nflhvjVJcGvR6DoI/zEfUpkp0w0TR97eKv3QXBv8yW/T4cCS/X4nweakIGhsHvz4R8KuS3ysVLPlgknJfGreITG8Q+Y8IoV753JgffSNtj7E2HTF7u6C+daLaj24/jTP1T48A1gZM+a/2u9hr+4CQe+2XRhywzQ+Hz1XiX3s/iEqryN6+FX1Df9RcIOVOjo4Gv03KJ8FYTmPARtVD9FvJMV9oXyB793XF8Jm225VLvkDWosKxRPnpLw7fo3F/2luuaa17pcY7lWfY/qhya5mx58vU/yIPEh9dHhFwFfkhZXM90vb0QOiyIkStKkPjCcrhJsV/1T+NUE7x6NUVqDo6XGdtpD1XhbT5xUgZVYCoihT4xvjD5e8vuDYOQZZyhFrqEWKpRoSlSa5mBIj/5PyzyJ/+8mzzsnUPOGVrCdpnW5AAxycZCNpSjBzRW0MFS42SdSd3uZU+b5zrhGHx/Ld9Xvnnae3qO9X31sRDuubXRcH7Oj+El/VBueV91T3dnT8I/j+FfuGH0Cdkr3L2D4g6LDr8JEYmn8TguMMa26fu4Kx48lWRn5U5l8agdSg2XtK4QpXIumfgZn2e8dCm0C2K+csd7+ie5/4n5qTsGfehzCOMngg0chFgZMNl52wCDzgy/eB6NgXOjzIR9GUx4rZ1Qfbu3khfXo+sJ2sRfnUqwicnIra3nIHQBLgtsXJlIEBl3Ygwj0ZEefZFgo38MpPUxnONk4xL5W+OQsSyYuV8tk0Vv2B6OPwXiLxfFNv+lwT4vJqGoC+KEL6tAkkHGnXuyETBUXWHxsBvYYb6dNSPRifrYsPb+KvhMGYZdmO4cZGtTvBSNzkfvOrEN+5ty3PfKH7fNyELchAoZ1XjVNPC4X25L5ITZ6Ha41N0MTYpr/LI2HOas51ZbM4PYb6dNUDzBFdenmfmx8YlnNM5QqzrYXyN+SzG+InpmdPi+vQM3qzxIL7WHLZN183E+s8g23Yvsq13y5m5RePOnD2RKvgz1hiOIEP8JSMSdrtT1qAzrE5v2IrcsJX6wprihHewTZ53CE51w2UJh59F1sZShWjLYCR7zUCa97VI8Z6FZOtMpNquVr8ui7VdotvIdZNkTEO0MRAhRiWClxXA9XSycu0zTuFPO7ykCOR2Ic6P29IFQZ8XIuzLMhQc6Ie+rTNxcetl4qf1gI9gAuaGDOYirAYMw6DuglHgC6PaX/PrnIVjTAyH87Y4jQX6v5IF4w5zLoTtavE/ZsQg1/oAaiyfo96yF/28RN4i42cEOy4Q//d58W+fYz5+0FncM/gAZgz4GuPHfIxLR36t68N4NPMrxJCM55OnuSVip+a7aHeHJfwka7Af/SJ3KbcvY6LUQYWuZ1RfpV80H+mWucjoNA+ZHrci3XMukj1nIKbzYIR2qoZfp0TRHaGwW5wiby+9rJ28YLf5we2SPRqQAL/gLAQEFyEgqBiB/hUIdXdDmL0HQryrEeJRjeBO5aJzKhHXaRRSLroK6Z3nIbXzNYj2GgCfdzPgfDkF9gcS4LwzXudBMD7n+2IGgt/NU+5s8qJzbkLgOzlI/7IB5fsGofbISGRs7K4zmmxXiS6qDxBfQGRvkzUIl8cKf9j6BWndg3VEsGJX6+AgeLb4wbPJT2csdG5xw7slFD7F+XCV5SKwS3dkNl+HIVOewuR7H8JVTzyNyS/8Db3emIrSZX2Q9UkD0j9uRMbCoWga8yqmZkJ1En1bxtiYCxiZclz5NMmlOa6dV5jzF1j7Rt5N5h8vDpJzkrAUkX+rg/99qfAbnoOAGpFdXilCkrsiPEb0UEIp3HmpcNXGwtUkMhkQCffkaLivjIPvHJH5HUkIekD0w2NyiQxCxDaHLcxEyJPyfn+Oha/YU59rI+GSM24fFQyjORD2smC40mIRGCnrEdYDgUFlsC5KQtzfu6Dm4AiEv1ukeR274Bv7HbEw5H2M+wVr3hurPI2OhxM1vhq5qAQF21pQsnsA4tfUIIB7+s9xsM4Ig/egQHj19te8jPXyMFnTOAQ8nI6oF4uQtLgGqR90RebKBuSs6oG81U0o/qovyvYIJj7YH8U/9kTB+Sq574emHZei75brMHTZX9H//pfQ87o3MWDARgwu3oOh4ccxJrJNub/oXxFXEl+St3w0Oa/Tz+icC/KX0iebkt+m3M6sSyTPKX+2hTx6ReJPzLoJ8X/th7AreyB8QAsC+1bBOS1JcEkcAgWHh27MQvB60S/rsxC9owRR35Yg7KsChPw9D5FbShD5Nf+fj8hthYjfW46E/RWI2JGP6N2FiNiZJz5AKWJ3lCH+2wokfF2J2DXyHktyEcFZGncnik7pjy6t41CydyD8XslUvkiV/c3R5IyEcW2UWa9za4zG8oiXfGWNI94TzPt32a+bBfd+XovQpYUIWpyHqI/KkPpVA4q+74+6o+NEX11+ISbVD7M0vsFYavWJkSg9MhTZ+5qQvF8+1658xG+oR+mfb8MVzYdwV0/gGpHX1BhgmE8rhvudx+hI2cui90ennNI4G2VPfmHKlryTY+Q5Ps84G/mkOWuB/jA5eclFzXVgvShnEwwW29HTR2yyZR2qPJejyms5Ki5ahILOTyHb/mekxVyPhC4TENtvDKJ7DUNk94GIa
ByEkMZGBHUVnF/TgMDyWoRk90RoaW9EVoxEbM0URPUch6iR4xA79k+InTgZkbOGIfTWngh+WH7nFdH3S4sRul70+eZ8BInfHHWl+MyXx4M1U5q/IgadF20+TglXfjP6HZzDyjyL66+J6rOFvCny+qQaWSL/8v1D0HRiisbAic14MS5ed2Y88sVmxHxZg6A1hXB/ZPrs6ls8H4fOD4rNuT4RIYMHI6XiblT5ih0VpDQlTuSe1YYR8eK7xp/DqKTTWldFHmbmGSl3crZzP5PHelo7zzXlSj5vnXURd6A9P3ZKOR+ZHyMXJh8ZgyPnK3P0rI3gPAPaZc6BU9/CJj6e54PIttyHfMujyLH8BZmW2+T+EXnuHrEXc5BluUPuxaZabpbXbkWB5XHBcO+h0rIUjZ03Y7Bb/D/3UfSybkWZ5W3kejyEXMfDyA7inJVbkJI/FwnFl8I7xcH8CwzWSJE39MZo85oUbs59lecUC8m+d4mvxlk5xEjkKSvfPQh9z83AaNyoMabupyab8t5UDd/l2fB+MR6ef46E160R8LpNHu8WvMlY6hvitz8VD+e8IGTPnIEell/QbGlDo+ch9HadxZAwkVE2dEbupXlmrdvlpab8dHZMrsk73SFP1uiSS5czJ6a0c4qTr4z5Rubrp+Sd1frQy9trSTmPhOeGNoH2oH/0XvXLGBviGrBevcPPZl0EfYoc4z6tU2GtFh+LxX8oERzL14hrso175bXXFHsxz0COLH4Onk2ucbHxMoptgnu95Hc7L0R5J/EjOz0NY2yYOV92drvsuecHhsAmNpPnwC2+FvMNgS9mqe4PkMeoD0XHbKxH/u6Lkbu7BXFf1SJgdb7GpzyfiIHXPZGwXhdh1r3NkLPz13j4i20il6K/+Ir2F5MQtbUKrvEBCO/ahLrOWzX2WW/sRQ/jBPr4tOF6kdf9A834Jjl+GefnPp7Zvg6UH3UKL+qUGaUmn33HbBeuD88BOZ1Zc05bwFgoHzvOAOcDUG/RXlNevQQbMS5kyngh8u2PyRr8TR4f1Vko9NsYt6Ccyx1v6zwMxkEo+2xZH+ZwiG3pE1a6FmvujXMYNA/htwaVziXmPBV5L+aF6kOWmbJnTVrPQNgaAmDtGaD1Iz73J2neIPL9EtU1rG3wfSkDfuTLXpwL92vpMJ5KgNeTIu+nY+H9TBxsz4q/tiAJxiMJ5tnpJphogeir97OR+FVX1BwbifJdg5Cwohox22sR9noa/AOLUOb1rvi934r894j8j6OPE7ha5PTKZJNnkrFM5hs7ZmnrnJ28X+fMjMsQ3SSynlZgvsa9zvoTXpcWtck+P6Uchnxtcp6przrm1FCXcY/SbgyQc0A/meegznet5uW5v/OMh9pjTW+q7Ln3Nf4kr1POfI4y5XP0w3nlGY+j0HhBZ71xFixr3onROKOl3LFYsMIBjM/bAZvoeetV4fCaKvK6LhIBT2ciZlm51h1Rv4csLoD75XS4nkvW+jbFQqx7+VOY5jYNznR5NB7GohS41+Yg5huR7WeVsM+S9013wfG84LSvi5F79OILudKirX1hrJPnHy5Egn0q8r2fQKV9pfKo93KcRB9XKwYFtuG+PuZcF3IVs96c+TDG9i9rn11A2REDUfbU51N13hPt8wk9L6xLZy0u9zvzxDw/rKvumIvEc8TfMW14m+In5uxZU0SZMV7aMSOP+55ng1etz8r2uUCv6PPUN7yqXEvaZwYtknVZoj52vu1JXSfOYONZ5Jkos7+P4Ul7cUWXnYhcWoL4tTU6B4z5ftY3ZG/thYiPSuH7diYczyWZ8r0nxszndwkw62gp+1mR5uO08DPG7TE/2Belnvf5KAsRWyrVd3NOkTXoHoCY+wtRuKwZxSv6oHr7MAQvyYL9ylQkeV2KbK+79IyXGK+hi0POpGMnOF+z2fc47uwFvDDRnCdOzl1yjjPn1TE/iXv/kpxWlSHxJnn4yafNXIGp59t03zMnw7rnjlp05e1v11cdM6s6ZjCxboL5BeKnlojvVNacT8wYB/ld6/3Wq09NrnLabMYzOJ+ItY2sB2NsqovvKq0Ho09Ie8LcJmN8zDdz/xfbX0VD0CoMTvtUZ3ZynivjSXW/CNb6tBLudzJhPJMIG/G/4E6Nd86JNmuKhoSY85smhH1vDA8daFwdFW3cFKW1tNZXEkOst0fdKna3NfSbMhQdHYDIZwp0hrl3jfhbsTbYevvCMS8BWY47UeGxVM7o83rGyf1dbLyOCmMZqm1fYFD4Lxq/J6cxuUDJ9Uo7wHwLMT3lNqkd89DH4hwvcvhThpx9xDroSwuJ9U9ibn0bHh5uzoTgOWBvBteBuJTXzHZ7wfegPR6edFRtM/1mngGtFRWMxBoK1hb1jfpeZ0N0Fx3O+BPj3fwd/jxjTPw59tdwlgbXkrqrwvXehXlKpQ6z5qXQ+bRg86s1T1l8ZCCCVxeqDufce8X8l0eaPsADcabMc92HROcvc7yawr290stiCTcslijDclG64bIPsiW4brEPD11rvJTU5r0kGXE/1CF9fy8ELsuD84UU+L+cicTNZUjpNh21lo2ocTBe9ppiDMZ+Od+B/P91jq0Ym3RWuYPJwUy+3VVzzTXoyDXqbJXsNsU5Q9XGitxEfzBXw/pE7nXOIhka/7POHn56HDmczfVjDw1fp+z5yNwZ73mmuAb0I4hz+d6MadT7rdNcDeVHPcN4B2N6nGXOOY7MIfSV/U/sy5rriyN3aI31MPHzBsce0rnode4Nyh0/POmg6jDqpyq/N5C1rxnRm6vhXJIO68NiQ68RmU8OJx49ZUwM+0b2/znmdxyL02AfHvazxWIZKnZ4iLNfWJvhtms9HWMN3OP2a6LA+FrynkYYq9Jg+zAFxrspsL9s2g7rS5EIvKSqrcjyPCqNj5V/t9j2pvKOE58Vib0iF3pPv10622lkoshWdMmMkpN4bGSb1qNwrm/HnDdTRqcUu9C+Ue7ck8R7QxMOqI2jvr04Yo/87BldD/pjHTliriNtgdmrZ+o1nqdhiUfVh6M8NVfsuwG9Q7fr32Dussa9QmN+9C9oJzgrijXZY9NOaE0GbQuxwLD4Y+gdvBPdfDaiu98O3N3HrJdh3wf1VLFrAZwfCo55PVlrmLVmbmTofqN/8PVGmX8s65XtjybEe7+RuCFwWzHqD44XXykeHpaLtlp9bGe1tplrJThVZ2c9nwY/2eMBr2absQvOXJwRccSYG/W29fbQ+a7mxKczvOa3VXgvM2eXGCIrX9lHQbvQIL5XtWO1nIm1uDhsv8i/VXyv0+gfyXry7YpP7utnzo8griEOpf7hHuW+Z2yHtYjU2Tz7vUX/Mt7JvPvguP3685ddmDvYdmFuE/c8bS/9OK4p8Sj9AcbqqIu4xzmPxdzfregXvQvV7qVaQ8Tadv6tppCteib6iV7iPJ8xqceV635g1BH9XlXGWoxIOIbXppv1NAOi96DE9jby7A+pH6T6fUJYq9ESfLMR7XT9Q8ze2nlu2JqE77tpzQ99rJgXiuF3RQK8q3xhLXDDXuIP+0yd+WDWGgk+MkaFHjKq/aca
Fg/lBi6zjPfMsNz0Zan1XZ3t0uDagQGhpzA5VfZckuyVGPFHI48odvmTyHVMsvgANWZf0fQizrdpxaPDTRvAfiTu2WntNnik/A7nfVF+jIdypk+HXqJcuonNfHzkecWx7FtlnQrnfnXgVtperiX9N8aOWJ/C3+utMesv1c4Sv3OWHuckUebU8eb87JUa8za5xpnjPKXzmDg7jtzTNU6x2+6/Y1L2Wbw9y8TRXFvaumz7PToPVPb8N0aWb+pvZJ5wkZ/nGM869/3Oy6P3Z33WQ21E1qE+CNlSqnnl2K9rEXh/OsKeyIU1323W/tJ/nqT+21NGuDP0t+uYZEz7sNB4TmS/ET3d+9BfZH+pfN/xiUCL/ymMipf9STwoWHBwzFH0Cz+CyTlnVN/rbJVyMwdA3n3OU6AOIsaZVmD6mFwH2t5RKT+r7aV/wD3IHse4zrJvohZgXrczmkugf0B7Oz7znM7p49w50w84L+sh5y7pmMq+o9egVvwB6hvOHWTMgn0z3P/0mamPyh2L9DnONx4c8xOGx3P+7wF09fkMJdb3MTT2R+2hZf8Uvw/tN/Vtltg8oyVora3Ad753mvOGzoWOBV69/Tf5XhHXGvFoHlJX1yFvTwsyv+uF4JUFsL2WBOvCBFhfSNBZ4MFritSv1R6jS0V3dQ/cYM/0vcCf7WexdO5ksaRGOHotoD9fbaxBo88uNPsdwpCoXzCjoBUtQQfRzbEDfUOPYIz4iqNE5w+L+0XrfsaJLuC+pJ6gfaTM6YNR/tp/3e4Pd6wB663GiA6mLu+Yd894QJHjWZHj38U+/HRhNix95sntmJN2kjFR2pERin1+UfzJHIKJf1bL/Vb9W+MzT2kOgb0faosDv0JXeZ02tUL8W87IGhXfqrPhuji/QLltlWDdkzqXgnMMOKPvkuwTystNf9mYHPaLfXYU/O5NQezrZcj+qheKDg9AwuauCFyeC/srifB8KBLed0a02m6L/N52W9R645bodwX3v+41O+xu7+mhD4u/u92YHbXbbrJPevnNT5obcXP2JutA3x2OzNBz6V436pyuRp+derHGfHDUCTQH7kG5Vfwu5xb0CfpB9P4P6B9xCJdkCk6sbp+n0sWUF+VPvc9+pDvbL/pWxKKUC/2wsTpb+aw5gzTHnCHJfgDy8HP2Jn+mI25EH5jrxhwB7a0ZB9rTjhsPav6Ac+2p21nDyHjBZSVtque4z5mHZt8250BSz9W6VyHX9ojG7/pF7NO5jOR/7x6wVfTnSZ0fwT5Z6sCxKcdRZlumdQKMZQa+kaPzWZO/6YaQNTnwXBCITjf5wGuS/1nbiNC1jl6RT7orUwYH+BcnOi2+of+Y1/W5Jtlt9At+1RgfttTrrsgf03f2RLdT42FfGIWYnNEov2gxauxrdZZbpfEJ+oYdwNjkc7I/1un/ewf+gGGxZ8TenhK887POuuL+Hasz3Y5qDHNawVnFLsSPt/Qy+3t5BojnGX/riCdwf09r92upW5h7ZPyF+Jyzby/s/Vzzd3R+Wl6brhvnX9He8neIdVgfqn1LsgbU2fQrWGM6gnN4grfpHu4V8rV83nO6xpwNRl+L/i9nxdJ3ox6bWXpOa7c5I3CO7Kturs0osy5FqfVt5WJwLIhH50f80GmeG87h+Ugqm4rCmHuRY70LRR5PnSzxeG5rjfH+hgbfVcsqvN58p97vhdUXJz75RXPY+2+nuUbf7V1g/9AYGwqvGyO075e1qCmH6+B/WwnyOj2q88pq7J+i1HhP53kNiz0u+2Kj/P339bNwpiDjzCMTRQbx3Hein+KO6iwx1m9yv7GfcZr4U/Sd2MtCX4ry57lQvFdoxhE6YqI6VzGnTXUJZ3BzLWibiXtoS6ivKM8r2+Oi/L+Zq/xB9T/lzZkAfQRjEtfSn6VfzZltl2Sd1/+zhpQ+7cDoA7IeW9UOVIq+4zxa+mXkE+kRuAXTOOc0W/aDYIx653co8/pIMMEmlIy9Ex5/c8E2NQpRFcNQEfUCBobsx5Xys3c3cJbpx+savX5Uu/yoBZ5hlpJMR0b0HFe3xB/901MR7lsG37hoWHv7a/yUNe+JK2u0D8y5JB5xaRPb9/5nOsOvSnQj52rVOteK7JfI858L1v8efYL3Cgbdq/NpGOtnfxbnEeoMX7EBxO60fcwtUt4606/exNJzG36N7fPivuZadMSYua+pY2hXGeecnGueI8YmiIGmF7epH0C9Qtlz39OO9A79VmvWmU9WPRRs9q6O0vzmCcX9nAPJPpuLQwUv+W1HnetLNLi3oIf/dvmuH2lMq9axQWS+A3X271Ft3Sy+/Zdo9jiC5pZPEbMuDUXJj6CX6La7Zb/cmCV6M1owXazYwoR1CLe0/IXUwVZX51usyfZltv4BJ9x3JCD0zTz43BMH74m+sF0RBvf9yQh4KQsRHxQj4PVseL8UjoCBFSiwPCn6ZaX4VItQYryp+qfasVLOwSKd/9EcsFf2/QkMEFvVP4L2lziE2KXNxOvtcU6dKya6Y1jCUV2XjrlqHXPniIO43/nIi/ubZ4QxHuIizYnJGaAOoq7nrFL6YIxPj5e17fCZhsu+p82lz8b4J30H1q/0j/pBseX4zNN6LvpG7EWfkF1an1Ql36nG8ZnOSWXfGmeE8rxzdiZnFFYbn6GLwdla3wjO+A6D/M5hSqzs7SfnobD/zSiLuxsZOVORUz0GiXIGwsq7IKibyLc5Eu5hYfC9NBbOq2LgvD0efs9nIGdbM6qODUf40mIYD8Zpr6nfaxkIfC8X/m9kw/5SPBw3J2o8ocz7Pf0MrGFj7y6xgfmZV+p8w9GJZ/RcDo8RDJ4F5Sahb8S9SZlQfsSE1Bn0j8amn9W5LtPa+15Y939vOxalDmEPkuqnOtM+sza9Y8at+mkiX9as0F8weWvadN7z2IxTuubUQcRBrJerao/1sIaIupCfi2tLW1DjXC17faN+tyLBkfnGE2adnmAM1nFXG5/qviPe62LfhHq74A7nfvTyFt1qbUXNy7cg66YrMNByHp63+8D9cjRiNhQg43vRHTu6IHVnV6TvakTClq4I+aQIPm+mw+e1NPgvzkHIyiIEry7SugnH00l6+b+RhZDF+fB5KQ22JyOQkDIV5Z2WaO0aP1OVfbXonM91/9c4BYP6bsWQ6GM6a4ozRWeJLDhzilwx/I7ENB24nzFNxpQ7ciY8+9QbnKX+F85lGmDiIupxngvqJ+om1j7/uX1mE9fGjFm3aZyU54FYaGo7/mEfJJ+jruMasaZI+wN8VguG2ah2YUqe6C9Zqwbfr1X+Zfb3NO/CnkniyCrjYzTY94msv1e5VxmrdWY2fU321dZ775T1/BLpr06Be08w0nPuQYNlt8rPtTAVgSvzEfKZ+LUrcuBemgXXova4xMJEGHLZ30iGz6IM+C/J0bmlsRuqEfBmNtijFPJevvb/Wl+MQPDIepRYzDhmldrd9ahzfo1G9zfy2TfLfvoWvQJ3CgaTvS+6epLIX/3c/mZsh/uZfRf00znHjBiUOp3y43mgTzA0/qickVbVP5ztxv6LjhgaZU3dRJxEzhr6Ch0
xH43bFf3q604rNO9pI0xbc1axUoPfF1rH29X9mc5kIl8H55w3+m2Wa4ue40zjTsQYQ7Vng74Na1UZUyk13lYemwpjuWI76p9a20bUdFqH6JvGwnkkFt4PhCC8ugV5QY+Zcc07Y2B9Ol7ztdYXE2AT/4pyt72SBEPk7l6eheivq1FyZCAKd7Futw+ytvVCwHs58H09E27RP/bH5XfvDkda+FxUdF6qcTTKv87xDZr9D4g/clwxziDZ9/QRyZ9xXY05A455Rs6v5lxR+onT8ts01rlqHvDoSFOmzC9yfzPGQ118SbZpR4lDiUcZk+B5oU8wv/2Ra0kdxbW4sT32Nrs9dj01/1fMxJgnzwPPF/PF1c5VWnPNxylid6gX2Y9Q7/d3wTQf6cwy1q9R/qxnk8fP5HFcpnHH0Dzj4aFFxovXyhr8rdh49ZVi4/WlRV4vrEj3ufk1n+VZZ3035MA2RzDL1AB4c04r627vjYX9ZdEli9LgfD9dL5/lmfD9JAdB64sQ822t1lQXHh+AzN1NiFhbpjMQ7S8mw/5CkuYhvW8Lhn+vEpR1ekd1IPd9tf0zPX89/Xajf/iPGk+bmNGms936Rx4RfX0eS2abM2xXzAE+vhk6n/OK8jad28ecC2cUUr9QbowrEKfQV6I9pa7ia5Q7uZaosyhz6n5yZd3XfqY6OGyUO6jBfC/mwbiulDn1+wSxrex54pzMCvsyzRGWiE4fkbQfV8u6T8xolXP7ndbBxxvjEW40ytWAMKPutX+3/tV3bc6msI1lglkiYJ8WAZ87E3SepH1hEvw+zkXI30sQuL4QvqtFB4nsmctizYhrWabGSe2LU2F/OwWsy7X9LRbG7bJ2cnaMW6JOeY22vZvqvvZQpbcZ1+xq3yKY6xv08t+tvu6gyJ8FZ+4TjH8c49PaNDcxKO47wfCnL/R2vXOFyfvDvCH9dPqMjNlTlh1cTNMKW3XuOPH8n0QnU6fQJ6ZsyfnD9eI6MG7HteTcWT7H/AvfU+cydzPttzmT+5z4F/sVB43POK0xs0rHUuWRKLcvUh9gmpyV4XEnFDek2q5HpNGCQFvBOau3fTblavcx6j28LeX/Su6BvlHKYRP4Yd6Kgr39NI9onxwB/ztTdO8azyXC8UqKrIM539P4SxyMe2JhuyPG7G+8Ra55UewP+Nl2Y9Qe47qor+U9lhqXhN9pTArv12mOJSDZcllFgfezp2n32T9B29PDdRAjBWtpPCScs4x/QkvYfsH4xwR7nJXv9rXYhG0ix9Oqx9nbS53N2ZwLJ5l9Rey5vrXJ1DGM/VCPMwZJ+TNeQ14I7mniHuod7nXqHMr+lUvNvDHXkf4n35c9Y+QVMmcfQzEt60XpW9CnqnB8oPkqYvsq51LRNyvRJ+ggujg2Isd2P6JtAxDfeQSKHH853+j/ycq4wEGLPMM8EegOPzgocc1zPYO//LrOd/UWeY/Npc6XPmuO+HhJhePl52Mts0N8FiR9Ubx3ABzXm7F+17z2PUwdNDfajIXOijhmzIxYL68/aUwLn21MCR9sjAttMIYEZxg1AYFGJ6vtn9bXkh6VYly1h30lJWJ/KowPNc7Z3Uf2fuRZDIk6g/5hx2QPndEZi4Nj2U93DhOyGFcw+xUZg7yy4rzYzvPKcUV7zFgn4/20p3Pa9yz1BnE7fST2A4zLOKF2mbE0rf/RGrez+vPkalrYrsN4z7msjEGSu49nhhiXe17PosaMN2l+kHn1Usdb2pPEOaGVto9RYnsD8baxiDCaWgOzS1uD4ouQ7BoJX1ssvGydEGBLQzdjzcHu9n1Pd7F/Pb9rp6+mNFh21uZaHsryDU4p7FzieCR0YR6K9w+A7eYoM2/OfMtN0R+J7/qyMSNiiuzlcmNIaOB/0kthN1w5ogv3sX473ZgrWOAx1Z/0+xp8tqF34H70Djig85xHJ53BDV1MHEN/h3Eb4pZpBeeV46dG9lyNzyrlq6LNo24nxxX1Ee0s7e9l7XkS1hL0Cv5OzsN53e9cG+pz4pnBcT/qxTg/bQH3PvUa9z5l/9bl5joQN9EW0AYMjf9JdU8Xn081jlDMHhnZT8xNE9+zxiqiU48tzqGJl7teSTgX/EQKvMbaYRsaAPuEcBhjQuBZ5LndYrWMF5/Vz1Jq6dV5oNfztulBO4zhwfCcFITivf2RsqZOaw2VA6wp6Njv6R8S29Mz1hh2knXzacYc+ax/U14F4i/OF+WczVrHOlmHb9ESchgDo3+SfX9COfLGC+6mDBmjbwndLZh6reyx95BvfV5jxozBjE49LnI9r/KjDiGm4ZrxPAxPPKKzayfnHVO58qwwHsQ9zRh0/+h9giUPasyBMVTFqn3NGDBjwa9fZs5u5Yxn9hhMEPzZ6LdJ53RS7xcaz2o+mjNDyQsUZ4zZ429JSbXstsSGf1mOimPD4P9utmJ3nxdSNb9tZ119A+vAHW22YSEIfasAwYLNPa4JQcpn3VBxeCi8b5Q93xxEnpSbjEjnP8Uy/90r2hhUnGLMOp9vPG76tvSxFPOsVp+LcTadaxu8ByPiTmFmIf3bsyK3n+U6qf4OcTd7E1tCDmieekj8Aa2TYRyddRpNodvQT3x/2lbaAsqPXJLUHbOrWtVuNgRsEhtwVG0vX9fcWAV95ZNmTbT4TsSYlDHXh37Z07JeCy+B5mOZ4ye+4ezdrs6NojtXgHmiLOMuzY8kybmOMvptDTIKo/i9Pa8Nr3evyUH4lnK4F2XC9Vqq1ujYn5XracHsN0VpjQh7KVij6fN8KtzvZaKubQL8n82Ad1ff/UaCT5ffs+95yZ74gj1w3O9V9o8E52/Wi77exSGHtHZ2dNI59a844/qBgeYcVc4IJzee5pyyGHdolf+f1/1JHUEZjU49qrqYuSfWM0wUO0H9c2ezKT9iG50zLnt3WCLj9odVL/FnHhxq4k/qI/rNwxJ+VltDnTe70pzr/cgws6aLtRWcvX5J+nnxETep/8SzS13DGeSZxq3E9l8bhj2843t3vjfizeS9jSg43A9By/PhXCgY5qF42Bck6nq4X0wza3VmCb58PEVxY8qRHojdXAuvCUFnDYdR/HtlH2Z0bcqw3abz4ut8NqDWtVb1PWXfJ0Bsazp0jjnrZzlvmX7uVZWteFCwzfMTzZjAiKSTaocvK2xT/hL6WW+Iv/vCZHMNWM+mPUPk6wncJDbhiOoL6m+dhz3GxKYdPi/xDLFRB+5nnRZnvbNvbEjsMYxLPaufaX6jyR/BtWbMg8/18P0e5balqu/ZK5RqzNZ+oSRj2ka3ER3U8b07DXOPiF1bhWqMQZdzYxDzeSUcj8mevzEK9icTkba3J3J39IbzcXnu1mj4PpuG6lMjUdo6FPZnEmHt6v/y75W9jxGZnGa7cXcX1+eCbU7KPj+j+ZN611Y0+f2AMYltuKLEjCuQZ5Y+Lm3ulFyTC+DSglZczjxS9inlBdB4fIW5Z8lrwhqBBRNMXU+c2RS6VXFgmfGBYJ9T6iswLsFcNm0of4/2gLqHZ4P/J04lD+KYlDMYHn8CvUN2oTloHy6Tv03OUHJb8v0niB/SJ/iA4h
v2UqYYVyoHX4ZxExKNqa/aDO8L/F2W7vYRkS8WofHMJCQf6I6k/Q3I3dtH+xetY0O09iD1UE80YRpKtw6AzyMm/0z8oipEfFxK3o2fjCDH7+Ly9jOS3RnG/B2M7/ULPo5xyZwl3oaLg35Ek/8BDAj7BdNFxlfLGecakO+UZ/wv/UxeYMYnpxe0qd6gPaWciROJZygPYk3GH+hrMb/CujTWIjDXTRzYGPSF2lf6VLQHPC+0vR2xhcdHmmeDen1q3nkMjT2BAZGH0S9c7ErADlxTfVZzgKzp5Z5oDjygtqrIeEl74th7J/r+tGCKq377vT0G+d0Y81YpEtd1AbnAmAM3PknTvpW8jc3w6uMPW6rrLduatBsSt9cfLDkwEM5H5QxMDIV3lRve9X4/2OsD83/v3hed+FiZ8Z7qHeZQmoJ2qG/b5P+D6J3DYsNOYWxyKyZltGF2uSmHO8QePjzElA1nqhNPEqtQ9stvMP0jzeu2mDhxwcR2HMPaBtFNI5KPahyYmKjG9TEGRB1S7Mr1oq6hLWBdxBXkK+5u2pnrq029Mjz+F83rjEyQz5V6Rs8debTI6zRRzlaNbaP6K+x/DDO6MabwYohRkXZhz1ssnayTQ+/L+rwHMrb2gPXaCHKSvGebHFrpsSB6TtiXpcje3qw9bjaL5wj9nXs8wr0Wxm03HoxX7isj3Q3D4nXX75V9rDGijvz4rNdkzSDnmDMOSz3Eud69Aw+iX9hRDIw6prrnSrF3VzBemXYOs0rbFGewlv/ZCWYcgPJmjXKHb8q9T93z2W3Au1eaa3RrT7HT6acF529FtQ95+p9Apc87GJ7wS3s8+Zzgy6Oi106LfuPZknPQVewO8+uUf+xpraug/SHG+VN2m9qjCSnnxFZtFZ3/AYjfYowh8DfS3v/t95V/bvvlkW9mbuyB5C+7wXpFOKy1fvMuvF5r83e8k3Yu92ALAq5PRmeH19CO1wQDzbc/lQTXo8la12942576PbKXz1Ymn/Eo9SJxMbmqm4K3aw6oXjB4T//vFPMwxjkm+QymF5q47irmn9LPae0U9QplTf3MOmZe1EOM1XM9GG97Z5bJsbTiBhOf8NyQ+21myWnkWp9AtPcIBHgXosj3YbHbzDe2oTmUvJObFe8zRzwy6TgukfM3Ma1V7NNpTEg1z+JUsT+s8eof+jPq7NtE569WnUafUfDESS/DEnFBtkEe+faZkZsT1tQg/P1ieI4Pgq3cb/b/Y32ybJ7GG8nf5R/th5iF8jMp9vs7XvO6OeJB1o+Hv1UIa70/DE/ruP9e9uk14UbDCcb8iDcr7ctRZl+MHqHrcAe5rOW09gk+jO6+36IpcBdGJ5/WXMXMIsF8xW2q8xnDZxySsRyz5rhNcXpHPIG4hTqfdeaU/0dzTT1OffWc6KdxqbvEL92onIfZjtvh65WNUenfKZ5kvJ41amat4Tqtdesta8IaRtZw0caSy29w5Cmtfenm2Ka5z3JjidhcM5bpa8Rf0PcXldjH+MyNP5W8sQ5Br+TCa1Bgq+iQIf872dgWJt4fu6UGkStKYWsKOGy1WLQO3HgqYXPZj4MR8WIhvHJd3xgWi+d/I3uH4b44ymg5lWncpjaKNVv0TRj/pl9YHHCH2ICtGBrRJj7UQbV15OIkruwVsE/0TqvGXihbypp6mjJjPeH0IrNeh3k95mpZ70M/i/pn472mHebPPi3r0NvnGO4WTHOz2AXW2wR27oZe0Uswtxsx0kl9jnWy9BeIlSodH2gcoYf/VgyMPIqhYpcYi+pq36xx8XLjQ5SKf55lu7stolPjB5yP4hlh6+3d4r8q6JFM5T4MejWHuv6QEe2q+1fysT4eNypgVR4i1pXBdW0srN7WayzPePsELSs436dtOgLuS4NXnP2v/6ncnUZwidihFfHGON0jrImtda7XehfWDJm9aQ8ixXsespw3ab/I4IhTGt8nv+mwuKMYlXRWscqbM814C/czY8Ad9cus7xkSR9/0vMpeMUy9ic0/vcXEKFwzrRdLaMPlOW24tVH0tt/HiL/oevSP/Ub1F3ur+0bsVm5c6kTWV1SJfuTFmu9LC06gOuBlFFlf11wE84TldrElXs8iyqPfWa88+7O2McGf+t2ahNhlFYhbXwO3+E3egwJ3GrE+6f8nOXk9EFPuXpGNqE2CMRcXw5bms8djlN+18du7aq+t711J8Iq13/qfyD7EqGqONUae556nb8saCuZs+ZmZT6tzbxQbuFhjUrk2WQPbbOUN589099uKRv/NGBh9CBMz2zAo7kfcP7BNOQypV1bdZM5LoK6nb0SOpck559VPYxzmesHxfxZMs3Y+sPomcw2INclBOTCgFVUX7UO591q0hO4yc+3dTH44xu6ag7+TPb9W8UCD7yb5nOvFNm0UH/wIKlyva20a8+Xs/SnyWIgE6yVwXRKL4IWZSPysi+ZTA9/OBTncrX0Dl9r/DV7ti24IDnYty/gl6ttq5O5rgWtgGIyaABScGYgWzID/g7L/UxzP/ruyTzAm5os9OlNkLFR809W1QTEOzypr9QdG7dFcbUvoHp3ZQf78EqfoJPvz8n3Xif7Zj3Hpx0Uu52Wft2ltGPsppmeexLsz2vDWTPF1W1rx/IQ2/LV/m+CYo5iQcVrjEZNzzNkVN9S1iR/WhqXXtOGD2fI7l7XJ2pzHlIxWjI49j8miz//a1/R/72lpE1tzBt0Dtqi8Kx3LFKPW+36JevffxTf/AiXWxSiyca7L05onz/Caj5iQIfC9ORVhH+Url6//8xmM27RaJ4ZsELzyv9X1/+qyv5n8QcC6ApScFH3/WB5sCS407p6AoW3XIvh50WHZrnW2f3POtezpebRN7MFqcH2r8WPGFVjP0tXxFUal7hTMsQvd3d+jwf2N+JS70M39hdZekONxeMIB0ekHMa1oN27qvg9d/Zch1/NpdLeJXUjYjcm5+7SO/traH3BT42HRG7sEz4vNTj0gNnMnBsbs0h7EcRm7ZQ134+bGPaKXyJ8kfyv0a3ltk/yNbbi6eieurtoveJ545wfUuj4V2X+oOUPy/lQ5V+q+oY/CvDgxc4HarOsRdVFfuKbFw7UoAc6HkmCdFwnviSGMma23W7wD/lNdbXs8fr7z7TREr6sUeWfDu9IXaW92RfG3A+D3nKxrnf9Or3/B+/7H9cf1x/XH9cf1x/XH9cf1x/XH9cf1x/XH9cf1x/XH9cf1x/XH9X/9+p/wb6cWG/zz/RwdsGreW369b7P8+jOnLcsv3B/1+PV+p/fyCz8/J1rvffRXb9jZ8UanO5l/zBLNH2+/l1fm+PzmPvrC/Q2WX++jLNUX7r0sN1y4t4T85v4qYHnH/XT8+nyn39x7/Hrf2ePXn/fy6PjCct8J5y0X/t1w1PL/y7/pv956/ObjhP967/G4xdpx7/OJ5XTHffVyjwuf+YY53js77s9bzGXhvznV7WttoXgu/KoswIUfsXT6zZ/t9F9L4bfyw9EL38tD7qPbX/JYdvTCDnBE77ywKhHRN3ZsSUtJ519X6CrZWW3t96dlk5y/8Bkv3HvslA3T/rV8l
pfcgPY/FX13xYUNUx396/311XLf/mevRUXHn+10DBEdf6rTRwi5sDG8MeeC1ESUHW+v9/+T//0vFFslQA==\"\r\n \r\n if old == True:\r\n s = Icon7\r\n else:\r\n s = Icon8\r\n\r\n return s.decode('base64').decode('zlib')", "def icon(self) -> Icon:\n return self._icon", "def icon(self):\n return self.sensor_type[\"icon\"]", "def find_icon_path(self, setting: str) -> Optional[str]:\n c = self.c\n s = c.config.getString(setting)\n if not s:\n return None # Not an error.\n for directory in self.compute_icon_directories():\n path = g.finalize_join(directory, s)\n if g.os_path_exists(path):\n return path\n g.es_print('no icon found for:', setting)\n return None", "def icon(self):\n return SENSOR_TYPES[self.type][2]", "def Icon(self, size, name):\n # ------------------------------------------------------------------------\n bitmap = self.Bitmap(size, name)\n if not bitmap:\n return None\n icon = wx.EmptyIcon()\n icon.CopyFromBitmap(bitmap)\n return icon", "def setIconImage(*args):", "def getIconPath(self):\n return '/zport/dmd/img/icons/noicon.png'", "def _get_icon(icon_name):\n theme = 'Adwaita'\n size = '256x256'\n path = f'/usr/share/icons/{theme}/{size}/mimetypes/{icon_name}.png'\n return path", "def processIconFilename(self):\n\t\tself.iconFilename = self._getVal(64, 2)", "def Icon(self, icon):\r\n\r\n if icon is None:\r\n icon = wx.NullIcon\r\n \r\n self.icon = icon\r\n return self", "def iconSet(self, icon_name, tintNormal=True):\n\t\ticon = QtGui.QIcon()\n\n\t\tif isinstance(tintNormal, QtGui.QColor):\n\t\t\ticon.addPixmap(\n\t\t\t\tself.iconTint(icon_name, tint=tintNormal), \n\t\t\t\tQtGui.QIcon.Normal, QtGui.QIcon.Off)\n\t\telif tintNormal is False:\n\t\t\ticon.addPixmap(\n\t\t\t\tself.iconTint(icon_name), \n\t\t\t\tQtGui.QIcon.Normal, QtGui.QIcon.Off)\n\t\telse:\n\t\t\ticon.addPixmap(\n\t\t\t\tself.iconTint(icon_name, tint=self.col['text']), \n\t\t\t\tQtGui.QIcon.Normal, QtGui.QIcon.Off)\n\n\t\t# icon.addPixmap(\n\t\t# \tself.iconTint(icon_name, tint=self.col['highlighted-text']), \n\t\t# \tQtGui.QIcon.Normal, QtGui.QIcon.On)\n\n\t\ticon.addPixmap(\n\t\t\tself.iconTint(icon_name, tint=self.col['disabled']), \n\t\t\tQtGui.QIcon.Disabled, QtGui.QIcon.Off)\n\t\t# icon.addPixmap(\n\t\t# \tself.iconTint(icon_name, tint=self.col['disabled']), \n\t\t# \tQtGui.QIcon.Disabled, QtGui.QIcon.On)\n\n\t\ticon.addPixmap(\n\t\t\tself.iconTint(icon_name, tint=self.col['highlighted-text']), \n\t\t\tQtGui.QIcon.Active, QtGui.QIcon.Off)\n\t\t# icon.addPixmap(\n\t\t# \tself.iconTint(icon_name, tint=self.col['highlighted-text']), \n\t\t# \tQtGui.QIcon.Active, QtGui.QIcon.On)\n\n\t\ticon.addPixmap(\n\t\t\tself.iconTint(icon_name, tint=self.col['highlighted-text']), \n\t\t\tQtGui.QIcon.Selected, QtGui.QIcon.Off)\n\t\t# icon.addPixmap(\n\t\t# \tself.iconTint(icon_name, tint=self.col['highlighted-text']), \n\t\t# \tQtGui.QIcon.Selected, QtGui.QIcon.On)\n\n\t\treturn icon", "def getIconURL(self):\n try:\n return self.getObject().getIconURL()\n except KeyError:\n return super(Favorite, self).getIconURL()", "def icon(self):\n return self._sensor[CONF_ICON]", "def icon(self) -> str | None:\n return self._get_sensor_type()[1]", "def icon(self) -> str:\n return self._icon", "def icon(self) -> str:\n return self._icon" ]
[ "0.75174177", "0.68926984", "0.68926984", "0.6637837", "0.65864366", "0.649431", "0.64609027", "0.6416154", "0.63991344", "0.6376866", "0.63664967", "0.63042384", "0.6299288", "0.6299288", "0.6299288", "0.6299288", "0.6299288", "0.6299288", "0.6299288", "0.6299288", "0.6299288", "0.6299288", "0.62749135", "0.62664443", "0.62413704", "0.6225777", "0.6225499", "0.61775047", "0.6176438", "0.6171232", "0.61572117", "0.61411947", "0.61411947", "0.6141156", "0.6091615", "0.6089511", "0.6073611", "0.6066223", "0.6063631", "0.6043952", "0.6036585", "0.60230047", "0.6012949", "0.60030633", "0.60030633", "0.600145", "0.5991751", "0.59810877", "0.5968102", "0.59370923", "0.59370923", "0.59370923", "0.59370923", "0.59370923", "0.59370923", "0.59370923", "0.59370923", "0.59370923", "0.59370923", "0.59370923", "0.59370923", "0.59370923", "0.59370923", "0.59370923", "0.59370923", "0.59370923", "0.59370923", "0.59370923", "0.59370923", "0.5923973", "0.59164435", "0.58941424", "0.58937716", "0.58872974", "0.58865875", "0.58719873", "0.5867535", "0.5862481", "0.58612037", "0.5857863", "0.5854855", "0.58541745", "0.5844638", "0.5842657", "0.5800544", "0.57978374", "0.57873863", "0.5775224", "0.57746696", "0.5772828", "0.57621366", "0.5759902", "0.5759502", "0.5755138", "0.57405967", "0.5738828", "0.57292545", "0.5727474", "0.572479", "0.572479" ]
0.59392136
49
Attempt to match a user link to a recognised brand (LinkBrand).
def save(self, *args, **kwargs):
        domain = urlsplit(self.url).netloc

        try:
            self.icon = LinkBrand.objects.get(domain=domain)
        except ObjectDoesNotExist:
            pass

        super(UserLink, self).save(*args, **kwargs)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def link_match_family(link, family_name): # pylint: disable= too-many-return-statements\n if family_name == \"gaussian\":\n return link in [\"identity\", \"log\", \"inverse\"]\n elif family_name == \"gamma\":\n return link in [\"identity\", \"log\", \"inverse\"]\n elif family_name == \"bernoulli\":\n return link in [\"identity\", \"logit\", \"probit\", \"cloglog\"]\n elif family_name == \"wald\":\n return link in [\"inverse\", \"inverse_squared\", \"identity\", \"log\"]\n elif family_name == \"negativebinomial\":\n return link in [\"identity\", \"log\", \"cloglog\"]\n elif family_name == \"poisson\":\n return link in [\"identity\", \"log\"]\n else: # Custom family, we don't know what link functions can be used\n return True", "def match(self, brain, getlink):\n link = getlink(brain.getId,\n getlink(normalize(brain.Title), None))\n if link:\n return True", "def checklink(key,value):\n try:\n if not value.startswith((\"http\",\"www\")): return False, False\n ## Value is not string, so it can't be website link\n except: return False, False\n linkresearch = LINKRE.search(key)\n ## In normal practice this really shouldn't happen :-/\n if not linkresearch: return False, False\n return linkresearch.group(\"name\"), value", "def link_match_family(link, family_name):\n if family_name in FAMILY_LINKS:\n return link in FAMILY_LINKS[family_name]\n\n # Custom family, we don't know what link functions can be used\n return True", "def lf_is_brand(x, brand_names):\n words = x.product_name.split()\n if words[x.word_idx] in brand_names:\n return BRAND\n # check if the token is in a two word brand name\n if ' '.join(words[x.word_idx-1:x.word_idx+1]) in brand_names or ' '.join(words[x.word_idx:x.word_idx+2]) in brand_names:\n return BRAND\n return -1", "def brand_from_request(self, request):\n if b\"brand\" in request.args:\n return request.args[b\"brand\"][0].decode(\"utf-8\")\n return None", "def extract_brand_name(brand_name_url):\n from debra.models import Brands\n brand_name_url = brand_name_url.strip()\n if not brand_name_url:\n return\n domain = domain_from_url(brand_name_url)\n try:\n brand = Brands.objects.get(blacklisted=False, domain_name=domain)\n except:\n brand = Brands.objects.filter(\n blacklisted=False,\n name__iexact=brand_name_url\n ).order_by('-products_count')\n brand = brand[0] if brand else None\n return brand", "def getLinkstoBrands(url):\n brandUrls = {}\n try:\n print(\"Maker link being crawled : \", url)\n request = requests.get(url)\n if request.status_code == 200:\n sourceCode = BeautifulSoup(request.text, \"html.parser\")\n for td in sourceCode.findAll('td'):\n link = td.find('a', href=True)\n title = td.get_text()\n url = processUrl(link['href'])\n if title not in brandUrls.keys():\n brandUrls[title] = url\n print(title, ' ', url)\n else:\n print('no table or row found ')\n except requests.HTTPError as e:\n print('Unable to open url', e)\n return brandUrls", "def get_brand(alt_text):\n if alt_text:\n txt = alt_text.strip()\n brand = txt.split(' ')[-1]\n brand = brand.strip('\"')\n else:\n brand = None\n return brand", "def save(self, *args, **kwargs):\n super(LinkBrand, self).save(*args, **kwargs)\n\n existing_links = UserLink.objects.filter(url__contains=self.domain)\n\n # Filter out any false positives\n for link in existing_links:\n domain = urlsplit(link.url).netloc\n\n if domain != self.domain:\n existing_links = existing_links.exclude(pk=link.pk)\n\n existing_links.update(icon=self)", "def find_twitter_handle(brand):\n twitter_file = open(\"twitter_handles.txt\")\n\n # 
get the brand name and twitter handle for each brand\n for line in twitter_file:\n line = line.strip().split(\"|\")\n foundation_brand = line[0]\n twitter_handle = line[1]\n\n # return the twitter handle for that brand\n if foundation_brand == brand:\n twitter_file.close()\n return twitter_handle", "def test_link_registered(self):\n response = self.client.get(reverse('misago:admin:users:accounts:index'))\n\n response = self.client.get(response['location'])\n self.assertContains(response, reverse('misago:admin:users:bans:index'))", "def check_link_header_for_webmention(self, header):\n\n regexes = [\n \"<(.[^>]+)>;\\s+rel\\s?=\\s?[\\\"']?(http:\\/\\/)?webmention(\\.org)?\\/?[\\\"']?\"\n ]\n\n if \"webmention\" not in header:\n return False\n\n for regex in regexes:\n m = re.search(regex, header, re.IGNORECASE)\n if m:\n return m.group(1)\n\n # Must not have found anything\n return False", "def getLinksToPhonesPerBrands(url):\n urls = {}\n print(\"brand link being scrapped : \", url)\n try:\n request = requests.get(url)\n if request.status_code == 200:\n sourceCode = BeautifulSoup(request.content, \"html.parser\")\n li = sourceCode.select('#review-body div > ul > li > a')\n for link in li:\n title = link.get_text()\n url = processUrl(link['href'])\n if title not in urls.keys():\n urls[title] = url\n print(title, ' ', url)\n else:\n print('no table or row found ')\n except requests.HTTPError as e:\n print('Unable to open url', e)\n return urls", "def lookupLink(cls, session, link, model, recordID):\n checkURL = Link.httpRegexSub(link.get('url', None))\n return session.query(cls)\\\n .join(model.__tablename__)\\\n .filter(model.id == recordID)\\\n .filter(cls.url == checkURL)\\\n .one_or_none()", "def test_valid_brand_format(self, cred, brand):\n resp = requests.get(verify_url.format('json', cred[0], cred[1],\n brand, test_number))\n assert resp.status_code == 200\n assert resp.headers['Content-Type'] == 'application/json'\n assert resp.json()['status'] == '3'\n assert resp.json()['error_text'] == parameter_is_too_long_msg.format('brand')", "def brand(self, brand: object):\n\n self._brand = brand", "def DealUrlFirst(self, match, all_link):\n counter = 0\n for each_link in all_link:\n model_link = '<a href=\"(.*)\" class=\"c-3\">'\n break_link = '<a href=\"(.*)\" class=\"c-6\">'\n model_name = 'class=\"c-3\">(.*)</a>'\n if re.search(break_link, each_link):\n break\n result_link = re.findall(model_link, each_link)\n result_name = re.findall(model_name, each_link)\n# print len(result_link), len(result_name)\n if len(result_link) > 0:\n if len(result_name) > 0:\n print >> match, result_link[0]+' '+result_name[0]\n counter += 1\n print \"All the avaliable links is: \", counter", "def test_valid_brand_format(self, cred, brand):\n resp = requests.get(verify_url.format('json', cred[0], cred[1],\n brand, test_number))\n assert resp.status_code == 200\n assert resp.headers['Content-Type'] == 'application/json'\n assert resp.json()['status'] == '0'\n request_id = resp.json()['request_id']\n # terminate verification process\n assert 'Workflow terminated' in \\\n terminate_workflow(cred[0], cred[1], request_id).json()['error_text']", "def brand(self, brand):\n\n self._brand = brand", "def brand(self, brand):\n\n self._brand = brand", "def brand(self, brand):\n\n self._brand = brand", "def match(self, head_str):\n\t\tif \"masscan\" in head_str.lower():\n\t\t\treturn True\n\t\treturn False", "def share_link(cls, user, link):", "def share_link(cls, user, link):", "def test_evaluate_link__match(self, url: str, 
expected_version: str) -> None:\n link = Link(url)\n evaluator = self.make_test_link_evaluator(formats=[\"source\", \"binary\"])\n actual = evaluator.evaluate_link(link)\n assert actual == (LinkType.candidate, expected_version)", "def get_brand_name(container) -> Optional[str]:\r\n brand_container = container.find_all(\"a\", {\"class\": \"item-brand\"})\r\n # product_brand: List[] = brand_container[0].img[\"title\"]\r\n if len(brand_container) == 0:\r\n return None\r\n return brand_container[0].img[\"title\"]", "def isLinkName(word):\r\n return wikiLink.match(word)", "def card_link(link):\n try:\n link = int(link)\n except ValueError:\n raise exceptions.LinkRatingInvalid()\n\n if link not in range(1, 9):\n raise exceptions.LinkRatingInvalid()", "def _find_matching_link(category, component_type):\r\n\r\n # The tab shows links for the given category\r\n links = world.css_find('div.new-component-{} a'.format(category))\r\n\r\n # Find the link whose text matches what you're looking for\r\n matched_links = [link for link in links if link.text == component_type]\r\n\r\n # There should be one and only one\r\n assert_equal(len(matched_links), 1)\r\n return matched_links[0]", "def get_link(self, user_input):\r\n\r\n\t\t# state that you made it this far\r\n\t\tprint(f\"\\nSuccessfully called get_link() with the parameter(s): \\n\\n\\tuser_input -> {user_input}\")\r\n\r\n\t\t# tokenize the user's input, removing words like \"is\", \"the\", \"it\" and so on...\r\n\t\ttokens = self.tokenize(user_input)\r\n\r\n\t\t# categorize the question\r\n\t\tprint(f\"\\nIdentifying question's category...\")\r\n\t\tcategory = self.bayesian_naive_logic(tokens)\r\n\r\n\t\t# start looking for a link that may provide a Answer\r\n\t\tresponse_set = self.storage.get_urls(tokens, category)\r\n\t\tprint(f\"\\nBest Answer found: {response_set}\")\r\n\r\n\t\treturn f\"Here is a link with information closely matching your question: <a href='{response_set}' target='_blank'>{response_set}</a>\"", "def brand(self) -> object:\n return self._brand", "def lf_is_after_brand(x, brand_names):\n words = x.product_name.split()\n if x.word_idx > 0 and words[x.word_idx-1] in brand_names:\n if any(i.isdigit() for i in words[x.word_idx]):\n return MODELNAME\n return CATEGORY\n return -1", "def check_link(self, link):\n false_links = [\"wikipedia:\", \"w:\", \"wikitionary:\", \"wikt:\", \"wikinews:\",\n \"n:\", \"wikibooks:\", \"b:\", \"wikiquote:\", \"q:\", \"wikisource:\",\n \"s:\", \"wikispecies:\", \"species:\", \"wikiversity\", \"v:\", \n \"wikivoyage:\", \"voy:\", \"wikimedia:\", \"foundation:\", \"wmf:\", \n \"commonds:\", \"c:\", \"chapter:\", \"metawikipedia:\", \"meta:\", \n \"m:\", \"incubator:\", \"outreach:\", \"mw:\", \"mediazilla:\", \n \"bugzilla:\", \"testwiki:\", \"wikitech:\", \"wikidata:\", \"d:\",\n \"phabricator:\", \"phab:\", \"talk:\", \"user talk:\", \"file:\", \n \"user:\", \"template:\", \"category:\", \"file talk:\", \n \"category talk:\", \"image:\", \"media:\", \"special:\", \n \"help:\", \"portal:\", \"portal talk:\", \"\\#\"]\n is_bad = any(false_link in link.lower() for false_link in false_links)\n if is_bad or link[0] == \":\":\n return False\n else:\n return True", "def make_test_brand(self):\n\n b = Brand(slug='test')\n\n return b", "def test_random_mineral(self):\n rendered = self.render_template(\n '{% load minerals_extras %}'\n '{% random_mineral %}'\n )\n match = re.search(r'(?<=href=\"/)\\w+', rendered)\n url_name = match.group(0)\n self.assertTrue(\n 
Mineral.objects.filter(name__startswith=url_name).exists())", "def validate_social_link(platform_name, new_social_link):\n formatted_social_link = format_social_link(platform_name, new_social_link)\n\n # Ensure that the new link is valid.\n if formatted_social_link is None:\n required_url_stub = settings.SOCIAL_PLATFORMS[platform_name]['url_stub']\n raise ValueError(_('Make sure that you are providing a valid username or a URL that contains \"{url_stub}\". '\n 'To remove the link from your edX profile, '\n 'leave this field blank.').format(url_stub=required_url_stub))", "def check_link(self, link, links_para):\n href = link['href']\n if not href.startswith('/wiki/') or href == '/wiki/Latin' or href.startswith('#'):\n return False\n if \"<i>\" in link or href in links_para:\n return False\n title = href[6:]\n if title.startswith('Help:') or title.startswith('File:') or title.endswith('.ogg') or title.startswith('Wikipedia:'):\n return False\n return True", "def handle_item(item, user_to_check):\n if 'provider' in item:\n if item['provider'] == 'twitter':\n if item['screen_name'] == user_to_check:\n check_avatar(item)\n if item['provider'] == 'facebook':\n if item['screen_name'] == user_to_check:\n check_facebook_avatar(item)\n else:\n #\n # Default to twitter\n if item['screen_name'] == user_to_check:\n check_avatar(item)", "def _fe_brand_presence(self, sample):\n result = OrderedDict()\n for item in self._brands:\n result[\"{}_brand_subdomain\".format(item)] = 1 if item in sample['subdomain'] else 0\n result[\"{}_brand_domain\".format(item)] = 1 if item in sample['domain'] else 0\n\n return result", "def test_brands_reply(self):\n # 1. Setup service channel / dispatch channel\n # 2. send a post to brand\n # 3. Reply with custom response\n # 4. Route a reply\n # 5. check there is no extra responses created\n # 6. 
create a matchable and repeat 1-5\n brand = 'brand'\n channel, dispatch_channel = self.setup_channels(brand)\n user = self._create_db_user(email='[email protected]', password='test', is_superuser=True)\n user.account = self.account\n user.save()\n profiles = set()\n\n def do_test(matchable):\n profile = gen_profile()\n user_name = profile['user_name']\n profiles.add(user_name)\n post = self._create_db_post(\n '@%s I need some carrot' % brand,\n channel=channel,\n user_profile=profile)\n\n response = Response.objects.get(id=id_from_post_id(post.id))\n self.assertIsInstance(response.matchable, matchable.__class__)\n assert response.matchable == matchable\n\n # post custom response\n creative = \"U could find some carrot there\"\n self.login(user.email, 'test')\n data = dict(creative=creative,\n response=str(response.id),\n latest_post=str(response.post.id))\n resp = self.client.post('/commands/custom_response', data=json.dumps(data))\n resp = json.loads(resp.data)\n\n # check responses and conversations\n self.assertEqual(Response.objects(conversation_id=None).count(), 0)\n self.assertEqual(\n Response.objects(channel__in=[channel, channel.inbound_channel, channel.outbound_channel]).count(),\n 0)\n self.assertEqual(Response.objects(conversation_id=response.conversation.id).count(), 1)\n self.assertEqual(Response.objects(channel__in=[dispatch_channel]).count(), len(profiles))\n\n matchable = EmptyMatchable.get()\n do_test(matchable)\n\n matchable = self._create_db_matchable('Here is your carrot',\n intention_topics=['carrot'],\n channels=[channel.inbound_channel])\n do_test(matchable)", "def test_single_named_link_with_custom_type():\n pass", "def assert_has_valid_link(self, response, expected_ending):\r\n assert link in response['link']\r\n self.assert_valid_url(link, expected_ending)", "def brand_id(self) -> int:\n brand_id_codes = {\n \"Apple\": 319682, \n \"LG\": 353985, \n \"Huawei\": 349965, \n \"Samsung\": 352130}\n\n try:\n return brand_id_codes[self.__brand]\n except KeyError:\n raise KeyError(\"brand must be 'Apple', 'LG', 'Huawei' or 'Samsung'\")", "def find_usefull_links(links, classmodel, class_count_vect):\n\n import re\n final_links = []\n seclinks = links\n for link in links:\n fulllink = link\n if link == None:\n continue\n else:\n link = link.replace('://', ' ')\n link = link.replace('@', ' ')\n link = link.replace('#', ' ')\n link = link.replace('/', ' ')\n link = link.replace('-', ' ')\n link = link.replace('.', ' ')\n link = link.replace('https', '')\n link = link.replace('http', '')\n link = link.replace('www', '')\n link = link.replace('&', ' ')\n link = link.replace('=', ' ')\n linkpd = pd.Series(link.strip())\n link_feature = class_count_vect.transform(linkpd)\n result = classmodel.predict(link_feature)\n\n result = result.tolist()\n result = str(result)\n if result == '[1]':\n final_links.append(fulllink)\n final_links = list(dict.fromkeys(final_links))\n \n if len(final_links) == 0 or len(final_links) < 5:\n for linksec in seclinks:\n linkwords = ['cabinet', 'gover', 'goverment', 'composition', 'ministers', 'minister',\n 'president', 'composicao', 'parliament', 'person', 'who', 'mini', 'compo',\n 'governor', 'secretariat', 'secretary']\n for w in linkwords:\n if re.search(w, linksec):\n final_links.append(linksec)\n else:\n continue\n final_links = list(dict.fromkeys(final_links))\n return (final_links)", "def process_link(self, val):\n last_segment = val\n last_slash = val[-1] == '/'\n if last_slash:\n last_segment = val[0:-1]\n\n last_segment = 
last_segment.rsplit('/', 1)[1]\n if self.is_version_folder(last_segment):\n logger.info('Skipping link with version: %s' % val)\n return None\n\n logger.debug('Link: %s' % val)\n return None", "def process_survey_link(survey_link, user):\r\n return survey_link.format(UNIQUE_ID=unique_id_for_user(user))", "def extract_link_str(self, link):\n if type(link) is str:\n # import pdb; pdb.set_trace()\n if re.match( r'^link:', link):\n # assume intending to specify a link, now match for rest of pattern \n matchObj = re.match( r'^link:([^ ]+)$', link)\n if matchObj:\n path = matchObj.group(1)\n node = self.get_node(path)\n link_info = {'node': node}\n return link_info\n else:\n print \"** Error, invalid path specified in link string, must not have spaces\"\n print \" link string is: '%s'\" % link\n traceback.print_stack()\n sys.exit(1)\n elif re.match( r'^extlink:', link):\n # assume intending to specify an external link, now match for rest of pattern\n matchObj = re.match( r'^extlink:([^ ]*[^ ,])[ ,]([^ ]+)$', link)\n if matchObj:\n file = matchObj.group(1)\n path = matchObj.group(2)\n link_info = {'extlink': (file, path)}\n return link_info\n else:\n print \"** Error, invalid file or path specified in extlink string\"\n print \" must not have spaces and file name must not end in comma\"\n print \"extlink string is: '%s'\"% link\n traceback.print_stack()\n sys.exit(1)\n return None", "def match_url(self, url):\n pass", "def brand(self) -> str:\n return self._config_entry.data.get(CONF_BRAND, DEFAULT_BRAND)", "def test_bleach_with_href():\n eq_(u'<a href=\"http://xx.com\" rel=\"nofollow\" title=\"xx\">xx</a> '\n u'<a href=\"http://yy.com\" rel=\"nofollow\">http://yy.com</a>',\n bl.bleach('<a title=\"xx\" href=\"http://xx.com\">xx</a> http://yy.com'))\n eq_('<a href=\"http://xx.com\" rel=\"nofollow\">http://xx.com</a>',\n bl.bleach('<a href=\"http://xx.com\">http://xx.com</a>'))", "def link(address):", "def legacy_check(link = None):\n if type(link) == type(\"\"):\n # If we are given a string, we split it ourselves\n return link.split(\"/\")\n else:\n return link", "def extract_brand_names(brand_name_urls):\n from debra.models import Brands\n\n # stripping urls, fetching their domains\n brand_name_urls = [url.strip() for url in brand_name_urls if len(url) > 0]\n if not brand_name_urls:\n return []\n\n brand_domains = [domain_from_url(url) for url in brand_name_urls if len(url) > 0]\n\n t1 = time.time()\n\n queries = [Q(domain_name=value) for value in brand_domains] + [Q(name=value.upper()) for value in brand_name_urls]\n\n query = queries.pop()\n\n for item in queries:\n query |= item\n\n brands = Brands.objects.exclude(blacklisted=True).filter(query)\n\n brands = list(brands)\n log.info('Time passed to get brands: %s ' % (time.time() - t1))\n return brands", "def get_brand(self, brand_code, **kwargs):\n url = self.api_url('brands/{}'.format(brand_code))\n\n return requests.get(\n url,\n headers=self.auth_header,\n params=kwargs,\n ).json()", "def link_extract(link_text, content):\n h = html5lib.parse(content, namespaceHTMLElements=False)\n candidates = h.findall(\".//a[.='%s']\" % link_text)\n if not candidates:\n return 'NOT MATCHED'\n try:\n return candidates[0].attrib['href']\n except:\n return 'NOT MATCHED'", "def get_link(prefix: str, identifier: str, use_bioregistry_io: bool = True) -> Optional[str]:\n providers = get_providers(prefix, identifier)\n for key in LINK_PRIORITY:\n if not use_bioregistry_io and key == \"bioregistry\":\n continue\n if key not in providers:\n continue\n rv = 
providers[key]\n if rv is not None:\n return rv\n return None", "def brand(self):\n return \"Nest Labs\"", "def looks_like_fallback(url_name):\r\n return (url_name is not None\r\n and url_name.startswith(tag)\r\n and re.search('[0-9a-fA-F]{12}$', url_name))", "def match(self, item):", "def click(cls, user, link):\n pass", "def check_linking(seq):\n\n if seq in ['A', 'C', 'G', 'U']: \n type = \"RNA linking\"\n elif seq in ['DA', 'DC', 'DG', 'DT']:\n type = \"DNA linking\"\n elif seq in [\"ALA\", \"ARG\", \"ASN\", \"ASP\", \"CYS\", \"GLN\", \"GLU\", \"GLY\", \"HIS\", \"ILE\", \"LEU\", \"LYS\", \"MET\", \"PHE\", \"PRO\", \"SER\", \"THR\", \"TRP\", \"TYR\", \"VAL\"]:\n type = \"L-peptide linking\"\n elif seq in list(modified_nucleotides.keys()):\n if modified_nucleotides[seq]['standard'] in ['A', 'C', 'G', 'U']:\n type = 'RNA linking'\n else:\n type = \"Unknown\"\n return type", "def assertLinkUrl(self, package, actual_url):\n parsed_url = urllib.parse.urlparse(actual_url)\n params = urllib.parse.parse_qs(parsed_url.query)\n self.assertEqual([package], params['package'])", "def click(cls, user, link):\r\n pass", "def test_single_link():\n pass", "def verify(link: str\n ) -> bool:\n \n # Ignore any /live/ or /av/ articles as they aren't proper articles\n if any([path in link for path in (\"/live/\", \"/sport1/\", \"/av/\")]):\n return False\n \n # Ensure the link corresponds with a valid BBC News article.\n return any([link.startswith(prefix) for prefix in BBC_URLS])", "def _get_username_from_social_link(platform_name, new_social_link):\n # Blank social links should return '' or None as was passed in.\n if not new_social_link:\n return new_social_link\n\n # Parse the social link as if it were a URL.\n parse_result = urlparse(new_social_link)\n url_domain_and_path = parse_result[1] + parse_result[2]\n url_stub = re.escape(settings.SOCIAL_PLATFORMS[platform_name]['url_stub'])\n username_match = re.search(r'(www\\.)?' 
+ url_stub + r'(?P<username>.*?)[/]?$', url_domain_and_path, re.IGNORECASE)\n if username_match:\n username = username_match.group('username')\n else:\n username = new_social_link\n\n # Ensure the username is a valid username.\n if not _is_valid_social_username(username):\n return None\n\n return username", "def test_get_variant_links(variant_obj):\n # GIVEN a variant object without links\n assert \"thousandg_link\" not in variant_obj\n # WHEN fetching the variant links\n links = get_variant_links(variant_obj)\n # THEN check that links are returned\n assert \"thousandg_link\" in links", "def user_query_stats_helper(request, search_query, base_brand):\n\n # print(\"Got: request %r\" % request)\n print(\"Got search_query %r\" % search_query)\n print(\"Got base_brand %r\" % base_brand)\n\n mongo_utils.track_visit(request)\n\n # first prettify the query for mandrill, intercom, and slack\n try:\n only_setup_params = find_non_default_query(search_query)\n if only_setup_params is None or only_setup_params == [{}]:\n only_setup_params = {}\n query_formatted = format_query_for_displaying(only_setup_params)\n print \"only_setup_params = [%r] query_formatted = [%r]\" % (only_setup_params, query_formatted)\n except:\n a = json.dumps(search_query, sort_keys=True, indent=4, separators=(',', ': '))\n query_formatted = 'Problem in formatting %r' % a\n pass\n\n mongo_utils.track_query(\"brand-search-query\", query_formatted, {\"user_id\": request.visitor[\"auth_user\"].id})\n\n account_helpers.intercom_track_event(request, \"brand-search-query\", {\n 'query': query_formatted,\n })\n\n if base_brand:\n user = User.objects.get(id=request.user.id)\n if base_brand.flag_trial_on and not account_helpers.internal_user(user):\n slack_msg = \"\\n**************\\nBrand = \" + base_brand.domain_name + \" User: \" + request.user.email + \"\\n\" + query_formatted\n account_helpers.send_msg_to_slack('brands-trial-activity', slack_msg)\n\n base_brand.saved_queries.create(query=json.dumps(search_query), user=request.user)", "def test_brand_string(microvm):\n branch_string_format = \"^model name\\\\s+:\\\\s+(.+)$\"\n host_brand_string = None\n for line in open('/proc/cpuinfo', 'r'):\n matchoutput = re.search(branch_string_format, line)\n if matchoutput:\n host_brand_string = matchoutput.group(1)\n assert host_brand_string is not None\n\n test_vm = microvm\n\n test_vm.basic_config(vcpu_count=1)\n test_vm.launch()\n\n guest_cmd = \"cat /proc/cpuinfo | grep 'model name' | head -1\"\n status, output = test_vm.serial_cmd(guest_cmd)\n assert status == 0\n\n line = output.splitlines()[0].rstrip()\n matchoutput = re.search(branch_string_format, line)\n assert matchoutput\n guest_brand_string = matchoutput.group(1)\n assert guest_brand_string\n\n cpu_vendor = _get_cpu_vendor()\n expected_guest_brand_string = \"\"\n if cpu_vendor == CpuVendor.INTEL:\n expected_guest_brand_string = host_brand_string\n\n assert guest_brand_string == expected_guest_brand_string", "def get_brand(self):\n provider = self.account.get_provider()\n return dict(id=provider.id,\n name=provider.name)", "def check_link_tag(self):\r\n node = self.article.raw_doc\r\n meta = self.parser.getElementsByTag(node, tag='link', attr='rel', value='image_src')\r\n for item in meta:\r\n src = self.parser.getAttribute(item, attr='href')\r\n if src:\r\n return self.get_image(item, src, extraction_type='linktag')\r\n return None", "def get_link_type(comp_a, comp_b, link_type):\n need_to_verify = isinstance(comp_a, SpecialMixerComponent) or isinstance(comp_b, 
SpecialMixerComponent)\n if need_to_verify and link_type is not LinkType.horizontal:\n link_type = LinkType.matched_track_only\n return link_type", "def username_test(self):\n text = 'test @username'\n html = 'test <a href=\"https://www.instagram.com/username/\">@username</a>'\n self.assertEqual(linkify_text(text), html)", "def test_single_named_link():\n pass", "def should_link(self, item):\r\n return item.__class__ in self.class_map.keys()", "def hashtag_and_username_test(self):\n text = 'test #hashtag and @username test'\n html = 'test <a href=\"https://www.instagram.com/explore/tags/hashtag\">#hashtag</a> and <a href=\"https://www.instagram.com/username/\">@username</a> test'\n self.assertEqual(linkify_text(text), html)", "def scrap_site(link):\n pass # Scrapy or BeautifulSoup", "def __verify(self, href):\n # change main url to avoid mistakes with http ou https\n main = self.main_url.replace('https://', '').replace('http://', '')\n forbiden = {\"#\", 'None'} # forbidden possible urls\n if (href is None) or (href in forbiden):\n return False\n for item in ['tel:', 'mailto:', 'javascript:']:\n if item in href: # verify if is a link to telephone, e-mail or javascript\n return False\n if main in href and (\"/checkout/cart/add\" in href or \"/checkout/#/cart\" in href):\n return False # prevents a purchase from being made\n elif main in href or (main not in href and href[:4] != \"http\"):\n return True # possible case of a valid link\n else:\n return False # any other link is not valid", "def test_if_brand_is_missing(self, cred):\n resp = requests.get(verify_url.format('json', cred[0], cred[1],\n '', test_number))\n assert resp.status_code == 200\n assert resp.headers['Content-Type'] == 'application/json'\n assert resp.json()['status'] == '2'\n assert resp.json()['error_text'] == missing_specific_mandatory_parm_msg.format('brand')", "def get_id_attribution(link = None):\n log.debug(\"attribution link: \" + repr(link))\n choppedLink = legacy_check(link)\n id = None\n try:\n # First try to get the relevant part, that is encoded\n step1 = choppedLink[3][choppedLink[3].find(\"watch\"):]\n # Then stplit the other encoded params\n step2 = step1[12:].split(\"%\")\n # and get the good part\n step3 = step2[0]\n id = step3 # choppedLink[3][choppedLink[3].find(\"watch\"):][12:].split(\"%\")[0]\n except Exception as e:\n raise e # dont care 'bout issues here. 
all will be NotImplementedError \n\n # If we havent found a match, then this is not implemented.\n if id == \"\":\n raise Exception(\"no recognised kind of link\")\n\n return id", "async def return_img_link(query: str) -> dict:\n best_match = extractOne(query.replace(\" \", \"_\"), os.listdir(img_dir))\n if best_match[1] > 50:\n resp = {\n \"success\": True,\n \"query\": query,\n \"link\": f\"{img_baseurl}/{best_match[0]}\",\n }\n else:\n resp = {\n \"success\": False,\n \"query\": query,\n \"reason\": \"Was not able to find an appropriate image\",\n }\n return resp", "def highlight_user(value):\n user_highlight = user_pattern.sub(r\"https://www.instagram.com/\\1\", value)\n\n return user_highlight", "def _match(cls, url, **kwargs):\n return url.scheme.startswith('http')", "def url_validator(arg):\n #пишем костыль, на случай если именная ссылка содержит начало вида club_\n if arg.find('https://vk.com/club_') != -1 or arg.find('https://vk.com/club-') != -1:\n return {\"type\": 'named-link', \"id\": arg.split('/')[-1]}\n else:\n arg = arg.lower()\n\n # If url looks like http(s)://vk.com/named-link\n symbolic_id = TXT_ID_REGEXP.match(arg)\n if symbolic_id:\n url = symbolic_id.groupdict()\n url[\"type\"] = 'named-link'\n return url\n\n # If url looks like http[s]://vk.com/id123456\n numeric_id = NUM_ID_REGEXP.match(arg)\n if numeric_id:\n url = numeric_id.groupdict()\n return url\n\n #raise argparse.ArgumentTypeError(\"{} - invalid url address\".format(arg))", "def flair_type(is_link: bool) -> str:\n return \"LINK_FLAIR\" if is_link else \"USER_FLAIR\"", "def find_by_brandowner(self, brand: str = None) -> pd.DataFrame:\n return self._filter(\"brand_owner\", brand)", "def _set_link(\n meta: Dict,\n link: Optional[Union[type(None), str, bool, KEChainPages]] = None,\n link_value: Optional[CardWidgetLinkValue] = None,\n link_target: Optional[Union[str, LinkTargets]] = LinkTargets.SAME_TAB,\n **kwargs,\n) -> Dict:\n meta[\"linkTarget\"] = check_enum(link_target, LinkTargets, \"link_target\")\n\n from pykechain.models import Activity\n\n if isinstance(link, Activity):\n if link.activity_type == ActivityType.TASK:\n default_link_value = CardWidgetLinkValue.TASK_LINK\n else:\n default_link_value = CardWidgetLinkValue.TREE_VIEW\n\n meta.update(\n {\n MetaWidget.CUSTOM_LINK: link.id,\n MetaWidget.SHOW_LINK_VALUE: default_link_value,\n }\n )\n elif isinstance(link, str) and is_uuid(link):\n meta.update(\n {\n MetaWidget.CUSTOM_LINK: link,\n MetaWidget.SHOW_LINK_VALUE: CardWidgetLinkValue.TASK_LINK,\n }\n )\n elif link is None or link is False:\n meta.update(\n {\n MetaWidget.CUSTOM_LINK: None,\n MetaWidget.SHOW_LINK_VALUE: CardWidgetLinkValue.NO_LINK,\n }\n )\n elif link in KEChainPages.values():\n meta.update(\n {\n MetaWidget.CUSTOM_LINK: \"\",\n MetaWidget.SHOW_LINK_VALUE: CardWidgetKEChainPageLink[link],\n }\n )\n else:\n meta.update(\n {\n MetaWidget.CUSTOM_LINK: link,\n MetaWidget.SHOW_LINK_VALUE: CardWidgetLinkValue.EXTERNAL_LINK,\n }\n )\n\n if link_value is not None:\n meta.update(\n {\n MetaWidget.SHOW_LINK_VALUE: check_enum(\n link_value, CardWidgetLinkValue, \"link_value\"\n ),\n }\n )\n\n return meta", "def CustomLinkCheck(context, message, source, extension=\".cc\"):\n context.Message(message)\n result = context.TryLink(source, extension)\n context.Result(result)\n return result", "def get_image_link():\n image_links = set()\n supplemented_keyword = urllib.parse.quote(\n supplemented_keywords[random.randint(0,\n len(supplemented_keywords) - 1)],\n safe='')\n main_keyword = 
urllib.parse.quote(\n main_keywords[random.randint(0,\n len(main_keywords) - 1)], safe='')\n\n # print('the theme of cats: ' + supplemented_keyword)\n\n search_query = (main_keyword + ' ' + supplemented_keyword).replace(\n ' ', '%20')\n url = 'https://www.google.com/search?q=' + \\\n search_query + '&source=lnms&tbm=isch'\n image_links = image_links.union(parse_page(url))\n\n image_link = list(image_links)[random.randint(0, len(image_links) - 1)]\n # print('link:' + image_link)\n\n while 'https://' not in image_link or r'\\\\u' in image_link or '.jpg' not in image_link:\n image_link = list(image_links)[random.randint(0, len(image_links) - 1)]\n # print('link:' + image_link)\n\n return image_link", "def test_linked_matches_property():\n # Issue #265\n front_adapter = FrontAdapter(\"GGG\")\n back_adapter = BackAdapter(\"TTT\")\n la = LinkedAdapter(\n front_adapter,\n back_adapter,\n front_required=False,\n back_required=False,\n name=\"name\",\n )\n assert la.match_to(\"AAAATTTT\").score == 3", "def isPresent(self, word):\n\t\treturn word in self.link_words", "def get_mt_usr_invalid_link(self):\n schema = self.district.format_schema()\n\n rdf_nav_link = '%s.rdf_nav_link' % schema\n rdf_link = '%s.rdf_link' % schema\n rdf_nav_strand = '%s.rdf_nav_strand' % schema\n usr_node_link = '%s.usr_node_link' % schema\n adas_node_curvature = '%s.adas_node_curvature' % schema\n adas_node_slope = '%s.adas_node_slope' % schema\n rdf_sign_origin = '%s.rdf_sign_origin' % schema\n rdf_sign_destination = '%s.rdf_sign_destination' % schema\n\n sqls = []\n sqls.append('SELECT link_id FROM %s' % rdf_nav_strand)\n sqls.append('SELECT nav_link_id as link_id FROM %s WHERE nav_link_id IS NOT NULL' % usr_node_link)\n sqls.append('SELECT from_link_id AS link_id FROM %s' % adas_node_curvature)\n sqls.append('SELECT to_link_id AS link_id FROM %s' % adas_node_curvature)\n sqls.append('SELECT to_link_id AS link_id FROM %s' % adas_node_slope)\n sqls.append('SELECT originating_link_id AS link_id FROM %s' % rdf_sign_origin)\n sqls.append('SELECT dest_link_id AS link_id FROM %s' % rdf_sign_destination)\n\n\n table = '%s.usr_invalid_link' % schema\n mt = SqlMeta(table)\n mt.add('CREATE TABLE %s AS (SELECT link_id FROM %s INTERSECT (%s) EXCEPT SELECT link_id FROM %s) ' %(table, rdf_link, ' UNION '.join(sqls), rdf_nav_link))\n mt.add(self._pk_sql(table, 'link_id'))\n\n mt.ref(rdf_nav_link)\n mt.ref(rdf_link)\n mt.ref(rdf_nav_strand)\n mt.ref(usr_node_link)\n mt.ref(adas_node_curvature)\n mt.ref(adas_node_slope)\n mt.ref(rdf_sign_origin)\n mt.ref(rdf_sign_destination)\n\n return mt", "def matched_brand_positions(self):\n collector = self._results.collector\n \"\"\"@type: ok.query.whoosh_contrib.find_brands.FindBrandsQuery.BrandsCollector\"\"\"\n brand = self.current_match['brand']\n pos = collector.brands_found[brand][1]\n return pos", "def header_field_should_have_link(self, label):\n locator = lex_locators[\"record\"][\"header\"][\"field_value_link\"].format(label)\n self.selenium.page_should_contain_element(locator)", "def ble_device_matches(\n matcher: BluetoothCallbackMatcher | BluetoothMatcher,\n service_info: BluetoothServiceInfoBleak,\n) -> bool:\n device = service_info.device\n if (address := matcher.get(ADDRESS)) is not None and device.address != address:\n return False\n\n if matcher.get(CONNECTABLE, True) and not service_info.connectable:\n return False\n\n advertisement_data = service_info.advertisement\n if (\n service_uuid := matcher.get(SERVICE_UUID)\n ) is not None and service_uuid not in 
advertisement_data.service_uuids:\n return False\n\n if (\n service_data_uuid := matcher.get(SERVICE_DATA_UUID)\n ) is not None and service_data_uuid not in advertisement_data.service_data:\n return False\n\n if (\n manfacturer_id := matcher.get(MANUFACTURER_ID)\n ) is not None and manfacturer_id not in advertisement_data.manufacturer_data:\n return False\n\n if (manufacturer_data_start := matcher.get(MANUFACTURER_DATA_START)) is not None:\n manufacturer_data_start_bytes = bytearray(manufacturer_data_start)\n if not any(\n manufacturer_data.startswith(manufacturer_data_start_bytes)\n for manufacturer_data in advertisement_data.manufacturer_data.values()\n ):\n return False\n\n if (local_name := matcher.get(LOCAL_NAME)) is not None and (\n (device_name := advertisement_data.local_name or device.name) is None\n or not _memorized_fnmatch(\n device_name,\n local_name,\n )\n ):\n return False\n\n return True", "def isLinkIdFormatValid(link_id):\n if linkable.LINK_ID_REGEX.match(link_id):\n return True\n return False", "def card_linkmarker(linkmarker):\n if linkmarker not in constants.LINK_MARKERS:\n raise exceptions.LinkMarkerInvalid()", "def match(request, user_id):\n current_user = CustomUser.objects.get(id=request.user.id)\n liked_user = CustomUser.objects.get(id=user_id)\n\n if not Matches.objects.filter(first_user=current_user, second_user=liked_user):\n Matches.objects.create(first_user=current_user, second_user=liked_user)\n\n if Matches.objects.filter(first_user=liked_user, second_user=current_user):\n send_emails_to_users(current_user, liked_user)\n send_emails_to_users(liked_user, current_user)\n\n return redirect('index')", "def guess_breed(dbo, s):\n s = str(s).lower()\n guess = db.query_int(dbo, \"SELECT ID FROM breed WHERE LOWER(BreedName) LIKE '%\" + db.escape(s) + \"%'\")\n if guess != 0: return guess\n return configuration.default_breed(dbo)" ]
[ "0.61408734", "0.6093003", "0.5789055", "0.5748644", "0.57433885", "0.57430315", "0.567563", "0.539753", "0.5388041", "0.53709215", "0.53483826", "0.5343389", "0.522596", "0.519779", "0.51586634", "0.51541173", "0.5086018", "0.504153", "0.50308156", "0.49988824", "0.49988824", "0.49988824", "0.49827403", "0.49744022", "0.49744022", "0.4940133", "0.49234527", "0.48953873", "0.48940155", "0.4868742", "0.48600423", "0.48561162", "0.4832321", "0.48255765", "0.48203176", "0.4799724", "0.47651377", "0.47397894", "0.47394273", "0.47301158", "0.47290277", "0.47149524", "0.47080156", "0.4706187", "0.46877912", "0.46814623", "0.46803084", "0.46569943", "0.4645464", "0.46406874", "0.4616823", "0.4608053", "0.46056128", "0.46042085", "0.45925528", "0.45530948", "0.45521536", "0.45349097", "0.45288476", "0.4528342", "0.4526011", "0.45229718", "0.45142043", "0.45062935", "0.4499964", "0.4492511", "0.4477424", "0.44727662", "0.44727004", "0.44724867", "0.44719338", "0.4465312", "0.4455471", "0.44544378", "0.44516847", "0.4446364", "0.44450846", "0.44446498", "0.44339243", "0.4417765", "0.4415443", "0.4407291", "0.44020268", "0.43932885", "0.43915534", "0.4388538", "0.43872413", "0.43866605", "0.43813646", "0.4378231", "0.43776238", "0.43767542", "0.43737832", "0.43733773", "0.43721756", "0.43613026", "0.4360045", "0.4358625", "0.43565822", "0.43479773" ]
0.54133815
7
Find any existing links to match to a new (or edited) brand
def save(self, *args, **kwargs):
        super(LinkBrand, self).save(*args, **kwargs)

        existing_links = UserLink.objects.filter(url__contains=self.domain)

        # Filter out any false positives
        for link in existing_links:
            domain = urlsplit(link.url).netloc

            if domain != self.domain:
                existing_links = existing_links.exclude(pk=link.pk)

        existing_links.update(icon=self)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getLinkstoBrands(url):\n brandUrls = {}\n try:\n print(\"Maker link being crawled : \", url)\n request = requests.get(url)\n if request.status_code == 200:\n sourceCode = BeautifulSoup(request.text, \"html.parser\")\n for td in sourceCode.findAll('td'):\n link = td.find('a', href=True)\n title = td.get_text()\n url = processUrl(link['href'])\n if title not in brandUrls.keys():\n brandUrls[title] = url\n print(title, ' ', url)\n else:\n print('no table or row found ')\n except requests.HTTPError as e:\n print('Unable to open url', e)\n return brandUrls", "def getLinksToPhonesPerBrands(url):\n urls = {}\n print(\"brand link being scrapped : \", url)\n try:\n request = requests.get(url)\n if request.status_code == 200:\n sourceCode = BeautifulSoup(request.content, \"html.parser\")\n li = sourceCode.select('#review-body div > ul > li > a')\n for link in li:\n title = link.get_text()\n url = processUrl(link['href'])\n if title not in urls.keys():\n urls[title] = url\n print(title, ' ', url)\n else:\n print('no table or row found ')\n except requests.HTTPError as e:\n print('Unable to open url', e)\n return urls", "def get_links_to_historic_matches(wd):\n list_of_links = []\n content_blocks = wd.find_elements_by_id(\"js-mutual-table\")\n for block in content_blocks:\n elements = block.find_elements_by_tag_name(\"a\")\n for el in elements:\n one_link = el.get_attribute(\"href\")\n if one_link.count(\"/\") > 6:\n list_of_links.append(one_link)\n return list_of_links", "def getMyLinks(self, link_list, plant):\n my_links = []\n for links in link_list:\n if plant in links:\n my_links.append(links)\n return my_links", "def list_of_links_to_check(main_link):\n # TODO: try else-here, when didn't finds links, then restart\n list_of_links = []\n driver.get(main_link)\n sleep(1)\n temp_list = driver.find_elements_by_class_name(\"table-main__tt\")\n for block in temp_list: \n elements = block.find_elements_by_tag_name(\"a\")\n for el in elements:\n if \"soccer\" in el.get_attribute(\"href\"):\n list_of_links.append(el.get_attribute(\"href\"))\n return list_of_links", "def find_usefull_links(links, classmodel, class_count_vect):\n\n import re\n final_links = []\n seclinks = links\n for link in links:\n fulllink = link\n if link == None:\n continue\n else:\n link = link.replace('://', ' ')\n link = link.replace('@', ' ')\n link = link.replace('#', ' ')\n link = link.replace('/', ' ')\n link = link.replace('-', ' ')\n link = link.replace('.', ' ')\n link = link.replace('https', '')\n link = link.replace('http', '')\n link = link.replace('www', '')\n link = link.replace('&', ' ')\n link = link.replace('=', ' ')\n linkpd = pd.Series(link.strip())\n link_feature = class_count_vect.transform(linkpd)\n result = classmodel.predict(link_feature)\n\n result = result.tolist()\n result = str(result)\n if result == '[1]':\n final_links.append(fulllink)\n final_links = list(dict.fromkeys(final_links))\n \n if len(final_links) == 0 or len(final_links) < 5:\n for linksec in seclinks:\n linkwords = ['cabinet', 'gover', 'goverment', 'composition', 'ministers', 'minister',\n 'president', 'composicao', 'parliament', 'person', 'who', 'mini', 'compo',\n 'governor', 'secretariat', 'secretary']\n for w in linkwords:\n if re.search(w, linksec):\n final_links.append(linksec)\n else:\n continue\n final_links = list(dict.fromkeys(final_links))\n return (final_links)", "def _get_new_urls(self, page_url, soup):\n new_urls = set()\n links = soup.find_all('a', href=re.compile(r'/item/\\w+'))\n for link in links:\n new_url = 
link['href']\n new_full_url = urljoin(page_url, new_url)\n new_urls.add(new_full_url)\n return new_urls", "def DealUrlFirst(self, match, all_link):\n counter = 0\n for each_link in all_link:\n model_link = '<a href=\"(.*)\" class=\"c-3\">'\n break_link = '<a href=\"(.*)\" class=\"c-6\">'\n model_name = 'class=\"c-3\">(.*)</a>'\n if re.search(break_link, each_link):\n break\n result_link = re.findall(model_link, each_link)\n result_name = re.findall(model_name, each_link)\n# print len(result_link), len(result_name)\n if len(result_link) > 0:\n if len(result_name) > 0:\n print >> match, result_link[0]+' '+result_name[0]\n counter += 1\n print \"All the avaliable links is: \", counter", "def update_links(self):\n for a in self.book.xpath(\"//a[@href]\"):\n href = a.xpath(\"@href\")[0]\n index_list = a.xpath(\"@data-index\")\n \n ### If there is no data-index it is assumed link comes from initial book landing page (the index page)\n if index_list == []:\n index = self.manager.get_page_index(\"index.html\")\n else:\n index = index_list[0]\n \n ### Fix people who are bad at links\n if href.startswith(\"www.\"):\n href = \"https://\" + href\n a.set(\"href\", href)\n \n ## Correct for ambiguity (Naive assumption that this error only occours on index page)\n if href == \"./\":\n href = \"index.html\"\n \n if not href:\n return None\n \n href = self.manager.convert_link(href, index)\n a.set(\"href\", href)", "def extract_brands(models):\n all_brands = database.get_car_brands()\n extracted_brands = []\n for car in models:\n #car = smart_str(car)\n for brand in all_brands:\n #\tbrand = smart_str(brand)\n if brand in car:\n extracted_brands.append(brand)\n return extracted_brands", "def produce_links_search(self, value_list:list) -> list:\n return [\n [self.produce_link_google(f) for f in value_list],\n [self.produce_link_qwant(f) for f in value_list],\n [self.produce_link_bing(f) for f in value_list],\n [self.produce_link_duckduckgo(f) for f in value_list],\n [self.produce_link_yahoo(f) for f in value_list]\n ]", "def test_finder_detects_latest_find_links(data: TestData) -> None:\n req = install_req_from_line(\"simple\")\n finder = make_test_finder(find_links=[data.find_links])\n found = finder.find_requirement(req, False)\n assert found is not None\n assert found.link.url.endswith(\"simple-3.0.tar.gz\")", "def search_thru_comments(urls, listOfKWs):\n browser = webdriver.Chrome('/Users/sophie/documents/chromedriverCurrent')\n\n listKWs = []\n for KW in listOfKWs:\n listKWs.append([KW])\n # ex: listKWs=[['poverty'], ['inequality'], ['aids'], ['hiv']]\n # list where list[something]=name of KW. append after that the urls.\n global listKWsDate\n listKWsDate = []\n for KW in listOfKWs:\n listKWsDate.append([KW])\n print(listKWs == listKWsDate)\n\n for link in urls:\n browser.get(link)\n\n source = browser.page_source\n data = bs(source, 'html.parser')\n body = data.find('body')\n script = body.find('script',\n text=lambda t: t.startswith('window._sharedData'))\n #print(script)\n scriptStr = str(script)\n scriptStr.replace(\"'\",\"\")\n #scriptSplit=script.split('shortcode')\n #print(scriptSplit)\n\n #pass to searchForEach which will check the indiv posts for all KWs\n # and will then add them to the appropriate spread sheet\n for KW in listOfKWs:\n searchForEachKW(KW, scriptStr, listKWs, listKWsDate)\n\n #need to change so that calls search for each KW here. 
so that\n # searching each link for all the hashtags, and then add link to\n # appropriatre kw spreadsheet\n\n return listKWs", "def link_crawler(seed_url, link_regex):\n crawl_queue = [seed_url]\n # keep track which URL's have seen before\n seen = set(crawl_queue)\n while crawl_queue:\n url = crawl_queue.pop()\n html = download(url)\n # filter for links matching our regular expression\n for link in get_links(html):\n # check if link matches expected regex\n if re.match(link_regex, link):\n # form absolute link\n link = urlparse3.urljoin(seed_url, link)\n # check if have already seen this link\n if link not in seen:\n seen.add(link)\n crawl_queue.append(link)", "def deep_link_scraping(final_links, driver):\n\n import re\n second_links = [] \n for website2 in final_links:\n links2 = extract_all_links(website2, driver)\n final_links1 = find_usefull_links(links2, classmodel, class_count_vect)\n final_links2 = list(set(final_links1) - set(final_links))\n second_links += final_links2\n\n \n second_links = list(dict.fromkeys(second_links))\n second_links1 = find_usefull_links(second_links, classmodel, class_count_vect)\n second_links2 = []\n for link in second_links1:\n if re.search('#', link):\n x = re.search('#', link)\n link = link[:int(x.span()[0])]\n second_links2.append(link)\n else:\n second_links2.append(link)\n\n second_links2 = list(dict.fromkeys(second_links2))\n for final_link in second_links2:\n tags = extract_all_tags(final_link, driver)\n if len(tags) != 0:\n final_tags = find_usefull_tags(tags, tagmodel, tag_count_vect)\n if len(final_tags) != 0:\n scrape_data(final_link, final_tags, driver)\n else:\n scrape_data_tag(final_link, driver)\n else:\n scrape_data_tag(final_link, driver)\n return second_links2", "def extract_brand_names(brand_name_urls):\n from debra.models import Brands\n\n # stripping urls, fetching their domains\n brand_name_urls = [url.strip() for url in brand_name_urls if len(url) > 0]\n if not brand_name_urls:\n return []\n\n brand_domains = [domain_from_url(url) for url in brand_name_urls if len(url) > 0]\n\n t1 = time.time()\n\n queries = [Q(domain_name=value) for value in brand_domains] + [Q(name=value.upper()) for value in brand_name_urls]\n\n query = queries.pop()\n\n for item in queries:\n query |= item\n\n brands = Brands.objects.exclude(blacklisted=True).filter(query)\n\n brands = list(brands)\n log.info('Time passed to get brands: %s ' % (time.time() - t1))\n return brands", "def parse_links_from_HTML():\n\n file_content = open(BANK_LIST_HTML_FILE, 'r').read()\n\n # Parsing html files to get list of all anchor tags \n soup = BeautifulSoup(file_content)\n table_content = soup.find('table', class_='tablebg')\n anchor_links = table_content.find_all('a')\n \n abbr_map = load_from_a_file(BANK_NAME_JSON_FILE)\n bank_links, urls_list = {}, {}\n for anchor_link in anchor_links:\n bank_links[str(anchor_link.text)] = anchor_link.get('href')\n for abbr, bank_name in abbr_map.items():\n if bank_name not in bank_links:\n print \"{0} bank from RBI list\".format(bank_name)\n else:\n urls_list[abbr] = bank_links[bank_name]\n dump_to_file(bank_links, BANK_NAME_FILE_URL_JOSN)\n dump_to_file(urls_list, ABBR_BANK_NAME_FILE_URL)", "def link_scraping(final_links, driver):\n\n for final_link in final_links:\n tags = extract_all_tags(final_link, driver)\n if len(tags) != 0:\n final_tags = find_usefull_tags(tags, tagmodel, tag_count_vect)\n if len(final_tags) != 0:\n print('Extracting(classname): ', final_link)\n scrape_data(final_link, final_tags, driver)\n else:\n 
print('Extracting(tag): ', final_link)\n scrape_data_tag(final_link, driver)\n else:\n print('Extracting(tag): ', final_link)\n scrape_data_tag(final_link, driver)", "def getLinks(tvshow, season, episode):\n urltv = getTvShowUrl(tvshow, season,episode)\n urlbase = 'http://projectfreetv.so'\n src_urltv = (''.join(getPage(urltv))).split('</a>')\n possible_links = []\n if (src_urltv == -1):\n return possible_links\n for line in src_urltv:\n for nameModule in projectfreetv_mod.__all__:\n if ((nameModule in line) and (('aff_id') in line)):\n link = line.split('\"')[1]\n possible_links.append([link, \\\n \"projectfreetv_mod.\" + nameModule])\n #print possible_links\n return possible_links", "def get_url(soup):\r\n \"\"\"criteria: any(s in a[\"title\"] for s in ('新增', '確診', '肺炎')\"\"\"\r\n url_list = []\r\n for a in soup.find_all('a', {\"href\": re.compile(\"typeid=9$\")}):\r\n if any(s in a[\"title\"] for s in ('新增', '確診', '肺炎')):\r\n url = \"https://www.cdc.gov.tw\" + a['href']\r\n url_list.append(url)\r\n return url_list", "def link_crawler(start_url,link_regex):\n crawl_queue=[start_url]\n seen = set(crawl_queue)\n\n while crawl_queue:\n url = crawl_queue.pop()\n html = download(url)\n if html is None:\n continue\n for link in get_links(html):\n print(link,'==',link_regex)\n #if re.match(link_regex, link):\n if re.match(link,link_regex):\n print('ok')\n abs_link = urljoin(start_url, link)\n if abs_link not in seen:\n seen.add(abs_link)\n crawl_queue.append(abs_link)\n else:\n print('error!')", "def _build_links(links):\n for link in links:\n link['href'] = link['href'].replace('servers', 'instances')\n return links", "def links(iati_import, activity, project, activities_globals):\n imported_links = []\n changes = []\n\n for website in activity.findall('activity-website'):\n url = get_text(website, activities_globals['version'])\n\n # Skip RSR links\n if url and 'rsr.akvo.org' in url:\n continue\n\n link, created = get_model('rsr', 'link').objects.get_or_create(\n project=project,\n url=url\n )\n\n if created:\n changes.append(u'added link (id: %s): %s' % (str(link.pk), link))\n\n imported_links.append(link)\n\n for doc_link in activity.findall(\"document-link[@format='application/http']\"):\n url = ''\n caption = ''\n\n if 'url' in doc_link.attrib.keys():\n url = doc_link.attrib['url']\n\n # Skip RSR links\n if url and 'rsr.akvo.org' in url:\n continue\n\n title_element = doc_link.find('title')\n if not title_element is None:\n caption = get_text(title_element, activities_globals['version'])\n if len(caption) > 50:\n add_log(iati_import, 'link_caption', 'caption is too long (50 characters allowed)',\n project, IatiImportLog.VALUE_PARTLY_SAVED)\n caption = caption[:50]\n\n link, created = get_model('rsr', 'link').objects.get_or_create(\n project=project,\n url=url,\n caption=caption\n )\n\n if created:\n changes.append(u'added link (id: %s): %s' % (str(link.pk), link))\n\n imported_links.append(link)\n\n for link in project.links.all():\n if not link in imported_links:\n changes.append(u'deleted link (id: %s): %s' %\n (str(link.pk),\n link.__unicode__()))\n link.delete()\n\n return changes", "def test_get_similar_recipes(self):\n pass", "def get_all_books_page_links(raw_page_rip):\n nt = {}\n ot = {}\n OTIDS = []\n soup = raw_page_rip.soup\n if not os.path.exists(data_store):\n os.mkdir(data_store)\n \n nt_soup = soup.find(\"td\", class_=\"NT\")\n ot1 = soup.find(\"td\", class_=\"OT1\")\n ot2 = soup.find(\"td\", class_=\"OT2\")\n \n for each in nt_soup.find_all(\"a\", href=True):\n if 
'class=\"b\"' in str(each):\n href = each.get(\"href\")\n name = each.text\n\n idd = re.search(r'\\d{5}', href).group(0)\n nt[name] = [domain + href, idd]\n \n with open(os.path.join(data_store, \"new_test.json\"), \"w+\") as wh:\n json.dump(nt, wh)\n\n for each in ot1.find_all(\"a\", href=True):\n if 'class=\"b\"' in str(each):\n \n href = each.get(\"href\")\n name = each.text\n idd = re.search(r'\\d{5}', href).group(0)\n \n if idd in OTIDS:\n ot[domain + href][0] = name + \" or \" + ot[domain + href][0]\n else:\n ot[domain + href] = [name, idd]\n OTIDS.append(idd)\n \n for each in ot2.find_all(\"a\", href=True):\n if 'class=\"b\"' in str(each):\n \n href = each.get(\"href\")\n name = each.text\n idd = re.search(r'\\d{5}', href).group(0)\n \n if idd in OTIDS:\n ot[domain + href][0] = name + \" or \" + ot[domain + href][0]\n else:\n ot[domain + href] = [name, idd]\n OTIDS.append(idd)\n \n rev_old = {value[0] : [key, value[1]] for key, value in ot.items()}\n with open(os.path.join(data_store, \"old_test.json\"), \"w+\") as wh:\n json.dump(rev_old, wh)", "def find_active_links(lat, lon, place, name):\n\tWIKIPEDIA_BASE = 'https://wikipedia.org/wiki/Special:Search/'\n\tlinks = {}\n\tlinks[\"wikipediaUrl\"] = WIKIPEDIA_BASE + name\n\n\ttry:\n\t\tfsqReturn = find_foursquare_url(lat, lon, name)\n\t\tfoursquareVenueId = fsqReturn['venueId']\n\t\tfoursquareUrl = fsqReturn['4sqUrl']\n\t\twebsite = fsqReturn['url']\n\t\tdisplayMetadata = fsqReturn['metadata']\n\n\t\tif foursquareUrl is not None:\n\t\t\tlinks['foursquare'] = {\"foursquareUrl\" : foursquareUrl,\n\t\t\t\t\"foursquareVenueId\" : foursquareVenueId}\n\n\t\tif website is not None:\n\t\t\tlinks['url'] = website\n\n\t\tif displayMetadata is not None:\n\t\t\tlinks['displayMetadata'] = displayMetadata\n\n\texcept:\n\t\tprint \"foursquare failed\"\n\n\ttry:\n\t\topenTableUrl = find_open_table_url(place)\n\t\tif openTableUrl is not None:\n\t\t\tlinks['openTableUrl'] = openTableUrl\n\n\texcept: \n\t\tprint \"opentable failed\"\n\n\treturn links", "def get_image_link():\n image_links = set()\n supplemented_keyword = urllib.parse.quote(\n supplemented_keywords[random.randint(0,\n len(supplemented_keywords) - 1)],\n safe='')\n main_keyword = urllib.parse.quote(\n main_keywords[random.randint(0,\n len(main_keywords) - 1)], safe='')\n\n # print('the theme of cats: ' + supplemented_keyword)\n\n search_query = (main_keyword + ' ' + supplemented_keyword).replace(\n ' ', '%20')\n url = 'https://www.google.com/search?q=' + \\\n search_query + '&source=lnms&tbm=isch'\n image_links = image_links.union(parse_page(url))\n\n image_link = list(image_links)[random.randint(0, len(image_links) - 1)]\n # print('link:' + image_link)\n\n while 'https://' not in image_link or r'\\\\u' in image_link or '.jpg' not in image_link:\n image_link = list(image_links)[random.randint(0, len(image_links) - 1)]\n # print('link:' + image_link)\n\n return image_link", "def get_car_hrefs_reliably( main_href ):\n exit = False\n time_sleep = 5\n while not exit:\n text, html = get_text_html( main_href )\n car_hrefs = get_all_hrefs(html)\n if len(car_hrefs) == 0:\n print('trying get img hrefs one more time')\n sleep(time_sleep)\n time_sleep+= 3\n exit = False\n else:\n exit = True\n return car_hrefs", "def check_href(url, soup):\n # pdb.set_trace()\n ret_vals = []\n href = soup.find_all(\"a\")\n for link in href:\n if url in link.get(\"href\"):\n ret_vals.append(link.get(\"href\").split(url)[1])\n return list(set(ret_vals))", "def extract_urls(genome):\n itemid = 
genome.get('metadata').get('identifier')\n urls = set([url for url in genome['urls'] if 'archive.org' not in url])\n db_urls_found(itemid, urls)", "def run_search(self, links):\n for s in links:\n self._run_command(\" s \\\"{}\\\" \\n\".format(s))", "def get_links(self, soup):\n \"\"\" @param soup: BeautifulSoup object that cointains the targeted links \"\"\"\n \"\"\" @type soup: BeautifulSoup object \"\"\"\n for link in soup.select('a[href^=\"https://\"]'): # All links which have a href element\n href = link.get('href') # The actually href element of the link\n if not any(href.endswith(x) for x in ['.csv', '.xls', '.xlsx']):\n print(\"No excel\")\n continue\n if not href in self.url_queue:\n self.url_queue.append(href) # Add the URL to our queue", "def search(self, links=False):\n if self.type == \"text\":\n mg = Manager()\n ret = mg.dict()\n jobs = []\n p1 = Process(target=self.google_proc, args=(ret,))\n jobs.append(p1)\n p2 = Process(target=self.yahoo_proc, args=(ret,))\n jobs.append(p2)\n p3 = Process(target=self.bing_proc, args=(ret,))\n jobs.append(p3)\n p1.start()\n p2.start()\n p3.start()\n\n for proc in jobs:\n proc.join()\n\n temp = ret.values()[0] + ret.values()[1] + ret.values()[2]\n print temp\n for i in temp:\n f = 0\n for j in self.uniquelinks:\n if i[1] == j[1]:\n f = 1\n if f == 0:\n self.uniquelinks.append(i)\n if links:\n return self.uniquelinks\n else: # [[title, link, data], [title, link, data] ...]\n mg = Manager()\n ret = mg.dict()\n jobs = []\n n = 0\n for li in self.uniquelinks[0:3]:\n p = Process(target=self.data_collector, args=(n, li[1], ret))\n n += 1\n jobs.append(p)\n p.start()\n\n for proc in jobs:\n proc.join()\n print ret.values()\n print len(ret.values())", "def parse_links(self, response):\n urls = LinkExtractor(canonicalize=True, allow_domains=self.domain)\\\n .extract_links(response)\n\n for link in urls:\n # If link was already extracted on another page, don't save it\n if link.url in self.unique_links:\n continue\n if 'watch?v' in link.url:\n self.links_out_file.write('%s\\n' % link.url)\n self.unique_links[link.url] = 1", "def get_links(query_terms):\n\n # the set of links all of which contains all the terms in the query string\n final_links = None\n for term in query_terms:\n # get all links containing the term and put in a set\n links = Set(index_data.get(term))\n #print(\"\\n\\nQuery Term: %s\" % term)\n #print(links)\n\n # special case for first iteration, because: empty & anything = empty\n if final_links == None:\n final_links = links\n\n # take intersection of links set\n final_links = final_links & links\n\n #print(final_links)\n\n # convert the Set to List and return\n return list(final_links)", "def lookup_urls(regex_l, manifest, inventory, refetch=False):\n selected_targets = []\n # Store whether or not we've found a target in the manifest that matches the requested type\n found_one = False\n for target in manifest.keys():\n # Iterate through the possible targets in the manifest.\n # If any of them match any of the RegExs supplied, add the URL to the\n # return list\n if all(map((lambda regex: re.findall(regex, target)), regex_l)):\n found_one = True\n log(\"TRACE\", \"Selected target: {}\".format(target))\n target_info = manifest.get(target)\n target_url = target_info.get(\"url\")\n target_hash = target_info.get(\"repo_hash\")\n target_sha256 = target_info.get(\"sha256_hash\")\n filename = os.path.basename(target_url)\n # Check if the same filename and hash appear in the inventory\n if not refetch and inventory.get(target, 
{}).get(\"repo_hash\", \"\") == target_hash:\n # We already have this file, we don't need to download it again\n log(\"INFO\", \"Target {} is up to date.\".format(target))\n else:\n # We don't have that exact file, add it to the list\n selected_targets.append({\"target\": target,\n \"repo_hash\": target_hash,\n \"filename\": filename,\n \"url\": target_url,\n \"sha256_hash\": target_sha256})\n if not found_one:\n log(\"INFO\", \"No targets matching '{}'\".format(regex_l))\n return selected_targets", "def check_for_new_links(feed):\n #read the feed\n feed_url = feed[\"feed_url\"]\n feed_data = feedparser.parse(feed_url)\n\n #parse out entries in the feed for the information we want\n entries = []\n for entry in feed_data.entries:\n parsed_entry = {}\n parsed_entry[\"title\"] = entry[\"title\"]\n parsed_entry[\"link\"] = entry[\"link\"]\n parsed_entry[\"published\"] = entry[\"published\"]\n parsed_entry[\"feed_url\"] = feed_url\n entries.append(parsed_entry)\n\n #check for new entries since the last known entry\n #chop off all entries starting at the last_seen_link\n if \"last_seen_link\" in feed:\n last_link = feed[\"last_seen_link\"]\n idx = -1\n for cidx in range(len(entries)):\n if entries[cidx][\"link\"] == last_link:\n idx = cidx\n break\n #else is a new link\n entries = entries[:idx]\n\n return list(reversed(entries))", "def external_search_engines_links(search, deep=0, debug=0, links=[]):\n s = Subseek()\n for search_engine in SEARCH_ENGINES:\n for subtitle_search_engine in SUBTITLE_SEARCH_ENGINES:\n if debug == 1:\n print \"Searching '%s' in '%s'\" % (search,\n search_engine['name'])\n links_aux = s.get_links(search_engine, search, deep,\n subtitle_search_engine[\"name\"])\n if not links_aux or len(links_aux) == 0:\n if debug == 1:\n print \"No match found in '%s'\" % search_engine['name']\n else:\n if debug == 1:\n print \"%s matches found in '%s'\" % (len(links_aux),\n search_engine['name'])\n links = links_aux + links\n\n return links", "def _parse_links(self, item, start, links_list):\n result_list = []\n target_str_1 = start.strftime(\"%m-%d-%Y\").replace(\" 0\", \" \")\n target_str_2 = start.strftime(\"%m-%d-%y\").replace(\" 0\", \" \")\n for item in links_list:\n if item[\"date\"] in target_str_1 or item[\"date\"] in target_str_2:\n new_dict = {}\n new_dict[\"href\"] = item[\"href\"]\n new_dict[\"title\"] = item[\"title\"]\n result_list.append(new_dict)\n return result_list", "def get_external_links(parsed_drug_doc):\n\n external_link_info = list(parsed_drug_doc.find(id='external-links').next_sibling.dl.children)\n external_links = {}\n for i in range(0, len(external_link_info), 2):\n source = external_link_info[i].text\n value = external_link_info[i+1].text\n # Ignoring a few sources for this MVP that don't give obvious alternate IDs.\n if source not in [\"RxList\", \"Drugs.com\", \"PDRhealth\"]:\n external_links[source] = value\n\n return external_links", "def find_revision_pages(url_text):\n\trevision_links = []\n\tgrammar_indices = [m.start() for m in re.finditer(\"grammar\", url_text.lower())]\n\t# print(\"Grammar indices:\",grammar_indices)\n\n\tfor i in range(len(grammar_indices)):\n\t\tgrammar_index = grammar_indices[i] \n\t\tprev_index = url_text[:grammar_index].rfind('prev')\n\t\thref_index = url_text[:prev_index].rfind('href')\n\t\turl_start_index = url_text[href_index:].find(\"\\\"\")+href_index\n\t\turl_end_index = url_text[url_start_index+1:].find(\"\\\"\")+url_start_index+1\n\t\turl2 = 
WIKI_URL+url_text[url_start_index+1:url_end_index]\n\t\trevision_links+=[url2]\n\n\treturn list(set(revision_links))", "def dod():\n file = requests.get(\"https://www.bewakoof.com/design-of-the-day\")\n soup = bs4.BeautifulSoup(file.text, \"lxml\")\n # print(soup)\n\n linkList = soup.select(\"a[class='col-sm-4 col-xs-6'] > div > div > div > img:nth-of-type(2)]\")\n # soup.select(\"div[id=foo] > div > div > div[class=fee] > span > span > a\")\n for i in linkList:\n if \"t-shirt-men\" in str(i):\n # print(i.get('src'))\n webbrowser.open(i.get('src'))", "def get_links_filter(self, keyword, number_links):\r\n podcast_data = []\r\n\r\n for entry in self.rss[0].entries:\r\n if keyword in entry.title: \r\n try:\r\n podcast_data = [entry.published, entry.title, \r\n entry.enclosures[0]['href'], \r\n self.rss[0].feed.title\r\n ]\r\n except IOError as err:\r\n print err\r\n except UnicodeDecodeError as err:\r\n print err\r\n else:\r\n self.podcast_list.append(podcast_data)\r\n if number_links != 0:\r\n if len(self.podcast_list) == number_links: \r\n return None\r\n return None", "def find_gp_app_links(html):\n links = []\n for m in re.finditer('href=\"(/store/apps/details[^\"]+)\"', html):\n #print '%02d-%02d: %s' % (m.start(), m.end(), m.group(1))\n links.append(m.group(1))\n return links", "def get_recipe_links_by_page(page):\n page_link = URL.format(page)\n cuisine_recipe_links = get_content_from_url(page_link)\n if not cuisine_recipe_links:\n print \"no content for:\", link\n return None\n soup_search = BeautifulSoup(cuisine_recipe_links)\n return soup_search.find_all(\"div\", {\"class\": \"image_link_medium\"})", "def getLinks(tvshow, season, episode):\n numPage = 1\n possible_links = []\n doNext = True\n while(doNext):\n urltv = getTvShowUrl(tvshow, season, episode, numPage)\n src_urltv = getPage(urltv)\n if (src_urltv == -1):\n return possible_links\n npage = False\n for line in src_urltv:\n if (\"next_page\" in line):\n npage = True\n if (\"disabled next_page\" in line):\n doNext = False\n for nameModule in sidereel_mod.__all__:\n realName = sidereel_mod.__all2__[nameModule]\n if ((realName in line) and ('data-viewable-url') in line):\n possible_links.append([line.split('\"')[5], \\\n \"sidereel_mod.\" + nameModule])\n numPage += 1\n if (npage == False):\n doNext = False\n return possible_links\n \n \n \n\n ## liste=[]\n ## for i in sidereel_mod.__all__:\n ## __import__(\"aggregators.sidereel_mod.\" + i)\n ## liste += sys.modules[\"aggregators.sidereel_mod.\"+i].getFlv(a)\n ## return liste", "async def _find_links(self, res: aiohttp.ClientResponse) -> Iterator[str]:\n\n content = await res.text()\n soup = BeautifulSoup(content, 'html.parser')\n links = [self._format(res.url, a) for a in soup.find_all('a')]\n return filter(lambda l: l is not None, links)", "def getLinks(link):\n source = requests.get(link).text\n soup = BeautifulSoup(source, 'lxml')\n rows = soup.find_all(class_ = 'column-1') #select which column \n list_of_links = []\n \n for row in rows[1:]: #rows[1:] is used in case first row is a title row (ie there is no useful data here)\n name = row.find('a')\n link = name.attrs['href'] #the data I'm trying to extract\n list_of_links.append(link)\n return list_of_links", "def UpdateBrandSeries():\r\n MilkSeries.objects.all().delete()\r\n MilkBrand.objects.all().delete()\r\n MilkTunnel.objects.all().delete()\r\n brandlist = [item.brand for item in MilkProd.objects.all()]\r\n unique_brandlist = {}.fromkeys(brandlist).keys()\r\n for item in unique_brandlist:\r\n b = 
MilkBrand(name=item)\r\n b.save()\r\n \r\n for brandstr in unique_brandlist:\r\n brandset = MilkProd.objects.filter(brand=brandstr)\r\n serieslist = [item.name for item in brandset]\r\n unique_series = {}.fromkeys(serieslist).keys()\r\n for ser in unique_series:\r\n s = MilkSeries(name=ser, BrandIn=brandstr)\r\n s.save()\r\n \r\n tunnellist = [item.tunnel for item in MilkProd.objects.all()]\r\n unique_tunnellist = {}.fromkeys(tunnellist).keys()\r\n for item in unique_tunnellist:\r\n t = MilkTunnel(name=item)\r\n t.save()", "def get_links_from_one_page(driver,site,URL_exclusions):\r\n while True:\r\n try:\r\n results = driver.find_elements_by_class_name(\"g\") #Find all elements with class=\"g\". This includes search results.\r\n break\r\n except:\r\n continue \r\n links = []\r\n for result in results:\r\n link = result.find_element_by_tag_name(\"a\") #Hyperlinks are contained under <a> tags\r\n link = link.get_attribute('href') #Retrive link as a string\r\n if link.find(site) != -1: #Some class=\"g\" elements are not search results. Only store links with urls containing \"site\".\r\n links.append(link)\r\n sig_links = [] #Create list of links for pages not from travel sections\r\n for url in links:\r\n find = np.zeros(len(URL_exclusions))\r\n for i in range(len(URL_exclusions)):\r\n find[i] = bool(url.find(URL_exclusions[i]) == -1)\r\n if all(find) == True: #If none of the exclusion words are in url\r\n sig_links.append(url)\r\n return sig_links", "def mk_link_list(self, BS_object, base_url):\n link_list = []\n body = BS_object.find('body')\n for element in body.find_all('a'):\n # for link in BS_object.find_all('a'): # TEST if there are any links in html head\n \n raw_link = element.get('href')\n print \"GETS RAW LINK: %r, type:\" % raw_link, type(raw_link)\n if type(raw_link) is not unicode:\n print \"mk_link_list: FAILED TO EXTRACT USABLE LINK, SKIPPING...\"\n continue\n\n if raw_link.startswith(\"https:/\") or raw_link.startswith(\"http:/\"):\n if not raw_link.endswith(\"/\"): # maintaining constant url format\n raw_link + \"/\"\n print \"mk_link_list: FULL LINK\"\n if raw_link.startswith(base_url): # Internal URL check\n print \"mk_link_list: FULL LINK STARTS WITH BASE URL AND IS GOOD FOR LINK LIST\"\n link_list.append(raw_link)\n else:\n print \"mk_link_list: THIS FULL LINK IS NOT INTERNAL LINK\"\n else:\n # when part link found it will be always internal link\n print \"mk_link_list:FOUND PART LINK\", raw_link\n try:\n raw_link.strip()\n except:\n pass\n print \"mk_link_list: MAKING FULL LINK FROM PART\"\n full_link = urlparse.urljoin(base_url, raw_link)\n print \"mk_link_list: FULL LINK MADE FROM PART LINK\", full_link\n if full_link.startswith(base_url): # Internal URL check\n print \"mk_link_list: FULL LINK STARTS WITH BASE URL AND IS GOOD FOR LINK LIST\"\n link_list.append(full_link)\n else:\n print \"mk_link_list: THIS FROM PART TO FULL LINK IS NOT INTERNAL LINK\"\n\n\n\n dedupli_list = c_m.remove_duplicates(link_list) # \n dedupli_list.sort()\n try:\n dedupli_list.remove(base_url) # we do not need retriving base url html again\n print \"mk_link_list: LINK LIST AFTER BASE URL REMOVAL\", len(dedupli_list)\n except ValueError:\n print \"mk_link_list: NO BASE URL FOUND IN BASE URL(HOMEPAGE)\"\n\n return dedupli_list", "def findLinksByText(page, searchRe):\n urls = []\n page = parseHtmlLinks(page)\n for linkUrl, linkText in page['links'].iteritems():\n dbgStr = 'Checking linkText %s (url %s) against %s' % (repr(unidecode.unidecode(linkText)), linkUrl, searchRe.pattern)\n 
logging.log(5, dbgStr)\n if searchRe.match(linkText):\n urls.append(linkUrl)\n logging.debug('Found link: %s -> %s' % (linkText, linkUrl))\n\n logging.debug('Found links with %s in label: %s' % (repr(searchRe.pattern), urls))\n return urls", "def get_links(names, html):\n ###TODO\n people = []\n readweb = BeautifulSoup(html, 'html.parser')\n for a in readweb.find_all('a'):\n person = os.path.basename(str(a.get('href')))\n if person in names:\n people.append(person)\n return SortedSet(people)\n pass", "def match(self, brain, getlink):\n link = getlink(brain.getId,\n getlink(normalize(brain.Title), None))\n if link:\n return True", "def iter_links(self):", "def extract_links():\n br = mechanize.Browser()\n br.open(BASE_URL)\n f = open('data/svodki/alllinks.csv', 'w')\n calurls = []\n # Collect all calendar urls with reports\n for year in range(2005, 2013):\n for month in range(1, 13):\n calurls.append([year, month, CALEND_URLPAT %(year, month)])\n\n # Update for current year (needs fixes later)\n for year in range(2013, 2014):\n for month in range(1, 3):\n calurls.append([year, month, CALEND_URLPAT %(year, month)])\n # Process calendar urls one by one\n for year, month, calurl in calurls:\n print calurl\n u = br.open(calurl)\n data = u.read()\n u.close()\n soup = BeautifulSoup(data)\n slist = soup.find('ul', attrs={'class': 'emergency_list'})\n urls = slist.findAll('a')\n for url in urls:\n s = '%s\\t%s\\t%s\\t%s\\t' % (unicode(year), unicode(month), url.text, urljoin(BASE_URL, url['href']))\n f.write((s + '\\n').encode('utf8'))\n print s\n f.close()", "def test_get_variant_links(variant_obj):\n # GIVEN a variant object without links\n assert \"thousandg_link\" not in variant_obj\n # WHEN fetching the variant links\n links = get_variant_links(variant_obj)\n # THEN check that links are returned\n assert \"thousandg_link\" in links", "def link_crawler(seed_url, link_regex):\r\n\tcrawl_queue = [seed_url]\r\n\twhile crawl_queue:\r\n\t\turl = crawl_queue.pop()\r\n\t\thtml = download(url)\r\n\t\tfor link in get_links(html):\r\n\t\t\tcrawl_queue.append(link)", "def filter_substitution_image_links(links):\n return [link for link in links if '{' not in link]", "def get_links() -> list:\n headers = {\n \"User-Agent\": \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 Safari/537.36\",\n \"Accept\": \"text/html\",\n \"Accept-Encoding\": \"gzip, deflate\",\n }\n p = re.compile(r'\\d+.html')\n base_url = 'http://stateoftheunion.onetwothree.net/texts/'\n essay_url = base_url + 'index.html'\n res = requests.get(essay_url, headers=headers)\n soup = BeautifulSoup(res.content, 'html')\n links = soup.find_all('a')\n sotu_links = {link.text: base_url + link.get('href', '') for link in links if re.match(p, link.get('href', ''))}\n return sotu_links", "def get_link_list_from_request(request):\n\tquery = request.POST.get('link_list','')\n #Clear all whitespaces from textarea\n whitespace = re.compile(r'\\s+')\n\turls = query.split('\\n')\n\tlinks = list()\n\tfor url in urls:\n\t\turl = whitespace.sub('', url)\n \tif url and url.lower().find('rapidshare') is not -1:\n \tlinks.append(url)\n\n\treturn links", "def test_get_pci_link_list(self):\n pass", "def test_finder_only_installs_stable_releases(data: TestData) -> None:\n\n req = install_req_from_line(\"bar\")\n\n # using a local index (that has pre & dev releases)\n finder = make_test_finder(index_urls=[data.index_url(\"pre\")])\n found = finder.find_requirement(req, False)\n assert found is not None\n 
assert found.link.url.endswith(\"bar-1.0.tar.gz\"), found.link.url\n\n # using find-links\n links = [\"https://foo/bar-1.0.tar.gz\", \"https://foo/bar-2.0b1.tar.gz\"]\n\n finder = make_test_finder(links)\n found = finder.find_requirement(req, False)\n assert found is not None\n assert found.link.url == \"https://foo/bar-1.0.tar.gz\"\n\n links.reverse()\n\n finder = make_test_finder(links)\n found = finder.find_requirement(req, False)\n assert found is not None\n assert found.link.url == \"https://foo/bar-1.0.tar.gz\"", "def getLinks(content):\n soup = BeautifulSoup(content, 'lxml')\n links = set([link.get('href') for link in soup.find_all('a')])\n return links", "def extract_urls_from_file(f, all_abns, links_existed):\n content = open(CURL_OUTPUT + f).read()\n soup = BeautifulSoup(content)\n\n fh = open(ALL_LINKS + 'all_links.txt', 'a')\n\n cnt = 0\n all_rows = soup.find_all('tr', {'class': 'rgRow'})\n for row in all_rows:\n all_cells = row.find_all('td')\n abn = all_cells[0].text\n if (abn in all_abns):\n link = all_cells[1].findChildren('a')[0]['href']\n if not link in links_existed:\n print(link)\n download_page(link, f, cnt)\n fh.write(link + '\\n')\n cnt = cnt + 1\n\n fh.close()", "def posse_post_discovery(original, regex):\n if not hasattr(regex, 'match'):\n regex = re.compile(regex)\n\n if regex.match(original):\n return original\n\n try:\n d = mf2py.parse(url=original)\n urls = d['rels'].get('syndication', [])\n for item in d['items']:\n if 'h-entry' in item['type']:\n urls += item['properties'].get('syndication', [])\n for url in urls:\n if regex.match(url):\n return url\n except HTTPError:\n current_app.logger.exception('Could not fetch original')\n except SSLError:\n current_app.logger.exception('SSL Error')\n except Exception as e:\n current_app.logger.exception('MF2 Parser error: %s', e)", "def _find_matching_link(category, component_type):\r\n\r\n # The tab shows links for the given category\r\n links = world.css_find('div.new-component-{} a'.format(category))\r\n\r\n # Find the link whose text matches what you're looking for\r\n matched_links = [link for link in links if link.text == component_type]\r\n\r\n # There should be one and only one\r\n assert_equal(len(matched_links), 1)\r\n return matched_links[0]", "def get_recipe_links(pages):\n recipe_links = []\n for page in xrange(1, pages+1):\n sleep(SCRAPING_REQUEST_STAGGER)\n recipe_links.extend(get_recipe_links_by_page(page))\n cuisine_recipes = get_recipe_details(list(set(recipe_links)))\n return cuisine_recipes", "def get_urls(links):\n\n temp_list=[]\n url_list=[]\n temp_list2=[]\n #Open the file where the url's are saved and copy the tuple values into an empty list\n z=open('dbdocs.txt','r')\n for line in z:\n temp_list.append(line)\n #print temp_list\n for x in temp_list:\n index=x.find(',')\n if index==-1:\n y=x.split(\" \",1)\n key=int(y[0])\n val=str(x[1]).replace('\\n','')\n url_list.append((key,val))\n else:\n #find the tab seperator between the key and the url, and\n #split them, in order to put in a list\n key=x[0:index-1]\n #print key\n value=str(x[index+3:len(x)-1])\n #print value\n temp_list2.append((int(key),value))\n #Find the url's of the links where the word was found\n for k in links:\n for i,j in temp_list2:\n #print j\n if i==k:\n url_list.append((i,j))\n break\n #print len(url_list)\n #print len(links)\n z.close()\n return url_list", "async def cmd_galremlinkuwl(self, ctx):\n links = re.findall(r\"(?P<url>http[s]?://[^\\s]+)\", ctx.message.content)\n\n if not links:\n await ctx.channel.send('Useage: 
[p]galremlinkuwl <startoflink>, [Bot Owner] Removes a link from gallery link whitelist.')\n\n # ===== REMOVE THE LINKS FROM THE LIST\n new_gal_link_wl = list(set(self.cogset['link_wl']) - set(links))\n\n if Gallery.compare(new_gal_link_wl, self.cogset['link_wl']):\n await ctx.channel.send(content=\"{}\\n are not in the gallery link whitelist.\".format('\\n'.join(links)), delete_after=Gallery.delete_after)\n return \n \n else:\n self.cogset['link_wl'] = new_gal_link_wl\n\n # ===== WRITE TO THE DATABASE\n await cogset.SAVE(self.cogset, cogname=self.qualified_name)\n\n # ===== RETURN\n await ctx.channel.send(content=\"{}\\n have been removed from the gallery link whitelist.\".format('\\n'.join(links)), delete_after=Gallery.delete_after)\n return", "def link_residues(self) -> None:\n ...", "def findLinks(self, query):\n\t\ttry:\n\t\t\tassert(type(query)) == str or Pattern\n\t\t\treturn self.driver.find_elements_by_partial_link_text(query)\n\t\texcept Exception as e:\n\t\t\tprint(\"[*] Unable to find link by searching {}\\n{}\".format(query, e))\n\t\t\treturn -1", "def merge_common_ending_breweries(apps, schema_editor):\n mfg_model = apps.get_model(\"beers.Manufacturer\")\n mfgs = mfg_model.objects.all().prefetch_related(\"beers\").order_by(\"name\")\n if not mfgs.exists():\n return\n mfg_dict = {}\n for mfg in mfgs:\n key = ENDINGS_REGEX.sub(\"\", mfg.name.strip()).strip()\n try:\n mfg_dict[key].append(mfg)\n except KeyError:\n mfg_dict[key] = [mfg]\n for short_name, mfg_list in mfg_dict.items():\n if len(mfg_list) == 1:\n if mfg_list[0].name != short_name:\n # we need to shorten the only match\n print(f\"Shortening {mfg_list[0].name} to {short_name}\")\n mfg_list[0].name = short_name\n try:\n with transaction.atomic():\n mfg_list[0].save()\n except IntegrityError:\n print(\"Got an integrity error due to case; merging\")\n original = mfg_model.objects.get(name=short_name)\n merge_mfg(original, mfg_list[0])\n mfg_list[0] = original\n continue\n # sort the beer list by number of beers for simplicity\n kept = None\n try:\n kept = [i for i in mfg_list if i.name == short_name][0]\n except IndexError:\n mfg_list = sorted(\n mfg_list, key=lambda mfg: len(mfg.beers.all()), reverse=True\n )\n kept = mfg_list[0]\n else:\n mfg_list = sorted(\n (i for i in mfg_list if i != kept),\n key=lambda mfg: len(mfg.beers.all()),\n reverse=True,\n )\n if kept.name != short_name:\n # we need to shorten the only match\n print(f\"Shortening {kept.name} to {short_name}\")\n kept.name = short_name\n try:\n with transaction.atomic():\n kept.save()\n except IntegrityError:\n other = mfg_model.objects.get(name=kept.name)\n print(\n f\"Oops, that already exists. 
Merging instead (keeping PK {other.id} instead of {kept.id})\"\n )\n merge_mfg(other, kept)\n kept = other\n print(kept.id)\n for mfg in mfg_list[1:]:\n print(f\"merging mfg {mfg.name} into {kept.name}\")\n merge_mfg(kept, mfg)", "def getMNACGenerator():\n\n # 0 - 89 (something between 80 and 90\n searchBaseUrl = u'http://www.museunacional.cat/en/advanced-piece-search?title_1=&title=&field_piece_inventory_number_value=&keys=&field_piece_type_value_i18n[0]=pintura&&&page=%s'\n # 0 - 48, for some reason not all paintings get returned in the main query\n # searchBaseUrl = u'http://www.museunacional.cat/en/advanced-piece-search?field_piece_type_value_i18n[0]=pintura&field_piece_info_content_value[p.%%2019th]=p.%%2019th&field_piece_info_content_value[q.%%2020th]=q.%%2020th&&page=%s'\n htmlparser = HTMLParser.HTMLParser()\n\n foundit=True\n\n for i in range(0, 89):\n searchUrl = searchBaseUrl % (i,)\n print searchUrl\n searchPage = urllib2.urlopen(searchUrl)\n searchPageData = searchPage.read()\n\n searchRegex = u'\\<a href\\=\\\"(\\/en\\/colleccio\\/[^\\\"]+)\\\"\\>Read more\\<\\/a\\>'\n itemmatches = re.finditer(searchRegex, searchPageData)\n urllist = []\n #for match in matches:\n # try:\n # # #bla = unicode(match.group(1), u'utf-8')\n # urllist.append(u'http://www.dulwichpicturegallery.org.uk%s' % (match.group(1),))\n # except UnicodeDecodeError:\n # pywikibot.output(u'Found an url I cannot parse: %s' % (unicode(match.group(1), u'utf-8'),))#\n\n #print len(urllist)\n #urlset = set(urllist)\n #print len(urlset)\n\n\n for itemmatch in itemmatches:\n url = u'http://www.museunacional.cat%s' % (itemmatch.group(1),)\n print url\n\n if url==u'http://adsfasdfasdf':\n foundit=True\n if not foundit:\n continue\n metadata = {}\n\n metadata['collectionqid'] = u'Q861252'\n metadata['collectionshort'] = u'MNAC'\n metadata['locationqid'] = u'Q861252'\n metadata['instanceofqid'] = u'Q3305213'\n \n metadata['url'] = url\n\n itemPage = urllib2.urlopen(url)\n itemPageData = unicode(itemPage.read(), u'utf-8')\n \n #print itemPageEnData\n titleRegex = u'<li class=\"ca first\"><a href=\"/ca/colleccio/[^\\\"]+\" class=\"language-link\" xml:lang=\"ca\" title=\"([^\\\"]+)\">Català</a></li>[\\r\\n\\t\\s]*<li class=\"es\"><a href=\"/es/colleccio/[^\\\"]+\" class=\"language-link\" xml:lang=\"es\" title=\"([^\\\"]+)\">Español</a></li>[\\r\\n\\t\\s]*<li class=\"en last active\"><a href=\"/en/colleccio/[^\\\"]+\" class=\"language-link active\" xml:lang=\"en\" title=\"([^\\\"]+)\">English</a></li>'\n #titleEnRegex = u'<main class=\"main narrow\">[\\r\\n\\t\\s]+<h1>[\\r\\n\\t\\s]*([^<]+)[\\r\\n\\t\\s]*</h1>'\n creatorRegex = u'<div class=\"ds-author-piece\">([^<]+)</div>'\n dateRegex = u'Painting<div class=\"ds-feature\"><p>(\\d\\d\\d\\d)</p></div>' #FIXME: Only matches on real years\n invRegex = u'Inventory number:&nbsp;</div><p>([^<]+)</p>'\n\n # Could also get Dimensions, Materials, Acquisition\n \n matchTitle = re.search(titleRegex, itemPageData)\n if not matchTitle:\n pywikibot.output(u'The title data for this painting is BORKED!')\n continue\n\n #FIXME: Check encoding\n\n metadata['title'] = { u'ca' : htmlparser.unescape(matchTitle.group(1)),\n u'es' : htmlparser.unescape(matchTitle.group(2)),\n u'en' : htmlparser.unescape(matchTitle.group(3)),\n }\n \n #pywikibot.output(metadata.get('title'))\n\n creatorMatch = re.search(creatorRegex, itemPageData)\n if not creatorMatch:\n pywikibot.output(u'The creator data for this painting is BORKED!')\n continue\n\n #FIXME: Add some logic for work after and clean up\n\n name = 
htmlparser.unescape(creatorMatch.group(1))\n # We need to normalize the name\n if u',' in name:\n (surname, sep, firstname) = name.partition(u',')\n name = u'%s %s' % (firstname.strip(), surname.strip(),)\n metadata['creatorname'] = name\n \n metadata['description'] = { u'nl' : u'%s van %s' % (u'schilderij', metadata.get('creatorname'),),\n u'en' : u'%s by %s' % (u'painting', metadata.get('creatorname'),),\n u'ca' : u'%s de %s' % (u'pintura', metadata.get('creatorname'),),\n u'es' : u'%s de %s' % (u'pintura', metadata.get('creatorname'),),\n }\n\n\n invMatch = re.search(invRegex, itemPageData)\n\n if not invMatch:\n pywikibot.output(u'No inventory number found! Skipping')\n continue\n \n metadata['id'] = invMatch.group(1)\n metadata['idpid'] = u'P217'\n\n dateMatch = re.search(dateRegex, itemPageData)\n\n if dateMatch:\n metadata['inception'] = dateMatch.group(1)\n\n yield metadata", "def add_old_products(self, min_last_date_verified, target_urls):\n clean_urls = [smart_str(url) for url in target_urls]\n str_urls = map(str, clean_urls)\n # print str_urls\n c = self.db.cursor(buffered=True)\n if len(str_urls) > 1:\n query = (\"\"\"SELECT url, product_id, last_verified_at\n FROM matcher.url_mrf_global_hits\n WHERE url IN {} and last_verified_at < '{}';\"\"\")\n query = query.format(tuple(str_urls), min_last_date_verified)\n else:\n query = (\"\"\"SELECT url, product_id, last_verified_at\n FROM matcher.url_mrf_global_hits\n WHERE url = '{}' and last_verified_at < '{}';\"\"\")\n element = ' '.join(str(x) for x in str_urls)\n query = query.format(element, min_last_date_verified)\n # print query\n iterable = c.execute(query, multi=True)\n black_dict = {}\n for item in iterable:\n for result in item.fetchall():\n url = result[0]\n product_id = result[1]\n if url not in black_dict:\n black_dict[url] = []\n if product_id not in black_dict[url]:\n black_dict[url].append(product_id)\n # print 'black dictionary', black_dict\n set.update(set(black_dict.keys()))", "def smartphones_scraper(brand, year_min=YEAR_MIN, year_max=YEAR_MAX):\n # Add one to include current year.\n year_range = range(year_min, year_max + 1)\n logger.info(f\"Start scraping all {brand} smartphone data from www.gsmarena.com\")\n website_search = [MAIN_SITE + RESULTS_PAGE.format(year, year, BRANDS[brand], AVAILABLE, FORM_FACTOR)\n for year in year_range]\n logger.info(f\"Prepared {len(website_search)} search links\")\n smartphones_search_pages = get_pages(website_search)\n logger.info(f\"Got {len(website_search)} search pages\")\n if len(smartphones_search_pages) == 0:\n logger.critical('Didn\\'t get any data. Exit the program...')\n exit(1)\n elif len(smartphones_search_pages) < len(website_search):\n logger.warning(f'Not all search links were retrieved {len(smartphones_search_pages)}/{len(website_search)}')\n smartphones_links = get_phone_list(smartphones_search_pages)\n logger.info(f\"Prepared {len(smartphones_links)} smartphone links\")\n # get smartphones data\n smartphones_pages = get_pages(smartphones_links)\n logger.info(f\"Got {len(smartphones_pages)} smartphone pages\")\n if len(smartphones_pages) == 0:\n logger.critical('Didn\\'t get any data. 
Exit the program...')\n exit(1)\n elif len(smartphones_pages) < len(smartphones_links):\n logger.warning(f'Not all smartphones links were retrieved {len(smartphones_pages)}/{len(smartphones_links)}')\n smartphones_data = [get_phone_data(page) for page in smartphones_pages]\n logger.info(f\"Done extracting {brand} smartphone data.\")\n\n return smartphones_data", "def getExpandedLinks():", "def check_uri_redirects(df, column, replace=True, custom_name_postfix=None, redirection_property=\"http://dbpedia.org/ontology/wikiPageRedirects\", endpoint=DBpedia, regex_filter=\"dbpedia\", bundled_mode=True, uri_data_model=False, progress=True, caching=True):\n\n if custom_name_postfix == \"\":\n\n raise ValueError(\"'custom_name_postfix' can't be an empty string. If you don't want to use a custom_name_postfix, please set the attribute to None\")\n\n df = df.copy()\n\n if bundled_mode and not uri_data_model:\n\n values = \" ( <\"+df[column].str.cat(sep=\"> ) ( <\")+\"> ) \"\n\n query = \"SELECT DISTINCT ?value ?redirect WHERE {VALUES (?value) {\" +values+\"} ?value <\"+redirection_property+\"> ?redirect . }\"\n\n result_df = endpoint_wrapper(query, endpoint, caching=caching).drop_duplicates().reset_index(drop=True)\n\n else: \n \n result_df = pd.DataFrame()\n \n if uri_data_model:\n \n query = \"SELECT DISTINCT ?value ?redirect WHERE {VALUES (?value) {(<**URI**>)} ?value <\"+redirection_property+\"> ?redirect . }\"\n\n result_df = uri_querier(df, column, query, regex_filter=regex_filter, progress=progress, caching=caching)\n \n else:\n\n for uri in df[column].iteritems():\n\n if pd.notna(uri[1]):\n\n query = \"SELECT DISTINCT ?value ?redirect WHERE {?value <\"+redirection_property+\"> ?redirect . FILTER (?value = <\"+uri[1]+\">) }\"\n\n result = endpoint_wrapper(query, endpoint, caching=caching)\n\n result_df = result_df.append(result)\n\n else:\n pass\n\n result_df = result_df.rename({\"callret-0\": \"value\"}, axis=\"columns\").drop_duplicates().reset_index(drop=True)\n\n if result_df.empty:\n\n return df\n\n else:\n\n if custom_name_postfix == None:\n\n new_attribute_name = column+\"_redirect\"\n\n else:\n\n new_attribute_name = column+custom_name_postfix\n\n result_df = pd.merge(df, result_df, how=\"left\", left_on=column, right_on=\"value\").drop(\"value\",axis=1).rename(columns={\"redirect\":new_attribute_name})\n\n if replace:\n\n result_df.loc[(pd.isnull(result_df[new_attribute_name])), new_attribute_name] = result_df[column]\n result_df.drop(column, axis=1, inplace=True)\n result_df.rename(columns={new_attribute_name: column}, inplace=True)\n\n return result_df", "def find_match_for_company(self):\n if self.urls.company_urls_df is not None:\n # the first row in the data frame is the best matching web site\n best_match = self.urls.company_urls_df.head(1)\n\n # store the best matching web site\n web_match_index = best_match.index.values[0]\n\n # store in the df itself\n self.urls.company_urls_df.loc[:, BEST_MATCH_KEY] = False\n self.urls.company_urls_df.loc[web_match_index, BEST_MATCH_KEY] = True\n\n # also store the best match in the collection\n url_best = self.urls.company_urls_df.loc[web_match_index, URL_KEY]\n for url, url_info in self.urls.collection.items():\n if url_info is None or url_info.match is None:\n continue\n if url == url_best:\n url_info.match.best_match = True\n else:\n url_info.match.best_match = False\n\n self.logger.debug(\"Best matching url: {}\".format(best_match.url))", "def _scrape_product_links(self, response):\n lis = response.xpath(\n 
\"//div[@id='resultsCol']/./ul/li |\"\n \"//div[@id='mainResults']/.//ul/li [contains(@id, 'result')] |\"\n \"//div[@id='atfResults']/.//ul/li[contains(@id, 'result')] |\"\n \"//div[@id='mainResults']/.//div[contains(@id, 'result')] |\"\n \"//div[@id='btfResults']//ul/li[contains(@id, 'result')]\")\n links = []\n last_idx = -1\n\n for li in lis:\n is_prime = li.xpath(\n \"*/descendant::i[contains(concat(' ', @class, ' '),\"\n \"' a-icon-prime ')] |\"\n \".//span[contains(@class, 'sprPrime')]\"\n )\n is_prime_pantry = li.xpath(\n \"*/descendant::i[contains(concat(' ',@class,' '),'\"\n \"a-icon-prime-pantry ')]\"\n )\n data_asin = self._is_empty(\n li.xpath('@id').extract()\n )\n\n is_sponsored = bool(li.xpath('.//h5[contains(text(), \"ponsored\")]').extract())\n\n try:\n idx = int(self._is_empty(\n re.findall(r'\\d+', data_asin)\n ))\n except ValueError:\n continue\n\n if idx > last_idx:\n link = self._is_empty(\n li.xpath(\n \".//a[contains(@class,'s-access-detail-page')]/@href |\"\n \".//h3[@class='newaps']/a/@href\"\n ).extract()\n )\n if not link:\n continue\n\n if 'slredirect' in link:\n link = 'http://' + self.allowed_domains[0] + '/' + link\n\n links.append((link, is_prime, is_prime_pantry, is_sponsored))\n else:\n break\n\n last_idx = idx\n\n if not links:\n self.log(\"Found no product links.\", WARNING)\n\n if links:\n for link, is_prime, is_prime_pantry, is_sponsored in links:\n prime = None\n if is_prime:\n prime = 'Prime'\n if is_prime_pantry:\n prime = 'PrimePantry'\n prod = SiteProductItem(prime=prime, is_sponsored_product=is_sponsored)\n yield Request(link, callback=self.parse_product,\n headers={'Referer': None},\n meta={'product': prod}), prod", "def obtain_series_links(series_names):\n links = []\n for product in series_names[0]:\n product = product.lower()\n splitted = product.split()\n product = \"-\".join(splitted)\n series_link = \"https://cryptoslam.io/\" + product + \"/mints\" \n links.append((product,series_link))\n return links", "def get_resource_titles(self, html_doc):\n soup = BeautifulSoup(html_doc, 'html.parser')\n links = soup.find_all('a')\n resources = []\n for link in links:\n href = link.get('href') #get id a dict method returns a value for the given key\n if href and '/title' in href and not href in resources:\n resources.append(href)\n return resources", "def gp_link_finder(url):\n print(\"Starting to find links on Google Play ({0})...\".format(url))\n # Download google Play start Page\n start_page = urlopen(url + \"/store\").read()\n global link_queue\n global link_finder_continue\n # Find the App category pages\n link_pages = find_gp_categories_links(start_page)\n # For each category page, find all App Links on that page\n while link_finder_continue:\n for link_page_lnk in link_pages:\n # Download the category page\n print(\"Fetching link page {0}\".format(url + link_page_lnk))\n link_page = urlopen(url + link_page_lnk).read()\n # Find the app links\n app_links = find_gp_app_links(link_page)\n # Add the link in the queue for the link downloader\n didadd = False\n for link in app_links:\n if not (url+link) in downloaded_links.keys():\n link_queue.put(url + link)\n didadd = True\n print(\"Added {0} to download queue...\".format(url + link))\n if didadd:\n # Sleep for a while to not overload site\n sleep_time = random.randint(20,30)\n print(\"Link finder sleeping for {0} seconds...\".format(sleep_time))\n time.sleep(sleep_time)\n print(\"Exiting link finding...\")", "def search_brands_by_name(mystr):\n brands = 
Brand.query.filter(Brand.name.like('%'+mystr+'%')).all()\n return brands", "def findLinksWithUrlRe(page, searchRe):\n urls = []\n page = parseHtmlLinks(page)\n for linkUrl, linkText in page['links'].iteritems():\n dbgStr = 'Checking link: %s (%s), against %s' % (linkUrl, unidecode.unidecode(linkText), searchRe.pattern)\n logging.log(5, dbgStr)\n if searchRe.match(linkUrl):\n urls.append(linkUrl)\n logging.debug(u'Found link: %s -> %s' % (repr(linkText.decode('utf8')), repr(linkUrl.decode('utf8'))))\n\n if len(urls) != 0:\n logging.debug('Found links with %s in URL: %s' % (repr(searchRe.pattern), urls))\n else:\n logging.debug('Found no links with %s in URL' % searchRe.pattern)\n return urls", "def get_links(self, response, domain, port, folder):\n\t\t# find link in tags: a, link, form, button\n\t\t# call to all function in file get_link\n\t\t# for method in get_link:\n\t\tlinks = get_link(response, domain, port, folder)\n\t\tlinks = filter(None, links.getResults())\n\t\treturn links", "def rel_links(cls, page):\r\n for match in cls.REL_RE.finditer(page):\r\n href, rel = match.group(0), match.group(1)\r\n if rel not in cls.REL_TYPES:\r\n continue\r\n href_match = cls.HREF_RE.search(href)\r\n if href_match:\r\n href = cls.href_match_to_url(href_match)\r\n parsed_href = urlparse(href)\r\n if any(parsed_href.path.endswith(ext) for ext in cls.REL_SKIP_EXTENSIONS):\r\n continue\r\n yield href", "def get_links(self):\n soup = BeautifulSoup(requests.get(self.locations_url).text.strip(), features=\"lxml\")\n for region in soup.select('td[class=\"navbox-list navbox-odd\"]'):\n self.links.extend(region.div.find_all('a'))\n\n soup_prague = BeautifulSoup(requests.get(self.url_prague).text.strip(), features=\"lxml\")\n table_prague = soup_prague.findAll('table', {\"class\": \"wikitable\"})[3]\n for prague_parts in table_prague.select(\"tr > td:nth-child(3)\"):\n self.links.extend(prague_parts.find_all('a'))\n\n self.links = [self.url + i['href'] for i in self.links]\n self.links.append(self.url_prague)\n return None", "def find_twitter_handle(brand):\n twitter_file = open(\"twitter_handles.txt\")\n\n # get the brand name and twitter handle for each brand\n for line in twitter_file:\n line = line.strip().split(\"|\")\n foundation_brand = line[0]\n twitter_handle = line[1]\n\n # return the twitter handle for that brand\n if foundation_brand == brand:\n twitter_file.close()\n return twitter_handle", "def _parse_links(self, item):\n regex = compile(r\"<a\\s+(?:[^>]*?\\s+)?href=([\\\"\\'])(.*?)\\1.*\\>(.*)<\\/a>\")\n links = [\n {\"href\": href, \"title\": title}\n for (_, href, title) in findall(regex, item[\"Event\"][\"Description\"])\n ]\n for link in links:\n if link[\"href\"][0] == \"/\":\n link[\"href\"] = \"https://www.pghschools.org\" + link[\"href\"]\n return links", "def search_brands_by_name(mystr):\n \n return Brand.query.filter(Brand.name.like('%' + mystr + '%')).all()", "def get_links(new_links_file_path, archive_path=HTML_FOLDER):\n # parse and validate the new_links_file\n raw_links = parse_links(new_links_file_path)\n valid_links = validate_links(raw_links)\n\n # merge existing links in archive_path and new links\n existing_links = []\n if archive_path:\n existing_links = parse_json_links_index(archive_path)\n valid_links = validate_links(existing_links + valid_links)\n \n num_new_links = len(valid_links) - len(existing_links)\n print('[*] [{}] Adding {} new links from {} to index'.format(\n datetime.now().strftime('%Y-%m-%d %H:%M:%S'),\n num_new_links,\n new_links_file_path,\n ))\n\n return 
valid_links", "def test_finder_detects_latest_already_satisfied_find_links(data: TestData) -> None:\n req = install_req_from_line(\"simple\")\n # the latest simple in local pkgs is 3.0\n latest_version = \"3.0\"\n satisfied_by = Mock(\n location=\"/path\",\n version=parse_version(latest_version),\n )\n req.satisfied_by = satisfied_by\n finder = make_test_finder(find_links=[data.find_links])\n\n with pytest.raises(BestVersionAlreadyInstalled):\n finder.find_requirement(req, True)", "def test_finder_priority_nonegg_over_eggfragments() -> None:\n req = install_req_from_line(\"bar==1.0\")\n links = [\"http://foo/bar.py#egg=bar-1.0\", \"http://foo/bar-1.0.tar.gz\"]\n\n finder = make_test_finder(links)\n all_versions = finder.find_all_candidates(req.name)\n assert all_versions[0].link.url.endswith(\"tar.gz\")\n assert all_versions[1].link.url.endswith(\"#egg=bar-1.0\")\n\n found = finder.find_requirement(req, False)\n\n assert found is not None\n assert found.link.url.endswith(\"tar.gz\")\n\n links.reverse()\n\n finder = make_test_finder(links)\n all_versions = finder.find_all_candidates(req.name)\n assert all_versions[0].link.url.endswith(\"tar.gz\")\n assert all_versions[1].link.url.endswith(\"#egg=bar-1.0\")\n found = finder.find_requirement(req, False)\n\n assert found is not None\n assert found.link.url.endswith(\"tar.gz\")", "def process_links():\n from pymongo import Connection\n conn = Connection()\n db = conn['mchs']\n# db.drop_collection('svodki')\n coll = db['svodki']\n coll.ensure_index(\"url\")\n f = open('alllinks.csv', 'r')\n for l in f:\n parts = l.strip().split('\\t')\n if len(parts) < 4: continue\n year, month, day, url = parts\n o = coll.find_one({'url' : url})\n if o is not None: \n print url, 'passed'\n continue\n u = urllib2.urlopen(url)\n data = u.read()\n u.close()\n data = data.decode('cp1251')\n record = {'year' : int(year), 'month' : int(month), 'day' : int(day), 'url' : url, 'text' : data.encode('utf8')}\n coll.save(record)\n # MCHS site is badly designed and it could block us if we will download pages too often\n time.sleep(5)\n print url, 'processed'", "def refresh(cls):\n # Flip the order of the links so that the first URL listed is the\n # highest priority and will take precedence\n for url in current_app.config['MATLAB_DOC_LINKS'][::-1]:\n resp = requests.get(url)\n soup = BeautifulSoup(resp.text, 'html.parser')\n\n terms = soup.findAll('td', {'class': 'term'})\n links = [term.find('a') for term in terms]\n\n for link in links:\n\n function = link.text.rstrip()\n\n doc = cls.query.filter_by(name=function).first()\n doc_url = urljoin(url, link['href'])\n\n # Create an entry if one doesn't already exist\n if doc is None:\n doc = cls(name=function)\n\n doc.link = doc_url\n doc.save()\n\n # Make sure to remove i and j entries\n toremove = cls.query.filter(or_(cls.name == 'i', cls.name == 'j')).all()\n for item in toremove:\n item.delete()\n\n return cls.query.all()", "def get_listings(soup):\n listings = []\n for link in soup.find_all(\"a\"):\n if is_valid_listings(link):\n listings.append(link.attrs[\"href\"])\n return listings", "def exactor_links(self, response: BeautifulSoup):\n raise NotImplementedError", "def search(builders, spec):\n if 'builder' in spec:\n return [b for b in builders if b['name'] ==\n spec['builder']]\n elif 'hostname' in spec:\n return [b for b in builders if b['slavename']\n == spec['hostname']]\n else:\n return [b for b in builders if (b['name'] ==\n spec['either']) or (b['slavename'] == spec['either'])]", "def cleanup_links(path, 
inspect_links=False):\n with open(path) as f:\n text = f.read()\n\n# if 'BokehJS does not appear to have successfully loaded' in text:\n# for k, v in BOKEH_REPLACEMENTS.items():\n# text = text.replace(k, v)\n\n text = component_links(text, path)\n soup = BeautifulSoup(text, features=\"html.parser\")\n for a in soup.findAll('a'):\n href = a.get('href', '')\n if '.ipynb' in href and 'http' not in href:\n # for k, v in LINK_REPLACEMENTS.items():\n # href = href.replace(k, v)\n a['href'] = href.replace('.ipynb', '.html')\n\n # check to make sure that path exists, if not, try un-numbered version\n try_path = os.path.join(os.path.dirname(path), a['href'])\n if not os.path.exists(try_path):\n num_name = os.path.basename(try_path)\n name = re.split(r\"^\\d+( |-|_)\", num_name)[-1]\n new_path = try_path.replace(num_name, name)\n if os.path.exists(new_path):\n a['href'] = os.path.relpath(new_path, os.path.dirname(path))\n else:\n also_tried = 'Also tried: {}'.format(name) if name != num_name else ''\n warnings.warn('Found missing link {} in: {}. {}'.format(a['href'], path, also_tried))\n\n if inspect_links and 'http' in a['href']:\n print(a['href'])\n for img in soup.findAll('img'):\n src = img.get('src', '')\n if 'http' not in src and 'assets' in src:\n try_path = os.path.join(os.path.dirname(path), src)\n if not os.path.exists(try_path):\n also_tried = os.path.join('..', src)\n if os.path.exists(os.path.join(os.path.dirname(path), also_tried)):\n img['src'] = also_tried\n else:\n warnings.warn('Found reference to missing image {} in: {}. Also tried: {}'.format(src, path, also_tried))\n with open(path, 'w') as f:\n f.write(str(soup))" ]
[ "0.6620311", "0.6351018", "0.61144876", "0.5799459", "0.57774466", "0.56885266", "0.56424046", "0.5591962", "0.5496416", "0.5412613", "0.538556", "0.536212", "0.5332424", "0.5328891", "0.53039545", "0.5301546", "0.52905977", "0.5289629", "0.52668715", "0.52547145", "0.52470016", "0.5234316", "0.52269185", "0.5225522", "0.5213021", "0.5208822", "0.5198679", "0.5183416", "0.5182295", "0.5180101", "0.5170062", "0.5167452", "0.51272863", "0.5116159", "0.51149684", "0.5110054", "0.5084029", "0.50791466", "0.50647324", "0.5064732", "0.5062362", "0.50593823", "0.50491947", "0.50481087", "0.50389355", "0.5028429", "0.5024244", "0.50231564", "0.50144356", "0.5011742", "0.5006128", "0.5005623", "0.49818492", "0.4980396", "0.49794537", "0.49643332", "0.49585626", "0.4958265", "0.49579597", "0.4956384", "0.4953978", "0.493723", "0.49329802", "0.49285993", "0.49285412", "0.49172395", "0.4905432", "0.4897649", "0.48901337", "0.48897973", "0.48897502", "0.48793516", "0.4876185", "0.4875767", "0.4872676", "0.48698404", "0.48641044", "0.48597363", "0.48564467", "0.4854843", "0.4854338", "0.484911", "0.48484007", "0.4848304", "0.48433456", "0.48367617", "0.4836402", "0.48338965", "0.48300794", "0.4829617", "0.48246574", "0.48232868", "0.48203316", "0.481512", "0.48129648", "0.48100972", "0.48060125", "0.48053947", "0.48044255", "0.48017728" ]
0.5429173
9
Generate the preparation files for the projects in a run
def format_preparation_files(run_dir, sample_sheet, output_dir, pipeline,
                             verbose):
    sample_sheet = KLSampleSheet(sample_sheet)
    df_sheet = sample_sheet_to_dataframe(sample_sheet)

    if pipeline == 'atropos-and-bowtie2':
        click.echo('Stats collection is not supported for pipeline '
                   'atropos-and-bowtie2')
    else:
        stats = run_counts(run_dir, sample_sheet)
        stats['sample_name'] = \
            df_sheet.set_index('lane', append=True)['sample_name']

    # returns a map of (run, project_name, lane) -> preparation frame
    preps = preparations_for_run(run_dir, df_sheet, pipeline=pipeline)

    os.makedirs(output_dir, exist_ok=True)

    for (run, project, lane), df in preps.items():
        fp = os.path.join(output_dir, f'{run}.{project}.{lane}.tsv')

        if pipeline == 'fastp-and-minimap2':
            # stats are indexed by sample name and lane, lane is the first
            # level index. When merging, make sure to select the lane subset
            # that we care about, otherwise we'll end up with repeated rows
            df = df.merge(stats.xs(lane, level=1), how='left',
                          on='sample_name')

        # strip qiita_id from project names in sample_project column
        df['sample_project'] = df['sample_project'].map(
            lambda x: re.sub(r'_\d+$', r'', x))

        # center_project_name is a legacy column that should mirror
        # the values for sample_project.
        df['center_project_name'] = df['sample_project']

        df.to_csv(fp, sep='\t', index=False)

        if verbose:
            project_name = remove_qiita_id(project)
            # assume qiita_id is extractable and is an integer, given that
            # we have already passed error-checking.
            qiita_id = project.replace(project_name + '_', '')
            print("%s\t%s" % (qiita_id, abspath(fp)))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def task_generate_tasks():\n \n yield {\n 'basename': 'generate_tasks',\n 'name': None,\n # 'doc': 'docs for X',\n 'watch': ['trains/'],\n 'task_dep': ['create_folders'],\n }\n \n for root, dirs, files in os.walk('trains/',topdown=False):\n for f in files:\n #print(f)\n yield template_train_model(os.path.join(root,f))", "def project():", "def project():", "def project():", "def generate_files(self):\n\t\tapply_stemmer, xml_file, query_file, expected_file = self.read_config_file()\n\t\tself.generate_query_file(query_file, xml_file, apply_stemmer)\n\t\tself.generate_expected_file(expected_file, xml_file)\n\t\tlogging.info('FINALIZADO: MÓDULO PROCESSADOR DE CONSULTAS')", "def create_files(project_name, root_dir):\r\n root_dir = projectfolders.create_path(root_dir, project_name) #Modify the root\r\n \r\n write_setup(project_name, root_dir)\r\n write_inits(project_name, root_dir)\r\n write_tests(project_name, root_dir)", "def _prepare_projects(self):\n self._projects = {}\n self._cfgs = {}\n self._plugins = {}\n\n working_bins = []\n for b in self._seed_bins:\n if any([nb in b for nb in self._ignore_bins]):\n continue\n\n log.info(\"Building %s CFG (this may take some time)\" % b.split('/')[-1])\n try:\n blob = False\n try:\n self._projects[b] = angr.Project(b, auto_load_libs=False)\n except:\n log.info(\"We got a blob\")\n self._projects[b] = angr.Project(b, auto_load_libs=False, load_options={'main_opts': {'custom_arch': self.config['arch'], 'backend': 'blob', 'custom_base_addr': int(self.config['base_addr'], 16)}})\n blob = True\n\n self._cfgs[b] = self._projects[b].analyses.CFG(collect_data_references=True, extra_cross_references=True)\n\n self._plugins[b] = []\n\n if blob:\n memcplike = find_memcmp_like(self._projects[b], self._cfgs[b])\n else:\n memcplike = []\n\n for plugin in self._enabled_plugins:\n self._plugins[b].append(plugin(self._projects[b], self._cfgs[b], self._fw_path, memcmp_like_functions=memcplike,log=log))\n working_bins.append(b)\n except Exception as e:\n log.warning(\"Skipping binary %s\" % b)\n import ipdb; ipdb.set_trace()\n self._seed_bins = list(working_bins)", "def _build_pre_project_template(self, output_filename=\"{}_pr_p.json\"):\n template = actions.ActionsTemplate()\n\n for resource_type, resource_cls in six.iteritems(AVAILABLE_RESOURCES):\n resource_cls.register_type_pre_project_template(self, template)\n for r in self.get_resources(resource_type):\n r.register_pre_project_template(template)\n\n if template:\n output_filename = output_filename.format(self._get_next_build_sequence_id())\n self.puts(colored.cyan(output_filename))\n with open(os.path.join(self.build_path, output_filename), 'w') as f:\n f.write(template.to_json(indent=4))", "def main():\n global GOLIVE # If False, it's a dry run only\n global PROJECT_ROOT\n global CAD_SOURCE\n global REVIT_SOURCE\n global GENERIC_SOURCE\n global FOLDER_LIST\n global logger\n\n logger = logging.getLogger('__name__')\n stream_handler = logging.StreamHandler()\n logger.addHandler(stream_handler)\n logger.setLevel(logging.INFO)\n\n logger.debug(sys.argv)\n parser = argparse.ArgumentParser(description='Create a project')\n group = parser.add_mutually_exclusive_group()\n group.add_argument('-i', action='store_true', help=\"Show INFO messages\")\n group.add_argument('-d', action='store_true', help=\"Show DEBUG messages\")\n parser.add_argument('-t', action='store_true', help='Test: dry run only')\n parser.add_argument('-r', help=\"Set root directory\")\n parser.add_argument('project_data', nargs='+', 
help=\"<num>%,<name>%<type>\")\n\n args = parser.parse_args(sys.argv[1:])\n logger.debug(args)\n if args.i:\n logger.info('Setting logging level to INFO')\n logger.setLevel(logging.INFO)\n elif args.d:\n logger.info('Setting logging level to DEBUG')\n logger.setLevel(logging.DEBUG)\n if args.t:\n GOLIVE = False\n logger.info('Dry run...')\n if args.r:\n PROJECT_ROOT = args.r\n logger.info(f'Setting PROJECT_ROOT to {args.r}')\n\n CAD_SOURCE = os.path.join(PROJECT_ROOT, 'Templates', 'CAD_Template')\n REVIT_SOURCE = os.path.join(PROJECT_ROOT, 'Templates', 'Revit_Template')\n GENERIC_SOURCE = os.path.join(PROJECT_ROOT,\n 'Templates', 'Generic_Template')\n FOLDER_LIST = os.listdir(PROJECT_ROOT)\n project_info = ' '.join(args.project_data) # The parser split at spaces\n logger.debug(f'Project info: {project_info}')\n project_info = project_info.split('%') # Divide it into our 3 fields\n project_number, project_name, project_type = project_info\n assert project_type in ['Revit', 'CAD', 'Generic']\n\n if checkNewProject(project_number, project_name): # Sanity checks\n success = createProject(project_number, project_name, project_type)\n if success:\n logger.info(f'Created project {project_number} {project_name}')\n else:\n logger.error('Project creation failed.')", "def prepare_run_directory(resource_types, trecs_root_dir):\n # Copy the executables and required files in 'run' directory.\n sources = {path.join(trecs_root_dir, 'src', 'model', 'grid'),\n path.join(trecs_root_dir, 'src', 'api'),\n path.join(trecs_root_dir, 'src', 'module'),\n path.join(trecs_root_dir, 'src', 'util'),\n path.join(trecs_root_dir, 'src', 'router')}\n\n for source in sources:\n for dirpath, _, filenames in walk(source):\n for filename in filenames:\n from_path = path.join(dirpath, filename)\n to_path = path.join(trecs_root_dir, 'run', filename)\n copy2(from_path, to_path)\n\n # From resource model repositories, copy all the directory contents as they are\n for resource in resource_types:\n copy_tree(path.join(trecs_root_dir, 'src', 'model', 'resource', resource),\n path.join(trecs_root_dir, 'run'))", "def generate_all_files():\n for (name, fn) in lang_module.targets.items():\n path = of_g.options.install_dir + '/' + name\n os.system(\"mkdir -p %s\" % os.path.dirname(path))\n with open(path, \"w\") as outfile:\n fn(outfile, os.path.basename(name))\n print(\"Wrote contents for \" + name)", "def prepare(self):\n import tempfile \n import evoware\n self.f_project = tempfile.mkdtemp(prefix='evoware_cherrypick_')\n evoware.plates.index.clear()", "def cmd_generate_requirements(): \n \n for env in ('dev', 'test'):\n source = Path(ROOT, \"requirements\", f\"{env}.txt\")\n target = Path(ROOT, \"requirements\", f\"{env}.in\")\n os.system(f\"pip-compile --output-file={source} {target}\")", "def cmd_generate_requirements(): \n \n for env in ('dev', 'test'):\n source = Path(ROOT, \"requirements\", f\"{env}.txt\")\n target = Path(ROOT, \"requirements\", f\"{env}.in\")\n os.system(f\"pip-compile --output-file={source} {target}\")", "def setupRunDir(self):\n\n pass", "def generate_project_files(specs_path, dst_path):\n hm_generator = HookManGenerator(hook_spec_file_path=specs_path)\n hm_generator.generate_project_files(Path(dst_path))\n return 0", "def setup_for_compilation_testcase(self):\n os.chdir(self.tmp_work)\n\n for container in self.containers:\n self._setup_single_directory_for_compilation(container.directory)\n # Run any necessary pre_commands\n self._run_pre_commands(container.directory)", "def generate_build_files(ctx):\n\n 
project_dir = Path(__file__).parent\n\n directory_of_the_tests = project_dir / \"tests/plugins\"\n directory_to_build_tests = project_dir / \"build/build_directory_for_tests\"\n\n # Clean UP\n if directory_to_build_tests.exists():\n shutil.rmtree(directory_to_build_tests)\n os.makedirs(directory_to_build_tests)\n\n # Finding hook_specs.py, each hook_specs represent a different project with different hooks\n hook_spec_paths = [\n path for path in directory_of_the_tests.glob(\"**/hook_specs.py\") if \"tmp\" not in path.parts\n ]\n\n # CMakeList.txt that includes all sub_directory with tests to be compiled\n root_cmake_list = directory_to_build_tests / \"CMakeLists.txt\"\n cmake_file_of_test_build_dir = [\n f\"add_subdirectory({i.parent.name })\\n\" for i in hook_spec_paths\n ]\n root_cmake_list.write_text(\"\".join(cmake_file_of_test_build_dir))\n\n # For each hook_specs, create a directory for the compilation and generate the files\n for project_hook_spec_path in hook_spec_paths:\n project_dir_for_build = directory_to_build_tests / project_hook_spec_path.parent.name\n project_dir_for_build.mkdir(parents=True)\n\n hm_generator = HookManGenerator(hook_spec_file_path=project_hook_spec_path)\n hm_generator.generate_project_files(dst_path=project_dir_for_build)\n\n # Find folder with Plugins\n plugins_dirs = [\n x\n for x in project_hook_spec_path.parent.iterdir()\n if x.is_dir() and (x / \"assets\").exists()\n ]\n\n # Copy all the plugins to the build dir\n for plugin in plugins_dirs:\n plugin_dir_build = project_dir_for_build / f\"plugin/{plugin.name}\"\n shutil.copytree(src=plugin, dst=plugin_dir_build)\n (plugin_dir_build / \"src/hook_specs.h\").write_text(\n hm_generator._hook_specs_header_content(plugin.stem)\n )\n\n # Create the CMakeFile on root of the project to include others CMake files.\n main_cmakelist = project_dir_for_build / \"CMakeLists.txt\"\n main_cmakelist_content = []\n main_cmakelist_content.append(\"add_subdirectory(cpp)\\nadd_subdirectory(binding)\\n\")\n main_cmakelist_content += [\n f\"add_subdirectory(plugin/{plugin.name}/src)\\n\" for plugin in plugins_dirs\n ]\n main_cmakelist.write_text(\"\".join(main_cmakelist_content))", "def post_build(self, manager):\n if not self.output_files_dir.exists():\n return\n\n output_file_dirs = [\n d for d in self.output_files_dir.rglob(\"*\") if d.is_dir()\n ] + [self.output_files_dir]\n for output_file_dir in output_file_dirs:\n stem = output_file_dir.relative_to(self.output_files_dir)\n api_path = self.api_dir / stem / ALL_JSON\n\n yield self.task(\n name=f\"contents:{stem}\",\n doc=f\"create a Jupyter Contents API response for {stem}\",\n actions=[\n (self.one_contents_path, [output_file_dir, api_path]),\n (self.maybe_timestamp, [api_path]),\n ],\n file_dep=[p for p in output_file_dir.rglob(\"*\") if not p.is_dir()],\n targets=[api_path],\n )", "def setups():\n setups = []\n\n # If you run this in detailed mode, you need to set --t8 to 1e8\n kotani2017_F2 = dict()\n kotani2017_F2['name'] = 'kotani2017_F2'\n kotani2017_F2['piltemplate'] = kotani2017_F2_pil\n kotani2017_F2['pilparams'] = [None]\n kotani2017_F2['pepperargs'] = {'condensed': True, 'conc': 'nM', 'release_cutoff': 10}\n kotani2017_F2['simulation'] = [\n ('pilsimulator', '--nxy', '--atol', '1e-13', '--rtol', '1e-13', '--mxstep', '10000', '--t8', '36000', '--p0', 'S1=10', 'S2=10', 'R=20', 'C1=1'),\n ('pilsimulator', '--nxy', '--atol', '1e-13', '--rtol', '1e-13', '--mxstep', '10000', '--t8', '36000', '--p0', 'S1=10', 'S2=10', 'R=20', 'C1=0.5'),\n ('pilsimulator', '--nxy', 
'--atol', '1e-13', '--rtol', '1e-13', '--mxstep', '10000', '--t8', '36000', '--p0', 'S1=10', 'S2=10', 'R=20', 'C1=0.05')]\n kotani2017_F2['reporter'] = 'D'\n kotani2017_F2['exp_results'] = [(7733, 7.42), (11333, 6.18), (25533, 1.40)]\n setups.append(kotani2017_F2)\n\n\n\n # If you run this in detailed mode, you need to set --t8 to 1e8\n kotani2017_F3 = dict()\n kotani2017_F3['name'] = 'kotani2017_F3'\n kotani2017_F3['piltemplate'] = kotani2017_F3_pil\n kotani2017_F3['pilparams'] = [None]\n kotani2017_F3['pepperargs'] = {'condensed': True, 'conc': 'nM', 'release_cutoff': 10}\n kotani2017_F3['simulation'] = [\n ('pilsimulator', '--nxy', '--atol', '1e-10', '--rtol', '1e-10', '--mxstep', '10000', '--t8', '360000', '--p0', 'S1=10', 'S2=10', 'S3=10', 'S4=10', 'R=20', 'C1=0.1'),\n ('pilsimulator', '--nxy', '--atol', '1e-10', '--rtol', '1e-10', '--mxstep', '10000', '--t8', '360000', '--p0', 'S1=10', 'S2=10', 'S3=10', 'S4=10', 'R=20', 'C1=0.01'),\n ('pilsimulator', '--nxy', '--atol', '1e-10', '--rtol', '1e-10', '--mxstep', '10000', '--t8', '360000', '--p0', 'S1=10', 'S2=10', 'S3=10', 'S4=10', 'R=20', 'C1=0.001')]\n kotani2017_F3['reporter'] = 'D'\n kotani2017_F3['exp_results'] = [(21220, 7.72), (64203, 3.12), (86996, 0.69)]\n setups.append(kotani2017_F3)\n\n # If you run this in detailed mode, you need to set --t8 to 1e8\n kotani2017_F4 = dict()\n kotani2017_F4['name'] = 'kotani2017_F4'\n kotani2017_F4['piltemplate'] = kotani2017_F4_pil\n kotani2017_F4['pilparams'] = [None]\n kotani2017_F4['pepperargs'] = {'condensed': True, 'conc': 'nM', 'release_cutoff': 10}\n kotani2017_F4['simulation'] = [\n ('pilsimulator', '--nxy', '--atol', '1e-10', '--rtol', '1e-10', '--mxstep', '10000', '--t8', '360000', '--p0', 'S5au=10', 'S6au=10', 'R=20', 'C1x=0.1'),\n ('pilsimulator', '--nxy', '--atol', '1e-10', '--rtol', '1e-10', '--mxstep', '10000', '--t8', '360000', '--p0', 'S5au=10', 'S6au=10', 'R=20', 'C1x=0.01'),\n ('pilsimulator', '--nxy', '--atol', '1e-10', '--rtol', '1e-10', '--mxstep', '10000', '--t8', '360000', '--p0', 'S5au=10', 'S6au=10', 'R=20', 'C1x=0.001'),\n ('pilsimulator', '--nxy', '--atol', '1e-10', '--rtol', '1e-10', '--mxstep', '10000', '--t8', '360000', '--p0', 'S5au=10', 'S6au=10', 'R=20', 'C1x=0')]\n kotani2017_F4['reporter'] = 'D'\n kotani2017_F4['exp_results'] = [(6815, 6.06), (9004, 4.78), (10278, 4.03), (10795, 3.73)]\n setups.append(kotani2017_F4)\n\n return setups", "def run() -> ():\n if len(sys.argv) > 1:\n show_help()\n errs = get_cargo_input()\n main_stack = make_proj_stack(errs)\n while len(main_stack) > 0:\n file_stack = make_file_stack(main_stack)\n overwrite(file_stack)\n\n # FIXME", "def prepare(skip_static=False):\n\n local('npm install')\n local('grunt build')\n\n with warn_only():\n local('git add staticfiles')\n local('git add {{ project_name }}/templates')\n local('git commit -m \"PRODUCTION ONLY: Build static files.\"')\n\n files_to_remove = (\n '.bowerrc',\n '.editorcinfig',\n '.gitignore',\n '.jshintrc',\n 'bower.json',\n 'dev-only-package.json',\n 'error.log',\n 'fabfile.py',\n 'Gruntfile.js',\n 'migrate.sh',\n 'README.md',\n 'serve.sh',\n 'flush_cache.py',\n )\n\n with warn_only():\n for file_ in files_to_remove:\n local('git rm {}'.format(file_))\n\n # store it\n local('git commit -m \"PRODUCTION ONLY: Removing files.\"')\n\n if skip_static:\n local('touch .skipDjango')\n local('git add .skipDjango')\n local('git commit -m \"PRODUCTION ONLY: Skip static files\"')", "def setup():\n print('...')\n # Make sure dirs exist\n for directory in [DATA_DIR, DATA_INPUT_DIR, 
DATA_OUTPUT_DIR]:\n os.makedirs(directory, exist_ok=True)", "def write_inits(project_name, root_dir):\r\n \r\n #Create our file paths first...\r\n test_init_path = get_file_path(root_dir, \"tests\", \"__init__.py\")\r\n project_init_path = get_file_path(root_dir, project_name, \"__init__.py\")\r\n \r\n #Write the test_init file first\r\n test_init = open(test_init_path, 'w')\r\n test_init.close()\r\n print_file(test_init_path)\r\n \r\n #Write the NAME_init second\r\n project_init = open(project_init_path, 'w')\r\n project_init.close()\r\n print_file(project_init_path)", "def project_starter(project_name,yaml_project):\n snpt.load_snippets()\n archives = yaml.load(yaml_project)\n make_project_structure(archives,\"./\",project_name)\n make_exec(project_name + '/manage.py')", "def test_project_generation(cookies, context, context_combination):\n result = cookies.bake(extra_context={**context, **context_combination})\n assert result.exit_code == 0\n assert result.exception is None\n assert result.project.basename == context[\"project_slug\"]\n assert result.project.isdir()\n\n paths = build_files_list(str(result.project))\n assert paths\n check_paths(paths)", "def create_project(self,*pages,config_folder = \"config\",FunctionBased = False):\n\n self._make_initial_directories()\n self._make_initial_files(*pages,FunctionBased = FunctionBased)", "def init(projectfolder, projectname, example):\n\n productline_dir = path.join(projectfolder, \"productline\")\n configs_path = path.join(productline_dir, \"configs\")\n bddfeatures_path = path.join(projectfolder, \"bddfeatures\")\n testreports_path = path.join(projectfolder, \"testreports\")\n\n if not path.exists(productline_dir):\n makedirs(productline_dir)\n\n if not path.exists(configs_path):\n makedirs(configs_path)\n\n if not path.exists(bddfeatures_path):\n makedirs(bddfeatures_path)\n\n if not path.exists(testreports_path):\n makedirs(testreports_path)\n\n model_src = pkg_resources.resource_filename(__name__, \"templates/model.xml\")\n model_dst = path.join(productline_dir, \"model.xml\")\n shutil.copyfile(model_src, model_dst)\n utilities.sed_inplace(model_dst, \"{{PROJECT_NAME}}\", projectname.replace(\" \", \"\"))\n\n configtemplate_src = pkg_resources.resource_filename(__name__, 'templates/aplet.yml')\n configtemplate_dst = path.join(projectfolder, \"aplet.yml\")\n shutil.copyfile(configtemplate_src, configtemplate_dst)\n utilities.sed_inplace(configtemplate_dst, \"{{PROJECT_NAME}}\", projectname)\n\n # copy docs templates from aplet application into projectfolder\n lektortemplates_path = pkg_resources.resource_filename(__name__, 'templates/lektor')\n doc_templates_path = path.join(projectfolder, \"doc_templates\")\n if not path.exists(doc_templates_path):\n shutil.copytree(lektortemplates_path, doc_templates_path)\n\n\n if example:\n examples_dir = \"templates/exampleproject\"\n model_src = pkg_resources.resource_filename(__name__, path.join(examples_dir, \"model.xml\"))\n shutil.copyfile(model_src, model_dst)\n exampleconfig_src = pkg_resources.resource_filename(__name__, path.join(examples_dir, \"ExampleProduct.config\"))\n shutil.copyfile(exampleconfig_src, path.join(configs_path, \"ExampleProduct.config\"))\n configtemplate_src = pkg_resources.resource_filename(__name__, path.join(examples_dir, \"aplet.yml\"))\n shutil.copyfile(configtemplate_src, configtemplate_dst)", "def setup():\n require('hosts', 'project_path', provided_by=envs.ENVS)\n\n if not exists(env.project_path):\n abort(red('Project path ({project_path}) does not exist. 
'\n 'Create it on the server before continuing.'.format(**env)))\n\n with cd(env.project_path):\n run('mkdir -p api renderer conf markup_renderer')\n run('mkdir -p api/static api/uploads')\n\n make_release_folders('api')\n make_release_folders('renderer')", "def main():\n for db_csv_export in current_dir.glob(\"template*.csv\"):\n data_projects = load_projects(db_csv_export)\n json_path = db_csv_export.with_suffix(\".json\")\n with open(json_path, \"w\") as fh:\n json.dump(data_projects, fh, indent=2)", "def prepare_run(self, **kwargs):\n assert self.cloud\n LOGGER.debug('Validating run tests...')\n for test in kwargs.get('tests', self.stests):\n if test in self.stests:\n self.tests.append(test)\n else:\n raise Exception(f\"Test name '{test}' is invalid\")\n\n if not os.path.exists(self.task_dir):\n os.makedirs(self.task_dir)\n\n task = os.path.join(self.rally_dir, 'task.yaml')\n if not os.path.exists(task):\n LOGGER.error(\"Task file '%s' does not exist.\", task)\n raise Exception(f\"Task file '{task}' does not exist.\")\n self.task_file = os.path.join(self.task_dir, 'task.yaml')\n shutil.copyfile(task, self.task_file)\n\n task_macro = os.path.join(self.rally_dir, 'macro')\n if not os.path.exists(task_macro):\n LOGGER.error(\"Task macro dir '%s' does not exist.\", task_macro)\n raise Exception(f\"Task macro dir '{task_macro}' does not exist.\")\n macro_dir = os.path.join(self.task_dir, 'macro')\n if os.path.exists(macro_dir):\n shutil.rmtree(macro_dir)\n shutil.copytree(task_macro, macro_dir)\n\n self.update_keystone_default_role()\n self.compute_cnt = self.count_hypervisors()\n self.network_extensions = self.cloud.get_network_extensions()\n self.flavor_alt = self.create_flavor_alt()\n self.services = [service.name for service in\n functest_utils.list_services(self.cloud)]\n\n LOGGER.debug(\"flavor: %s\", self.flavor_alt)", "def prepare():\n sh('pip install pylint pyflakes behave nose clonedigger pep8 sphinx')\n sh('pip install watchdog coverage ipython sphinx_rtd_theme')\n develop()", "def task_generate_virtual_samples():\n metadata_files = Path(__file__).parent.glob('*_meta.yaml')\n data_files = Path(__file__).parent.glob('*_data.yaml')\n\n script = Path(__file__).parents[0] / \"virtual_experiment.py\"\n\n return {\n \"actions\": [f\"{PYTHON_EXE} {script}\"],\n \"file_dep\": [script, *metadata_files],\n \"verbosity\": 2, # show stdout\n \"targets\": [*data_files],\n \"setup\": [\"generate_virtual_metadata\"],\n \"clean\": [clean_targets]\n }", "def setUp(self):\n # main directory of the project\n self.project_dir = os.path.dirname(self.basedir)\n\n # change to workdir so simulation process finds the source files\n os.chdir(self.workdir)", "def setup_analysis_skeleton(project_dir,sub_stem):\n \n # Move from project_dir to ToProcess\n TP_dir = os.path.join(project_dir,'ToProcess')\n move_files(project_dir, TP_dir, '')\n \n all_files = glob.glob(os.path.join(TP_dir,'*%s*' % (sub_stem)))\n all_files = [os.path.basename(f) for f in all_files]\n \n prefixes = [pref[:len(sub_stem)+4] for pref in all_files]\n subjects = list(set(prefixes))\n subjectDir = []\n\n for idx, subs in enumerate(subjects):\n \n sub_dir = os.path.join(TP_dir, subs[0:(len(sub_stem) + 4)])\n subjectDir.append(sub_dir)\n \n if os.path.exists(subjectDir[idx]) == 0:\n os.mkdir(os.path.join(TP_dir,subjectDir[idx]))\n \n toMove = [os.path.abspath(f) for f in glob.glob(subjectDir[idx] + '*')]\n \n # Move all files to subject-specific dirs\n for mFile in toMove:\n if os.path.isfile(mFile):\n shutil.move(mFile, subjectDir[idx])\n 
\n for entry in glob.glob(subjectDir[idx] + '*'):\n if os.path.isfile(entry):\n print \"Not allocated: \" + entry\n \n return subjectDir", "def prepare_files_to_send(self, run_dir=None):\n if run_dir is None:\n run_dir = self.local_directory\n self._set_arguments()\n initials = self._serialize(run_dir)\n pscript = self._make_run(run_dir)\n dependencies = self._append_further_files_to_send()\n self.main_runfile = self._write_main_runfile(pscript, run_dir)\n initials.append(self.main_runfile)\n self.files_sent(False)\n return initials + dependencies", "def post_process(self, **kwargs):\n self.create_ignore()\n click.echo('Create project {} successfully. Enjoy yourself!'.format(self.app_dir))", "def modelarts_pre_process():\n config.file_name = os.path.join(config.output_path, config.file_name)", "def prepare_gen(self, targets):\r\n pass", "def setup_build_tests(self):\n # Now copy the relative files\n self.cache_extra_test_sources(self.build_relpath)\n\n # Ensure the path exists since relying on a relative path at the\n # same level as the normal stage source path.\n mkdirp(self.install_test_root)", "def setUp(self):\n # main directory of the project\n self.project_dir = os.path.dirname(self.basedir)\n\n # change to workdir so simulation process find the source files\n os.chdir(self.workdir)", "def setup():\n for dir_path in [train_dir, output_dir]:\n Path(dir_path).mkdir(exist_ok=True)\n\n # create the training and test data files that we will use\n create_jsonlines_feature_files(train_dir)", "def gen_tasks(self):\n self.kw = {\n 'image_srcset_sizes': self.site.config['IMAGE_SRCSET_SIZES'],\n 'image_srcset_format': self.site.config['IMAGE_SRCSET_FORMAT'],\n 'extra_image_extensions': self.site.config['EXTRA_IMAGE_EXTENSIONS'],\n 'max_image_size': self.site.config['MAX_IMAGE_SIZE'],\n 'image_folders': self.site.config['IMAGE_FOLDERS'],\n 'output_folder': self.site.config['OUTPUT_FOLDER'],\n 'filters': self.site.config['FILTERS'],\n 'preserve_exif_data': self.site.config['PRESERVE_EXIF_DATA'],\n 'exif_whitelist': self.site.config['EXIF_WHITELIST'],\n 'preserve_icc_profiles': self.site.config['PRESERVE_ICC_PROFILES'],\n }\n\n self.image_ext_list = self.image_ext_list_builtin\n self.image_ext_list.extend(self.site.config.get('EXTRA_IMAGE_EXTENSIONS', []))\n\n yield self.group_task()\n for src in self.kw['image_folders']:\n dst = self.kw['output_folder']\n filters = self.kw['filters']\n real_dst = os.path.join(dst, self.kw['image_folders'][src])\n for task in self.process_tree(src, real_dst):\n task['basename'] = self.name\n task['uptodate'] = [utils.config_changed(self.kw)]\n yield utils.apply_filters(task, filters)", "def prepare_project(self, project=None):\n if project == None:\n return [None, '']\n else:\n memory_file = BytesIO()\n with zipfile.ZipFile(memory_file, 'w') as zf:\n project_dict = project.compress()\n comments = project_dict['comments']\n del project_dict['comments']\n resources = project_dict['resources']\n del project_dict['resources']\n history = project_dict['history']\n del project_dict['history']\n records = project_dict['records']\n del project_dict['records']\n diffs = project_dict['diffs']\n del project_dict['diffs']\n application = project_dict['application']\n del project_dict['application']\n try:\n self.agent_prepare(zf, 'project', project_dict)\n except:\n print(traceback.print_exc())\n try:\n self.agent_prepare(zf, 'comments', comments)\n except:\n print(traceback.print_exc())\n try:\n self.agent_prepare(zf, 'resources', resources)\n except:\n 
print(traceback.print_exc())\n try:\n self.agent_prepare(zf, 'environments', history)\n except:\n print(traceback.print_exc())\n try:\n self.agent_prepare(zf, 'records', records)\n except:\n print(traceback.print_exc())\n try:\n self.agent_prepare(zf, 'application', application)\n except:\n print(traceback.print_exc())\n try:\n self.agent_prepare(zf, 'diffs', diffs)\n except:\n print(traceback.print_exc())\n memory_file.seek(0)\n\n return [memory_file, \"project-%s.zip\"%str(project.id)]", "def init(cx):\n\n\n # create the folder structure\n for d in PROJECT_DIRS:\n cx.run(\"mkdir -p {}\".format(d))\n cx.run(\"touch {}/.keep\".format(d))", "def generate_project(self, config_file_name, debug=False):\n\t\t#reading the project config data into the the project tags\n\t\tresult = self.read_config_file(config_file_name)\n\t\tif (not result):\n\t\t\tif (debug):\n\t\t\t\tprint \"failed to read in project config file\"\n\t\t\treturn False\n\t\t\n\t\t#extrapolate the bus template\n\t\tself.project_tags[\"CLOCK_RATE\"] = saputils.read_clock_rate(self.project_tags[\"CONSTRAINTS\"][\"constraint_files\"][0])\n\t\tresult = self.read_template(self.project_tags[\"TEMPLATE\"])\n\t\tif (not result):\n\t\t\tif (debug):\n\t\t\t\tprint \"failed to read in template file\"\n\t\t\treturn False\n\n\t\t#set all the tags within the filegen structure\n\t\tif debug:\n\t\t\tprint \"set all tags wihin filegen structure\"\n\t\tself.filegen.set_tags(self.project_tags)\n\n\t\t#generate the project directories and files\n\t\tsaputils.create_dir(self.project_tags[\"BASE_DIR\"])\t\t\n\t\tif debug:\n\t\t\tprint \"generated the first dir\"\n\n\t\t#generate the arbitrator tags, this is important because the top\n\t\t#needs the arbitrator tags\n\t\tarb_tags = saparbitrator.generate_arbitrator_tags(self.project_tags, False) \n\t\tself.project_tags[\"ARBITRATORS\"] = arb_tags\n\n\n\t\t#print \"Parent dir: \" + self.project_tags[\"BASE_DIR\"]\n\t\tfor key in self.template_tags[\"PROJECT_TEMPLATE\"][\"files\"]:\n\t\t\tself.recursive_structure_generator(\n\t\t\t\t\t\t\tself.template_tags[\"PROJECT_TEMPLATE\"][\"files\"],\n\t\t\t\t\t\t\tkey,\n\t\t\t\t\t\t\tself.project_tags[\"BASE_DIR\"])\n\n\t\tif debug:\n\t\t\tprint \"generating project directories finished\"\n\t\n\t\tif debug:\n\t\t\tprint \"generate the arbitrators\"\n\t\t\n\t\tself.generate_arbitrators()\n\n\t\t#Generate all the slaves\n\t\tfor slave in self.project_tags[\"SLAVES\"]:\n\t\t\tfdict = {\"location\":\"\"}\n\t\t\tfile_dest = self.project_tags[\"BASE_DIR\"] + \"/rtl/bus/slave\"\n\t\t\tfn = self.project_tags[\"SLAVES\"][slave][\"filename\"]\n\t\t\tresult = self.filegen.process_file(filename = fn, file_dict = fdict, directory=file_dest)\n\t\t\tif (not result):\n\t\t\t\tprint \"Error: Failed to process the slave file: \" + fn\n\t\t\t#each slave\n\n\t\tif (\"MEMORY\" in self.project_tags):\n\t\t\tfor mem in self.project_tags[\"MEMORY\"]:\n\t\t\t\tfdict = {\"location\":\"\"}\n\t\t\t\tfile_dest = self.project_tags[\"BASE_DIR\"] + \"/rtl/bus/slave\"\n\t\t\t\tfn = self.project_tags[\"MEMORY\"][mem][\"filename\"]\n\t\t\t\tresult = self.filegen.process_file(filename = fn, file_dict = fdict, directory = file_dest, debug = True)\n\t\t\t\tif (not result):\n\t\t\t\t\tprint \"Error: Failed to proecess memory file!: \" + mem\n\n\t\t#Copy the user specified constraint files to the constraints directory\n\t\tfor constraint_fname in self.project_tags[\"CONSTRAINTS\"][\"constraint_files\"]:\n\t\t\tsap_abs_base = os.getenv(\"SAPLIB_BASE\")\n\t\t\tabs_proj_base = 
saputils.resolve_linux_path(self.project_tags[\"BASE_DIR\"])\n\t\t\tconstraint_path = self.get_constraint_path(constraint_fname)\n\t\t\tif (len(constraint_path) == 0):\n\t\t\t\tprint \"Couldn't find constraint: \" + constraint_fname + \", searched in current directory and \" + sap_abs_base + \" /hdl/\" + self.project_tags[\"CONSTRAINTS\"][\"board\"]\n\t\t\t\tcontinue\n\t\t\tshutil.copy (constraint_path, abs_proj_base + \"/constraints/\" + constraint_fname)\n\n\t\t#Generate the IO handler\n\t\tinterface_filename = self.project_tags[\"INTERFACE\"]\n\t\tfdict = {\"location\":\"\"}\n\t\tfile_dest = self.project_tags[\"BASE_DIR\"] + \"/rtl/bus/interface\"\n\t\tresult = self.filegen.process_file(filename = interface_filename, file_dict=fdict , directory=file_dest)\n\n\t\tif debug:\n\t\t\tprint \"copy over the dependencies...\"\n\t\tprint \"verilog files: \"\n\t\tfor f in self.filegen.verilog_file_list:\n\t\t\tprint f\n\t\tprint \"dependent files: \"\n\t\tfor d in self.filegen.verilog_dependency_list:\n\t\t\tfdict = {\"location\":\"\"}\n\t\t\tfile_dest = self.project_tags[\"BASE_DIR\"] + \"/dependencies\"\n\t\t\tresult = self.filegen.process_file(filename = d, file_dict = fdict, directory = file_dest)\n\t\t\tprint d\n\t\treturn True", "def gen_project(project_name, project_revision, target, template, working_dir):\n gen_project_tcl(project_name, project_revision,\n target, template, working_dir)\n qsys_files = filter(lambda file: file.endswith(\".qsys\"), target.files_list)\n for file in qsys_files:\n gen_qsys_system_from_qsys_file(file, working_dir)\n\n log_msg = \"Generating project\"\n cmd = f\"cd {working_dir} && {QUARTUS_BIN_DIR}quartus_sh -t make_project.tcl\"\n log_file_path = working_dir + \"project_gen.log\"\n\n run_cmd_and_log(cmd, log_msg, log_file_path)", "def prepare_submit(self, mapping):\n self.dag_path = self.mk_path('%(mex_id)s.dag', mapping)\n self.create_file(self.dag_path,\n self.template['condor.dag_template'], mapping)\n\n self.conf_path = self.mk_path('%(mex_id)s.dag.config', mapping)\n self.create_file(self.conf_path,\n self.template['condor.dag_config_template'], mapping)\n\n self.submit_path = self.mk_path('%(mex_id)s.cmd', mapping)\n self.create_file(self.submit_path,\n self.template['condor.submit_template'], mapping)", "def test_first_run():\n setup_first_run(\".\", True, extra_context={\"number_of_iterations\": 2, \n \"project_name\": \"first_run_test\",\n \"logging_frequency\": 1,\n \"enable_cuda\": False\n })\n\n generated_project_dir = Path(\"first_run_test\")\n assert generated_project_dir.is_dir()\n assert (generated_project_dir / \"facades\" / \"train\" / \"A\" ).is_dir()\n assert (generated_project_dir / \"facades\" / \"train\" / \"B\" ).is_dir()", "def setup(self):\n self.cwd = os.getcwd()\n self.t = tempfile.mkdtemp()\n dir_path = Path(\"packages\")\n tmp_dir = self.t / dir_path\n src_dir = self.cwd / Path(ROOT_DIR, dir_path)\n shutil.copytree(str(src_dir), str(tmp_dir))\n shutil.copytree(Path(CUR_PATH, \"data\", \"dummy_aea\"), Path(self.t, \"dummy_aea\"))\n os.chdir(Path(self.t, \"dummy_aea\"))\n self.runner = CliRunner()", "def makeProjects(self, *versions):\n baseDirectory = FilePath(self.mktemp())\n baseDirectory.createDirectory()\n for version in versions:\n self.makeProject(version, baseDirectory)\n return baseDirectory", "def environment_preparation():\n report_file_path = (\n f\"{os.path.abspath('.')}/{Common.get_config_value('report_location')}\"\n )\n data_location_path = (\n f\"{os.path.abspath('.')}/{Common.get_config_value('data_location')}\"\n 
)\n if f\"{Common.get_config_value('report_location')}\":\n if os.path.isdir(f\"{report_file_path}\"):\n for data_path, directory_list, file_list in os.walk(\n f\"{report_file_path}\"\n ):\n [os.remove(f\"{report_file_path}/{file}\") for file in file_list]\n else:\n os.mkdir(f\"{report_file_path}\")\n workbook = xlwt.Workbook()\n workbook.add_sheet(\"test1\")\n workbook.save(f\"{report_file_path}/report.xls\")\n if (\n f'{Common.get_config_value(\"data_location\")}'\n not in Common.get_config_value(\"unsupported_path\")\n ):\n try:\n if os.path.isdir(f\"{data_location_path}\"):\n for data_path, directory_list, file_list in os.walk(\n f\"{data_location_path}\"\n ):\n [os.remove(f\"{data_path}/{file}\") for file in file_list]\n else:\n os.mkdir(f\"{data_location_path}\")\n except OSError as ex:\n Common.logger.warning(f\"Path not found {ex}\")\n else:\n Common.logger.warning(f\"Path not found\")\n Common.logger.info(\"Environment preparation completed successfully\")", "def run():\n build_no_documentation()\n build_sphinx_build()\n #build_sphinx_pdf()\n build_graphviz_files()", "def create(self):\n\n for key in self.dirs.keys():\n if type(self.dirs[key]) == dict:\n for dir_type in self.dirs[key].keys():\n create_if_not_exists(self.dirs[key][dir_type])\n else:\n create_if_not_exists(self.dirs[key])\n\n self.inputFileIds = {}\n for sample_name, sample_info in self.samples.items():\n if not sample_info['use_it']:\n continue\n\n process_name = sample_info[\"process_name_specific\"]\n is_mc = (sample_info[\"type\"] == \"mc\")\n\n if not is_mc:\n continue\n\n logging.info(\"Creating configuration files to run '%s' for sample %s\" % (self.executable, process_name))\n\n inputFileList = generateInputFileList(sample_info, self.max_files_per_job)\n key_dir = getKey(process_name)\n\n outputFile = os.path.join(\n self.dirs[key_dir][DKEY_HISTO], \"%s.root\" % process_name\n )\n self.outputFiles[process_name] = {\n 'inputFiles' : [],\n 'outputFile' : outputFile,\n }\n if os.path.isfile(outputFile) and tools_is_file_ok(outputFile, min_file_size = 2000):\n logging.info('File {} already exists --> skipping job'.format(outputFile))\n continue\n\n for jobId in inputFileList.keys():\n\n key_file = getKey(sample_name, jobId)\n\n self.inputFiles[key_file] = inputFileList[jobId]\n if len(self.inputFiles[key_file]) == 0:\n logging.warning(\n \"'%s' = %s --> skipping job !!\" % (key_file, self.inputFiles[key_file])\n )\n continue\n\n self.cfgFiles_projection[key_file] = os.path.join(\n self.dirs[key_dir][DKEY_CFGS], \"project_%s_%i_cfg.txt\" % (process_name, jobId)\n )\n self.outputFiles_tmp[key_file] = os.path.join(\n self.dirs[key_dir][DKEY_HISTO_TMP], \"histogram_%i.root\" % jobId\n )\n self.logFiles_projection[key_file] = os.path.join(\n self.dirs[key_dir][DKEY_LOGS], \"project_%s_%i.log\" % (process_name, jobId)\n )\n self.scriptFiles_projection[key_file] = os.path.join(\n self.dirs[key_dir][DKEY_CFGS], \"project_%s_%i_cfg.sh\" % (process_name, jobId)\n )\n projection_module = self.projection_module\n if projection_module == \"count\":\n projection_module = \"countHistogramAll\"\n if sample_name.startswith('/TTTo'):\n projection_module += \"CompTopRwgt\"\n elif sample_info['sample_category'].startswith('ttH'):\n projection_module += \"CompHTXS\"\n elif isSplitByNlheJet(process_name):\n projection_module += \"SplitByLHENjet\"\n elif isSplitByNlheHT(process_name):\n projection_module += \"SplitByLHEHT\"\n elif isSplitByNlheJetHT(process_name, sample_name):\n projection_module += \"SplitByLHENjetHT\"\n 
self.jobOptions_sbatch[key_file] = {\n 'histName' : process_name,\n 'inputFiles' : self.inputFiles[key_file],\n 'cfgFile_path' : self.cfgFiles_projection[key_file],\n 'outputFile' : self.outputFiles_tmp[key_file],\n 'logFile' : self.logFiles_projection[key_file],\n 'scriptFile' : self.scriptFiles_projection[key_file],\n 'projection_module' : projection_module,\n }\n if self.projection_module != 'puHist':\n self.jobOptions_sbatch[key_file]['ref_genWeight'] = self.ref_genWeights[process_name]\n if process_name not in self.ref_genWeights:\n raise RuntimeError(\"Unable to find reference LHE weight for process %s\" % process_name)\n self.createCfg_project(self.jobOptions_sbatch[key_file])\n self.outputFiles[process_name]['inputFiles'].append(self.outputFiles_tmp[key_file])\n\n if self.is_sbatch:\n logging.info(\"Creating script for submitting '%s' jobs to batch system\" % self.executable)\n self.num_jobs['project'] += self.createScript_sbatch(\n self.executable, self.sbatchFile_projection, self.jobOptions_sbatch\n )\n\n logging.info(\"Creating Makefile\")\n lines_makefile = []\n self.addToMakefile_project(lines_makefile)\n self.addToMakefile_hadd(lines_makefile)\n if self.plot:\n self.addToMakefile_plot(lines_makefile)\n self.addToMakefile_finalHadd(lines_makefile)\n self.createMakefile(lines_makefile)\n logging.info(\"Done\")\n\n return self.num_jobs", "def test_explant(install_test_files, data_dir):\n with make_workdir() as workdir:\n cl = [\"bcbio_nextgen.py\",\n get_post_process_yaml(data_dir, workdir),\n os.path.join(data_dir, os.pardir, \"1_explant\"),\n os.path.join(data_dir, \"run_info-explant.yaml\")]\n subprocess.check_call(cl)", "def actionPrepare():\n \n #Do preparation that is common for all platforms. Pass true if ortc is one of targets\n result = Preparation.setUp('ortc' in Settings.targets)\n if result != NO_ERROR:\n #Terminate execution, because prepration common for all targets and platforms has failed.\n System.stopExecution(result)\n\n for target in Settings.targets:\n for platform in Settings.targetPlatforms:\n for cpu in Settings.targetCPUs:\n if System.checkIfCPUIsSupportedForPlatform(cpu,platform):\n for configuration in Settings.targetConfigurations:\n Logger.printStartActionMessage('Prepare ' + target + ' ' + platform + ' ' + cpu + ' ' + configuration,ColoredFormatter.YELLOW)\n result = Preparation.run(target, platform, cpu, configuration)\n Summary.addSummary(ACTION_PREPARE, target, platform, cpu, configuration, result, Preparation.executionTime)\n if result != NO_ERROR:\n Logger.printEndActionMessage('Failed preparing ' + target + ' ' + platform + ' ' + cpu + ' ' + configuration,ColoredFormatter.RED)\n #Terminate script execution if stopExecutionOnError is set to True in userdef\n shouldEndOnError(result)\n else:\n Logger.printEndActionMessage('Prepare ' + target + ' ' + platform + ' ' + cpu + ' ' + configuration)", "def createproject(destinationdir):\n print(f\"Writing json data files to {destinationdir}\")\n return", "def main():\n\n parser = argparse.ArgumentParser(description=\"generateTestStubs\")\n\n parser.add_argument(\"taskFile\",\n help=\"Path for assignment file.\")\n\n args = parser.parse_args()\n\n if not os.path.exists(args.taskFile):\n print(\"Task file does not exist.\")\n sys.exit(1)\n\n taskMgr = EEWebLPProject()\n taskMgr.initLP()\n\n #taskMgr.listProjects()\n #taskMgr.loadTree([\"project_id=8008922\"])\n tasks = taskMgr.getTasks([\"project_id=6890048\"],parent_id=8008922)\n\n fileByAssignee = taskMgr.getTaskOwners(args.taskFile)\n 
taskMgr.updateTaskOwners(fileByAssignee,tasks)", "def __setup(self):\n\n backupFolder = self.config['destination']\n self.__createBackupFolder(backupFolder)\n\n # create the project based backup folder\n today = date.today()\n\n if 'projects' in self.config:\n for project in self.config['projects'].iterkeys():\n timestamp = datetime.now().strftime('%d-%H-%M-%S')\n backupDestination = os.path.join(backupFolder, project, str(today.year), today.strftime('%m'), timestamp)\n self.__createBackupFolder(backupDestination)\n self.config['projects'][project]['destination'] = backupDestination", "def makeProjects(self, *versions):\n baseDirectory = FilePath(self.mktemp())\n for version in versions:\n self.makeProject(version, baseDirectory)\n return baseDirectory", "def main(args):\n\n for dir in args.dirs:\n # prepdir = mdssprep.Directory(dir,exclude=['file_*3*','file_2??'],include=['file_*5*'],maxarchivesize=mdssprep.one_meg*200.,minsize=mdssprep.one_meg*100.)\n prepdir = mdssprep.Directory(dir)\n prepdir.archive(dryrun=False)", "def pre_start(self):\n self.make_runpath_dirs()", "def Write(self):\n template_mappings = {\n 'pypi_token': self._project_definition.pypi_token or ''}\n\n file_content = []\n\n template_data = self._GenerateFromTemplate('environment', template_mappings)\n file_content.append(template_data)\n\n if self._project_definition.name not in self._PROJECTS_WITHOUT_BUILD:\n if self._project_definition.pypi_token:\n template_data = self._GenerateFromTemplate(\n 'pypi_token', template_mappings)\n file_content.append(template_data)\n\n template_data = self._GenerateFromTemplate('matrix', template_mappings)\n file_content.append(template_data)\n\n template_data = self._GenerateFromTemplate('install', template_mappings)\n file_content.append(template_data)\n\n if self._project_definition.name != 'l2tdevtools':\n template_data = self._GenerateFromTemplate(\n 'install_l2tdevtools', template_mappings)\n file_content.append(template_data)\n\n if self._project_definition.name in self._PROJECTS_WITHOUT_BUILD:\n template_filename = 'build_off'\n else:\n template_filename = 'build'\n\n template_data = self._GenerateFromTemplate(\n template_filename, template_mappings)\n file_content.append(template_data)\n\n template_data = self._GenerateFromTemplate('test_script', template_mappings)\n file_content.append(template_data)\n\n if self._project_definition.name not in self._PROJECTS_WITHOUT_BUILD:\n template_data = self._GenerateFromTemplate('artifacts', template_mappings)\n file_content.append(template_data)\n\n if self._project_definition.pypi_token:\n template_data = self._GenerateFromTemplate(\n 'deploy_script', template_mappings)\n file_content.append(template_data)\n\n file_content = ''.join(file_content)\n\n with io.open(self.PATH, 'w', encoding='utf-8') as file_object:\n file_object.write(file_content)", "def setUp_base(self):\n self._create_main_project_and_root()", "def task_output_police_precincts():\n for dept in Department.list():\n yield {\n 'name': dept.name,\n 'file_dep': [dept.police_precincts_path],\n 'targets': [dept.police_precincts_output],\n 'actions': ['cp %(dependencies)s %(targets)s'],\n 'clean': True,\n }", "def basic_project(tmp_path):\n build_dir = tmp_path / BUILD_DIRNAME\n build_dir.mkdir()\n\n # the metadata\n metadata_data = {\n \"name\": \"name-from-metadata\",\n \"summary\": \"test-summ\",\n \"description\": \"text\",\n }\n metadata_file = tmp_path / \"metadata.yaml\"\n metadata_raw = yaml.dump(metadata_data).encode(\"ascii\")\n 
metadata_file.write_bytes(metadata_raw)\n\n # a lib dir\n lib_dir = tmp_path / \"lib\"\n lib_dir.mkdir()\n ops_lib_dir = lib_dir / \"ops\"\n ops_lib_dir.mkdir()\n ops_stuff = ops_lib_dir / \"stuff.txt\"\n ops_stuff.write_bytes(b\"ops stuff\")\n\n # simple source code\n src_dir = tmp_path / \"src\"\n src_dir.mkdir()\n charm_script = src_dir / \"charm.py\"\n charm_script.write_bytes(b\"all the magic\")\n\n # the license file\n license = tmp_path / \"LICENSE\"\n license.write_text(\"license content\")\n\n # other optional assets\n icon = tmp_path / \"icon.svg\"\n icon.write_text(\"icon content\")\n\n # README\n readme = tmp_path / \"README.md\"\n readme.write_text(\"README content\")\n\n yield tmp_path", "def run(opts, args):\n create_new_project()", "def write_setup(project_name, root_dir):\r\n setup_path = get_file_path(root_dir, None, \"setup.py\") #Get the path for setup.py\r\n setup_content = get_setup_text(project_name)\r\n \r\n setup_file = open(setup_path, 'w')\r\n setup_file.write(setup_content)\r\n setup_file.close()\r\n print_file(setup_path, \" +++\")", "def prepare(self):\n if self.opts['verbose']:\n print(\"Preparing dataset (one-time operation)...\")\n # Create paths files and load them back in\n self._build_ID_sets()\n self._create_ID_files()\n self._load_ID_files()\n if self.opts['verbose']:\n print(\"... done with preparing the dataset.\")", "def setUp(self):\n self.outdir = \"tests/out/pdftotext\"\n if not os.path.exists(self.outdir):\n os.makedirs(self.outdir)\n else:\n files = glob.glob(self.outdir)\n for f in files:\n if os.path.isfile(f):\n os.remove(f)", "def prepare():\n with cd(env.code_dir):\n run('svn up api')\n run('svn up pyramid-oauth2')", "def setup(self, registers):\n \"\"\" tasks before any generation functions are called \"\"\"\n pass", "def task_prepare_build():\n\n import sys\n\n python_path = sys.executable.split(os.sep)\n venv_path = str(Path(os.sep.join(python_path[:-2])))\n\n def get_dst_path():\n import platform\n\n print(f\"Going on with {venv_path} as the virtual environment exclusively used for using pyinstaller.\")\n arch = platform.system()\n if arch == \"Windows\":\n return Path(venv_path) / \"Lib/site-packages/mad_gui/qt_designer/build/\"\n if arch in [\"Linux\", \"Darwin\"]:\n python_dirs = os.listdir(Path(venv_path) / \"lib/\")\n warnings.warn(\n f\"dodo.py: Assuming your python 3.7 installation is in {Path(venv_path)}/lib/{python_dirs[0]}\"\n )\n return Path(venv_path) / \"lib\" / python_dirs[0] / \"site-packages/mad_gui/qt_designer/build/\"\n raise ValueError(\"What operating system is this?!\")\n\n def set_up_paths():\n if not os.path.exists(get_dst_path().parent):\n raise FileNotFoundError(\n \"Apparently mad_gui is not installed in this environemnt. Use `pip install . 
` to do so.\"\n )\n dst_path = get_dst_path()\n os.makedirs(dst_path, exist_ok=True)\n\n def convert_ui_to_py():\n dst_path = get_dst_path()\n ui_files = [file for file in os.listdir(dst_path.parent) if \".ui\" in file]\n print(\"\\n\")\n for file in ui_files:\n print(f\"Converting from: {dst_path.parent}{os.sep}{file}\")\n print(f\"To: {dst_path}{os.sep}{file.split('.')[0]}.py\\n\")\n os.popen(f\"pyside2-uic -o {dst_path}{os.sep}{file.split('.')[0]}.py {dst_path.parent}{os.sep}{file}\")\n\n print(\n \"Info: These conversion should take place in the virutal environment you are going to use with \"\n \"pyinstaller.\"\n )\n\n return {\n \"actions\": [set_up_paths, convert_ui_to_py],\n \"verbosity\": 2,\n }", "def steps(self):\n\n if not os.path.exists(self.build_path):\n raise exceptions.ProjectNotBuildError()\n\n steps = []\n for filename in os.listdir(self.build_path):\n match = re.match(r'(\\d{4})_(.*)\\.json', filename)\n if not match:\n continue\n\n with open(os.path.join(self.build_path, filename), 'r') as f:\n template = json.loads(f.read())\n\n template_type = 'custom' if '_type' in template else 'cloudformation'\n steps.append((int(match.groups()[0]), match.groups()[1], filename, template_type))\n steps = sorted(steps, key=lambda x: x[0])\n\n return steps", "def create_project(directory):\n \n if os.path.exists(directory):\n raise OSError(\"Directory '%s' already exists\" % directory)\n\n data_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), 'data'))\n \n \n def callback(src, names):\n base = os.path.relpath(src, data_dir)\n for name in names:\n print(\"\\033[92mcreate\\033[0m {:s}\".format(os.path.join(directory, base, name)))\n return []\n\n\n shutil.copytree(data_dir, directory, ignore=callback)", "def setup():\n if os.path.exists(\"observations.p\"):\n os.remove(\"observations.p\")\n else:\n pass", "def prepare():\n sh('docker build --rm -t {image} {dir}', image=IMAGE, dir=os.path.dirname(__file__))", "def prepare_runs(args):\n output_directory = _prepare_output_dir(args.output_directory)\n z_score_dir = args.z_score_dir\n region_list = args.region_list \n if args.region_list is None:\n try:\n flanking_region = int(args.flanking_region)\n except ValueError:\n logging.error('Flanking region argument needs to be an integer')\n sys.exit(COMMAND_LINE_ERROR)\n build = args.build\n bed_directory = args.bed_directory\n # Create the SNPList\n try:\n min_maf = float(args.maf)\n except:\n logging.error(\"Min Maf -m or --min-maf needs to be an floating point number\")\n sys.exit(COMMAND_LINE_ERROR)\n if args.region_list is not None:\n region_list = {}\n snp_list = []\n with open(args.region_list) as input_file:\n # When using no flaking region SNP must be valid, but it doesn't actually matter what it is, need to ensure that is actually the case.\n for i, line in enumerate(input_file):\n rsid = str(i)+ \"_\" + ''.join(line.strip().split(\"\\t\"))\n chromosome = line.strip().split(\":\")[0] \n snp = Snp(chromosome,\"1\",rsid)\n snp_list.append(snp)\n region_list[snp.rsid] = line.strip()\n else:\n snp_list = SnpList(args.snp_list, build)\n logging.info(snp_list)\n # Locus to process\n # population_to_extract_vcf\n if not args.annotation_only:\n no_flanking = args.flanking_units\n if no_flanking:\n raise NotImplementedError(\"Using a number of flanking SNPs instead of a region is not supported\")\n populations= args.populations.split(',')\n logging.info(\"Populations to process: {0}\".format(populations))\n loci = []\n gemini_databases = []\n output_vcfs = []\n for snp in 
snp_list:\n logging.info('Preparing output files for SNP {0}'.format(snp.rsid))\n locus = snp.rsid\n loci.append(locus)\n logging.info(\"Obtaining VCF file from the 1000 genomes project\")\n if region_list is not None:\n vcf = get_vcf_file(snp, string_region=region_list[locus])\n else: \n vcf = get_vcf_file(snp, flanking_region=flanking_region)\n for population in populations:\n tmp_vcf = extract_population_from_1000_genomes(vcf=vcf, super_population=population)\n z_score_file = get_relevant_zscore(snp.chrom, population, z_score_dir)\n pos_list_zscore = create_pos_hash_table(z_score_file)\n output_vcf = generate_zscore_and_vcf_output(output_directory=output_directory, zscore_hash=pos_list_zscore, vcf=tmp_vcf, locus=locus,population=population, multiply_rsquare=args.multiply_rsquare)\n if bed_directory is None:\n logging.info(\"Creating gemini database\")\n # TODO: Fix broxen gemini referenec\n gemini_databases.append(create_gemini_database(vcf=output_vcf))\n vcf_to_plink(locus, output_directory=output_directory, vcf=output_vcf, population=population)\n plink_to_ld_matrix(locus, output_directory=output_directory, population=population)\n logging.info(\"Generate transancestrals matrices\")\n generate_transancestral_output(loci, populations, output_directory)\n if bed_directory is None:\n logging.info(\"Generating annotation matrices to be used with Paintor\")\n logging.info(gemini_databases)\n generate_and_write_encode_annotations(databases=gemini_databases, output_directory=output_directory, loci=snp_list)\n else:\n logging.info(\"Annotation using bed files\")\n generate_bed_file_annotations(loci=loci, bed_directory=bed_directory, output_directory=output_directory) \n # So finally we need to fix the LD matrices for inputting into PAINTOR. \n\n with open(os.path.join(output_directory, 'input.files'), 'w') as out_f:\n for snp in snp_list:\n out_f.write(snp.rsid +'\\n')\n # Remove .tbi files\n for file in os.listdir('.'):\n if fnmatch.fnmatch(file, '*.tbi'):\n try:\n os.remove(file)\n except OSError:\n logging.warning(\"Could not remove a .tbi file from the 1000 genomes tabix run\")\n else: \n loci = []\n for snp in snp_list:\n loci.append(snp.rsid)\n if bed_directory is not None:\n logging.info(\"Annotation using bed files\")\n generate_bed_file_annotations(loci=loci, bed_directory=bed_directory, output_directory=output_directory) \n logging.info(\"Finemapping file preparation complete\")", "def new(root: str = \".\", name: str = \"piccolo_project\"):\n tree = os.walk(TEMPLATE_DIR)\n\n router = get_routing_framework()\n\n template_context = {\n \"router\": router,\n \"router_dependencies\": ROUTER_DEPENDENCIES.get(router) or [router],\n \"server\": get_server(),\n \"project_identifier\": name.replace(\" \", \"_\").lower(),\n }\n\n for directory in tree:\n dir_path, sub_dir_names, file_names = directory # type: ignore\n\n output_dir_path = os.path.join(root, dir_path.split(TEMPLATE_DIR)[-1])\n\n if not os.path.exists(output_dir_path):\n folder_name = output_dir_path.split(\"/\")[-1]\n if folder_name.startswith((\"_\", \".\")):\n continue\n os.mkdir(dir_path)\n\n for sub_dir_name in sub_dir_names:\n if sub_dir_name.startswith(\"_\"):\n continue\n\n sub_dir_path = os.path.join(output_dir_path, sub_dir_name)\n if not os.path.exists(sub_dir_path):\n os.mkdir(sub_dir_path)\n\n for file_name in file_names:\n if file_name.startswith(\"_\") and file_name != \"__init__.py.jinja\":\n continue\n\n extension = file_name.rsplit(\".\")[0]\n if extension in (\"pyc\",):\n continue\n\n if 
file_name.endswith(\".jinja\"):\n output_file_name = file_name.replace(\".jinja\", \"\")\n template = Environment(\n loader=FileSystemLoader(searchpath=dir_path)\n ).get_template(file_name)\n\n output_contents = template.render(**template_context)\n\n if output_file_name.endswith(\".py\"):\n try:\n output_contents = black.format_str(\n output_contents,\n mode=black.FileMode(line_length=80),\n )\n except Exception as exception:\n print(f\"Problem processing {output_file_name}\")\n raise exception from exception\n\n with open(\n os.path.join(output_dir_path, output_file_name), \"w\"\n ) as f:\n f.write(output_contents)\n else:\n if file_name.endswith(\".jinja_raw\"):\n output_file_name = file_name.replace(\n \".jinja_raw\", \".jinja\"\n )\n else:\n output_file_name = file_name\n\n shutil.copy(\n os.path.join(dir_path, file_name),\n os.path.join(output_dir_path, output_file_name),\n )\n\n print(\n \"Run `pip install -r requirements.txt` and `python main.py` to get \"\n \"started.\"\n )", "def setup_for_random_output(self, testcase_dependencies):\n os.chdir(self.tmp_work)\n for container in self.solution_containers:\n self._setup_single_directory_for_random_output(\n container.directory,\n testcase_dependencies\n )\n self._run_pre_commands(container.directory)\n\n if container.import_router:\n router_path = os.path.join(self.tmp_autograding, \"bin\", \"submitty_router.py\")\n self.log_message(f\"COPYING:\\n\\t{router_path}\\n\\t{container.directory}\")\n shutil.copy(router_path, container.directory)\n autograding_utils.add_all_permissions(container.directory)", "def setup(self):\n super(__class__, self).setup()\n # construct command line call\n setup_script = '%s/tfMRI.py' % \\\n os.environ['ABCDTASKPREPDIR']\n arg1 = self.kwargs['path']\n arg2 = self.kwargs['sourcedata_root']\n arg3 = self.kwargs['subject']\n arg4 = self.kwargs['session']\n anat_metadata = self.config.get_bids('t1w_metadata')\n # get make/software information\n make = anat_metadata['Manufacturer']\n if make == 'GE':\n reg = re.compile(r'.*(DV2[56]).*')\n software_version = reg.match(anat_metadata[\n 'SoftwareVersions']).group(1)\n else:\n software_version = 'NA'\n cmd = ' '.join((setup_script, arg1, arg2, arg3, arg4, make,\n software_version))\n print(cmd)\n\n log_dir = self._get_log_dir()\n out_log = os.path.join(log_dir, self.__class__.__name__ + '_setup.out')\n err_log = os.path.join(log_dir, self.__class__.__name__ + '_setup.err')\n result = self.call(cmd, out_log, err_log)", "def run_test(self):\n\n # populate *_ps sets\n self.enter_project_file()\n\n # populate *_dir sets\n self.enter_directories()\n\n # The files in the directories makes up the largest possible set of files\n self.result_files = self.result_files_dir\n self.design_files = self.design_files_dir\n self.design_space_files = self.design_space_files_dir\n self.test_bench_files = self.test_bench_files_dir\n\n # populate *_ms sets\n self.enter_meta_results_file()\n\n # populate *_OK sets\n self.check_analysis_status()\n\n df = {'design_files_dir' : list(self.design_files_dir),'design_files_pr' : list(self.design_files_pr),\n 'design_files_ms' : list(self.design_files_ms), 'design_files_OK' : list(self.design_files_OK)}\n\n ds = {'design_space_files_dir' : list(self.design_space_files_dir),\n 'design_space_files_pr' : list(self.design_space_files_pr)}\n\n rs = {'result_files_dir' : list(self.result_files_dir), 'result_files_ms' : list(self.result_files_ms),\n 'result_files_OK' : list(self.result_files_OK)}\n\n tb = {'test_bench_files_dir' : 
list(self.test_bench_files_dir),\n 'test_bench_files_ms' : list(self.test_bench_files_ms)}\n\n srl = SummaryReportsLinks(self.result_files_dir)\n\n lf = {'files_linked_from_sum_reps' : srl.get_files(),\n 'folders_linked_from_sum_reps' : srl.get_folders()}\n\n # 'test_bench_files_pr' : list(self.test_bench_files_pr),\n \n json_test = {'design_files' : df, 'design_space_files' : ds, 'result_files' : rs,\n 'test_bench_files' : tb, 'stat_files' : self.stat_files,\n 'files_linked_from_sum_reps' : lf}\n\n with open('test_run.json','wb') as f_out:\n json.dump(json_test, f_out, indent=4)", "def main():\n p = Path.cwd()\n path = str(p)\n\n files = tracked_files()\n scripts = search_dir(p, path, files, '.py')\n scripts = [i for i in scripts if 'tests/' not in i[:7]]\n scripts = list(map(partial(process, p), scripts))\n\n for script in scripts:\n script['display'] = script['name'].replace('_', '\\_')\n write_readme(scripts)", "def _setup(self):\n self._raw_top_dir = os.path.join(self._snippets_dir,\"raw\",\"dynamic\")\n if not os.path.exists(self._raw_top_dir):\n os.mkdir(self._raw_top_dir)\n\n self._trec_top_dir = os.path.join(self._snippets_dir,\"trec\",\"dynamic\")\n if not os.path.exists(self._trec_top_dir):\n os.mkdir(self._trec_top_dir)\n\n self._temp_top_dir = os.path.join(self._snippets_dir,\"temp\",\"dynamic\")\n if not os.path.exists(self._temp_top_dir):\n os.mkdir(self._temp_top_dir)\n\n self._snippet_result_top_dir = os.path.join(self._snippets_dir,\"result\",\"dynamic\")\n if not os.path.exists(self._snippet_result_top_dir):\n os.mkdir(self._snippet_result_top_dir)\n\n self._snippet_index_top_dir = os.path.join(self._snippets_dir,\"index\",\"dynamic\")\n if not os.path.exists(self._snippet_index_top_dir):\n os.mkdir(self._snippet_index_top_dir)\n\n self._para_top_dir = os.path.join(self._snippets_dir,\"para\",\"dynamic\")\n if not os.path.exists(self._para_top_dir):\n os.mkdir(self._para_top_dir)", "def setUp(self):\n if os.path.isdir(OUTPUT_FOLDER):\n shutil.rmtree(OUTPUT_FOLDER)\n os.mkdir(OUTPUT_FOLDER)\n if OUTPUT_FOLDER not in sys.path:\n sys.path.append(OUTPUT_FOLDER)", "def test_prepare_java_folder(self):\n self.obj.execution.merge({\"scenario\": {\"script\": RESOURCES_DIR + \"selenium/junit/java/\"}})\n self.obj.prepare()\n self.assertIsInstance(self.obj.runner, JavaTestRunner)\n prepared_files = listdir(self.obj.runner.working_dir)\n java_files = [fname for fname in prepared_files if fname.endswith(\".java\")]\n class_files = [fname for fname in prepared_files if fname.endswith(\".class\")]\n jars = [fname for fname in prepared_files if fname.endswith(\".jar\")]\n self.assertEqual(len(java_files), 0)\n self.assertEqual(len(class_files), 2)\n self.assertEqual(len(jars), 1)", "def main(arguments):\n # represents project number\n number = arguments[1]\n\n # configuration\n base_directory = \"/Users/inxiti/code/LearnCTheHardWay/\"\n skeleton_files = [\n [\"Makefile.skeleton\", \"Makefile\"],\n [\"skeleton.c\", \"ex{0}.c\".format(number)],\n [\"dbg.h\", \"dbg.h\"]\n ]\n directory = \"{0}ex{1}/\".format(base_directory, number)\n\n # create project, copy files, and tailor them if directory does not exist\n if not os.path.exists(directory):\n create(number, skeleton_files, directory, base_directory)\n else:\n print(\"{0} already exists.\".format(directory))", "def assemble_project(project, base_dir, build_result=None):\n resources = project.resources.all()\n\n if project.is_standard_project_type:\n # Write out the sources, resources, and wscript and jshint file\n 
assemble_source_files(project, base_dir)\n if project.project_type != 'rocky':\n assemble_resource_directories(project, base_dir)\n assemble_resources(base_dir, project.resources_path, resources)\n with open(os.path.join(base_dir, 'wscript'), 'w') as wscript:\n wscript.write(generate_wscript_file(project))\n with open(os.path.join(base_dir, 'pebble-jshintrc'), 'w') as jshint:\n jshint.write(generate_jshint_file(project))\n elif project.project_type == 'simplyjs':\n # SimplyJS is a particularly special case\n assemble_simplyjs_sources(project, base_dir, build_result)\n elif project.project_type == 'pebblejs':\n # PebbleJS projects have to import the entire pebblejs library, including its wscript\n assemble_resource_directories(project, base_dir)\n shutil.rmtree(base_dir)\n shutil.copytree(settings.PEBBLEJS_ROOT, base_dir)\n assemble_resources(base_dir, project.resources_path, resources, type_restrictions=('png', 'bitmap'))\n assemble_source_files(project, base_dir)\n\n # All projects have a manifest\n manifest_filename = manifest_name_for_project(project)\n manifest_dict = generate_manifest_dict(project, resources)\n\n with open(os.path.join(base_dir, manifest_filename), 'w') as f:\n f.write(json.dumps(manifest_dict))", "def _setup(self):\n self._raw_dir = os.path.join(self._snippets_dir,\"raw\",\"static\")\n if not os.path.exists(self._raw_dir):\n os.mkdir(self._raw_dir)\n\n self._trec_dir = os.path.join(self._snippets_dir,\"trec\",\"static\")\n if not os.path.exists(self._trec_dir):\n os.mkdir(self._trec_dir)\n\n self._temp_dir = os.path.join(self._snippets_dir,\"temp\",\"static\")\n if not os.path.exists(self._temp_dir):\n os.mkdir(self._temp_dir)\n\n self._para_dir = os.path.join(self._snippets_dir,\"para\",\"static\")\n if not os.path.exists(self._para_dir):\n os.mkdir(self._para_dir)\n\n self._snippet_result_dir = os.path.join(self._snippets_dir,\"result\",\"static\")\n if not os.path.exists(self._snippet_result_dir):\n os.mkdir(self._snippet_result_dir)\n\n self._snippet_index_dir = os.path.join(self._snippets_dir,\"index\",\"static\")\n if not os.path.exists(self._snippet_index_dir):\n os.mkdir(self._snippet_index_dir)\n\n \n\n\n self._index_para = os.path.join(self._para_dir,\"index_para\")\n\n self._temp_query_para = os.path.join(self._para_dir,\"temp_query_para\")\n\n self._index_list = os.path.join(self._para_dir,\"static_index_list\")\n \n self._orf = os.path.join(self._snippet_result_dir,\"orf\")\n\n self._oqf = os.path.join(self._temp_dir,\"oqf\")\n \n self._temp_output = os.path.join(self._temp_dir,\"temp_output\")\n\n with open(self._index_list,\"w\") as f:\n f.write(self._snippet_index_dir+\"\\n\")\n\n self._temp_query_builder = IndriQueryFactory(count=10000,\n rule=self._retrieval_method)\n\n self._oqf_builder = IndriQueryFactory(count=30,\n rule=self._retrieval_method)", "def preparation(self):\n # [1] Makes a dir for saving results.\n # if 'Result' dir already exists,\n # a 'temporary' dir will be made.\n\n try:\n os.mkdir(self.dir_for_saving_result)\n except FileExistsError:\n self.viewer.display_message(\"Made a temporary directory.\")\n self.dir_for_saving_result = 'temporary'\n os.mkdir('temporary')\n\n # [2] Copies config file into the same dir as the one where results will be stored\n shutil.copy2(self.config_file_name, self.dir_for_saving_result)", "def create_aiida_project_environment(self):\n try:\n self.create_folder_structure()\n self.build_python_environment()\n self.install_packages_from_index()\n except Exception:\n self.exit_on_exception()\n raise\n 
self.create_spec_entry()", "def generate():\n local('cd doc && make clean && make html')", "def prepare(args) :\n from preparer import prepare_genome\n prepare_genome(args)", "def setUp(self):\n # make directory test\n self.temp_dir_string = '/tmp/test_for_seqprep/'\n create_dir(self.temp_dir_string)\n\n # make directory with spaces test\n self.temp_dir_string_space = '/tmp/test for seqprep/'\n create_dir(self.temp_dir_string_space)\n \n # create temp file path strings\n self.test_fn1 = os.path.join(self.temp_dir_string,'reads1.fastq')\n self.test_fn1_space = os.path.join(self.temp_dir_string_space, \n 'reads1.fastq')\n self.test_fn2 = os.path.join(self.temp_dir_string,'reads2.fastq')\n self.test_fn2_space = os.path.join(self.temp_dir_string_space,\n 'reads2.fastq')", "def setup(self):\n skip_start = False\n if self.inst_kwargs.get('dst_type', 1) == 0:\n skip_start = True\n if os.path.isfile(self.tempfile): # pragma: debug\n os.remove(self.tempfile)\n super(TestCisPlyOutput, self).setup(skip_start=skip_start)", "def _generate_and_load_initial_batch(self, working_directory: Path):\n\n template_dir = Path(working_directory) / \"template_1\"\n template_dir.mkdir()\n # changes here should often be reflected in\n # data_generator_opts and data_loader_opts\n\n channel_decl = self.channel_configs[0]\n\n plugin_options = {\n \"pid\": \"0\",\n \"big_ids\": \"True\",\n }\n # if it's efficient to do the whole load in one go, let's just do that.\n if self.run_until.gap < MIN_PORTION_SIZE:\n num_records = self.run_until.gap\n else:\n num_records = 1 # smallest possible batch to get to parallelizing fast\n results = self._generate_and_load_batch(\n template_dir,\n channel_decl.org_config,\n {\n \"generator_yaml\": self.options.get(\"recipe\"),\n \"num_records\": num_records,\n \"num_records_tablename\": self.run_until.sobject_name or COUNT_REPS,\n \"loading_rules\": self.loading_rules,\n \"vars\": channel_decl.merge_recipe_options(self.recipe_options),\n \"plugin_options\": plugin_options,\n \"bulk_mode\": self.bulk_mode,\n },\n )\n self.update_running_totals_from_load_step_results(results)\n\n # rename directory to reflect real number of sets created.\n wd = SnowfakeryWorkingDirectory(template_dir)\n if self.run_until.sobject_name:\n self.sets_finished_while_generating_template = wd.get_record_counts()[\n self.run_until.sobject_name\n ]\n else:\n self.sets_finished_while_generating_template = num_records\n\n new_template_dir = data_loader_new_directory_name(template_dir, self.run_until)\n shutil.move(template_dir, new_template_dir)\n template_dir = new_template_dir\n\n # don't send data tables to child processes. 
All they\n # care about are ID->OID mappings\n wd = SnowfakeryWorkingDirectory(template_dir)\n self._cleanup_object_tables(*wd.setup_engine())\n\n return template_dir, wd.relevant_sobjects()", "def makeprojects(working_directory=None, args=None):\n from .__main__ import main\n if args is None:\n args = []\n return main(working_directory, args)", "def pre_testing(predictor_exe, parameters_xml, test_param, queries):\n\n run('mkdir -v tmp-testing', shell=True)\n pred = 'Fiana' if 'Fiana' in predictor_exe else 'Anna'\n run('mkdir -v tmp-testing/clarity-{}'.format(pred), shell=True)\n print('The temporary files will be saved in the directory tmp-testing')\n for i in PARAMS:\n print('\\n ******** Running for: {} documents ******** \\n'.format(i))\n output = 'tmp-testing/clarity-{}/predictions-{}'.format(pred, i)\n run('{} {} -{}={} {} > {}'.format(predictor_exe, parameters_xml, test_param, i,\n queries, output), shell=True)", "def build_step(self):\n run_cmd('./compile.sh', log_all=True, simple=True, log_ok=True)", "def generate(experiment, ifilename, parameterarray):\n import numpy as np\n import os\n # create file in fms_tmp and copy in requisite files\n rsyncstring = \"rsync -a --exclude='climspinup' \\\n'/network/aopp/hera/mad/bakerh/fms_tmp/climspinup/' \\\n'/network/aopp/hera/mad/bakerh/fms_tmp/\" + experiment + \"'\"\n os.system(rsyncstring)\n # separate code to change run_names and write initial files\n runfile = open('/home/bakerh/fms/exp/' + experiment +\n '/run/' + 'runfile', 'w')\n runfile.write('#!/bin/csh -f\\n')\n for i in range(np.ma.size(parameterarray, axis=0)-1):\n ifile = open('/home/bakerh/fms/exp/' + experiment +\n '/run/' + ifilename, 'r')\n lines = ifile.readlines()\n ifile.close()\n ofile = open('/home/bakerh/fms/exp/' + experiment + '/run/' +\n parameterarray[i+1, 0], 'w')\n for line in lines:\n if line.find('label for') != -1:\n ofile.write('set run_name = ' + parameterarray[i+1, 0] + '\\n')\n else:\n ofile.write(line)\n ofile.close()\n os.chmod('/home/bakerh/fms/exp/' + experiment + '/run/' +\n parameterarray[i+1, 0], 33279)\n runfile.write('./' + parameterarray[i+1, 0] + '\\n')\n # copy restart file and create restart text file\n dirtomake = \"mkdir '/network/aopp/hera/mad/bakerh/fms_tmp/\\\n\" + experiment + \"/\" + parameterarray[i+1, 0] + \"'\"\n os.system(dirtomake)\n copyrestart = \"rsync -a '/network/aopp/hera/mad/bakerh/fms_tmp/\\\nclimspinup/climspinup/output/restart/day3600h00.cpio' \\\n'/network/aopp/hera/mad/bakerh/fms_tmp/\\\n\" + experiment + \"/\" + parameterarray[i+1, 0] + \"'\"\n os.system(copyrestart)\n rfile = open('/network/aopp/hera/mad/bakerh/fms_tmp/' + experiment +\n '/' + parameterarray[i+1, 0] + '/reload_commands', 'w')\n rfile.write('set irun = 1\\n\\\nset init_cond = /network/aopp/hera/mad/bakerh/fms_tmp/' +\n experiment + '/' + parameterarray[i+1, 0] +\n '/day3600h00.cpio \\nset ireload = 2')\n rfile.close()\n runfile.close()\n os.chmod('/home/bakerh/fms/exp/' + experiment + '/run/' + 'runfile', 33279)\n # now alter parameters\n for i in range(np.ma.size(parameterarray, axis=0)-1):\n for j in range(np.ma.size(parameterarray, axis=1)-1):\n parameters('/home/bakerh/fms/exp/' + experiment +\n '/run/' + parameterarray[i+1, 0],\n '/home/bakerh/fms/exp/' +\n experiment + '/run/' + parameterarray[i+1, 0],\n parameterarray[0, j+1], parameterarray[i+1, j+1])" ]
[ "0.64848816", "0.6330899", "0.6330899", "0.6330899", "0.6284369", "0.62667686", "0.6219365", "0.620572", "0.6156918", "0.6099373", "0.60919136", "0.60415375", "0.60354227", "0.60354227", "0.6011478", "0.6007769", "0.5993345", "0.59776956", "0.59592646", "0.59452546", "0.5938593", "0.5932985", "0.59248996", "0.5923378", "0.5913009", "0.590542", "0.59036285", "0.5899277", "0.58928245", "0.587222", "0.5867477", "0.58572876", "0.58562684", "0.58517784", "0.58509487", "0.58479744", "0.5844584", "0.5842581", "0.5841546", "0.584119", "0.5839406", "0.5815324", "0.5814731", "0.58125174", "0.5811605", "0.5807979", "0.5798675", "0.5785687", "0.5777739", "0.57770854", "0.5763099", "0.57603973", "0.5758529", "0.5744876", "0.5727554", "0.5726401", "0.5720667", "0.57198507", "0.5716474", "0.57122296", "0.57098854", "0.56808704", "0.56725013", "0.5671547", "0.5658456", "0.56504345", "0.5642933", "0.5635226", "0.56343055", "0.56230265", "0.5619295", "0.56000763", "0.5599919", "0.5597536", "0.5590361", "0.558091", "0.55791086", "0.55755675", "0.5574759", "0.55740255", "0.5566939", "0.55654913", "0.5563944", "0.55635387", "0.5553942", "0.5552131", "0.5551472", "0.5548685", "0.5548046", "0.5547185", "0.554603", "0.5543027", "0.5539383", "0.55377567", "0.5525636", "0.5525532", "0.55201465", "0.5518601", "0.551612", "0.55158305" ]
0.66563916
0
Return tokenized list of strings from raw text input
def tokenize1(text):
    return TOKEN_PATTERN1.findall(text)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_token_list(text):\n return text.split()", "def tokenize(self, input_string: str) -> List[str]:", "def tokenize(text: str):\n result = []\n for s in text:\n result.append(s)\n return result", "def _tokenize(self, text: str) -> List[str]:\n return self.bpe.tokenize(text)", "def tokenize(self, raw_text):\n # TODO implement\n raw_tokens = word_tokenize(raw_text.decode('utf8'))\n return self.filter_tokens(raw_tokens)\n # return self.split_by(raw_tokens, '-')", "def _tokenize(self, text: str) -> List[str]:\n tokens = []\n for token in re.findall(self._pat, text):\n tokens.extend(bpe_token for bpe_token in self._bpe(token).split(\" \"))\n return tokens", "def _tokenize(self, text: str) -> List[str]:\n text = text.lower().strip()\n return self.bpe.tokenize(text)", "def tokenize(text):\n return tokens_re.findall(text)", "def tokenize(text: str) -> list:\n TOKENIZER = re.compile(f'([!\"#$%&\\'()*+,-./:;<=>?@[\\\\]^_`|~“”¨«»®´·º½¾¿¡§£₤‘’\\n\\t])')\n return TOKENIZER.sub(r' \\1 ', text).split()", "def _get_tokens(s: str) ->List[str]:\n return [] if not s else _normalize_text(s).split()", "def tokenize(text):\n return text.split(' ')", "def tokenize(text):\n source = list(text.rstrip().replace('\\n', ' '))\n return source", "def get_tokens(self, text):\n if text is not None:\n text = text.strip()\n words = self.safe_split(text)\n return words\n return []", "def _tokenize(self, text: str) -> List[str]:\n return self.bert_model.tokenize(text.strip())", "def tokenize(text):\n regex = re.compile(r'\\W+')\n tokens = regex.split(text.lower())\n tokens = [token for token in tokens if token]\n return tokens", "def tokens_from_string(self, text):\n\n if self.level == \"character\":\n return list(text)\n elif self.level == \"word\":\n return nltk.word_tokenize(text)\n else:\n print(\"error: invalid level\")", "def _tokenize(self, text):\n bpe_tokens = []\n for token in re.findall(self.pat, text):\n bpe_tokens.extend([t for t in self.bpe(token).split(\" \")])\n return bpe_tokens", "def tokenize(self, text: str) -> ['token']:\n # project2: takes a str to tokenize instead of a text file\n tokens = []\n word = ''\n for char in text:\n # read one char at a time to save memory\n if char in string.ascii_uppercase:\n word += char.lower() # tokens are lower case only\n elif (char in string.ascii_lowercase) or (char in string.digits): # accepted char for token\n word += char\n elif word != '':\n # this branch will only be triggered at the end of a token,\n # avoid adding empty string to list of tokens when there's multiple non-alphanumeric chars in a row\n tokens.append(word)\n word = ''\n return tokens", "def _tokenize(self, text):\n split_tokens = []\n for token in self.basic_tokenizer.tokenize(text):\n for sub_token in self.wordpiece_tokenizer.tokenize(token):\n split_tokens.append(sub_token)\n return split_tokens", "def text2tokens(raw_text):\n clean_text = raw_text.lower().translate(translate_tab)\n tokens = [token.strip() for token in tokenizer.tokenize(clean_text)]\n tokens = [token for token in tokens if token not in eng_stopwords]\n stemmed_tokens = [stemmer.stem(token) for token in tokens]\n return [token for token in stemmed_tokens if len(token) > 2] # skip short tokens", "def tokenize(self, texts: List[str]) -> List[Token]:\n raise NotImplementedError", "def tokenize(self, text):\n output_tokens = []\n for token in whitespace_tokenize(text):\n chars = list(token)\n if len(chars) > self.max_input_chars_per_word:\n output_tokens.append(self.unk_token)\n continue\n is_bad = False\n start = 0\n 
sub_tokens = []\n while start < len(chars):\n end = len(chars)\n cur_substr = None\n while start < end:\n substr = ''.join(chars[start:end])\n if start > 0:\n substr = '##' + substr\n if substr in self.vocab:\n cur_substr = substr\n break\n end -= 1\n if cur_substr is None:\n is_bad = True\n break\n sub_tokens.append(cur_substr)\n start = end\n if is_bad:\n output_tokens.append(self.unk_token)\n else:\n output_tokens.extend(sub_tokens)\n if len(output_tokens) == 0:\n return [self.unk_token]\n return output_tokens", "def preprocessing(raw_text):\n words_list = tokenize(raw_text)\n words_list = remove_stop_words(words_list)\n words_list = remove_punctuations(words_list)\n words_list = lemmatization(words_list)\n return words_list", "def tokenize(text, token):\n text = [token(x) for x in text]\n return text", "def tokenize_text(text):\n text = re.sub(\"_\", \"\", text)\n text = text.lower()\n tokens = re.split(\"\\W+\", text)\n tokens = [t for t in tokens if t]\n #print(tokens[0:10])\n return tokens", "def tokenize(self, text):\n rx = re.compile(r\"\\B(.)\")\n text = rx.sub(r\" ##\\1\", text)\n output_tokens = []\n\n for token in text.split():\n if token in self.vocab:\n output_tokens.append(token)\n else:\n output_tokens.append(self.unk_token)\n return output_tokens", "def tokenize(text: str):\n # Create list of word tokens\n token_list = []\n # nlp = init_nlp(TaggingMethod.SPACY, Lang.ES, size=DictionarySize.MEDIUM)\n doc = nlp(text)\n token_list = [token.text for token in doc]\n # for token in doc:\n # token_list.append(token.text)\n return token_list", "def _tokenize(self, text):\n if not text:\n return []\n\n text = PUNCTUATION_CHARS.sub(' ', text)\n\n words = [\n t[:128].lower() for t in text.split()\n if len(t) >= MIN_WORD_LENGTH and t.lower() not in STOP_WORDS\n ]\n\n return words", "def tokenize(lines, token='word'):\n if token == 'word':\n return [line.split() for line in lines]\n elif token == 'char':\n return [list(line) for line in lines]\n else:\n print('ERROR: unknown token type: ' + token)", "def tokenize_tag(text):\n return [tok for tok in single_tokenizer(text)]", "def tokenize(raw_text):\n tokenized_text = nltk.tokenize.word_tokenize(raw_text)\n return tokenized_text", "def _batch_tokenize(self, text: List[str]) -> List[List[str]]:\n return self.bert_model.batch_tokenize([t.strip() for t in text])", "def tokenize(self, text):\n # text = convert_to_unicode(text)\n\n output_tokens = []\n for token in split_by_whitespace(text):\n chars = list(token)\n if len(chars) > self.max_input_chars_per_word:\n output_tokens.append(self.unk_token)\n continue\n\n is_bad = False\n start = 0\n sub_tokens = []\n while start < len(chars):\n end = len(chars)\n cur_substr = None\n while start < end:\n substr = \"\".join(chars[start:end])\n if start > 0:\n substr = \"##\" + substr\n if substr in self.vocab:\n cur_substr = substr\n break\n end -= 1\n if cur_substr is None:\n is_bad = True\n break\n sub_tokens.append(cur_substr)\n start = end\n\n if is_bad:\n output_tokens.append(self.unk_token)\n else:\n output_tokens.extend(sub_tokens)\n return output_tokens", "def _tokenize(ustr1):\n\n return list(tk.generate_tokens(io.StringIO(ustr1).readline))", "def _tokenize(self, text):\n if not text:\n return []\n\n text = self.PUNCTUATION_CHARS.sub(' ', text)\n\n words = [t[:128] for t in text.split() if len(t) >= self.MIN_WORD_LENGTH and t.lower() not in self.STOP_WORDS]\n\n return words", "def simple_tokenizer(text):\n re_tok = re.compile(punctuation_string)\n return re_tok.sub(' ', text).split()", "def 
tokenize_by_space(text: str) -> List[str]:\n return text.split(\" \")", "def tokenizer(text):\n # remove punctuation from text - remove anything that isn't a word char or a space\n replace_punctuation = str.maketrans(string.punctuation, ' ' * len(string.punctuation))\n text = text.translate(replace_punctuation)\n return re.split('\\s+', text)", "def whitespace_tokenize(text):\n text = text.strip()\n if not text:\n return []\n tokens = text.split()\n return tokens", "def tokenize(text):\n sentence = Sentence(text)\n return sentence.tokens()", "def whitespace_tokenize(text):\n text = text.strip()\n if not text:\n return []\n tokens = text.split()\n return tokens", "def whitespace_tokenize(text):\n text = text.strip()\n if not text:\n return []\n tokens = text.split()\n return tokens", "def whitespace_tokenize(text):\n text = text.strip()\n if not text:\n return []\n tokens = text.split()\n return tokens", "def tokenize(text):\n return [token.lower() for token in simple_preprocess(text) if token not in STOPWORDS]", "def tokenise_str(input_str):\n t = Tokeniser(input_str)\n tokens = []\n while True:\n token = t.next()\n if token is None:\n break\n tokens.append(token)\n return tokens", "def tokenize(text):\n\n tokens = word_tokenize(text)\n lemmatizer = WordNetLemmatizer()\n\n return [lemmatizer.lemmatize(token).lower().strip() for token in tokens]", "def tokenize(self, text: str) -> List[str]:\n\n # calls the selected tokenizer function e.g. 're' => re_tokenize(text)\n word_tokens = self.gpt2_tokenize(text)\n\n return word_tokens", "def tokenize(text):\n\n tokens = word_tokenize(text)\n lemmatizer = WordNetLemmatizer()\n \n clean_tokens =[]\n for tok in tokens:\n clean_tok = lemmatizer.lemmatize(tok).lower().strip()\n clean_tokens.append(clean_tok)\n\n return clean_tokens", "def tokenize(str):\n return str.split()", "def tokenize(self, sequence: str) -> List[str]:\n raise NotImplementedError", "def tokenize_en(text):\n return [tok.text for tok in spacy_en.tokenizer(text)]", "def tokenize(txt):\n Depunc = depunctuate(txt).lower()\n Tokens = word_tokenize(Depunc)\n \n return Tokens", "def tokenize(instr: str) -> list:\n candidates = instr.split()\n # TODO(dfinninger): Handle paren groups correctly\n return [_get_tokens(item) for item in candidates]", "def tokenize(text):\n tokens = word_tokenize(text)\n lemmatizer = WordNetLemmatizer()\n\n clean_tokens = []\n for token in tokens:\n clean_token = lemmatizer.lemmatize(token).lower().strip()\n clean_tokens.append(clean_token)\n \n return clean_tokens", "def get_tokens(self, text):\n\t\treturn tuple(self._compiled_pattern.findall(text))", "def tokenize(self, text):\n\n text = convert_to_unicode(text)\n\n output_tokens = []\n for token in whitespace_tokenize(text):\n chars = list(token)\n if len(chars) > self.max_input_chars_per_word:\n output_tokens.append(self.unk_token) # pragma: no cover\n continue # pragma: no cover\n\n is_bad = False\n start = 0\n sub_tokens = []\n while start < len(chars):\n end = len(chars)\n cur_substr = None\n while start < end:\n substr = \"\".join(chars[start:end])\n if start > 0:\n substr = \"##\" + substr # pragma: no cover\n if substr in self.vocab:\n cur_substr = substr\n break\n end -= 1 # pragma: no cover\n if cur_substr is None:\n is_bad = True # pragma: no cover\n break # pragma: no cover\n sub_tokens.append(cur_substr)\n start = end\n\n if is_bad:\n output_tokens.append(self.unk_token) # pragma: no cover\n else:\n output_tokens.extend(sub_tokens)\n return output_tokens", "def tokenize(text):\n tokens = 
word_tokenize(text)\n lemmatizer = WordNetLemmatizer()\n\n clean_tokens = []\n for tok in tokens:\n clean_tok = lemmatizer.lemmatize(tok).lower().strip()\n clean_tokens.append(clean_tok)\n \n return clean_tokens", "def tokenize(text):\n tokens = word_tokenize(text)\n lemmatizer = WordNetLemmatizer()\n clean_tokens = []\n for tok in tokens:\n clean_tok = lemmatizer.lemmatize(tok).lower().strip()\n clean_tokens.append(clean_tok)\n return clean_tokens", "def tokenize(text):\n tokens = word_tokenize(text)\n lemmatizer = WordNetLemmatizer()\n\n clean_tokens = []\n for tok in tokens:\n clean_tok = lemmatizer.lemmatize(tok).lower().strip()\n clean_tokens.append(clean_tok)\n\n return clean_tokens", "def tokenize(sent):\n return [x.strip() for x in re.split('(\\W+)', sent) if x.strip()]", "def tokenize(text):\n tokens = word_tokenize(text)\n lemmatizer = WordNetLemmatizer()\n\n clean_tokens = []\n for token in tokens:\n clean_token = lemmatizer.lemmatize(token).lower().strip()\n clean_tokens.append(clean_token)\n\n return clean_tokens", "def get_tokens(sent):\n return word_tokenize(sent)", "def tokens(self):\n tokens = []\n for index in range(len(self.sentrep)):\n tokens.append(self.sentrep.getWord(index).lexeme())\n return tokens", "def tokenize(self, input): # pylint: disable=redefined-builtin\n (tokens, _, _) = self.tokenize_with_offsets(input)\n return tokens", "def tokenize_en(text):\n return [tok.text for tok in spacy_en.tokenizer(text)]", "def tokenize_en(text):\n return [tok.text for tok in spacy_en.tokenizer(text)]", "def tokenize_en(text):\n return [tok.text for tok in spacy_en.tokenizer(text)]", "def tokenize_en(text):\n return [tok.text for tok in spacy_en.tokenizer(text)]", "def tokenize(text):\n #Clean data, remove all character except character and number,such as punctuation etc.\n text = re.sub(r'[^a-zA-Z0-9]', ' ', text.lower())\n tokens = word_tokenize(text)\n tokens = [WordNetLemmatizer().lemmatize(word) for word in tokens if word not in ST_english]\n return tokens", "def tokenize(raw_text):\n def _xop(tokens):\n def _(x):\n return xop(x, \"op\", tokens)\n return _\n \n raw_tokens=xversa_split(raw_text, tokens=Op_Tokens+Group_Tokens)\n tokens=map(xtotype, raw_tokens) \n tokens=map(_xop(Op_Tokens+Group_Tokens), tokens)\n return tokens", "def tokenize(self, text):\n\n text = convert_to_unicode(text)\n\n output_tokens = []\n for token in whitespace_tokenize(text):\n chars = list(token)\n if len(chars) > self.max_input_chars_per_word:\n output_tokens.append(self.unk_token)\n continue\n\n is_bad = False\n start = 0\n sub_tokens = []\n while start < len(chars):\n end = len(chars)\n cur_substr = None\n while start < end:\n substr = \"\".join(chars[start:end])\n ### joonho.lim @ 2019-03-15\n # if start > 0:\n # substr = \"##\" + substr\n # print ( '[substr]\\t%s\\t%s\\t%d\\t%d' % ( substr, substr in self.vocab, start, end))\n if substr in self.vocab:\n cur_substr = substr\n break\n end -= 1\n if cur_substr is None:\n is_bad = True\n break\n sub_tokens.append(cur_substr)\n start = end\n\n if is_bad:\n output_tokens.append(self.unk_token)\n else:\n output_tokens.extend(sub_tokens)\n output_tokens.insert(0, '[CLS]')\n output_tokens.append('[SEP]')\n return output_tokens", "def tokenize_keras(raw_data):\n from keras.preprocessing.text import text_to_word_sequence\n return [text_to_word_sequence(d) for d in raw_data]", "def tokenize(review):\n\n token = strip_multiple_whitespaces(strip_punctuation(review))\n return [token.split() for token in simple_preprocess(token) if token not in 
STOPWORDS]", "def tokenize(text):\n text = re.sub('[^A-Za-z0-9]', ' ', text)\n tokens = word_tokenize(text)\n lemmatizer = WordNetLemmatizer()\n\n clean_tokens = []\n for tok in tokens:\n clean_tok = lemmatizer.lemmatize(tok).lower().strip()\n clean_tokens.append(clean_tok)\n return clean_tokens", "def tokenize_en(text):\n spacy_en = spacy.load('en')\n return [tok.text for tok in spacy_en.tokenizer(text)]", "def tokenize(source_code):\n return source_code.replace('(',' ( ').replace(')',' ) ').split()", "def _tokenize(self, string):\n self._tokens = []\n\n # Split and strip the input string by newlines\n for token in re.split('(.*)', string):\n if token.strip() != '':\n self._tokens.append(token)", "def tokenize(text,split_str='\\s',chars=False):\n if not chars:\n text=re.split(split_str,text)\n return [token for token in text if token not in [\"\"]]", "def preprocess(text):\n\tX = []\n\tsent_detector = nltk.data.load('tokenizers/punkt/english.pickle')\n\tfor t in text:\n\t\tsents = sent_detector.tokenize(t)\n\t\tresult = ''\n\t\tfor s in sents:\n\t\t\ttokens = word_tokenize(s)\n\t\t\tresult += ' ' + ' '.join(tokens)\n\t\tX.append(result)\n\treturn X", "def tokens(self, text):\n startIndex = 0\n tokens = []\n while True:\n i = text.find(\"\\0\", startIndex)\n if i == -1:\n break\n tokens = tokens + self.__splitTokens(text[startIndex:i])\n tokens.append(Token(\"\\0\", Token.UNKNOWN))\n startIndex = i + 1\n tokens = tokens + self.__splitTokens(text[startIndex:])\n return tokens", "def tokenize(text):\n text = re.sub(r\"[^a-zA-Z0-9]+\", \" \", text.lower())\n\n tokens = word_tokenize(text)\n lemmatizer = WordNetLemmatizer()\n\n clean_tokens = []\n for tok in tokens:\n clean_tok = lemmatizer.lemmatize(tok).lower().strip()\n clean_tokens.append(clean_tok)\n\n return clean_tokens", "def tokenize(self):\n tknzr = TweetTokenizer()\n tkn = []\n for tweet in self.tweets:\n for word in tknzr.tokenize(tweet):\n tkn.append(word)\n return tkn", "def tokenize(s):\n return s.split()", "def tokenize(expression):\n return [t.strip() for t in TOKEN_SEPARATOR.split(expression.strip()) if t]", "def tokenise(sample):\n\n processed = sample.split()\n return processed", "def tokenize(text):\n tokens=word_tokenize(text)\n lemmatizer=WordNetLemmatizer()\n \n clean_tokens=[]\n for tok in tokens:\n clean_tok = lemmatizer.lemmatize(tok).lower().strip()\n clean_tokens.append(clean_tok)\n return clean_tokens\n pass", "def tokenize(text):\n # YOUR CODE HERE\n t = text.lower()\n words = re.findall(r'[a-z]+',t)\n return words", "def tokenize(text):\n wt = nltk.RegexpTokenizer(pattern=r'\\s+', gaps=True)\n tokens = wt.tokenize(text)\n return tokens", "def tokenize(text):\n tokens = TreebankWordTokenizer().tokenize(text)\n tokens = lemmatize(tokens)\n tokens = filter(lambda s: len(s) > 2, tokens) # remove tokens with < 3 chars\n return tokens", "def txt_to_word_list(text):\r\n return [w for w in text.split()]", "def tokenize(self, sText):\n\n lTokens = []\n sToken = \"\"\n for c in sText:\n if re.match(\"[a-zA-Z0-9]\", str(c)) != None or c == \"\\\"\" or c == \"_\" or c == \"-\":\n sToken += c\n else:\n if sToken != \"\":\n lTokens.append(sToken)\n sToken = \"\"\n if c.strip() != \"\":\n lTokens.append(str(c.strip()))\n\n if sToken != \"\":\n lTokens.append(sToken)\n\n return lTokens", "def raw_text_to_tokenized_phrases(raw_phrases, language='english'):\n tokenized = nltk.sent_tokenize(raw_phrases, language)\n return [tokenize_phrase(Phrase(phrase)) for phrase in tokenized]", "def tokenize(self, input: str) -> List[Tuple[str, 
int, int]]:\n raise NotImplementedError", "def tokenize(text):\n common_words_string = \" | \".join(common_words)\n re_tok = re.compile(punctuation_string + \"| \" + common_words_string + \" \")\n words = re_tok.sub(' ',re_tok.sub(' ',text)).split()\n\n tokens = []\n for i in range(len(words)-1):\n first = words[i]\n second = words[i+1]\n # third = words[i+2]\n tokens.append(' '.join([first, second]))\n return tokens", "def tokenize(text):\n tokens = nltk.word_tokenize(text)\n lemmatizer = nltk.WordNetLemmatizer()\n \n lemmatized_words = []\n for word in tokens:\n lemmatized_words.append(lemmatizer.lemmatize(word).lower().strip())\n \n return lemmatized_words", "def __call__(self, string, include_gd=True): # -> \"TokenList\":\r\n self.load(string)\r\n result = []\r\n while True:\r\n try:\r\n result.append(self.nextToken(include_gd))\r\n except:\r\n break\r\n return result", "def word_tokenize(text):\n word_list = []\n for sentences in nltk.sent_tokenize(text):\n for words in nltk.word_tokenize(sentences):\n word_list.append(words)\n return word_list", "def tokenize_text(document, nlp):\n\n return [token.text for token in nlp(document)]", "def tokenize(self, text):\n return self._tokenize(text)", "def tokens(text) -> Tuple[Word]:\n return tuple(re.findall('[a-z]+', text.lower()))" ]
[ "0.83176076", "0.7947587", "0.7839667", "0.778215", "0.7778221", "0.77569646", "0.77184075", "0.77048725", "0.76263833", "0.7593482", "0.7591023", "0.7581511", "0.756711", "0.75667906", "0.73598903", "0.73473614", "0.73437184", "0.73308897", "0.731043", "0.73101467", "0.7242299", "0.723186", "0.7195483", "0.71914685", "0.71733296", "0.7170052", "0.71602106", "0.71369725", "0.7125153", "0.71100557", "0.7109995", "0.7104108", "0.7087012", "0.70761895", "0.70627624", "0.70527637", "0.70223904", "0.7021035", "0.7017524", "0.69924504", "0.6982653", "0.6982653", "0.6982653", "0.69773626", "0.6965874", "0.6944344", "0.69288033", "0.6920568", "0.6919032", "0.69183004", "0.69104576", "0.69023323", "0.68969053", "0.68879116", "0.68788433", "0.68723726", "0.68684757", "0.6861949", "0.6860814", "0.68583626", "0.68576294", "0.6856221", "0.6851917", "0.68388504", "0.68347263", "0.68347263", "0.68347263", "0.68347263", "0.6825115", "0.6807683", "0.6791546", "0.6790688", "0.67810106", "0.6770823", "0.6762589", "0.676004", "0.67512316", "0.6749711", "0.6748282", "0.6741436", "0.6719611", "0.6715559", "0.6712352", "0.670653", "0.6704872", "0.6697468", "0.66974103", "0.669717", "0.6694211", "0.6692217", "0.6688208", "0.6684651", "0.6682467", "0.66798735", "0.6679022", "0.6676146", "0.6674575", "0.66650134", "0.66630316", "0.6660071" ]
0.69686973
44
Return tokenized list of strings from raw text input using keras functionality
def tokenize_keras(raw_data):
    from keras.preprocessing.text import text_to_word_sequence
    return [text_to_word_sequence(d) for d in raw_data]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def preproc_user_input(txt, model):\r\n txt = pre_process(txt)\r\n txt_tokenized = [word for word in txt.split(\" \") if word in model.wv.vocab]\r\n return \" \".join(txt_tokenized)", "def input_new_phrase(self, text):\n \n x_new_tokens = [word_idx[word] for word in text.split()]\n \n pred = self.model.predict(np.array([x_new_tokens]))\n pred = np.argmax(pred, axis=-1)[0]\n \n return [[word_list[w], tags[pred]] for (w, pred) in zip(range(len(x_new)), pred)]", "def _batch_tokenize(self, text: List[str]) -> List[List[str]]:\n return self.bert_model.batch_tokenize([t.strip() for t in text])", "def tokenize(lang):\n lang_tokenizer = tf.keras.preprocessing.text.Tokenizer(filters='')\n lang_tokenizer.fit_on_texts(lang)\n tensor = lang_tokenizer.texts_to_sequences(lang)\n # pad zero after sequences for the same length.\n tensor = tf.keras.preprocessing.sequence.pad_sequences(tensor,\n padding='post')\n return tensor, lang_tokenizer", "def tokenized(self, text):\n return self.tokenizer.encode_plus(text,\n max_length=512,\n pad_to_max_length=True,\n truncation=True)[\"input_ids\"]", "def create_model_uniform(text: str) -> List[str]:\n return str.split(text)", "def identity_tokenizer(text):\n return text", "def _tokenize(self, text: str) -> List[str]:\n return self.bert_model.tokenize(text.strip())", "def batch_tokenize_fn(examples):\n sources = examples[config.source_lang]\n targets = examples[config.target_lang]\n model_inputs = config.tokenizer(sources, max_length=config.max_source_length, truncation=True)\n\n # setup the tokenizer for targets,\n # huggingface expects the target tokenized ids to be stored in the labels field\n with config.tokenizer.as_target_tokenizer():\n labels = config.tokenizer(targets, max_length=config.max_target_length, truncation=True)\n\n model_inputs[\"labels\"] = labels[\"input_ids\"]\n return model_inputs", "def tokenize(text):\n\n tokens = word_tokenize(text)\n lemmatizer = WordNetLemmatizer()\n\n return [lemmatizer.lemmatize(token).lower().strip() for token in tokens]", "def tokenize(text):\n tokens=word_tokenize(text)\n lemmatizer=WordNetLemmatizer()\n \n clean_tokens=[]\n for tok in tokens:\n clean_tok = lemmatizer.lemmatize(tok).lower().strip()\n clean_tokens.append(clean_tok)\n return clean_tokens\n pass", "def tokenize(text):\n\n tokens = word_tokenize(text)\n lemmatizer = WordNetLemmatizer()\n \n clean_tokens =[]\n for tok in tokens:\n clean_tok = lemmatizer.lemmatize(tok).lower().strip()\n clean_tokens.append(clean_tok)\n\n return clean_tokens", "def create_tokenizer(dataset):\n lang_tokenizer = tf.keras.preprocessing.text.Tokenizer(char_level=True)\n lang_tokenizer.fit_on_texts([x['input'] for x in dataset])\n return lang_tokenizer", "def tokenize(text):\n tokens = nltk.word_tokenize(text)\n lemmatizer = nltk.WordNetLemmatizer()\n \n lemmatized_words = []\n for word in tokens:\n lemmatized_words.append(lemmatizer.lemmatize(word).lower().strip())\n \n return lemmatized_words", "def process_text(input_txt):\r\n # if input is string\r\n tidy_txt = remove_pattern(input_txt,\"@[\\w]*\")\r\n ##=============================== if input is dataframe ====================##\r\n # tidy_txt = np.vectorize(remove_pattern)(input_txt,\"@[\\w]*\") #\r\n ##==========================================================================##\r\n # remove special characters\r\n tidy_txt = tidy_txt.replace(\"[^a-zA-Z#]\",\" \")\r\n # split into words\r\n tokenized_txt = tidy_txt.split()\r\n # perform stemming\r\n stemmer = PorterStemmer()\r\n tokenized_txt = [stemmer.stem(i) for i in 
tokenized_txt]\r\n print(tokenized_txt)\r\n # joining words back\r\n tokenized_txt = ' '.join(tokenized_txt)\r\n return tokenized_txt", "def preprocessing(raw_text):\n words_list = tokenize(raw_text)\n words_list = remove_stop_words(words_list)\n words_list = remove_punctuations(words_list)\n words_list = lemmatization(words_list)\n return words_list", "def tokenize_pretraining(self, inputs):\n\n ref_ids = prepare_ref([inputs], self.tokenizer_ltp, self.tokenizer_cn)\n\n tokens = self.tokenizer_cn.tokenize(inputs)\n\n if len(tokens) > self.max_seq_length - 2:\n tokens = tokens[:(self.max_seq_length - 2)]\n ref_ids = ref_ids[:(self.max_seq_length - 2)]\n\n ref_ids = cn_whole_word_mask(tokens, ref_ids[0])\n tokens, labels = random_word_wwm(tokens, ref_ids, self.tokenizer_cn)\n\n tokens = ['[CLS]'] + tokens + ['[SEP]']\n lm_label_ids = ([-100] + labels + [-100])\n\n input_ids = self.tokenizer_cn.convert_tokens_to_ids(tokens)\n\n attention_mask = [1] * len(input_ids)\n token_type_ids = [0] * len(input_ids)\n\n while len(input_ids) < self.max_seq_length:\n input_ids.append(0)\n attention_mask.append(0)\n token_type_ids.append(0)\n lm_label_ids.append(-100)\n\n assert len(input_ids) == self.max_seq_length\n assert len(attention_mask) == self.max_seq_length\n assert len(token_type_ids) == self.max_seq_length\n assert len(lm_label_ids) == self.max_seq_length\n\n\n outputs = {'input_ids': tf.constant(input_ids), 'attention_mask': tf.constant(attention_mask), \n 'token_type_ids': tf.constant(token_type_ids), 'lm_label_ids': tf.constant(lm_label_ids)}\n\n return outputs", "def preprocess(self,text):\n return preprocess.get_tokens(text)", "def preprocess(text):\n\tX = []\n\tsent_detector = nltk.data.load('tokenizers/punkt/english.pickle')\n\tfor t in text:\n\t\tsents = sent_detector.tokenize(t)\n\t\tresult = ''\n\t\tfor s in sents:\n\t\t\ttokens = word_tokenize(s)\n\t\t\tresult += ' ' + ' '.join(tokens)\n\t\tX.append(result)\n\treturn X", "def tokenize(text):\n \n text.lower() # convert to lowercase\n text = re.sub(r\"[^a-zA-Z0-9]\", \" \", text) #remove punctuation\n words = word_tokenize(text) # tokenize by individual word\n words = [w for w in words if w not in stopwords.words(\"english\")] #remove stop words\n lemmed = [WordNetLemmatizer().lemmatize(w) for w in words] #lemminization\n \n return words", "def tokenize(text):\n #Clean data, remove all character except character and number,such as punctuation etc.\n text = re.sub(r'[^a-zA-Z0-9]', ' ', text.lower())\n tokens = word_tokenize(text)\n tokens = [WordNetLemmatizer().lemmatize(word) for word in tokens if word not in ST_english]\n return tokens", "def tokenize(text):\n tokens = word_tokenize(text)\n lemmatizer = WordNetLemmatizer()\n\n clean_tokens = []\n for token in tokens:\n clean_token = lemmatizer.lemmatize(token).lower().strip()\n clean_tokens.append(clean_token)\n \n return clean_tokens", "def tokenise(sample):\n\n processed = sample.split()\n return processed", "def generate_text(session, model, config, starting_text='<eos>',\n stop_length=100, stop_tokens=None, temp=1.0):\n state = model.initial_state.eval()\n # Imagine tokens as a batch size of one, length of len(tokens[0])\n tokens = [model.vocab.encode(word) for word in starting_text.split()]\n for i in xrange(stop_length):\n ### YOUR CODE HERE\n #print tokens\n feed = {}\n #x = np.array([tokens[-1]])\n #x.reshape(1,1)\n feed[model.input_placeholder] = [[tokens[-1]]]\n feed[model.dropout_placeholder] = 1\n feed[model.initial_state] = state\n y_pred, state = 
session.run([model.predictions[-1], model.final_state], feed_dict=feed)\n ### END YOUR CODE\n next_word_idx = sample(y_pred[0], temperature=temp)\n tokens.append(next_word_idx)\n if stop_tokens and model.vocab.decode(tokens[-1]) in stop_tokens:\n break\n output = [model.vocab.decode(word_idx) for word_idx in tokens]\n return output", "def get_text_features() -> np.array:\r\n # Universal sentence encoder model\r\n # Original model by Google could be loaded from: https://tfhub.dev/google/universal-sentence-encoder/4\r\n # In this notebook the model is loaded from a public dataset on Kaggle\r\n # at https://www.kaggle.com/dimitreoliveira/universalsentenceencodermodels\r\n text_model = tf.keras.Sequential(\r\n [KerasLayer(txt_model_path, input_shape=[], dtype=tf.string, # Pretrained model\r\n output_shape=[512], trainable=False),\r\n tf.keras.layers.Layer(512, dtype='float16')] # This layer reduces precision of float numbers\r\n )\r\n\r\n # Convert all texts to vectors\r\n features = text_model.predict(data['title'],\r\n batch_size=BATCH_SIZE,\r\n use_multiprocessing=True,\r\n workers=-1)\r\n print('Text features extracted. Shape:', features.shape)\r\n\r\n return features", "def tokenize(text):\n tokens = word_tokenize(text)\n lemmatizer = WordNetLemmatizer()\n\n clean_tokens = []\n for tok in tokens:\n clean_tok = lemmatizer.lemmatize(tok).lower().strip()\n clean_tokens.append(clean_tok)\n \n return clean_tokens", "def tokenize(text):\n return text.split(' ')", "def predict(self, text: list) -> list:\n\n encoded_input = self.__encode_token(text)\n dataloaders = self.__create_dataloaders(encoded_input, self.batch_size)\n\n preds = []\n for i, batch in enumerate(zip(dataloaders[0], dataloaders[1], dataloaders[2])):\n preds_batch_list = self.__predict_batch(self.model, batch)\n preds += preds_batch_list\n\n return preds", "def tokenizer(self):\n tokenizer = RegexpTokenizer(r'\\w+')\n \n self.tweet_tokenized_train = [tokenizer.tokenize(x.lower()) for x in self.tweet_prepro_train]\n self.tweet_tokenized_test = [tokenizer.tokenize(x.lower()) for x in self.tweet_prepro_test]", "def tokenize(self, input_string: str) -> List[str]:", "def _preprocess(self, txt_seq):\n input = []\n for token in txt_seq.split():\n input.append(self.word2id.get(token, self.word2id[\"<UNK>\"]))\n input.append(self.word2id[\"<END>\"])\n input = torch.LongTensor(input)\n return input", "def tokenize(text):\n tokens = word_tokenize(text)\n lemmatizer = WordNetLemmatizer()\n clean_tokens = []\n for tok in tokens:\n clean_tok = lemmatizer.lemmatize(tok).lower().strip()\n clean_tokens.append(clean_tok)\n return clean_tokens", "def predict(self, text):\n tokens = ['[CLS]'] + self.tokenizer.tokenize(text) + ['[SEP]']\n xx = self.tokenizer.convert_tokens_to_ids(tokens)\n xx = torch.tensor(xx).unsqueeze(0).to(self.device)\n _, y_hat = self.model(xx)\n pred_tags = []\n for tag in y_hat.squeeze():\n pred_tags.append(idx2tag[tag.item()])\n return pred_tags, tokens", "def _tokenize(text, language_code):\n seq = annotool.raw2basicseq(text, language_code, pos=False)\n word_seq = seq['word']\n return word_seq", "def tokenize(self, inputs):\n if hasattr(self.tokenizer, \"batch_encode\"):\n return self.tokenizer.batch_encode(inputs)\n else:\n return [self.tokenizer.encode(x) for x in inputs]", "def pre_process(text):\n # replace (,.'\") with ''\n text = text.replace(',', '')\n text = text.replace('.', '')\n text = text.replace(\"'\", '')\n text = text.replace(\"\\\"\", '')\n\n # tokenize into words\n tokens = [word for sent in 
sent_tokenize(text) for word in word_tokenize(sent)]\n\n # remove stopwords\n stop = stopwords.words('english')\n tokens = [token for token in tokens if token not in stop]\n\n # remove words less than three letters\n tokens = [word for word in tokens if len(word) >= 3]\n\n # lower capitalization\n tokens = [word.lower() for word in tokens]\n\n # lemmatize\n lmtzr = WordNetLemmatizer()\n tokens = [lmtzr.lemmatize(word) for word in tokens]\n\n return tokens", "def tokenize(text):\n tokens = word_tokenize(text)\n lemmatizer = WordNetLemmatizer()\n\n clean_tokens = []\n for token in tokens:\n clean_token = lemmatizer.lemmatize(token).lower().strip()\n clean_tokens.append(clean_token)\n\n return clean_tokens", "def _tokenize_text(self, text: str) -> torch.Tensor:\n # specify max length truncation to avoid IndexError due to exceeding length\n return torch.tensor(self.tokenizer.encode(text, max_length=self.max_len, truncation=True)).unsqueeze(0)", "def tokenize(text):\n tokens = word_tokenize(text)\n lemmatizer = WordNetLemmatizer()\n\n clean_tokens = []\n for tok in tokens:\n clean_tok = lemmatizer.lemmatize(tok).lower().strip()\n clean_tokens.append(clean_tok)\n\n return clean_tokens", "def generate_token_arrays(\n text,\n text_tar,\n text_pred,\n tokenizer=None,\n expand_predictions=True,\n split_true_entities=True,\n ignore_value=None\n):\n # split text for token evaluation\n if isinstance(tokenizer, stanfordnlp.pipeline.core.Pipeline):\n doc = tokenizer(text)\n # extract tokens from the parsed text\n tokens_base = [\n token.text for sentence in doc.sentences\n for token in sentence.tokens\n ]\n elif isinstance(tokenizer, spacy.tokenizer.Tokenizer):\n doc = tokenizer(text)\n # extract tokens from the parsed text\n tokens_base = [token.text for token in doc]\n else:\n if tokenizer is None:\n tokenizer = ''\n # treat string as a regex\n tokens_base = re.findall(tokenizer, text)\n\n tokens = []\n tokens_pred = []\n tokens_true = []\n tokens_start, tokens_length = [], []\n\n n_tokens = 0\n\n start = 0\n for token in tokens_base:\n # sometimes we have empty tokens on their own\n if len(token) == 0:\n continue\n start = text.find(token, start)\n token_true = text_tar[start:start + len(token)]\n token_pred = text_pred[start:start + len(token)]\n\n if all(token_true == -1) & all(token_pred == -1):\n # skip tokens which are not labeled\n start += len(token)\n n_tokens += 1\n continue\n\n if split_true_entities:\n # split the single token into subtokens, based on the true entity\n idxDiff = np.diff(token_true, prepend=0)\n if any(idxDiff > 0):\n # split\n idxDiff = np.diff(token_true, prepend=0)\n subtok_start = 0\n subtoken_true, subtoken_pred = [], []\n for subtok_end in np.where(idxDiff > 0)[0]:\n subtoken_true.append(token_true[subtok_start:subtok_end])\n subtoken_pred.append(token_pred[subtok_start:subtok_end])\n subtok_start = subtok_end\n if subtok_end < len(token_true):\n # add final token\n subtoken_true.append(token_true[subtok_start:])\n subtoken_pred.append(token_pred[subtok_start:])\n else:\n # in this case, there is only 1 label_id for the entire token\n # so we can just wrap in a list for the iterator later\n subtoken_true = [token_true]\n subtoken_pred = [token_pred]\n else:\n # do not split a token if there is more than 1 ground truth\n # consequently, tokens with multiple labels will be treated\n # as equal to the most frequent label\n subtoken_true = [token_true]\n subtoken_pred = [token_pred]\n\n # now iterate through our sub-tokens\n # often this is a length 1 iterator\n for 
token_true, token_pred in zip(subtoken_true, subtoken_pred):\n if len(token_true) == 0:\n continue\n\n if expand_predictions:\n # expand the most frequent ID to cover the entire token\n token_pred = expand_id_to_token(token_pred, ignore_value=-1)\n token_true = expand_id_to_token(token_true, ignore_value=-1)\n\n # get the length of the token for later\n token_len = len(token_true)\n\n # aggregate IDs for this token into the most frequent value\n if len(token_true) == 0:\n token_true = -1\n else:\n token_true = mode(token_true, ignore_value)\n if len(token_pred) == 0:\n token_pred = -1\n else:\n token_pred = mode(token_pred, ignore_value)\n\n # append the prediction for this token\n tokens_true.append(token_true)\n tokens_pred.append(token_pred)\n tokens.append(text[start:start + token_len])\n tokens_start.append(start)\n tokens_length.append(token_len)\n\n start += token_len\n # keep track of total tokens assessed\n n_tokens += 1\n\n # now we have a list of tokens with preds\n tokens_true = np.asarray(tokens_true, dtype=int)\n tokens_pred = np.asarray(tokens_pred, dtype=int)\n\n return tokens_true, tokens_pred, tokens, tokens_start, tokens_length", "def get_token_list(text):\n return text.split()", "def process_text(self, text: str, max_length: int) -> Dict[str, Sequence[int]]:\n inputs = self.tokenizer(\n [c for c in text],\n return_token_type_ids=True,\n return_attention_mask=True,\n max_length=max_length,\n padding=\"max_length\",\n truncation=True,\n is_pretokenized=True,\n )\n return inputs.data", "def tokenize(self, text):\n # text = convert_to_unicode(text)\n\n output_tokens = []\n for token in split_by_whitespace(text):\n chars = list(token)\n if len(chars) > self.max_input_chars_per_word:\n output_tokens.append(self.unk_token)\n continue\n\n is_bad = False\n start = 0\n sub_tokens = []\n while start < len(chars):\n end = len(chars)\n cur_substr = None\n while start < end:\n substr = \"\".join(chars[start:end])\n if start > 0:\n substr = \"##\" + substr\n if substr in self.vocab:\n cur_substr = substr\n break\n end -= 1\n if cur_substr is None:\n is_bad = True\n break\n sub_tokens.append(cur_substr)\n start = end\n\n if is_bad:\n output_tokens.append(self.unk_token)\n else:\n output_tokens.extend(sub_tokens)\n return output_tokens", "def tokenize(text):\n sentence = Sentence(text)\n return sentence.tokens()", "def tokenize(self, text):\n output_tokens = []\n for token in whitespace_tokenize(text):\n chars = list(token)\n if len(chars) > self.max_input_chars_per_word:\n output_tokens.append(self.unk_token)\n continue\n is_bad = False\n start = 0\n sub_tokens = []\n while start < len(chars):\n end = len(chars)\n cur_substr = None\n while start < end:\n substr = ''.join(chars[start:end])\n if start > 0:\n substr = '##' + substr\n if substr in self.vocab:\n cur_substr = substr\n break\n end -= 1\n if cur_substr is None:\n is_bad = True\n break\n sub_tokens.append(cur_substr)\n start = end\n if is_bad:\n output_tokens.append(self.unk_token)\n else:\n output_tokens.extend(sub_tokens)\n if len(output_tokens) == 0:\n return [self.unk_token]\n return output_tokens", "def tokenize(text):\n source = list(text.rstrip().replace('\\n', ' '))\n return source", "def _featurize_py_func(text):\n label = np.array(text[-1], dtype=np.int32)\n words = word_tokenize(text[:-2])\n chars = np.zeros([max_sentence_length, max_word_length], dtype=np.int32)\n for i, word in enumerate(words):\n ids = [char_to_int.get(char, -1) for char in word]\n chars[i,:len(ids)] = ids\n return chars", "def tokenize(text):\n 
\n #regular expression to avoid pucntuations or any special character\n tokenizer = nltk.RegexpTokenizer(r\"\\w+\")\n \n #tokenizing text\n tokens = tokenizer.tokenize(text)\n \n #initiating lemmatizer\n lemmatizer = WordNetLemmatizer()\n \n #iteratating through each token\n clean_tokens = []\n for tok in tokens:\n \n #stop words are irrelevant in this context of classifying response\n if (tok.lower() not in stopwords.words(\"english\")):\n \n # lemmatizing, normalizing case, and removing leading/trailing white space\n clean_tok = lemmatizer.lemmatize(tok).lower().strip()\n clean_tokens.append(clean_tok)\n \n return clean_tokens", "def tokenize(text):\n \n #regular expression to avoid pucntuations or any special character\n tokenizer = nltk.RegexpTokenizer(r\"\\w+\")\n \n #tokenizing text\n tokens = tokenizer.tokenize(text)\n \n #initiating lemmatizer\n lemmatizer = WordNetLemmatizer()\n \n #iteratating through each token\n clean_tokens = []\n for tok in tokens:\n \n #stop words are irrelevant in this context of classifying response\n if (tok.lower() not in stopwords.words(\"english\")):\n \n # lemmatizing, normalizing case, and removing leading/trailing white space\n clean_tok = lemmatizer.lemmatize(tok).lower().strip()\n clean_tokens.append(clean_tok)\n \n return clean_tokens", "def _preprocess(self, txt_seq):\n input = []\n label = []\n punc = \" \"\n for token in txt_seq.split():\n if token in self.punc2id:\n punc = token\n else:\n input.append(self.word2id.get(token, self.word2id[\"<UNK>\"]))\n label.append(self.punc2id[punc])\n punc = \" \"\n input.append(self.word2id[\"<END>\"])\n label.append(self.punc2id[punc])\n input = torch.LongTensor(input)\n label = torch.LongTensor(label)\n # input = np.array(input)\n # label = np.array(label)\n return input, label", "def tokenize(self, raw_text):\n # TODO implement\n raw_tokens = word_tokenize(raw_text.decode('utf8'))\n return self.filter_tokens(raw_tokens)\n # return self.split_by(raw_tokens, '-')", "def process_text(text):\n text = text.translate(translator)\n tokens = word_tokenize(text)\n# if stem:\n stemmer = PorterStemmer()\n tokens = [stemmer.stem(t) for t in tokens]\n \n return tokens", "def parse_input(input_data, dictionary, model):\n vec_text = TextBlob(input_data).words.lower().lemmatize()\n vec_bow = dictionary.doc2bow(vec_text)\n return model[vec_bow]", "def predict(self, sample: InputSample) -> List[str]:\n doc = self.model(sample.full_text)\n tags = self._get_tags_from_doc(doc)\n if len(doc) != len(sample.tokens):\n print(\"mismatch between input tokens and new tokens\")\n\n return tags", "def tokenize(text):\n tokens = word_tokenize(text)\n words = [token for token in tokens if re.match(\"[a-zA-Z0-9]\", token)]\n no_stopwords = [word for word in words if word not in stopwords.words(\"english\")]\n lowercase_words = [word.lower() for word in no_stopwords]\n pos_tagged_words = pos_tag(lowercase_words)\n lemmatized_words = [WordNetLemmatizer().lemmatize(word, pos=convert_pos_tag(pos)) for word, pos in pos_tagged_words]\n return lemmatized_words", "def text_pipeline_func(self, batch, seq_len, vocab_path):\n vocab_path = os.path.abspath(vocab_path)\n token_ids = tokenize_sentence(batch, seq_len, vocab_path)\n return token_ids", "def tokenize(text, token):\n text = [token(x) for x in text]\n return text", "def tokenize(text):\n text = re.sub('[^A-Za-z0-9]', ' ', text)\n tokens = word_tokenize(text)\n lemmatizer = WordNetLemmatizer()\n\n clean_tokens = []\n for tok in tokens:\n clean_tok = 
lemmatizer.lemmatize(tok).lower().strip()\n clean_tokens.append(clean_tok)\n return clean_tokens", "def get_text_input(path):\n with open(path, 'r', encoding='utf8') as f:\n sent_dict = json.load(f)\n sents = [sent_dict[i] for i in sent_dict]\n tokenized_sents = [[word[0] for word in sent] for sent in sents]\n return tokenized_sents", "def process(self, example: str) -> List[torch.Tensor]:\n return torch.tensor(self._tokenizer.encode(example, max_length=self.max_seq_len))", "def preprocess_txt(txt, word_index, max_txt_size=255):\n wd_list = listify_txt(txt)\n encoded = [1]\n for word in wd_list:\n if word in word_index:\n encoded.append(word_index[word])\n else:\n encoded.append(word_index[\"<UNK>\"]) \n encoded = keras.preprocessing.sequence.pad_sequences([encoded], value=word_index[\"<PAD>\"], padding=\"post\", maxlen=max_txt_size)[0]\n return encoded", "def tokens_from_string(self, text):\n\n if self.level == \"character\":\n return list(text)\n elif self.level == \"word\":\n return nltk.word_tokenize(text)\n else:\n print(\"error: invalid level\")", "def tokenize(self, text):\n text = convert_to_unicode(text)\n text = self.clean_text(text)\n\n # This was added on November 1st, 2018 for the multilingual and Chinese\n # models. This is also applied to the English models now, but it doesn't\n # matter since the English models were not trained on any Chinese data\n # and generally don't have any Chinese data in them (there are Chinese\n # characters in the vocabulary because Wikipedia does have some Chinese\n # words in the English Wikipedia.).\n text = self._add_space_around_cjk_chars(text)\n\n orig_tokens = split_by_whitespace(text)\n split_tokens = []\n for token in orig_tokens:\n if self.do_lower_case:\n token = token.lower()\n token = remove_accents(token)\n split_tokens.extend(split_by_punctuation(token))\n\n output_tokens = split_by_whitespace(\" \".join(split_tokens))\n return output_tokens", "def test_tokenization():\n X = Tokenizer().transform([[\"A test\"]])\n assert X[\"corpus\"][0] == [\"A\", \"test\"]", "def __call__(self, text):\r\n if self.use_pos_tagging:\r\n return [self.wnl.lemmatize(t, self.pos(t)) for t in word_tokenize(self.clean(text))]\r\n else:\r\n return [self.wnl.lemmatize(t) for t in word_tokenize(self.clean(text))]", "def preprocess(self, inputs, is_list_of_str=False):\n return self.vocab.transform(inputs, is_list_of_str)", "def _tokenize(self, text: str) -> List[str]:\n return self.bpe.tokenize(text)", "def _single_encode_text(self, text: str) -> np.array:\n input_ids = self._tokenize_text(text)\n print(f\"id length: {len(input_ids[0])}\")\n try:\n text_embeddings = self.model(input_ids)\n except IndexError as e:\n print(f\"input text: {text}\")\n print(f\"input inds: {input_ids}\")\n raise e\n return text_embeddings.pooler_output.detach().numpy()", "def tokenize(text):\n text = re.sub(r\"[^a-zA-Z0-9]+\", \" \", text.lower())\n\n tokens = word_tokenize(text)\n lemmatizer = WordNetLemmatizer()\n\n clean_tokens = []\n for tok in tokens:\n clean_tok = lemmatizer.lemmatize(tok).lower().strip()\n clean_tokens.append(clean_tok)\n\n return clean_tokens", "def tokenize(self, text):\n\n text = convert_to_unicode(text)\n\n output_tokens = []\n for token in whitespace_tokenize(text):\n chars = list(token)\n if len(chars) > self.max_input_chars_per_word:\n output_tokens.append(self.unk_token)\n continue\n\n is_bad = False\n start = 0\n sub_tokens = []\n while start < len(chars):\n end = len(chars)\n cur_substr = None\n while start < end:\n substr = 
\"\".join(chars[start:end])\n ### joonho.lim @ 2019-03-15\n # if start > 0:\n # substr = \"##\" + substr\n # print ( '[substr]\\t%s\\t%s\\t%d\\t%d' % ( substr, substr in self.vocab, start, end))\n if substr in self.vocab:\n cur_substr = substr\n break\n end -= 1\n if cur_substr is None:\n is_bad = True\n break\n sub_tokens.append(cur_substr)\n start = end\n\n if is_bad:\n output_tokens.append(self.unk_token)\n else:\n output_tokens.extend(sub_tokens)\n output_tokens.insert(0, '[CLS]')\n output_tokens.append('[SEP]')\n return output_tokens", "def tokenize(self, text):\n rx = re.compile(r\"\\B(.)\")\n text = rx.sub(r\" ##\\1\", text)\n output_tokens = []\n\n for token in text.split():\n if token in self.vocab:\n output_tokens.append(token)\n else:\n output_tokens.append(self.unk_token)\n return output_tokens", "def tokenize(self, text):\n\n text = convert_to_unicode(text)\n\n output_tokens = []\n for token in whitespace_tokenize(text):\n chars = list(token)\n if len(chars) > self.max_input_chars_per_word:\n output_tokens.append(self.unk_token) # pragma: no cover\n continue # pragma: no cover\n\n is_bad = False\n start = 0\n sub_tokens = []\n while start < len(chars):\n end = len(chars)\n cur_substr = None\n while start < end:\n substr = \"\".join(chars[start:end])\n if start > 0:\n substr = \"##\" + substr # pragma: no cover\n if substr in self.vocab:\n cur_substr = substr\n break\n end -= 1 # pragma: no cover\n if cur_substr is None:\n is_bad = True # pragma: no cover\n break # pragma: no cover\n sub_tokens.append(cur_substr)\n start = end\n\n if is_bad:\n output_tokens.append(self.unk_token) # pragma: no cover\n else:\n output_tokens.extend(sub_tokens)\n return output_tokens", "def tokenize(self, texts: List[str]) -> List[Token]:\n raise NotImplementedError", "def _tokenize(self, text: str) -> List[str]:\n text = text.lower().strip()\n return self.bpe.tokenize(text)", "def sents_to_tokens(sents, wordset):\n padded_sentences = ([\"<s>\", \"<s>\"] + s + [\"</s>\"] for s in sents)\n # This will canonicalize words, and replace anything not in vocab with <unk>\n return np.array([utils.canonicalize_word(w, wordset=wordset) \n for w in utils.flatten(padded_sentences)], dtype=object)", "def tokenize(raw_text):\n tokenized_text = nltk.tokenize.word_tokenize(raw_text)\n return tokenized_text", "def transform_sequences(self,tokens_labels):\n X_train = []\n y_train = []\n for seq in tokens_labels:\n features_seq = []\n labels_seq = []\n for i in range(0, len(seq)):\n features_seq.append(self.word2features(seq, i))\n labels_seq.append(self.word2labels(seq[i]))\n X_train.append(features_seq)\n y_train.append(labels_seq)\n return X_train,y_train", "def tokenize(text: str):\n # Create list of word tokens\n token_list = []\n # nlp = init_nlp(TaggingMethod.SPACY, Lang.ES, size=DictionarySize.MEDIUM)\n doc = nlp(text)\n token_list = [token.text for token in doc]\n # for token in doc:\n # token_list.append(token.text)\n return token_list", "def tokenize_tag(text):\n return [tok for tok in single_tokenizer(text)]", "def tokenize_en(text):\n return [tok.text for tok in spacy_en.tokenizer(text)]", "def text2tokens(raw_text):\n clean_text = raw_text.lower().translate(translate_tab)\n tokens = [token.strip() for token in tokenizer.tokenize(clean_text)]\n tokens = [token for token in tokens if token not in eng_stopwords]\n stemmed_tokens = [stemmer.stem(token) for token in tokens]\n return [token for token in stemmed_tokens if len(token) > 2] # skip short tokens", "def preprocess_sent(sent):\n #tokenized = 
word_tokenize(sent.lower())\n tokenizer = Tok()\n tokenized = tokenizer.tokenize(sent.lower())\n return tokenized", "def generate_text(pmodel, num_generate, temperature, start_string):\n\n # Converting the start string to numbers (vectorizing)\n input_eval = [char2idx[s] for s in start_string]\n input_eval = tf.expand_dims(input_eval, 0)\n\n # Empty string to store the results\n text_generated = np.empty(1)\n\n # Here batch size = 1\n pmodel.reset_states()\n for i in range(num_generate):\n \n predictions = pmodel(input_eval)\n \n # remove the batch dimension\n predictions = tf.squeeze(predictions, 0)\n \n # using a multinomial distribution to predict the word returned by the model\n predictions = predictions / temperature\n predicted_id = tf.random.categorical(predictions, num_samples=1)[-1,0].numpy()\n \n # We pass the predicted word as the next input to the model\n # along with the previous hidden state\n input_eval = tf.expand_dims([predicted_id], 0)\n \n text_generated = np.vstack((text_generated, idx2char[predicted_id].tolist()))\n \n return text_generated", "def get_tokens(sent):\n return word_tokenize(sent)", "def tokenize_en(text):\n return [tok.text for tok in spacy_en.tokenizer(text)]", "def tokenize_en(text):\n return [tok.text for tok in spacy_en.tokenizer(text)]", "def tokenize_en(text):\n return [tok.text for tok in spacy_en.tokenizer(text)]", "def tokenize_en(text):\n return [tok.text for tok in spacy_en.tokenizer(text)]", "def featurize(self, data):\n \n bag_of_words = []\n\n tokens = data.split()\n\n for i in tokens:\n bag_of_words.append((i, True))\n\n return bag_of_words", "def gen_embedding(text, model, tokenizer):\n ### Tokenize the texts\n encoded_input = tokenizer(text, padding=True, truncation=True, max_length=512, return_tensors='pt')\n \n ### Encode the tokenized data with model\n with torch.no_grad():\n model_output = model(**encoded_input)\n \n ### Pool the outputs into a single vector\n sentence_embeddings = mean_pooling(model_output, encoded_input['attention_mask'])\n return sentence_embeddings", "def string_to_tokens(input_rows):\n EXPECTED_COLUMN_LENGTH = 3\n\n tokens = []\n sentence = []\n for row in input_rows:\n if row == '\\n':\n # End of sentence: append to tokens\n if sentence:\n # Only append if sentence is non-empty\n tokens.append(sentence)\n sentence = []\n continue\n\n row_tokens = row.split()\n assert len(row_tokens) is EXPECTED_COLUMN_LENGTH,\\\n 'Row {} has {} columns'.format(row, len(row_tokens))\n\n # Append this token to active sentence\n sentence.append(row_tokens)\n\n # Append the last sentence\n if sentence:\n tokens.append(sentence)\n\n return tokens", "def get_retokenized(tokenizer, text):\n return ' '.join(tokenizer.tokenize(text))", "def _tokenize_example(example, max_length, tokenizer, text_preprocessing=None):\n # Needs additional [CLS] and [SEP] tokens.\n max_length = max_length - 2\n new_examples = []\n new_example = InputExample(sentence_id=example.sentence_id, sub_sentence_id=0)\n for i, word in enumerate(example.words):\n if any([x < 0 for x in example.label_ids]):\n raise ValueError(\"Unexpected negative label_id: %s\" % example.label_ids)\n\n if text_preprocessing:\n word = text_preprocessing(word)\n subwords = tokenizer.tokenize(word)\n if (not subwords or len(subwords) > max_length) and word:\n subwords = [_UNK_TOKEN]\n\n if len(subwords) + len(new_example.words) > max_length:\n # Start a new example.\n new_examples.append(new_example)\n last_sub_sentence_id = new_example.sub_sentence_id\n new_example = InputExample(\n 
sentence_id=example.sentence_id,\n sub_sentence_id=last_sub_sentence_id + 1)\n\n for j, subword in enumerate(subwords):\n # Use the real label for the first subword, and pad label for\n # the remainings.\n subword_label = example.label_ids[i] if j == 0 else _PADDING_LABEL_ID\n new_example.add_word_and_label_id(subword, subword_label)\n\n if new_example.words:\n new_examples.append(new_example)\n\n return new_examples", "def preprocess_input(text, tokenizer, max_id):\n X = np.array(tokenizer.texts_to_sequences(text)) - 1\n encoded = tf.one_hot(X, depth=max_id)\n return encoded", "def tokenize(text: str):\n result = []\n for s in text:\n result.append(s)\n return result", "def perform_NER(self,text):\n X_test = []\n documents = [text]\n sequences = tokenize_fa(documents)\n word_sequences = []\n for seq in sequences:\n features_seq = []\n labels_seq = []\n sentence = []\n for i in range(0, len(seq)):\n features_seq.append(self.word2features(seq, i))\n labels_seq.append(self.word2labels(seq[i]))\n sentence.append(seq[i][0])\n X_test.append(features_seq)\n word_sequences.append(sentence)\n y_pred = [self.crf_model.tag(xseq) for xseq in X_test]\n #y_pred = self.crf_model.tag(X_test)\n final_sequences = []\n for i in range(0,len(y_pred)):\n sentence = []\n for j in range(0,len(y_pred[i])):\n sentence.append((word_sequences[i][j],y_pred[i][j]))\n final_sequences.append(sentence)\n return final_sequences", "def prepare_inputs(token_mapping, w2v_W, w2v_U, sentences):\n tokens = [tokenize(token_mapping, sentence) for sentence in sentences] \n \n depth = len(token_mapping)\n one_hot_tokens = []\n for sentence in tokens:\n one_hot_sentence = []\n for i, token in enumerate(sentence):\n if token != token_mapping['#UNK#']:\n one_hot_sentence.append(one_hot_encode(token, depth))\n else:\n if i <= 2:\n context_tokens = sentence[:i] + sentence[i+1:i+3]\n else:\n context_tokens = sentence[i-2:i] + sentence[i+1:i+3]\n context_one_hot = [one_hot_encode(token, depth) for token in context_tokens]\n context_mean = np.mean(np.asarray(context_one_hot), axis=0)\n one_hot_sentence.append(context_mean)\n one_hot_tokens.append(one_hot_sentence)\n \n one_hot_tokens = [np.asarray(ls) for ls in one_hot_tokens]\n vec_tokens = [word2vec(w2v_W, w2v_U, sentence) for sentence in tqdm(one_hot_tokens, desc='Vectorizing tokens')]\n return vec_tokens", "def preprocess(input_str: str, tokenizer, is_observation=False, lower_case=True) -> List[str]:\n if input_str is None:\n return [\"nothing\"]\n\n input_str = input_str.replace(\"\\n\", ' ')\n if input_str.strip() == \"\":\n return [\"nothing\"]\n\n if is_observation:\n if \"$$$$$$$\" in input_str:\n input_str = \"\"\n if \"-=\" in input_str:\n input_str = input_str.split(\"-=\")[0]\n\n input_str = input_str.strip()\n if len(input_str) == 0:\n return [\"nothing\"]\n\n tokens = [t.text for t in tokenizer(input_str)]\n\n if lower_case:\n tokens = [t.lower() for t in tokens]\n\n return tokens", "def tokenize_fast(self, text):\n\n tokens = self.tokenizer_cn.tokenize(text)\n\n tokens, labels = random_word(tokens, self.tokenizer_cn)\n\n tokens = self.tokenizer_cn.convert_tokens_to_ids(tokens)\n\n assert len(tokens) == len(labels)\n\n return tokens, labels", "def tokenize(self, text):\n text = convert_to_unicode(text)\n text = self._clean_text(text)\n\n # This was added on November 1st, 2018 for the multilingual and Chinese\n # models. 
This is also applied to the English models now, but it doesn't\n # matter since the English models were not trained on any Chinese data\n # and generally don't have any Chinese data in them (there are Chinese\n # characters in the vocabulary because Wikipedia does have some Chinese\n # words in the English Wikipedia.).\n text = self._tokenize_chinese_chars(text)\n\n orig_tokens = whitespace_tokenize(text)\n split_tokens = []\n for token in orig_tokens:\n if self.do_lower_case:\n token = token.lower()\n token = self._run_strip_accents(token)\n if self.split_on_punc:\n split_tokens.extend(self._run_split_on_punc(token))\n else:\n split_tokens.append(token) # pragma: no cover\n\n output_tokens = whitespace_tokenize(\" \".join(split_tokens))\n return output_tokens" ]
[ "0.70793176", "0.6935871", "0.6846465", "0.6740176", "0.66016084", "0.6601571", "0.65836084", "0.6577888", "0.656995", "0.6567432", "0.65455496", "0.654399", "0.6511661", "0.65019155", "0.6500425", "0.649716", "0.64943504", "0.6473806", "0.6438927", "0.64235365", "0.64078707", "0.64054835", "0.640119", "0.63890177", "0.63861823", "0.6371208", "0.63663167", "0.63476497", "0.6334111", "0.6331364", "0.631463", "0.6312944", "0.6308807", "0.630789", "0.6307233", "0.630018", "0.62976986", "0.6285059", "0.6280509", "0.62459886", "0.6244595", "0.62427217", "0.62324464", "0.6229319", "0.6227489", "0.6201981", "0.6193137", "0.61819124", "0.61819124", "0.6180774", "0.61427397", "0.6137529", "0.6136192", "0.6117758", "0.6105454", "0.6104942", "0.60977674", "0.60976315", "0.60945034", "0.60933465", "0.6092022", "0.6084625", "0.60810965", "0.60723066", "0.6067511", "0.6065509", "0.6062656", "0.60607076", "0.6060689", "0.6046257", "0.60418874", "0.6032085", "0.6028393", "0.6023562", "0.6020291", "0.60070646", "0.60063434", "0.60058826", "0.5995837", "0.5993117", "0.5978322", "0.5976037", "0.5975508", "0.5974922", "0.59702545", "0.59702545", "0.59702545", "0.59702545", "0.596868", "0.59476763", "0.5945446", "0.59434247", "0.59432596", "0.5941751", "0.59354264", "0.59339166", "0.59317344", "0.5930339", "0.59300435", "0.5927955" ]
0.8465603
0
Return True if word passes filter
def filter1(word):
    if not word:
        return False
    w = word.lower()
    if w in STOPWORDS:
        return False
    return True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def apply_word_filter(self, fn):\n self._apply_filter(lambda ng, f: any(fn(w) for w in ng))", "async def wordfilter(self, ctx):\n pass", "async def wordfilter_test(self, ctx, *, message):\n found = self.test_sentence(message)\n if found:\n await ctx.send(f\"Message contains `{found}`\")\n else:\n await ctx.send(\"Couldn't detect any filtered words\")", "def filter_word(text):\n text = normalize(text)\n if re.match(r'^\\p{P}+$', text):\n return True\n if text.lower() in STOPWORDS:\n return True\n return False", "def can_recept(self, text, *args, **kwargs):\n for each_cur in self.flat_norm.keys():\n if each_cur.lower() in text.lower():\n return True\n\n else:\n return False", "def match(self, filter_text):\n\n return filter_text.lower() in self.artist.lower() or \\\n super().match(filter_text)", "def match(self, filter_text):\n\n return filter_text.lower() in self.director.lower() or \\\n filter_text.lower() in self.actor.lower() or \\\n super().match(filter_text)", "def match(self, filter_text):\n\n return filter_text.lower() in self.author.lower() or \\\n super().match(filter_text)", "def check(self, word: str) -> bool:\n for s in (word, word.lower(), word.capitalize()):\n if s in self.words or s in self.ignored_words:\n return True\n return False", "def filter_tokens(x):\n if x in _STOP_WORDS:\n return False\n if not re.search(r'\\w', x):\n # Does not contain at least one word character\n return False\n return True", "def filter(word):\n if word.strip() not in stop: # Print word only if it is not a stop word\n print(word.strip())", "def match(self, filter_text):\n return filter_text.lower() in self.name.lower() or \\\n filter_text.lower() == self.isbn.lower() or \\\n filter_text.lower() in (str(tag).lower() for tag in self.tags)", "def retweet_filter(self, text):\n return not text.lower().startswith('rt')", "def text_is_relevant(self, text):\n for word in text:\n if word in self.relevant_words:\n return True\n return False", "def search(self, word):\n for wc in self.get_wildcards(word):\n # Don't forget word not in self.all_words\n if wc in self.wc_dict and (self.wc_dict[wc] > 1 or word not in self.all_words) :\n return True\n return False", "def is_simple (self, phrase):\r\n\r\n return not self.contains(phrase,'()&|>#')", "def __contains__(self, word):\n if word in self.vocab:\n return True\n else:\n char_ngrams = compute_ngrams(word, self.min_n, self.max_n)\n return any(ng in self.ngrams for ng in char_ngrams)", "def is_unimportant(word):\n return word in ['.', '!', ',', ] or '\\'' in word or word in stop_words", "def search(self, word):", "def check_word(words, word):\r\n if word in words:\r\n return True\r\n else:\r\n return False", "def two_word_finder(word1,word2,text):\r\n word1 = word1.lower()\r\n word2 = word2.lower()\r\n text = str(text).lower()\r\n if word1 and word2 in text:\r\n return True #return text to see specific tweets\r\n return False", "def is_stop_word(word):\n return word in final_stop_words", "def onlyuse(word, letters):\r\n truth = True\r\n for letter in word:\r\n truth = letter in letters and truth\r\n return truth", "def is_stopword(self, word, language):", "def isValid(text):\n return bool(re.search(r'\\b%s\\b' %new_word, text, re.IGNORECASE))", "def text_search(self, text, stuff_to_cop):\n if any(ext in text for ext in stuff_to_cop):\n return(True)\n else:\n return(False)", "def search(self, word):\n if len(word) not in self.length_set:\n return False\n for i in self.mutate(word):\n if i in self.s:\n return True\n return False", "def include_word(word, 
chardict):\n if (all(char in chardict.keys() for char in word)) & (len(word)<=25):\n # Some word2vec entries are all capitals and generally are acronyms.\n # This is unlikely to be learnable\n if not word.isupper():\n return True\n\n return False", "def filter(self, ffun):\n # BEGIN\n lst = []\n for item in WordSet(self.text).words():\n # if len(item) == len(ffun):\n # lst.append(item)\n if ffun(item) == True:\n lst.append(item)\n return lst\n\n # END", "def search(self, word):\n length = len(word)\n if length == 1:\n for letter in string.ascii_lowercase:\n key = \"{}/{}\".format(1, letter)\n if key in self.origin and letter != word:\n return True\n return False\n\n key = \"{}/{}\".format(len(word), word[0])\n ls = self.origin.get(key, [])\n if len(ls) == 0:\n return False\n\n for origin in ls:\n if self.only_modify_one_char(word, origin):\n return True\n return False", "def check_word(self, word):\n word = word.lower().strip()\n return not word or word in self.dictionary", "def word_finder(word, text):\r\n word = word.lower()\r\n text = str(text).lower()\r\n match = re.search(word, text)\r\n if match:\r\n return True\r\n return False", "def avoids(word, forbidden):\n for letter in word:\n if letter in forbidden:\n return False\n return True", "def is_a_word(self, word):\n word = word.lower()\n if word in self.data:\n return True\n else:\n # for char in word:\n # if char.isnumeric():\n # return True\n word = list(word)\n numbers = len([x for x in word if x.isnumeric()])\n # # letters = len([x for x in word if x.isalpha()])\n if numbers >= 2 or numbers/len(word) > 0.4:\n return True\n return False", "def _words_in_text(word, text):\n\n regexword = \"\\\\b\" + word + \"\\\\b\"\n\n return True if re.search(regexword, text, re.IGNORECASE) else False", "def test_filter_hot_words(self):\n\n recieved_hot_words = self.sp.filter_hot_words(HOT_WORDS)\n self.assertEqual(recieved_hot_words, DESIRED_FILTERED_HOT_WORDS)", "def check_words(dictionary_, start_word, stop_word):\n if dictionary_.is_real_word(start_word) is False:\n print(\"Word {} not found in the dictionary\".format(start_word))\n return False\n if dictionary_.is_real_word(stop_word) is False:\n print(\"Word {} not found in the dictionary\".format(stop_word))\n return False\n return True", "def _pass_filter(self, match: str, filter_: Tuple[str, Collection[str]]) -> bool:\n match_l = match.lower()\n last_match_idx = match_l.find(filter_[0])\n\n while last_match_idx != -1:\n # Check args\n end_of_if = match_l.find(\"/\", last_match_idx)\n # This should be aug.get (vars are not used e.g. 
parser.aug_get)\n expression = self.aug.get(match[:end_of_if] + \"/arg\")\n\n if expression.startswith(\"!\"):\n # Strip off \"!\"\n if expression[1:] in filter_[1]:\n return False\n else:\n if expression not in filter_[1]:\n return False\n\n last_match_idx = match_l.find(filter_[0], end_of_if)\n\n return True", "def accepts(self, word: Iterable[str]) -> bool:\n if self._enfa is None:\n self._enfa = self.to_epsilon_nfa()\n return self._enfa.accepts(word)", "def isValid(text):\n\n\n return any(word in text for word in [u\"我好看么\", u\"称赞\"])", "def is_valid(self, text):\n return any(p.lower() in text.lower() for p in self.get_phrases())", "def test_WikiWordFilter(self):\n tkns = get_tokenizer(\"en_US\", filters=(WikiWordFilter,))(self.text)\n out = [t for t in tkns]\n exp = [(\"this\", 0), (\"text\", 5), (\"with\", 10), (\"http\", 15), (\"url\", 22), (\"com\", 26),\n (\"and\", 30), (\"ftp\", 62), (\"my\", 68), (\"site\", 71), (\"com\", 76), (\"au\", 80),\n (\"some\", 83), (\"file\", 88), (\"not\", 103), (\"quite\", 108),\n (\"a\", 114), (\"url\", 116), (\"with\", 134), (\"an\", 139), (\"aemail\", 142),\n (\"address\", 149), (\"as\", 157), (\"well\", 160)]\n self.assertEqual(out, exp)", "def valid(phrase):\n words = []\n series_of_words = phrase.split(' ')\n words.append(series_of_words.pop())\n for word in series_of_words:\n if word in words:\n return False\n words.append(word)\n return True", "def uses_only(word, available):\n for letter in word:\n if letter not in available:\n return False\n return True", "def search(self, word: str) -> bool:\n curr_chars = self.chars\n for c in list(word):\n if c not in curr_chars:\n return False\n curr_chars = curr_chars[c]\n return self.end_of_word in curr_chars", "def canada_query(text):\n return 'canada' in text.lower()", "def _usable_word(self, filtered_words):\n usable = set()\n for word in filtered_words:\n counter = 0\n for x in range(0, len(self._to_word)):\n if word[x] == self._from_word[x]:\n counter += 1\n if counter == len(self._to_word) - 1:\n usable.add(word)\n return usable", "async def wordfilter_list(self, ctx):\n await ctx.send(f'Current filtered words ({len(self.words)}):\\n||{\", \".join(self.words)}||')", "def search(self, word: str) -> bool:\n node = self.find(word)\n return node and node.is_word", "def isWord(wordList, word):\n word = word.lower()\n word = word.strip(\" !@#$%^&*()-_+={}[]|\\\\:;'<>?,./\\\"\")\n return word in wordList", "def isWord(wordList, word):\n word = word.lower()\n word = word.strip(\" !@#$%^&*()-_+={}[]|\\\\:;'<>?,./\\\"\")\n return word in wordList", "def isWord(wordList, word):\n word = word.lower()\n word = word.strip(\" !@#$%^&*()-_+={}[]|\\\\:;'<>?,./\\\"\")\n return word in wordList", "def match(self, sentence) -> bool:\r\n for word in self.word_list:\r\n if word.lower() in sentence.lower():\r\n return True\r\n return False", "def is_word(wordlist, word):\n word = word.lower()\n word = word.strip(\" !@#$%^&*()-_+={}[]|\\:;'<>?,./\\\"\")\n return word in wordlist", "def search(self, word: str) -> bool:\n cur = self.root\n for letter in word:\n if letter not in cur:\n return False\n cur = cur[letter]\n if \"isWord\" not in cur:\n return False\n return True", "def dunkin_query(text):\n\n return 'dunkin' in text.lower()", "def check_common_word(song: Song, result: Result) -> bool:\n\n sentence_words = slugify(song.name).split(\"-\")\n to_check = slugify(result.name).replace(\"-\", \"\")\n\n for word in sentence_words:\n if word != \"\" and word in to_check:\n return True\n\n return False", "def match(self, 
words):\n return words == self.words(len(words))", "def search(self, word: str) -> bool:\n temp=self.root\n \n for char in word:\n if(not temp.children[ord(char)-ord('a')]):\n return False\n temp=temp.children[ord(char)-ord('a')]\n \n if(temp and temp.endOfWord==True):\n return True\n \n return False", "def __filter( self, text ):\n return text", "def check_message(self, message):\n for word in self.bad_words:\n if word in message:\n return -1\n for word in self.filter_words:\n if word in message:\n return 0\n return 1", "def search(self, word):\n lenw = len(word)\n if lenw not in self.bag: return False\n return any([self.equal_to(word, item) for item in self.bag[lenw]])", "def check_if_word_fits_the_context(self, context, token, replacement):\n \n if len(context) == 3:\n if (context[0] + ' ' + replacement).lower() in self.bigrams_brown_frequency_dictionary.keys() or (replacement + ' ' + context[2]).lower() in self.bigrams_brown_frequency_dictionary.keys() :\n return True\n else:\n return False\n else:\n return False", "def finalfrase(self, word):\n if word == '.' or word == ';' or word == ',' or word == '?' or word == '!':\n return True\n return False", "def stop_word(w): # local feature\n return (w in swl)", "def replacement_allowed(self, word):\n not_list = ['was', 'were', 'is', 'are', 'have', 'has', 'had']\n for not_word in not_list:\n if word == not_word:\n return False\n return True", "def isValid(text):\n return bool(re.search(r'\\blight\\b', text, re.IGNORECASE))", "def substring_match(recipe, word):\n if names_only:\n line = recipe.name\n else:\n line = str(recipe)\n\n if not case:\n word = word.lower()\n line = line.lower()\n\n return line.find(word) != -1", "def is_word(self, word):\r\n\r\n return self.data(word) is not None", "def isExcludedFromMerge(self, word):\n #print word\n return ((self.isExcludedWord(word) != False) \n or (self.isMeasure(word) != False) \n or (self.isShortWord(word) != False))", "def fn(query):\n i = 0\n for x in query:\n if i < len(pattern) and x == pattern[i]: i += 1\n elif x.isupper(): return False\n return i == len(pattern)", "def or_sum (self, phrase):\r\n for x in phrase:\r\n if x:\r\n return True\r\n return False", "def contains (self,phrase,chars):\r\n\r\n for x in chars:\r\n\r\n if x in phrase:\r\n return True\r\n return False", "def avoids (word, frbdn_letters):\n for letter in frbdn_letters:\n if letter in word: \n return False\n return True", "def whole_word_matches(self):\n start = '1.0'\n while True:\n start = self.text.search(self.term, start, stopindex=tk.END)\n if not start:\n break\n end = start + ' wordend'\n # whole word includes a space before\n found = self.text.get(start + '-1c', end)\n if found == ' ' + self.term:\n self.text.tag_add('found', start, end)\n start = end", "def whole_word_matches(self):\n start = '1.0'\n while True:\n start = self.text.search(self.term, start, stopindex=tk.END)\n if not start:\n break\n end = start + ' wordend'\n # whole word includes a space before\n found = self.text.get(start + '-1c', end)\n if found == ' ' + self.term:\n self.text.tag_add('found', start, end)\n start = end", "def check_word(self, word):\n first_letter, rest = word[0], word[1:]\n\n for possible_start in self._find_letter(first_letter):\n if self._check_word(possible_start, rest):\n return True\n\n return False", "def search(self, w: str) -> bool:\n if not w:\n return self.end\n return w[0] in self.d and self.d[w[0]].search((len(w) > 1 and w[1:]) or '')", "def match(self, filter):\n return filter in self.tags or filter in 
self.memo", "def search(self, word: str) -> bool:\n\n temp = self.start\n\n for i in range(len(word)):\n \n if temp.children[ord(word[i]) - ord('a')] is None:\n return False\n temp = temp.children[ord(word[i])-ord('a')]\n if i+1 == len(word) and temp.end == True:\n return True\n\n return False", "def search(self, word: str) -> bool:\n node = self._traverse(word)\n return node.word if node else False", "def make_query(term):\n def search(text):\n s=term.lower()\n if s in text.lower():\n return True\n return False\n return search", "def is_offensive(drug_name, bad_words):\n\n for bad_word in bad_words:\n if bad_word in drug_name:\n return True\n return False", "def check_match(self, word_found,word): \r\n self.count = 0\r\n for char in self.word_found:\r\n if char != self.word[self.count]:\r\n return False\r\n self.count +=1\r\n #print(self.count)\r\n \r\n return True", "def containing(letter, text):\n return([word for word in text if word.count(letter) >= 1])", "def verify(self, word):\n if len(word) < 2:\n return (True, word)\n\n if word.lower() in self.replacement_words.keys():\n return (True, self.replacement_words[word.lower()])\n\n if word.lower() in self.word_list:\n return (True, word)\n\n if word.lower() in self.ignored_words:\n return (True, word)\n\n return (False, word)", "def check_word(word):\n\n return bool(re.match(r'^[a-z]+$', word))", "def search(self, word: str) -> bool:\n\n # # for candidate in self.buckets[len(word)]:\n # # for a, b in zip(word, candidate):\n # # result = any(sum(a!=b))\n return any(sum(a!=b for a, b in zip(word, candidate)) == 1\n for candidate in self.buckets[len(word)])\n #\n # for candidate in self.buckets[len(word)]:\n # sum = 0\n # for a, b in zip(word, candidate):\n # sum += (a!=b)\n # if sum == 0:\n # return True\n # return False", "def check_word(self, word):\n\n return self.graph.is_in(word)", "def grep_words(words, file_name):\n data = read_file(file_name, split_to_lines=False)\n for word in words:\n if word in data:\n return True\n return False", "def __isStopWord__(self, word):\n self.stopWords = ('the', 'in', 'of', 'from', 'at', 'it')\n for stopWord in self.stopWords:\n if stopWord == word:\n return ('stop', word), True\n return None, False", "def match(self, sentence) -> bool:\r\n if (any(word[0] in sentence.lower() for word in self.word_list if word[1] == \"partial\") or any(\r\n word[0].lower() == sentence.lower() for word in self.word_list if word[1] == \"full\")) and not any(\r\n word[0] in sentence.lower() for word in self.word_list if word[1] == \"not\"):\r\n return True\r\n else:\r\n return False", "def isWordPartOf(self,word,wordlist):\n\t\tfor w in wordlist:\n\t\t\tif w in self._part_of_badword: \n\t\t\t\treturn True \t \n\t\t\t\tif w.startswith(word) or w.endswith(word):\n\t\t\t\t\tself._part_of_badword[w] = True \n\t\t\t\t\treturn True\n\t\treturn False", "def passes_custom_cutoff(self, string_filterset): \r\n filterset = []\r\n for item in string_filterset:\r\n try: filterset.append(eval(item))\r\n except: filterset.append(False)\r\n return all(filterset)", "def _validate_word(self, word):\n return type(word) == type('a') and set(self._letters) == set(list(word))", "def contains(self, term):\n\t\tif term in self.textFile:\n\t\t\treturn True\n\t\t\n\t\treturn False", "def article_contains_word(article,\n keyword,\n preprocess_type=PreprocessWordType.LEMMATIZE):\n for word in article.words:\n preprocessed_word = query_utils.preprocess_word(word,\n preprocess_type)\n if keyword == preprocessed_word:\n return True\n return False", 
"def isExcluded(self, word):\n #print word\n return ((self.isExcludedWord(word) != False) \n or (self.isMeasure(word) != False) \n or (self.isAllDigits(word) != False) \n or (self.isShortWord(word) != False))", "def good_word(self, word):\r\n return word.strip().lower()", "def basic_check(word):\n if word[-1] == \"b\" or word[-1] == \"g\":\n return False\n consonant_counter = 0\n for char in word:\n if char in VOWELS:\n consonant_counter = 0\n else:\n consonant_counter += 1\n if consonant_counter >= 3:\n return False\n return True" ]
[ "0.74591035", "0.7444302", "0.7269609", "0.71056175", "0.7085896", "0.69943607", "0.6989586", "0.68581426", "0.6818804", "0.67899287", "0.6701206", "0.6658593", "0.66539127", "0.6639391", "0.6632937", "0.66313547", "0.6630455", "0.6528471", "0.65281737", "0.6523878", "0.6522145", "0.6517991", "0.6516359", "0.64933324", "0.6491399", "0.6485058", "0.6468987", "0.6460723", "0.6442059", "0.6430321", "0.6420437", "0.64195997", "0.64187974", "0.64184177", "0.6405093", "0.64043486", "0.639189", "0.637418", "0.63699794", "0.63685197", "0.6360047", "0.63495076", "0.63482046", "0.63301516", "0.6303901", "0.6291473", "0.62892705", "0.6281067", "0.627287", "0.6263683", "0.6263683", "0.6263683", "0.6244453", "0.6232278", "0.6230015", "0.6229419", "0.62284714", "0.61938256", "0.61788434", "0.6173942", "0.61730653", "0.61704934", "0.61646885", "0.6162858", "0.6161748", "0.6161083", "0.61533034", "0.6150166", "0.61399037", "0.61394733", "0.6135124", "0.6132499", "0.61304855", "0.61273265", "0.61247253", "0.61247253", "0.6123525", "0.61171216", "0.61161286", "0.6103147", "0.6086233", "0.60800207", "0.60790366", "0.6058955", "0.6058181", "0.6056102", "0.6056086", "0.60553145", "0.60514057", "0.6049076", "0.60385025", "0.603459", "0.6028813", "0.60281324", "0.6021044", "0.60104", "0.5996041", "0.5995525", "0.5994717", "0.59940994" ]
0.78754514
0
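A minimal usage sketch for the filter1 document in this row: it drops empty strings and stop words from a token list. STOPWORDS and the sample sentence below are assumptions for illustration, and filter1 is restated here only so the sketch runs on its own; the original code presumably defines STOPWORDS elsewhere.

# Hypothetical stand-alone sketch; STOPWORDS and the sample sentence are assumptions.
STOPWORDS = {"the", "a", "an", "and", "or", "of"}

def filter1(word):
    # copied from the document field of this row
    if not word:
        return False
    w = word.lower()
    if w in STOPWORDS:
        return False
    return True

tokens = "The quick brown fox and the lazy dog".split()
kept = [w for w in tokens if filter1(w)]
# kept == ['quick', 'brown', 'fox', 'lazy', 'dog']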
Return processed list of words from raw text input To be honest, we're currently using sklearn CountVectorizer and keras text_to_word_sequence instead of this function.
def process_text(text, tokenize=tokenize1, filter=filter1, stem=None, lower=True):
    assert tokenize, "Must provide tokenize method for preprocess_text"
    if not text:
        return []
    if lower:
        text = text.lower()
    words = tokenize(text)
    if filter:
        words = [w for w in words if filter(w)]
    if stem:
        words = [stem(w) for w in words]
    return words
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def preprocessing(raw_text):\n words_list = tokenize(raw_text)\n words_list = remove_stop_words(words_list)\n words_list = remove_punctuations(words_list)\n words_list = lemmatization(words_list)\n return words_list", "def words(self, text):\n return re.findall(r'\\w+', text)", "def tokenize_keras(raw_data):\n from keras.preprocessing.text import text_to_word_sequence\n return [text_to_word_sequence(d) for d in raw_data]", "def text_to_wordlist(raw_text, remove_stopwords=False):\n\n # Pre-processing. Removing HTML\n # TODO: Do further pre-processing (eg. remove links)\n text = BeautifulSoup(raw_text).get_text()\n\n # Remove non-letters\n text = re.sub(\"[^a-zA-Z]\",\" \", text)\n\n # Convert words to lower case and split them\n words = text.lower().split()\n\n # Optionally remove stop words (false by default)\n if remove_stopwords:\n stops = set(stopwords.words(\"english\"))\n words = [w for w in words if not w in stops]\n\n return(words)", "def process_text(self, text, lemma=False):\n processed_text = TextGraph.nlp(text.lower())\n words = [t.text.strip() if not lemma else t.lemma_ for t in processed_text if not t.is_punct]\n return words", "def txt_to_word_list(text):\r\n return [w for w in text.split()]", "def words(self):\n return self.text.split()", "def process(self, sentence):\n\n # selects onlt alphanumeric words\n words = self.tokenizer.tokenize(sentence)\n\n # lemmatize the words\n words = [self.lemmatizer.lemmatize(word) for word in words]\n\n # lowercase all the words and remove single characters\n words = [word.lower() for word in words if len(word) > 1]\n\n # remove the stopwords using NLTK\n words = [word for word in words if word not in stopwords.words('english')]\n\n return words", "def tidy_text(self, text: List[str],\n ) -> List[str]:\n \n if self.case_fold:\n text = list(map(lambda t: t.lower(), text))\n \n if self.special_syms:\n text = list(map(self.handle_special_symbols, text))\n \n text = list(map(self.handle_kw_phrases, text))\n text = list(map(self.depluralise_keywords, text))\n\n if self.tokenise:\n text = list(map(word_tokenize, text))\n \n if self.stem:\n text = list(map(self.stemming, text))\n \n if self.lemmatise:\n text = list(map(self.lemmatisation, text))\n \n if self.del_stop_words:\n stop_words = subsample_frequent_words(text, set(self.target_words))\n text = list(map(\n lambda t: self.handle_stop_words(t, stop_words), text\n ))\n \n return text", "def text_to_wordlist(text, remove_stopwords=True):\n text = re.sub(\"[^a-zA-Z]\",\" \", text)\n words = text.lower().split()\n if remove_stopwords:\n stops = set(stopwords.words(\"english\"))\n words = [w for w in words if not w in stops]\n\n b=[]\n stemmer = english_stemmer\n for word in words:\n b.append(stemmer.stem(word))\n return(b)", "def get_words(self, cleaner):\n return cleaner.clean(self.get_text())", "def preprocess(text):\n text_words = word_tokenize(text)\n words = []\n ix = 0\n for rawWord in text_words:\n lowerWord = rawWord.lower()\n if lowerWord not in stop_words_slovene:\n words.append((lowerWord, ix))\n ix += 1\n return words", "def process_text(input_txt):\r\n # if input is string\r\n tidy_txt = remove_pattern(input_txt,\"@[\\w]*\")\r\n ##=============================== if input is dataframe ====================##\r\n # tidy_txt = np.vectorize(remove_pattern)(input_txt,\"@[\\w]*\") #\r\n ##==========================================================================##\r\n # remove special characters\r\n tidy_txt = tidy_txt.replace(\"[^a-zA-Z#]\",\" \")\r\n # split into words\r\n 
tokenized_txt = tidy_txt.split()\r\n # perform stemming\r\n stemmer = PorterStemmer()\r\n tokenized_txt = [stemmer.stem(i) for i in tokenized_txt]\r\n print(tokenized_txt)\r\n # joining words back\r\n tokenized_txt = ' '.join(tokenized_txt)\r\n return tokenized_txt", "def process_text(self, text):\n\n flags = (re.UNICODE if sys.version < '3' and type(text) is unicode # noqa: F821\n else 0)\n pattern = r\"\\w[\\w']*\" if self.min_word_length <= 1 else r\"\\w[\\w']+\"\n regexp = self.regexp if self.regexp is not None else pattern\n\n words = re.findall(regexp, text, flags)\n # remove 's\n words = [word[:-2] if word.lower().endswith(\"'s\") else word\n for word in words]\n # remove numbers\n if not self.include_numbers:\n words = [word for word in words if not word.isdigit()]\n # remove short words\n if self.min_word_length:\n words = [word for word in words if len(word) >= self.min_word_length]\n\n stopwords = set([i.lower() for i in self.stopwords])\n if self.collocations:\n word_counts = unigrams_and_bigrams(words, stopwords, self.normalize_plurals, self.collocation_threshold)\n else:\n # remove stopwords\n words = [word for word in words if word.lower() not in stopwords]\n word_counts, _ = process_tokens(words, self.normalize_plurals)\n\n return word_counts", "def _get_word_list(text):\n return re.findall('\\w+', text)", "def get_words(text):\n return re.compile('\\w+').findall(text)", "def get_words(self):\n return [self.id2word[idx] for idx in range(len(self))]", "def _words(self):\n regex = r'\\b\\w+\\b'\n for word in re.findall(regex, self.text):\n yield word", "def list_of_words(self):\n\t\treturn str.split(re.sub(r'\\W+', ' ', self.body.encode('ascii', 'replace')))", "def generate_corpus(self, text):\n if isinstance(text, str):\n sentences = self.sentence_split(text)\n else:\n sentences = []\n for line in text:\n sentences += self.sentence_split(line)\n passing = filter(self.test_sentence_input, sentences)\n runs = map(self.word_split, passing)\n return runs", "def prepare_words(raw_text, bApplyStemmer=True, bCheckStopWords=False):\n\n raw_text = re.sub(r'^http?:\\/\\/.*[\\r\\n]*', '', raw_text, flags=re.MULTILINE) # remove web-adresses\n raw_text = re.sub(r'\\\\.', ' ', raw_text) # remove all control-characters: \\n, \\t ...\n # http://stackoverflow.com/questions/4324790/removing-control-characters-from-a-string-in-python\n\n raw_text = re.sub(r'\\([^()]*\\)', ' ', raw_text)\n\n letters = re.sub(\"[^a-zA-Z]\", \" \", raw_text) # remove everything that isn't a letter\n\n words = letters.lower().split() # write words into array\n\n if bCheckStopWords:\n words = [w for w in words if w not in stopwords.words(\"english\") and w not in stopwords.words(\"german\")] # remove \"filler\" words\n\n if bApplyStemmer:\n # see: http://www.nltk.org/howto/stem.html for more details\n stemmer = PorterStemmer()\n singles = [stemmer.stem(word) for word in words] # only allow words with a length higher than 2 if len(word) > 2\n singles = [single for single in singles if len(single) > 2]\n words = \" \".join(singles)\n\n return words # return the words as a string, separator: space", "def full_text_words(self):\n\n if self._full_text_words == []:\n for s in self.full_text():\n for w in s.split():\n self._full_text_words.append(w)\n\n return self._full_text_words", "def target_words(self) -> List[str]:\n return list(map(\n lambda w: self.spaces[w.lower()] \n if w.lower() in self.spaces else w.lower(), \n self.keywords\n ))", "def process_text(text):\n words = word_tokenize(text)\n return [word.lower() 
for word in words if word not in string.punctuation]", "def _preprocess_text(text: str) -> List[List[str]]:\n\n # replace all except characters_to_keep with space\n characters_to_keep = '[^\\n:-äÄöÖåÅA-Za-z0-9]'\n text = re.sub(characters_to_keep,' ', text )\n\n # split the whole text to list of strings\n sentences = text.splitlines()\n\n # split each string further to list of words\n sentences = [sentence.split(' ') for sentence in sentences if sentence.strip()]\n\n words = _analyze_sentences(sentences)\n return words", "def words(self) -> List[str]:\n return list(self.solutions)", "def process_words(texts, stop_words=stop_words, allowed_postags=['NOUN', 'ADJ', 'VERB', 'ADV']):\r\n texts = [[word for word in simple_preprocess(str(doc)) if word not in stop_words] for doc in texts]\r\n texts = [bigram_mod[doc] for doc in texts]\r\n texts = [trigram_mod[bigram_mod[doc]] for doc in texts]\r\n texts_out = []\r\n nlp = spacy.load('en', disable=['parser', 'ner'])\r\n for sent in texts:\r\n doc = nlp(\" \".join(sent))\r\n texts_out.append([token.lemma_ for token in doc if token.pos_ in allowed_postags])\r\n # remove stopwords once more after lemmatization\r\n texts_out = [[word for word in simple_preprocess(str(doc)) if word not in stop_words] for doc in texts_out]\r\n return texts_out", "def process_text(text):\n text = text.translate(translator)\n tokens = word_tokenize(text)\n# if stem:\n stemmer = PorterStemmer()\n tokens = [stemmer.stem(t) for t in tokens]\n \n return tokens", "def getWords(self, text):\n\t\ttextWithoutPunctuation = self.removePunctuation(text)\n\t\treturn [word for word in textWithoutPunctuation.split() if len(word) >= 1]", "def process_words(texts, stop_words=stop_words, allowed_postags=['NOUN', 'ADJ', 'VERB', 'ADV']):\n texts = [[word for word in simple_preprocess(str(doc)) if word not in stop_words] for doc in texts]\n texts = [bigram_mod[doc] for doc in texts]\n texts = [trigram_mod[bigram_mod[doc]] for doc in texts]\n texts_out = []\n nlp = spacy.load('en', disable=['parser', 'ner'])\n for sent in texts:\n doc = nlp(\" \".join(sent)) \n texts_out.append([token.lemma_ for token in doc if token.pos_ in allowed_postags])\n # remove stopwords once more after lemmatization\n texts_out = [[word for word in simple_preprocess(str(doc)) if word not in stop_words] for doc in texts_out] \n return texts_out", "def processes_and_tokenize(raw_document):\n\ttokenizer = RegexpTokenizer(r'\\w+')\n\ttokens = tokenizer.tokenize(raw_document.lower())\t\t# tokens = nltk.word_tokenize(corpus.lower()) # without removing punctiation\n\n\t#remove stop words\n\tstop_words = set(nltk.corpus.stopwords.words('english'))\n\t#stop_words = set(stopwords.words('english'))\n\tfiltered_tokens = [w for w in tokens if not w in stop_words]\n\treturn filtered_tokens", "def _tokenize(self, text):\n if not text:\n return []\n\n text = PUNCTUATION_CHARS.sub(' ', text)\n\n words = [\n t[:128].lower() for t in text.split()\n if len(t) >= MIN_WORD_LENGTH and t.lower() not in STOP_WORDS\n ]\n\n return words", "def preprocess_corpus(self) -> List[str]:\n return self.tidy_text(self.corpus)", "def word_tokenize(text):\n word_list = []\n for sentences in nltk.sent_tokenize(text):\n for words in nltk.word_tokenize(sentences):\n word_list.append(words)\n return word_list", "def transform(self, X):\n out= [self._word_ngrams(text,ngram=self.word_ngrams)\n for text in X]\n return out", "def getWords(speech):\r\n return speech.split()", "def create_word_list(text_as_string):\n # print 'creating word list'\n global 
global_word_list\n\n for w in text_as_string.split():\n word = w.translate(string.maketrans(\"\", \"\"), string.punctuation).lower()\n if len(word) > 0:\n global_word_list.append(word) # Appends each word to global word list\n\n return global_word_list", "def words(self):\n return list(self._words())", "def words(self):\n return list(self._words())", "def words(self) -> List[str]:\n return pulumi.get(self, \"words\")", "def words(self) -> List[str]:\n return pulumi.get(self, \"words\")", "def text_to_wordlist(text, remove_html_related=True, remove_non_letter=True,\n to_lowercase=True, remove_stopwords=False, use_lem=False):\n if remove_html_related:\n text = url_removal(text)\n # Remove HTML using BeautifulSoup\n text = BeautifulSoup(text, 'lxml').get_text()\n\n # Remove non-letters using regex\n if remove_non_letter:\n text = non_letter_removal(text)\n # Convert words to lower case and split them\n if to_lowercase:\n text = text.lower()\n\n words = text.split()\n # get tagged before possible stopword removal\n tagged_words = pos_tag(words)\n\n # Optionally remove stop words (false by default)\n if remove_stopwords:\n tagged_words = stopword_removal_from_taggedwords(tagged_words)\n\n # Optionally get part of speech tag of words then lemmatize them\n if use_lem:\n words = lemmatize_tagged_words(tagged_words)\n # Return a list of words and tagged words\n return(words, tagged_words)", "def get_words():\n # words\n words_list = list()\n for i in range(1, 114+1):\n sura = quran.get_sura(i)\n for aya in sura:\n wordsList = aya.split(' ')\n for word in wordsList:\n words_list.append(word)\n\n return words_list", "def preproc_user_input(txt, model):\r\n txt = pre_process(txt)\r\n txt_tokenized = [word for word in txt.split(\" \") if word in model.wv.vocab]\r\n return \" \".join(txt_tokenized)", "def txt2vec(self, text: str) -> List[int]:\n itr: List[int] = []\n for token in self.tokenize(str(text)):\n itr.append(self._word_lookup(token))\n return itr", "def vocabulary(self) -> np.ndarray:\n return np.array(\n list(set(word for text in self.preprocess_corpus for word in text))\n )", "def get_words(self):\n words = self.wiki.get_words(cleaner=self.cleaner)\n df = pd.DataFrame({\"word\": words})\n df = df.drop_duplicates(\"word\")\n df = df.head(100)\n mask = df[\"word\"].isin(self.common[\"word\"])\n mask |= df[\"word\"].str.lower().isin(self.common[\"word\"])\n\n words = [ Word(word) for word in df[~mask][\"word\"] ]\n for word in words:\n word.get_definition(definer=self.definer)", "def get_words(self, indices):\n return [self.get_word(index) for index in indices]", "def preprocess(text):\n\tX = []\n\tsent_detector = nltk.data.load('tokenizers/punkt/english.pickle')\n\tfor t in text:\n\t\tsents = sent_detector.tokenize(t)\n\t\tresult = ''\n\t\tfor s in sents:\n\t\t\ttokens = word_tokenize(s)\n\t\t\tresult += ' ' + ' '.join(tokens)\n\t\tX.append(result)\n\treturn X", "def input_new_phrase(self, text):\n \n x_new_tokens = [word_idx[word] for word in text.split()]\n \n pred = self.model.predict(np.array([x_new_tokens]))\n pred = np.argmax(pred, axis=-1)[0]\n \n return [[word_list[w], tags[pred]] for (w, pred) in zip(range(len(x_new)), pred)]", "def text_to_words(the_text):\n\n my_substitutions = the_text.maketrans(\n # If you find any of these\n \"ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&()*+,-./:;<=>?@[]^_`{|}~'\\\\\",\n # Replace them by these\n \"abcdefghijklmnopqrstuvwxyz \")\n\n # Translate the text now.\n cleaned_text = the_text.translate(my_substitutions)\n wds = cleaned_text.split()\n 
return wds", "def text_to_words(the_text):\n\n my_substitutions = the_text.maketrans(\n # If you find any of these\n \"ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&()*+,-./:;<=>?@[]^_`{|}~'\\\\\",\n # Replace them by these\n \"abcdefghijklmnopqrstuvwxyz \")\n\n # Translate the text now.\n cleaned_text = the_text.translate(my_substitutions)\n wds = cleaned_text.split()\n return wds", "def transform(self, input_: list) -> list:\n idx = np.zeros((self._fixed_length_text, self._fixed_length_word))\n for i in range(min(len(input_), self._fixed_length_text)):\n for j in range(min(len(input_[i]), self._fixed_length_word)):\n idx[i, j] = self._char_index.get(input_[i][j], 1)\n\n return idx.tolist()", "def clean_raw_data(self, text):\r\n return [token.lower() for token in nltk.word_tokenize(text)\r\n if token not in self.stop_words and token not in punctuation]", "def get_words(data):\n return data[\"words\"]", "def normalize_text(text: str) -> List[str]:\n normalized_text = []\n\n for word in text.split():\n preprocessed_word = TextProcessor.remove_punctuation(word)\n if preprocessed_word:\n normalized_text.append(preprocessed_word.lower())\n\n return normalized_text", "def clean_text(self, text):\n words = SPLIT_TEXT.findall(text.lower())\n words = self.rm_stop_words(words)\n words = self.stem_words(words)\n return words", "def extractWords(self, inputDataset):\n reviewFile = open(inputDataset, \"r\", encoding=\"utf-8-sig\")\n for record in reviewFile:\n record = record.strip().split(\"\\t\") # tab-delimited .txt file\n self.addUnigrams(int(record[0]), record[1])\n reviewFile.close()", "def featurize(self, data):\n \n bag_of_words = []\n\n tokens = data.split()\n\n for i in tokens:\n bag_of_words.append((i, True))\n\n return bag_of_words", "def getWordList(text):\n\ttmpwordlist = string.split(text)\n\twordlist = []\n\tfor i in range(len(tmpwordlist)):\n\t\tword = puncTrim(tmpwordlist[i])\n\t\tif len(word) > 0:\n\t\t\twordlist.append(word)\n\treturn wordlist", "def words(self, uncased=False):\n if uncased:\n return [t[self.TEXT].lower() for t in self.data]\n else:\n return [t[self.TEXT] for t in self.data]", "def words(self, uncased=False):\n if uncased:\n return [t[self.TEXT].lower() for t in self.data]\n else:\n return [t[self.TEXT] for t in self.data]", "def vectorize_text(text):\n\n def remove_punctuation(text):\n \"\"\"Removes special characters from text.\"\"\"\n return re.sub('[,.?\";:\\-!@#$%^&*()]', '', text)\n\n def remove_common_words(text_vector):\n \"\"\"Removes 50 most common words in the uk english.\n\n source: http://www.bckelk.ukfsn.org/words/uk1000n.html\n\n \"\"\"\n common_words = set(['the', 'and', 'to', 'of', 'a', 'I', 'in', 'was',\n 'he', 'that', 'it', 'his', 'her', 'you', 'as', 'had', 'with',\n 'for', 'she', 'not', 'at', 'but', 'be', 'my', 'on', 'have', 'him',\n 'is', 'said', 'me', 'which', 'by', 'so', 'this', 'all', 'from',\n 'they', 'no', 'were', 'if', 'would', 'or', 'when', 'what', 'there',\n 'been', 'one', 'could', 'very', 'an', 'who'])\n return [word for word in text_vector if word not in common_words]\n\n text = text.lower()\n text = remove_punctuation(text)\n words_list = text.split()\n words_list = remove_common_words(words_list)\n\n return words_list", "def get_words(self):\n return self.words", "def preprocessing(data):\n #tokenizer = RegexpTokenizer(r'\\w+') # allow charachter only\n #words = tokenizer.tokenize(data) # tokenize : convert to words\n words = word_tokenize(data)\n # remove stop words & stemming\n new_words = []\n for word in words:\n if word not in 
stop_words:\n new_words.append(stemmer.stem(word)) # append to new words with stemming\n \n if '' in new_words: new_words.remove('') # remove space from list\n #print(\"Preprocessing : {}\".format(new_words))\n return new_words", "def extract_words(self):\n str = self.text.lower()\n words = re.sub(r'[?|—|:|\"|,|\\.\\n|\\.|\\s|\\n|\\t|\\v|\\f|\\r]+', \"*\", str)\n self.word_list = words.split(\"*\")", "def process_words(texts, bigram_mod,trigram_mod,stop_words=stop_words, allowed_postags=['NOUN', 'ADJ', 'VERB', 'ADV']):\r\n texts = [[word for word in simple_preprocess(str(doc)) if word not in stop_words] for doc in texts]\r\n texts = [bigram_mod[doc] for doc in texts]\r\n texts = [trigram_mod[bigram_mod[doc]] for doc in texts]\r\n texts_out = []\r\n nlp = spacy.load('en_core_web_sm')\r\n for sent in texts:\r\n doc = nlp(\" \".join(sent)) \r\n texts_out.append([token.lemma_ for token in doc if token.pos_ in allowed_postags])\r\n texts_out = [[word for word in simple_preprocess(str(doc)) if word not in stop_words] for doc in texts_out] \r\n return texts_out", "def __call__(self, text):\r\n if self.use_pos_tagging:\r\n return [self.wnl.lemmatize(t, self.pos(t)) for t in word_tokenize(self.clean(text))]\r\n else:\r\n return [self.wnl.lemmatize(t) for t in word_tokenize(self.clean(text))]", "def proc_text(self, text):\n\n lemmas = []\n tokens = []\n doc = self(text)\n for tokObj in doc:\n if self._remove_punct and tokObj.is_punct:\n continue\n lemma = tokObj.lemma_\n text = tokObj.text\n if self._keep_only_alpha_num and not is_alpha_num(text):\n continue\n tok1 = text.lower()\n tok2 = lemma.lower()\n if tok1 in self._stopwords or tok2 in self._stopwords:\n continue\n\n if self._lower_case:\n text = text.lower()\n lemma = lemma.lower()\n\n lemmas.append(lemma)\n tokens.append(text)\n\n return ' '.join(lemmas), ' '.join(tokens)", "def get_word_pos_list(self, raw_text):\n raw_text = raw_text.strip()\n word_list = []\n pos_list = []\n # pdb.set_trace()\n seg_list = jieba.posseg.cut(raw_text,HMM=False) # 默认是精确模式\n for word, flag in seg_list:\n # remove the punctuation, we will keep punctuation as prosodic boundary\n if word in ['「', '」', '.', '-' , '', ' ', '。' , '—' , '?', ':', '、', '…',';',',',',','!']:\n continue\n word_list.append(word)\n pos_list.append(flag)\n return word_list, pos_list", "def get_tokens(self, text):\n if text is not None:\n text = text.strip()\n words = self.safe_split(text)\n return words\n return []", "def getWords(text, stemmRequired = False, correctWordRequired = False):\n # m aking all words lowercase\n # removing all punctuations\n cleanText = re.sub(u'[^a-zа-я0-9]', ' ', text.lower())\n\n # Correcting words if required (no by default) -> stemming if required\n if correctWordRequired:\n words = [correctWord(w) if not stemmRequired or re.search(\"[0-9a-z]\", w) else stemmer.stem(correctWord(w)) for w in cleanText.split() if len(w)>1 and w not in stopwords]\n\n # Don't applying corrections. 
-> stemming if required\n else:\n words = [w if not stemmRequired or re.search(\"[0-9a-z]\", w) else stemmer.stem(w) for w in cleanText.split() if len(w)>1 and w not in stopwords]\n \n return words", "def process_text(text, stem=True):\n tokens = word_tokenize(text.lower())\n stop_words = set(stopwords.words('english')) \n \n tokens_cpy = []\n for t in tokens:\n if t not in stop_words:\n tokens_cpy.append(t)\n tokens = tokens_cpy\n \n if stem:\n tokens_cpy = []\n stemmer = PorterStemmer()\n tokens = [stemmer.stem(t) for t in tokens]\n for t in tokens:\n if t not in stopword_list:\n tokens_cpy.append(t)\n tokens = tokens_cpy\n return tokens", "def vectorize_text(corpus):\n bag_of_words_model = CountVectorizer()\n\n # performs the above described three tasks on the given data corpus.\n dense_vec_matrix = bag_of_words_model.fit_transform(corpus).todense()\n bag_of_word_df = pd.DataFrame(dense_vec_matrix)\n bag_of_word_df.columns = sorted(bag_of_words_model.vocabulary_)\n return bag_of_word_df", "def _tokenize(self, text):\n if not text:\n return []\n\n text = self.PUNCTUATION_CHARS.sub(' ', text)\n\n words = [t[:128] for t in text.split() if len(t) >= self.MIN_WORD_LENGTH and t.lower() not in self.STOP_WORDS]\n\n return words", "def tokenize(self, raw_text):\n # TODO implement\n raw_tokens = word_tokenize(raw_text.decode('utf8'))\n return self.filter_tokens(raw_tokens)\n # return self.split_by(raw_tokens, '-')", "def isolate_words(self, input_string):\n self.all_searched_words = input_string.split()\n return self.all_searched_words", "def tokenize(text):\n text_norm = re.sub(r'[^a-zA-Z0-9]', ' ', text) # normalize: remove punctuation\n word_list = word_tokenize(text_norm) # tokenize\n word_list_clean = [w for w in word_list if w not in stopwords.words('english')] # remove stopwords\n word_list_stemmed = [PorterStemmer().stem(w) for w in word_list_clean] # stemm words\n return word_list_stemmed", "def pre_process(text):\n # replace (,.'\") with ''\n text = text.replace(',', '')\n text = text.replace('.', '')\n text = text.replace(\"'\", '')\n text = text.replace(\"\\\"\", '')\n\n # tokenize into words\n tokens = [word for sent in sent_tokenize(text) for word in word_tokenize(sent)]\n\n # remove stopwords\n stop = stopwords.words('english')\n tokens = [token for token in tokens if token not in stop]\n\n # remove words less than three letters\n tokens = [word for word in tokens if len(word) >= 3]\n\n # lower capitalization\n tokens = [word.lower() for word in tokens]\n\n # lemmatize\n lmtzr = WordNetLemmatizer()\n tokens = [lmtzr.lemmatize(word) for word in tokens]\n\n return tokens", "def extract_words(s):\n\n # Convert the data the data into normal for (Eg: 'ç' to 'c') and lowercase it.\n s = unicodedata.normalize('NFKD', s).lower()\n\n # Replace the punctuation with a space using the _regex and filter stopwords.\n wordlist = [w for w in _regex.sub(' ', s).split() if w not in _stopwords]\n\n return wordlist", "def read_words(self):\n # If lowercase is True, it is already handled when we read the dataset.\n # If it is False, the unit must be other than word, so that we need to lowercase\n # the data since the word lookup table is for target words which are\n # always in lowercase.\n data = self.train_data\n if not self.lowercase or self.unit == \"oracle\":\n tmp_data = []\n for word in data:\n if self.unit == \"oracle\":\n if '+' in word:\n tags = word.split('+')\n word_tag = tags[0].split(':')\n word = word_tag[1]\n if self.unit == \"morpheme\":\n word = re.sub(\"@@\", \"\", word)\n word = 
word.lower()\n tmp_data.append(word)\n data = tmp_data\n return data", "def preprocess_text(self, input_text):\n input_text = self.clean_text(input_text)\n tokenization_list = self.tokenize_text(input_text)\n index_list = self.replace_token_with_index(tokenization_list, self.max_length_dictionary)\n index_list = self.pad_sequence(index_list, self.max_length_tweet)\n return index_list", "def get_words(text):\n\n only_words_text = re.compile(r'[^0-9^a-z^A-Z\\s]').sub('', text)\n return only_words_text.split(' ')", "def tokenize(raw_text):\n tokenized_text = nltk.tokenize.word_tokenize(raw_text)\n return tokenized_text", "def processText(text):\n\n no_punc = [word for word in text.split() if word.isalpha()] # and word not in stopwords.words('english')]\n #removes non-letter characters and only includes words not included in stopwords\n no_punc = \" \".join(no_punc) \n clean_words = nltk.word_tokenize(no_punc) #splits the punctuation marks from the real words\n return clean_words", "def preprocess_corpus(corpus): \n \n # print 'preprocessing words'\n # remove space\n # text = re.findall(r'\\w+', corpus) # for [a-zA-Z0-9_]\n text = re.findall(r'[a-zA-Z]+', corpus) # for [a-zA-Z] keep words only no numbers and '_' \n words = [w.lower() for w in text]\n # print words \n \n # stemmer based on existing ones in the current list\n lemma = nltk.WordNetLemmatizer()\t\t\t#extract the original word pattern\n lemmed_words = [lemma.lemmatize(w) for w in words]\n \n # tag lemmed_words\n tagged_words = nltk.pos_tag(lemmed_words)\n # print tagged_words \n \n processed_words = []\n tag_list = ['CC', 'DT', 'EX', 'IN', 'MD', \n 'PDT', 'POS', 'PRP', 'PRP$', 'TO', \n 'WDT', 'WP', 'WRB']\n for word, tag in tagged_words:\n if tag in tag_list:\n pass \n else: \n processed_words.append(word)\n \n return processed_words", "def words(self):\n pass", "def getWords(text, stemmRequired = True,\r\n correctWordRequired = True,\r\n excludeStopwordsRequired = True):\r\n text = text.replace(\",\", \" \").replace(\".\", \" \")\r\n cleanText = re.sub('[^a-zа-я0-9]', ' ', text.lower())\r\n if correctWordRequired:\r\n if excludeStopwordsRequired:\r\n words = [correctWord(w) \\\r\n if not stemmRequired or re.search(\"[0-9a-z]\", w) \\\r\n else stemmer.stem(correctWord(w)) \\\r\n for w in cleanText.split() \\\r\n if len(w)>1 and w not in stopwords]\r\n else:\r\n words = [correctWord(w) \\\r\n if not stemmRequired or re.search(\"[0-9a-z]\", w) \\\r\n else stemmer.stem(correctWord(w)) \\\r\n for w in cleanText.split() \\\r\n if len(w)>1]\r\n else:\r\n if excludeStopwordsRequired:\r\n words = [w \\\r\n if not stemmRequired or re.search(\"[0-9a-z]\", w) \\\r\n else stemmer.stem(w) \\\r\n for w in cleanText.split() \\\r\n if len(w)>1 and w not in stopwords]\r\n else:\r\n words = [w \\\r\n if not stemmRequired or re.search(\"[0-9a-z]\", w) \\\r\n else stemmer.stem(w) \\\r\n for w in cleanText.split() \\\r\n if len(w)>1]\r\n \r\n return words", "def normalize_func(text: str) -> List[str]:\n tokens = nltk.word_tokenize(text) # need to be consistent with the basic tokenize used in other functions\n return [lemmatizer.lemmatize(w.lower(), get_wordnet_pos(w.lower())) for w in tokens]", "def get_words():\n words = [w.lower() for w in movie_reviews.words() \n if len(w) > 2 and w not in STOPWORDS and w.isnumeric() == False]\n \n return words", "def tokenize(text):\n \n text.lower() # convert to lowercase\n text = re.sub(r\"[^a-zA-Z0-9]\", \" \", text) #remove punctuation\n words = word_tokenize(text) # tokenize by individual word\n words = [w for 
w in words if w not in stopwords.words(\"english\")] #remove stop words\n lemmed = [WordNetLemmatizer().lemmatize(w) for w in words] #lemminization\n \n return words", "def makeWords(self):\r\n clean_s = self.cleanString(self.text)\r\n LoW = clean_s.split() \r\n for x in LoW: \r\n if x not in self.words: \r\n self.words[x] = 1\r\n else: \r\n self.words[x] += 1\r\n return self.words", "def get_words(self):\n return self._words", "def tokenize(text):\n tokens = nltk.word_tokenize(text)\n lemmatizer = nltk.WordNetLemmatizer()\n \n lemmatized_words = []\n for word in tokens:\n lemmatized_words.append(lemmatizer.lemmatize(word).lower().strip())\n \n return lemmatized_words", "def tokenize(text):\n return [token.lower() for token in simple_preprocess(text) if token not in STOPWORDS]", "def span_tokenize_words(self, text):\n\t\tsentences = self.tokenize_sentences(text)\n\t\ttokens_per_sentence = list()\n\t\tsentence_offset = 0\n\t\t\n\t\tsentence_counter = 0\n\t\tfor sentence in sentences:\n\t\t\tsentence_tokens = list()\n\t\t\tfor token in self.word_tokenizer.span_tokenize(sentence):\n\t\t\t\t# save actual token together with it's positions\n\t\t\t\tbegin = token[0] + sentence_offset\n\t\t\t\tend = token[1] + sentence_offset\n\t\t\t\ttoken_tuple = (text[begin:end],begin,end,sentence_counter)\n\t\t\t\tsentence_tokens.append(token_tuple)\n\t\t\t\t\n\t\t\ttokens_per_sentence.append(sentence_tokens)\n\t\t\t\n\t\t\tsentence_counter = sentence_counter + 1\n\t\t\tsentence_offset = sentence_offset + len(sentence) + 1\n\t\t\n\t\treturn tokens_per_sentence", "def txt2vectors(self, txt, is_html):\n words = txt2words(txt)\n words = [w for w in words if w in self._model]\n if len(words) != 0:\n for w in words:\n yield self._model[w]", "def review_to_wordlist(review):\n\n words = review.lower().split()\n words = [w for w in words]\n return(words)", "def _create_word_list(self, sentences):\n\n ############ 1.4 TODO\n \"\"\"\n https://machinelearningmastery.com/clean-text-machine-learning-python/\n \"\"\"\n import string\n table = str.maketrans('','',string.punctuation)\n # import ipdb; ipdb.set_trace()\n word_list = []\n if type(sentences) == list:\n for sentence in sentences:\n words = sentence.split(\" \")\n word_list += [word.translate(table).lower() for word in words]\n else:\n words = sentences.split(\" \")\n word_list += [word.translate(table).lower() for word in words]\n ############\n # raise NotImplementedError()\n return word_list", "def getWordsList(self):\n return self.words" ]
[ "0.73962986", "0.7058597", "0.7052718", "0.7027226", "0.700164", "0.699", "0.69413567", "0.6924657", "0.68807524", "0.6822172", "0.680491", "0.6783017", "0.6780739", "0.6760617", "0.67566645", "0.674435", "0.6730428", "0.67244", "0.6674279", "0.6671103", "0.6661868", "0.6655476", "0.6630995", "0.6624591", "0.6620864", "0.66146183", "0.656415", "0.6560358", "0.6559959", "0.65436286", "0.6511927", "0.65005016", "0.6498331", "0.64878076", "0.647121", "0.6467225", "0.64610875", "0.645251", "0.645251", "0.6448211", "0.6448211", "0.6447921", "0.6439336", "0.6437184", "0.64355767", "0.6432795", "0.6428005", "0.64126456", "0.63996935", "0.63957065", "0.6390045", "0.6390045", "0.6368499", "0.63420093", "0.63382196", "0.63331854", "0.63281476", "0.6327651", "0.63260704", "0.63209903", "0.631488", "0.631488", "0.63033617", "0.6291788", "0.62892497", "0.6285844", "0.62825507", "0.6278928", "0.6271204", "0.6266201", "0.6259161", "0.62555563", "0.62510586", "0.6237819", "0.6229273", "0.6228022", "0.62243855", "0.6222446", "0.62218267", "0.6217101", "0.62150216", "0.62122333", "0.6209613", "0.6207553", "0.6199532", "0.61952287", "0.6194009", "0.61884356", "0.6185756", "0.61846197", "0.61750287", "0.6172296", "0.61629975", "0.61580753", "0.6157032", "0.61432934", "0.61400586", "0.61271673", "0.61144835", "0.6109922" ]
0.62860745
65
Return dict of word_to_id from raw text data. If max_size is specified, vocab is truncated to the set of highest-frequency words within size.
def build_vocab(raw_data, max_size=None):
    data = [w for doc in tokenize_keras(raw_data) for w in doc]
    counter = collections.Counter(data)
    count_pairs = sorted(counter.items(), key=lambda x: (-x[1], x[0]))
    if max_size:
        count_pairs = count_pairs[:max_size]
    words, _ = list(zip(*count_pairs))
    word_to_id = dict(zip(words, range(len(words))))
    word_to_id[UNKNOWN_WORD] = len(word_to_id)
    word_to_id[PAD_WORD] = len(word_to_id)
    return word_to_id
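A minimal usage sketch, assuming tokenize_keras and the UNKNOWN_WORD / PAD_WORD constants are defined as in the function above; train_texts is a hypothetical list of raw documents. The returned word_to_id dict is typically paired with a lookup that maps unseen words to the UNKNOWN_WORD id.

word_to_id = build_vocab(train_texts, max_size=10000)  # truncate vocab to the 10k most frequent words
unk_id = word_to_id[UNKNOWN_WORD]                      # fallback id for out-of-vocabulary words
ids = [[word_to_id.get(w, unk_id) for w in doc] for doc in tokenize_keras(train_texts)]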
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_vocab(self):\n word2id = {}\n for document in self.docs:\n for word in document:\n if word not in word2id.keys():\n word2id[word] = len(word2id)\n return word2id", "def build_vocab(sentences, max_num_words):\n # Build vocabulary\n word_counts = Counter(itertools.chain(*sentences)).most_common()\n if max_num_words != 0 and max_num_words < len(word_counts):\n word_counts = word_counts[:max_num_words]\n\n # Mapping from index to word\n vocabulary = dict()\n index = 0\n for x in word_counts:\n vocabulary[index] = x[0]\n index += 1\n\n return vocabulary", "def construct_vocab(lines, vocab_size):\n vocab = {}\n for line in lines:\n for word in line:\n if word not in vocab:\n vocab[word] = 1\n else:\n vocab[word] += 1\n \n word2id = {}\n id2word = {}\n word2id['<pad>'] = 0\n word2id['<unk>'] = 1\n id2word[0] = '<pad>'\n id2word[1] = '<pad>'\n \n sorted_word2id = sorted(\n vocab.items(),\n key=operator.itemgetter(1),\n reverse=True\n )\n\n sorted_words = [x[0] for x in sorted_word2id[:vocab_size]]\n\n for ind, word in enumerate(sorted_words):\n word2id[word] = ind + 2\n\n for ind, word in enumerate(sorted_words):\n id2word[ind + 2] = word\n\n return word2id, id2word", "def build_vocab(data):\n # data = _read_words(filename)\n counter = collections.Counter(data)\n # print('counter', counter) # dictionary for the occurrence number of each word, e.g. 'banknote': 1, 'photography': 1, 'kia': 1\n count_pairs = sorted(counter.items(), key=lambda x: (-x[1], x[0]))\n # print('count_pairs',count_pairs) # convert dictionary to list of tuple, e.g. ('ssangyong', 1), ('swapo', 1), ('wachter', 1)\n words, _ = list(zip(*count_pairs))\n word_to_id = dict(zip(words, range(len(words))))\n # print(words) # list of words\n # print(word_to_id) # dictionary for word to id, e.g. 
'campbell': 2587, 'atlantic': 2247, 'aoun': 6746\n return word_to_id", "def process_text(self, text: str, max_length: int) -> Dict[str, Sequence[int]]:\n inputs = self.tokenizer(\n [c for c in text],\n return_token_type_ids=True,\n return_attention_mask=True,\n max_length=max_length,\n padding=\"max_length\",\n truncation=True,\n is_pretokenized=True,\n )\n return inputs.data", "def create_vocabulary(vocabulary_path, data_path, max_vocabulary_size,\n tokenizer=None, normalize_digits=True):\n if not gfile.Exists(vocabulary_path):\n print(\"Creating vocabulary %s from data %s\" % (vocabulary_path, data_path))\n vocab = {}\n with gfile.GFile(data_path, mode=\"r\") as f:\n counter = 0\n for line in f:\n counter += 1\n line = line.strip().split('\\t')[0]\n if counter % 100000 == 0:\n print(\" processing line %d\" % counter)\n tokens = tokenizer(line) if tokenizer else basic_tokenizer(line)\n for w in tokens:\n word = re.sub(_DIGIT_RE, \"0\", w) if normalize_digits else w\n if word in vocab:\n vocab[word] += 1\n else:\n vocab[word] = 1\n sorted_vocab = sorted(vocab, key=vocab.get, reverse=True)\n vocab_list = _START_VOCAB + sorted_vocab\n if len(vocab_list) > max_vocabulary_size:\n vocab_list = vocab_list[:max_vocabulary_size]\n print(\"Corpus %s has %d tokens, %d uniq words, %d vocab at cutoff %d.\" % (\n data_path, sum(vocab.values()), len(vocab), max_vocabulary_size, vocab[sorted_vocab[max_vocabulary_size - len(_START_VOCAB)]] ) )\n else:\n print(\"Corpus %s has %d tokens, %d uniq words, %d vocab at cutoff %d.\" % (\n data_path, sum(vocab.values()), len(vocab), len(vocab), 0))\n\n with gfile.GFile(vocabulary_path, mode=\"wb\") as vocab_file:\n for w in vocab_list:\n vocab_file.write(w + \"\\n\")", "def build_vocab(words, vocab_size, visual_fld=None):\n utils.safe_mkdir(visual_fld)\n file = open(os.path.join(visual_fld, 'vocab.tsv'), 'w',encoding='utf8')\n\n dictionary = dict()\n count = [('UNK', -1)]\n index = 0\n count.extend(Counter(words).most_common(vocab_size - 1))\n\n for word, _ in count:\n dictionary[word] = index\n index += 1\n file.write(word + '\\n')\n\n index_dictionary = dict(zip(dictionary.values(), dictionary.keys()))\n file.close()\n return dictionary, index_dictionary", "def build_words_dataset(words, vocabulary_size=50000, printable=True):\n import collections\n count = [['UNK', -1]]\n count.extend(collections.Counter(words).most_common(vocabulary_size - 1))\n dictionary = dict()\n for word, _ in count:\n dictionary[word] = len(dictionary)\n data = list()\n unk_count = 0\n for word in words:\n if word in dictionary:\n index = dictionary[word]\n else:\n index = 0 # dictionary['UNK']\n unk_count += 1\n data.append(index)\n count[0][1] = unk_count\n reverse_dictionary = dict(zip(dictionary.values(), dictionary.keys()))\n if printable:\n print('Real vocabulary size %d' % len(collections.Counter(words).keys()))\n print('Limited vocabulary size {}'.format(vocabulary_size))\n assert len(collections.Counter(words).keys()) >= vocabulary_size , \\\n \"Read vocabulary size can be less than limited vocabulary size\"\n return data, count, dictionary, reverse_dictionary", "def create_vocab(vocab_size):\n vocab_dict = tff.simulation.datasets.stackoverflow.load_word_counts(\n cache_dir='/tmp')\n return list(vocab_dict.keys())[:vocab_size]", "def __init__(self, vocab_file, max_size):\n\t\tself._word_to_id = {}\n\t\tself._id_to_word = {}\n\t\tself._count = 0 # keeps track of total number of words in the Vocab\n\n\t\t# [UNK], [PAD], [START] and [STOP] get the ids 0,1,2,3.\n\t\tfor w in 
[UNKNOWN_TOKEN, PAD_TOKEN, START_DECODING, STOP_DECODING]:\n\t\t\tself._word_to_id[w] = self._count\n\t\t\tself._id_to_word[self._count] = w\n\t\t\tself._count += 1\n\n\t\t# Read the vocab file and add words up to max_size\n\t\twith open(vocab_file, 'r') as vocab_f:\n\t\t\tfor line in vocab_f:\n\t\t\t\tpieces = line.split()\n\t\t\t\tif len(pieces) != 2:\n\t\t\t\t\tprint ('Warning: incorrectly formatted line in vocabulary file: %s\\n' % line)\n\t\t\t\t\tcontinue\n\t\t\t\tw = pieces[0]\n\t\t\t\tif w in [SENTENCE_START, SENTENCE_END, UNKNOWN_TOKEN, PAD_TOKEN, START_DECODING, STOP_DECODING]:\n\t\t\t\t\traise Exception(\n\t\t\t\t\t\t'<s>, </s>, [UNK], [PAD], [START] and [STOP] shouldn\\'t be in the vocab file, but %s is' % w)\n\t\t\t\tif w in self._word_to_id:\n\t\t\t\t\traise Exception('Duplicated word in vocabulary file: %s' % w)\n\t\t\t\tself._word_to_id[w] = self._count\n\t\t\t\tself._id_to_word[self._count] = w\n\t\t\t\tself._count += 1\n\t\t\t\tif max_size != 0 and self._count >= max_size:\n\t\t\t\t\tprint (\"max_size of vocab was specified as %i; we now have %i words. Stopping reading.\" % (\n\t\t\t\t\tmax_size, self._count))\n\t\t\t\t\tbreak\n\n\t\tprint (\"Finished constructing vocabulary of %i total words. Last word added: %s\" % (\n\t\tself._count, self._id_to_word[self._count - 1]))", "def count_words(path, max_vocab_size=40000, tok=False):\n counts = collections.Counter()\n for words in read_file(path, tok):\n for word in words:\n counts[word] += 1\n\n vocab = [word for (word, _) in counts.most_common(max_vocab_size)]\n return vocab", "def _create_id_map(self, word_list, max_list_length):\n\n ############ 1.5 TODO\n from collections import Counter\n \n # import pdb; pdb.set_trace()\n word_rank_list = Counter(word_list).most_common(max_list_length)\n \n id_map = {}\n for idx, (word,_) in enumerate(word_rank_list):\n id_map[word] = idx\n\n ############\n # raise NotImplementedError()\n return id_map", "def create_vocabulary(vocabulary_path, data_path, max_vocabulary_size,\n tokenizer=None, normalize_digits=True,\n _DIGIT_RE=re.compile(br\"\\d\"),\n _START_VOCAB=[b\"_PAD\", b\"_GO\", b\"_EOS\", b\"_UNK\"]):\n if not gfile.Exists(vocabulary_path):\n print(\"Creating vocabulary %s from data %s\" % (vocabulary_path, data_path))\n vocab = {}\n with gfile.GFile(data_path, mode=\"rb\") as f:\n counter = 0\n for line in f:\n counter += 1\n if counter % 100000 == 0:\n print(\" processing line %d\" % counter)\n tokens = tokenizer(line) if tokenizer else basic_tokenizer(line)\n for w in tokens:\n word = re.sub(_DIGIT_RE, b\"0\", w) if normalize_digits else w\n if word in vocab:\n vocab[word] += 1\n else:\n vocab[word] = 1\n vocab_list = _START_VOCAB + sorted(vocab, key=vocab.get, reverse=True)\n if len(vocab_list) > max_vocabulary_size:\n vocab_list = vocab_list[:max_vocabulary_size]\n with gfile.GFile(vocabulary_path, mode=\"wb\") as vocab_file:\n for w in vocab_list:\n vocab_file.write(w + b\"\\n\")\n else:\n print(\"Vocabulary %s from data %s exists\" % (vocabulary_path, data_path))", "def create_vocabulary(vocabulary_path, data_path, max_vocabulary_size,\n tokenizer=None, normalize_digits=True):\n if not gfile.Exists(vocabulary_path):\n print(\"Creating vocabulary %s from data %s\" % (vocabulary_path, data_path))\n vocab = {}\n with gfile.GFile(data_path, mode=\"rb\") as f:\n counter = 0\n for line in f:\n counter += 1\n if counter % 100000 == 0:\n print(\" processing line %d\" % counter)\n tokens = tokenizer(line) if tokenizer else basic_tokenizer(line)\n for w in tokens:\n word = 
re.sub(_DIGIT_RE, b\"0\", w) if normalize_digits else w\n if word in vocab:\n vocab[word] += 1\n else:\n vocab[word] = 1\n vocab_list = _START_VOCAB + sorted(vocab, key=vocab.get, reverse=True)\n if len(vocab_list) > max_vocabulary_size:\n print(\"vocab too big\")\n vocab_list = vocab_list[:max_vocabulary_size]\n with gfile.GFile(vocabulary_path, mode=\"wb\") as vocab_file:\n for w in vocab_list:\n vocab_file.write(w + b\"\\n\")", "def make_vocab(corpus, word_vocab, char_vocab, max_len):\n\n word_id = len(word_vocab)\n char_id = len(char_vocab) + 1\n \n for words in corpus:\n words_list = words.split()+['+'] \n for word in words_list:\n if word not in word_vocab:\n word_vocab[word] = word_id\n word_id += 1\n for char in word:\n if char not in char_vocab:\n char_vocab[char] = char_id\n char_id += 1\n if max_len < len(word):\n max_len = len(word) \n\n return (word_vocab, char_vocab, max_len)", "def make_word2id():\r\n with open(\"public_data/stats/stats_train.pkl\", 'rb') as stats:\r\n stats = pickle.load(stats)\r\n vocab = stats[\"VOCAB\"]\r\n word2id = {word: id for id, word in enumerate([\"PAD\"] + [\"UNK\"] + vocab)}\r\n with open('public_data/vocab/word2id.pkl', 'wb') as out:\r\n pickle.dump(word2id, out, protocol=4)", "def build_vocab(self, min_count=3):\n word2count = defaultdict(int)\n for sentence in self.tokenized_corpus:\n for word in sentence:\n word2count[word] += 1\n\n word2dict = {}\n word2dict['PAD'] = {'id': 0}\n word2dict['UNK'] = {'id': 1}\n for word in word2count:\n if word2count[word] >= min_count:\n word2dict[word] = {'id': len(word2dict), 'count': word2count[word]}\n self.vocab = word2dict", "def data_to_word_ids(self, input_data, filter=False):\n\n _buffer = list()\n for word in input_data:\n word = word.lower()\n if self.unit == \"oracle\":\n if \"+\" in word:\n tokens = word.split('+')\n word_tag = tokens[0].split(':')\n word = word_tag[1]\n if self.unit == \"morpheme\":\n word = re.sub(\"@@\", \"\", word)\n\n # flag to randomize token with frequency one\n flag = 1\n if word in self.unk_word_list:\n flag = random.randint(0, 1)\n\n if word in self.word_to_id and flag == 1:\n # if filter is True, reduce output vocabulary for softmax\n # (map words not in top self.max_vocab_size to UNK)\n if filter:\n # index start from 0\n if self.word_to_id[word] < self.max_vocab_size:\n _buffer.append(self.word_to_id[word])\n else:\n _buffer.append(self.word_to_id['<unk>'])\n else:\n _buffer.append(self.word_to_id[word])\n else:\n _buffer.append(self.word_to_id['<unk>'])\n return _buffer", "def get_vocab_dicts(vocab_size, vocab):\n assert vocab_size == len(vocab)\n\n word2idx = {}\n idx2word = {}\n\n for idx in range(vocab_size):\n word2idx[vocab[idx]] = idx\n idx2word[idx] = vocab[idx]\n\n return word2idx, idx2word", "def build_vocab(vocab_size, text_vector):\n vocab = Counter()\n for text in text_vector:\n for word in text.split(' '):\n vocab[word.lower()]+=1\n vocab = dict(vocab.most_common(vocab_size))\n return vocab", "def get_vocab(data, nb_words=50000, min_nb=10, remove_stop_words = True):\n\n\n # Put everything into onw long string\n data = [item for sublist in list(data.values()) for item in sublist]\n data = \" \".join(data)\n\n # Do a bit of steaming\n data = remove_punctuations(data)\n vocab = Counter(data)\n\n # Remove the stop words\n new_vocab = vocab.copy()\n for key, value in vocab.items():\n if remove_stop_words and key in stopwords:\n del new_vocab[key]\n if value < min_nb:\n del new_vocab[key]\n\n vocab = new_vocab\n\n # Keep the most common words\n vocab = 
Counter(dict(vocab.most_common(nb_words)))\n\n # Extract a mapping\n mapping = {}\n mapping[1] = \"--UNK--\"\n mapping[\"--UNK--\"] = 1\n for i, word in enumerate(sorted(vocab.keys())):\n mapping[i + 2] = word\n mapping[word] = i + 2\n\n return vocab, mapping", "def build_morpheme_vocab(self):\n max_morph_per_word = 0\n morpheme_dict = collections.defaultdict(int)\n splitter = \"@@\"\n for token in self.train_data:\n if token == self.eos or token == self.sos:\n continue\n token = '^' + token + '$'\n morphemes = token.split(splitter)\n if len(morphemes) > max_morph_per_word:\n max_morph_per_word = len(morphemes)\n for morpheme in morphemes:\n morpheme_dict[morpheme] += 1\n\n unk_morpheme_list = set()\n item_to_id = dict()\n item_to_id[constants.PAD_ITEM] = len(item_to_id)\n item_to_id[constants.UNK_ITEM] = len(item_to_id)\n sorted_dict = sorted(morpheme_dict.items(), key=operator.itemgetter(1), reverse=True)\n for token, freq in sorted_dict:\n if freq == 1:\n unk_morpheme_list.add(token)\n if token not in item_to_id:\n item_to_id[token] = len(item_to_id)\n return item_to_id, unk_morpheme_list, max_morph_per_word", "def load_target_vocab(self):\n vocab = [line.split()[0] for line in open(os.path.join('preprocessed', 'all_vocab.txt'), 'r').read().splitlines()]\n self.word2idx = {word: idx for idx, word in enumerate(vocab)}\n self.idx2word = {idx: word for idx, word in enumerate(vocab)}\n self.vocab_size = len(self.word2idx)", "def build_vocab(sentences, vocab_limit):\n # Build vocabulary\n word_counts = Counter(itertools.chain(*sentences))\n print( 'Total size of vocab is {}'.format(len(word_counts.most_common())))\n # Mapping from index to word\n # vocabulary_inv = [x[0] for x in word_counts.most_common(vocab_limit)]\n vocabulary_inv = [x[0] for x in word_counts.most_common(vocab_limit)]\n \n vocabulary_inv = list(sorted(vocabulary_inv))\n # Mapping from word to index\n vocabulary = {x: i+1 for i, x in enumerate(vocabulary_inv)}\n return [vocabulary, vocabulary_inv]", "def tokenize_document(doc_info: dict, tokenizer: BertTokenizer, max_doc_length: int = None) -> dict:\n sub_tokens: List[str] = [] # all sub tokens of a document\n sentence_map: List[int] = [] # collected tokenized tokens -> sentence id\n subtoken_map: List[int] = [] # collected tokenized tokens -> original token id\n\n word_idx = -1\n\n for sentence_id, sentence in enumerate(doc_info['sentences']):\n for token in sentence:\n word_idx += 1\n word_tokens = tokenizer.tokenize(token)\n sub_tokens.extend(word_tokens)\n sentence_map.extend([sentence_id] * len(word_tokens))\n subtoken_map.extend([word_idx] * len(word_tokens))\n if max_doc_length:\n num_to_pad = max_doc_length - len(sub_tokens)\n sub_tokens.extend([\"[PAD]\"] * num_to_pad)\n sentence_map.extend([sentence_map[-1]+1] * num_to_pad)\n subtoken_map.extend(list(range(word_idx+1, num_to_pad+1+word_idx)))\n # global MAX_LENGTH\n # if len(sub_tokens) > MAX_LENGTH:\n # print(len(sub_tokens))\n # MAX_LENGTH = len(sub_tokens)\n # print(MAX_LENGTH)\n # todo(yuxian): need pad speakers?\n speakers = {subtoken_map.index(word_index): tokenizer.tokenize(speaker)\n for word_index, speaker in doc_info['speakers']}\n clusters = [[(subtoken_map.index(start), len(subtoken_map) - 1 - subtoken_map[::-1].index(end))\n for start, end in cluster] for cluster in doc_info['clusters']]\n tokenized_document = {'sub_tokens': sub_tokens, 'sentence_map': sentence_map, 'subtoken_map': subtoken_map,\n 'speakers': speakers, 'clusters': clusters, 'doc_key': doc_info['doc_key']}\n return 
tokenized_document", "def build_vocab(sentences_list, vocab_size, visual_fld):\n words = [word for sentence in sentences_list for word in sentence]\n utils.safe_mkdir(visual_fld)\n with open(os.path.join(visual_fld, 'vocab.tsv'), 'w') as fd:\n dictionary = {}\n index_dictionary = {}\n count = [('UNK', -1)]\n count.extend(Counter(words).most_common(vocab_size - 1))\n for index, (word, _) in enumerate(count):\n dictionary[word] = index\n index_dictionary[index] = word\n fd.write(word + '\\n')\n\n return dictionary, index_dictionary", "def load_vocab(vocab):\r\n\tvocab = [line.split()[0] for line in open(\r\n\t\t'{}{}'.format(pm.vocab_path, vocab), 'r', encoding='utf-8').read().splitlines()\r\n\t\t\t if int(line.split()[1]) >= pm.word_limit_size]\r\n\tword2idx_dic = {word: idx for idx, word in enumerate(vocab)}\r\n\tidx2word_dic = {idx: word for idx, word in enumerate(vocab)}\r\n\treturn word2idx_dic, idx2word_dic", "def vocab_size():\n\n MAXSIZE = 10000\n\n ls = Language.objects.exclude(id=80).filter(vocabulary_size__gt=0, vocabulary_size__lte=MAXSIZE).conlangs()\n\n outliers = Language.objects.filter(vocabulary_size__gt=MAXSIZE).order_by('vocabulary_size')\n\n # Assumes unimodal distribution\n modes = [(mode['count'], mode['vocabulary_size'])\n for mode in ls.values('vocabulary_size').annotate(count=Count('vocabulary_size')).order_by('-count', '-vocabulary_size')\n if mode['count'] > 5]\n mode = modes[0][1]\n\n avg_maximum_minimum = ls.aggregate(avg=Avg('vocabulary_size'), maximum=Max('vocabulary_size'), minimum=Min('vocabulary_size'))\n avg = avg_maximum_minimum['avg']\n maximum = avg_maximum_minimum['maximum']\n minimum = avg_maximum_minimum['minimum']\n\n curve = ls.order_by('-vocabulary_size')\n rows = [v.vocabulary_size for v in curve]\n\n chart_svg = vocab_chart(rows)\n\n # median\n med = median(rows)\n\n return {'average': avg,\n 'min': minimum,\n 'max': maximum,\n 'median': med,\n 'chart_svg': chart_svg,\n 'mode': mode,\n 'common': modes,\n 'stddev': stddev(rows),\n 'outliers': outliers,\n 'upper_bound': MAXSIZE}", "def build_pos_tag_vocab(data, vocab_size=1000, min_freq=1):\n counter = Counter()\n for d in data:\n tags = d['pos_class']\n counter.update(tags)\n\n itos = ['<pad>']\n min_freq = max(min_freq, 1)\n\n # sort by frequency, then alphabetically\n words_and_frequencies = sorted(counter.items(), key=lambda tup: tup[0])\n words_and_frequencies.sort(key=lambda tup: tup[1], reverse=True)\n\n for word, freq in words_and_frequencies:\n if freq < min_freq or len(itos) == vocab_size:\n break\n itos.append(word)\n # stoi is simply a reverse dict for itos\n stoi = defaultdict()\n stoi.update({tok: i for i, tok in enumerate(itos)})\n\n return {'itos': itos, 'stoi': stoi, 'len': len(itos)}", "def token2id(data, mode):\n vocab_path = 'vocab.' + mode\n in_path = data + '.' + mode\n out_path = data + '_ids.' 
+ mode\n _, vocab = load_vocab(os.path.join(config.PROCESSED_PATH, vocab_path))\n in_file = open(os.path.join(config.PROCESSED_PATH, in_path), 'rb')\n out_file = open(os.path.join(config.PROCESSED_PATH, out_path), 'wb')\n\n lines = in_file.read().splitlines()\n for line in lines:\n if mode == 'dec': # we only care about '<s>' and </s> in encoder\n ids = [vocab[b'<s>']]\n else:\n ids = []\n ids.extend(sentence2id(vocab, line))\n # ids.extend([vocab.get(token, vocab['<unk>']) for token in basic_tokenizer(line)])\n if mode == 'dec':\n ids.append(vocab[b'</s>'])\n out_file.write(b' '.join(str(id_).encode('ascii') for id_ in ids) + b'\\n')", "def dictionary(cleaned_data,threshold):\n news = []\n for date in cleaned_data:\n for headlines in cleaned_data[date]:\n news.append(headlines)\n\n word_freq = nltk.FreqDist(itertools.chain(*news))\n id_to_word = ['<pad>'] + [word for word, cnt in word_freq.items() if cnt >= threshold] + ['<unk>']\n word_to_id = {word:idx for idx, word in enumerate(id_to_word)}\n \n return id_to_word, word_to_id", "def process_data(data, max_len, top_words=None, custom_vocab=None):\n # We have to calculate this every time even though we\n # only need it for the training data\n if custom_vocab is None:\n vocab, word_to_idx, idx_to_word = get_vocab(data['utterance_t'], top_words)\n\n label_to_idx = {label:idx for idx, label in enumerate(data['dialog_act'].unique())}\n\n X = sequence.pad_sequences(data['utterance_t'].apply(process_seq, args=[word_to_idx]),\n maxlen=max_len)\n y = data['dialog_act'].map(label_to_idx).values\n y = to_categorical(y, len(data['dialog_act'].unique()))\n \n return (X, y), (vocab, word_to_idx, idx_to_word)", "def build_vocab(train_dir, vocab_dir, vocab_size=5000):\n data_train, _ = read_file(train_dir)\n\n all_data = []\n for content in data_train:\n all_data.extend(content)\n\n counter = Counter(all_data)\n count_pairs = counter.most_common(vocab_size-1)\n words, _ = list(zip(*count_pairs))\n\n open_file(vocab_dir,mode='w').write('\\n'.join(words)+'\\n')", "def create_vocabulary(vocabulary_path, words, max_vocabulary_size, normalize_digits=True):\n if not gfile.Exists(vocabulary_path):\n print(\"Creating vocabulary %s with max size %d\" % (vocabulary_path, max_vocabulary_size))\n vocab = {}\n counter = 0\n for w in words:\n counter += 1\n if counter % 10000 == 0:\n print(\" processing word %d = %s\" % (counter, w))\n word = re.sub(_DIGIT_RE, \"0\", w) if normalize_digits else w\n if word in vocab:\n vocab[word] += 1\n else:\n vocab[word] = 1\n vocab_list = _START_VOCAB + sorted(vocab, key=vocab.get, reverse=True)\n if len(vocab_list) > max_vocabulary_size:\n vocab_list = vocab_list[:max_vocabulary_size]\n with gfile.GFile(vocabulary_path, mode=\"w\") as vocab_file:\n for w in vocab_list:\n vocab_file.write(w + \"\\n\")", "def make_answer_vocab(adic, qdic, vocab_size, use_ocr):\n\n counter = Counter()\n for qid in adic.keys():\n answer_obj = adic[qid]\n answer_list = [ans['answer'] for ans in answer_obj]\n if use_ocr:\n ocr_tokens = qdic[qid]['ocr_tokens']\n answer_list = [x for x in answer_list if x not in ocr_tokens]\n\n counter.update(answer_list)\n\n adict = {}\n # TODO: check whether this is the right implementation\n adict[''] = 0\n idx = 1\n alist = counter.most_common(vocab_size - 1)\n for pair in alist:\n adict[pair[0]] = idx\n idx += 1\n return adict", "def get_vocab(data_set):\n vocab = {'PADDING': 0, 'PUNCT': 1}\n inv_vocab = {0: 'PADDING', 1: 'PUNCT'}\n wid = 2\n max_len = -1\n for record in data_set:\n assert 'words' in record\n words 
= record['words']\n if len(words) > max_len:\n max_len = len(words)\n for w in words:\n if w not in vocab:\n vocab[w] = wid\n inv_vocab[wid] = w\n wid += 1\n print(\"The maximum length of the sentence is %d\" % max_len)\n print(\"Find %s different words in the dataset\" % len(vocab))\n char_string = ''\n for w in vocab:\n char_string += w\n chars = list(set(char_string))\n cid, char_vocab = 0, {}\n for ch in chars:\n if ch not in char_vocab:\n char_vocab[ch] = cid\n cid += 1\n print(\"Find %s different chars in the dataset\" % len(char_vocab))\n return vocab, char_vocab, max_len", "def build_to_ids_fn(\n vocab,\n max_sequence_length,\n num_oov_buckets = 1):\n special_tokens = get_special_tokens(len(vocab), num_oov_buckets)\n bos = special_tokens.bos\n eos = special_tokens.eos\n\n table_values = np.arange(len(vocab), dtype=np.int64)\n table = tf.lookup.StaticVocabularyTable(\n tf.lookup.KeyValueTensorInitializer(vocab, table_values),\n num_oov_buckets=num_oov_buckets)\n\n def to_ids(example):\n\n sentence = tf.reshape(example['tokens'], shape=[1])\n words = tf.strings.split(sentence, sep=' ').values\n truncated_words = words[:max_sequence_length]\n tokens = table.lookup(truncated_words) + 1\n tokens = tf.cond(\n tf.less(tf.size(tokens), max_sequence_length),\n lambda: tf.concat([tokens, [eos]], 0), lambda: tokens)\n\n return tf.concat([[bos], tokens], 0)\n\n return to_ids", "def build_dataset(words):\n count = []\n # count.extend(collections.Counter(words).most_common(n_words - 1))\n count.extend(collections.Counter(words).most_common())\n dictionary = dict()\n for word, _ in count:\n dictionary[word] = len(dictionary)\n data = list()\n # unk_count = 0\n for word in words:\n index = dictionary.get(word, 0)\n # if index == 0: # dictionary['UNK']\n # unk_count += 1\n data.append(index)\n # count[0][1] = unk_count\n reversed_dictionary = dict(zip(dictionary.values(), dictionary.keys()))\n data = [data[::2],data[1::2]]\n new_data = list()\n for i in range(len(data[0])):\n new_data.append([data[0][i],data[1][i]])\n data = new_data\n vocabulary_size = len(dictionary)\n print(\"\\n\\ndictionary size = \")\n print(len(dictionary))\n return data, count, dictionary, reversed_dictionary, vocabulary_size", "def split_by_word_sequentially(data, word_to_id_dict, max_sequence_len):\n seq = []\n for line in data:\n encoded_line = Suggest_Util.text_to_id(line, word_to_id_dict)\n for i in range(1, len(encoded_line)):\n seq.append(encoded_line[:i+1])\n sequences = tf.keras.preprocessing.sequence.pad_sequences(seq, maxlen=max_sequence_len, padding='pre')\n sequences = np.array(sequences)\n X, y = sequences[:,:-1], sequences[:,1:]\n y = tf.keras.utils.to_categorical(y, num_classes=len(word_to_id_dict))\n return X, y", "def buildVocabToNumMapping(vocab):\n # Index starts at one so we reseve 0 as a padding character \n index = 1\n vocab_to_num = {}\n num_to_vocab = {}\n \n for word in vocab:\n if word not in vocab_to_num:\n vocab_to_num[word] = index\n num_to_vocab[index] = word\n index += 1\n print(\"Max index // length of vocab: %s\" % index)\n \n return (vocab_to_num, num_to_vocab)", "def split_doc2sen(doc, word2id, data_type, max_sens, max_words, padding):\n if data_type == \"json\":\n sens = [sen.lower() for sen in json.loads(doc)]\n elif data_type == \"str\":\n sens = re.split(\"\\.|\\?|\\|\", doc.lower()) \n sens = [sen for sen in sens if len(sen.strip().split(\" \")) > 5]\n\n pad = padding\n sens_pad = []\n for j, sen in enumerate(sens[:max_sens]):\n sen_ids = [0] * pad\n tokens = word_tokenize(sen)\n for 
w in tokens[:max_words]:\n sen_ids.append(word2id.get(w.encode('utf-8'), 1))\n num_suff = max(0, max_words - len(tokens)) + pad\n sen_ids += [0] * num_suff\n sens_pad.append(sen_ids)\n\n # add more padding sentences\n num_suff = max(0, max_sens - len(sens))\n for i in range(0, num_suff):\n sen_ids = [0] * len(sens_pad[0])\n sens_pad.append(sen_ids)\n\n return sens_pad", "def create_ngram_dict(min_ngram=1, max_ngram=3, filename='./resources/vocabulary.json'):\n db_labels = get_labels_from_db()\n vocabulary = filter_ngram_labels(db_labels, min_ngram, max_ngram)\n save_dict_as_json(vocabulary, filename)\n return vocabulary", "def get_weibo_data(vocab_file, vector_file):\n if os.path.exists(\"word_misc.pkl\"):\n return cPickle.load(open(\"word_misc.pkl\", \"rb\"))\n\n word_misc, word2id, id2word = {}, {}, {}\n word_count = 0\n\n # vocab file\n print \"Building vocabulary ...\"\n for lines in open(vocab_file).readlines():\n word = lines.split()[0]\n if not is_unwanted_words(word, ['', '\\n']):\n word2id[word] = word_count\n id2word[word_count] = word\n word_count += 1\n word2id['_START'] = word_count\n id2word[word_count] = '_START'\n word_count += 1\n word2id['_END'] = word_count\n id2word[word_count] = '_END'\n word_count += 1\n word2id['_UNK'] = word_count\n id2word[word_count] = '_UNK'\n word_count += 1\n word2id['_MASK'] = word_count\n id2word[word_count] = '_MASK'\n word_count += 1\n print \"Vocabulary size:\", word_count\n\n # Initialization is refered to in https://www.tensorflow.org/versions/r0.7/tutorials/word2vec/index.html\n word_emb = (1/np.sqrt(word_count)*(2*np.random.rand(word_count, options['embedding_size']) - 1)).tolist()\n\n # load word vectors\n for lines in open(vector_file).readlines()[1:]:\n word = lines.split()[0]\n #if word == '</s>' or word not in word2id.keys():\n # continue\n if word not in word2id.keys():\n continue\n ids = word2id[word]\n #print ids, lines, len(word_emb)\n word_emb[ids] = [float(w) for w in lines.split()[1:]]\n\n print len(word_emb), \"words have been loaded with\", len(word_emb[0]), \"dimensions\"\n\n # load word misc\n word_misc['id2word'] = id2word\n word_misc['word2id'] = word2id\n word_misc['word_count'] = word_count\n word_misc['word_emb'] = word_emb\n cPickle.dump(word_misc, open(\"word_misc.pkl\", \"wb\"))\n print \"Dump complete.\"\n return word_misc", "def create_vocabulary(vocabulary_path, data_paths, max_vocabulary_size,\n tokenizer=None, normalize_digits=True):\n if not gfile.Exists(vocabulary_path):\n vocab = {}\n files = []\n files += [data_paths+f for f in os.listdir(data_paths) ]\n for one_file in files:\n with gfile.GFile(one_file, mode=\"rb\") as f:\n review = f.read()\n tokens = tokenizer(review) if tokenizer else character_tokenizer(review)\n for w in tqdm(tokens):\n word = _DIGIT_RE.sub(b\"0\", w) if normalize_digits else w\n if word in vocab:\n vocab[word] += 1\n else:\n vocab[word] = 1\n vocab_list = _START_VOCAB + sorted(vocab, key=vocab.get, reverse=True)\n if len(vocab_list) > max_vocabulary_size:\n vocab_list = vocab_list[:max_vocabulary_size]\n with gfile.GFile(vocabulary_path, mode=\"wb\") as vocab_file:\n for w in vocab_list:\n vocab_file.write(w + b\"\\n\")\n else:\n print(\"Vocabulary already created.\")", "def raw_to_ids(raw_data, word_to_id):\n docs = tokenize_keras(raw_data)\n uid = word_to_id[UNKNOWN_WORD]\n return [[word_to_id.get(w, uid) for w in doc] for doc in docs]", "def get_vocab(lexicon:Dict[str, int], vocab_size:int=80000,unknownword_token=\"UNK\"):\n\titems = lexicon.items()\n\tsorted_items = 
sorted(items, key=lambda x:x[1], reverse=True)\n\tsimple_lexicon = dict(sorted_items[0: vocab_size])\n\tsave_lexicon(simple_lexicon, filename=\"simple_lexicon.json\")\n\ti = 0\n\tvocab = {}\n\tfor key in simple_lexicon.keys():\n\t\tvocab[key] = i\n\t\ti += 1\n\n\tvocab[unknownword_token] = i\n\tsave_lexicon(vocab, filename=\"vocab.json\")", "def build_ngram_vocab(self, n):\n max_ngram_per_word = 0\n ngram_dict = collections.defaultdict(int)\n for word in self.train_data:\n if word == self.eos or word == self.sos:\n continue\n _word = '^' + word + '$'\n ngram_counts = len(_word) - n + 1\n if ngram_counts > max_ngram_per_word:\n max_ngram_per_word = ngram_counts\n for i in range(ngram_counts):\n ngram = _word[i:i + n]\n ngram_dict[ngram] += 1\n\n unk_ngram_list = set()\n item_to_id = dict()\n item_to_id[constants.PAD_ITEM] = len(item_to_id)\n item_to_id[constants.UNK_ITEM] = len(item_to_id)\n sorted_dict = sorted(ngram_dict.items(), key=operator.itemgetter(1), reverse=True)\n for token, freq in sorted_dict:\n if freq == 1:\n unk_ngram_list.add(token)\n if token not in item_to_id:\n item_to_id[token] = len(item_to_id)\n return item_to_id, unk_ngram_list, max_ngram_per_word", "def make_question_vocab(qdic, max_length):\n qdict = {'':0}\n vid = 1\n for qid in qdic.keys():\n # sequence to list\n q_str = qdic[qid]['qstr']\n q_list = VQADataProvider.seq_to_list(q_str, max_length)\n\n # create dict\n for w in q_list:\n if w not in qdict:\n qdict[w] = vid\n vid +=1\n\n return qdict", "def build_dataset(data, vocab_size=50000):\r\n\r\n # we will replace non-frequent tokens with the `unknown` token\r\n unk_token = '<UNK>'\r\n\r\n # calc frequencies of the tokens in our data\r\n tokens_counts = Counter(data)\r\n most_common_tokens = tokens_counts.most_common(vocab_size)\r\n\r\n # create a token => id mapping\r\n token2id = {unk_token: 0}\r\n for token, counts in most_common_tokens:\r\n token2id[token] = len(token2id)\r\n\r\n # create a reverse mapping from ids to tokens\r\n id2token = {i: t for t, i in token2id.items()}\r\n\r\n # convert data to tokens ids\r\n nb_unks = 0\r\n data_tokens_ids = []\r\n for token in data:\r\n if token in token2id:\r\n idx = token2id[token]\r\n else:\r\n idx = token2id[unk_token]\r\n nb_unks += 1\r\n\r\n data_tokens_ids.append(idx)\r\n\r\n print('Vocab size:', len(token2id))\r\n print('Unknown tokens:', nb_unks)\r\n\r\n return data_tokens_ids, token2id, id2token", "def data_to_token_ids(data_path, target_path, vocabulary_path,\n tokenizer=None, normalize_digits=True):\n if not os.path.exists(target_path):\n os.makedirs(target_path)\n if not gfile.Exists(target_path+\"sentences.txt\"):\n print(\"Tokenizing data in %s\" % data_path)\n vocab, _ = initialize_vocabulary(vocabulary_path)\n files = []\n #for d in data_path:\n files += [data_path+f for f in os.listdir(data_path) ]\n with gfile.GFile(target_path+\"sentences.txt\" , mode=\"w\") as tokens_file:\n for one_file in files:\n with gfile.GFile(one_file, mode=\"rb\") as f:\n for line in tqdm(f.readlines()):\n sentence = cleanHTML( line )\n if len(sentence) > 10: # don't save short sentences\n while sentence[0] == \" \":\n sentence = sentence[1:]\n token_ids = sentence_to_token_ids(tf.compat.as_bytes(sentence), vocab,\n tokenizer, normalize_digits)\n tokens_file.write(\" \".join([str(tok) for tok in token_ids]) + \"\\n\")\n #sentiments_files.write( str(rating) + \"\\n\")", "def build_vocab(filenames):\n vocab = set()\n max_word_length = 0\n max_sentence_length = 0\n number_of_sentences = 0\n for filename in filenames:\n with 
io.open(filename, 'r', encoding='utf8') as fin:\n for line in fin.readlines():\n number_of_sentences += 1\n vocab = vocab | set(line)\n sentence_length = len(line)\n if sentence_length > max_sentence_length:\n max_sentence_length = sentence_length\n if number_of_sentences % 1000 == 0:\n print(str(number_of_sentences))\n vocab = list(vocab)\n char_to_int = {char:(i+1) for i, char in enumerate(vocab)}\n int_to_char = {(i+1):char for i, char in enumerate(vocab)}\n metadata = {\"char_to_int\": char_to_int,\n \"int_to_char\": int_to_char,\n \"max_sentence_length\": max_sentence_length,\n \"number_of_sentences\": number_of_sentences}\n return metadata", "def process_decoder_input(target_data, target_vocab_to_int, batch_size):\n # Create a constant tensor with the 'go id'.\n go_id = tf.constant(target_vocab_to_int['<GO>'], shape=(batch_size,1), dtype=tf.int32)\n # Concatenate the vector without the last word id with the go ids vector\n processed_input = tf.concat([go_id,target_data[:,:-1]],1)\n return processed_input", "def generate_vocab_dict(vocab):\n v_dict = {}\n for word in vocab:\n if len(word) in v_dict:\n v_dict[len(word)].append(word)\n else:\n v_dict[len(word)] = [word]\n return v_dict", "def get_vocabulary(corpus,\n initial_vocab={\n '<unk>': 0,\n '<sssss>': 1\n },\n vocabsize=0):\n vocab = copy.copy(initial_vocab)\n word_count = Counter()\n for text in corpus:\n for w in text.split(' '):\n word_count[w] += 1\n\n # if vocabulary size is specified, most common words are selected\n if vocabsize > 0:\n for w in word_count.most_common(vocabsize):\n if w[0] not in vocab:\n vocab[w[0]] = len(vocab)\n if len(vocab) >= vocabsize:\n break\n else: # all observed words are stored\n for w in word_count:\n if w not in vocab:\n vocab[w] = len(vocab)\n return vocab", "def vocab(self):\n num_words = -1\n if not self._vocab:\n c = self._conn.cursor()\n c.execute('select feature, censored, word_id from vocab')\n\n d = {}\n for ww, cc, ii in c:\n d[ii] = ww\n d[ww] = ii\n if cc == 1:\n self._censored.add(ww)\n num_words = max(ii, num_words)\n\n logger.info(\"Loaded vocab with %i words; %i censored\" % \\\n (len(d) / 2, len(self._censored)))\n\n # Add the start symbol\n if not START_SYMBOL in d:\n d[START_SYMBOL] = num_words + 1\n d[num_words + 1] = START_SYMBOL\n\n logger.info(\"Retrieved %i words\" % num_words)\n self._vocab = d\n\n return self._vocab", "def learn_word_vocab(self, word_counts: typing.Counter[str]) -> Dict[str, int]:\r\n for token in set(self.required_tokens or []):\r\n word_counts[token] = int(2 ** 31)\r\n word_counts[self.PAD] = int(2 ** 32) # Make sure that PAD gets id=0\r\n sorted_word_counts = sorted(word_counts.items(), key=lambda p: -p[1])\r\n return {word: idx for idx, (word, count) in enumerate(sorted_word_counts[: self.word_vocab_size])}", "def loadw2v(embfile, embsize, myzipfile=None, maxvoc=None):\n word_to_ix = {}\n word_to_ix[constants.PAD_ITEM] = 0\n word_to_ix[constants.UNK_ITEM] = 1\n # fill padding word with zeros\n model = [[0.]*embsize]\n # fill unk word with random numbers\n model.append(np.random.normal(0,0.15,size=embsize).tolist())\n if myzipfile != None:\n zip = zipfile.ZipFile(myzipfile, 'r')\n f = zip.read(embfile).split(\"\\n\")\n else:\n #f = open(embfile, 'r')\n f = codecs.open(embfile, \"r\", \"utf-8\")\n ix = 2\n for line in f:\n if maxvoc!=None:\n if ix >= maxvoc:\n break\n splitLine = line.split()\n if(len(splitLine)>embsize+1):\n phrase_lst = splitLine[:-embsize]\n word = ' '.join(phrase_lst)\n embedding = [float(val) for val in splitLine[-embsize:]]\n 
word_to_ix[word] = ix\n model.append(embedding)\n ix += 1\n elif(len(splitLine)>2):\n word = splitLine[0]\n embedding = [float(val) for val in splitLine[1:]]\n word_to_ix[word]=ix\n model.append(embedding)\n ix += 1\n else:\n print(line)\n print(\"%d words loaded!\" % len(model))\n return word_to_ix, model", "def _make_word_dictionary(self,annos):\n # get training annos\n train_annos = self.annos[\"train\"]\n # read tokens\n tokens_list = []\n for ann in train_annos:\n tokens_list += [tk for tk in ann[\"tokens\"]]\n # print results: count tokens and show top-n\n print(\"Top-{} tokens list:\".format(self.cfg.DATASET.SHOW_TOP_VOCAB))\n tokens_count = sorted(Counter(tokens_list).items(), key=lambda x:x[1])\n for tk in tokens_count[-self.cfg.DATASET.SHOW_TOP_VOCAB:]:\n print(\"\\t- {}: {}\".format(tk[0],tk[1]))\n # make wtoi, itow\n wtoi = {}\n wtoi[\"<PAD>\"], wtoi[\"<UNK>\"] = 0, 1\n wtoi[\"<S>\"], wtoi[\"<E>\"] = 2, 3\n for i,(tk,cnt) in enumerate(tokens_count):\n idx = i+4 # idx start at 4\n wtoi[tk] = idx\n itow = {v:k for k,v in wtoi.items()}\n self.cfg.MODEL.QUERY.EMB_IDIM = len(wtoi)\n return wtoi, itow", "def create_vocab():\n \n cutoff = CUTOFF\n \n lines = open(INFNAME_FORMAT.format(\"train\")).readlines() \\\n + open(INFNAME_FORMAT.format(\"test\")).readlines()\n raw = [process_line(l) for l in lines]\n cntx = Counter( [ w for e in raw for w in e ] )\n vocab = { x for x, y in cntx.items() if y > cutoff }\n \n return vocab", "def create_vocab(df, datapath):\n if os.path.isfile(\"vocab_max_l.p\"):\n o = cPickle.load(open(\"vocab_max_l.p\", \"rb\")) # search if vocab file is already existing\n vocab = o[0]\n max_l = o[1]\n else:\n vocab = defaultdict(int)\n max_l = 0\n for d in read_data_files(df.file, datapath):\n words = clean_str(d).split(\" \")\n if len(words) > max_l:\n max_l = len(words)\n\n for w in words:\n vocab[w] += 1\n\n cPickle.dump([vocab, max_l], open(\"vocab_max_l.p\", \"wb\"))\n return vocab, max_l", "def load_train_word_dict():\n train_dict = {}\n with open(TRANSCRIPTION_PATH) as file:\n for line in file:\n if int(line[0:3]) < 300:\n word_id, transcript = str.split(line, \" \")\n train_dict[word_id] = transcript.rstrip('\\n')\n return train_dict", "def preprocess(document, max_features=150, max_sentence_len=300):\n\n def lemmatize(token, tag):\n \"\"\"\n Converts the tag to a WordNet POS tag, then uses that\n tag to perform an accurate WordNet lemmatization.\n \"\"\"\n tag = {\n 'N': wn.NOUN,\n 'V': wn.VERB,\n 'R': wn.ADV,\n 'J': wn.ADJ\n }.get(tag[0], wn.NOUN)\n\n return WordNetLemmatizer().lemmatize(token, tag)\n\n def vectorize(doc, max_features, max_sentence_len):\n \"\"\"\n Converts a document into a sequence of indices of length max_sentence_len retaining only max_features unique words\n \"\"\"\n tokenizer = Tokenizer(num_words=max_features)\n tokenizer.fit_on_texts(doc)\n doc = tokenizer.texts_to_sequences(doc)\n doc_pad = pad_sequences(doc, padding='pre', truncating='pre', maxlen=max_sentence_len)\n return np.squeeze(doc_pad), tokenizer.word_index\n\n cleaned_document = []\n vocab = []\n\n # Break the document into sentences\n for sent in document:\n\n # Clean the text using a few regular expressions\n sent = re.sub(r\"[^A-Za-z0-9^,!.\\/'+-=]\", \" \", sent)\n sent = re.sub(r\"what's\", \"what is \", sent)\n sent = re.sub(r\"\\'\", \" \", sent)\n sent = re.sub(r\"@\", \" \", sent)\n sent = re.sub(r\"\\'ve\", \" have \", sent)\n sent = re.sub(r\"can't\", \"cannot \", sent)\n sent = re.sub(r\"n't\", \" not \", sent)\n sent = re.sub(r\"i'm\", \"i am \", sent)\n 
sent = re.sub(r\"\\'re\", \" are \", sent)\n sent = re.sub(r\"\\'d\", \" would \", sent)\n sent = re.sub(r\"\\'ll\", \" will \", sent)\n sent = re.sub(r\"(\\d+)(k)\", r\"\\g<1>000\", sent)\n sent = sent.replace(\"\\n\", \" \")\n\n lemmatized_tokens = []\n\n # Break the sentence into part of speech tagged tokens\n for token, tag in pos_tag(wordpunct_tokenize(sent)):\n\n # Apply preprocessing to the tokens\n token = token.lower()\n token = token.strip()\n token = token.strip('_')\n token = token.strip('*')\n\n # If punctuation ignore token and continue\n if all(char in set(string.punctuation) for char in token) or token in set(sw.words('english')):\n continue\n\n # Lemmatize the token\n lemma = lemmatize(token, tag)\n lemmatized_tokens.append(lemma)\n vocab.append(lemma)\n\n cleaned_document.append(lemmatized_tokens)\n\n vocab = sorted(list(set(vocab)))\n\n return cleaned_document, vocab", "def read_data(max_size=None, max_sentence_size=None, min_sentence_size=10):\n sentences = []\n with tf.gfile.GFile('data_WMT/sentences/sentences.txt', mode=\"r\") as source_file:\n source = source_file.readline()\n print (source)\n counter = 0\n while source and (not max_size or counter < max_size):\n source_ids = [int(x) for x in source]\n if len(source_ids) < max_sentence_size and len(source_ids) > min_sentence_size:\n sentences.append(source_ids)\n ratings.append(rating)\n counter += 1\n if counter % 10000 == 0 and counter != 0:\n print(\" reading data line %d\" % counter)\n sys.stdout.flush()\n source = source_file.readline()\n return sentences", "def build_vocab(cleaned_captions):\n # QUESTION 1.1\n # Here we Build a vocabulary\n\n # create a vocab instance\n vocab = Vocabulary()\n\n words = dict()\n for caption in cleaned_captions: # iterate through all cleaned_caption\n for word in caption.split(): # iterate over all words in a caption\n # add the token words to vocabulary if and only if the count of word is more than MIN_FREQUENCY i.e. 
3\n if word not in words.keys():\n words[word] = 1\n else:\n words[word] += 1\n if words[word] > MIN_FREQUENCY:\n vocab.add_word(word)\n\n vocab.add_word('<pad>')\n vocab.add_word('<start>')\n vocab.add_word('<end>')\n vocab.add_word('<unk>')\n\n print(vocab.idx)\n\n return vocab", "def data_to_token_ids(self, input_path, target_path, train = True):\n # Set up the path\n path_target = os.path.join(self.data_dir, target_path)\n\n # Initialize list\n tokens_ids = []\n tokens_length = []\n labels = []\n\n # Tokenize\n print(\"Tokenizing data in %s\" % path_target)\n self.initialize_vocabulary()\n counter = 0\n for file in input_path:\n path_input = os.path.join(self.data_dir, file)\n with open(path_input, 'r', newline=\"\\n\", encoding='utf8') as f:\n for line in f:\n counter += 1\n if counter % 100000 == 0:\n print(\"Tokenizing line %d\" % counter)\n if not train:\n line = self.word_sub.sub(r\"\", line)\n tokens_ids.append(self.sentence_to_token_ids(line))\n tokens_length.append(len(tokens_ids[-1]))\n # Insert labels for classification\n if \"pos\" in file:\n labels.append(1)\n elif \"neg\" in file:\n labels.append(0)\n\n # Print statistics\n print(\"Maximum length {}\".format(max(tokens_length)))\n print(\"Average length {}\".format(sum(tokens_length)/len(tokens_length)))\n print(\"Number of sentences {}\".format(len(tokens_length)))\n n_unks = sum([tokens.count(3) for tokens in tokens_ids])\n n_words = sum([len(tokens) for tokens in tokens_ids])\n print(\"Number of unks {}\".format(n_unks))\n print(\"Number of words {}\".format(n_words))\n print(\"Ratio unks/words {}%\".format(n_unks/n_words*100))\n\n # Print longest sentences\n np_tokenls_length = np.array(tokens_length)\n idx = np.argsort(np_tokenls_length)[-10:]\n for i in idx:\n print([self.dict_vocab_reverse.get(id) for id in tokens_ids[i]])\n\n return tokens_ids, tokens_length, labels", "def vectorize(doc, max_features, max_sentence_len):\n tokenizer = Tokenizer(num_words=max_features)\n tokenizer.fit_on_texts(doc)\n doc = tokenizer.texts_to_sequences(doc)\n doc_pad = pad_sequences(doc, padding='pre', truncating='pre', maxlen=max_sentence_len)\n return np.squeeze(doc_pad), tokenizer.word_index", "def load_vocab(vocab_file):\n index = 0\n itos = {}\n stoi = {}\n with open(vocab_file, \"r\") as reader:\n while True:\n token = reader.readline()\n if not token:\n break\n token = token.strip()\n itos[index] = token\n stoi[token] = index\n index += 1\n itos[index] = 'style_options'\n stoi['style_options'] = index\n itos[index+1] = 'ambience'\n stoi['ambience'] = index + 1\n return {'itos': itos, 'stoi': stoi, 'len': len(itos)}", "def construct_dict(self):\n i = 0\n self.word2idx = dict()\n fi = open(self.config.word_vec_fi_glove, 'r')\n\n for line in fi:\n self.word2idx[line.split(\" \")[0]] = i\n i += 1\n\n self.vocab_size = i\n self.write_dict()\n fi.close()", "def process_file(filename, word_to_id, cat_to_id, max_length=600):\n contents, labels = read_file(filename)\n data_id, label_id = [],[]\n for i in range(len(contents)):\n data_id.append([word_to_id[x] for x in contents[i] if x in word_to_id])\n label_id.append(cat_to_id[labels[i]])\n\n # 使用keras提供的pad_sequences来将文本pad为固定长度\n x_pad = kr.preprocessing.sequence.pad_sequences(data_id, max_length)\n y_pad = kr.utils.to_categorical(label_id) # 将标签转换为one-hot表示\n return x_pad, y_pad", "def build(corpus: List[List[str]], size=5000, freq_cutoff=5):\n vocab = VocabEntry()\n word2freq = Counter(chain(*corpus))\n word2freq = {word: freq for word, freq in word2freq.items() if freq > 
freq_cutoff}\n words_selected = sorted(word2freq.keys(), key=lambda w: word2freq[w], reverse=True)[:size]\n for w in words_selected:\n vocab.add(w)\n print(\"vocabulary constructing completed, %d/%d words included......\" % (len(words_selected), len(word2freq)))\n return vocab", "def data_to_token_ids(data_path, target_path, vocabulary_path):\n if not gfile.Exists(target_path):\n print(\"Tokenizing data in %s\" % data_path)\n vocab, _ = initialize_vocabulary(vocabulary_path)\n with gfile.GFile(data_path, mode=\"rb\") as data_file:\n with gfile.GFile(target_path, mode=\"w\") as tokens_file:\n counter = 0\n for line in data_file:\n counter += 1\n if counter % 100000 == 0:\n print(\" tokenizing line %d\" % counter)\n token_ids = sentence_to_token_ids(tf.compat.as_bytes(line), vocab)\n tokens_file.write(\" \".join([str(tok) for tok in token_ids]) + \"\\n\")", "def create_vocab(self, input_file):\n print(\"create the vocabulary and tag , convert them to id ...\")\n\n sents_info = []\n\n with open(input_file, 'r') as f:\n sent, tag = [], []\n for i, line in enumerate(f.readlines()):\n if i % 1000 == 0:\n print(i, line)\n if len(line.strip()) > 2:\n ls = line.strip().split()\n c = ls[0]\n t = ls[-1]\n if c not in self.char2id.keys():\n self.char2id[c] = len(self.char2id)\n self.id2char[len(self.id2char)] = c\n\n if t not in self.tag2id.keys():\n self.tag2id[t] = len(self.tag2id)\n self.id2tag[len(self.id2tag)] = t\n\n sent.append(c)\n tag.append(t)\n else:\n assert len(sent) == len(tag)\n sent_id = [self.char2id.get(c, 0) for c in sent]\n tag_id = [self.tag2id.get(t, 0) for t in tag]\n sents_info.append([sent, tag, sent_id, tag_id, len(sent)])\n sent, tag = [], []\n return sents_info", "def words_to_word_ids(data, word_to_id):\n # if isinstance(data[0], six.string_types):\n # print(type(data[0]))\n # # exit()\n # print(data[0])\n # print(word_to_id)\n # return [word_to_id[str(word)] for word in data]\n # else:\n return [word_to_id[word] for word in data]\n\n # if isinstance(data[0], str):\n # # print('is a string object')\n # return [word_to_id[word] for word in data]\n # else:#if isinstance(s, bytes):\n # # print('is a unicode object')\n # # print(data[0])\n # return [word_to_id[str(word)] f", "def load_preprocessed(self):\n with open(self.words_vocab_file, 'rb') as f:\n self.word_to_id, self.unk_word_list = pickle.load(f)\n self.word_vocab_size = len(self.word_to_id)\n\n if self.unit != \"word\":\n with open(self.sub_vocab_file, 'rb') as f:\n if self.unit == \"char\":\n self.max_word_len = self.get_max_word_length(self.word_to_id) + 2\n self.char_to_id, self.unk_char_list, self.max_word_len = pickle.load(f)\n self.subword_vocab_size = len(self.char_to_id)\n elif self.unit == \"char-ngram\":\n self.ngram_to_id, self.unk_char_list, self.unk_ngram_list, \\\n self.max_ngram_per_word = pickle.load(f)\n self.subword_vocab_size = len(self.ngram_to_id)\n elif self.unit == \"morpheme\":\n self.morpheme_to_id, self.unk_char_list, self.unk_morph_list, \\\n self.max_morph_per_word = pickle.load(f)\n self.subword_vocab_size = len(self.morpheme_to_id)\n elif self.unit == \"oracle\":\n self.morpheme_to_id, self.max_morph_per_word = pickle.load(f)\n self.subword_vocab_size = len(self.morpheme_to_id)\n else:\n sys.exit(\"Unknown unit\")", "def text2vec(self, maxlen):\n # Vocab = {word : index}\n self.Vocab = dict()\n\n for SentenceLabel in self.Pos + self.Neg:\n vector = [0] * maxlen\n for index, word in enumerate(SentenceLabel[0]):\n if index >= maxlen:\n break\n if word not in self.Vocab.keys():\n 
self.Vocab[word] = len(self.Vocab)\n vector[index] = len(self.Vocab) - 1\n else:\n vector[index] = self.Vocab[word]\n SentenceLabel[0] = vector\n self.doConvert = True", "def build_sense_embedding(target_sense_to_id, word_freq, EMBEDDING_DIM):\r\n res = {}\r\n wordvecs = load_glove(EMBEDDING_DIM)\r\n \r\n for target_sense_list in target_sense_to_id:\r\n for key, _ in target_sense_list.items():\r\n sense_vector = np.zeros(EMBEDDING_DIM)\r\n senses = key.split(',')\r\n n = 0\r\n for sensekey in senses:\r\n #print(sensekey) \r\n if '/' in sensekey:\r\n continue\r\n sense_synset = sc2ss(sensekey)\r\n if sense_synset:\r\n sense_vector += build_sense_vector(sense_synset, word_freq, wordvecs)\r\n n += 1\r\n if n != 0:\r\n res[key] = sense_vector/n\r\n return res", "def build_vocab(self, corpus):\n if self.vocabulary_counts != None:\n logger.debug(\"building vocabulary from provided frequency map\")\n vocab = self.vocabulary_counts\n else:\n logger.debug(\"default vocabulary building\")\n super(Skipgram, self).build_vocab(corpus)\n return\n\n # assign a unique index to each word\n self.vocab, self.index2word = {}, []\n\n for word, count in vocab.iteritems():\n v = Vocab()\n v.count = count\n if v.count >= self.min_count:\n v.index = len(self.vocab)\n self.index2word.append(word)\n self.vocab[word] = v\n\n logger.debug(\"total %i word types after removing those with count<%s\" % (len(self.vocab), self.min_count))\n\n if self.hs:\n # add info about each word's Huffman encoding\n self.create_binary_tree()\n if self.negative:\n # build the table for drawing random words (for negative sampling)\n self.make_table()\n # precalculate downsampling thresholds\n self.precalc_sampling()\n self.reset_weights()", "def tokenize(self, max_length=12):\n for entry in self.data:\n tokens = self.dictionary.tokenize(entry['question'], False)\n tokens = tokens[:max_length]\n if len(tokens) < max_length:\n # Note here we pad in front of the sentence\n padding = [self.dictionary.padding_idx] * (max_length - len(tokens))\n tokens = tokens + padding\n assert len(tokens) == max_length\n entry['q_token'] = tokens", "def data_to_token_ids(self,data_path, target_path, vocab,\n tokenizer=None, normalize_digits=True):\n # if not gfile.Exists(target_path):\n if True:\n with tf.gfile.GFile(data_path, mode=\"r\") as data_file:\n counter = 0\n results = []\n for line in data_file:\n token_ids = self.sentence_to_token_ids(line, vocab, tokenizer,\n normalize_digits)\n results.append(\" \".join([str(tok) for tok in token_ids]) + \"\\n\")\n try:\n len_d, len_q = len(results[2].split()), len(results[4].split())\n except:\n return\n with open(\"%s_%s\" % (target_path, len_d + len_q), mode=\"w\") as tokens_file:\n tokens_file.writelines(results)", "def getVocabularyDict(vocabulary: dict, training_feature: TrainingFeature):\n vocab = {}\n index = 0\n if training_feature.FEATURE_DROP_FREQUENT_WORDS:\n print(\"Select vocabdict with drop_frequent\")\n array = sorted([(k, v) for (k, v) in vocabulary.items()], key= lambda x: x[1])\n print(\"Total length: \", len(array))\n length = len(array)\n array = array[int(length * 0.75): int(length * 1.0)][0:training_feature.VOCAB_SIZE]\n for (k , _) in array:\n vocab.setdefault(k, index)\n index += 1\n else:\n print(\"Select vocabdict with non_drop_frequent\")\n array = sorted([(k, v) for (k, v) in vocabulary.items()], key=lambda x: x[1])\n length = len(array)\n print(\"Total length: \", length)\n array = array[-training_feature.VOCAB_SIZE:]\n for (k, _) in array:\n vocab.setdefault(k, index)\n index += 
1\n # for (k, v) in vocabulary.items():\n # if v > 50:\n # vocab.setdefault(k, index)\n # index += 1\n print(\"VocabDict length: \", len(vocab))\n # print(vocab)\n return vocab", "def preprocess_input(text, tokenizer, max_id):\n X = np.array(tokenizer.texts_to_sequences(text)) - 1\n encoded = tf.one_hot(X, depth=max_id)\n return encoded", "def _file_to_word_ids(filename, word_to_id, train=True, backwards=False):\n sentences = []\n if train:\n sentences = _read_words(filename, backwards)\n else:\n sentences = _read_test_stop_at_blank(filename, backwards)\n data = []\n for sentence in sentences:\n data.extend(sentence.split())\n return [word_to_id[word] for word in data if word in word_to_id], sentences, [[word_to_id[word] for word in sentence.split() if word in word_to_id] for sentence in sentences]", "def parse_data_from_json_file(val_dataset: str, max_data: int = 1e10):\n # check if input file is of type jsonl\n assert os.path.splitext(val_dataset)[-1] == \".jsonl\", \"dataset file type is not jsonl, check the file provided\"\n # store all document ids\n id_list = []\n # store all candidates\n id_candidate_list = []\n # store length of all candidates\n id_candidate_len_list = []\n # store data id to candidate length dict\n id_candidate_len_dict = {}\n data_dict = {}\n\n with jsonlines.open(val_dataset) as reader:\n for n, data_line in enumerate(tqdm(reader)):\n if n > max_data:\n break\n doc_id = data_line['example_id']\n id_list.append(doc_id)\n\n # initialize data_dict\n data_dict[doc_id] = {\n 'document_text': ' '.join([item['token'] for item in data_line['document_tokens']]),\n 'question_text': data_line['question_text'],\n 'long_answer_candidates': data_line['long_answer_candidates'],\n }\n\n question_len = len(data_line['question_text'].split())\n\n # We use the white space tokenized version to estimate candidate length here.\n for i, candidate in enumerate(data_line['long_answer_candidates']):\n id_candidate_list.append((doc_id, i))\n candidate_length = question_len + candidate['end_token'] - candidate['start_token']\n id_candidate_len_list.append(candidate_length)\n id_candidate_len_dict[(doc_id, i)] = candidate_length\n\n # sorting candidates based on candidate's length\n sorted_index = np.argsort(np.array(id_candidate_len_list))\n id_candidate_list_sorted = []\n for i in range(len(id_candidate_list)):\n id_candidate_list_sorted.append(id_candidate_list[sorted_index[i]])\n\n return id_list, id_candidate_list_sorted, data_dict", "def data_to_token_ids(data_path, target_path, vocabulary_path,\n tokenizer=None, normalize_digits=True):\n if not gfile.Exists(target_path):\n print(\"Tokenizing data in %s\" % data_path)\n vocab, _ = data_utils.initialize_vocabulary(vocabulary_path)\n with gfile.GFile(data_path, mode=\"rb\") as data_file:\n with gfile.GFile(target_path, mode=\"w\") as tokens_file:\n counter = 0\n for line in data_file:\n counter += 1\n if counter % 100000 == 0:\n print(\" tokenizing line %d\" % counter)\n\n utterences = line.split('\\t')\n\n tokenized_utterences = []\n for utter in utterences:\n token_ids = data_utils.sentence_to_token_ids(tf.compat.as_bytes(utter), vocab,\n tokenizer, normalize_digits)\n tokenized_utterences.append(\" \".join([str(tok) for tok in token_ids]))\n\n tokens_file.write(\"\\t\".join(tokenized_utterences) + \"\\n\")", "def create_last_word_dict():\n song_urls = get_song_url_list()\n word_dict = {}\n\n lyrics = get_all_sentence_array()\n viable_words = find_viable_words()\n for i, line in enumerate(lyrics):\n print(i)\n if len(line) > 2 and 
len(line) < 12:\n last_word = line[0]\n if last_word in viable_words:\n if last_word not in word_dict:\n word_dict[last_word] = 1\n else:\n word_dict[last_word] = word_dict[last_word] + 1\n\n with open('first_word_dict.json', 'w') as outfile:\n json.dump(word_dict, outfile, indent=4)", "def words(text):\n clean = TextBlob(clean(text))\n sentence_count = len(clean.sentences)\n words = clean.tokenize()\n word_count = len(words)\n avg_len = np.mean([len(word) for word in words])\n words_dict = {'sentence_count': sentence_count, 'word_count': word_count,\n 'avg_len': avg_len}\n return words_dict", "def getTF_IDFSpace():\n sids,documents = getSongTextInfo()\n texts = [[word for word in document.lower().split()] for document in documents]\n dictionary = corpora.Dictionary(texts)\n corpus = [dictionary.doc2bow(text) for text in texts]\n tfidf = models.TfidfModel(corpus)\n corpus_tfidf = tfidf[corpus]\n songMap = {}\n index = 0\n for doc in corpus_tfidf:\n sid = sids[index]\n rMap = {}\n for item in doc:\n wid = item[0]\n count = item[1]\n rMap[wid] = count\n songMap[sid] = rMap\n index += 1\n return songMap", "def make_idx2word():\n idx2word = {}\n d = train_data.shared['word2idx']\n for word, idx in d.items():\n print(word)\n idx2word[idx] = word\n if config.use_glove_for_unk:\n d2 = train_data.shared['new_word2idx']\n for word, idx in d2.items():\n print(word)\n idx2word[idx+len(d)] = word\n return idx2word", "def word_ids_to_words(data, id_to_word):\n return [id_to_word[i] for i in data]", "def data_to_token_ids(data_path, target_path, vocabulary_path,\n tokenizer=None, normalize_digits=True):\n if not gfile.Exists(target_path):\n print(\"Tokenizing data in %s\" % data_path)\n vocab, _ = initialize_vocabulary(vocabulary_path)\n with gfile.GFile(data_path, mode=\"rb\") as data_file:\n with gfile.GFile(target_path, mode=\"w\") as tokens_file:\n counter = 0\n for line in data_file:\n counter += 1\n if counter % 100000 == 0:\n print(\" tokenizing line %d\" % counter)\n token_ids = sentence_to_token_ids(line, vocab, tokenizer,\n normalize_digits)\n tokens_file.write(\" \".join([str(tok) for tok in token_ids]) + \"\\n\")", "def load_pretrained_embeddings(vocabulary: dict, max_size: int):\n # get GloVe 6B pre-trained word embeddings, of dimension 100\n glove_vec = torchtext.vocab.GloVe(name=\"6B\", dim=100, unk_init=torch.Tensor.normal_)\n\n pretrained = []\n for k, _ in vocabulary.stoi.items():\n if k == \"<PAD>\":\n emb = torch.zeros([glove_vec.dim])\n elif k == \"<UNK>\":\n emb = torch.rand([glove_vec.dim])\n else:\n emb = glove_vec.get_vecs_by_tokens(k, lower_case_backup=True)\n pretrained.append(emb) \n\n # return a tensor of size [vocab_size, emb_dim]\n return torch.stack(pretrained, dim=0)", "def preprocess(self):\n self.word_to_id, self.unk_word_list = self.build_vocab(mode=\"word\")\n self.word_vocab_size = len(self.word_to_id)\n self.max_word_len = self.get_max_word_length(self.word_to_id)\n # Do not write the same file again\n if not os.path.exists(self.words_vocab_file):\n with open(self.words_vocab_file, 'wb') as f:\n pickle.dump((self.word_to_id, self.unk_word_list), f)\n if self.unit != \"word\":\n self.preprocess_sub_units()", "def get_vocab(train_data, valid_data, test_data):\n \n print(\"-----------------------------------------------\")\n print(\"Constructing Vocabulary of Words and Characters\")\n print(\"-----------------------------------------------\")\n\n with open(train_data,'r') as f:\n train_corpus = f.readlines()\n f.close()\n\n with open(valid_data,'r') as f:\n 
valid_corpus = f.readlines()\n f.close()\n\n with open(test_data,'r') as f:\n test_corpus = f.readlines()\n f.close()\n\n word_vocab = {}\n char_vocab = {}\n max_len = 0\n\n word_vocab, char_vocab, max_len = make_vocab(train_corpus, word_vocab, char_vocab, max_len)\n word_vocab, char_vocab, max_len = make_vocab(valid_corpus, word_vocab, char_vocab, max_len)\n word_vocab, char_vocab, max_len = make_vocab(test_corpus, word_vocab, char_vocab, max_len)\n\n char_vocab['<SOT>'] = len(char_vocab)+1 \n char_vocab['<EOT>'] = len(char_vocab)+1\n\n print(\"Word Vocabulary Size : %d\"%len(word_vocab))\n print(\"Character Vocabulary Size : %d\"%len(char_vocab))\n print(\"Max Length of Word - 2 : %d\"%max_len)\n\n return word_vocab, char_vocab, max_len", "def make_word_to_freq(self):\n\t\tword_to_freq = {}\n\t\tdocuments = self.tokenized_documents[\"train\"]\n\t\tfor document in documents:\n\t\t\tfor word in document:\n\t\t\t\tif not word in self.worddict: # make sure we have not found one of the pre-defined words\n\t\t\t\t\tword_to_freq[word] = word_to_freq.get(word, 0) + 1\n\t\t\n\t\treturn word_to_freq", "def gen_dtm(text_data, vocab):\n vectorizer = sklearn.feature_extraction.text.CountVectorizer(\n vocabulary = vocab)\n return vectorizer.fit_transform(text_data)", "def data_to_token_ids(data_path, target_path, vocabulary_path,\n tokenizer=None, normalize_digits=True,\n UNK_ID=3, _DIGIT_RE=re.compile(br\"\\d\")):\n if not gfile.Exists(target_path):\n print(\"Tokenizing data in %s\" % data_path)\n vocab, _ = initialize_vocabulary(vocabulary_path)\n with gfile.GFile(data_path, mode=\"rb\") as data_file:\n with gfile.GFile(target_path, mode=\"w\") as tokens_file:\n counter = 0\n for line in data_file:\n counter += 1\n if counter % 100000 == 0:\n print(\" tokenizing line %d\" % counter)\n token_ids = sentence_to_token_ids(line, vocab, tokenizer,\n normalize_digits, UNK_ID=UNK_ID,\n _DIGIT_RE=_DIGIT_RE)\n tokens_file.write(\" \".join([str(tok) for tok in token_ids]) + \"\\n\")\n else:\n print(\"Target path %s exists\" % target_path)", "def _parse_embedding_size(embedding_size, max_size, num_categories) -> List[int]:\n _check_embedding_size(embedding_size, num_categories)\n # calculate the individual values if \"sqrt\" or \"log\"\n if isinstance(embedding_size, str):\n num_categories = np.array(num_categories)\n if embedding_size == \"sqrt\":\n base_size = np.ceil(np.sqrt(num_categories))\n elif embedding_size == \"log\":\n base_size = np.ceil(np.log(num_categories))\n else: # embedding_size == \"fastai\":\n base_size = (1.6 * num_categories ** 0.56).round()\n clipped_size = np.clip(1, max_size, base_size).astype(\"int\")\n embedding_size = list(clipped_size)\n else: # iterable of int\n pass\n return embedding_size", "def predict_next_oneofmany(text, tokenizer, max_id, model, temperature=1):\n X_new = preprocess_input([text], tokenizer, max_id)\n y_proba = model.predict(X_new)[0, -1:, :]\n rescaled_logits = tf.math.log(y_proba) / temperature\n char_id = tf.random.categorical(rescaled_logits, num_samples=1) + 1\n return tokenizer.sequences_to_texts(char_id.numpy())[0]", "def get_word_embedding(counter, emb_file, emb_size, vocab_size, vec_size, vocab_file):\n print(\"Generating word embedding...\")\n # load word embeddings\n embedding_dict = {}\n with open(emb_file, \"r\", encoding=\"utf-8\") as fh:\n for line in tqdm(fh, total=emb_size):\n array = line.split()\n word = \"\".join(array[0:-vec_size])\n vector = list(map(float, array[-vec_size:]))\n embedding_dict[word] = vector\n\n TRANSLATE = {\n \"-lsb-\": 
\"[\", \"-rsb-\": \"]\", \"-lrb-\": \"(\", \"-rrb-\": \")\", \"-lcb-\": \"{\",\n \"-rcb-\": \"}\", \"-LSB-\": \"[\", \"-RSB-\": \"]\", \"-LRB-\": \"(\", \"-RRB-\": \")\",\n \"-LCB-\": \"{\", \"-RCB-\": \"}\"\n }\n SPECIAL_TOKENS = [\"<NULL>\", \"<UNK>\", \"<S>\", \"</S>\"]\n words = list(map(lambda x: x[0], sorted(counter.items(), key=lambda x: x[1], reverse=True)))\n words = SPECIAL_TOKENS + words\n if vocab_size > 0:\n words = words[:vocab_size]\n with open(vocab_file, 'w') as f:\n f.write('\\n'.join(words[1:]))\n embedding = np.random.normal(scale=0.1, size=(len(words), vec_size))\n word2idx_dict = {}\n unknown_count = 0\n for i, word in enumerate(words):\n word2idx_dict[word] = i\n if word in TRANSLATE:\n word = TRANSLATE[word]\n done = False\n for w in (word, word.lower(), word.upper(), word.capitalize()):\n if w in embedding_dict:\n embedding[i] = embedding_dict[w]\n done = True\n break\n if not done:\n unknown_count += 1\n return embedding, word2idx_dict, unknown_count", "def train_word2vec(self, size = 50, window = 20, min_count = 5, epochs = 40):\n\n\n # Read the entire previous data for training\n full_data = pd.read_csv(self.path_full_data, encoding = \"ISO-8859-1\")\n\n # Also read the column which we are performing analysis for\n col_data = pd.read_csv(self.path_data_col\n , encoding = \"ISO-8859-1\"\n , usecols = [self.id_col_name, self.col_name])\n \n\n # Clean the data in the column\n col_data[self.col_name] = self.cln.clean(col_data[self.col_name], typo = self.typo_ind)\n col_data.replace(np.nan, '', inplace = True)\n col_name_list = list(col_data[self.col_name].apply(lambda x: str(x).split(' ')))\n\n\n # Make a list of lists of the data\n input_list = list(full_data['response'].apply(lambda x: x.split(' ')))\n input_list = input_list + col_name_list\n\n # Remove the responses having only one or two words\n input_list = [x for x in input_list if len(x) > 1]\n\n # Build vocabulary and train model\n model = gensim.models.Word2Vec(\n input_list,\n size = size,\n window = window,\n min_count = min_count)\n\n model.train(input_list, total_examples = len(input_list), epochs = epochs)\n\n return model" ]
[ "0.6375627", "0.6333941", "0.63053066", "0.6265616", "0.6252101", "0.6250396", "0.62214345", "0.6192168", "0.6157442", "0.60978895", "0.6085281", "0.6050244", "0.6039959", "0.6008718", "0.6007198", "0.6006812", "0.59851426", "0.59756815", "0.59671265", "0.5960956", "0.5825441", "0.58087635", "0.57941294", "0.5790539", "0.5758069", "0.57558537", "0.5742934", "0.57081914", "0.57024837", "0.5697753", "0.56972224", "0.56927276", "0.56895065", "0.56869984", "0.56493914", "0.5641879", "0.5616718", "0.56163114", "0.5591889", "0.5590846", "0.55758166", "0.55743957", "0.55715835", "0.551486", "0.55081236", "0.54981023", "0.54977244", "0.54877335", "0.5472204", "0.5448516", "0.5441677", "0.54296136", "0.54255366", "0.5425484", "0.54080486", "0.5405491", "0.540485", "0.5402293", "0.53998744", "0.5397611", "0.5379587", "0.5368055", "0.53573793", "0.53533137", "0.5352265", "0.53436804", "0.53351414", "0.5334634", "0.533317", "0.533137", "0.5331296", "0.5325147", "0.53159297", "0.5298561", "0.52932775", "0.52745456", "0.52743036", "0.52705014", "0.5259204", "0.52577966", "0.5247319", "0.5241022", "0.5229063", "0.5223484", "0.52114046", "0.5208445", "0.5207588", "0.5203633", "0.5194667", "0.51933724", "0.5189476", "0.5187672", "0.51852846", "0.51701665", "0.5165241", "0.51629746", "0.51513416", "0.5134411", "0.5129445", "0.51287246" ]
0.7943447
0
Convert raw text data into integer ids
def raw_to_ids(raw_data, word_to_id): docs = tokenize_keras(raw_data) uid = word_to_id[UNKNOWN_WORD] return [[word_to_id.get(w, uid) for w in doc] for doc in docs]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def text2ids(self, text: str, length: int):\n # Tokenize\n tokens = self.tokenizer.tokenize(text)\n token_ids = self.tokenizer.tokens2ids(tokens)\n # Padding\n while len(token_ids) < length:\n token_ids.append(0)\n # Truncate\n if len(token_ids) > length:\n token_ids = token_ids[:length]\n assert len(token_ids) == length\n return token_ids", "def get_ids(cls, text):\n tokens = TokenizerContainer.TOKENIZER.tokenize(text)\n token_ids = TokenizerContainer.TOKENIZER.convert_tokens_to_ids(tokens)\n input_ids = token_ids + [0] * (cls.MAX_LEN-len(token_ids))\n return tokens, input_ids", "def _text_to_ids(self, *Xs, max_length=None):\n return Xs", "def _build_data_from_text(self, text):\n # tokenize text if tokenizer is given\n if self.tokenizer is not None:\n data = self.tokenizer.text_to_ids(text)\n else:\n data = text\n\n return data", "def encode(self, text: str) -> List[int]:\n return [self._label2id.get(char, self.oov) for char in text]", "def get_ids(self, text):\n\n tokens = [token.orth for token in self.tokenizer(text)]\n ids = []\n for token in tokens:\n try:\n id = self.vocab.vectors.key2row[token]\n except KeyError:\n id = self.oov_id\n\n ids.append(id)\n\n return ids", "def get_ids(self, text):\n\n tokens = [token.orth for token in self.tokenizer(text)]\n ids = []\n for token in tokens:\n try:\n id = self._vocab.vectors.key2row[token]\n except KeyError:\n id = self.oov_id\n\n ids.append(id)\n\n return ids", "def texts2ids(self, texts: list, length: int):\n return [self.text2ids(text, length) for text in texts]", "def token2id(data, mode):\n vocab_path = 'vocab.' + mode\n in_path = data + '.' + mode\n out_path = data + '_ids.' + mode\n _, vocab = load_vocab(os.path.join(config.PROCESSED_PATH, vocab_path))\n in_file = open(os.path.join(config.PROCESSED_PATH, in_path), 'rb')\n out_file = open(os.path.join(config.PROCESSED_PATH, out_path), 'wb')\n\n lines = in_file.read().splitlines()\n for line in lines:\n if mode == 'dec': # we only care about '<s>' and </s> in encoder\n ids = [vocab[b'<s>']]\n else:\n ids = []\n ids.extend(sentence2id(vocab, line))\n # ids.extend([vocab.get(token, vocab['<unk>']) for token in basic_tokenizer(line)])\n if mode == 'dec':\n ids.append(vocab[b'</s>'])\n out_file.write(b' '.join(str(id_).encode('ascii') for id_ in ids) + b'\\n')", "def _convert_ids(self, ids):\n ids_list_int = []\n\n for id_ in ids:\n if not self._validate_identifier(id_):\n raise PhabfiveDataException(f\"Identifier '{id_}' is not valid\")\n\n id_ = id_.replace(\"P\", \"\")\n # constraints takes int\n id_ = int(id_)\n ids_list_int.append(id_)\n\n return ids_list_int", "def text_to_int(self, text):\n int_sequence = []\n for c in text:\n if c == ' ':\n ch = self.char_map['']\n else:\n ch = self.char_map[c]\n int_sequence.append(ch)\n return int_sequence", "def batches2IDs(batches):\n l = [ np.array( [ char2id(x) for x in characters(b) ] ) for b in batches ]\n return l", "def data_to_int(data): \r\n data = str(data).strip().upper()\r\n if data[0]== 'B':\r\n return bin_to_int(data[1:])\r\n elif data[0]== 'H':\r\n return hex_to_int(data[1:])\r\n else:\r\n return int(data, 10)", "def words_to_id(text, is_list=False, old_word_to_id=None):\n if is_list:\n x = \"\"\n for line in text:\n x += line + \" \"\n text = x\n \n uniq_words = set(text.split(\" \"))\n \n if old_word_to_id:\n word_to_id = old_word_to_id\n start = len(old_word_to_id)\n for word in uniq_words:\n if word not in word_to_id:\n word_to_id[word] = start\n start += 1\n else:\n word_to_id = {word:i for i, word in 
enumerate(uniq_words)}\n \n id_to_word = {str(v):k for k,v in word_to_id.items()}\n return word_to_id, id_to_word", "def _preprocess(self, txt_seq):\n input = []\n for token in txt_seq.split():\n input.append(self.word2id.get(token, self.word2id[\"<UNK>\"]))\n input.append(self.word2id[\"<END>\"])\n input = torch.LongTensor(input)\n return input", "def _natural_keys(text: str) -> list[int | str]:\n return [_atoi(c) for c in re.split(r\"(\\d+)\", text)]", "def convert_texts_to_ids(self, batch_text):\n max_len = self.field_config.max_seq_len\n batch_fea_list = []\n name_block_len = []\n name_block_begin = []\n name_block_end = []\n for idx_batch, text in enumerate(batch_text):\n fea_str = text.split(' [SEP] ')\n fea_list = [[float(y) for y in x.split(' ')] for x in fea_str]\n\n # 加上截断策略\n if len(fea_list) > self.field_config.max_seq_len:\n logging.warn('input instance is to long: %s', text)\n fea_list = truncation_words(fea_list, self.field_config.max_seq_len, self.field_config.truncation_type)\n batch_fea_list.append(fea_list)\n\n return_list = []\n\n padded = [0] * self._feature_dim\n padded_ids = np.array([inst + list([padded] * (max_len - len(inst))) for inst in batch_fea_list])\n padded_ids = padded_ids.astype('float32').reshape([-1, max_len, self._feature_dim])\n\n return_list.append(padded_ids)\n\n return return_list", "def text_to_id(tweets_dict):\n text_to_id_dict = {}\n for key in tweets_dict:\n # we assume that there are no retweets as this has been preprocessed before\n text_to_id_dict[key] = tweets_dict[key][\"text\"]\n return text_to_id_dict", "def text_to_id(text, word_to_id_dict):\n return [word_to_id_dict[word] for word in text.split(\" \") if word in word_to_id_dict]", "def words_to_word_ids(data, word_to_id):\n # if isinstance(data[0], six.string_types):\n # print(type(data[0]))\n # # exit()\n # print(data[0])\n # print(word_to_id)\n # return [word_to_id[str(word)] for word in data]\n # else:\n return [word_to_id[word] for word in data]\n\n # if isinstance(data[0], str):\n # # print('is a string object')\n # return [word_to_id[word] for word in data]\n # else:#if isinstance(s, bytes):\n # # print('is a unicode object')\n # # print(data[0])\n # return [word_to_id[str(word)] f", "def read_lines_of_ints(text):\n ints = []\n ints_as_strs = split_line(text)\n # below is equivalent code to the following for loop\n # index = 0\n # while index < len(ints_as_strs):\n # int_as_str = ints_as_strs[index]\n # index += 1\n for ints_as_str in ints_as_strs:\n ints.append(int(int_as_str))\n return ints", "def text2Int(text):\n return reduce(lambda x, y : (x << 8) + y, map(ord, text))", "def ids(filename):\n with open(filename) as file:\n contents = file.read()\n return [int(x) for x in contents.split(\",\")]", "def data_to_word_ids(self, input_data, filter=False):\n\n _buffer = list()\n for word in input_data:\n word = word.lower()\n if self.unit == \"oracle\":\n if \"+\" in word:\n tokens = word.split('+')\n word_tag = tokens[0].split(':')\n word = word_tag[1]\n if self.unit == \"morpheme\":\n word = re.sub(\"@@\", \"\", word)\n\n # flag to randomize token with frequency one\n flag = 1\n if word in self.unk_word_list:\n flag = random.randint(0, 1)\n\n if word in self.word_to_id and flag == 1:\n # if filter is True, reduce output vocabulary for softmax\n # (map words not in top self.max_vocab_size to UNK)\n if filter:\n # index start from 0\n if self.word_to_id[word] < self.max_vocab_size:\n _buffer.append(self.word_to_id[word])\n else:\n _buffer.append(self.word_to_id['<unk>'])\n else:\n 
_buffer.append(self.word_to_id[word])\n else:\n _buffer.append(self.word_to_id['<unk>'])\n return _buffer", "def doc2id(self, doc):\n if isinstance(doc, string_types):\n raise TypeError(\"doc2idx expects an array of unicode tokens on input, not a single string\")\n doc = map(self.process_token, doc)\n return [self.token_to_id(token) for token in doc]", "def line2ints(line):\n return [int(d) for d in line.strip()]", "def intparse(text):\n return int(text, 0)", "def test_convert_id():", "def __ui_convert_ids_string_to_list(string_of_ids):\n if string_of_ids == \"\":\n return []\n string_of_ids = string_of_ids.strip()\n string_of_ids = string_of_ids.replace(\",\", \" \")\n\n done = False\n while not done:\n if string_of_ids.find(\" \") == -1:\n done = True\n else:\n string_of_ids = string_of_ids.replace(\" \", \" \")\n list_of_ids = string_of_ids.split(\" \")\n for id_index in range(len(list_of_ids)):\n list_of_ids[id_index] = int(list_of_ids[id_index])\n return list_of_ids", "def _encode(self, text: str) -> List[str]:\n token_ids: List[int] = self.bert_model.encode(text.strip())\n tokens_ids_str: List[str] = [str(token_id) for token_id in token_ids]\n return tokens_ids_str", "def natural_keys(text):\n return [atoi(c) for c in re.split(\"(\\d+)\", text)]", "def data_parser(filepath):\n d = [int(line) for line in open(filepath)]\n return (int(s) for s in d)", "def _params_to_int(self, qs):\n return [int(str_id) for str_id in qs.split(',')]", "def convert_str_encoded_cards_to_int_encoded(cards: List[str]) -> List[int]:\n return [card_ids[card] for card in cards]", "def convert2int(self,seq_pep):\n\t\treturn [self.aminoacids.index(pep) for pep in seq_pep]", "def _str2id(text):\n return sha1(text).hexdigest()", "def get_ids(self, sentence):\n return [self.get_id(word) for word in sentence.strip().split(' ')]", "def doc2id(self, doc):\n doc = map(self.process_token, doc)\n return [self.token_to_id(token) for token in doc]", "def _parse(self, the_id: typing.Union[int, str]) -> int:\n return int(the_id)", "def _params_to_ints(self, qs):\n return [int(str_id) for str_id in qs.split(',')]", "def _params_to_ints(self, qs):\n return [int(str_id) for str_id in qs.split(',')]", "def _params_to_ints(self, qs):\n return [int(str_id) for str_id in qs.split(',')]", "def _params_to_ints(self, qs):\n return [int(str_id) for str_id in qs.split(',')]", "def _params_to_ints(self, qs):\n return [int(str_id) for str_id in qs.split(',')]", "def _format_ids(self, args):\n ids = []\n\n if isinstance(args, (int, long)):\n ids.append(args)\n elif isinstance(args, (str, unicode)):\n for item in re.split(u'[ ,]+', args):\n if len(item) == 0:\n continue\n addition = None\n try:\n # handle index\n addition = [int(item)]\n except ValueError:\n pass\n if not addition:\n # handle hashes\n try:\n int(item, 16)\n addition = [item]\n except:\n pass\n if not addition:\n # handle index ranges i.e. 
5:10\n match = re.match(u'^(\\d+):(\\d+)$', item)\n if match:\n try:\n idx_from = int(match.group(1))\n idx_to = int(match.group(2))\n addition = range(idx_from, idx_to + 1)\n except:\n pass\n if not addition:\n raise ValueError(u'Invalid torrent id, \\\"%s\\\"' % item)\n ids.extend(addition)\n elif isinstance(args, (list)):\n for item in args:\n ids.extend(self._format_ids(item))\n else:\n raise ValueError(u'Invalid torrent id')\n return ids", "def convert_tokens_to_ids(self, tokens, max_len=None):\n if max_len is not None:\n token_length = len(tokens)\n if max_len < token_length:\n tokens = tokens[:max_len]\n else:\n for _ in range(max_len - token_length):\n tokens.append(self.pad_token())\n return [self.stoi(tok) for tok in tokens]", "def convert_tokens_to_ids(self, tokens):\n ids = []\n if isinstance(tokens, str):\n if tokens in self.special_tokens:\n return self.special_tokens[tokens]\n else:\n return self.encoder.get(tokens, self.unk_id)\n for token in tokens:\n if token in self.special_tokens:\n ids.append(self.special_tokens[token])\n else:\n ids.append(self.encoder.get(token, self.unk_id))\n return ids", "def token2id(data, mode, phase): \n #Outputs data in the form of tokens from vocab to processed/(train_or_test)/ids.(enc or dec)\n vocab_path = 'vocab.' + mode\n in_path = data + '.' + mode\n out_path = data + '_ids.' + mode\n\n _, vocab = load_vocab(os.path.join(config.PROCESSED_PATH, vocab_path))\n in_file = open(os.path.join(config.PROCESSED_PATH, in_path), 'rb')\n out_file = open(os.path.join(config.PROCESSED_PATH, out_path), 'wb')\n \n lines = in_file.read().splitlines()\n for line in lines:\n if mode == 'dec' and phase is not 2: # we only care about '<s>' and </s> in encoder\n ids = [vocab['<s>']]\n else:\n ids = []\n ids.extend(sentence2id(vocab, line))\n # ids.extend([vocab.get(token, vocab['<unk>']) for token in basic_tokenizer(line)])\n if mode == 'dec':\n ids.append(vocab['<\\s>'])\n out_file.write(' '.join(str(id_) for id_ in ids) + '\\n')", "def process_data(words,puncts,word_to_id):\n\tids = []\n\tp_ids = []\n\tfor i in range(len(words)):\n\t\tids.append(word_to_id[words[i]])\n\t\tp_ids.append(punct_to_id[puncts[i]])\n\treturn ids,p_ids", "def _sentence_to_ids(sentence, vocab):\n ids = [vocab.get(w, special_words.UNK_ID) for w in sentence]\n if FLAGS.add_eos:\n ids.append(special_words.EOS_ID)\n return ids", "def get_ids(self,tokens, tokenizer, max_seq_length):\n token_ids = tokenizer.convert_tokens_to_ids(tokens,)\n input_ids = token_ids + [0] * (max_seq_length-len(token_ids))\n return input_ids", "def convert_to_ids(self, vocab):\n for data_set in [self.train_set, self.dev_set, self.test_set]:\n if data_set is None:\n continue\n for sample in data_set:\n sample['question_token_ids'] = vocab.convert_to_ids(sample['question_tokens'])\n for passage in sample['passages']:\n passage['passage_token_ids'] = vocab.convert_to_ids(passage['passage_tokens'])", "def load_ids(data):\n identifiers = {}\n base_id = settings.MDM_UUID\n for payload in data:\n identifiers[payload] = \"%s-%s\" % (base_id, data[payload])\n return identifiers", "def _parse_id(line):\n ablt_pat = re.compile('(?<=2014_)[0-9]{12}(?=.jpg)')\n orig_pat = re.compile('(?<=[0-9]{16}_)[0-9]+')\n mat = ablt_pat.search(line)\n if mat is None: #original image\n mat = orig_pat.search(line)\n assert not mat is None, (\"this line does not contain a COCO image id: {}\" % line )\n return line[mat.start(): mat.end()], 'orig'\n else: #ablated image\n num = line[mat.start(): mat.end()]\n return str(int(num)), 'ablt'", 
"def _batch_encode(self, text: List[str]) -> List[List[str]]:\n token_ids: List[List[int]] = self.bert_model.batch_encode([t.strip() for t in text])\n tokens_ids_str: List[List[str]] = [[str(t) for t in token_id] for token_id in token_ids]\n return tokens_ids_str", "def get_label2id(labels_path: str) -> Dict[str, int]:\n with open(labels_path, 'r') as f:\n labels_str = f.read().split()\n labels_ids = list(range(1, len(labels_str)+1))\n return dict(zip(labels_str, labels_ids))", "def getid(data):\n return int(data.split('/')[-1])", "def __preprocess_line(line):\n return [int(element) for element in line.lstrip().rstrip().split()] # preprocess the input line", "def test_get_ids(self):\r\n lines = \"\"\">S74_1 E86FECS01CEVAV orig_bc=ACATGTCACGTG new_bc=ACATGTCACGTG bc_diffs=0\r\nCTCCTC\r\n>Unassigned_2 E86FECS01EKKMF orig_bc=AGCGCTGATGTA new_bc=None bc_diffs=1\r\nGGTGCCTCCCTCGC\r\n>S80_3 E86FECS01EKKMF orig_bc=AGCGCTGATGTA new_bc=None bc_diffs=1\r\nGGTGCCTCCCTCGC\r\n>S80_4 E86FECS01CW66X orig_bc=AGTCCATAGCTG new_bc=AGTCCATAGCTG bc_diffs=0\r\nGTCCTGGCAG\"\"\".splitlines()\r\n result = get_ids(lines, 1)\r\n self.assertEqual(\r\n dict(result),\r\n {'S74': ['E86FECS01CEVAV'],\r\n 'Unassigned': ['E86FECS01EKKMF'],\r\n 'S80': ['E86FECS01EKKMF',\r\n 'E86FECS01CW66X']})", "def _preprocess(self, txt_seq):\n input = []\n label = []\n punc = \" \"\n for token in txt_seq.split():\n if token in self.punc2id:\n punc = token\n else:\n input.append(self.word2id.get(token, self.word2id[\"<UNK>\"]))\n label.append(self.punc2id[punc])\n punc = \" \"\n input.append(self.word2id[\"<END>\"])\n label.append(self.punc2id[punc])\n input = torch.LongTensor(input)\n label = torch.LongTensor(label)\n # input = np.array(input)\n # label = np.array(label)\n return input, label", "def replace_ids_submission(ids):\n \n item = np.zeros((len(ids), ), dtype = 'int')\n user = np.zeros((len(ids), ), dtype = 'int')\n for i in range(len(ids)):\n row, col = ids[i].split(\"_\")\n item[i] = int(row.replace(\"r\", \"\"))\n user[i] = int(col.replace(\"c\", \"\"))\n \n return item, user", "def tokenized(self, text):\n return self.tokenizer.encode_plus(text,\n max_length=512,\n pad_to_max_length=True,\n truncation=True)[\"input_ids\"]", "def convert_string2int(question, word2int):\n question = clean_text(question)\n return [word2int.get(word, word2int['<OUT>']) for word in question.split()]", "def map_wordlist_2_int(word_list, vocab_list):\r\n word_to_index = {u:i for i, u in enumerate(vocab_list)}\r\n text_to_num = np.array([word_to_index[c] for c in word_list])\r\n return text_to_num", "def get_ids_values(path):\n \n def read_txt(path):\n \"\"\"read text file from path.\"\"\"\n \n with open(path, \"r\") as f:\n return f.read().splitlines()\n \n raw_data = read_txt(path)[1:]\n \n def deal_line(line):\n ids, values = line.split(',')\n return ids, values\n \n ids_values = [deal_line(line) for line in raw_data]\n ids = [x[0] for x in ids_values]\n values = [x[1] for x in ids_values]\n values = list(map(int, values))\n \n return ids, values", "def sentence_to_seq(sentence, vocab_to_int):\n # TODO: Implement Function\n # print(vocab_to_int)\n lower_case_word_list = sentence.lower().split()\n id_unk = vocab_to_int['<UNK>']\n sentence_ids = []\n for word in lower_case_word_list:\n try:\n id = vocab_to_int[word]\n except KeyError:\n id = id_unk\n sentence_ids.append(id)\n return sentence_ids", "def element2id(self):\n elements = self.contents['Element']\n unique_elements, indices = np.unique(elements, return_inverse=True)\n 
self.contents['Sub_ID'] = indices + 1\n self.contents['ID'] = np.arange(1, len(self.contents)+1)\n self.num_atom_types = len(unique_elements)", "def list_int_from_str_base(line):\n temp = line.split()\n for i in range(len(temp)):\n temp[i] = int_from_str_base(temp[i])\n return temp", "def _sentences_to_ints(texts, lowercase=True):\n w_dict = {}\n for sen in texts:\n for w in sen:\n if lowercase:\n w = w.lower()\n w_dict.update({w: w_dict.get(w, 0) + 1})\n int_to_word = [(i, word[0]) for i, word in\n enumerate(sorted(w_dict.items(), key=lambda x: x[1], reverse=True))]\n vocab = {w: i for i, w in int_to_word}\n return [[vocab[w.lower()] if lowercase else vocab[w]\n for w in sen] for sen in texts], vocab", "def convert_to_ids(self, terms):\n vec = [self.get_id(label) for label in terms]\n return vec", "def getIDs():", "def bin2int(r: str) -> int:", "def text_to_int(text):\n # type (str) -> int\n try:\n return int(\"\".join(x for x in text if x.isdigit()))\n except ValueError:\n return 0", "def extract_term_id( text ):\n if ('[' in text) and (']' in text):\n term_id = text.split('[')[1].split(']')[0]\n elif re.match(INT_IN_STRING, text):\n term_id = text\n else:\n term_id = text\n return term_id", "def get_ids():\n # Filename for SALAMI IA metadata\n metadata_file = os.path.join(\n dpath.SALAMI, 'metadata', 'id_index_internetarchive.csv')\n\n ids = []\n\n with open(metadata_file, \"r\") as rwc_file:\n reader = csv.reader(rwc_file)\n next(reader) #skip header\n for row in reader:\n ids.append(int(row[0]))\n\n return ids", "def to_int(data):\n return {int(k): data[k] for k in sorted(data.keys())}", "def ids_from_fasta_lines(lines):\r\n ids = []\r\n for line in lines:\r\n if not line.startswith(\">\"):\r\n continue\r\n id = id_from_fasta_label_line(line)\r\n ids.append(id)\r\n\r\n return ids", "def _convert_words_to_ids(self, words, vocab_id_dict):\n return [self._convert_token_to_id(w, vocab_id_dict) for w in words]", "def data_to_token_ids(data_path, target_path, vocabulary_path,\n tokenizer=None, normalize_digits=True,\n UNK_ID=3, _DIGIT_RE=re.compile(br\"\\d\")):\n if not gfile.Exists(target_path):\n print(\"Tokenizing data in %s\" % data_path)\n vocab, _ = initialize_vocabulary(vocabulary_path)\n with gfile.GFile(data_path, mode=\"rb\") as data_file:\n with gfile.GFile(target_path, mode=\"w\") as tokens_file:\n counter = 0\n for line in data_file:\n counter += 1\n if counter % 100000 == 0:\n print(\" tokenizing line %d\" % counter)\n token_ids = sentence_to_token_ids(line, vocab, tokenizer,\n normalize_digits, UNK_ID=UNK_ID,\n _DIGIT_RE=_DIGIT_RE)\n tokens_file.write(\" \".join([str(tok) for tok in token_ids]) + \"\\n\")\n else:\n print(\"Target path %s exists\" % target_path)", "def _get_token_ids(self, tokens):\n token_ids = self.tokenizer.convert_tokens_to_ids(tokens)\n pad_amount = self.max_seq_length - len(tokens)\n input_ids = token_ids + [0] * pad_amount\n return np.array(input_ids)", "def str_to_id(string, block):\n if len(string) % block != 0:\n raise Exception('String length not a multiple of block={}'.format(block))\n num_blocks = len(string) // block\n return tuple([int(string[i*block: (i+1)*block]) for i in range(num_blocks)])", "def words2ints(list_of_strings):\n wordlist = set([])\n for strings in list_of_strings:\n wordlist.update(strings.split())\n w2i = {k: str(v) for v, k in enumerate(wordlist)}\n return w2i", "def translate_ids(dwi):\n def get_ids(*args):\n \"\"\"Translates the ids to the format the :class:`DataWrapper` instance expects.\n\n Arguments:\n *args (int): 
integers with :obj:`Detection` ids\n\n Returns:\n list of int or str: A list with the ids in the expected format.\n \"\"\"\n return [ids[i] for i in args]\n if isinstance(dwi, DataWrapperTracks):\n dwi = dwi.data\n if isinstance(dwi, DataWrapperPandas):\n values = detections_clean.id\n elif isinstance(dwi, DataWrapperBinary):\n values = detections_clean.generatedID\n else:\n values = detections_clean.generatedID\n ids = {key: val for key, val in zip(detections_clean.id.tolist(), values)}\n return get_ids", "def test_generate_spin_id_data_array2(self):\n\n # The data.\n data = ['1', 'GLY', '234', 'NH']\n\n # The ID.\n id = mol_res_spin.generate_spin_id_data_array(data, res_num_col=1, res_name_col=2, spin_num_col=3, spin_name_col=4)\n\n # Test the string.\n self.assertEqual(id, ':1@234')", "def convert_to_ints(command, start, end):\n return [raw_bytes_to_int(command[x:x + BYTES_IN_INT]) for x in range(start, end, BYTES_IN_INT)]", "def encode_data_to_token_ids(raw_data_path, encoded_path, vocabulary_path, targetSet,\n tokenizer=None, normalize_digits=True):\n if not gfile.Exists(encoded_path):\n print(\"Tokenizing data in %s\" % raw_data_path)\n vocab, _ = initialize_vocabulary(vocabulary_path)\n with codecs.open(encoded_path, 'w', 'utf-8') as tokens_file:\n counter = 0\n for line in codecs.open( raw_data_path, 'r', 'utf-8'):\n counter += 1\n if counter % 100000 == 0:\n print(\" tokenizing line %d\" % counter)\n if len(line.strip().split('\\t')) != 3:\n print('Error data:%s' % line)\n continue\n src, tgt, tgtID = line.strip().split('\\t')\n if tgtID not in targetSet:\n print('Error!!! %s with %s not found in full targetID file!!' % (tgt, tgtID))\n continue\n else:\n src = text_normalize(src)\n token_ids = sentence_to_token_ids(src, vocab, normalize_digits)\n token_ids = [BOS_ID] + token_ids + [EOS_ID]\n tokens_file.write(tgtID + '\\t' + \" \".join([str(tok) for tok in token_ids]) + \"\\n\")", "def data_to_token_ids(self,data_path, target_path, vocab,\n tokenizer=None, normalize_digits=True):\n # if not gfile.Exists(target_path):\n if True:\n with tf.gfile.GFile(data_path, mode=\"r\") as data_file:\n counter = 0\n results = []\n for line in data_file:\n token_ids = self.sentence_to_token_ids(line, vocab, tokenizer,\n normalize_digits)\n results.append(\" \".join([str(tok) for tok in token_ids]) + \"\\n\")\n try:\n len_d, len_q = len(results[2].split()), len(results[4].split())\n except:\n return\n with open(\"%s_%s\" % (target_path, len_d + len_q), mode=\"w\") as tokens_file:\n tokens_file.writelines(results)", "def test_generate_spin_id_data_array3(self):\n\n # The data.\n data = ['Ap4Aase', '234', 'NH']\n\n # The ID.\n id = mol_res_spin.generate_spin_id_data_array(data, mol_name_col=1, res_num_col=None, res_name_col=None, spin_num_col=2, spin_name_col=3)\n\n # Test the string.\n self.assertEqual(id, '#Ap4Aase@234')", "def data_parser(filepath):\n tmp = open(filepath).read().split('\\n')\n return [int(x) for x in tmp]", "def hex2int(r: str) -> int:", "def create_num_id(df):\n df['id'] = df['patient_id'].apply(lambda x:int(x.split('_')[1]))\n return df", "def get_int_seq(line):\n return map(int, line.strip().split())", "def _converter(self,string_representation):\n assert len(string_representation) == 1\n\n hash_dic = {'T':10,'J':11,'Q':12,'K':13,'A':14}\n\n try:\n integer_representation=int(string_representation)\n except:\n integer_representation=hash_dic[string_representation]\n\n return integer_representation", "def dec2int(r: str) -> int:", "def data_to_token_ids(data_path, target_path, 
vocabulary_path,\n tokenizer=None, normalize_digits=True):\n if not gfile.Exists(target_path):\n print(\"Tokenizing data in %s\" % data_path)\n vocab, _ = initialize_vocabulary(vocabulary_path)\n with gfile.GFile(data_path, mode=\"rb\") as data_file:\n with gfile.GFile(target_path, mode=\"w\") as tokens_file:\n counter = 0\n for line in data_file:\n counter += 1\n if counter % 100000 == 0:\n print(\" tokenizing line %d\" % counter)\n token_ids = sentence_to_token_ids(line, vocab, tokenizer,\n normalize_digits)\n tokens_file.write(\" \".join([str(tok) for tok in token_ids]) + \"\\n\")", "def convert_to_num(text):\n\n\tnums = []\n\tfor character in text:\n\t\tnums.append(ord(character))\n\treturn nums", "def get_id(self):\n id_num = []\n i = 0\n while True:\n serial_data = self.rfid_serial_port.read()\n data = serial_data.decode('utf-8')\n i = i + 1\n if i == 12:\n i = 0\n ID = \"\".join(map(str, id_num))\n return ID\n else:\n id_num.append(data)", "def sentence_to_token_ids(sentence, vocabulary,\n tokenizer=None, normalize_digits=True):\n if tokenizer:\n words = tokenizer(sentence)\n else:\n words = character_tokenizer(sentence)\n if not normalize_digits:\n return [vocabulary.get(w, UNK_ID) for w in words]\n # Normalize digits by 0 before looking words up in the vocabulary.\n output = [vocabulary.get(_DIGIT_RE.sub(b\"0\", w), UNK_ID) for w in words]\n output += [EOS_ID]\n return output", "def transform_input(data: str) -> Matrix:\n return [\n list(map(int, list(row)))\n for row in data.split('\\n')\n ]", "def create_id(elements: Iterable) -> str:\r\n i = 1\r\n while str(i) in elements:\r\n i += 1\r\n return str(i)" ]
[ "0.672705", "0.6686971", "0.66411066", "0.66214275", "0.6610237", "0.6563587", "0.650595", "0.6494439", "0.6406515", "0.6315634", "0.6188173", "0.6168283", "0.6166879", "0.6148879", "0.60975975", "0.6084565", "0.6077067", "0.6060543", "0.60536844", "0.60036033", "0.59868073", "0.59835935", "0.5962527", "0.5944648", "0.5933868", "0.5907434", "0.5895855", "0.5890603", "0.58717227", "0.58454424", "0.5827732", "0.58012164", "0.5790343", "0.5790328", "0.5776119", "0.5775628", "0.5775206", "0.5774312", "0.5772555", "0.5766481", "0.5766481", "0.5766481", "0.5766481", "0.5766481", "0.5746725", "0.57387704", "0.57343173", "0.5715382", "0.5708837", "0.5705429", "0.570495", "0.5700942", "0.56946796", "0.56765074", "0.5657502", "0.5651616", "0.56448275", "0.56286097", "0.56148136", "0.5612189", "0.5610529", "0.56093407", "0.56052256", "0.56031233", "0.56004983", "0.559704", "0.5595496", "0.5579731", "0.5574342", "0.5573943", "0.5555741", "0.5545939", "0.55398417", "0.55390924", "0.55361557", "0.55248296", "0.5521771", "0.5508495", "0.5503054", "0.5491392", "0.5490131", "0.5479172", "0.5471123", "0.5464995", "0.5458891", "0.54512644", "0.54506636", "0.5446525", "0.54438066", "0.5438459", "0.54335344", "0.5410277", "0.54087627", "0.5392428", "0.53806627", "0.5377441", "0.5377174", "0.53725976", "0.53674275", "0.536053" ]
0.7270317
0
callback for when the detector has found a stop sign. Note that a distance of 0 can mean that the lidar did not pickup the stop sign at all
def stop_sign_detected_callback(self, msg): # distance of the stop sign corners = msg.corners dx = corners[3] - corners[1] dy = corners[2] - corners[0] r = dx/dy # aspect ratio rdist = np.array([.15, .20, .25, .30,.35, .40, .45, .50]) pixelheight = np.array([139, 102, 82, 64, 56, 50, 44, 40]) if dy > pixelheight[-1] and dy < pixelheight[0]: dist = np.interp(dy, pixelheight[::-1], rdist[::-1]) else: return # Get location of camera with respect to the map try: (translation,rotation) = self.tf_listener.lookupTransform('/map', '/camera', rospy.Time(0)) xcam = translation[0] ycam = translation[1] zcam = translation[2] euler = tf.transformations.euler_from_quaternion(rotation) thetacam = euler[2] except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException): return # Get angle of robot with respect to the map try: (translation,rotation) = self.tf_listener.lookupTransform('/map', '/base_footprint', rospy.Time(0)) euler = tf.transformations.euler_from_quaternion(rotation) thetarobot = euler[2] except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException): return # Now we have pose of robot, we want to determine stop sign angle relative # to camera frame thstopsign = (wrapToPi(msg.thetaright) + wrapToPi(msg.thetaleft))/2. zstopsign = dist*np.cos(-thstopsign) xstopsign = dist*np.sin(-thstopsign) x = xcam + xstopsign*np.cos(thetacam) - zstopsign*np.sin(thetacam) y = ycam + xstopsign*np.sin(thetacam) + zstopsign*np.cos(thetacam) # Now that we have x and y coord of stop sign in world frame, append coord found = False for i in range(len(self.stopSigns[0])): xcur = self.stopSigns[0][i] ycur = self.stopSigns[1][i] thetarobotcur = self.stopSigns[2][i] distance = np.sqrt((x - xcur)**2 + (y - ycur)**2) n = self.stopSignCounts[i] if distance < .2: if n < 100: # We have found the same stop sign as before xnew = (n/(n+1.))*xcur + (1./(n+1))*x ynew = (n/(n+1.))*ycur + (1./(n+1))*y thetarobotnew = (n/(n+1.))*thetarobotcur + (1./(n+1))*thetarobot self.stopSigns[0][i] = xnew self.stopSigns[1][i] = ynew self.stopSigns[2][i] = thetarobotnew self.stopSignCounts[i] += 1 found = True if not found: # Found a new one, append it self.stopSigns[0].append(x) self.stopSigns[1].append(y) self.stopSigns[2].append(thetarobot) self.stopSignCounts.append(1)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _detect_stop(func):\n def wrapper(*args,**kwargs):\n self = args[0]\n self.episode_length -= 1\n if self.episode_length <=0:\n \"\"\"if the episode is end\"\"\"\n self.end = True\n else:\n if self.adsorption:\n \"\"\"just stop moving and wait until the end of episode\"\"\"\n self.state = self.previous_state\n else:\n func(*args,**kwargs)\n self._detect_obstacles()\n\n # func(*args,**kwargs)\n # self._detect_obstacles()\n # if self.adsorption:\n # \"\"\"if this step update is invalid, the point will rebond\"\"\"\n # self.state = self.previous_state\n\n if self.distance <= 0.02:\n \"\"\"if the point reached the boundary around the goal, let it stop and reset the punishment(self.reward)\"\"\"\n self.end = True\n self.reward = 0\n if self.state[0] <0 or self.state[0] > 10 or self.state[1] <0 or self.state[1] > 10:\n # self.end = True\n self.reward = -800\n return np.array(self.state), self.reward, self.end, self.distance\n return wrapper", "def _change_seg_stop(self, seg_img, depth_img, stop_signs, cam, _region_size=6): \r\n for stop in stop_signs:\r\n\r\n _dist = self._get_distance(stop.get_transform().location)\r\n \r\n _region = np.abs(depth_img - _dist)\r\n\r\n seg_img[(_region < _region_size) & (seg_img == 12)] = 26\r\n\r\n # lane markings\r\n trigger = stop.trigger_volume\r\n\r\n _trig_loc_world = self._trig_to_world(np.array([[0], [0], [0], [1.0]]).T, stop, trigger)\r\n _x = self._world_to_sensor(_trig_loc_world, self._get_sensor_position(cam))[0,0]\r\n\r\n if _x > 0: # stop is in front of camera\r\n\r\n bb = self._create_2d_bb_points(trigger, 4)\r\n trig_loc_world = self._trig_to_world(bb, stop, trigger)\r\n cords_x_y_z = self._world_to_sensor(trig_loc_world, self._get_sensor_position(cam), True)\r\n\r\n #if cords_x_y_z.size: \r\n cords_x_y_z = cords_x_y_z[:3, :]\r\n cords_y_minus_z_x = np.concatenate([cords_x_y_z[1, :], -cords_x_y_z[2, :], cords_x_y_z[0, :]])\r\n bbox = (self._sensor_data['calibration'] @ cords_y_minus_z_x).T\r\n\r\n camera_bbox = np.concatenate([bbox[:, 0] / bbox[:, 2], bbox[:, 1] / bbox[:, 2], bbox[:, 2]], axis=1)\r\n\r\n if np.any(camera_bbox[:,2] > 0):\r\n\r\n camera_bbox = np.array(camera_bbox)\r\n\r\n polygon = [(camera_bbox[i, 0], camera_bbox[i, 1]) for i in range(len(camera_bbox))]\r\n\r\n img = Image.new('L', (self._sensor_data['width'], self._sensor_data['height']), 0)\r\n ImageDraw.Draw(img).polygon(polygon, outline=1, fill=1)\r\n _region = np.array(img)\r\n\r\n seg_img[(_region == 1) & (seg_img == 6)] = 27", "def update_trailing_stop(self, trade, instrument, distance, local=True, distance_in_percent=True):\n close_exec_price = instrument.close_exec_price(trade.direction)\n stop_loss = trade.sl\n\n if trade.direction > 0:\n # long case\n ratio = close_exec_price / trade.entry_price\n sl_ratio = (trade.entry_price - trade.sl) / trade.entry_price\n dist = (close_exec_price - trade.sl) / trade.entry_price\n step = distance\n\n if distance_in_percent:\n # @todo\n if dist > (sl_ratio + step):\n stop_loss = close_exec_price * (1.0 - distance)\n else:\n # @todo\n pass\n\n # # if dist > (sl_ratio + step):\n # # stop_loss = close_exec_price * (1.0 - sl_ratio)\n # # logger.debug(\"update SL from %s to %s\" % (trade.sl, stop_loss))\n\n # # # alternative @todo how to trigger\n # # if ratio >= 1.10:\n # # stop_loss = max(trade.sl, close_exec_price - (close_exec_price/trade.entry_price*(close_exec_price-trade.entry_price)*0.33))\n\n # # ultra large and based on the distance of the price\n # # if dist > 0.25:\n # # stop_loss = trade.entry_price + (trade.entry_price 
* (dist * 0.5))\n\n elif trade.direction < 0:\n # short case\n ratio = close_exec_price / trade.entry_price\n sl_ratio = (trade.sl - trade.entry_price) / trade.entry_price\n dist = (trade.sl - close_exec_price) / trade.entry_price\n step = distance\n\n if distance_in_percent:\n # @todo\n if dist > (sl_ratio - step):\n stop_loss = close_exec_price * (1.0 - distance)\n pass\n else:\n # @todo\n pass\n\n if stop_loss != trade.sl:\n if local:\n trade.sl = stop_loss\n else:\n trade.modify_stop_loss(trader, instrument, stop_loss)", "def __stop_loss_dist_rsi(rsi):\n return (100 - rsi)/1000.0 # return value between 0 - 0.1", "def linear_track(self, dist):\n\t\tglobal estop_flag, move_state\n\n\t\t#Disable timer interrupt, reset halfway flag, set target distance\n\t\tsignal.alarm(0) \n\t\thalfway_flag = False\n\n\t\t#Set starting position\n\t\twith self.move_state_lock:\n\t\t\tstart_x, start_y, start_z = move_state['x'], move_state['y'], move_state['z']\n\t\t#Set current position initially to start position\n\t\tcurrent_x, current_y, current_z = start_x, start_y, start_z\n\t\t#Check if the distance travelled is greater than the goal distance\n\t\twhile math.sqrt((current_x - start_x)**2 + (current_y - start_y)**2 + (current_z - start_z)**2) < abs(dist):\n\t\t\t#Check if the estop flag is set, if so, kill movement\n\t\t\tif estop_flag:\n\t\t\t\tself.publisher.publish(Mover.stop_msg)\n\t\t\telse:\n\t\t\t\ttwist_msg = Twist()\n\t\t\t\tif dist < 0:\n\t\t\t\t\tif self.correction == riu.no_correction:\n\t\t\t\t\t\ttwist_msg.linear.x = -1 * riu.move_rate\n\t\t\t\t\telse:\n\t\t\t\t\t\ttwist_msg.linear.x = -1 * riu.move_rate/2\n\t\t\t\t\tif self.correction == \"left\":\n\t\t\t\t\t\ttwist_msg.angular.z = -1 * riu.turn_rate/2\n\t\t\t\t\telif self.correction == \"right\":\n\t\t\t\t\t\ttwist_msg.angular.z = riu.turn_rate/2\n\t\t\t\t#If distance goal is positive, move forward\n\t\t\t\telif dist > 0:\n\t\t\t\t\tif self.correction == riu.no_correction:\n\t\t\t\t\t\ttwist_msg.linear.x = riu.move_rate\n\t\t\t\t\telse:\n\t\t\t\t\t\ttwist_msg.linear.x = riu.move_rate/2\n\t\t\t\t\tif self.correction == \"left\":\n\t\t\t\t\t\ttwist_msg.angular.z = riu.turn_rate/2\n\t\t\t\t\telif self.correction == \"right\":\n\t\t\t\t\t\ttwist_msg.angular.z = -1 * riu.turn_rate/2\n\n\t\t\t\tself.publisher.publish(twist_msg)\n\t\t\t\t#Check if the current movement is half completed, if so, send a Half message and set flag to avoid message duplication\n\t\t\t\tif (math.sqrt((current_x - start_x)**2 + (current_y - start_y)**2 + (current_z - start_z)**2) >= abs(dist)/2\n\t\t\t\t\tand not halfway_flag):\n\t\t\t\t\thalfway_flag = True\n\t\t\t\t\tself.status_pub.publish(String(\"half\"))\n\n\t\t\t\t#update current_x, current_y, and current_z (using local variables to be thread safe)\n\t\t\t\twith self.move_state_lock:\n\t\t\t\t\tcurrent_x = move_state['x']\n\t\t\t\t\tcurrent_y = move_state['y']\n\t\t\t\t\tcurrent_z = move_state['z']\n\t\t\trospy.sleep(.2)\n\t\tself.publisher.publish(Mover.stop_msg)\n\t\tself.status_pub.publish(String(\"done\"))\n\t\tsignal.alarm(Mover.ready_message_interval)", "def dist_to_stop(speed):\n return speed ** 2 / 4", "def stopif(self, stop):\n if stop:\n self._stopsim = True", "def trailing_stop(self):\n # price = self.binance.get_price(self.market)\n pos = self.get_position()\n entry_price = pos['avgEntryPrice']\n qty = pos['currentQty']\n print('Trailing stop triggered')\n order_type = 'market'\n if qty > 0:\n # long position\n price = self.ws.get_ticker()['sell']\n offset_price = float(price) - 
float(self.strategy.trail_offset)\n text = 'Trailing sell stop for long position'\n qty = qty * -1\n side = 'Sell'\n print(f'Trailing Stop for long position triggered: offset price {offset_price}')\n elif qty < 0:\n # short position\n price = self.ws.get_ticker()['buy']\n offset_price = float(price) + float(self.strategy.trail_offset)\n text = 'Trailing buy stop for short position'\n qty = qty * -1\n side = 'Buy'\n print(f'Trailing Stop for short position triggered: offset price {offset_price}')\n else:\n self.logger.info('No position found!')\n return False\n\n while True:\n if side == \"Sell\":\n if self.strategy.double_check or self.ws_restarting:\n quote = self.get_quote()\n self.logger.info('Bid: {} Ask: {}'.format(quote['bidPrice'], quote['askPrice']))\n price = quote['askPrice']\n else:\n price = self.ws.get_ticker()['sell']\n self.logger.info('Bid: {} Ask: {}'.format(self.ws.get_ticker['buy'], self.ws.get_ticker['sell']))\n if (float(price) - float(self.strategy.trail_offset)) > float(offset_price):\n offset_price = float(price) - float(self.strategy.trail_offset)\n print(\"New high observed: Updating stop loss to %.8f\" % offset_price)\n elif float(price) <= float(offset_price):\n price = self.ws.get_ticker()['sell']\n ret = self.execute_order(oq=qty, ot=order_type, text=text)\n self.logger.info(\"Sell triggered | Price: %.8f | Stop loss: %.8f\" % (price, offset_price))\n self.logger.debug(ret)\n if self.strategy.double_check or self.ws_restarting:\n sleep(0.5)\n break\n\n if side == \"Buy\":\n if self.strategy.double_check or self.ws_restarting:\n quote = self.get_quote()\n self.logger.info('Bid: {} Ask: {}'.format(quote['bidPrice'], quote['askPrice']))\n price = quote['bidPrice']\n else:\n price = self.ws.get_ticker()['buy']\n if (float(price) + float(self.strategy.trail_offset)) < float(offset_price):\n offset_price = float(price) + float(self.strategy.trail_offset)\n print(\"New low observed: Updating stop loss to %.8f\" % offset_price)\n elif price >= offset_price:\n price = self.ws.get_ticker()['buy']\n ret = self.execute_order(oq=qty, ot=order_type, text=text)\n self.logger.info(\"Buy triggered | Price: %.8f | Stop loss: %.8f\" % (price, offset_price))\n self.logger.debug(ret)\n if self.strategy.double_check or self.ws_restarting:\n sleep(0.5)\n break", "def lidar_callback(self, data):\n \n proc_ranges = self.preprocess_lidar(data)\n\n closest_idx, closest_dist = self.find_closest_point(proc_ranges, data.range_max)\n \n bubble_size = int(math.floor(math.atan(0.55/closest_dist)*1080))/2\n \n print(\"Closest dist: \"+str(closest_dist)+\", Closest idx: \"+str(closest_idx)+\", No Go Zone: [\"+\n str(closest_idx-bubble_size)+\",\"+str(closest_idx+bubble_size)+\"]\")\n \n #Eliminate all points inside 'bubble' (set them to zero) \n for i in range(closest_idx-bubble_size, closest_idx+bubble_size):\n proc_ranges[i] = 0\n \n #Find max length gap \n start, end = self.find_max_gap(proc_ranges)\n\n #Find the best point in the gap \n angle, target = self.find_best_point(start, end, data.ranges)\n \n rospy.loginfo(\"Max Length Gap: [\"+str(start)+\",\"+str(end)+\"] , Target: \"+str(target))\n #if (abs(angle) > 0.2):\n #print(proc_ranges[270:810])\n \n #VELOCITY = 1\n if (abs(angle) < 0.05):\n VELOCITY = 1.5\n elif abs(angle) >= 0.05 and abs(angle) < 0.1:\n VELOCITY = 1.0\n else:\n VELOCITY = 0.5\n\n #Publish Drive message\n drive_msg = AckermannDriveStamped()\n drive_msg.header.stamp = rospy.Time.now()\n drive_msg.header.frame_id = \"laser\"\n drive_msg.drive.steering_angle = angle\n 
drive_msg.drive.speed = VELOCITY\n self.drive_pub.publish(drive_msg)", "def is_stopper(self):\r\n return self.stopper", "def stop(self):\n return _spacegrant_swig.DeNRZI_sptr_stop(self)", "def CalcStopLevel(self,entryLevel,tradeSignal):\r\n pass", "def test_get_stop_true(self):\n\n tt = TemperatureTracker()\n tt.stop()\n self.assertIsNotNone(tt.get_stop())", "def stop(self):\n return _spacegrant_swig.NRZI_sptr_stop(self)", "def stop_calibration(self):\n self.socket.send_string('c')\n return self.socket.recv_string()", "def get_stopped_pts(gpx_track, speed_threshold=2.5):\n\n n = 0\n stopped_bool = [False]*gpx_track.get_points_no() # pre-allocate\n for segment in gpx_track.segments:\n for ida, point in enumerate(segment.points):\n stopped_bool[n] = segment.get_speed[ida] < speed_threshold\n n = n + 1\n\n _, stopped_time = gpx_track.get_moving_data(speed_threshold)\n\n return stopped_bool, stopped_time", "def _test_if_stop_points_reached(self):\n for s in self.program.steps:\n if s.blending == 0 and s.move_type == MoveType.Frame:\n lastFrame = s.playback_frames[-1]\n expectedFramePose = get_frame_pose(s, lastFrame)\n delta = 1e-06\n msg = f\"Step {s.name} is a stop point (frame move, blending 0). Exact target position should be reached\"\n for index, value in enumerate(expectedFramePose):\n self.assertAlmostEqual(s.pose[index], value, msg=msg, delta=delta)", "def stop(self) -> float:\n raise NotImplementedError()", "def stopDetection(self):\n self.statusWrite(\"stop\")\n self.p.sleep()\n self.birdHere = 0", "def test_sense_distance(self):\n\n\t\tmeasurements = [29, 29, 28]\n\t\tself.driver.us_dist.side_effect = lambda x: measurements.pop()\n\t\texpected_measurement = int(ultrasonic_sensor_error(29))\n\n\t\tself.assertEqual(self.s.sense_distance(60), expected_measurement)\n\t\tself.mount.move.assert_called_once_with(x=60)", "def capture_stop(self):\n pass", "def need_stop(self, path):", "def stop_f(self, state):\n return stop(predict_correct, state, update_state,\n similarity_threshold, confidence_threshold)", "def getSTOP(self):\n return self.listener.STOP", "def set_linear_track_stop(self):\r\n return self._arm.set_linear_track_stop()", "def stop(self):\n return _spacegrant_swig.invert_bit_sptr_stop(self)", "def stop_at_detection(lag=1):\n def policy(model, hist):\n # stop if there was a positive result after lag time\n return (model.lastPositive>=0) and (model.lastPositive+lag <= model.t)\n return policy", "def on_stop(self):\n pass", "def on_stop(self):\n pass", "def on_stop(self):\n pass", "def on_stop(self):\n pass", "def on_stop(self):\n pass", "def on_stop(self):\n pass", "def on_stop(self):\n pass", "def stop_sign_detection(img_in):\n\n img = img_in.copy()\n red_color_map = red_masking(img, stop_mask=True)\n red_color_map = cv2.dilate(red_color_map, np.ones((5, 5)))\n canny_edges = cv2.Canny(red_color_map, threshold1=50, threshold2=250, apertureSize=5)\n\n # display_img(canny_edges, 'Stop Sign Canny Lines')\n\n min_line_length = 10\n max_pixel_gap = 10\n hough_lines = cv2.HoughLinesP(image=canny_edges,\n rho=0.5,\n theta=np.pi/180,\n threshold=20,\n minLineLength=min_line_length,\n maxLineGap=max_pixel_gap\n )\n if hough_lines is None:\n return None, None\n hough_lines = hough_lines[0, :] # cleanup dimensionality to make it easier to work with.\n lines = remove_duplicates(hough_lines)\n\n if len(lines) < 8:\n return None, None\n\n # once given the lines of interest perform some more calculations\n min_x, max_x, min_y, max_y = calculate_min_max_values(lines)\n mid_x = min_x + ((max_x 
- min_x) / 2)\n mid_y = min_y + ((max_y - min_y) / 2)\n return mid_x, mid_y", "def stop(self):\n return _TestA_swig.my_qpsk_demod_cb_sptr_stop(self)", "def stop_cb(evt): \n speech_recognizer.stop_continuous_recognition()\n nonlocal done\n done = True", "def stop(self):\n return _TestA_swig.cleanslate_sptr_stop(self)", "def _stop(self):", "def set_stop_callback(self):\n\t\tself.signalPoller.add_callback_to_channel(INPUT.STOP, self.stop)", "def stop_rec(self, instance):\n if self.ocr_event:\n self.ocr_stop = True\n logger.info(f'Recognizer: Canceled!')", "def test_get_stop_false(self):\n\n tt = TemperatureTracker()\n self.assertIsNone(tt.get_stop())", "def get_stop_response():\n\n speech_output = STOP_MESSAGE\n return response(speech_response(speech_output, True))", "def get_stop_response():\n\n speech_output = STOP_MESSAGE\n return response(speech_response(speech_output, True))", "def measure_distance(self):\n # set Trigger to HIGH\n GPIO.output(self.GPIO_TRIGGER, True)\n\n # set Trigger after 0.01ms to LOW\n time.sleep(0.00001)\n GPIO.output(self.GPIO_TRIGGER, False)\n\n start_time = time.time()\n stop_time = time.time()\n\n # save StartTime\n while GPIO.input(self.GPIO_ECHO) == 0:\n start_time = time.time()\n\n # save time of arrival\n while GPIO.input(self.GPIO_ECHO) == 1:\n stop_time = time.time()\n\n # time difference between start and arrival\n time_elapsed = stop_time - start_time\n # multiply with the sonic speed (343.00 m/s)\n # and divide by 2, because there and back\n distance = (time_elapsed * 343.00) / 2\n\n return distance", "def on_check_distance_button_click(self):\n\n map = self.check_for_map()\n if map is None:\n return\n\n if self.qr_polytraj is not None:\n # if self.distance_current\n # Get distance to the trajectory\n dist_traj = self.check_distance_to_trajectory()\n # self.dist_traj = dist_traj\n print(\"Minimum Distance from Trajectory to obstacles is: {} m\".format(np.min(dist_traj)))\n self.traj_dist_line_edit.setText(\"{:.4f}\".format(np.min(dist_traj)))\n else:\n print('No trajectory to Check')\n self.traj_dist_line_edit.setText(\"N/A\")\n return", "def stop(self) -> \"bool\":\n return _beamforming_swig.phasedarray_sptr_stop(self)", "def elemStop(self, elem):\n stopColor = elem.get('stop-color')\n if not stopColor:\n style = css2dict(elem.get('style'))\n if 'stop-color' in style:\n stopColor = style['stop-color']\n else:\n stopColor = '#000000'\n color = cssColor2Eps(stopColor, 'CMYKRGB')\n offsetString = elem.get('offset').strip()\n if offsetString[-1] == '%':\n offset = float(offsetString[:-1])\n else:\n offset = float(offsetString) * 100\n self.gradients[self.curGradientId]['stops'].append( (offset, color) )", "def stop(self):\n self.stop_recognising.set()\n self.thread.join()", "def stop(self):\n return _spacegrant_swig.ax25_udp_pdu_receiver_sptr_stop(self)", "def stop_sign_detection(img_in):\n\n thresh1 = 110\n thresh2 = 60\n cannyEdges = cv2.Canny(img_in, thresh1, thresh2)\n # cv2.imshow(\"test\", cannyEdges)\n\n lines = cv2.HoughLinesP(cannyEdges, rho=1, theta=np.pi /90, threshold=20, minLineLength=20, maxLineGap=1)\n\n Line_list = []\n Angle_45 = []\n Angle_M45 = []\n\n for line in lines:\n line = line.flatten()\n line_instance = Line(line)\n if line_instance.length < 500 and line_instance.angle != 0 and RedSide(img_in,line_instance):\n Line_list.append(line_instance)\n Angle_45.append(np.abs(line_instance.angle - 45))\n Angle_M45.append(np.abs(line_instance.angle + 45))\n\n if len(Angle_45) < 2:\n return None\n if len(Angle_M45) < 2:\n return None\n \n # 
index = np.argsort(Angle_45)\n # line1 = Line_list[index[0]]\n # line2 = Line_list[index[1]]\n\n index = np.argsort(Angle_M45)\n line1 = Line_list[index[0]]\n line2 = Line_list[index[1]]\n\n if line1.angle < -50 or line1.angle > -40 or line2.angle < -50 or line2.angle > -40 :\n return None\n\n #Mark the line we use to determine the center\n # cv2.line(img_in,(line1.line[0],line1.line[1]), (line1.line[2], line1.line[3]),(255, 0, 0), 3)\n # cv2.line(img_in,(line2.line[0],line2.line[1]), (line2.line[2], line2.line[3]),(255, 0, 0), 3)\n\n column45 = int((line1.mid[0] + line2.mid[0])/2)\n row45 = int((line1.mid[1] + line2.mid[1])/2)\n\n # columnM45 = int((line3.mid[0] + line4.mid[0])/2)\n # rowM45 = int((line3.mid[1] + line4.mid[1])/2)\n\n # column = (column45 + columnM45)//2 + 1\n # row = (row45 + rowM45)//2 + 1\n coordinates = (column45, row45)\n\n # cv2.circle(img_in, coordinates, 2, (255, 0, 0), 2)\n # cv2.imshow('detected lines',img_in)\n\n return coordinates\n raise NotImplementedError", "def is_stop(self):\n return self.p_state._getvalue()['stop']", "def on_stop_order(self, stop_order: StopOrder):\n pass", "def on_stop_order(self, stop_order: StopOrder):\n pass", "def on_stop_order(self, stop_order: StopOrder):\n pass", "def on_stop_order(self, stop_order: StopOrder):\n pass", "def trailing_stop(self, offset=25, ts_o_type='market', tschase=False, max_chase=None):\n\n pos = self.get_position()\n entry_price = pos['avgEntryPrice']\n qty = pos['currentQty']\n self.logger.info('Trailing stop triggered')\n if qty > 0:\n # long position, so this will be a sell stop\n buy_price = self.ws.get_ticker()['sell']\n offset_price = float(buy_price) - float(offset)\n text = f'Trailing sell stop for long position, type {ts_o_type}'\n qty = qty * -1\n side = 'Sell'\n self.logger.info(\n f'Trailing Stop for long position of entry price: {entry_price} triggered: offset price {offset_price}'\n f' current price: {[buy_price]}')\n else:\n # short position, so this will be a buy stop\n buy_price = self.ws.get_ticker()['buy']\n offset_price = float(buy_price) + float(offset)\n text = f'Trailing buy stop for short position, type {ts_o_type}'\n qty = qty * -1\n side = 'Buy'\n self.logger.info(\n f'Trailing Stop for short position of entry price: {entry_price} triggered: offset price {offset_price}'\n f' current price: {[buy_price]}')\n\n while True:\n if side == \"Sell\":\n sell_price = self.ws.get_ticker()['sell']\n if (float(sell_price) - float(offset)) > float(offset_price):\n offset_price = float(sell_price) - float(offset)\n self.logger.info(\"New high observed: %.8f Updating stop loss to %.8f\" % (sell_price, offset_price))\n elif float(sell_price) <= float(offset_price):\n sell_price = self.ws.get_ticker()['sell']\n if tschase:\n self.logger.info(f'Chasing sell order ... 
max chase: {max_chase}')\n self.logger.info(\"Sell triggered: %s | Price: %.8f | Stop loss: %.8f\" % (ts_o_type, sell_price,\n offset_price))\n chaser = threading.Thread(target=self.limit_chase, args=(qty, max_chase, True))\n chaser.start()\n else:\n self.logger.info(\"Sell triggered: %s | Price: %.8f | Stop loss: %.8f\" % (ts_o_type, sell_price,\n offset_price))\n ret = self.send_order(oq=qty, ot=ts_o_type, text=text)\n self.logger.debug(ret)\n\n self.triggered = False\n break\n\n if side == \"Buy\":\n buy_price = self.ws.get_ticker()['buy']\n if (float(buy_price) + float(offset)) < float(offset_price):\n offset_price = float(buy_price) + float(offset)\n self.logger.info(\"New low observed: %.8f Updating stop loss to %.8f\" % (buy_price, offset_price))\n elif float(buy_price) >= float(offset_price):\n buy_price = self.ws.get_ticker()['buy']\n if tschase:\n self.logger.info(f'Chasing buy order ... max chase: {max_chase}')\n self.logger.info(\"Sell triggered: %s | Price: %.8f | Stop loss: %.8f\" % (ts_o_type, buy_price,\n offset_price))\n chaser = threading.Thread(target=self.limit_chase, args=(qty, max_chase, True))\n chaser.start()\n else:\n self.logger.info(\"Sell triggered: %s | Price: %.8f | Stop loss: %.8f\" % (ts_o_type, buy_price,\n offset_price))\n ret = self.send_order(oq=qty, ot=ts_o_type, text=text)\n self.logger.debug(ret)\n\n self.triggered = False\n break", "def lidar_callback(self, data):\n ranges = data.ranges\n proc_ranges = self.preprocess_lidar(ranges)\n drive_st_msg = AckermannDriveStamped()\n drive_msg = AckermannDrive()\n prev_drive = AckermannDrive()\n prev_drive_st_msg = AckermannDriveStamped()\n \n\n #Find closest point to LiDAR\n minPointIdx = 0\n for i in range(proc_ranges) :\n if proc_ranges[i] < proc_ranges[minPointIdx] :\n minPointIdx = i\n\n carLength = rospy.get_param(\"wheelbase\")\n carWidth = rospy.get_param(\"width\")\n avoidRadius = math.sqrt(math.pow(carLength, 2) + math.pow(carWidth, 2) + 1)\n angleToConsider = math.atan(avoidRadius / proc_ranges[minPointIdx])\n numPointsReplace = math.ceil(angleToConsider / data.angle_increment)\n\n for i in range(numPointsReplace) :\n proc_ranges[minPointIdx - i] = 0\n proc_ranges[minPointIdx + i] = 0\n \n maxGap = 0\n count = 0\n idxStartGap = 0\n idxEndGap = 0\n for i in range(proc_ranges) :\n if proc_ranges[i] != 0 :\n if count == 0:\n idxStartGap = i\n count += 1\n else :\n if count > maxGap :\n maxGap = count\n idxEndGap = i - 1\n count = 0\n\n bestPoint = self.find_best_point(idxStartGap, idxEndGap, proc_ranges)\n finalSteeringAngle = data.angle_min + data.angle_increment * bestPoint\n drive_msg.steering_angle = finalSteeringAngle\n if abs(drive_msg.steering_angle < 10):\n drive_msg.speed = 5\n elif abs(drive_msg.steering_angle < 20):\n drive_msg.speed = 2\n else:\n drive_msg.speed = 1\n\n prev_drive = drive_msg\n drive_st_msg.drive = drive_msg\n prev_drive_st_msg.drive = prev_drive\n\n if proc_ranges[bestPoint] > 3: \n self.drive_pub.publish(prev_drive_st_msg)\n else :\n self.drive_pub.publish(drive_st_msg)\n\n\n\n #Eliminate all points inside 'bubble' (set them to zero) \n\n #Find max length gap \n\n #Find the best point in the gap \n\n #Publish Drive message", "def stop(self):\n return _spacegrant_swig.udp_debug_sptr_stop(self)", "def alignment_stop():\n\n smi = SMI_Beamline()\n yield from smi.modeMeasurement()\n proposal_id('2023_2', '311564_Pettersson')", "def nearest_test_pulse(self):", "def stop():", "def stop():", "def stop():", "def stop():", "def stop(self):\n return 
_spacegrant_swig.ax25_udp_pdu_gen_sptr_stop(self)", "def stop(self, pin):\n raise NotImplementedError", "def getPointAwayFrom(startPoint, direction, distance):\n x = vectorMultiply(direction, distance)\n return vectorAdd(startPoint, x)", "def stop(self):\n return _spacegrant_swig.ax25_pdu_unpacker_sptr_stop(self)", "def get_detected_traffic_stops(traffic_stops, depth_frame):\n def get_stop_markings_bbox(bbox3d, depth_frame):\n \"\"\" Gets a 2D stop marking bounding box from a 3D bounding box.\"\"\"\n # Move trigger_volume by -0.85 so that the top plane is on the ground.\n ext_z_value = bbox3d.extent.z - 0.85\n ext = [\n pylot.utils.Location(x=+bbox3d.extent.x,\n y=+bbox3d.extent.y,\n z=ext_z_value),\n pylot.utils.Location(x=+bbox3d.extent.x,\n y=-bbox3d.extent.y,\n z=ext_z_value),\n pylot.utils.Location(x=-bbox3d.extent.x,\n y=+bbox3d.extent.y,\n z=ext_z_value),\n pylot.utils.Location(x=-bbox3d.extent.x,\n y=-bbox3d.extent.y,\n z=ext_z_value),\n ]\n bbox = bbox3d.transform.transform_points(ext)\n camera_transform = depth_frame.camera_setup.get_transform()\n coords = []\n for loc in bbox:\n loc_view = loc.to_camera_view(\n camera_transform.matrix,\n depth_frame.camera_setup.get_intrinsic_matrix())\n if (loc_view.z >= 0 and loc_view.x >= 0 and loc_view.y >= 0\n and loc_view.x < depth_frame.camera_setup.width\n and loc_view.y < depth_frame.camera_setup.height):\n coords.append(loc_view)\n if len(coords) == 4:\n xmin = min(coords[0].x, coords[1].x, coords[2].x, coords[3].x)\n xmax = max(coords[0].x, coords[1].x, coords[2].x, coords[3].x)\n ymin = min(coords[0].y, coords[1].y, coords[2].y, coords[3].y)\n ymax = max(coords[0].y, coords[1].y, coords[2].y, coords[3].y)\n # Check if the bbox is not obstructed and if it's sufficiently\n # big for the text to be readable.\n if (ymax - ymin > 15 and depth_frame.pixel_has_same_depth(\n int(coords[0].x), int(coords[0].y), coords[0].z, 0.4)):\n return BoundingBox2D(int(xmin), int(xmax), int(ymin),\n int(ymax))\n return None\n\n if not isinstance(depth_frame, DepthFrame):\n raise ValueError(\n 'depth_frame should be of type perception.depth_frame.DepthFrame')\n det_obstacles = []\n for transform, bbox in traffic_stops:\n bbox_2d = get_stop_markings_bbox(bbox, depth_frame)\n if bbox_2d is not None:\n det_obstacles.append(DetectedObstacle(bbox_2d, 1.0,\n 'stop marking'))\n return det_obstacles", "def check_stops_order_on_tracks(self, stop_sequence):\n error_message = self.check_stops_order_on_tracks_direct(stop_sequence)\n if error_message:\n error_message_reversed = self.check_stops_order_on_tracks_direct(reversed(stop_sequence))\n if error_message_reversed is None:\n error_message = None\n self.city.warn('Tracks seem to go in the opposite direction to stops', self.element)\n return error_message", "def stop(self):\n return _spacegrant_swig.hdlc_framer_sptr_stop(self)", "def locus_stop(self):\n return int(open(self.locus_file).read().split('\\t')[4])", "def pre_stop(self):", "def run(self):\n print \"Starting LandmarkDetector\"\n try:\n while True:\n time.sleep(1)\n except KeyboardInterrupt:\n print \"Interrupted by user, stopping LandmarkDetector\"\n self.landmark_detection.unsubscribe(\"LandmarkDetector\")\n #stop\n sys.exit(0)", "def post_stop(self):", "def process_traffic_lights(self):\n stopping_waypoint_index = int(self.stopping_waypoint_index)\n nearest_light = self.stopping_waypoint_distance\n # the result of the image_cb function is in the equation below\n traffic_light_value = self.last_state\n #obtain the minimum stopping distance possible given 
the acceleration, and jerk limits, and slow stop point\n acceleration_limit = 10.0 - 1.0\n slow_stop_point = 0\n min_stop_distance = .2*self.current_velocity + (self.current_velocity*(self.current_velocity-slow_stop_point)/acceleration_limit - acceleration_limit/2.0*((self.current_velocity-slow_stop_point)/acceleration_limit)**2) + (0.5*slow_stop_point**2)\n #obtain the maximum stopping distance by changing the acceleration limit to 6\n acceleration_limit -= 3.0\n max_stop_distance = .2*self.current_velocity + (self.current_velocity*(self.current_velocity-slow_stop_point)/acceleration_limit - acceleration_limit/2.0*((self.current_velocity-slow_stop_point)/acceleration_limit)**2) + (0.5*slow_stop_point**2)\n #add on the current_velocity*rate to make sure it does not overlook the time gap\n max_stop_distance += self.current_velocity*1.0/self.loop_rate\n #if the previous light was red and the car began to stop, continue stopping\n if (traffic_light_value==self.Red_Light and self.prev_traffic_light_value==self.Red_Light and self.prev_stopping_waypoint_index>=0):\n None\n #else if the traffic light is green, ignore it.\n elif traffic_light_value==self.Green_Light:\n stopping_waypoint_index = -1\n #If the velocity is less than 2*slow_stop_point and the distance to the light is less than 2*(0.5*slow_stop_point**2) and the light is red\n elif (self.current_velocity<=5 and nearest_light<=5 and traffic_light_value==self.Red_Light):\n None\n #if the distance to the nearest_light is more than the max_stop_distance, ignore it\n elif nearest_light > max(max_stop_distance,10):\n stopping_waypoint_index = -1\n # else if the traffic light is Yellow or Red and the distance to the nearest light is more than the min_stop_distance\n elif ((traffic_light_value==self.Red_Light or traffic_light_value==self.Yellow_Light) and nearest_light>min_stop_distance):\n None\n #else if the traffic light is unknown, stopping waypoint will be -2, telling whatever previous action to keep proceeding\n elif traffic_light_value==self.Unknown_Light:\n stopping_waypoint_index = -2\n # #else if the traffic light is green, ignore it.\n # elif traffic_light_value==self.Green_Light:\n # stopping_waypoint_index = -1\n #publish the stopping waypoint index\n # self.upcoming_red_light_pub.publish(stopping_waypoint_index)\n self.prev_traffic_light_value = traffic_light_value\n self.prev_stopping_waypoint_index = stopping_waypoint_index\n return stopping_waypoint_index", "def longest_path_callback(self, data):\n min_idx = self.find_minimum_distance(data.poses) # closest point index\n if min_idx is None:\n return\n else:\n # extract 20 points along the closest point\n # use z position of ego car since z displacement doesn't matter\n # truncate if encounter head or tail\n path_points = np.array([\n (pose.pose.position.x, pose.pose.position.y, self.car_pos[2])\n for pose in data.poses[max(min_idx-10, 0):min_idx+10]]\n )\n # use svd to find the approximate tangent direction of longest path\n approx_dir = np.linalg.svd(path_points-np.mean(path_points,axis=0))[2][0]\n self.last_yaw_longestpath = np.arctan2(approx_dir[1], approx_dir[0])\n # perpendicular distance is then the norm of vector\n # (car_pos - pos_point) x approx_dir, x is cross product\n self.last_dist_longestpath = np.linalg.norm(\n np.cross(path_points[0,:] - self.car_pos, approx_dir)\n )\n # publish\n self.pub_closest_dist_longestpath.publish(self.last_dist_longestpath)", "def stop_cb(evt):\n # print('CLOSING on {}'.format(evt))\n speech_recognizer.stop_continuous_recognition()\n 
global done\n done = True", "def traffic_waypoint_cb(self, msg):\n\n # Save waypoint index for detected traffic light\n self.stopline_waypoint_idx = msg.data", "def stop(self) -> int:\n return self._stop", "def stop(self):\n return S.Infinity", "def wave_tx_stop():\n return _u2i(_pigpio_command(_control, _PI_CMD_WVHLT, 0, 0))", "def stop() -> None:", "def sub_stop_requested(self, value):\n if str(value).lower() == \"stop\":\n self.__gameCanceled = True\n self.__logging(\"Method \\\"sub_stopRequested()\\\" Stop is requested and successful set\")\n else:\n self.__logging(\"Method \\\"sub_stopRequested()\\\" Stop not requested\")", "async def _handle_stop_loss(self, trade: Dict[str, Any]) -> bool:\n\n pair = trade['pair']\n current_value = self.market.adjusted_close_values[pair][-1]\n\n if current_value < trade['cutoff_value']:\n stop_percent = config['trade_dynamic_stop_percent'] * trade['soft_stops']\n trade['stop_value'] *= (1.0 + stop_percent)\n if trade['stop_value'] > trade['check_value']:\n trade['stop_value'] = trade['check_value']\n\n elif current_value < trade['check_value']:\n trade['stop_value'] *= (1.0 + config['trade_dynamic_stop_percent'])\n if trade['stop_value'] > trade['check_value']:\n trade['stop_value'] = trade['check_value']\n\n if current_value <= trade['stop_value']:\n coro = self._trade_methods['sell'](trade, 'SOFT STOP SELL', 'soft_stop')\n utils.async_task(coro, loop=common.loop)\n self.trades[pair]['closed'] = []\n return True\n\n return False", "def _stop(self):\n def process_response(future):\n response = future.result()\n self._window.qtlog.append(response.ErrorResponse.Name(response.error_response)[14:])\n self.scanning = False\n self._window.qtlog.append(\"Scanner Stop\")\n\n response_future = self.client.StopScan.future(scanner_commands_pb2.ScannerRequest(request=1))\n response_future.add_done_callback(process_response)", "def _distanceCheck(self):\n\n # Catches the occasional polling error that occurs with the ultrasonic distance sensor\n try:\n # 3 point averager to smooth out distance data\n dist = self.u.distance\n sleep(0.05)\n dist += self.u.distance\n sleep(0.05)\n dist += self.u.distance\n dist = dist/3\n\n #print(\"Distance check reading: {0:1.3f}\".format(dist))\n\n if( dist <= self.detectDist ):\n if( self.birdHere == 0 ):\n self.statusWrite(\"in\")\n self.birdHere = 1\n\n else:\n if( self.birdHere == 1 ):\n self.statusWrite(\"out\")\n self.birdHere = 0\n\n except RuntimeError:\n pass", "def stop_step_sweep(self):\n self.write(\":SOUR:SWE:CONT:STAT OFF\")", "def is_at_stop(self, location):\n # TODO(ionel): This method doesn't work yet because the opendrive do\n # not contained waypoints annotated as stops.\n loc = to_carla_location(location)\n waypoint = self._map.get_waypoint(loc,\n project_to_road=False,\n lane_type=carla.LaneType.Stop)\n return not waypoint", "def stop():\n status = write_i2c_block(ADDRESS,stop_cmd+[0,0,0])\n set_left_speed(0)\n set_right_speed(0)\n return status", "def check_main_stop(notifier):\n pass", "def stop(self):\n\t\tGPIO.output(self._dir_pin_1, GPIO.HIGH)\n\t\tGPIO.output(self._dir_pin_2, GPIO.HIGH)\n\t\tself._last_dir = 's'\n\t\t# self._motor_pwm.ChangeDutyCycle(0)", "def stop(self):\n return _uhd_swig.usrp_source_sptr_stop(self)", "def is_stop(self) -> bool:\n return self.__stop", "def _callback_wind(self, msg):\n\t\t#self.psi = self.north2east( msg.wind_direction ) \n\n\n\t# def _callback_waypoints(self, msg):\n\t\t\"\"\"\n\t\tNon fonctionnel\n\t\t\"\"\"", "def is_stop_word(self, word):\n pass", "def 
_init_stop(self):\n def stop(core, args):\n return core.stop()\n\n usage = 'stl stop'\n desc = (\n 'make a log that you just stopped working'\n )\n\n subp = self.subparsers.add_parser(\n 'stop', usage=usage, description=desc, help=desc)\n\n subp.set_defaults(func=stop)", "def lidar_callback(self, data):\n ranges = data.ranges\n proc_ranges = self.preprocess_lidar(ranges)\n\n #Find closest point to LiDAR\n closest = proc_ranges.argmin()\n\n #Eliminate all points inside 'bubble' (set them to zero)\n min_index = closest - self.BUBBLE_RADIUS\n max_index = closest + self.BUBBLE_RADIUS\n if min_index < 0: min_index = 0\n if max_index >= len(proc_ranges): max_index = len(proc_ranges)-1\n proc_ranges[min_index:max_index] = 0\n\n #Find max length gap\n gap_start, gap_end = self.find_max_gap(proc_ranges)\n\n #Find the best point in the gap \n best = self.find_best_point(gap_start, gap_end, proc_ranges)\n\n #Publish Drive message\n steering_angle = self.get_angle(best, len(proc_ranges))\n drive_msg = AckermannDriveStamped()\n drive_msg.header.stamp = rospy.Time.now()\n drive_msg.drive.steering_angle = steering_angle\n if abs(steering_angle) > self.STRAIGHTS_STEERING_ANGLE:\n drive_msg.drive.speed = self.CORNERS_SPEED\n else: drive_msg.drive.speed = self.STRAIGHTS_SPEED\n self.drive_pub.publish(drive_msg)\n rospy.loginfo('Steering angle in degrees: %f', steering_angle*180)", "def __update_speed_stop(self):\n if self.velocidade > SERVO_DUTY_CYCLE_MEIO:\n self.velocidade -= self.incremento_veloc\n \n # Para mesmo que haja arredondamento de float\n if self.velocidade <= SERVO_DUTY_CYCLE_MEIO:\n self.velocidade = SERVO_DUTY_CYCLE_MEIO\n self.servo.set_duty_cycle(0.0)\n else:\n self.servo.set_duty_cycle(self.velocidade)\n elif self.velocidade < SERVO_DUTY_CYCLE_MEIO:\n self.velocidade += self.incremento_veloc\n \n # Para mesmo que haja arredondamento de float\n if self.velocidade >= SERVO_DUTY_CYCLE_MEIO:\n self.velocidade = SERVO_DUTY_CYCLE_MEIO\n self.servo.set_duty_cycle(0.0)\n else:\n self.servo.set_duty_cycle(self.velocidade)\n else:\n self.servo.set_duty_cycle(0.0)" ]
[ "0.59943485", "0.56399006", "0.5583566", "0.5494318", "0.54302335", "0.54221815", "0.5411941", "0.5367256", "0.53257966", "0.53016096", "0.5254286", "0.52245134", "0.5204961", "0.5185182", "0.5171382", "0.5162159", "0.5156524", "0.51438564", "0.5138958", "0.51210594", "0.50905496", "0.50705594", "0.50698704", "0.5066117", "0.5061649", "0.50589126", "0.5058846", "0.50330573", "0.50330573", "0.50330573", "0.50330573", "0.50330573", "0.50330573", "0.50330573", "0.50297534", "0.5027199", "0.5019622", "0.5018159", "0.5014713", "0.50123155", "0.49967912", "0.4995415", "0.49827388", "0.49827388", "0.4979411", "0.49581656", "0.49581027", "0.4956153", "0.49495098", "0.49485996", "0.49355173", "0.49351633", "0.49349248", "0.49349248", "0.49349248", "0.49349248", "0.49297705", "0.4922452", "0.4916227", "0.4908586", "0.49082267", "0.48975697", "0.48975697", "0.48975697", "0.48975697", "0.48924887", "0.48864836", "0.48834258", "0.48782146", "0.48666954", "0.4866557", "0.48552883", "0.48547333", "0.48472333", "0.48468825", "0.48408806", "0.4836843", "0.4827281", "0.48259398", "0.4814682", "0.4806751", "0.48050642", "0.48009914", "0.47974393", "0.47972828", "0.47953242", "0.47912064", "0.47795373", "0.47764313", "0.4774127", "0.47656375", "0.47574005", "0.4756149", "0.47501537", "0.4748489", "0.4746745", "0.47427213", "0.47398648", "0.473268", "0.47321448" ]
0.70093864
0
Do not return anything, modify board inplace instead.
def gameOfLife(self, board: List[List[int]]) -> None: if not board or len(board)==0: return rows = len(board) cols = len(board[0]) #lives = 0 for i in range(rows): for j in range(cols): lives = self.n_neighbors(board,i,j) # Rule 1 and Rule 3 if board[i][j]==1 and (lives <2 or lives >3): board[i][j]= 2 # -1 signifies the cell is now dead but originally was live. if board[i][j]== 0 and lives ==3: board[i][j]=3 # signifies the cell is now live but was originally dead. for i in range(rows): for j in range(cols): board[i][j] = board[i][j]%2 return board
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def applyMove(self, (from_row,from_col), (to_row,to_col)):\n newboard = deepcopy(self)\n piece = newboard.board[from_row][from_col]\n newboard.board[from_row][from_col] = None\n newboard.board[to_row][to_col] = piece\n newboard.toplay = 'BLACK' if self.toplay == 'WHITE' else 'WHITE'\n return newboard", "def _board_after_move_only(source, dest, board):\n new_board = deepcopy(board)\n x_old, y_old, x_new, y_new = source[0], source[1], dest[0], dest[1]\n new_board[x_new][y_new] = new_board[x_old][y_old]\n new_board[x_old][y_old] = 0\n return new_board", "def resetBoard(self):\n pass", "def updated_board(board_w, board_h, piece_list, board, position):\n board_state = board.state\n new_board = Board(board_w, board_h, 1, piece_list, position)\n new_board.state = board_state\n return new_board", "def board(self):\n return copy.deepcopy(self._board)", "def set_board(board):", "def set_tile(self, row, col, value):\r\n del self.board[row][col]\r\n self.board[row].insert(col,value)\r\n return self.board", "def reset(self, board):", "def get_board(self):\n return copy.deepcopy(self.board)", "def step(self):\n\t\tnewBoard = CellArray(self.size)\n\t\tfor i in range(0, self.size, 1):\n\t\t\tfor j in range(0, self.size, 1):\n\t\t\t\tnewBoard.board[i][j] = self.changeCell(i, j)\n\t\tself.board = newBoard.board", "def resetBoard(self):\n\t\tself.board = np.zeros((self.boardSize,self.boardSize))", "def resetBoard(self):\n\t\tself.board = np.zeros((self.boardSize,self.boardSize))", "def _update_board(self, move: int) -> None:\n row = self._column_to_row[move] # Find what row to place the disk in\n if self._is_red_active:\n self.board_array[row][move] = 1\n self.hash = self.hash ^ int(self._red_hash_keys[row][move]) # Update hash\n else:\n self.board_array[row][move] = -1\n self.hash = self.hash ^ int(self._yellow_hash_keys[row][move]) # # Update hash\n\n self._column_to_row[move] += 1\n if self._column_to_row[move] == 6:\n self._valid_moves.remove(move)", "def reset_board(board):\n for idx in board.keys():\n board[idx] = ' '\n return board", "def copy(board):\r\n\theight = len(board)\r\n\twidth = len(board[0])\r\n\tnewBoard = createBoard(height, width)\r\n\tfor row in range(height):\r\n\t\tfor col in range(width):\r\n\t\t\tnewBoard[row][col] = board[row][col]\r\n\treturn newBoard", "def advance(self, board):", "def update_board(self, value, row=-1, col=-1, cell=-1):\n\n if row != -1 and col != -1 and cell == -1:\n _row,_col = row,col\n\n elif row == -1 and col == -1 and type(cell) == tuple:\n _row,_col = cell\n\n else:\n raise Exception(\"you must provide either row and column or a cell tuple\")\n\n group = self.calc_group(_row, _col)\n\n self.rows[_row].discard(value)\n self.columns[_col].discard(value)\n self.groups[group].discard(value)\n\n self.board[_row][_col] = value", "def move(self, board):\n raise NotImplementedError", "def reset(self):\n # replace with your code\n self.board = [[0 for dummy_index in range(self.grid_width)] for dummy_inner_index in range(self.grid_height)]", "def stored_board(Board, rows):\n for i in range(len(Board)):\n if checkSafe(Board, rows, i):\n Board[rows] = i\n if rows < len(Board) - 1:\n stored_board(Board, rows + 1)\n else:\n print([[i, Board[i]] for i in range(len(Board))])", "def simulate_place_disc(self, board, col_nr, curr_player):\n if board[0, col_nr] != 0:\n return board\n new_board = np.copy(board)\n for row_nr in reversed(range(self.rows())):\n if new_board[row_nr, col_nr] == 0:\n new_board[row_nr, col_nr] = curr_player\n return new_board", "def _move(self, 
at, to):\r\n copy = self.copy()\r\n i, j = at\r\n r, c = to\r\n copy.board[i][j], copy.board[r][c] = copy.board[r][c], copy.board[i][j]\r\n return copy", "def gameOfLife(self, board: List[List[int]]) -> None:\n temp = copy.deepcopy(board)\n for i in temp:\n i.append(0)\n i.insert(0,0)\n pass", "def rotate_board(self, board):\n\n head = board[0]\n board = board[1:] #this is doing a shallow copy so we need to return board.\n board.append(head)\n\n return board", "def advance_board(self):\n raise NotImplementedError", "def update_board(board: Board, move: Move) -> Board:\n old_position = move[0]\n new_position = move[1]\n character = board[old_position[0]][old_position[1]]\n board = change_position(board, new_position, character)\n board = clear_position(board, old_position)\n \n return board", "def copy(board):\r\n height = len(board)\r\n width = len(board[0])\r\n\r\n copyBoard = createBoard(width, height)\r\n for row in range(height):\r\n for col in range(width):\r\n copyBoard[row][col] = board[row][col]\r\n return copyBoard", "def _update_board(self):\n\n self.game_board.update_board(self.tetrino_set)", "def move(self, row, col, player):\n if self._board[row][col] == EMPTY:\n self._board[row][col] = player", "def result(board, action):\n i, j = action\n new_board = copy.deepcopy(board)\n if board[i][j]:\n raise ValueError\n else:\n new_board[i][j] = player(board)\n return new_board", "def move(self, row, col, player):\n self.board[row][col] = player", "def test_get_board(self):\n copy1 = self.game.get_board()\n self.assertEqual(copy1._board, self.game._board)\n\n for row in range(self.game._dim):\n for col in range(self.game._dim):\n self.game._board[row][col] = PLAYERX\n copy2 = self.game.get_board()\n self.assertEqual(copy2._board, self.game._board)\n\n for row in range(self.game._dim):\n for col in range(self.game._dim):\n self.game._board[row][col] = PLAYERO\n copy3 = self.game.get_board()\n self.assertEqual(copy3._board, self.game._board)", "def reset(self):\r\n self.board = [[0 for i in range(self.width)]\r\n for i in range(self.height)]\r\n self.new_tile()\r\n self.new_tile()", "def reset_board(self):\n\n self.board = np.array(self.initial_board)", "def reset_board(self, board_copy):\n board_copy = [' ' * 20 for _ in range(20)]\n for i in range(20):\n for j in range(20):\n self._board[i][j] = board_copy[i][j]", "def advance_board(self):\n # We can advance the board using a pretty simple convolution,\n # so we don't have to execute a lot of loops in python.\n # Of course, this probably won't be sufficient for extremely\n # large boards.\n self.num_steps += 1\n board = self.board\n cfilter = np.array([[1,1,1],[1,0,1],[1,1,1]], dtype=np.uint16)\n\n alive = board & CellTypes.alive > 0\n spawning = board & CellTypes.spawning > 0\n frozen = board & CellTypes.frozen > 0\n\n can_die = ~frozen & (\n convolve2d(board & CellTypes.preserving, cfilter) == 0)\n can_grow = ~frozen & (\n convolve2d(board & CellTypes.inhibiting, cfilter) == 0)\n\n num_neighbors = convolve2d(alive, cfilter)\n num_spawn = convolve2d(spawning, cfilter)\n spawn_prob = 1 - (1 - self.spawn_prob)**num_spawn\n has_spawned = coinflip(spawn_prob, board.shape)\n\n born_rule = np.zeros(9, dtype=bool)\n born_rule[list(self.born_rule)] = True\n dead_rule = np.ones(9, dtype=bool)\n dead_rule[list(self.survive_rule)] = False\n\n new_alive = (born_rule[num_neighbors] | has_spawned) & ~alive & can_grow\n new_dead = dead_rule[num_neighbors] & alive & can_die\n\n new_flags = np.zeros_like(board)\n color_weights = 1 * alive + 2 * 
spawning\n for color in CellTypes.colors:\n # For each of the colors, see if there are two or more neighbors\n # that have it. If so, any new cells (whether born or spawned)\n # will also get that color.\n has_color = board & color > 0\n new_color = convolve2d(has_color * color_weights, cfilter) >= 2\n new_flags += color * new_color\n indestructible = alive & (board & CellTypes.destructible == 0)\n new_flags += CellTypes.destructible * (convolve2d(indestructible, cfilter) < 2)\n\n board *= ~(new_alive | new_dead)\n board += new_alive * (CellTypes.alive + new_flags)", "def update_board(self):\n for x in self.board:\n for f in x:\n if f.status == 0:\n if f.name == \"conway\":\n assert type(self.population)==int\n if f.live_neighbors == 3:\n f.symbol =\"*\"\n f.status = 1\n self.population += 1\n elif f.name == \"fredkin\":\n if f.live_neighbors == 1 or f.live_neighbors == 3 :\n f.status = 1\n f.symbol = str(f.age)\n self.population += 1\n else:\n f.status = 0\n\n elif f.status == 1:\n if f.name == \"conway\":\n assert type(self.population)==int\n #assert type(f.status)== 1\n if not((f.live_neighbors == 2 or f.live_neighbors == 3)):\n f.symbol = \".\"\n f.status = 0\n else:\n self.population += 1\n elif f.name == \"fredkin\":\n if f.live_neighbors == 1 or f.live_neighbors == 3:\n f.status = 1\n f.age += 1\n if f.age <= 2:\n f.symbol = str(f.age)\n self.population += 1\n else:\n self.board.replace(f, Conway_Cell(\"*\"))\n else:\n f.status = 0\n f.symbol = \"-\"", "def place(self, board):\r\n self.board = board", "def extensions(self):\n def check_empty_space(gridcopy):\n \"\"\"\n Return the place of the empty space.\n\n @type gridcopy: tuple[tuple[str]]\n @rtype: tuple\n\n # >>> grid = ((\"*\", \"2\", \"3\"), (\"4\", \"5\", \"6\"))\n # >>> check_empty_space(grid)\n # (0, 0)\n # >>> grid = ((\"1\", \"2\", \"3\"), (\"4\", \"5\", \"6\"), (\"7\" , \"8\" , \"*\"))\n # >>> check_empty_space(grid)\n # (2, 2)\n \"\"\"\n for i in range(len(gridcopy)):\n if \"*\" in gridcopy[i]:\n return i, gridcopy[i].index(\"*\")\n # Raise Error if there is no empty space in the puzzle.\n return AssertionError, \"No empty space in the puzzle.\"\n\n def tuple_to_list(tup):\n \"\"\"\n Return a list which was originally tuple.\n\n @type tup: tuple\n @rtype: list[str]\n \"\"\"\n return [element for element in tup]\n\n def shift_right_left(gridcopy, row_num, column_num):\n \"\"\"\n Return the list of affected grid. 
If * cannot move to the specific\n place, it returns an empty list\n\n @type gridcopy: tuple[tuple[str]]\n @type row_num: int\n @type column_num: int\n @rtype: list[tuple[tuple[str]]]\n \"\"\"\n result = []\n # Extract the specific row to change.\n current_row = gridcopy[row_num]\n # Change the current_row to list in order to mutate.\n current_row_lst = tuple_to_list(current_row)\n if location[1] != 0:\n # Going left!\n # (\"5\", \"*\", \"6\") to (\"*\", \"5\", \"6\")\n current_row_lst[column_num] = current_row_lst[column_num - 1]\n current_row_lst[column_num - 1] = \"*\"\n # Switch back to tuple\n left_altered = tuple(current_row_lst)\n board_lst = tuple_to_list(gridcopy)\n board_lst[row_num] = left_altered\n result.append(tuple(board_lst))\n if location[1] != self.m - 1:\n # Going right!\n # (\"5\", \"*\", \"6\") to (\"5\", \"6\", \"*\")\n # Reset the values to swap right.\n current_row = gridcopy[row_num]\n current_row_lst = tuple_to_list(current_row)\n current_row_lst[column_num] = current_row_lst[column_num + 1]\n current_row_lst[column_num + 1] = \"*\"\n # Switch back to tuple\n right_altered = tuple(current_row_lst)\n board_lst = tuple_to_list(gridcopy)\n board_lst[row_num] = right_altered\n result.append(tuple(board_lst))\n return result\n\n def shift_down_right(gridcopy, row_num, column_num):\n \"\"\"\n Return the list of affected grid. If * cannot move to the specific\n place, it returns an empty list\n\n @type gridcopy: tuple[tuple[str]]\n @type row_num: int\n @type column_num: int\n @rtype: list[tuple[tuple[str]]]\n \"\"\"\n result = []\n if location[0] != 0:\n current_row = gridcopy[location[0]]\n upper_row = gridcopy[location[0] - 1]\n current_row_lst = tuple_to_list(current_row)\n upper_row_lst = tuple_to_list(upper_row)\n current_row_lst[column_num] = upper_row_lst[column_num]\n upper_row_lst[column_num] = \"*\"\n current_row, upper_row = tuple(current_row_lst), \\\n tuple(upper_row_lst)\n board_lst = tuple_to_list(gridcopy)\n board_lst[row_num] = current_row\n board_lst[row_num - 1] = upper_row\n upper_altered = tuple(board_lst)\n result.append(upper_altered)\n if location[0] != self.n - 1:\n upper_row = gridcopy[location[0] + 1]\n lower_row = gridcopy[location[0]]\n upper_lst = tuple_to_list(upper_row)\n lower_lst = tuple_to_list(lower_row)\n lower_lst[location[1]] = upper_lst[location[1]]\n upper_lst[location[1]] = \"*\"\n upper_row, lower_row = tuple(upper_lst), tuple(lower_lst)\n big_lst = tuple_to_list(gridcopy)\n big_lst[location[0]] = lower_row\n big_lst[location[0] + 1] = upper_row\n changed = tuple(big_lst)\n result.append(changed)\n return result\n\n grid = self.from_grid\n # Location is the tuple indicator of location of the empty space.\n # (Row, Column)\n location = check_empty_space(grid)\n row = location[0]\n column = location[1]\n possibilities = shift_right_left(grid, row, column) +\\\n shift_down_right(grid, row, column)\n return [MNPuzzle(x, self.to_grid) for x in possibilities]", "def gameOfLife(board: List[List[int]]) -> None:\n result = [board[i][:] for i, _ in enumerate(board)]\n for x, row in enumerate(board):\n for y, cell in enumerate(row):\n result[x][y] = helper(x, y, board, cell)\n\n for x, row in enumerate(board):\n for y, cell in enumerate(row):\n board[x][y] = result[x][y]", "def reset(self):\n self.board = place_mines(self.board_size, self.num_mines)\n self.my_board = np.ones((self.board_size, self.board_size), dtype=int) * CLOSED\n self.valid_actions = np.ones((self.board_size, self.board_size), dtype=np.bool)\n return self.my_board", "def 
reset_board(self):\n cell_list = self.get_cells()\n for current_cell in cell_list:\n current_cell.set_cell_state(0) # remove player ownership of cell", "def action(new_board, move, player):\r\n \r\n global nodes_generated \r\n global min_prune\r\n global max_prune\r\n global max_depth\r\n \r\n if player == 1:\r\n for i in range(0,len(new_board.white)):\r\n if new_board.white[i] == move[0]:\r\n new_board.white[i] = move[1]\r\n if len(move) == 3:\r\n new_board.black.remove(move[2])\r\n elif player == -1:\r\n for i in range(0,len(new_board.black)):\r\n if new_board.black[i] == move[0]:\r\n new_board.black[i] = move[1]\r\n if len(move) == 3:\r\n new_board.white.remove(move[2])\r\n return new_board", "def mirror_y(board):\n new_board = board[:]\n new_board.reverse()\n return new_board", "def change(self, start, end):\n newBoard = BoardState([[0 for j in range(self.n)] for i in range(self.n)], self.n)\n newBoard.board = [values[:] for values in self.board]\n newBoard.numPlayer1 = self.numPlayer1\n newBoard.numPlayer2 = self.numPlayer2\n\n # And update it to reflect the move\n row, col = start[0], start[1]\n newrow, newcol = end[0], end[1]\n\n newBoard.board[row][col] = self.board[newrow][newcol]\n newBoard.board[newrow][newcol] = self.board[row][col]\n \n newBoard.ganh(newrow, newcol)\n newBoard.chet(newrow, newcol)\n return newBoard", "def gameOfLife(self, board: List[List[int]]) -> None:\n self.board = copy.deepcopy(board)\n self.rows = len(self.board)\n self.cols = len(self.board[0])\n for i in range(self.rows):\n for j in range(self.cols):\n neighbors = self.count_neighbors(i, j)\n if board[i][j] == 1:\n if neighbors < 2 or neighbors > 3:\n board[i][j] = 0\n else:\n if neighbors == 3:\n board[i][j] = 1", "def update_board(self, symbol, modified_squares):\n\t\tfor coord in modified_squares:\n\t\t\tself.board[coord] = symbol", "def result(board, action):\n copyboard=copy.deepcopy(board)\n i,j=action\n if(copyboard[i][j]!=EMPTY):\n raise Exception(\"invalid action\")\n else:\n copyboard[i][j]=player(board)\n return copyboard\n raise NotImplementedError", "def get_board(self):\n return self.board.copy()", "def settle_board(board):\n while True:\n matches = find_matches(board)\n\n if not matches:\n break\n\n new_board, destroyed_sqs = destroy_tiles(board)\n for match in matches:\n crit = calc_critical_square(match)\n if crit:\n new_board.set_at(crit[0], crit[1], CriticalTile())\n board, _ = apply_gravity(new_board)\n\n return board", "def copy(self):\r\n\t\tnewBoard = BoardClass()\r\n\r\n\t\tfor row in self.board:\r\n\t\t\tnewBoard.board.append(row[:])\r\n\t\tnewBoard.x = self.x\r\n\t\tnewBoard.y = self.y\r\n\t\tnewBoard.heuristic = self.heuristic\r\n\t\tnewBoard.n = self.n\r\n\t\tnewBoard.hType = self.hType\r\n\t\tnewBoard.steps = self.steps\r\n\r\n\t\treturn newBoard", "def reset(self):\n self.my_board = np.ones((self.board_size, self.board_size), dtype=int) * CLOSED\n self.board = place_mines(self.board_size, self.num_mines)\n self.num_actions = 0\n self.valid_actions = np.ones((self.board_size * self.board_size), dtype=bool)\n\n return self.my_board", "def result(board, action):\n\n playersPiece = player(board)\n newBoard = [[EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY]]\n\n for row in range(len(board)):\n for column in range(len(board[row])):\n newBoard[row][column] = board[row][column]\n\n\n row,column = action\n\n if newBoard[row][column] == EMPTY:\n newBoard[row][column] = playersPiece\n else:\n raise Exception(\"Invalid action.\")\n return newBoard\n # raise 
NotImplementedError", "def mirror_x(board):\n new_board = board[:]\n for (ix, item) in enumerate(board):\n new_board[ix] = (len(board) - 1) - item\n\n return new_board", "def reset(self):\r\n\r\n self._board = [[0 for x in range(self._grid_width)]\r\n for y in range(self._grid_height)]\r\n self.new_tile()", "def copy_board(self):\n board_copy = [[' '] * 20 for _ in range(20)]\n for i in range(20):\n for j in range(20):\n board_copy[i][j] = self._board[i][j]", "def advance(board):\n new_board = set()\n for cell in board:\n if ru1(cell):\n continue\n\n else:\n new_board.add(cell) # your code below\n pass\n\n return new_board", "def normalize(self, board):\n self.normalize_columns(board)\n return board", "def dirty_square(self, row: int, column: int) -> None:\n self.__squares[row][column] = Floor._dirty", "def clone(self):\n copy = Board(self.game)\n for old_piece in self.game_pieces:\n copy.game_pieces.append(old_piece.clone())\n \n return copy", "def copy(self):\n\t\tb = Board(self.size, self.end_count)\n\t\tfor x in range(self.size):\n\t\t\tfor y in range(self.size):\n\t\t\t\tb.tiles[x][y] = self.tiles[x][y]\n\t\treturn b", "def copy(self):\r\n copy_board = Board(self._squareCount, self._pebbleCount)\r\n copy_board.squares = [list(row) for row in self.squares]\r\n return copy_board", "def convert_board(board):\n\tboard_copy = list(board)\n\tfor count, instance in enumerate(board_copy):\n\t\tif board_copy[count] == ' ':\n\t\t\tboard_copy[count] = count\n\treturn board_copy", "def result(board, action):\n i, j = action\n\n # Deepcopy so that original board is not affected as it will be needed for recursion\n resultBoard = copy.deepcopy(board)\n\n if resultBoard[i][j] is not EMPTY:\n raise InvalidMoveException\n \n resultBoard[i][j] = player(board)\n return resultBoard", "def result(board, action):\n\n # Create completely new board\n temp_board = copy.deepcopy(board)\n # Location of move to be made\n row_index = action[0]\n col_index = action[1]\n\n # Check for valid action\n if not 0 <= row_index <= 2 or not 0 <= col_index <= 2:\n raise Exception(\"Invalid Action\")\n\n # Make move and update board\n if board[row_index][col_index] is None:\n temp_board[row_index][col_index] = player(board)\n else:\n raise Exception(\"Invalid Action\")\n\n return temp_board", "def toggle(board, pos):\n if pos >= 1 and pos <= length:\n board ^= (1 << pos-1);\n return board;", "def setBoard(self, board):\n\t\tself.gameBoard = board", "def result(board, action):\n try:\n if action in actions(board):\n copy_board = copy.deepcopy(board)\n i, j = action\n player_turn = player(board)\n copy_board[i][j] = player_turn\n print(copy_board)\n return copy_board\n else:\n raise IndexError\n except IndexError:\n print(\"Invalid move\")", "def translate_board(self):\n for y in range(len(self.compressed_board)):\n for x, color in enumerate(self.compressed_board[y]):\n self.board[y][x] = COLORS[color]", "def copy(self):\r\n board = []\r\n for row in self.board:\r\n board.append([x for x in row])\r\n return Puzzle(board)", "def make_move(self,board, action, player_id):\n row = np.max(np.where(board[:, action] == EMPTY_VAL))\n new_board = np.copy(board)\n new_board[row, action] = player_id\n\n return new_board", "def make_move(self, move):\n self.board[int(move) - 1] = self.nplayer", "def main():\n\n # Initializes all game variables.\n gameOver = False\n winner = False\n gameBoard = emptyBoard()\n\n # Randomly fills two tiles to start with.\n for i in range(0, 2):\n gameBoard = fillEmpty(gameBoard)\n\n # Debugging/testing out 
different cases.\n # gameBoard = [[0, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]]\n\n # gameBoard[0][0] = 1024\n # gameBoard[1][0] = 1024\n\n # gameBoard[0] = [2, 2, 2, 2]\n \n # gameBoard[0][0] = 2\n # gameBoard[1][0] = 2\n # gameBoard[2][0] = 4\n # gameBoard[3][0] = 4\n\n # Runs the game loop.\n while not gameOver:\n # Sets the frame rate and launches the game in its default state.\n CLOCK.tick(FPS)\n move = ''\n change = False\n displayBoard(gameBoard)\n\n # Read the player's button inputs.\n for event in pygame.event.get():\n if(event.type == pygame.KEYDOWN) & (move == ''):\n if(event.key == pygame.K_UP):\n move = 'u'\n if(event.key == pygame.K_DOWN):\n move = 'd'\n if(event.key == pygame.K_LEFT):\n move = 'l'\n if(event.key == pygame.K_RIGHT):\n move = 'r'\n if(event.type == pygame.QUIT):\n pygame.quit()\n return\n\n if(move == 'r'):\n # Initial values of the board before movement.\n oldBoard = emptyBoard()\n\n for i in range(0, 4):\n for j in range(0, 4):\n oldBoard[i][j] = gameBoard[i][j]\n\n # Shifts all tiles to the right.\n for i in range(0, 4):\n newRow = []\n empty = 0\n\n for col in gameBoard[i]:\n if col > 0:\n newRow.append(col)\n empty += 1\n\n for n in range(0, 4 - empty):\n newRow.insert(0, 0)\n\n for n in range(0, 4):\n gameBoard[i][n] = newRow[n]\n\n if(newRow[0] == newRow[1] == newRow[2] == newRow[3]):\n doubCheck = True\n checkType = False\n elif(newRow[0] == newRow[1]) & (newRow[2] == newRow[3]):\n doubCheck = True\n checkType = True\n else:\n doubCheck = False\n\n for n in range(2, -1, -1):\n if(gameBoard[i][n] == gameBoard[i][n+1]):\n gameBoard[i][n+1] *= 2\n gameBoard[i][n] = 0\n elif(gameBoard[i][n+1] == 0):\n gameBoard[i][n+1] = gameBoard[i][n]\n gameBoard[i][n] = 0\n \n if doubCheck:\n if checkType:\n for n in range(0, 3):\n if(gameBoard[i][n] == gameBoard[i][n+1]):\n gameBoard[i][n+1] *= 2\n gameBoard[i][n] = 0\n else:\n for n in range(2, -1, -1):\n if(gameBoard[i][n] == gameBoard[i][n+1]):\n gameBoard[i][n+1] *= 2\n gameBoard[i][n] = 0\n elif(gameBoard[i][n+1] == 0):\n gameBoard[i][n+1] = gameBoard[i][n]\n gameBoard[i][n] = 0\n\n\n # Checks for changes pre- and post-movement\n change = gameBoard != oldBoard\n elif(move == 'l'):\n # Initial values of the board before movement.\n oldBoard = emptyBoard()\n\n for i in range(0, 4):\n for j in range(0, 4):\n oldBoard[i][j] = gameBoard[i][j]\n\n # Shifts all tiles to the left.\n for i in range(0, 4):\n newRow = []\n empty = 0\n\n for col in gameBoard[i]:\n if col > 0:\n newRow.append(col)\n empty += 1\n\n for n in range(0, 4 - empty):\n newRow.append(0)\n\n for n in range(0, 4):\n gameBoard[i][n] = newRow[n]\n\n if(newRow[0] == newRow[1] == newRow[2] == newRow[3]):\n doubCheck = True\n checkType = False\n elif(newRow[0] == newRow[1]) & (newRow[2] == newRow[3]):\n doubCheck = True\n checkType = True\n else:\n doubCheck = False\n\n for n in range(0, 3):\n if(gameBoard[i][n] == gameBoard[i][n+1]):\n gameBoard[i][n] *= 2\n gameBoard[i][n+1] = 0\n elif(gameBoard[i][n] == 0):\n gameBoard[i][n] = gameBoard[i][n+1]\n gameBoard[i][n+1] = 0\n\n if doubCheck:\n if checkType:\n for n in range(3, 0, -1):\n if(gameBoard[i][n] == gameBoard[i][n-1]):\n gameBoard[i][n-1] *= 2\n gameBoard[i][n] = 0\n else:\n for n in range(0, 3):\n if(gameBoard[i][n] == gameBoard[i][n+1]):\n gameBoard[i][n] *= 2\n gameBoard[i][n+1] = 0\n elif(gameBoard[i][n] == 0):\n gameBoard[i][n] = gameBoard[i][n+1]\n gameBoard[i][n+1] = 0\n\n\n # Checks for changes pre- and post-movement\n change = gameBoard != oldBoard\n elif(move == 'd'):\n # 
Initial values of the board before movement.\n oldBoard = emptyBoard()\n\n for i in range(0, 4):\n for j in range(0, 4):\n oldBoard[i][j] = gameBoard[i][j]\n\n # Shifts all tiles downward.\n for i in range(0, 4):\n newCol = []\n empty = 0\n\n for j in range(0, 4):\n if gameBoard[j][i] > 0:\n newCol.append(gameBoard[j][i])\n empty += 1\n\n for n in range(0, 4 - empty):\n newCol.insert(0, 0)\n\n for n in range(0, 4):\n gameBoard[n][i] = newCol[n]\n\n if(newCol[0] == newCol[1] == newCol[2] == newCol[3]):\n doubCheck = True\n checkType = False\n elif(newCol[0] == newCol[1]) & (newCol[2] == newCol[3]):\n doubCheck = True\n checkType = True\n else:\n doubCheck = False\n\n for n in range(2, -1, -1):\n if(gameBoard[n][i] == gameBoard[n+1][i]):\n gameBoard[n+1][i] *= 2\n gameBoard[n][i] = 0\n elif(gameBoard[n+1][i] == 0):\n gameBoard[n+1][i] = gameBoard[n][i]\n gameBoard[n][i] = 0\n\n if doubCheck:\n if checkType:\n for n in range(0, 3):\n if(gameBoard[n][i] == gameBoard[n+1][i]):\n gameBoard[n+1][i] *= 2\n gameBoard[n][i] = 0\n else:\n for n in range(2, -1, -1):\n if(gameBoard[n][i] == gameBoard[n+1][i]):\n gameBoard[n+1][i] *= 2\n gameBoard[n][i] = 0\n elif(gameBoard[n+1][i] == 0):\n gameBoard[n+1][i] = gameBoard[n][i]\n gameBoard[n][i] = 0\n \n # Checks for changes pre- and post-movement\n change = gameBoard != oldBoard\n elif(move == 'u'):\n # Initial values of the board before movement.\n oldBoard = emptyBoard()\n\n for i in range(0, 4):\n for j in range(0, 4):\n oldBoard[i][j] = gameBoard[i][j]\n\n # Shifts all tiles upward.\n for i in range(0, 4):\n newCol = []\n empty = 0\n\n for j in range(0, 4):\n if gameBoard[j][i] > 0:\n newCol.append(gameBoard[j][i])\n empty += 1\n\n for n in range(0, 4 - empty):\n newCol.append(0)\n\n for n in range(0, 4):\n gameBoard[n][i] = newCol[n]\n\n\n if(newCol[0] == newCol[1] == newCol[2] == newCol[3]):\n doubCheck = True\n checkType = False\n elif(newCol[0] == newCol[1]) & (newCol[2] == newCol[3]):\n doubCheck = True\n checkType = True\n else:\n doubCheck = False\n\n for n in range(0, 3):\n if(gameBoard[n][i] == gameBoard[n+1][i]):\n gameBoard[n][i] *= 2\n gameBoard[n+1][i] = 0\n elif(gameBoard[n][i] == 0):\n gameBoard[n][i] = gameBoard[n+1][i]\n gameBoard[n+1][i] = 0\n\n if doubCheck:\n if checkType:\n for n in range(3, 0, -1):\n if(gameBoard[n][i] == gameBoard[n-1][i]):\n gameBoard[n-1][i] *= 2\n gameBoard[n][i] = 0\n else:\n for n in range(0, 3):\n if(gameBoard[n][i] == gameBoard[n+1][i]):\n gameBoard[n][i] *= 2\n gameBoard[n+1][i] = 0\n elif(gameBoard[n][i] == 0):\n gameBoard[n][i] = gameBoard[n+1][i]\n gameBoard[n+1][i] = 0\n\n # Checks for changes pre- and post-movement\n change = gameBoard != oldBoard \n\n # Checks if the player won the game.\n winner = checkWon(gameBoard)\n # Checks if the game is over before the next iteration of the game loop.\n gameOver = checkGameOver(gameBoard)\n\n # Fills the game board if the game is continued and the board has shifted.\n if(not (gameOver | winner)):\n if(change) & (move != ''):\n gameBoard = fillEmpty(gameBoard)\n else:\n # Displays the game board's final state.\n displayBoard(gameBoard)\n break\n \n # Loads the final message to the player.\n pygame.time.delay(10)\n font = pygame.font.SysFont('comic sans ms', 64)\n whatNow = False\n\n while not whatNow:\n if(gameOver):\n message = \"Game Over!\"\n t_color = SIXTEEN\n b_color = BLACK\n elif(winner):\n message = \"You won!\"\n t_color = BLACK\n b_color = GREEN\n\n # Displays the loaded message to screen.\n messageOutline = pygame.Rect(0, H//2 - 64, W, 128)\n 
endMessage = font.render(message, 0, t_color)\n pygame.draw.rect(WINDOW, b_color, messageOutline)\n WINDOW.blit(endMessage, (512//2 - len(message)*17, 512//2 - 48))\n pygame.display.update()\n\n # Lets the player choose to continue or quit after the game results.\n for event in pygame.event.get():\n if(event.type == pygame.KEYDOWN):\n whatNow = True\n if(event.type == pygame.QUIT):\n pygame.quit()\n return\n\n # Restart game if the player doesn't choose to quit.\n main()", "def update_potential_moves(self):\n\n board = self.get_board()\n\n for row_index, row in enumerate(board):\n\n for column_index, column in enumerate(row):\n\n if column is not None:\n \n position = self.reverse_position(column_index, row_index)\n game_piece_object = self.get_game_piece_object_at_position(position)\n game_piece_object.set_potential_moves(self.generate_moves(position))", "def shift_board_up(self) -> bool:\n for c in range(self.board_size):\n if self.board[0][c] is not None:\n return False\n\n for r in range(0, self.board_size - 1):\n for c in range(self.board_size):\n self.board[r][c] = self.board[r + 1][c]\n\n for c in range(self.board_size):\n self.board[self.board_size - 1][c] = None\n\n return True", "def gameOfLife(self, board) -> None:\n changelist = []\n for i in range(len(board)):\n for j in range(len(board[0])):\n if self.ischange(i, j, board):\n changelist.append([i, j])\n\n for x, y in changelist:\n board[x][y] = ~board[x][y] + 2", "def result(board, action):\n newBoard = copy.deepcopy(board)\n try:\n if newBoard[action[0]][action[1]] != EMPTY:\n raise IndexError\n else:\n newBoard[action[0]][action[1]] = player(newBoard)\n return newBoard\n except IndexError:\n print('Spot occupied')", "def update_board(self, mpos):\n pass", "def move(self, direction):\r\n direc = list(OFFSETS[direction])\r\n line = []\r\n dummy_board = self.board[:]\r\n if direction == 3:\r\n for i in range(self.height):\r\n self.board[i] = merge(self.board[i])\r\n self.compare(dummy_board)\r\n return self.board\r\n \r\n elif direction == 4:\r\n for i in range(self.height):\r\n line = self.board[i][::-1]\r\n self.board[i] = merge(line)\r\n self.board[i] = self.board[i][::-1]\r\n self.compare(dummy_board)\r\n return self.board\r\n \r\n \r\n elif direction == 1 or 2:\r\n dummy_board = str(self.board[:])\r\n if direction == 1:\r\n tile = [0,0]\r\n elif direction == 2:\r\n tile = [self.height - 1, 0]\r\n for i in range(self.width):\r\n tile2 = tile[:]\r\n while len(line) < self.height:\r\n line.append(self.get_tile(*tile2))\r\n tile2 = [x+y for x,y in zip(direc, tile2)]\r\n line = merge(line)\r\n tile2 = tile[:]\r\n for i in range(self.height):\r\n self.set_tile(*(tile2+[line[0]]))\r\n line.remove(line[0])\r\n tile2 = [x+y for x,y in zip(direc, tile2)]\r\n tile = [x+y for x,y in zip(tile, [0,1])]\r\n if dummy_board != self.__str__():\r\n self.new_tile()\r\n return self.board", "def test_copy(self):\n p = hw.create_tile_puzzle(3, 3)\n p2 = p.copy()\n self.assertTrue(p.get_board() == p2.get_board())\n p2.perform_move('up')\n self.assertFalse(p.get_board() == p2.get_board())", "def neighbor(self): \n newBoard = Board(self.n, False)\n for i in range(self.n):\n newBoard.queens[i][0] = self.queens[i][0]\n newBoard.queens[i][1] = self.queens[i][1]\n \n current_moves = self.moves()\n n_moves = len(current_moves)\n move_index = random.choice(range(n_moves))\n newBoard.queens[current_moves[move_index][0]] = current_moves[move_index][1]\n\n return newBoard", "def _update_board(self, start: (int, int), dest: (int, int), extra_info=''):\n\n 
piece = self.board[start[0]][start[1]]\n\n # Move the piece itself\n self.board[dest[0]][dest[1]] = piece\n self.board[start[0]][start[1]] = EMPTY_SPACE\n\n # Special moves\n if extra_info:\n\n # Castling kingside\n if extra_info == CASTLE_KINGSIDE:\n row = self._get_castling_row()\n\n # We already moved the king, so we just need to move the rook\n self.board[row][5] = self.board[row][7]\n self.board[row][7] = EMPTY_SPACE\n\n elif extra_info == CASTLE_QUEENSIDE:\n row = self._get_castling_row()\n\n # King already moved, so just update the rook\n self.board[row][3] = self.board[row][0]\n self.board[row][0] = EMPTY_SPACE\n\n else: # Pawn promotion\n self.board[dest[0]][dest[1]] = extra_info\n\n # en passant\n self._update_en_passant(start, dest, piece)", "def set_tile(self, row, col, value):\n # replace with your code\n if col < self.grid_height and row < self.grid_width:\n self.board[row][col] = value", "def deepCopy(self):\n clone = Connect_N_Board(self.getWidth(), self.getHeight())\n import copy\n clone.cell = copy.deepcopy(self.cell)\n return clone", "def result(board, action):\n board = copy.deepcopy(board)\n if player(board) == X:\n board[action[0]][action[1]] = X\n else:\n board[action[0]][action[1]] = O\n return board", "def update_board(self, board, self_color, coords):\r\n delta = [(0, -1), (-1, -1), (-1, 0), (-1, 1), (0, 1), (1, 1), (1, 0), (1, -1)]\r\n\r\n updated_board = [row[:] for row in board]\r\n updated_board[coords[0]][coords[1]] = self_color\r\n\r\n flip_positions = []\r\n for i in range(0, 8):\r\n flip_positions = self.find_flippable_disks(board, self_color, coords, delta[i])\r\n if flip_positions is not None:\r\n for flip_r, flip_c in flip_positions:\r\n updated_board[flip_r][flip_c] = self_color\r\n return updated_board", "def copy_board(self, temp_board):\r\n board2 = []\r\n\r\n for ele in temp_board:\r\n board2.append(ele)\r\n\r\n return board2", "def board(self, board):\n\n self._board = board", "def make_board():\n board = bingo_numbers()\n board[2][2] = ''\n return board", "def reset_board():\n board = initial_state()\n emit(\"update\", board)", "def make_move(self, move, player, board):\r\n #nBoard = board.copy()\r\n board[move] = player\r\n for d in core.DIRECTIONS:\r\n if self.find_bracket(move, player, board, d)!=None:\r\n self.make_flips(move, player, board, d)\r\n return board", "def get_all_possible_boards(board):\n new_boards = []\n for row in range(len(board)):\n for column in range(len(board)):\n tmp = board.copy()\n if board[row][column] == 0:\n tmp[row][column] = 2\n new_boards.append(tmp)\n return new_boards", "def update_board_with_move(self, cell, mark):\n row, col = cell\n self.storage[row][col] = mark", "def make_move(self, board, fieldy, fieldx):\n board[self.posy][self.posx], board[fieldy][fieldx] = board[fieldy][fieldx], board[self.posy][self.posx]\n self.posy = fieldy\n self.posx = fieldx", "def result(board, action):\n new_board = [row[:] for row in board]\n new_board[action[0]][action[1]] = player(new_board)\n return new_board", "def result(board, action):\n x = action[0]\n y = action[1]\n if x < 0 or x > 2 or y < 0 or y > 2 or not board[x][y] == EMPTY:\n raise ValueError\n temp_board = deepcopy(board)\n temp_board[x][y] = player(board)\n return temp_board", "def copy(self):\n return type(self)(self.game_board.copy(), self.current_piece)", "def resetBoard(self):\n self.space1 = 0\n self.space2 = 0\n self.space3 = 0\n self.space4 = 0\n self.space5 = 0\n self.space6 = 0", "def buildpuzzle(self):\r\n self.puzzle = copy.deepcopy(self.rows)\r\n 
if self.difficulty == 1:\r\n self.removedigits(1)\r\n if self.difficulty == 2:\r\n self.removedigits(2)\r\n if self.difficulty == 3:\r\n self.removedigits(3)", "def dirty_squares(self) -> None:\n row = ran(0, self.__squares.__len__() - 1)\n column = ran(0, self.__squares[0].__len__() - 1)\n self.__squares[row][column] = Floor._dirty\n print(\"Ensuciamos el piso y quedo así: \", self.__str__())", "def adjust_board(self, game_piece_object, position_to, position_from=str()):\n\n if position_from == str():\n\n position_from = position_to\n\n from_column, from_row = self.transpose_position(position_from)\n to_column, to_row = self.transpose_position(position_to)\n\n # Adjusts row one to the left for indexing; column is already adjusted.\n self.get_board()[from_row][from_column] = None\n self.get_board()[to_row][to_column] = game_piece_object" ]
[ "0.7664789", "0.70971596", "0.70476234", "0.7024398", "0.69861203", "0.6829364", "0.6778242", "0.6727011", "0.66938925", "0.6692377", "0.6625323", "0.6625323", "0.65893996", "0.6565318", "0.6564258", "0.65587217", "0.6539339", "0.65249294", "0.65223134", "0.6520972", "0.6520208", "0.6518662", "0.64938617", "0.6486647", "0.64504176", "0.6440349", "0.6439348", "0.6433935", "0.6427023", "0.64200073", "0.64131206", "0.6411382", "0.64112246", "0.64101225", "0.640923", "0.63940334", "0.63818085", "0.6373341", "0.6372674", "0.6369565", "0.6367701", "0.6358268", "0.63420093", "0.63407683", "0.6339568", "0.63274604", "0.6319517", "0.63142866", "0.6312594", "0.6309642", "0.6301394", "0.62925845", "0.6291022", "0.628496", "0.62805885", "0.62776846", "0.62772226", "0.62558573", "0.6238545", "0.6217149", "0.620535", "0.61941093", "0.61863583", "0.6184984", "0.61582345", "0.61518466", "0.6150735", "0.615045", "0.6147509", "0.6147141", "0.6145481", "0.613513", "0.61348295", "0.6122739", "0.6118617", "0.6118584", "0.6115303", "0.6114508", "0.61127", "0.6108128", "0.6095081", "0.6087648", "0.60803306", "0.6069858", "0.60693157", "0.60601574", "0.60520446", "0.60514647", "0.6050952", "0.6049016", "0.60427076", "0.60391176", "0.60352033", "0.60264754", "0.6025681", "0.60186124", "0.6014497", "0.60040295", "0.60004836", "0.5998216", "0.5981882" ]
0.0
-1
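For reference, the shift-and-merge logic that the 2048-style board snippets above repeat for every column can be condensed into a single line-collapsing helper. The sketch below is a minimal, self-contained illustration with an assumed function name; it is not taken from any of the listed repositories.

def merge_line(tiles):
    """Collapse one row/column of a 2048-style board toward index 0."""
    # Drop empty cells, then merge equal neighbours once, left to right.
    compact = [t for t in tiles if t > 0]
    merged = []
    i = 0
    while i < len(compact):
        if i + 1 < len(compact) and compact[i] == compact[i + 1]:
            merged.append(compact[i] * 2)  # combine the pair into one tile
            i += 2
        else:
            merged.append(compact[i])
            i += 1
    # Pad back to the original length with empty cells.
    return merged + [0] * (len(tiles) - len(merged))

print(merge_line([2, 2, 4, 0]))  # [4, 4, 0, 0]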
Helper method to create torch.DoubleTensor from pandas DataFrame. Upon creating the tensor, the data is copied to a new memory location. Hence, modifying the tensor won't affect the pandas DataFrame.
def create_tensor(self, idx):
    sample_tensor = torch.zeros(
        (self.n_dim, self.sample_lengths[idx].max()), dtype=torch.double)

    for i, col in enumerate(self.samples.columns):
        # In rare cases, a sample has different length across it's dimensions
        dim_length = len(self.samples[col][idx])

        # Create zero-padded torch.Tensor using data from pandas DataFrame
        sample_tensor[i, 0:dim_length] = torch.tensor(
            self.samples[col][idx], dtype=torch.double)

    return sample_tensor
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def convert_to_tensor(self, df, target=False):\n if target:\n return torch.LongTensor(df.values)\n return torch.FloatTensor(df.values)", "def transform(self, dataframe: DataFrame) -> DataFrame:", "def as_dataframe(self, dframe: pd.DataFrame, reset_index: bool = False):\n if reset_index:\n dframe = dframe.reset_index(drop=True)\n return self.__class__(dframe, self.meta.copy())", "def make_dataset(self, df, **kwargs):\n\t\treturn df", "def _to_constant_df(self, num):\n if isinstance(num, pd.DataFrame):\n# pdb.set_trace()\n return num\n else:\n return self.data['ones'].copy() * num", "def SweepFrame(*args, **kwargs):\n underride(kwargs, dtype=float)\n return pd.DataFrame(*args, **kwargs)", "def generate_proxy(element_type):\n # type: (type) -> pd.DataFrame\n dtype = dtype_from_typehint(element_type)\n if dtype is not object:\n return pd.Series(dtype=dtype)\n else:\n fields = named_fields_from_element_type(element_type)\n proxy = pd.DataFrame(columns=[name for name, _ in fields])\n for name, typehint in fields:\n dtype = dtype_from_typehint(typehint)\n proxy[name] = proxy[name].astype(dtype)\n\n return proxy", "def augment_dataframe(self, df: pd.DataFrame) -> pd.DataFrame:", "def pandas_data_frame_to_rpy2_data_frame(pDataframe):\n orderedDict = OrderedDict()\n\n for columnName in pDataframe.columns:\n columnValues = pDataframe[columnName].values\n filteredValues = \\\n [value if pandas.notnull(value) else robj.NA_Real \\\n for value in columnValues]\n try:\n orderedDict[columnName] = robj.FloatVector(filteredValues)\n except ValueError:\n orderedDict[columnName] = robj.StrVector(filteredValues)\n\n rDataFrame = robj.DataFrame(orderedDict)\n rDataFrame.rownames = robj.StrVector(pDataframe.index)\n\n return rDataFrame", "def test_dataframe_copying() -> None:\n df = pd.DataFrame(\n [\n (\"one\", \"two\"),\n (\"three\", \"four\"),\n ],\n columns=[\"odd\", \"even\"],\n )\n # assignment does *not* copy the underlying df\n df2 = df\n df3 = df.copy(deep=True)\n df.loc[:, \"odd\"] = pd.Series([\"five\", \"seven\"])\n\n assert df2.loc[:, \"odd\"].to_list() == [\"five\", \"seven\"]\n assert df2.loc[:, \"even\"].to_list() == [\"two\", \"four\"]\n assert df3.loc[:, \"odd\"].to_list() == [\"one\", \"three\"]\n assert df3.loc[:, \"even\"].to_list() == [\"two\", \"four\"]", "def _dataframe_conversion(da, order):\n assert da.data.squeeze().ndim == 2, (\n \"Dataframe conversion only possible for connectivity arrays when \"\n \"time dimension is missing\")\n da = da.squeeze().to_dataframe('mi').reset_index()\n da = da.pivot('sources', 'targets', 'mi')\n if isinstance(order, (list, np.ndarray)):\n da = da.reindex(order, axis='index').reindex(order, axis='columns')\n\n return da", "def double(self, column, nullable=False):\n self._last_column = self.table.add_column(column, \"double\", nullable=nullable)\n return self", "def transform(self, df: DataFrame) -> DataFrame:\n return df", "def exchange_df(self, df: pandas.DataFrame) -> \"Predictions\":\n return self.__class__(**collections.ChainMap(dict(df=df), dataclasses.asdict(self)))", "def clone(self):\n return DoubleTpMatrix(len(self)).copy_from_tp_(self)", "def from_dataframe(cls, dataframe):\n return cls(dataframe)", "def __init__(self, tensor, df):\n super().__init__()\n self.tensor = tensor\n self.df = df", "def to_real_series(self, data: pd.Series) -> pd.Series:\n ...", "def copy(self) -> 'DataFrame':\n new_data: Dict[str, ndarray] = {dt: arr.copy() for dt, arr in self._data.items()}\n new_columns: ColumnT = self._columns.copy()\n new_column_info: ColInfoT 
= self._copy_column_info()\n new_str_reverse_map = deepcopy(self._str_reverse_map)\n return self._construct_from_new(new_data, new_column_info, new_columns, new_str_reverse_map)", "def wrapDBMatrix(self,mat):\n return mat.todense()", "def transform(self, df: DataFrame) -> DataFrame:\n df = deepcopy(df) # don't overwrite input df\n\n for i, column in enumerate(self.columns):\n df[column] = df[column].values + self.delta_mus[i]\n\n return df", "def _tensorize(d, dtype=None, name=None, as_ref=False):\n return d._value(dtype, name, as_ref) # pylint: disable=protected-access", "def get_transformed_data(self, df):\n temp_df = pd.DataFrame(self.fa.transform(df))\n return temp_df", "def TimeFrame(*args, **kwargs):\n underride(kwargs, dtype=float)\n return pd.DataFrame(*args, **kwargs)", "def from_dataframe(cls, df: pd.DataFrame, schema: Schema, dims: List[str], value_column: str) -> \"Flat\":\n index = schema.encode_many(df[dims])\n vals = df[value_column].values\n dim_mask = schema.dims_to_mask(dims)\n vec = grblas.Vector.from_values(index, vals, size=dim_mask + 1)\n return cls(vec, schema, dims)", "def inverse_transform(self, df):\n try:\n df = df.astype(float)\n except Exception:\n raise ValueError(\"Data Cannot Be Converted to Numeric Float\")\n\n X = date_part(df.index, method=self.datepart_method)\n y = pd.DataFrame(self.model.predict(X))\n y.columns = df.columns\n y.index = df.index\n df = df + y\n return df", "def df_to_pred_dataset(dataframe, batch_size=1024):\n dataframe = dataframe.copy()\n ds = tf.data.Dataset.from_tensor_slices((dict(dataframe)))\n ds = ds.batch(batch_size)\n return ds", "def copy(self, **kwargs):\n return Tensor(self.data, **kwargs)", "def data_structure():\n\n items = [1.0, 2.0, 3.0, 4.0, 5.0 ]\n s = pd.Series(items, index=['a', 'b', 'c', 'd', 'e'])\n # s = pd.Series(np.random.randn(5), index=['a', 'b', 'c', 'd', 'e'])\n print s\n s = pd.Series(items)\n print s\n\n d= {'one': [1.0, 2.0, 3.0, 4.0], 'two': [4.0, 3.0, 2.0, 1.0]}\n\n df = pd.DataFrame(d)\n print df\n df = pd.DataFrame(d, index=['a', 'b', 'c', 'd'])\n print df\n\n\n data2 = [{'a': 1, 'b': 2}, {'a': 5, 'b': 10, 'c': 20}]\n df = pd.DataFrame(data2)\n\n print df", "def from_pandas(cls, df, data_cls):\n pass", "def copy(self):\n return self.as_dataframe(self.data.copy())", "def copy(self, deep=True):\r\n data = self._data\r\n if deep:\r\n data = data.copy()\r\n return SpatialDataFrame(data, sr=self.sr).__finalize__(self)", "def _to_dataframe(self, dataset_name):\n values = self[dataset_name][:]\n columns = self.get_columns(dataset_name)\n timestamps = self.get_timestamps(dataset_name)[...]\n if len(columns) < values.shape[1]:\n columns.resize(values.shape[1])\n\n # transform missing data into NaNs\n mask = missing_values(values) != 0\n try:\n values[mask] = numpy.nan\n except ValueError: # ValueError: cannot convert float NaN to integer\n # don't bother converting non-float arrays' -0.0 into NaNs\n pass\n\n dataframe = pandas.DataFrame(data=values,\n index=[datetime.datetime.fromtimestamp(t) for t in timestamps],\n columns=columns)\n return dataframe", "def dataframe(self):\n\n if self._dataframe is None:\n try:\n import pandas as pd\n except ImportError:\n raise RuntimeError('To enable dataframe support, '\n 'run \\'pip install datadotworld[pandas]\\'')\n\n self._dataframe = pd.DataFrame.from_records(self._iter_rows(),\n coerce_float=True)\n\n return self._dataframe", "def wrapDBVector(self,vec):\n return vec.todense()", "def to_dataframe(self, nan_as_null: bool = False, allow_copy: bool = True):\n pass", 
"def inverse_transform(self, df, trans_method: str = \"forecast\"):\n tile_len = len(self.tile_values_lag_1.index)\n df_len = df.shape[0]\n sdf = pd.DataFrame(\n np.tile(self.tile_values_lag_1, (int(np.ceil(df_len / tile_len)), 1))\n )\n if trans_method == 'original':\n sdf = sdf.tail(df_len)\n else:\n sdf = sdf.head(df_len)\n sdf.index = df.index\n sdf.columns = df.columns\n return df + sdf", "def df(self):\n return self._df", "def copy(self):\n return DataFrameDefault.register(pandas.DataFrame.copy)(self)", "def _fit(self, df):\n return df", "def to_scalar_df(df: pd.DataFrame) -> pd.DataFrame:\n scalar_df = df\n column_ordering = []\n for c, s in df.items():\n if s.dtype == \"object\":\n s_list = s.to_list()\n try:\n ncols = s_list[0].shape[0]\n split_cols = [f\"{c}_{k}\" for k in range(ncols)]\n sdf = pd.DataFrame(s_list, columns=split_cols)\n scalar_df = pd.concat([scalar_df, sdf], axis=1)\n column_ordering += split_cols\n except AttributeError as e:\n raise ValueError(f\"Expected series of lists, but found {s_list[0]}\") from e\n else:\n column_ordering.append(c)\n return scalar_df[column_ordering]", "def df_to_train_dataset(dataframe, target_col, shuffle=True, batch_size=1024):\n dataframe = dataframe.copy()\n labels = dataframe.pop(target_col)\n ds = tf.data.Dataset.from_tensor_slices((dict(dataframe), labels))\n if shuffle:\n ds = ds.shuffle(buffer_size=len(dataframe))\n ds = ds.batch(batch_size)\n return ds", "def inverse_transform(self, df):\n return df", "def prepareDataframeForTable(self, result):\n df = result\n if isinstance(df, pd.Series):\n df = pd.DataFrame({\"values\": df})\n\n if self._isIndexedDataframe(df):\n if df.size == 0:\n df[\"values\"] = np.nan\n elif len(df.columns) > 1:\n if isinstance(df.columns, pd.MultiIndex):\n df.columns = df.columns.map(' | '.join)\n df = df.stack()\n if isinstance(df, pd.Series):\n df = pd.DataFrame({\"values\": df})\n current_columns_name = list(df.index.names)\n current_columns_name[len(current_columns_name)-1] = \"Measures\"\n df.index.names = current_columns_name\n\n return df", "def data_from_dataframe(self, dataframe):\n self.dataframe = dataframe.drop_duplicates()\n #Convert numerical values into float type\n self.dataframe.apply(pandas.to_numeric, errors='ignore')\n #Convert timestamps into regular dates\n time_range = [datetime.datetime.fromtimestamp(time) for time in list(self.dataframe['time'])]\n beg = time_range[0]\n end = time_range[len(time_range)-1]\n #Attribute begining and ending dates\n self.beg = beg\n self.end = end", "def to_dataframe(self, copy=False):\n\n if not copy:\n return self.__df_timings\n else:\n return self.__df_timings.copy()", "def double_data(X):\r\n X.copy()\r\n X = X.append(X, ignore_index=True)\r\n return X", "def df_normalizer(df):\n df = tf.keras.utils.normalize(df, axis=1)\n\n return df", "def test_roundtrip_from_dataframe2(self):\n import pandas as pd\n df = pd.DataFrame(data={\n 'a': np.arange(3),\n 'b': np.arange(3)[::-1]\n })\n ca = carray(df, dtype=np.dtype(np.float))\n assert_array_equal(df, ca)\n self.assertEqual(ca.dtype, np.dtype(np.float),\n msg='carray has been created with invalid dtype')", "def _view_(a):\r\n return a.view((a.dtype[0], len(a.dtype.names)))", "def df_to_dataset(df: pd.DataFrame, shuffle=True, weighted=False, batch_size=32):\r\n labels = df.pop('Valor')\r\n\r\n if weighted: # Weight sample according to frequency of it's combustion type\r\n weights_combustion = {'Gasolina': 1, 'Diesel': 3.9, 'Álcool': 39.}\r\n weights = df['Combustível'].apply(lambda x: 
weights_combustion[x])\r\n ds = tf.data.Dataset.from_tensor_slices((dict(df), labels, weights))\r\n else:\r\n ds = tf.data.Dataset.from_tensor_slices((dict(df), labels))\r\n\r\n if shuffle:\r\n ds = ds.shuffle(buffer_size=len(df))\r\n\r\n ds = ds.batch(batch_size)\r\n\r\n return ds", "def from_dataframe(cls, df, data_cls):\n pass", "def setup_work_dataframe(raw_dataframe):\n try:\n return copy.deepcopy(raw_dataframe)\n except Exception('Failed to deepcopy raw_df to work_df') as exp:\n raise exp", "def getNewDF_X(self, originalDF):\n new_temps = [x for x in range(-10, 10, 1)]\n for unit in range(-10, 10, 1):\n new_temps[unit] = originalDF[['R1', 'G1', 'B1', 'R2', 'G2', 'B2', 'R3', 'G3', 'B3']].iloc[:] + unit\n new_temps[unit]['W1'] = originalDF['W1']\n new_temps[unit]['W2'] = originalDF['W2']\n new_temps[unit]['W3'] = originalDF['W3']\n returnVal = pd.concat(new_temps)\n return returnVal", "def at_df(self, df):\n result = self.at(**df).reset_coords(drop=True).to_dataframe()\n if isinstance(df, pd.DataFrame):\n result.index = df.index\n return result", "def xyz_triple_as_pandas_df(numpy_xyz):\n\t\t\n\tindx=np.arange(0, numpy_xyz.shape[0])\n\txyz_df = pd.DataFrame(numpy_xyz, index=indx)\n\txyz_df.columns = ['x','y','z']\n\n\treturn xyz_df", "def to_pandas(df):\n pd_df = pd.concat(ray.get(df._df))\n pd_df.index = df.index\n pd_df.columns = df.columns\n return pd_df", "def copy(self):\n\n return BenchmarkObj(self.__df_timings.copy(), dtype=self.dtype, multivar=self.multivar, multiindex=self.multiindex)", "def test_roundtrip_from_dataframe1(self):\n import pandas as pd\n df = pd.DataFrame(data={\n 'a': np.arange(3),\n 'b': np.arange(3)[::-1]\n })\n assert_array_equal(df, carray(df, dtype=None))", "def convert_dtype(data_df, settings):\n data_df = data_df.astype(settings[\"dtype\"])\n return data_df", "def getNewDF_Y(self, originalDF):\n new_temps = [x for x in range(-10, 10, 1)]\n for unit in range(-10, 10, 1):\n new_temps[unit] = originalDF\n returnVal = pd.concat(new_temps)\n return returnVal", "def series_from_dataframe(df, index_column: str, value_column: str=None):\n\n if len(df.columns) > 2:\n df = df[[index_column, value_column]].copy()\n else:\n df = df.copy()\n df.set_index(index_column, inplace=True)\n sr = df.squeeze()\n sr.name = value_column\n return sr", "def test_create_from_dataframe(self):\n self.insert()\n data = self.tbl.select()\n data.index.name = None\n tbl = Table.create(':memory:', \"Foo_2\", data, verbose=True,\n primary_key='id', autoincrement=True)\n self.check(self.idata, tbl.select())", "def forward(self, raw_X):\n return make_df(raw_X, self.features)", "def __to_torch(self):\n self.adj = Variable(torch.LongTensor(self.adj))\n \n if self.cuda:\n self.adj = self.adj.cuda()", "def from_pandas(self, obj, index=True):\n return Reader(_from_pandas(obj, index=index))", "def _to_tensor(cls, tensor):\n if isinstance(tensor, Tensor):\n return tensor\n return Tensor(data=tensor)", "def to_dataframe(self, value_column=\"* values *\") -> pd.DataFrame:\n index, vals = self.vector.to_values()\n df = self.schema.decode_many(index, self.dims_list)\n df[value_column] = vals\n return df", "def bench(df, dtype='t', copy=False, multivar=False, multiindex=False):\n\n map_dtype = {'t': 'timings', 's': 'speedups', 'st': 'scaled_timings'}\n if dtype in map_dtype:\n dt = map_dtype[dtype]\n elif dtype in map_dtype.values():\n dt = dtype\n else:\n raise TypeError('data type ' + '\"' + str(dtype) + '\" not understood')\n\n if copy:\n df_new = df.copy()\n else:\n df_new = df\n\n return 
BenchmarkObj(df_new, dtype=dt, multivar=multivar, multiindex=multiindex)", "def MatConvert(x, device, dtype):\r\n x = torch.from_numpy(x).to(device, dtype)\r\n return x", "def transform(\n self,\n *,\n df: pd.DataFrame,\n destination: Optional[Union[FieldModel, ColumnModel]] = None,\n source: Optional[List[Union[ColumnModel, ModifierModel]]] = None,\n ) -> pd.DataFrame:\n return df", "def as_dframe(cls, dataset):\n return dataset.dframe()", "def make_date(cls, df: pd.DataFrame, date_field: str) -> pd.DataFrame:\n field_dtype = df[date_field].dtype\n if isinstance(field_dtype, pd.core.dtypes.dtypes.DatetimeTZDtype):\n field_dtype = np.datetime64\n if not np.issubdtype(field_dtype, np.datetime64):\n df[date_field] = pd.to_datetime(df[date_field], infer_datetime_format=True)\n return df", "def create_ts_by_column(ts: \"TSDataset\", column: str) -> \"TSDataset\":\n from etna.datasets import TSDataset\n\n new_df = ts[:, :, [column]]\n new_columns_tuples = [(x[0], \"target\") for x in new_df.columns.tolist()]\n new_df.columns = pd.MultiIndex.from_tuples(new_columns_tuples, names=new_df.columns.names)\n return TSDataset(new_df, freq=ts.freq)", "def to_dataset(df, batch_size=5):\n df = df.copy()\n labels = df.pop('Relevant')\n ds = tf.data.Dataset.from_tensor_slices((dict(df), labels))\n ds = ds.batch(batch_size)\n return ds", "def as_dataframe(self) -> \"pd.DataFrame\":\n import pandas as pd\n\n df = pd.DataFrame([row.as_series() for row in self.rows])\n return df", "def get_as_pandas_dataframe(self):\n pd_df = pd.DataFrame()\n for name in self.dict_colname_to_index:\n pd_df[name] = np.copy(self[name])\n return pd_df", "def to_data( x):\n if torch.cuda.is_available():\n x = x.cpu()\n return x.data.numpy()", "def df_to_dmatrix(df, label=None, save_to=None):\n if label:\n label_col = pd.DataFrame(df.loc[:, label], columns=[label])\n label_col.to_csv(\"../var/%s.csv\" % label, index=False)\n print(\"Label written to ../var/%s.csv\" % label)\n del label_col\n df.drop(label, axis=1, inplace=True)\n\n to_replace = [\"int64\", \"uint8\", \"float64\"]\n replace_with = [\"int\", \"int\", \"float\"]\n f_types = df.dtypes.replace(to_replace, replace_with)\n\n dmat = xgboost.DMatrix(data=df.values, feature_names=cols(df),\n feature_types=f_types)\n if save_to:\n dmat.save_binary(save_to)\n print(\"DMatrix saved to %s\" % save_to)\n return dmat", "def dtype(self) -> tf.dtypes.DType:", "def change_to_object(column, data):\n data[column] = data[column].astype('object')", "def _convert(frame):\n frame = frame.convert_objects(convert_numeric=True)\n for column in frame:\n if column in c.dates:\n frame[column] = frame[column].astype('datetime64')\n return frame", "def DOUBLE(*args, df=None, ts_col=None, **kwargs):\n\n return __smoothing(\n *args,\n kind='double',\n df=df,\n ts_col=ts_col,\n **kwargs,\n )", "def astype(self, dtype, copy=True):\n if self.dtype == dtype:\n return self\n\n if isinstance(dtype, FletcherDtype):\n dtype = dtype.arrow_dtype.to_pandas_dtype()\n arrow_type = dtype.arrow_dtype\n elif isinstance(dtype, pa.DataType):\n dtype = dtype.to_pandas_dtype()\n arrow_type = dtype\n else:\n dtype = np.dtype(dtype)\n arrow_type = None\n # NumPy's conversion of list->unicode is differently from Python's\n # default. 
We want to have the default Python output, so force it here.\n if pa.types.is_list(self.dtype.arrow_dtype) and dtype.kind == \"U\":\n return np.vectorize(six.text_type)(np.asarray(self))\n if arrow_type is not None:\n return FletcherArray(np.asarray(self).astype(dtype), dtype=arrow_type)\n else:\n return np.asarray(self).astype(dtype)", "def _repivot_dataframe(armscore_df: pd.DataFrame) -> pd.DataFrame:\n\n transform = (\n armscore_df.set_index([\"trial_index\", \"arm_name\", \"metric_name\"])\n .unstack(\"metric_name\")\n .reset_index()\n )\n new_cols = transform.columns.to_flat_index()\n parameters_holder = transform[\n list(filter(lambda x: \"parameters\" in x, new_cols))[0]\n ]\n transform.drop(columns=\"parameters\", level=0, inplace=True)\n new_cols = new_cols.drop(labels=filter(lambda x: \"parameters\" in x, new_cols))\n transform.columns = [\"trial_index\", \"arm_name\"] + [\n \"_\".join(tpl) for tpl in new_cols[2:]\n ]\n transform[\"parameters\"] = parameters_holder\n # pyre-fixme[7]: Expected `DataFrame` but got `Union[DataFrame, Series]`.\n return transform", "def torch(self):\n tensor = self.data * 2**self.scale\n \n # Check for and warn about errors in conversion\n if bad_conversion(self, tensor):\n warnings.warn(\"Underflow and/or overflow detected \"\n \"during torch() call\", RuntimeWarning)\n\n return tensor", "def _transfer(self, dfnew):\n newobj = copy.deepcopy(self) #This looks like None, but is it type (MetaPandasObject, just __union__ prints None\n newobj._frame = dfnew\n \n # THESE ARE NEVER TRANSFERED AT DF LEVEL, JUST CREATED NEW. TRY\n # df.loc\n # a = df*50\n # a._loc ---> Will be None\n #newobj._loc = self._loc\n #newobj._iloc = self._iloc\n #newobj._ix = self._ix \n return newobj", "def norm2rand(self, df):\n ## Check invariants\n if not set(self.var_rand).issubset(set(df.columns)):\n raise ValueError(\"model.var_rand must be subset of df.columns\")\n\n data = zeros((df.shape[0], self.n_var_rand))\n for i in range(df.shape[0]):\n data[i] = self.z2x(df[self.var_rand].iloc[i].values)\n\n return DataFrame(data=data, columns=self.var_rand)", "def _create_dataset(self, *data):\n # Make sure data is a tuple of dense tensors\n data = [self._to_torch(x, dtype=torch.FloatTensor) for x in data]\n return TensorDataset(*data)", "def to_data(x):\n if torch.cuda.is_available():\n x = x.cpu()\n return x.data.numpy()", "def forward(self, x, dt, do_normalization=True):\n return x", "def dst(df):\n pass", "def transform(self, data: pd.DataFrame):\n raise NotImplementedError", "def _convert_to_dummies_sklearn(df: pd.DataFrame) -> pd.DataFrame:\n raise NotImplementedError", "def build_dataframe(self):\n #Freq 0.0 2.5\n #ElementID NodeID Item\n #6901 6901 angle 0.000000+0.000000j 0.000000+0.000000j\n # sc 13.847674-0.461543j 13.855294-0.462052j\n # sd 0.625892-0.020861j 0.623742-0.020717j\n # se -12.178029+0.405894j -12.185331+0.406381j\n # sf 1.043753-0.034788j 1.046222-0.034953j\n # 6904 angle 0.000000+0.000000j 0.000000+0.000000j\n # sc -1.660571-0.416504j -1.663256-0.416978j\n # sd -2.790551+0.024178j -2.789738+0.024356j\n # se 0.627616+0.450933j 0.629571+0.451455j\n # sf 1.757596+0.010251j 1.756053+0.010121j\n #6902 6901 angle 0.000000+0.000000j 0.000000+0.000000j\n headers = self.headers\n column_names, column_values = self._build_dataframe_transient_header()\n self.data_frame = self._build_pandas_transient_element_node(\n column_values, column_names,\n headers, self.element_node, self.data)", "def to_tensor(data):\n if np.iscomplexobj(data):\n data = np.stack((data.real, 
data.imag), axis=-1)\n return torch.from_numpy(data)", "def to_tensor(data):\n if np.iscomplexobj(data):\n data = np.stack((data.real, data.imag), axis=-1)\n return torch.from_numpy(data)", "def to_tensor(data):\n if np.iscomplexobj(data):\n data = np.stack((data.real, data.imag), axis=-1)\n return torch.from_numpy(data)", "def convert(self, df):\n return convert_df_to_model(\n model_type=self.model_type, df=df,\n outcome_variables=self.outcome_variables,\n fixed_effects=self.fixed_effects,\n random_effect=self.random_effect,\n spline=self.spline,\n offset=self.offset,\n weight=self.weight\n )", "def dataframe(self):\n return self._df" ]
[ "0.61371243", "0.571318", "0.5686274", "0.55425775", "0.55360436", "0.5491242", "0.541506", "0.5355122", "0.5336436", "0.5318567", "0.5315715", "0.5312427", "0.5299223", "0.5269183", "0.52476364", "0.5246885", "0.5219819", "0.52038544", "0.51658046", "0.51648957", "0.51505226", "0.514884", "0.51460254", "0.5129648", "0.5110298", "0.50946623", "0.50665236", "0.50564545", "0.5049797", "0.5039609", "0.502814", "0.50108117", "0.5003568", "0.50033224", "0.50024915", "0.5001319", "0.4997125", "0.49902132", "0.49886128", "0.49763104", "0.49720848", "0.49560288", "0.49474224", "0.49279147", "0.49176848", "0.49137717", "0.4913681", "0.49041378", "0.49035895", "0.4902409", "0.49021095", "0.48925617", "0.48858958", "0.488286", "0.48797277", "0.48711607", "0.48638558", "0.48444358", "0.48420644", "0.4838952", "0.48233667", "0.48223707", "0.48173535", "0.48158872", "0.48118615", "0.48083514", "0.4804794", "0.4797363", "0.47898668", "0.47822812", "0.47772443", "0.47771862", "0.4770982", "0.47707063", "0.47530946", "0.47490656", "0.47432926", "0.47401324", "0.4738941", "0.47387376", "0.4734102", "0.47338217", "0.47333685", "0.47240666", "0.47157097", "0.47100902", "0.47040874", "0.47009826", "0.4698306", "0.46899635", "0.46897522", "0.4680564", "0.46722892", "0.46652675", "0.4658537", "0.46580583", "0.46580583", "0.46580583", "0.46536398", "0.46522343" ]
0.5500474
5
Custom collate_fn that is called with a list of multivariate samples to yield a minibatch. It preserves the data structure, e.g., if each sample is a dictionary, it outputs a dictionary with the same set of keys but batched Tensors as values (or lists if the values cannot be converted into Tensors).
def collate_fn(sample_list):
    x_ref_batch = []
    x_pos_batch = []
    x_negs_batch = []
    label_batch = []

    for sample in sample_list:
        x_ref_batch.append(sample["x_ref"])
        x_pos_batch.append(sample["x_pos"])
        x_negs_batch.append(sample["x_negs"])
        label_batch.append(sample["label"])

    # Use torch API for RNNs to pad samples to fixed length, L, and stack them in batch-tensor of dim (B,n_dim,L).
    x_ref_batch = pad_sequence(
        x_ref_batch, batch_first=True, padding_value=0)  # (B,L,n_dim)
    x_ref_batch = x_ref_batch.transpose(1, 2)  # (B,n_dim,L)

    x_pos_batch = pad_sequence(
        x_pos_batch, batch_first=True, padding_value=0)  # (B,L,n_dim)
    x_pos_batch = x_pos_batch.transpose(1, 2)  # (B,n_dim,L)

    # Pad neg tensors with varying length of first dim L, and produce batch (B,K,n_dim,L') where L' is padded length
    x_negs_batch = pad_sequence(x_negs_batch, batch_first=True,
                                padding_value=0)  # (B, L', K, n_dim)
    x_negs_batch = x_negs_batch.transpose(1, 2)  # (B, K, L', n_dim)
    x_negs_batch = x_negs_batch.transpose(2, 3)  # (B, K, n_dim, L')

    return {
        'x_ref': x_ref_batch,
        'x_pos': x_pos_batch,
        'x_negs': x_negs_batch,
        'label': label_batch
    }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def collate_fn(batch):\n\n flattened_batch = []\n for data in batch:\n num_examples = len(data['image'])\n for i in range(num_examples):\n flattened_batch.append({\n k: v[i] for k, v in data.items()\n })\n\n return default_collate(flattened_batch)", "def collate_minibatch(list_of_blobs):\n Batch = {key: [] for key in list_of_blobs[0]}\n # Because roidb consists of entries of variable length, it can't be batch into a tensor.\n # So we keep roidb in the type of \"list of ndarray\".\n list_of_roidb = [blobs.pop('roidb') for blobs in list_of_blobs]\n for i in range(0, len(list_of_blobs), cfg.TRAIN.IMS_PER_BATCH):\n mini_list = list_of_blobs[i:(i + cfg.TRAIN.IMS_PER_BATCH)]\n # Pad image data\n # mini_list = pad_image_data(mini_list)\n minibatch = default_collate(mini_list)\n minibatch['roidb'] = list_of_roidb[i:(i + cfg.TRAIN.IMS_PER_BATCH)]\n for key in minibatch:\n Batch[key].append(minibatch[key])\n\n return Batch", "def collate_fn(list_samples):\n data = dict(outputs=None) # compliant with DataManager <collate_fn>\n data[\"inputs\"] = torch.stack([torch.from_numpy(sample[0]) for sample in list_samples], dim=0).float()\n data[\"labels\"] = torch.stack([torch.tensor(sample[1]) for sample in list_samples], dim=0).squeeze().float()\n return DataItem(**data)", "def collate_minibatch(list_of_blobs):\n Batch = {key: [] for key in list_of_blobs[0]}\n\n list_of_target = [blobs.pop('target') for blobs in list_of_blobs]\n # list_of_image = [blobs.pop('image') for blobs in list_of_blobs]\n batch_size = Logo_512['numpergpu']\n\n for i in range(0, len(list_of_blobs), batch_size):\n # minibatch = {}\n mini_list = list_of_blobs[i:(i + batch_size)]\n # Pad image data\n minibatch = default_collate(mini_list)\n minibatch['target'] = list_of_target[i:(i + batch_size)]\n for key in minibatch:\n Batch[key].append(minibatch[key])\n\n return Batch", "def collate_minibatch(self, list_of_blobs):\n def pad_image_data(list_of_blobs):\n max_shape = np.array([blobs['data'].shape[1:] for blobs in list_of_blobs]).max(axis=0)\n output_list = []\n for blobs in list_of_blobs:\n data_padded = np.zeros((3, max_shape[0], max_shape[1]), dtype=np.float32)\n _, h, w = blobs['data'].shape\n data_padded[:, :h, :w] = blobs['data']\n blobs['data'] = data_padded\n output_list.append(blobs)\n return output_list\n \n Batch = {key: [] for key in list_of_blobs[0]}\n # Because roidb consists of entries of variable length, it can't be batch into a tensor.\n # So we keep roidb in the type of \"list of ndarray\".\n list_of_roidb = [blobs.pop('roidb') for blobs in list_of_blobs]\n for i in range(0, len(list_of_blobs), CONFIG.SOLVER.IMS_PER_BATCH):\n mini_list = list_of_blobs[i:(i + CONFIG.SOLVER.IMS_PER_BATCH)]\n # Pad image data\n mini_list = pad_image_data(mini_list)\n minibatch = default_collate(mini_list)\n minibatch['roidb'] = list_of_roidb[i:(i + CONFIG.SOLVER.IMS_PER_BATCH)]\n for key in minibatch:\n Batch[key].append(minibatch[key])\n\n return Batch", "def collate_fn(batch, samples_per_gpu=1):\n if not isinstance(batch, Sequence):\n raise TypeError(f'{batch.dtype} is not supported.')\n\n if isinstance(batch[0], list):\n batch = [item for _ in batch for item in _]\n\n if isinstance(batch[0], DataContainer):\n assert len(batch) % samples_per_gpu == 0\n stacked = []\n if batch[0].cpu_only:\n for i in range(0, len(batch), samples_per_gpu):\n stacked.append(\n [sample.data for sample in batch[i:i + samples_per_gpu]])\n return DataContainer(\n stacked, batch[0].stack, batch[0].padding_value, cpu_only=True)\n elif batch[0].stack:\n for i 
in range(0, len(batch), samples_per_gpu):\n assert isinstance(batch[i].data, torch.Tensor)\n\n if batch[i].pad_dims is not None:\n ndim = batch[i].dim()\n assert ndim > batch[i].pad_dims\n max_shape = [0 for _ in range(batch[i].pad_dims)]\n for dim in range(1, batch[i].pad_dims + 1):\n max_shape[dim - 1] = batch[i].size(-dim)\n for sample in batch[i:i + samples_per_gpu]:\n for dim in range(0, ndim - batch[i].pad_dims):\n assert batch[i].size(dim) == sample.size(dim)\n for dim in range(1, batch[i].pad_dims + 1):\n max_shape[dim - 1] = max(max_shape[dim - 1],\n sample.size(-dim))\n padded_samples = []\n for sample in batch[i:i + samples_per_gpu]:\n pad = [0 for _ in range(batch[i].pad_dims * 2)]\n for dim in range(1, batch[i].pad_dims + 1):\n pad[2 * dim -\n 1] = max_shape[dim - 1] - sample.size(-dim)\n padded_samples.append(\n F.pad(\n sample.data, pad, value=sample.padding_value))\n stacked.append(collate(padded_samples))\n elif batch[i].pad_dims is None:\n stacked.append(\n collate([\n sample.data\n for sample in batch[i:i + samples_per_gpu]\n ]))\n else:\n raise ValueError(\n 'pad_dims should be either None or integers (1-3)')\n\n else:\n for i in range(0, len(batch), samples_per_gpu):\n stacked.append(\n [sample.data for sample in batch[i:i + samples_per_gpu]])\n return DataContainer(stacked, batch[0].stack, batch[0].padding_value)\n elif isinstance(batch[0], Sequence):\n transposed = zip(*batch)\n return [collate(samples, samples_per_gpu) for samples in transposed]\n\n elif isinstance(batch[0], Mapping):\n res = dict()\n for key in batch[0]:\n if isinstance(batch[0][key], torch.Tensor):\n res.update({key: collate([d[key] for d in batch], samples_per_gpu)})\n else:\n res.update({key: [d[key] for d in batch]})\n\n return res\n # return {\n # key: collate([d[key] for d in batch], samples_per_gpu)\n # for key in batch[0]\n # }\n else:\n return collate(batch)", "def collate_fn(batch: list[dict[str, Tensor]]) -> dict[str, Any]:\n output: dict[str, Any] = {}\n output[\"image\"] = torch.stack([sample[\"image\"] for sample in batch])\n output[\"boxes\"] = [sample[\"boxes\"] for sample in batch]\n output[\"labels\"] = [torch.tensor([1] * len(sample[\"boxes\"])) for sample in batch]\n return output", "def collate_fn(self, *args):\n return TupleMiniBatch(default_collate(*args))", "def collate_fn(batch):\n metadata = []\n for el in batch:\n metadata.append(el[\"metadata\"])\n del el[\"metadata\"]\n\n batch = default_collate(batch)\n\n batch[\"metadata\"] = metadata\n\n return batch", "def collate_fn(batch):\r\n transposed = zip(*batch)\r\n lbd = lambda batch:torch.cat([torch.from_numpy(b).long() for b in batch])\r\n return [lbd(samples) for samples in transposed]", "def collate_fn(batch):\n # eliminate invalid data (where boxes is [] tensor)\n old_batch_len = len(batch)\n batch = [x for x in batch if x[1]['boxes'].shape[0] != 0]\n # try refill empty sample by other sample in current batch\n #print('batch len = ', old_batch_len)\n #print('new batch len = ', len(batch))\n new_batch_len = len(batch)\n for i in range(new_batch_len, old_batch_len):\n batch.append(copy.deepcopy(batch[i%new_batch_len]))\n #print('batch = ', batch)\n #print('filled batch len = ', len(batch))\n batch = list(zip(*batch)) # batch[0]: data tensor, batch[1]: targets dict\n\n batch[0] = nested_tensor_from_tensor_list(batch[0])\n return tuple(batch)", "def collate_fn(self, batch: List[Dict]) -> List[Dict]:\n # package up a list of individual interventions into multiple batched interventions\n # batch may contain interventions on 
different locations\n high_node_to_minibatches = defaultdict(list)\n for d in batch:\n high_nodes = tuple(sorted(d[\"high_intervention\"].intervention._values.keys()))\n high_node_to_minibatches[high_nodes].append(d)\n\n minibatches = []\n for minibatch_dicts in high_node_to_minibatches.values():\n low_base_dict, low_ivn_dict, low_loc_dict = pack_interventions(\n [d[\"low_intervention\"] for d in minibatch_dicts],\n batch_dim=self.batch_dim,\n non_batch_inputs=self.low_non_batch_leaves\n )\n low_base_input = GraphInput(\n low_base_dict, batched=True, batch_dim=self.batch_dim,\n cache_results=self.cache_base_results,\n key_leaves=self.low_key_leaves,\n non_batch_leaves=self.low_non_batch_leaves\n )\n low_realizations = [d[\"low_intervention\"].realization for d in minibatch_dicts]\n if all(rzn is None for rzn in low_realizations):\n low_realizations = None\n low_ivn = Intervention.batched(\n low_base_input, low_ivn_dict, low_loc_dict,\n batch_dim=self.batch_dim, cache_base_results=self.cache_interv_results,\n realization=low_realizations\n )\n\n high_base_dict, high_ivn_dict, high_loc_dict = pack_interventions(\n [d[\"high_intervention\"] for d in minibatch_dicts],\n batch_dim=self.batch_dim,\n non_batch_inputs=self.high_non_batch_leaves\n )\n high_base_input = GraphInput(\n high_base_dict, batched=True, batch_dim=self.batch_dim,\n cache_results=self.cache_base_results,\n key_leaves=self.high_key_leaves,\n non_batch_leaves=self.high_non_batch_leaves\n )\n high_ivn = Intervention.batched(\n high_base_input, high_ivn_dict, high_loc_dict,\n batch_dim=self.batch_dim, cache_base_results=self.cache_interv_results)\n\n minibatches.append({\"low_intervention\": low_ivn,\n \"high_intervention\": high_ivn})\n\n return minibatches", "def collate_fn(\n self,\n batch: List[\n Tuple[\n np.ndarray,\n np.ndarray,\n np.ndarray,\n np.ndarray,\n int,\n int,\n bool,\n bool,\n Optional[np.ndarray],\n Optional[np.ndarray],\n ]\n ],\n ) -> Union[\n Tuple[Tensor, Tensor, Tensor, Tensor, Any, Any, Any, Any],\n Tuple[Tensor, Tensor, Tensor, Tensor, Any, Any, Any, Any, Any, Any],\n ]:\n if not self.use_audio:\n inp_ids, segment_ids, inp_mask, st_mask, n_preceding, query_ids, is_first, is_last = zip(*batch)\n return (\n pad_sequence([torch.tensor(x) for x in inp_ids], batch_first=True, padding_value=0),\n pad_sequence([torch.tensor(x) for x in segment_ids], batch_first=True, padding_value=0),\n pad_sequence([torch.tensor(x) for x in inp_mask], batch_first=True, padding_value=0),\n pad_sequence([torch.tensor(x) for x in st_mask], batch_first=True, padding_value=0),\n n_preceding,\n query_ids,\n is_first,\n is_last,\n )\n (\n inp_ids,\n segment_ids,\n inp_mask,\n st_mask,\n n_preceding,\n query_ids,\n is_first,\n is_last,\n features,\n features_length,\n ) = zip(*batch)\n return (\n pad_sequence([torch.tensor(x) for x in inp_ids], batch_first=True, padding_value=0),\n pad_sequence([torch.tensor(x) for x in segment_ids], batch_first=True, padding_value=0),\n pad_sequence([torch.tensor(x) for x in inp_mask], batch_first=True, padding_value=0),\n pad_sequence([torch.tensor(x) for x in st_mask], batch_first=True, padding_value=0),\n n_preceding,\n query_ids,\n is_first,\n is_last,\n pad_sequence([torch.tensor(x) for x in features], batch_first=True, padding_value=0).float(),\n torch.tensor(features_length, dtype=torch.long),\n )", "def collate_without_batching_dict(batch):\n\n error_msg = \"batch must contain tensors, numbers, dicts or lists; found {}\"\n elem_type = type(batch[0])\n if isinstance(batch[0], torch.Tensor):\n 
out = None\n if _use_shared_memory:\n # If we're in a background process, concatenate directly into a\n # shared memory tensor to avoid an extra copy\n numel = sum([x.numel() for x in batch])\n storage = batch[0].storage()._new_shared(numel)\n out = batch[0].new(storage)\n return torch.stack(batch, 0, out=out)\n elif elem_type.__module__ == 'numpy' and elem_type.__name__ != 'str_' \\\n and elem_type.__name__ != 'string_':\n elem = batch[0]\n if elem_type.__name__ == 'ndarray':\n # array of string classes and object\n if re.search('[SaUO]', elem.dtype.str) is not None:\n raise TypeError(error_msg.format(elem.dtype))\n\n return torch.stack([torch.from_numpy(b) for b in batch], 0)\n if elem.shape == (): # scalars\n py_type = float if elem.dtype.name.startswith('float') else int\n return numpy_type_map[elem.dtype.name](list(map(py_type, batch)))\n elif isinstance(batch[0], int_classes):\n return torch.LongTensor(batch)\n elif isinstance(batch[0], float):\n return torch.DoubleTensor(batch)\n elif isinstance(batch[0], string_classes):\n return batch\n elif isinstance(batch[0], collections.Mapping):\n return [d for d in batch]\n # return {key: collate_without_batching_dict_list([d[key] for d in batch]) for key in batch[0]}\n elif isinstance(batch[0], collections.Sequence):\n transposed = zip(*batch)\n return [collate_without_batching_dict(samples) for samples in transposed]\n\n raise TypeError((error_msg.format(type(batch[0]))))", "def collate_fn(self, batch):\n images, boxes, categories = [], [], []\n\n for b in batch:\n images.append(b['img'])\n boxes.append(b['box'])\n categories.append(b['category'])\n\n images = torch.stack(images, dim=0)\n\n # tensor (N, 3, 300, 300), 3 lists of N tensors each\n return {\n 'imgs': images,\n 'boxes': boxes,\n 'categories': categories\n }", "def collate_batch(self) -> Dict[str, Any]:\n pass", "def collate_fn(data: list):\n def pad_tensor(inp):\n assert type(inp[0]) == torch.Tensor\n it = iter(inp)\n t = next(it)\n max_shape = list(t.shape)\n while True:\n try:\n t = next(it)\n for i in range(len(max_shape)):\n max_shape[i] = int(max(max_shape[i], t.shape[i]))\n except StopIteration:\n break\n max_shape = np.array(max_shape)\n\n padded_ts = []\n for t in inp:\n pad_pattern = np.zeros(2 * len(max_shape), dtype=np.int64)\n pad_pattern[::-2] = max_shape - np.array(t.shape)\n pad_pattern = tuple(pad_pattern.tolist())\n padded_ts.append(F.pad(t, pad_pattern, 'constant', 0))\n\n return padded_ts\n\n def stack(inp):\n if type(inp[0]) == list:\n ret = []\n for vs in zip(*inp):\n ret.append(stack(vs))\n elif type(inp[0]) == dict:\n ret = {}\n for kvs in zip(*[x.items() for x in inp]):\n ks, vs = zip(*kvs)\n for k in ks:\n assert k == ks[0], \"Key value mismatch.\"\n ret[k] = stack(vs)\n elif type(inp[0]) == torch.Tensor:\n new_t = pad_tensor(inp)\n ret = torch.stack(new_t, 0)\n elif type(inp[0]) == np.ndarray:\n new_t = pad_tensor([torch.from_numpy(x) for x in inp])\n ret = torch.stack(new_t, 0)\n elif type(inp[0]) == str:\n ret = inp\n else:\n raise ValueError('Cannot handle type {}'.format(type(inp[0])))\n return ret\n\n ret = stack(data)\n\n # compute CPU-intensive matrix K1, K2 here to leverage multi-processing nature of dataloader\n # if 'Gs' in ret and 'Hs' in ret and :\n # try:\n # G1_gt, G2_gt = ret['Gs']\n # H1_gt, H2_gt = ret['Hs']\n # sparse_dtype = np.float32\n # K1G = [kronecker_sparse(x, y).astype(sparse_dtype) for x, y in zip(G2_gt, G1_gt)] # 1 as source graph, 2 as target graph\n # K1H = [kronecker_sparse(x, y).astype(sparse_dtype) for x, y in 
zip(H2_gt, H1_gt)]\n # K1G = CSRMatrix3d(K1G)\n # K1H = CSRMatrix3d(K1H).transpose()\n #\n # ret['Ks'] = K1G, K1H #, K1G.transpose(keep_type=True), K1H.transpose(keep_type=True)\n # except ValueError:\n # pass\n\n return ret", "def collate_fn(batch):\n all_input_ids, all_attention_mask, all_token_type_ids, all_lens, all_labels = map(torch.stack, zip(*batch))\n max_len = max(all_lens).item()\n all_input_ids = all_input_ids[:, :max_len]\n all_attention_mask = all_attention_mask[:, :max_len]\n all_token_type_ids = all_token_type_ids[:, :max_len]\n return all_input_ids, all_attention_mask, all_token_type_ids, all_labels", "def custom_collate(batch):\n error_msg = \"batch must contain tensors, numbers, dicts or lists; found {}\"\n elem_type = type(batch[0])\n if isinstance(batch[0], torch.Tensor):\n out = None\n matched = True\n for dim in range(batch[0].dim()):\n lst = list(map(lambda x: x.size(dim), batch))\n matched = not lst or lst.count(lst[0]) == len(lst)\n if not matched:\n break\n if matched:\n return torch.stack(batch, 0, out=out)\n else:\n return pad_sequence(batch, batch_first=True)\n # indices, items = zip(*sorted(enumerate(batch), key=lambda x: x[1].size(0), reverse=True))\n # lengths = [batch[i].size(0) for i in indices]\n # logger.info(lengths)\n # return pad_sequence([batch[i] for i in indices], batch_first=True), lengths\n elif isinstance(batch[0], np.ndarray):\n matched = True\n for dim in range(batch[0].ndim):\n lst = list(map(lambda x: x.shape[dim], batch))\n matched = not lst or lst.count(lst[0]) == len(lst)\n if not matched:\n break\n if matched:\n return np.stack(batch, 0)\n else:\n raise ValueError('dimensions are not matched {}'.format(batch[0].shape))\n elif elem_type.__module__ == 'numpy' and elem_type.__name__ != 'str_' \\\n and elem_type.__name__ != 'string_':\n raise ValueError('cannot handle numpy data')\n elif isinstance(batch[0], int_classes):\n return torch.LongTensor(batch)\n elif isinstance(batch[0], float):\n return torch.DoubleTensor(batch)\n elif isinstance(batch[0], string_classes):\n return batch\n elif isinstance(batch[0], collections.abc.Mapping):\n return {key: custom_collate([d[key] for d in batch]) for key in batch[0]}\n elif isinstance(batch[0], collections.abc.Sequence):\n transposed = zip(*batch)\n return [custom_collate(samples) for samples in transposed]\n raise TypeError((error_msg.format(type(batch[0]))))", "def collate_fn(data, device=default_device):\n # batch.sort(key=lambda x: len(x[1]), reverse=True)\n has_mask_tensor = True if data[0][-1] is not None else False\n input_tensor, target_tensor, bs_tensor, db_tensor, mask_tensor = zip(*data)\n\n input_tensor, input_lengths = padSequence(input_tensor)\n target_tensor, target_lengths = padSequence(target_tensor)\n bs_tensor = torch.as_tensor(bs_tensor, dtype=torch.float, device=device)\n db_tensor = torch.as_tensor(db_tensor, dtype=torch.float, device=device)\n mask_tensor = torch.stack(mask_tensor).permute((1, 0, 2)) if has_mask_tensor else None\n # mask_tensor = torch.stack(mask_tensor).permute((1, 0, 2)) if mask_tensor[0] and mask_tensor[0] != [] else None\n\n # data = input_tensor, target_tensor, bs_tensor, db_tensor, mask_tensor\n # if torch.cuda.is_available():\n # data = [data[i].cuda() if isinstance(data[i], torch.Tensor) else data[i] for i in range(len(data))]\n return input_tensor, input_lengths, target_tensor, target_lengths, bs_tensor, db_tensor, mask_tensor # tensors [batch_size, *]", "def trivial_batch_collator(batch):\n return batch", "def trivial_batch_collator(batch):\n 
return batch", "def trivial_batch_collator(batch):\n return batch", "def basic_collate(batch):\n\n minibatch, targets = zip(*[(a, b) for (a,b) in batch])\n minibatch = stack(minibatch, dim=0)\n return minibatch, targets", "def customize_collate(batch):\n\n elem = batch[0]\n elem_type = type(elem)\n if isinstance(elem, torch.Tensor):\n # this is the main part to handle varied length data in a batch\n # batch = [data_tensor_1, data_tensor_2, data_tensor_3 ... ]\n # \n batch_new = pad_sequence(batch)\n \n out = None\n if torch.utils.data.get_worker_info() is not None:\n # If we're in a background process, concatenate directly into a\n # shared memory tensor to avoid an extra copy\n\n # allocate the memory based on maximum numel\n numel = max([x.numel() for x in batch_new]) * len(batch_new)\n storage = elem.storage()._new_shared(numel)\n out = elem.new(storage)\n return torch.stack(batch_new, 0, out=out)\n\n elif elem_type.__module__ == 'numpy' and elem_type.__name__ != 'str_' \\\n and elem_type.__name__ != 'string_':\n if elem_type.__name__ == 'ndarray' or elem_type.__name__ == 'memmap':\n # array of string classes and object\n if np_str_obj_array_pattern.search(elem.dtype.str) is not None:\n raise TypeError(customize_collate_err_msg.format(elem.dtype))\n # this will go to loop in the last case\n return customize_collate([torch.as_tensor(b) for b in batch])\n elif elem.shape == (): # scalars\n return torch.as_tensor(batch)\n \n elif isinstance(elem, float):\n return torch.tensor(batch, dtype=torch.float64)\n elif isinstance(elem, int_classes):\n return torch.tensor(batch)\n elif isinstance(elem, string_classes):\n return batch\n elif isinstance(elem, container_abcs.Mapping):\n return {key: customize_collate([d[key] for d in batch]) for key in elem}\n elif isinstance(elem, tuple) and hasattr(elem, '_fields'): # namedtuple\n return elem_type(*(customize_collate(samples) \\\n for samples in zip(*batch)))\n elif isinstance(elem, container_abcs.Sequence):\n # check to make sure that the elements in batch have consistent size\n it = iter(batch)\n elem_size = len(next(it))\n if not all(len(elem) == elem_size for elem in it):\n raise RuntimeError('each element in batch should be of equal size')\n \n # zip([[A, B, C], [a, b, c]]) -> [[A, a], [B, b], [C, c]]\n transposed = zip(*batch)\n return [customize_collate(samples) for samples in transposed]\n\n raise TypeError(customize_collate_err_msg.format(elem_type))", "def _collate(cls, inbatch, num_devices=None):\n item0 = inbatch[0]\n bsize = len(inbatch)\n if num_devices is None:\n num_devices = 1\n\n samples_per_device = int(np.ceil(bsize / num_devices))\n\n # assert bsize % samples_per_device == 0\n stacked = []\n if item0.cpu_only:\n # chunking logic\n stacked = []\n for i in range(0, bsize, samples_per_device):\n stacked.append(\n [sample.data for sample in inbatch[i:i + samples_per_device]])\n\n elif item0.stack:\n for i in range(0, bsize, samples_per_device):\n item = inbatch[i]\n pad_dims_ = item.pad_dims\n assert isinstance(item.data, torch.Tensor)\n\n if pad_dims_ is not None:\n # Note: can probably reimplement this using padded collate\n # logic\n ndim = item.dim()\n assert ndim > pad_dims_\n max_shape = [0 for _ in range(pad_dims_)]\n for dim in range(1, pad_dims_ + 1):\n max_shape[dim - 1] = item.shape[-dim]\n for sample in inbatch[i:i + samples_per_device]:\n for dim in range(0, ndim - pad_dims_):\n assert item.shape[dim] == sample.shape[dim]\n for dim in range(1, pad_dims_ + 1):\n max_shape[dim - 1] = max(max_shape[dim - 1], 
sample.shape[-dim])\n padded_samples = []\n for sample in inbatch[i:i + samples_per_device]:\n pad = [0 for _ in range(pad_dims_ * 2)]\n for dim in range(1, pad_dims_ + 1):\n pad[2 * dim - 1] = max_shape[dim - 1] - sample.shape[-dim]\n padded_samples.append(\n F.pad(sample.data, pad, value=sample.padding_value))\n stacked.append(default_collate(padded_samples))\n\n elif pad_dims_ is None:\n stacked.append(\n default_collate([\n sample.data\n for sample in inbatch[i:i + samples_per_device]\n ]))\n else:\n raise ValueError(\n 'pad_dims should be either None or integers (1-3)')\n\n else:\n for i in range(0, bsize, samples_per_device):\n stacked.append(\n [sample.data for sample in inbatch[i:i + samples_per_device]])\n result = BatchContainer(stacked, **item0.meta)\n return result", "def _collate_fn(batch):\r\n batch = list(zip(*batch))\r\n batch[0] = torch.stack(batch[0])\r\n batch[1] = list(batch[1])\r\n batch[2] = torch.stack(batch[2])\r\n return tuple(batch)", "def custom_collate_fn(data):\n features, labels = zip(*data)\n return pack_sequence(features, enforce_sorted=False), torch.tensor(labels)", "def list_data_collate(batch: Sequence):\n elem = batch[0]\n data = [i for k in batch for i in k] if isinstance(elem, list) else batch\n key = None\n try:\n if config.USE_META_DICT:\n data = pickle_operations(data) # bc 0.9.0\n if isinstance(elem, Mapping):\n ret = {}\n for k in elem:\n key = k\n data_for_batch = [d[key] for d in data]\n ret[key] = collate_meta_tensor(data_for_batch)\n else:\n ret = collate_meta_tensor(data)\n return ret\n except RuntimeError as re:\n re_str = str(re)\n if \"equal size\" in re_str:\n if key is not None:\n re_str += f\"\\nCollate error on the key '{key}' of dictionary data.\"\n re_str += (\n \"\\n\\nMONAI hint: if your transforms intentionally create images of different shapes, creating your \"\n + \"`DataLoader` with `collate_fn=pad_list_data_collate` might solve this problem (check its \"\n + \"documentation).\"\n )\n _ = dev_collate(data)\n raise RuntimeError(re_str) from re\n except TypeError as re:\n re_str = str(re)\n if \"numpy\" in re_str and \"Tensor\" in re_str:\n if key is not None:\n re_str += f\"\\nCollate error on the key '{key}' of dictionary data.\"\n re_str += (\n \"\\n\\nMONAI hint: if your transforms intentionally create mixtures of torch Tensor and numpy ndarray, \"\n + \"creating your `DataLoader` with `collate_fn=pad_list_data_collate` might solve this problem \"\n + \"(check its documentation).\"\n )\n _ = dev_collate(data)\n raise TypeError(re_str) from re", "def collater(self, samples):\r\n return collate(\r\n samples, self.src_dict, self.tgt_dict,\r\n left_pad_source=self.left_pad_source, left_pad_target=self.left_pad_target,\r\n max_sent_len=self.max_sent_len,\r\n mask_other_sents=self.mask_other_sents\r\n )", "def collate_fn(batch):\n file = [item[\"file\"] for item in batch]\n wave = torch.cat([item[\"wave\"] for item in batch], dim=0)\n return {\"file\": file, \"wave\": wave}", "def collate_fn(batch):\n file = [item[\"file\"] for item in batch]\n wave = torch.cat([item[\"wave\"] for item in batch], dim=0)\n return {\"file\": file, \"wave\": wave}", "def container_collate(inbatch, num_devices=None):\n\n if not isinstance(inbatch, collections.Sequence):\n raise TypeError(\"{} is not supported.\".format(inbatch.dtype))\n item0 = inbatch[0]\n if isinstance(item0, ItemContainer):\n return item0.__class__._collate(inbatch, num_devices=num_devices)\n elif isinstance(item0, collections.Sequence):\n transposed = zip(*inbatch)\n return 
[container_collate(samples,\n num_devices=num_devices)\n for samples in transposed]\n elif isinstance(item0, collections.Mapping):\n return {\n key: container_collate([d[key] for d in inbatch],\n num_devices=num_devices)\n for key in item0\n }\n else:\n return default_collate(inbatch)\n # return _collate_else(inbatch, container_collate)", "def collate_fn(batch):\n text = [item[0] for item in batch]\n audio = [item[1] for item in batch]\n\n text_lengths = [len(x) for x in text]\n audio_lengths = [len(x) for x in audio]\n\n max_text = max(text_lengths)\n max_audio = max(audio_lengths)\n\n text_batch = np.stack(pad_text(x, max_text) for x in text)\n audio_batch = np.stack(pad_spectrogram(x, max_audio) for x in audio)\n\n return (torch.LongTensor(text_batch),\n torch.FloatTensor(audio_batch).permute(1, 0, 2),\n text_lengths, audio_lengths)", "def dynamic_padding_collate_fn(batch_list):\n batch_uncollated = [[] for i in range(3)]\n\n for features in batch_list:\n length = features[1].sum().item()\n for i, feature in enumerate(features):\n batch_uncollated[i].append(feature[:length])\n\n batch_collated = []\n for batch in batch_uncollated:\n batch_collated.append(pad_sequence(batch, batch_first=True))\n\n return batch_collated", "def collate_fn(batch):\r\n names, images, annos = zip(*batch)\r\n images = default_collate(images)\r\n return names, images, annos", "def collate_fn(batch):\r\n names, images, annos = zip(*batch)\r\n images = default_collate(images)\r\n return names, images, annos", "def mycollate(batch):\n elem_type = type(batch[0])\n if isinstance(batch[0], torch.Tensor):\n out = None\n if _use_shared_memory:\n # If we're in a background process, concatenate directly into a\n # shared memory tensor to avoid an extra copy\n numel = sum([x.numel() for x in batch])\n storage = batch[0].storage()._new_shared(numel)\n out = batch[0].new(storage)\n return torch.stack(batch, 0, out=out)\n elif elem_type.__module__ == 'numpy' and elem_type.__name__ != 'str_' \\\n and elem_type.__name__ != 'string_':\n elem = batch[0]\n if elem_type.__name__ == 'ndarray':\n # array of string classes and object\n if np_str_obj_array_pattern.search(elem.dtype.str) is not None:\n raise TypeError(error_msg_fmt.format(elem.dtype))\n\n return default_collate([torch.from_numpy(b) for b in batch])\n if elem.shape == (): # scalars\n py_type = float if elem.dtype.name.startswith('float') else int\n return numpy_type_map[elem.dtype.name](list(map(py_type, batch)))\n elif isinstance(batch[0], float):\n return torch.tensor(batch, dtype=torch.float64)\n elif isinstance(batch[0], int_classes):\n return torch.tensor(batch)\n elif isinstance(batch[0], string_classes):\n return batch\n elif isinstance(batch[0], container_abcs.Mapping):\n if 'dataset' not in batch[0] or batch[0]['dataset'].neib_samp not in ('sampling', 'best', 'relation'):\n return {key: default_collate([d[key] for d in batch]) for key in batch[0] if key not in ['weight','impt','dataset']}\n relations = batch[0]['dataset'].tr_grp\n if batch[0]['dataset'].neib_samp == 'relation':\n nodes2 = sum([d['impt'] for d in batch],[])\n else:\n w= sum([d['weight'] for d in batch], Counter())\n [w.pop(d['index'], None) for d in batch] \n if batch[0]['dataset'].neib_samp == 'sampling':\n p = FlexCounter(w)/sum(w.values())\n nodes2 = np.random.choice(list(p.keys()), batch[0]['dataset'].k, replace=False, p=list(p.values()))\n elif batch[0]['dataset'].neib_samp == 'best':\n nodes2 = nlargest(batch[0]['dataset'].k, w, key = w.get) \n \n neib_batch = 
[batch[0]['dataset']._getimage(x,True,1) for x in nodes2]\n [(d.pop('weight', None), d.pop('dataset', None)) for d in batch]\n batch = neib_batch + batch\n coll = default_collate(batch)\n adj_mats = {r: np.zeros((len(batch), len(batch))) for r in relations}\n for r in relations:\n for i, b1 in enumerate(coll[r]):\n for j, b2 in enumerate(coll[r]):\n if i!=j:\n adj_mats[r][i,j] = 1 if b1==b2 else 0\n adj_mats[r] = adj_norm(adj_mats[r]) \n coll['adj'] = adj_mats\n coll['k'] = len(nodes2)\n return coll\n \n elif isinstance(batch[0], tuple) and hasattr(batch[0], '_fields'): # namedtuple\n return type(batch[0])(*(default_collate(samples) for samples in zip(*batch)))\n elif isinstance(batch[0], container_abcs.Sequence):\n transposed = zip(*batch)\n return [default_collate(samples) for samples in transposed]\n\n raise TypeError((error_msg_fmt.format(type(batch[0]))))", "def collater(self, samples):\r\n raise NotImplementedError", "def list_data_collate(batch):\n elem = batch[0]\n data = [i for k in batch for i in k] if isinstance(elem, list) else batch\n return default_collate(data)", "def _collate_else(batch, collate_func):\n error_msg = \"batch must contain tensors, numbers, dicts or lists; found {}\"\n elem_type = type(batch[0])\n if elem_type.__module__ == 'numpy' and elem_type.__name__ != 'str_' \\\n and elem_type.__name__ != 'string_':\n elem = batch[0]\n if elem_type.__name__ == 'ndarray':\n # array of string classes and object\n if re.search('[SaUO]', elem.dtype.str) is not None:\n raise TypeError(error_msg.format(elem.dtype))\n\n return torch.stack([torch.from_numpy(b) for b in batch], 0)\n if elem.shape == (): # scalars\n py_type = float if elem.dtype.name.startswith('float') else int\n return numpy_type_map[elem.dtype.name](list(map(py_type, batch)))\n elif isinstance(batch[0], slice):\n batch = default_collate([{\n 'start': sl.start,\n 'stop': sl.stop,\n 'step': 1 if sl.step is None else sl.step\n } for sl in batch])\n return batch\n elif isinstance(batch[0], int_classes):\n return torch.LongTensor(batch)\n elif isinstance(batch[0], float):\n return torch.DoubleTensor(batch)\n elif isinstance(batch[0], string_classes):\n return batch\n elif isinstance(batch[0], container_abcs.Mapping):\n # Hack the mapping collation implementation to print error info\n if _DEBUG:\n collated = {}\n try:\n for key in batch[0]:\n collated[key] = collate_func([d[key] for d in batch])\n except Exception:\n print('\\n!!Error collating key = {!r}\\n'.format(key))\n raise\n return collated\n else:\n return {key: collate_func([d[key] for d in batch]) for key in batch[0]}\n elif isinstance(batch[0], tuple) and hasattr(batch[0], '_fields'): # namedtuple\n return type(batch[0])(*(default_collate(samples) for samples in zip(*batch)))\n elif isinstance(batch[0], container_abcs.Sequence):\n transposed = zip(*batch)\n return [collate_func(samples) for samples in transposed]\n else:\n raise TypeError((error_msg.format(type(batch[0]))))", "def collate_fn(self, batch):\n images = list()\n boxes = list()\n labels = list()\n difficulties = list()\n\n for b in batch:\n images.append(b[0])\n boxes.append(b[1])\n labels.append(b[2])\n difficulties.append(b[3])\n\n images = torch.stack(images, dim=0)\n\n return images, boxes, labels, difficulties # tensor (N, 3, 300, 300), 3 lists of N tensors each", "def _msdd_train_collate_fn(self, batch):\n packed_batch = list(zip(*batch))\n features, feature_length, ms_seg_timestamps, ms_seg_counts, clus_label_index, scale_mapping, targets = packed_batch\n features_list, 
feature_length_list = [], []\n ms_seg_timestamps_list, ms_seg_counts_list, scale_clus_label_list, scale_mapping_list, targets_list = (\n [],\n [],\n [],\n [],\n [],\n )\n\n max_raw_feat_len = max([x.shape[0] for x in features])\n max_target_len = max([x.shape[0] for x in targets])\n max_total_seg_len = max([x.shape[0] for x in clus_label_index])\n\n for feat, feat_len, ms_seg_ts, ms_seg_ct, scale_clus, scl_map, tgt in batch:\n seq_len = tgt.shape[0]\n pad_feat = (0, max_raw_feat_len - feat_len)\n pad_tgt = (0, 0, 0, max_target_len - seq_len)\n pad_sm = (0, max_target_len - seq_len)\n pad_ts = (0, 0, 0, max_target_len - seq_len)\n pad_sc = (0, max_total_seg_len - scale_clus.shape[0])\n padded_feat = torch.nn.functional.pad(feat, pad_feat)\n padded_tgt = torch.nn.functional.pad(tgt, pad_tgt)\n padded_sm = torch.nn.functional.pad(scl_map, pad_sm)\n padded_ms_seg_ts = torch.nn.functional.pad(ms_seg_ts, pad_ts)\n padded_scale_clus = torch.nn.functional.pad(scale_clus, pad_sc)\n\n features_list.append(padded_feat)\n feature_length_list.append(feat_len.clone().detach())\n ms_seg_timestamps_list.append(padded_ms_seg_ts)\n ms_seg_counts_list.append(ms_seg_ct.clone().detach())\n scale_clus_label_list.append(padded_scale_clus)\n scale_mapping_list.append(padded_sm)\n targets_list.append(padded_tgt)\n\n features = torch.stack(features_list)\n feature_length = torch.stack(feature_length_list)\n ms_seg_timestamps = torch.stack(ms_seg_timestamps_list)\n clus_label_index = torch.stack(scale_clus_label_list)\n ms_seg_counts = torch.stack(ms_seg_counts_list)\n scale_mapping = torch.stack(scale_mapping_list)\n targets = torch.stack(targets_list)\n return features, feature_length, ms_seg_timestamps, ms_seg_counts, clus_label_index, scale_mapping, targets", "def collater(self, samples):\n\n return dual_collate(\n samples, pad_idx=self.d1.src_dict.pad(), eos_idx=self.d1.src_dict.eos(),\n left_pad_source=self.d1.left_pad_source, left_pad_target=self.d1.left_pad_target,\n input_feeding=self.d1.input_feeding,\n )\n\n #prev_output_tokens doesn't match!\n #id doesn't match\n #both of these keys are lengths 248 for both dictionaries\n #length only captures the first dimension of a multidimensional tensor\n #248 is likely the batch size here\n #error occurs because of the sorting by descending source length in the collate method\n #may be possible to fix by replace the sort_order line with: sort_order = torch.LongTensor(range(len(id)))\n #also it seems like there's more keys in c1 and c2 than we explicitly account for here \n #also fix DualSourceSequenceGenerator.generate\n\n indexes = [sample['id'] for sample in samples]\n\n c1 = self.d1.collater([self.d1[index] for index in indexes])\n c2 = self.d2.collater([self.d2[index] for index in indexes])\n\n # c1 = self.d1.collater([self.d1[sample['id']] for sample in samples])\n # c2 = self.d2.collater([self.d2[sample['id']] for sample in samples])\n\n net_input1 = c1['net_input']; net_input2 = c2['net_input']\n net_input = {}\n for key in net_input1.keys():\n if 'src_' in key:\n net_input[key+'1'] = net_input1[key]\n elif key == 'prev_output_tokens':\n net_input[key] = net_input1[key]\n # elif key == 'ntokens':\n # net_input[key] = net_input1[key]\n else:\n raise AssertionError\n for key in net_input2.keys():\n if 'src_' in key:\n net_input[key+'2'] = net_input2[key]\n elif key == 'prev_output_tokens':\n if self.dual_decoder:\n net_input[key+'_extra'] = net_input2[key]\n else:\n # net_input[key] = net_input2[key]\n pass\n # err = \"NET_INPUT ASSERTION: 
\"+str(len(indexes))+\";\\n\"\n # err += str(len(net_input[key])) + \"\\t\" + str(net_input[key]) + \"\\n\"\n # err += str(len(net_input2[key])) + \"\\t\" + str(net_input2[key]) + \"\\n\"\n # assert False, err\n # if not net_input[key] == net_input2[key]:\n # print(\"NET_INPUT ASSERTION:\")\n # print(net_input[key])\n # print(net_input2[key])\n # raise AssertionError\n else:\n raise AssertionError\n\n c = {'net_input': net_input}\n for key in c1.keys():\n if key == 'target':\n c[key] = c1[key]\n elif key == 'ntokens':\n c[key] = c1[key]\n elif key == 'id' or key == 'nsentences':\n c[key] = c1[key]\n else:\n assert key == 'net_input',key\n for key in c2.keys():\n if key == 'target':\n c[key] = c2[key]\n elif key == 'ntokens':\n if 'target' not in samples[0]:\n c[key] += c2[key] # source tokens\n elif self.dual_decoder:\n c[key+'_extra'] = c2[key] # target tokens for decoder 2\n else:\n assert c[key] == c2[key], \"NTOKENS:\\n\"+str(c[key])+\"\\n\"+str(c2[key]) # target tokens for decoder\n elif key == 'id':\n # set1 = set(c[key])\n # set2 = set(c2[key])\n # assert set1 == set2\n assert False, \"ID: lengths: \"+str(len(indexes))+\"; \"+str(len(c[key]))+\", \"+str(len(c2[key]))+\"\\n\"+str(c[key][:10])+\"...\\n\"+str(c2[key][:10])+\"...\\n\" \n assert c[key] == c2[key], \"ID:\\n\"+str(c[key])+\"\\n\"+str(c2[key])\n elif key == 'nsentences':\n assert c[key] == c2[key], \"NSENT:\\n\"+str(c[key])+\"\\n\"+str(c2[key])\n else:\n assert key == 'net_input',key\n return c\n\n\n\n net_input1['src_tokens1'] = net_input1.pop('src_tokens') \n net_input1['src_lengths1'] = net_input1.pop('src_lengths')\n net_input1['src_tokens2'] = net_input2['src_tokens'] \n net_input1['src_lengths2'] = net_input2['src_lengths']\n\n if self.dual_decoder:\n net_input1['prev_output_tokens_extra'] = net_input2['prev_output_tokens']\n c1['target_extra'] = c2['target']\n c1['ntokens_extra'] = c2['ntokens']\n if 'target' not in samples[0]:\n #ntokens and ntokens_extra represent the total number of source tokens\n c1['ntokens'] = c1['ntokens'] + c2['ntokens']\n if 'ntokens_extra' in c1:\n c1['ntokens_extra'] = c1['ntokens']\n #else ntokens is the total number of target tokens\n return c1", "def collate_fn(self, batch):\r\n batch = list(map(torch.stack, zip(*batch)))\r\n max_seq_len = torch.max(torch.sum(batch[1], 1)).item()\r\n for i in range(len(batch) - 1):\r\n if batch[i].size()[1] > max_seq_len:\r\n batch[i] = batch[i][:, :max_seq_len]\r\n if self.truncate_label:\r\n batch[-1] = batch[-1][:, :max_seq_len]\r\n return batch", "def collate_fn(self, batch):\n images = list()\n targets = list()\n\n for b in batch:\n images.append(b[0])\n targets.append(b[1])\n\n # images = torch.stack(images, dim=0)\n\n return images, targets # tensor (N, 3, 300, 300), 3 lists of N tensors each", "def collater(self, samples):\n batch = self.base_dataset.collater(samples)\n # In case of an empty batch, return an empty dict\n if len(batch) == 0:\n return {}\n auxiliary_targets_map = {}\n for i, s in enumerate(samples):\n auxiliary_targets_map[s['id']] = i\n sort_order = []\n for s_id in batch['id'].tolist():\n sort_order.append(auxiliary_targets_map[s_id])\n sort_order = torch.tensor(sort_order)\n auxiliary_target = torch.stack([s[\"auxiliary_target\"] for s in samples])\n batch['auxiliary_target'] = auxiliary_target.index_select(0, sort_order)\n return batch", "def individual_collate(batch):\n\n data = batch\n\n collected_data = defaultdict(list)\n\n for i in range(len(list(data))):\n for k in data[i].keys():\n 
collected_data[k].append(data[i][k])\n\n for k in collected_data.keys():\n collected_data[k] = torch.stack(collected_data[k])\n\n return collected_data", "def customize_collate_from_batch(batch):\n\n elem = batch[0]\n elem_type = type(elem)\n if isinstance(elem, torch.Tensor):\n batch_new = pad_sequence(batch) \n out = None\n if torch.utils.data.get_worker_info() is not None:\n numel = max([x.numel() for x in batch_new]) * len(batch_new)\n storage = elem.storage()._new_shared(numel)\n out = elem.new(storage)\n # here is the difference\n return torch.cat(batch_new, 0, out=out)\n\n elif elem_type.__module__ == 'numpy' and elem_type.__name__ != 'str_' \\\n and elem_type.__name__ != 'string_':\n if elem_type.__name__ == 'ndarray' or elem_type.__name__ == 'memmap':\n if np_str_obj_array_pattern.search(elem.dtype.str) is not None:\n raise TypeError(customize_collate_err_msg.format(elem.dtype))\n return customize_collate_from_batch(\n [torch.as_tensor(b) for b in batch])\n elif elem.shape == (): # scalars\n return torch.as_tensor(batch)\n elif isinstance(elem, float):\n return torch.tensor(batch, dtype=torch.float64)\n elif isinstance(elem, int_classes):\n return torch.tensor(batch)\n elif isinstance(elem, string_classes):\n return batch\n elif isinstance(elem, tuple):\n # concatenate two tuples\n tmp = elem\n for tmp_elem in batch[1:]:\n tmp += tmp_elem \n return tmp\n elif isinstance(elem, container_abcs.Sequence):\n it = iter(batch)\n elem_size = len(next(it))\n if not all(len(elem) == elem_size for elem in it):\n raise RuntimeError('each element in batch should be of equal size')\n transposed = zip(*batch)\n return [customize_collate_from_batch(samples) for samples in transposed]\n\n raise TypeError(customize_collate_err_msg.format(elem_type))", "def default_collate(batch):\n elem_type = type(batch[0])\n if isinstance(batch[0], torch.Tensor):\n out = None\n if _use_shared_memory:\n # If we're in a background process, concatenate directly into a\n # shared memory tensor to avoid an extra copy\n numel = sum([x.numel() for x in batch])\n storage = batch[0].storage()._new_shared(numel)\n out = batch[0].new(storage)\n return torch.stack(batch, 0, out=out)\n elif elem_type.__module__ == 'numpy' and elem_type.__name__ != 'str_' \\\n and elem_type.__name__ != 'string_':\n elem = batch[0]\n if elem_type.__name__ == 'ndarray':\n # array of string classes and object\n if np_str_obj_array_pattern.search(elem.dtype.str) is not None:\n raise TypeError(error_msg_fmt.format(elem.dtype))\n\n return default_collate([torch.from_numpy(b) for b in batch])\n if elem.shape == (): # scalars\n py_type = float if elem.dtype.name.startswith('float') else int\n return numpy_type_map[elem.dtype.name](list(map(py_type, batch)))\n elif isinstance(batch[0], float):\n return torch.tensor(batch, dtype=torch.float64)\n elif isinstance(batch[0], int_classes):\n return torch.tensor(batch)\n elif isinstance(batch[0], string_classes):\n return batch\n elif isinstance(batch[0], container_abcs.Mapping): \n return {key: default_collate([d[key] for d in batch]) for key in batch[0]}\n elif isinstance(batch[0], tuple) and hasattr(batch[0], '_fields'): # namedtuple\n return type(batch[0])(*(default_collate(samples) for samples in zip(*batch)))\n elif isinstance(batch[0], container_abcs.Sequence):\n transposed = zip(*batch)\n return [default_collate(samples) for samples in transposed]\n\n raise TypeError((error_msg_fmt.format(type(batch[0]))))", "def custom_collate_segmentation(\n batch: List[Dict[str, Tensor]], groundtruth: bool = 
True\n) -> Dict[str, Union[Any, List[Tensor]]]:\n\n batch = default_collate(batch)\n return batch", "def _collater(batch):\n return batch[0]", "def custom_collate_fn(batch):\n images, bboxes, context_indices, labels = zip(*batch)\n # images = (img_1, ..., img_N) each element of size [3, img_H, img_W]\n # bboxes = (bboxes_1, ..., bboxes_N) each element of size [n_bboxes_in_image, 4]\n # context_indices = (ci_1, ..., ci_N) each element of size [n_bboxes_in_image, 2*context_size]\n # labels = (labels_1, ..., labels_N) each element of size [n_bboxes_in_image]\n \n images = torch.stack(images, 0)\n \n bboxes_with_batch_index = []\n observed_bboxes = 0\n for i, bbox in enumerate(bboxes):\n batch_indices = torch.Tensor([i]*bbox.shape[0]).view(-1,1)\n bboxes_with_batch_index.append(torch.cat((batch_indices, bbox), dim=1))\n context_indices[i][context_indices[i] != -1] += observed_bboxes\n observed_bboxes += bbox.shape[0]\n bboxes_with_batch_index = torch.cat(bboxes_with_batch_index)\n context_indices = torch.cat(context_indices)\n \n labels = torch.cat(labels)\n \n return images, bboxes_with_batch_index, context_indices, labels", "def collate_meta_tensor(batch):\n if not isinstance(batch, Sequence):\n raise NotImplementedError()\n elem_0 = first(batch)\n if isinstance(elem_0, MetaObj):\n collated = default_collate(batch)\n collated.meta = default_collate([i.meta or TraceKeys.NONE for i in batch])\n collated.applied_operations = [i.applied_operations or TraceKeys.NONE for i in batch]\n collated.is_batch = True\n return collated\n if isinstance(elem_0, Mapping):\n return {k: collate_meta_tensor([d[k] for d in batch]) for k in elem_0}\n if isinstance(elem_0, (tuple, list)):\n return [collate_meta_tensor([d[i] for d in batch]) for i in range(len(elem_0))]\n\n # no more recursive search for MetaTensor\n return default_collate(batch)", "def collate_fn(self, batch):\n # Sort a data list by caption length (descending order).\n #sample.sort(key=lambda x: len(x[1]), reverse=True)\n images, words = [b.get('image') for b in batch], [b.get('word') for b in batch]\n \n # Merge images (from tuple of 3D tensor to 4D tensor).\n images = torch.stack(images, 0)\n \n # Merge captions (from tuple of 1D tensor to 2D tensor).\n lengths = [len(word) for word in words]\n targets = torch.zeros(sum(lengths)).long()\n lengths = torch.tensor(lengths)\n for j, word in enumerate(words):\n start = sum(lengths[:j])\n end = lengths[j]\n targets[start:start+end] = torch.tensor([self.ds.char_dict.get(letter) for letter in word]).long()\n \n if self.device == 'cpu':\n dev = torch.device('cpu')\n else:\n dev = torch.device('cuda')\n return images.to(dev), targets.to(dev), lengths.to(dev)", "def preprocessing_fn(inputs):\n outputs = {}\n\n for key in ONE_HOT_FEATURES.keys():\n dim = ONE_HOT_FEATURES[key]\n int_value = tft.compute_and_apply_vocabulary(\n fill_in_missing(inputs[key]), top_k=dim + 1\n )\n outputs[transformed_name(key)] = convert_num_to_one_hot(\n int_value, num_labels=dim + 1\n )\n\n for key, bucket_count in BUCKET_FEATURES.items():\n temp_feature = tft.bucketize(\n convert_zip_code(fill_in_missing(inputs[key])),\n bucket_count,\n )\n outputs[transformed_name(key)] = convert_num_to_one_hot(\n temp_feature, num_labels=bucket_count + 1\n )\n\n for key in TEXT_FEATURES.keys():\n outputs[transformed_name(key)] = fill_in_missing(inputs[key])\n\n outputs[transformed_name(LABEL_KEY)] = fill_in_missing(inputs[LABEL_KEY])\n\n return outputs", "def build_collate_fn(\n cls, args: argparse.Namespace, train: bool\n ) -> 
Callable[[Sequence[Dict[str, np.ndarray]]], Dict[str, torch.Tensor]]:\n raise NotImplementedError", "def dev_collate(batch, level: int = 1, logger_name: str = \"dev_collate\"):\n elem = batch[0]\n elem_type = type(elem)\n l_str = \">\" * level\n batch_str = f\"{batch[:10]}{' ... ' if len(batch) > 10 else ''}\"\n if isinstance(elem, torch.Tensor):\n try:\n logging.getLogger(logger_name).critical(f\"{l_str} collate/stack a list of tensors\")\n return torch.stack(batch, 0)\n except TypeError as e:\n logging.getLogger(logger_name).critical(\n f\"{l_str} E: {e}, type {[type(elem).__name__ for elem in batch]} in collate({batch_str})\"\n )\n return\n except RuntimeError as e:\n logging.getLogger(logger_name).critical(\n f\"{l_str} E: {e}, shape {[elem.shape for elem in batch]} in collate({batch_str})\"\n )\n return\n elif elem_type.__module__ == \"numpy\" and elem_type.__name__ != \"str_\" and elem_type.__name__ != \"string_\":\n if elem_type.__name__ in [\"ndarray\", \"memmap\"]:\n logging.getLogger(logger_name).critical(f\"{l_str} collate/stack a list of numpy arrays\")\n return dev_collate([torch.as_tensor(b) for b in batch], level=level, logger_name=logger_name)\n elif elem.shape == (): # scalars\n return batch\n elif isinstance(elem, (float, int, str, bytes)):\n return batch\n elif isinstance(elem, abc.Mapping):\n out = {}\n for key in elem:\n logging.getLogger(logger_name).critical(f'{l_str} collate dict key \"{key}\" out of {len(elem)} keys')\n out[key] = dev_collate([d[key] for d in batch], level=level + 1, logger_name=logger_name)\n return out\n elif isinstance(elem, abc.Sequence):\n it = iter(batch)\n els = list(it)\n try:\n sizes = [len(elem) for elem in els] # may not have `len`\n except TypeError:\n types = [type(elem).__name__ for elem in els]\n logging.getLogger(logger_name).critical(f\"{l_str} E: type {types} in collate({batch_str})\")\n return\n logging.getLogger(logger_name).critical(f\"{l_str} collate list of sizes: {sizes}.\")\n if any(s != sizes[0] for s in sizes):\n logging.getLogger(logger_name).critical(\n f\"{l_str} collate list inconsistent sizes, got size: {sizes}, in collate({batch_str})\"\n )\n transposed = zip(*batch)\n return [dev_collate(samples, level=level + 1, logger_name=logger_name) for samples in transposed]\n logging.getLogger(logger_name).critical(f\"{l_str} E: unsupported type in collate {batch_str}.\")\n return", "def collate_fn(data):\n\toutput = dict()\n\n\tfor name in ['answer_ID','query_ID']:\n\t\toutput[name] = [ _[name] for _ in data]\n\n\n\tfor name in ['query_len','answer_len']:\n\t\ttemp = [ _[name] for _ in data]\t \n\t\toutput[name] = torch.stack(temp, dim=0) \n\t\n\t#deal with source and target\n\tfor name in ['answer','query']:\n\t\tlength = output['{0}_len'.format(name)]\n\t\tl = length.max().item()\n\n\t\tfor i in range(len(data)):\n\t\t\tif(l-length[i].item()>0):\n\t\t\t\tdata[i][name] = torch.cat([data[i][name],torch.zeros(l-length[i].item(),dtype=torch.long)],dim=-1)\n\n\t\ttemp = [ _[name] for _ in data]\n\t\t\n\t\toutput[name] = torch.stack(temp, dim=0).long()\n\t\t\n\n\treturn output", "def collate_fn(batch):\n # From\n # https://github.com/sgrvinod/a-PyTorch-Tutorial-to-Object-Detection/blob/43fd8be9e82b351619a467373d211ee5bf73cef8/datasets.py#L60\n\n images = list()\n boxes = list()\n labels = list()\n\n for b in batch:\n if b[0] is not None:\n images.append(b[0])\n boxes.append(b[1])\n labels.append(b[2])\n\n if len(images) > 0:\n images = torch.stack(images, dim=0)\n\n return images, boxes, labels", "def 
collate_fn_Siamese(batch):\n all_input_ids, all_attention_mask, all_token_type_ids, all_lens_a, all_lens_b, all_labels = map(torch.stack,\n zip(*batch))\n max_len_a = max(all_lens_a).item()\n max_len_b = max(all_lens_b).item()\n if max_len_b == 2:\n all_input_ids = all_input_ids[:, :max_len_a]\n all_attention_mask = all_attention_mask[:, :max_len_a]\n all_token_type_ids = all_token_type_ids[:, :max_len_a]\n else:\n all_input_ids = all_input_ids[:, :512 + max_len_b]\n all_attention_mask = all_attention_mask[:, :512 + max_len_b]\n all_token_type_ids = all_token_type_ids[:, :512 + max_len_b]\n return all_input_ids, all_attention_mask, all_token_type_ids, all_labels", "def batch_transformer(\n f: Callable[..., np.ndarray],\n *,\n inplace: bool,\n mode: str = \"each\",\n batchwise_apply: bool = False,\n in_keys: Sequence[str] = None,\n out_keys: Sequence[Union[str, Tuple[int, str]]] = None,\n pop_in_keys: bool = True,\n train_only: bool = False,\n):\n\n in_keys = as_list(in_keys)\n if out_keys and inplace:\n raise ValueError(\"cannot set explicit out_keys if inplace is True\")\n\n batch_gen_t = Generator[Union[DataDict, Tuple[DataDict, ...]], None, None]\n\n def bt(gen: batch_gen_t, train=True):\n \"\"\"\n Arguments:\n gen: the generator whose data stream to transform. Must output\n dicts or tuples of dicts.\n train: if False, and the batch_transformer defining this function\n was called with `train_only=True`, the data will be returned\n unchanged\n \"\"\"\n nonlocal in_keys\n nonlocal out_keys\n nonlocal inplace\n\n def split_flatbatch(flatbatch, key_to_dix):\n out: List[DataDict] = [\n {} for i in range(max(key_to_dix.values()) + 1)\n ]\n for key, dix in key_to_dix.items():\n out[dix][key] = flatbatch[key]\n\n return tuple(out)\n\n if mode not in [\"mix\", \"each\"]:\n raise ValueError(\"Unrecognized mode f{mode}\")\n\n if mode == \"mix\" and inplace:\n warn(\"Using inplace with 'mix' mode, inplace will be ignored\")\n inplace = False\n\n # should loop forever\n for batch in gen:\n if not isinstance(batch, tuple):\n batch = (batch,)\n\n keys: Set = set()\n for d in batch:\n for key in d.keys():\n if key in keys:\n raise ValueError(\n \"Duplicate keys in data dicts not supported!\"\n )\n keys.add(key)\n del keys\n\n bs = check_all_same_length(\n *flatten([list(d.values()) for d in batch])\n )\n\n if train_only and not train:\n yield batch\n\n flatbatch: DataDict = {\n k: arr for d in batch for k, arr in d.items()\n }\n key_to_dix: Dict[str, int] = {\n k: ix for ix, d in enumerate(batch) for k in d.keys()\n }\n\n # by default, apply transformations to all inputs and no labels\n if not in_keys:\n in_keys = list(flatbatch.keys())\n\n if out_keys and mode == \"each\" and inplace and not pop_in_keys:\n warn(\n \"Using explicit output keys with an inplace function and\"\n \"without popping output keys - data will be duplicated!\"\n )\n\n # if out_keys is not passed, assume they're the same as the in keys\n out_keys = out_keys or in_keys\n\n # canonicalize to explicit output dict index\n # novel keys without an explicit dict index will go into output 0\n clean_out_keys = []\n for ok in out_keys:\n if isinstance(ok, tuple):\n clean_out_keys.append(ok)\n elif ok in key_to_dix:\n clean_out_keys.append((key_to_dix[ok], ok))\n else:\n if max(key_to_dix.values()) > 1:\n warn(\n f\"\"\"\n Output key {ok} is neither an input key nor has an\n output index. 
It will be put in the first output\n dict.\"\"\"\n )\n clean_out_keys.append((0, ok))\n\n out_keys = clean_out_keys\n\n if mode == \"each\" and len(out_keys) != len(in_keys):\n raise ValueError(\n 'In mode \"each\", the number of out_keys '\n \"must equal the number of in_keys\"\n )\n\n if batchwise_apply:\n if mode == \"each\":\n if inplace:\n for key in in_keys:\n f(flatbatch[key])\n out = [flatbatch[key] for key in in_keys]\n else:\n out = [f(flatbatch[key]) for key in in_keys]\n elif mode == \"mix\":\n out = f(*[flatbatch[key] for key in in_keys])\n else:\n die(\"unreachable\")\n else:\n if mode == \"each\" and inplace:\n for key in in_keys:\n varr = flatbatch[key]\n for bx in range(bs):\n f(varr[bx])\n out = [flatbatch[key] for key in in_keys]\n else:\n if mode == \"each\":\n first_elems = [f(flatbatch[key][0]) for key in in_keys]\n elif mode == \"mix\":\n first_elems = f(*[flatbatch[key][0] for key in in_keys])\n if len(first_elems) != len(out_keys):\n raise ValueError(\n f\"Transformer function {f} returned \"\n f\"{len(first_elems)} values, but \"\n f\"{len(out_keys)} was expected based on the \"\n f\"output keys {out_keys}.\"\n )\n else:\n die(\"unreachable\")\n\n out = [\n np.zeros((bs,) + elem.shape, elem.dtype)\n for elem in first_elems\n ]\n for outarr, elem in zip(out, first_elems):\n outarr[0, ...] = elem\n\n if mode == \"each\":\n for kx, key in enumerate(in_keys):\n varr = flatbatch[key]\n for bx in range(1, bs):\n out[kx][bx, ...] = f(varr[bx, ...])\n # this might be slow\n elif mode == \"mix\":\n for bx in range(1, bs):\n res = f(*[flatbatch[key][bx] for key in in_keys])\n for outarr, r in zip(out, res):\n outarr[bx, ...] = r\n else:\n die(\"unreachable\")\n\n in_keys_with_ixes = set((ix, k) for k, ix in key_to_dix.items())\n\n for (oix, okey), res in zip(out_keys, out):\n flatbatch[okey] = res\n key_to_dix[okey] = oix\n\n if pop_in_keys:\n would_del_output = set(out_keys) & in_keys_with_ixes\n to_pop = set(in_keys) - {x[1] for x in would_del_output}\n for ikey in to_pop:\n del flatbatch[ikey]\n del key_to_dix[ikey]\n\n yield split_flatbatch(flatbatch, key_to_dix)\n\n return bt", "def _collate_fn(batch):\n # imgs = [b[0] for b in batch]\n # labels = [b[1] for b in batch]\n # imgs = torch.stack(imgs, dim=0)\n # return [imgs, labels]\n imgs = [b[0] for b in batch]\n labels = [b[1] for b in batch]\n imgs = torch.cat(imgs, dim=0)\n labels = [l for sublist in labels for l in sublist]\n return [imgs, labels]", "def collate_fn_pad_image_only(batch):\n output = {\n 'id': [],\n 'label': {\n 'intent': [],\n 'semiotic': [],\n 'contextual': [],\n },\n 'image': [],\n }\n\n for sample in batch:\n output['id'].append(sample['id'])\n output['label']['intent'].append(sample['label']['intent'])\n output['label']['semiotic'].append(sample['label']['semiotic'])\n output['label']['contextual'].append(sample['label']['contextual'])\n output['image'].append(sample['image'])\n\n output['label']['intent'] = torch.LongTensor(output['label']['intent'])\n output['label']['semiotic'] = torch.LongTensor(output['label']['semiotic'])\n output['label']['contextual'] = torch.LongTensor(output['label']['contextual'])\n output['image'] = torch.stack(output['image'], dim=0)\n return output", "def collate_fn_predict(batch):\n all_input_ids, all_attention_mask, all_token_type_ids, all_lens = map(torch.stack, zip(*batch))\n max_len = max(all_lens).item()\n all_input_ids = all_input_ids[:, :max_len]\n all_attention_mask = all_attention_mask[:, :max_len]\n all_token_type_ids = all_token_type_ids[:, :max_len]\n 
return all_input_ids, all_attention_mask, all_token_type_ids", "def collate_fn(batch):\n images, word_seqs, ex_lengths, question_seqs, question_lengths = zip(*batch)\n\n word_seqs = [torch.LongTensor(seq) for seq in word_seqs]\n ex_lengths = torch.LongTensor(ex_lengths)\n # Targets as next-word predictions:\n targets = [x[1:, ] for x in word_seqs]\n # Padding\n word_seqs = torch.nn.utils.rnn.pad_sequence(\n word_seqs, batch_first=True)\n targets = torch.nn.utils.rnn.pad_sequence(\n targets, batch_first=True)\n\n question_seqs = [torch.LongTensor(seq) for seq in question_seqs]\n question_seqs = torch.nn.utils.rnn.pad_sequence(\n question_seqs, batch_first=True)\n\n question_lengths = torch.LongTensor(question_lengths)\n\n images_tensor = torch.stack(images)\n return images_tensor, word_seqs, ex_lengths, targets, question_seqs, question_lengths", "def custom_collate_alldata(\n batch: List[Dict[str, Tensor]], groundtruth: bool = True\n) -> Dict[str, Union[Any, List[Tensor]]]:\n #box_coordinates_list = []\n class_list = []\n blob_centers_list = []\n det_target_list = []\n seg_target_list = []\n\n for element in batch:\n # Clean Detection Dataset\n\n if element[\"dataset_class\"] == \"detection\":\n #box_coordinates_list.append(element.pop(\"det_boxcord\"))\n class_list.append(element.pop(\"det_class\"))\n blob_centers_list.append(element.pop(\"blob_centers\"))\n det_target_list.append(element.pop(\"det_target\"))\n seg_target_list.append([])\n\n # Clean Segmentation Dataset\n if element[\"dataset_class\"] == \"segmentation\":\n #box_coordinates_list.append([])\n class_list.append([])\n blob_centers_list.append([])\n det_target_list.append([])\n\n seg_target_list.append(element.pop(\"seg_target\"))\n\n batch = default_collate(batch)\n #batch[\"det_boxcord\"] = box_coordinates_list\n batch[\"det_class\"] = class_list\n batch[\"blob_centers\"] = blob_centers_list\n batch[\"det_target\"] = det_target_list\n batch[\"seg_target\"] = seg_target_list\n\n return batch", "def collate_fn(batch):\n # Unzip the batch\n imgs,qs, answers = list(zip(*batch))\n\n # concatenate the vectors\n imgs = torch.stack(imgs)\n \n #concatenate the labels\n q = torch.stack(qs)\n a = torch.stack(answers)\n \n return imgs, q, a", "def batch_collate_fn(batch):\n images = []\n masks = []\n \n for (image, trimap, mask) in batch:\n mask = mask.unsqueeze(0)\n trimap = trimap.unsqueeze(0)\n image = torch.cat([image, trimap], 0).unsqueeze(0)\n \n images.append(image)\n masks.append(mask)\n\n images = torch.cat(images, 0)\n masks = torch.cat(masks, 0)\n\n return (images, masks)", "def variable_time_collate_fn3(batch, args, device = torch.device(\"cpu\"), data_type = \"train\", \n data_min = None, data_max = None):\n D = batch[0][2].shape[1]\n len_tt = [ex[1].size(0) for ex in batch]\n maxlen = np.max(len_tt)\n enc_combined_tt = torch.zeros([len(batch), maxlen]).to(device)\n enc_combined_vals = torch.zeros([len(batch), maxlen, D]).to(device)\n enc_combined_mask = torch.zeros([len(batch), maxlen, D]).to(device)\n for b, (record_id, tt, vals, mask, labels) in enumerate(batch):\n currlen = tt.size(0)\n enc_combined_tt[b, :currlen] = tt.to(device) \n enc_combined_vals[b, :currlen] = vals.to(device) \n enc_combined_mask[b, :currlen] = mask.to(device) \n \n enc_combined_vals, _, _ = utils.normalize_masked_data(enc_combined_vals, enc_combined_mask, \n att_min = data_min, att_max = data_max)\n\n if torch.max(enc_combined_tt) != 0.:\n enc_combined_tt = enc_combined_tt / torch.max(enc_combined_tt)\n \n data_dict = {\n \"observed_data\": 
enc_combined_vals, \n \"observed_tp\": enc_combined_tt,\n \"observed_mask\": enc_combined_mask}\n\n return data_dict", "def from_minibatch(minibatch: Dict[str, Any]) -> List[ClassificationItemSequence[ScalarItem]]:\n # batched is a de-generate ClassificationItemSequence, with id being a list of strings, and items being\n # a list of lists.\n batched = ClassificationItemSequence(**minibatch)\n return [ClassificationItemSequence(id=sample_id, items=items)\n for (sample_id, items) in zip(batched.id, batched.items)]", "def batchify(fn, chunk, world_fn = lambda x:x, gather_func = None):\n if chunk is None:\n return fn\n def ret(inputs, training = False, world_fn=world_fn):\n embedded = inputs[0]\n attention_poses = inputs[1]\n intrinsic = inputs[2]\n images_features = inputs[3]\n pts = inputs[4]\n\n ret_list = [fn([embedded[i:i+chunk], gather_func( world_fn(pts[i:i+chunk]), attention_poses, intrinsic, images_features),pts[i:i+chunk] ]\n , training=training) for i in range(0, int(embedded.shape[0]), chunk)]\n #necessary to cache computed results from coarse model\n if fn.coarse:\n return tf.concat([pred[0] for pred in ret_list], 0), tf.concat([pred[1] for pred in ret_list], 0)\n else:\n return tf.concat([pred[0] for pred in ret_list], 0), None\n return ret", "def _default_collate(batch):\n\n elem = batch[0]\n elem_type = type(elem)\n if isinstance(elem, pt.Tensor):\n out = None\n if pt.utils.data.get_worker_info() is not None:\n # If we're in a background process, concatenate directly into a\n # shared memory tensor to avoid an extra copy\n numel = sum([x.numel() for x in batch])\n storage = elem.storage()._new_shared(numel)\n out = elem.new(storage)\n return pt.stack(batch, 0, out=out)\n elif (\n elem_type.__module__ == \"numpy\"\n and elem_type.__name__ != \"str_\"\n and elem_type.__name__ != \"string_\"\n ):\n elem = batch[0]\n if elem_type.__name__ == \"ndarray\":\n # array of string classes and object\n if np_str_obj_array_pattern.search(elem.dtype.str) is not None:\n raise TypeError(\n _default_collate_err_msg_format.format(elem.dtype)\n )\n return _default_collate([pt.as_tensor(b) for b in batch])\n elif elem.shape == (): # scalars\n return pt.as_tensor(batch)\n elif isinstance(elem, float):\n return pt.tensor(batch, dtype=pt.float)\n elif isinstance(elem, int_classes):\n return pt.tensor(batch, dtype=pt.long)\n elif isinstance(elem, string_classes):\n return batch\n elif isinstance(elem, container_abcs.Mapping):\n return {key: _default_collate([d[key] for d in batch]) for key in elem}\n elif isinstance(elem, tuple) and hasattr(elem, \"_fields\"): # namedtuple\n return elem_type(\n *(_default_collate(samples) for samples in zip(*batch))\n )\n elif isinstance(elem, container_abcs.Sequence):\n transposed = zip(*batch)\n return [_default_collate(samples) for samples in transposed]\n elif elem is None:\n return None\n\n raise TypeError(_default_collate_err_msg_format.format(elem_type))", "def collate_sparse_tensors(batch):\n batch_inds = []\n F_list = []\n C_list = []\n for i in range(len(batch)):\n n = batch[i].F.shape[0]\n F_list.append(batch[i].F)\n C_list.append(batch[i].C)\n batch_inds.append(i * np.ones((n, 1), dtype=np.int32))\n\n F_batch = np.concatenate(F_list, axis=0)\n C_batch = np.concatenate(C_list, axis=0)\n batch_inds = np.concatenate(batch_inds, axis=0)\n C_batch = np.concatenate((C_batch, batch_inds), axis=1)\n\n return SparseTensor(\n torch.from_numpy(F_batch).float(), torch.from_numpy(C_batch).int(), batch[0].s\n )", "def collate_fn(batch):\n pad_index = 1 # the <PAD> index 
in vocabulary\n src_list = [sample[0] for sample in batch] # list of each language sentences\n trg_list = [sample[1] for sample in batch]\n\n def padding(sentence_list):\n \"\"\"padding each sentence to the right\"\"\"\n max_len = max([sentence.size(0) for sentence in sentence_list])\n pad_sen = [sen.tolist() + [pad_index] * max(0, max_len - len(sen))\n for sen in sentence_list]\n return torch.LongTensor(pad_sen).transpose(0, 1) # shape of (T, B)\n\n return padding(src_list), padding(trg_list)", "def variable_time_collate_fn(batch, args, device = torch.device(\"cpu\"), data_type = \"train\", \n\tdata_min = None, data_max = None):\n\tD = batch[0][2].shape[1]\n\tcombined_tt, inverse_indices = torch.unique(torch.cat([ex[1] for ex in batch]), sorted=True, return_inverse=True)\n\tcombined_tt = combined_tt.to(device)\n\n\toffset = 0\n\tcombined_vals = torch.zeros([len(batch), len(combined_tt), D]).to(device)\n\tcombined_mask = torch.zeros([len(batch), len(combined_tt), D]).to(device)\n\t\n\tcombined_labels = None\n\tN_labels = 1\n\n\tcombined_labels = torch.zeros(len(batch), N_labels) + torch.tensor(float('nan'))\n\tcombined_labels = combined_labels.to(device = device)\n\t\n\tfor b, (record_id, tt, vals, mask, labels) in enumerate(batch):\n\t\ttt = tt.to(device)\n\t\tvals = vals.to(device)\n\t\tmask = mask.to(device)\n\t\tif labels is not None:\n\t\t\tlabels = labels.to(device)\n\n\t\tindices = inverse_indices[offset:offset + len(tt)]\n\t\toffset += len(tt)\n\n\t\tcombined_vals[b, indices] = vals\n\t\tcombined_mask[b, indices] = mask\n\n\t\tif labels is not None:\n\t\t\tcombined_labels[b] = labels\n\n\tcombined_vals, _, _ = utils.normalize_masked_data(combined_vals, combined_mask, \n\t\tatt_min = data_min, att_max = data_max)\n\n\tif torch.max(combined_tt) != 0.:\n\t\tcombined_tt = combined_tt / torch.max(combined_tt)\n\t\t\n\tdata_dict = {\n\t\t\"data\": combined_vals, \n\t\t\"time_steps\": combined_tt,\n\t\t\"mask\": combined_mask,\n\t\t\"labels\": combined_labels}\n\n\tdata_dict = utils.split_and_subsample_batch(data_dict, args, data_type = data_type)\n\treturn data_dict", "def variable_time_collate_fn2(batch, args, device = torch.device(\"cpu\"), data_type = \"train\", \n data_min = None, data_max = None):\n D = batch[0][2].shape[1]\n len_tt = [ex[1].size(0) for ex in batch]\n maxlen = np.max(len_tt)\n enc_combined_tt = torch.zeros([len(batch), maxlen]).to(device)\n enc_combined_vals = torch.zeros([len(batch), maxlen, D]).to(device)\n enc_combined_mask = torch.zeros([len(batch), maxlen, D]).to(device)\n for b, (record_id, tt, vals, mask, labels) in enumerate(batch):\n currlen = tt.size(0)\n enc_combined_tt[b, :currlen] = tt.to(device) \n enc_combined_vals[b, :currlen] = vals.to(device) \n enc_combined_mask[b, :currlen] = mask.to(device) \n \n combined_tt, inverse_indices = torch.unique(torch.cat([ex[1] for ex in batch]), sorted=True, return_inverse=True)\n combined_tt = combined_tt.to(device)\n\n offset = 0\n combined_vals = torch.zeros([len(batch), len(combined_tt), D]).to(device)\n combined_mask = torch.zeros([len(batch), len(combined_tt), D]).to(device)\n\n combined_labels = None\n N_labels = 1\n\n combined_labels = torch.zeros(len(batch), N_labels) + torch.tensor(float('nan'))\n combined_labels = combined_labels.to(device = device)\n\n for b, (record_id, tt, vals, mask, labels) in enumerate(batch):\n tt = tt.to(device)\n vals = vals.to(device)\n mask = mask.to(device)\n if labels is not None:\n labels = labels.to(device)\n\n indices = inverse_indices[offset:offset + len(tt)]\n offset += 
len(tt)\n\n combined_vals[b, indices] = vals\n combined_mask[b, indices] = mask\n\n if labels is not None:\n combined_labels[b] = labels\n\n combined_vals, _, _ = utils.normalize_masked_data(combined_vals, combined_mask, \n att_min = data_min, att_max = data_max)\n enc_combined_vals, _, _ = utils.normalize_masked_data(enc_combined_vals, enc_combined_mask, \n att_min = data_min, att_max = data_max)\n\n if torch.max(combined_tt) != 0.:\n combined_tt = combined_tt / torch.max(combined_tt)\n enc_combined_tt = enc_combined_tt / torch.max(enc_combined_tt)\n \n data_dict = {\n \"enc_data\":enc_combined_vals,\n \"enc_mask\":enc_combined_mask,\n \"enc_time_steps\":enc_combined_tt,\n \"data\": combined_vals, \n \"time_steps\": combined_tt,\n \"mask\": combined_mask,\n \"labels\": combined_labels}\n\n data_dict = utils.split_and_subsample_batch(data_dict, args, data_type = data_type)\n return data_dict", "def imed_collate(batch):\n error_msg = \"batch must contain tensors, numbers, dicts or lists; found {}\"\n elem_type = type(batch[0])\n if torch.is_tensor(batch[0]):\n stacked = torch.stack(batch, 0)\n return stacked\n elif elem_type.__module__ == 'numpy' and elem_type.__name__ != 'str_' \\\n and elem_type.__name__ != 'string_':\n elem = batch[0]\n if elem_type.__name__ == 'ndarray':\n # array of string classes and object\n if re.search('[SaUO]', elem.dtype.str) is not None:\n raise TypeError(error_msg.format(elem.dtype))\n return torch.stack([torch.from_numpy(b) for b in batch], 0)\n if elem.shape == (): # scalars\n py_type = float if elem.dtype.name.startswith('float') else int\n return __numpy_type_map[elem.dtype.name](list(map(py_type, batch)))\n elif isinstance(batch[0], int_classes):\n return torch.LongTensor(batch)\n elif isinstance(batch[0], float):\n return torch.DoubleTensor(batch)\n elif isinstance(batch[0], string_classes):\n return batch\n elif isinstance(batch[0], collections.abc.Mapping):\n return {key: imed_collate([d[key] for d in batch]) for key in batch[0]}\n elif isinstance(batch[0], collections.abc.Sequence):\n return [imed_collate(samples) for samples in batch]\n\n return batch", "def batchify(input_lists, batch_dim, multi_loc_nodes, non_batch_inputs=None):\n input_dict = {}\n for key, vals in input_lists.items():\n if key in multi_loc_nodes:\n input_dict[key] = []\n for _vals in vals:\n one_val = _vals[0]\n if non_batch_inputs and key in non_batch_inputs:\n input_dict[key].append(one_val)\n elif utils.is_torch_tensor(one_val):\n input_dict[key].append(torch.stack(_vals, dim=batch_dim))\n elif utils.is_numpy_array(one_val):\n input_dict[key].append(np.stack(_vals, axis=batch_dim))\n else:\n raise RuntimeError(\n f\"Currently does not support automatically batchifying inputs with type {type(one_val)} for node `{key}`\")\n else:\n one_val = vals[0]\n if non_batch_inputs and key in non_batch_inputs:\n input_dict[key] = one_val\n elif utils.is_torch_tensor(one_val):\n input_dict[key] = torch.stack(vals, dim=batch_dim)\n elif utils.is_numpy_array(one_val):\n input_dict[key] = np.stack(vals, axis=batch_dim)\n else:\n raise RuntimeError(\n f\"Currently does not support automatically batchifying inputs with type {type(one_val)} for node `{key}`\")\n return input_dict", "def collate_fn(batch):\n sentence1 = [item[0] for item in batch]\n sentence2 = [item[1] for item in batch]\n label = [item[2] for item in batch]\n label = torch.tensor(label)\n return sentence1, sentence2, label", "def collate_wrapper(self, batch):\n idx = list(set([v.item() for sample in batch for v in 
sample[0][:2]]))\n\n node_layers, mappings = self._form_computation_graph(idx)\n\n rows = self.nbrs_s[node_layers[0]]\n features = self.features[node_layers[0], :]\n labels = torch.FloatTensor([sample[1] for sample in batch])\n edges = np.array([sample[0] for sample in batch])\n edges = np.array([mappings[-1][v] for v in edges.flatten()]).reshape(\n edges.shape\n )\n return edges, features, node_layers, mappings, rows, labels", "def batchify(fn, chunk):\n if chunk is None:\n return fn\n\n def ret(inputs, styles, alpha, feature):\n results = []\n for i in range(0, inputs.shape[0], chunk):\n input_chunk = inputs[i:i + chunk]\n style_chunk = styles[i:i + chunk]\n alpha_chunk = alpha[i:i + chunk] if alpha is not None else None\n feature_chunk = feature[i:i + chunk] if feature is not None else None\n results.append(fn(input_chunk, style_chunk, alpha_chunk, feature_chunk))\n return torch.cat(results, 0)\n return ret", "def collate(self, batch):\n \n images = []\n indices = []\n roi_size = 5 if self.Train else 4\n rois = torch.zeros((len(batch), 20, roi_size), dtype=torch.float32)\n rois = rois.to(batch[0][1].device)\n \n for _b in range(len(batch)):\n # Accumulate patches:\n images.append(batch[_b][0].to(torch.float32))\n indices.append(batch[_b][2])\n \n # Accumulate ROI:\n \"\"\"\n image_num = torch.Tensor([_b]).expand(batch[_b][1].size(0))\n image_num = image_num.type(batch[_b][1].dtype).view(-1,1)\n image_num = image_num.to(batch[_b][1].device)\n _roi = torch.cat([image_num, batch[_b][1]], dim=1)\n rois = torch.cat([rois, _roi], dim=0)\n \"\"\"\n num_boxes = batch[_b][1].size(0)\n rois[_b,:num_boxes,:] = batch[_b][1]\n \n \n # Stack outputs and return\n batch = [torch.stack(images, dim=0), rois, torch.Tensor(indices)]\n return batch", "def collate(items):\n # return batch items as a list\n return items", "def coco_collate_fn_inferece(vocab, batch):\n # batch is a list, and each element is (image, objs, boxes, triplets)\n all_imgs, all_boxes, all_triplets, all_triplet_type, all_source_edges = [], [], [], [], []\n all_objs = []\n all_masks = []\n all_image_ids = []\n\n max_triplets = 0\n max_objects = 0\n for i, (img, objs, boxes, triplets, triplet_type, source_edges, masks, image_id) in enumerate(batch):\n O = objs[list(objs.keys())[0]].size(0)\n T = triplets.size(0)\n\n if max_objects < O:\n max_objects = O\n\n if max_triplets < T:\n max_triplets = T\n\n for i, (img, objs, boxes, triplets, triplet_type, source_edges, masks, image_id) in enumerate(batch):\n all_imgs.append(img[None])\n all_image_ids.append(image_id)\n O, T = objs[list(objs.keys())[0]].size(0), triplets.size(0)\n\n # Padded objs\n attributes = list(objs.keys())\n sorted(attributes)\n attributes_to_index = {attributes[i]: i for i in range(len(attributes))}\n attributes_objects = torch.zeros(len(attributes), max_objects, dtype=torch.long)\n\n for k, v in objs.items():\n # Padded objects\n if max_objects - O > 0:\n zeros_v = torch.zeros(max_objects - O, dtype=torch.long)\n padd_v = torch.cat([v, zeros_v])\n else:\n padd_v = v\n attributes_objects[attributes_to_index[k], :] = padd_v\n attributes_objects = attributes_objects.transpose(1, 0)\n\n # Padded boxes\n if max_objects - O > 0:\n padded_boxes = torch.FloatTensor([[-1, -1, -1, -1]]).repeat(max_objects - O, 1)\n boxes = torch.cat([boxes, padded_boxes])\n\n # Padded masks\n if masks is not None and max_objects - O > 0:\n padded_masks = torch.zeros([max_objects - O, masks.size(1), masks.size(2)]).type(torch.LongTensor)\n masks = torch.cat([masks, padded_masks])\n\n # Padded 
triplets\n if max_triplets - T > 0:\n padded_triplets = torch.LongTensor([[0, vocab[\"pred_name_to_idx\"][\"__padding__\"], 0]]).repeat(\n max_triplets - T, 1)\n triplets = torch.cat([triplets, padded_triplets])\n triplet_type = torch.cat([triplet_type, torch.LongTensor([0]*(max_triplets - T))])\n source_edges = torch.cat([source_edges, torch.LongTensor([vocab[\"pred_name_to_idx\"][\"__padding__\"]]*(max_triplets - T))])\n\n all_objs.append(attributes_objects)\n all_boxes.append(boxes)\n all_triplets.append(triplets)\n if masks is not None:\n all_masks.append(masks)\n else:\n all_masks = None\n all_triplet_type.append(triplet_type)\n all_source_edges.append(source_edges)\n\n all_imgs = torch.cat(all_imgs)\n all_objs = torch.stack(all_objs, dim=0)\n all_boxes = torch.stack(all_boxes, dim=0)\n if all_masks is not None:\n all_masks = torch.stack(all_masks, dim=0)\n all_triplets = torch.stack(all_triplets, dim=0)\n # all_image_ids = torch.LongTensor(all_image_ids)\n all_triplet_type = torch.stack(all_triplet_type, dim=0)\n all_source_edges = torch.stack(all_source_edges, dim=0)\n all_image_ids = torch.LongTensor(all_image_ids)\n\n out = (all_imgs, all_objs, all_boxes, all_triplets, all_triplet_type, all_source_edges, all_masks, all_image_ids)\n return out", "def custom_collate_detection(\n batch: List[Dict[str, Tensor]], groundtruth: bool = True\n) -> Dict[str, Union[Any, List[Tensor]]]:\n\n #box_coordinates_list = []\n class_list = []\n blob_centers_list = []\n\n for element in batch:\n #box_coordinates_list.append(element.pop(\"det_boxcord\"))\n class_list.append(element.pop(\"det_class\"))\n blob_centers_list.append(element.pop(\"blob_centers\"))\n\n batch = default_collate(batch)\n #batch[\"det_boxcord\"] = box_coordinates_list\n batch[\"det_class\"] = class_list\n batch[\"blob_centers\"] = blob_centers_list\n\n return batch", "def collate(\n batch,\n config,\n plate,\n base_directory=\"../..\",\n column=None,\n munge=False,\n csv_dir=\"analysis\",\n aws_remote=None,\n aggregate_only=False,\n tmp_dir=\"/tmp\",\n overwrite=False,\n add_image_features=True,\n image_feature_categories=[\"Granularity\", \"Texture\", \"ImageQuality\", \"Threshold\"],\n printtoscreen=True,\n):\n\n from pycytominer.cyto_utils.cells import SingleCells\n\n # Set up directories (these need to be abspaths to keep from confusing makedirs later)\n input_dir = pathlib.Path(f\"{base_directory}/analysis/{batch}/{plate}/{csv_dir}\")\n backend_dir = pathlib.Path(f\"{base_directory}/backend/{batch}/{plate}\")\n cache_backend_dir = pathlib.Path(f\"{tmp_dir}/backend/{batch}/{plate}\")\n\n aggregated_file = pathlib.Path(f\"{backend_dir}/{plate}.csv\")\n backend_file = pathlib.Path(f\"{backend_dir}/{plate}.sqlite\")\n cache_backend_file = pathlib.Path(f\"{cache_backend_dir}/{plate}.sqlite\")\n\n if not aggregate_only:\n if os.path.exists(cache_backend_file):\n if not overwrite:\n sys.exit(\n f\"An SQLite file for {plate} already exists at {cache_backend_file} and overwrite is set to False. 
Terminating.\"\n )\n else:\n os.remove(cache_backend_file)\n\n for eachdir in [input_dir, backend_dir, cache_backend_dir]:\n if not os.path.exists(eachdir):\n os.makedirs(eachdir, exist_ok=True)\n\n if aws_remote:\n remote_input_dir = f\"{aws_remote}/analysis/{batch}/{plate}/{csv_dir}\"\n\n remote_backend_file = f\"{aws_remote}/backend/{batch}/{plate}/{plate}.sqlite\"\n\n remote_aggregated_file = f\"{aws_remote}/backend/{batch}/{plate}/{plate}.csv\"\n\n sync_cmd = f\"aws s3 sync --exclude * --include */Cells.csv --include */Nuclei.csv --include */Cytoplasm.csv --include */Image.csv {remote_input_dir} {input_dir}\"\n if printtoscreen:\n print(f\"Downloading CSVs from {remote_input_dir} to {input_dir}\")\n run_check_errors(sync_cmd)\n\n if printtoscreen:\n print(f\"Ingesting {input_dir}\")\n # Run cytominer-database ingest\n if munge:\n cytominer_database.munge.munge(config_path=config, source=input_dir)\n\n cytominer_database.ingest.seed(\n source=input_dir,\n target=f\"sqlite:///{cache_backend_file}\",\n config_file=config,\n )\n\n # Create a sqlite3 connection\n with sqlite3.connect(cache_backend_file, isolation_level=None) as connection:\n cursor = connection.cursor()\n if column:\n if print:\n print(f\"Adding a Metadata_Plate column based on column {column}\")\n cursor.execute(\"ALTER TABLE Image ADD COLUMN Metadata_Plate TEXT;\")\n cursor.execute(f\"UPDATE image SET Metadata_Plate ={column};\")\n\n if printtoscreen:\n print(f\"Indexing database {cache_backend_file}\")\n cursor.execute(\n \"CREATE INDEX IF NOT EXISTS table_image_idx ON Image(TableNumber, ImageNumber);\"\n )\n for eachcompartment in [\"Cells\", \"Cytoplasm\", \"Nuclei\"]:\n cursor.execute(\n f\"\"\"CREATE INDEX IF NOT EXISTS table_image_object_{eachcompartment.lower()}_idx \n ON {eachcompartment}(TableNumber, ImageNumber, ObjectNumber);\"\"\"\n )\n cursor.execute(\n \"CREATE INDEX IF NOT EXISTS plate_well_image_idx ON Image(Metadata_Plate, Metadata_Well);\"\n )\n cursor.close()\n connection.close()\n\n if aws_remote:\n if printtoscreen:\n print(f\"Uploading {cache_backend_file} to {remote_backend_file}\")\n cp_cmd = [\"aws\", \"s3\", \"cp\", cache_backend_file, remote_backend_file]\n run_check_errors(cp_cmd)\n\n if printtoscreen:\n print(\n f\"Removing analysis files from {input_dir} and {cache_backend_dir}\"\n )\n import shutil\n\n shutil.rmtree(input_dir)\n\n if printtoscreen:\n print(f\"Renaming {cache_backend_file} to {backend_file}\")\n os.rename(cache_backend_file, backend_file)\n\n if printtoscreen:\n print(f\"Aggregating sqlite:///{backend_file}\")\n\n if aggregate_only and aws_remote:\n remote_backend_file = f\"{aws_remote}/backend/{batch}/{plate}/{plate}.sqlite\"\n\n remote_aggregated_file = f\"{aws_remote}/backend/{batch}/{plate}/{plate}.csv\"\n\n cp_cmd = [\"aws\", \"s3\", \"cp\", remote_backend_file, backend_file]\n if printtoscreen:\n print(\n f\"Downloading SQLite files from {remote_backend_file} to {backend_file}\"\n )\n run_check_errors(cp_cmd)\n\n if not os.path.exists(backend_file):\n sys.exit(f\"{backend_file} does not exist. 
Exiting.\")\n\n if add_image_features:\n pass\n else:\n image_feature_categories = None # defensive but not sure what will happen if we give a list but set to False\n\n database = SingleCells(\n f\"sqlite:///{backend_file}\",\n aggregation_operation=\"mean\",\n add_image_features=add_image_features,\n image_feature_categories=image_feature_categories,\n )\n database.aggregate_profiles(output_file=aggregated_file)\n\n if aws_remote:\n if printtoscreen:\n print(f\"Uploading {aggregated_file} to {remote_aggregated_file}\")\n csv_cp_cmd = [\"aws\", \"s3\", \"cp\", aggregated_file, remote_aggregated_file]\n run_check_errors(csv_cp_cmd)\n\n if printtoscreen:\n print(f\"Removing backend files from {backend_dir}\")\n import shutil\n\n shutil.rmtree(backend_dir)", "def collate_fn_padd(batch):\n # get sequence lengths\n spects = [t[0] for t in batch]\n segs = [t[1] for t in batch]\n labels = [t[2] for t in batch]\n lengths = [t[3] for t in batch]\n fnames = [t[4] for t in batch]\n\n padded_spects = torch.nn.utils.rnn.pad_sequence(spects, batch_first=True)\n lengths = torch.LongTensor(lengths)\n\n return padded_spects, segs, labels, lengths, fnames", "def _collate_fn(batch):\n def _pad(seqs, dtype=torch.float32):\n \"\"\" Pads a batch of sequences of varying seq_len. \"\"\"\n assert len(seqs) > 0 and all(x.shape[1:] == seqs[0].shape[1:] for x in seqs)\n lens = torch.LongTensor([len(x) for x in seqs])\n max_seq_len = torch.max(lens)\n\n # padded_seq_dims: (batch, max_seq_len, ...).\n padded_seq_dims = (len(seqs), max_seq_len,) + seqs[0].shape[1:]\n res = torch.zeros(padded_seq_dims, dtype=dtype)\n for i, seq in enumerate(seqs):\n src_len = lens[i]\n res[i, :src_len] = torch.Tensor(seq)\n return res, lens\n\n assert all(len(x) == 2 for x in batch)\n # (1, batch, (seq_len, 68, 3))\n frames, captions = zip(*batch)\n\n # Merge sequences (from tuple of 1D tensor to 2D tensor)\n # (batch, seq_len, ...)\n src_seqs, src_lens = _pad(frames, dtype=torch.float32)\n tgt_seqs, tgt_lens = _pad(captions, dtype=torch.long)\n return src_seqs, src_lens, tgt_seqs, tgt_lens", "def collect_fn(batch):\r\n # max_detection = max(list(map(lambda x: len(x[5]), batch)))\r\n max_detection = max(list(map(lambda x: len(x), batch)))\r\n for i in range(len(batch)):\r\n batch[i] = list(batch[i]) # because the element in the batch is a tuple\r\n dummy = torch.zeros((1,5), dtype=batch[i][5].dtype)\r\n temp = batch[i][5]\r\n # make the detection to the same length in order to stack the\r\n while temp.size(0) < max_detection:\r\n temp = torch.cat((temp, dummy))\r\n batch[i][5] = temp\r\n \r\n return default_collate(batch)", "def collate_fn(data):\n images, idxs, captions = zip(*data)\n # Merge images (from tuple of 3D tensor to 4D tensor).\n images = torch.stack(images, 0)\n batch_size = images.shape[0]\n # p\n nums = []\n for idx in idxs:\n num = [0] * num_concept\n for id in idx:\n num[id[1]] = 1\n nums.append(num)\n concepts = torch.FloatTensor(nums)\n lengths = [len(cap) for cap in captions]\n targets = torch.zeros(len(captions), max(lengths)).long()\n for i, cap in enumerate(captions):\n end = lengths[i]\n targets[i, :end] = cap[:end]\n return images,concepts, targets", "def test_custom_collate() -> None:\n metadata = PatientMetadata(patient_id='42')\n foo = \"foo\"\n d1 = {foo: 1, SAMPLE_METADATA_FIELD: \"something\"}\n d2 = {foo: 2, SAMPLE_METADATA_FIELD: metadata}\n result = collate_with_metadata([d1, d2])\n assert foo in result\n assert SAMPLE_METADATA_FIELD in result\n assert isinstance(result[SAMPLE_METADATA_FIELD], list)\n 
assert result[SAMPLE_METADATA_FIELD] == [\"something\", metadata]\n assert isinstance(result[foo], torch.Tensor)\n assert result[foo].tolist() == [1, 2]", "def map_flat_values(op, *args, **kwargs):\n # Replace RaggedTensors with their values; and collect the partitions tensors\n # from each RaggedTensor.\n partition_lists = []\n flat_values_nrows = []\n inner_args = _replace_ragged_with_flat_values(args, partition_lists,\n flat_values_nrows)\n inner_kwargs = _replace_ragged_with_flat_values(kwargs, partition_lists,\n flat_values_nrows)\n if not partition_lists:\n return op(*args, **kwargs)\n\n # If we can statically determine that the inputs are incompatible, then raise\n # an error. (We can't guarantee full compatibility statically, so we need to\n # perform some runtime checks too; but this allows us to fail sooner in some\n # cases.)\n if flat_values_nrows:\n flat_values_nrows = set(flat_values_nrows)\n if len(flat_values_nrows) != 1:\n raise ValueError(\"Input RaggedTensors' flat_values must all have the \"\n \"same outer-dimension size. Got sizes: %s\" %\n flat_values_nrows)\n flat_values_nrows = flat_values_nrows.pop() # Get the single element\n else:\n flat_values_nrows = None\n\n partition_dtypes = set(p[0].dtype for p in partition_lists)\n if len(partition_dtypes) > 1:\n if not ragged_config.auto_cast_partition_dtype():\n raise ValueError(\"Input RaggedTensors have mismatched row partition \"\n \"dtypes; use RaggedTensor.with_row_splits_dtype() to \"\n \"convert them to compatible dtypes.\")\n\n partition_lists = [\n [p.with_dtype(dtypes.int64)\n for p in partition_list] # pylint: disable=g-complex-comprehension\n for partition_list in partition_lists\n ]\n\n # Delegate to `op`\n op_output = op(*inner_args, **inner_kwargs)\n # Check that the result has the expected shape (if known).\n if flat_values_nrows is not None:\n if not op_output.shape[:1].is_compatible_with([flat_values_nrows]):\n raise ValueError(\n \"tf.ragged.map_flat_values requires that the output of `op` have \"\n \"the same outer-dimension size as flat_values of any ragged \"\n \"inputs. 
(output shape: %s; expected outer dimension size: %s)\" %\n (op_output.shape, flat_values_nrows))\n # Compose the result from the transformed values and the partitions.\n return ragged_tensor.RaggedTensor._from_nested_row_partitions( # pylint: disable=protected-access\n op_output,\n _merge_partition_lists(partition_lists),\n validate=False)", "def featurize_batch(\n self, input_record_list: Sequence[InputRecord]\n ) -> Sequence[OutputRecord]:\n return [self.featurize(record) for record in input_record_list]", "def collate(samples: List[List[int]]):\n words = [torch.LongTensor(sample[0]) for sample in samples]\n tags = [torch.LongTensor(sample[1]) for sample in samples]\n # lens of initial sequences, before the padding (should be identical for words and tags)\n lens = [len(seq) for seq in words]\n\n words = pad_sequence(words, padding_value=PAD_ID)\n tags = pack_sequence(tags, enforce_sorted=False)\n return (words, lens), tags", "def collect_fn_local(batch):\r\n max_detection = max(list(map(lambda x: len(x[4]), batch)))\r\n for i in range(len(batch)):\r\n batch[i] = list(batch[i]) # because the element in the batch is a tuple\r\n dummy = torch.zeros((1,128,64), dtype=batch[i][4][0].dtype)\r\n temp = batch[i][4]\r\n # make the detection to the same length in order to stack the\r\n while temp.size(0) < max_detection:\r\n # while len(temp) < max_detection:\r\n temp = torch.cat((temp, dummy))\r\n # temp.append(dummy)\r\n batch[i][4] = temp\r\n \r\n return default_collate(batch)", "def collate_classification(batch):\n\n elem = batch[0]\n elem_type = type(elem)\n\n if isinstance(elem, torch.Tensor):\n out = None\n if torch.utils.data.get_worker_info() is not None:\n # If we're in a background process, concatenate directly into a\n # shared memory tensor to avoid an extra copy\n numel = sum([x.numel() for x in batch])\n storage = elem.storage()._new_shared(numel)\n out = elem.new(storage)\n return torch.stack(batch, 0, out=out)\n elif elem_type.__module__ == 'numpy' and elem_type.__name__ != 'str_' \\\n and elem_type.__name__ != 'string_':\n elem = batch[0]\n if elem_type.__name__ == 'ndarray':\n # array of string classes and object\n if np_str_obj_array_pattern.search(elem.dtype.str) is not None:\n raise TypeError(collate_classification_err_msg_format.format(elem.dtype))\n\n return collate_classification([torch.as_tensor(b) for b in batch])\n elif elem.shape == (): # scalars\n return torch.as_tensor(batch)\n elif isinstance(elem, float):\n return torch.tensor(batch, dtype=torch.float64)\n elif isinstance(elem, int_classes):\n return torch.tensor(batch)\n elif isinstance(elem, string_classes):\n return batch\n elif isinstance(elem, container_abcs.Mapping):\n return {key: collate_classification([d[key] for d in batch]) for key in elem}\n elif isinstance(elem, tuple) and hasattr(elem, '_fields'): # namedtuple\n return elem_type(*(collate_classification(samples) for samples in zip(*batch)))\n elif isinstance(elem, container_abcs.Sequence):\n transposed = zip(*batch)\n return [collate_classification(samples) for samples in transposed]\n\n raise TypeError(collate_classification_err_msg_format.format(elem_type))", "def detection_collate(batch):\n targets = []\n imgs = []\n inputs = {}\n ws = []\n hs = []\n im_ids = []\n scales = []\n crop_box = []\n for sample in batch:\n imgs.append(sample[0]['data'])\n targets.append(torch.FloatTensor(sample[1]))\n ws.append(sample[0]['width'])\n hs.append(sample[0]['height'])\n im_ids.append(sample[0]['image_id'])\n if 'scale' in sample[0]:\n 
scales.append(sample[0]['scale'])\n if 'crop_box' in sample[0]:\n crop_box.append(sample[0]['crop_box'])\n inputs['data'] = torch.stack(imgs, 0)\n inputs['width'] = ws\n inputs['height'] = hs\n inputs['image_id'] = im_ids\n inputs['scale'] = scales\n inputs['crop_box'] = crop_box\n return inputs, targets", "def dict_collate(data):\n\n # Assuming there's at least one instance in the batch\n add_data_keys = data[0].keys()\n collected_data = {k: [] for k in add_data_keys}\n\n for i in range(len(list(data))):\n for k in add_data_keys:\n collected_data[k].append(data[i][k])\n\n for k in add_data_keys:\n collected_data[k] = torch.cat(collected_data[k], 0)\n\n # Passing redundant information for compatibility\n return collected_data, collected_data[\"target\"]", "def format_data(input_data=None, world_size=4, init_batch_size=128):\n bsz = int(init_batch_size / float(world_size))\n data = DataUtil.generate_minibatches(data=input_data, minibatch_size=bsz)\n return data" ]
[ "0.71019036", "0.6699454", "0.66386545", "0.6537352", "0.65236694", "0.6492617", "0.64010304", "0.6400702", "0.6383652", "0.6318756", "0.62709737", "0.6211533", "0.6105718", "0.59736997", "0.59524226", "0.5950687", "0.5934938", "0.5909209", "0.5867133", "0.58421636", "0.5756922", "0.5756922", "0.5756922", "0.5754641", "0.5752697", "0.57488185", "0.5743005", "0.5713209", "0.57090425", "0.5707598", "0.57025915", "0.57025915", "0.56886613", "0.5630162", "0.5625463", "0.5619005", "0.5619005", "0.5592521", "0.55722314", "0.5570825", "0.55587685", "0.55541277", "0.5550567", "0.5545312", "0.5534009", "0.5506911", "0.5506466", "0.54942733", "0.5486434", "0.5443329", "0.54381096", "0.5423914", "0.54088235", "0.5387294", "0.5380346", "0.53680015", "0.53602505", "0.53381586", "0.5330534", "0.53146523", "0.5303053", "0.5293303", "0.5284432", "0.52749795", "0.52648926", "0.5231115", "0.52271026", "0.52065164", "0.5192617", "0.518366", "0.51827806", "0.5175979", "0.51685536", "0.51587564", "0.5146406", "0.5130651", "0.5126198", "0.51160884", "0.5103471", "0.5095086", "0.50877494", "0.50767297", "0.5040221", "0.50214964", "0.5002945", "0.5002869", "0.49805343", "0.4968363", "0.49619126", "0.4952368", "0.49323443", "0.49195096", "0.48901698", "0.48767495", "0.48724228", "0.4858773", "0.48562244", "0.48544475", "0.4852535", "0.48404992" ]
0.67624515
1
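Most of the negative passages in the record above are PyTorch collate functions that pad variable-length samples to a common length before stacking them into a batch. A minimal sketch of that shared pattern, assuming each sample is a (sequence_tensor, label) pair; the field layout is illustrative and not taken from any single passage:

import torch
from torch.nn.utils.rnn import pad_sequence

def pad_collate(batch):
    # Each sample is (sequence_tensor, label); sequences may differ in length.
    seqs, labels = zip(*batch)
    lengths = torch.tensor([len(s) for s in seqs])
    # Zero-pad to the longest sequence in the batch, then stack to (batch, max_len, ...).
    padded = pad_sequence(seqs, batch_first=True, padding_value=0.0)
    return padded, torch.tensor(labels), lengths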
Import a function from a full module path
def import_from(full_name):
    module_name, function_name = full_name.rsplit('.', 1)
    mod = import_module(module_name)
    return getattr(mod, function_name)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def import_module(self, location, name):", "def load_function(path):\r\n module_path, _, name = path.rpartition('.')\r\n return getattr(import_module(module_path), name)", "def load_function(path):\r\n module_path, _, name = path.rpartition('.')\r\n return getattr(import_module(module_path), name)", "def load_function(path):\r\n module_path, _, name = path.rpartition('.')\r\n return getattr(import_module(module_path), name)", "def import_function(name: str):\n module_name, function_name = name.rsplit(\".\", 1)\n module = importlib.import_module(module_name)\n return getattr(module, function_name)", "def import_from_path(module: str, path: str, name: str):\n\n spec = importlib.util.spec_from_file_location(module, path)\n foo = importlib.util.module_from_spec(spec)\n spec.loader.exec_module(foo)\n return getattr(foo, name)", "def import_from_cwd(module, imp=..., package=...):\n ...", "def importFromPath(filename):\n try:\n path, name = os.path.split(filename)\n name, ext = os.path.splitext(name)\n file, filename, data = imp.find_module(name, [path])\n importedModule = imp.load_module(name, file, filename, data)\n except Exception as ae:\n raise Exception('Importing module '+ filename + ' at ' + path + os.sep + name + ' failed with error '+ str(ae))\n return importedModule", "def relative_import(path):\n caller_path = os.path.abspath(inspect.getfile(inspect.currentframe().f_back))\n\n script_path = os.path.abspath(os.path.join(os.path.dirname(caller_path), path))\n script_name = os.path.splitext(os.path.basename(script_path))[0]\n\n sys.path.append(os.path.dirname(script_path))\n try:\n module = importlib.import_module(script_name)\n importlib.reload(module)\n return module\n finally:\n del sys.path[-1]", "def import_module(module):\n return importlib.import_module(module)", "def import_module(name, path):\n spec = importlib.util.spec_from_file_location(name, path)\n module = importlib.util.module_from_spec(spec)\n spec.loader.exec_module(module)\n return module", "def import_by_source(path: str):\n\n module = splitext(basename(path))[0]\n\n sys.path.append(dirname(path))\n\n spec = importlib.util.spec_from_file_location(module, path)\n module = importlib.util.module_from_spec(spec)\n spec.loader.exec_module(module)\n\n sys.path.pop()\n\n return module", "def import_module_from(mod_path):\n if '.' 
in mod_path:\n bits = mod_path.split('.')\n mod_name = bits.pop()\n mod_path = '.'.join(bits)\n return import_module(mod_path, mod_name)\n else:\n return import_module(mod_path)", "def import_module(module, from_where):\n from_module = __import__(from_where, globals(), locals(), [module])\n return getattr(from_module, module)", "def import_module_from_module_path(path):\n return SourceFileLoader('', path).load_module()", "def importModule(filename):\n\tfrom os.path import abspath, split, splitext\n\tfrom sys import path\n\tif isPython2():\n\t\tfrom imp import reload\n\telse:\n\t\tfrom importlib import reload\n\t\n\tfilename = adaptPath(filename)\n\tmodulePath = abspath(split(filename)[0])\n\tmoduleName = splitext(split(filename)[1])[0]\n\t\n\tif not modulePath in path:\n\t\tpath.append (modulePath)\n\tmodule = __import__(moduleName)\n\treload (module)\n\treturn module", "def _import_module(name):\r\n __import__(name)\r\n return sys.modules[name]", "def _import_module(name):\r\n __import__(name)\r\n return sys.modules[name]", "def _import_module(name):\r\n __import__(name)\r\n return sys.modules[name]", "def _import_module(name):\n __import__(name)\n return sys.modules[name]", "def _import_module(name):\n __import__(name)\n return sys.modules[name]", "def _import_from(mod, path, mod_dir=None):\n\n if mod in sys.modules:\n return sys.modules[mod]\n\n if mod_dir is None:\n full_mod = mod\n else:\n full_mod = mod_dir.replace(os.sep, '.')\n\n if mod_dir is None:\n mod_dir = mod.replace('.', os.sep)\n\n if not os.path.exists(path):\n return None\n\n source_path = os.path.join(path, mod_dir, '__init__.py')\n if not os.path.exists(source_path):\n source_path = os.path.join(path, mod_dir + '.py')\n\n if not os.path.exists(source_path):\n return None\n\n if os.sep in mod_dir:\n append, mod_dir = mod_dir.rsplit(os.sep, 1)\n path = os.path.join(path, append)\n\n try:\n if sys.version_info < (3, 5):\n mod_info = imp.find_module(mod_dir, [path])\n return imp.load_module(mod, *mod_info)\n\n else:\n package = mod.split('.', 1)[0]\n package_dir = full_mod.split('.', 1)[0]\n package_path = os.path.join(path, package_dir)\n CUSTOM_FINDER.add_module(package, package_path)\n\n return importlib.import_module(mod)\n\n except ImportError:\n return None", "def _load_module(modulepath):\n\n mod = __import__(modulepath)\n path = []\n for token in modulepath.split(\".\")[1:]:\n path.append(token)\n mod = getattr(mod, token)\n return mod", "def _import_from(mod, path, mod_dir=None):\n\n if mod_dir is None:\n mod_dir = mod\n\n if not os.path.exists(path):\n return None\n\n if not os.path.exists(os.path.join(path, mod_dir)):\n return None\n\n try:\n mod_info = imp.find_module(mod_dir, [path])\n return imp.load_module(mod, *mod_info)\n except ImportError:\n return None", "def import_from(module: str, name: str):\n\n module = __import__(module, fromlist=[name])\n return getattr(module, name)", "def import_function(s):\n a = s.split('.')\n j = lambda x: '.'.join(x)\n return getattr(import_module(j(a[:-1])), a[-1])", "def import_file(name: Text, file_path: Text):\n\n spec = spec_from_file_location(f\"luh3417.{name}\", file_path)\n module = module_from_spec(spec)\n spec.loader.exec_module(module)\n\n return module", "def import_module_from_file(f_path, verbose=True):\n # get absolute path\n f_path = os.path.abspath(f_path)\n assert os.path.isfile(f_path)\n\n f_dir = os.path.dirname(f_path)\n f_name = os.path.basename(f_path)\n f_id = os.path.splitext(f_name)[0]\n\n try:\n # add f_dir to system path for later import\n 
sys.path.insert(0, f_dir)\n # import module by name\n module = importlib.import_module(f_id)\n return module\n except ImportError:\n err_str = \"ERROR: Could not import module '{}' from '{}'.\\n\"\n err_str = err_str.format(f_name, f_dir)\n raise ImportError(err_str)", "def call_function_from_import_path(import_path: str) -> Any:\n try:\n callback_func = import_attr(import_path)\n except Exception as e:\n raise ValueError(f\"The import path {import_path} cannot be imported: {e}\")\n\n if not callable(callback_func):\n raise TypeError(f\"The import path {import_path} is not callable.\")\n\n try:\n return callback_func()\n except Exception as e:\n raise RuntimeError(f\"The function {import_path} raised an exception: {e}\")", "def load_function(engine_path):\r\n module_path, _, name = engine_path.rpartition('.')\r\n return getattr(importlib.import_module(module_path), name)", "def import_module(module_name,module_path):\n try:\n if not isinstance(module_path,list):\n module_path = [module_path]\n file,filename,desc = imp.find_module(module_name,module_path)\n globals()[module_name] = imp.load_module(module_name, file, filename, desc)\n return\n except Exception as err:\n print 'import_module error', err\n traceback.print_exc()\n\n sys.exit()", "def load_function(engine_path):\r\n module_path, _, name = engine_path.rpartition('.')\r\n return getattr(import_module(module_path), name)", "def _import_symbol(symbol_path):\n\n components = symbol_path.split(\".\")\n\n module_name = \".\".join(components[:-1])\n symbol_name = components[-1]\n\n module = __import__(module_name, globals(), locals(), [symbol_name])\n symbol = getattr(module, symbol_name)\n\n return symbol", "def import_function(spec, kwargs=None):\n spec = spec.split(':')\n module = spec[0]\n fn = spec[1]\n module = import_module(module)\n fn = getattr(module, fn)\n\n if kwargs is not None:\n fn = functools.partial(fn, **kwargs)\n return fn", "def absolute_import(file_path):\n\n # module name\n _, name, _ = file_parts(file_path)\n\n # load the spec and module\n spec = importlib.util.spec_from_file_location(name, file_path)\n module = importlib.util.module_from_spec(spec)\n spec.loader.exec_module(module)\n\n return module", "def _import_by_path(path):\n module_path, attr_name = path.rsplit('.', 1)\n module = import_module(module_path)\n return getattr(module, attr_name)", "def Importer(self, module, globals='', locals='', fromlist=None):\n if fromlist is None:\n module = module.split('.')[0]\n try:\n return self.modules[module]\n except KeyError:\n raise ImportError()", "def DynamicImport(import_path, alias=dict(), log=None):\n if import_path not in alias and ':' not in import_path:\n raise ValueError(\n 'import_path should be one of {} or '\n 'include \":\", e.g. 
\"locata_wrapper.utils.music:MUSIC\" : '\n '{}'.format(set(alias), import_path))\n if ':' not in import_path:\n import_path = alias[import_path]\n\n module_name, objname = import_path.split(':')\n try:\n m = importlib.import_module(module_name)\n except Exception as e: # NOQA\n log.error('Function specified by my_alg_name not found!')\n sys.exit(1)\n return getattr(m, objname)", "def import_from_path(path_to_module, obj_name = None):\n module_name = path_to_module.replace(\"/\",\".\").strip(\".py\")\n module = import_module(module_name)\n if obj_name == None:\n return module\n obj = getattr(module, obj_name)\n return obj", "def quick_import(module_name, path=None, build=False, suffix=\"so\", with_html=False, re_build_func=re_build_func,\n re_build_func_kwargs=None\n ):\n\n def_pwd(path)\n print(\"Move to {}\".format(os.getcwd()))\n\n if build:\n if re_build_func_kwargs is None:\n re_build_func(module_name, with_html)\n else:\n re_build_func(**re_build_func_kwargs)\n\n ext = [i for i in os.listdir() if module_name in i and suffix in i]\n if len(ext) > 0:\n module = import_module(module_name, os.getcwd())\n msg = \"The {module_name} module methods:{methods}\"\n names = dir(module)\n names = [i for i in names if \"__\" not in i]\n print(msg.format(module_name=module_name, methods=names))\n return module\n else:\n raise FileNotFoundError(\": There is no './{}.***.{}' in '{}',\\n\".format(module_name, suffix, path),\n \"There are just {},\\n\".format(os.listdir()),\n \"Please try to build=Ture again.\")", "def import_module(name):\n __import__(name)\n return sys.modules[name]", "def get_function(function_path):\n try:\n mod_name, func_name = function_path.rsplit('.', 1)\n mod = import_module(mod_name)\n except ImportError as e:\n raise ImproperlyConfigured(('Error importing module %s: \"%s\"' %\n (mod_name, e)))\n return getattr(mod, func_name)", "def test_import_local_function(self):\n import_function(determine_package(f))\n assert f() == \"My name is f.\"", "def import_(filename):\n (path, name) = os.path.split(filename)\n (name, ext) = os.path.splitext(name)\n try:\n return sys.modules[name]\n except KeyError:\n pass\n try:\n file, filename, data = imp.find_module(name, [path])\n except ImportError:\n print('No module {} found'.format(name))\n try:\n mod = imp.load_module(name, file, filename, data)\n return mod\n except UnboundLocalError:\n pass\n finally:\n # Since we may exit via an exception, close fp explicitly.\n try:\n if file:\n file.close()\n except UnboundLocalError:\n if not os.path.exists(path):\n os.makedirs(path)\n from shutil import copyfile\n if os.name == 'nt':\n copyfile(os.path.join(path_to_module, 'models\\myfitmodels.py'), filename)\n else:\n copyfile(os.path.join(path_to_module, './models/myfitmodels.py'), filename)\n # open(filename, 'a').close()", "def load_module(path: os.PathLike):\n path = Path(path)\n pwd = Path(os.getcwd())\n os.chdir(path.parent)\n try:\n mod = import_module(path.stem)\n except ModuleNotFoundError as err:\n raise err\n finally:\n os.chdir(pwd)\n return mod", "def __import_locustfile__(filename, path):\n try:\n # Python 3 compatible\n source = importlib.machinery.SourceFileLoader(os.path.splitext(locustfile)[0], path)\n imported = source.load_module()\n except AttributeError:\n # Python 2.7 compatible\n import imp\n imported = imp.load_source(os.path.splitext(locustfile)[0], path)\n\n return imported", "def import_module(module_name, path):\n file, path, description = imp.find_module(module_name, [path])\n # Close the .so file after load.\n with 
file:\n return imp.load_module(module_name, file, path, description)", "def import_module(path, package=None):\n if path.startswith('.'):\n if not package:\n raise TypeError(\"Relative imports require the 'package' argument\")\n start = 0\n while path[start] == \".\" or start < len(path):\n start += 1\n path = _resolve_name(path[start:], package, start)\n __import__(path)\n\n return sys.modules[path]", "def my_import(name):\n components = name.split('.')\n mod = __import__(components[0], globals(), locals(), components[1:], -1)\n for comp in components[1:]:\n mod = getattr(mod, comp)\n return mod", "def _importer(name, root_package=False, relative_globals=None, level=0):\n return __import__(name, locals=None, # locals has no use\n globals=relative_globals,\n fromlist=[] if root_package else [None],\n level=level)", "def import_function(\n name: Optional[str]\n) -> Optional[Callable]:\n\n if name is None:\n return None\n\n module_name, function_name = name.rsplit('.', maxsplit=1)\n function_module = import_module(module_name)\n function = getattr(function_module, function_name)\n\n return function", "def import_from_file(module_name: str, filepath: str):\n return SourceFileLoader(module_name, filepath).load_module()", "def import_module_by_name(mod_name):\n return importlib.__import__(mod_name)", "def import_module(name, package=None):\n level = 0\n if name.startswith('.'):\n if not package:\n msg = (\"the 'package' argument is required to perform a relative \"\n \"import for {!r}\")\n raise TypeError(msg.format(name))\n for character in name:\n if character != '.':\n break\n level += 1\n return _bootstrap._gcd_import(name[level:], package, level)", "def import_module(name, package=None): # NOQA\r\n if name.startswith('.'):\r\n if not package:\r\n raise TypeError(\"relative imports require the 'package' \"\r\n \"argument\")\r\n level = 0\r\n for character in name:\r\n if character != '.':\r\n break\r\n level += 1\r\n name = _resolve_name(name[level:], package, level)\r\n __import__(name)\r\n return sys.modules[name]", "def import_module(mod_path, mod_name = None):\n if mod_name is None:\n try:\n return sys.modules[mod_path]\n except KeyError:\n __import__(mod_path)\n return sys.modules[mod_path]\n else:\n if mod_name.find('.') != -1:\n raise ValueError('second argument to import_module must not contain dots')\n mod_ = __import__(mod_path, globals(), locals(), [mod_name,], -1)\n return getattr(mod_, mod_name)", "def import_source(module_name):\n module_file_path = module_name.__file__\n module_name = module_name.__name__\n\n module_spec = importlib.util.spec_from_file_location(module_name, module_file_path)\n module = importlib.util.module_from_spec(module_spec)\n module_spec.loader.exec_module(module)\n print(dir(module))\n\n msg = \"The {module_name} module has the following methods:{methods}\"\n print(msg.format(module_name=module_name, methods=dir(module)))", "def load_module(path):\n spec = spec_from_file_location(\"module.name\", path)\n module = module_from_spec(spec)\n try:\n spec.loader.exec_module(module)\n except Exception as err:\n # ToDo: Append functions found from spec.loader.get_code(\"module.name\")\n # To some hidden attribute of the module object to be returned.\n warn(f'Exception when loading module {path}: \\n{err}')\n return module", "def load_modules_manually():\n #cmd_folder = os.path.realpath(os.path.abspath(os.path.split(inspect.getfile( inspect.currentframe() ))[0]))\n cmd_folder = '../myutils/'\n if cmd_folder not in sys.path:\n sys.path.insert(0, cmd_folder)\n 
#print sys.path", "def importfile(path):\n path = getpath(path, custom=True)\n assert _os.path.isfile(path) == True\n\n file_handler = _SourceFileLoader(*path.splitpath())\n return file_handler", "def import_from_string(import_path: str) -> Any:\n\n import_classname = import_path.split(\".\")[-1]\n import_module = \".\".join(import_path.split(\".\")[:-1])\n\n module = importlib.import_module(import_module)\n return getattr(module, import_classname)", "def import_module(name, package=None):\n if name.startswith('.'):\n if not package:\n raise TypeError(\"relative imports require the 'package' argument\")\n level = 0\n for character in name:\n if character != '.':\n break\n level += 1\n name = _resolve_name(name[level:], package, level)\n __import__(name)\n return sys.modules[name]", "def load_module(name, path):\n loader = importlib.machinery.SourceFileLoader(name, path)\n module = types.ModuleType(loader.name)\n loader.exec_module(module)\n return module", "def module_file(module):\n ...", "def load_functions(self, module_name, path=None):\n# try:\n if True:\n if not path:\n path = os.getcwd()\n if not isinstance(path,list):\n path = [path]\n file,filename,desc = imp.find_module(module_name,path)\n funcs = imp.load_module(module_name, file, filename, desc)\n if hasattr(funcs,'_init'):\n getattr(funcs,'_init')(self)\n attrs = [attr for attr in funcs.__dict__ \n if not attr.startswith('__')\n and attr is not '_init'\n and not hasattr(getattr(funcs,attr),'__base__')]\n for attr in attrs:\n try:\n print 'Adding', attr, 'to', self._name\n self.add_function(getattr(funcs,attr))\n except:\n print 'Error adding', attr, 'to', self._name", "def getModulePath(*args, moduleName: AnyStr=\"\", **kwargs)->AnyStr:\n pass", "def import_load(pkg, name):\n def loader():\n mod = importlib.import_module(pkg)\n return getattr(mod, name)\n return loader", "def load_module(name):\n return __import__(\"metaswitch.%s\" % name,\n fromlist=[\"ROUTES\"])", "def import_module(self, module): # pylint: disable=R0201\r\n if isinstance(module, list):\r\n all_modules = module\r\n else:\r\n all_modules = [module]\r\n for mod in all_modules:\r\n globals()[mod] = __import__(mod.strip())", "def ppimport(name):\n global _ppimport_is_enabled\n\n level = 1\n parent_frame = p_frame = _get_frame(level)\n while not p_frame.f_locals.has_key('__name__'):\n level = level + 1\n p_frame = _get_frame(level)\n\n p_name = p_frame.f_locals['__name__']\n if p_name=='__main__':\n p_dir = ''\n fullname = name\n elif p_frame.f_locals.has_key('__path__'):\n # python package\n p_path = p_frame.f_locals['__path__']\n p_dir = p_path[0]\n fullname = p_name + '.' + name\n else:\n # python module\n p_file = p_frame.f_locals['__file__']\n p_dir = os.path.dirname(p_file)\n fullname = p_name + '.' 
+ name\n\n # module may be imported already\n module = sys.modules.get(fullname)\n if module is not None:\n if _ppimport_is_enabled or isinstance(module, types.ModuleType):\n return module\n return module._ppimport_importer()\n\n so_ext = _get_so_ext()\n py_exts = ('.py','.pyc','.pyo')\n so_exts = (so_ext,'module'+so_ext)\n\n for d,n,fn,e in [\\\n # name is local python module or local extension module\n (p_dir, name, fullname, py_exts+so_exts),\n # name is local package\n (os.path.join(p_dir, name), '__init__', fullname, py_exts),\n # name is package in parent directory (scipy specific)\n (os.path.join(os.path.dirname(p_dir), name), '__init__', name, py_exts),\n ]:\n location = _is_local_module(d, n, e)\n if location is not None:\n fullname = fn\n break\n\n if location is None:\n # name is to be looked in python sys.path.\n fullname = name\n location = 'sys.path'\n\n # Try once more if module is imported.\n # This covers the case when importing from python module\n module = sys.modules.get(fullname)\n\n if module is not None:\n if _ppimport_is_enabled or isinstance(module,types.ModuleType):\n return module\n return module._ppimport_importer()\n # It is OK if name does not exists. The ImportError is\n # postponed until trying to use the module.\n\n loader = _ModuleLoader(fullname,location,p_frame=parent_frame)\n if _ppimport_is_enabled:\n return loader\n\n return loader._ppimport_importer()", "def load_script(filename):\n path, module_name, ext = _extract_script_components(filename)\n add_search_path(path)\n return importlib.import_module(module_name)\n # return _load_module(module_name)", "def import_python_module_by_filename(name, module_filename):\n\n sys.path.append(abspath(dirname(module_filename)))\n spec = importlib.util.spec_from_file_location(\n name,\n location=module_filename)\n imported_module = importlib.util.module_from_spec(spec)\n spec.loader.exec_module(imported_module)\n return imported_module", "def xocImport(self, name, *args, **kwargs):\n trace(\"Import invoked:\", name, kwargs.keys())\n if name in sys.builtin_module_names:\n trace(\"Loading builtin module\", name)\n return self.load_module(name)\n else:\n return self.oldImport(name, *args, **kwargs)", "def import_module(self, path):\n\n try:\n module = import_module(path)\n except ImportError:\n self.error('Failed to Load module: {0}'.format(path))\n return False\n else:\n self.out('Loaded module: {0}'.format(path))\n return module", "def import_from_dotted_path(dotted_names, path=None):\n next_module, remaining_names = dotted_names.split('.', 1)\n file, pathname, description = imp.find_module(next_module, path)\n module = imp.load_module(next_module, file, pathname, description)\n\n if hasattr(module, remaining_names):\n return getattr(module, remaining_names)\n\n if '.' 
not in remaining_names:\n return module\n\n return import_from_dotted_path(remaining_names, path=module.__path__)", "def pseudo_import( pkg_name ):\n init = os.path.join( pkg_name, '__init__.py' )\n\n # remove imports and 'from foo import'\n lines = open(init, 'r').readlines()\n lines = filter( lambda l: l.startswith('__'), lines)\n\n code = '\\n'.join(lines)\n\n import imp\n module = imp.new_module(pkg_name)\n\n exec(code, module.__dict__)\n return module", "def get_full_path_of_import(import_module_reference):\n f = inspect.getfile(import_module_reference)\n p = os.path.split(f)\n return p[0]", "def load_pyfunc(path, run_id=None, suppress_warnings=False):\n if run_id:\n path = tracking.utils._get_model_log_dir(path, run_id)\n conf = _load_model_conf(path)\n model_py_version = conf.get(PY_VERSION)\n if not suppress_warnings:\n _warn_potentially_incompatible_py_version_if_necessary(model_py_version=model_py_version)\n if CODE in conf and conf[CODE]:\n code_path = os.path.join(path, conf[CODE])\n sys.path = [code_path] + _get_code_dirs(code_path) + sys.path\n data_path = os.path.join(path, conf[DATA]) if (DATA in conf) else path\n return importlib.import_module(conf[MAIN]).load_pyfunc(data_path)", "def test_taskfile_import(monkeypatch, modpath):\n monkeypatch.setattr(loadlimit.importhook, 'lstaskfiles', fake_lstaskfiles)\n monkeypatch.setattr(loadlimit.importhook, 'SourceFileLoader',\n FakeSourceFileLoader)\n\n taskfiles = ['a_{}.py'.format(i) for i in range(10)]\n names = [splitext(n)[0] for n in taskfiles]\n pypath = ['{}.{}'.format(modpath, n) for n in names]\n randpath = choice(pypath)\n\n assert modpath not in sys.modules\n assert all(not p.startswith(modpath) for p in sys.modules)\n\n sys.meta_path.append(TaskImporter(*taskfiles))\n taskfile = import_module(randpath)\n\n expected = set(pypath) | set([modpath])\n result = set(p for p in sys.modules if p.startswith(modpath))\n\n assert modpath in sys.modules\n assert result == expected\n assert taskfile.TEST == randpath", "def import_(*args, **kwargs):\n if using___import__:\n return __import__(*args, **kwargs)\n else:\n return importlib.__import__(*args, **kwargs)", "def test_import_local_method(self):\n import_function(determine_package(LocalClass().foo_method))\n assert f() == \"My name is f.\"", "def _import(self, module, name):\n try:\n return getattr(__import__(module, fromlist=[name]), name)\n except (AttributeError, ImportError):\n msg = \"Failed to load %s from %s: %s\" % (name, module,\n sys.exc_info()[1])\n if not self.fail_silently:\n print(msg)\n else:\n _debug(msg)\n return None", "def path_for_import(name):\n return os.path.dirname(os.path.abspath(import_module(name).__file__))", "def import_by_path(name, path_list):\n try:\n # Handle submodules and additional paths\n path_index = len(sys.path)\n sys.path.extend(path_list)\n # Attempt the actual import\n return __import__(name)\n finally:\n # Safely remove paths\n for path in path_list:\n if sys.path.pop(path_index) != path:\n raise ImportError('Returned path entry from sys.path does not match appended path')", "def load_code(mfile, fname):\n mname = mfile.split('.py')[0].replace('/', '.')\n try:\n mod = __import__(mname, fromlist=['model'])\n func = getattr(mod, fname)\n print(\"load {} {} {}\".format(mfile, func, func.__doc__))\n return func\n except ImportError:\n traceback.print_exc()\n msg = \"Please provide file name with 'def %s' implementation\" % fname\n msg += \"\\nThe file should be available in PYTHONPATH\"\n print(msg)\n raise", "def importer(name) -> 
ContextType:\n try:\n # try importing as a module (using importlib from standard import mechanism)\n return __import__(name, globals=globals(), locals=locals())\n except:\n route_steps = name.split(\".\")\n route_steps = route_steps[1:] if not route_steps[0] else route_steps\n is_name_module, is_name_package = is_module(name), is_package(name)\n assert is_name_module or is_name_package\n file_path = os.path.join(*route_steps)\n if is_name_module:\n file_path = f\"{file_path}.py\"\n else: # name is definitely a package (because of the assertion)\n file_path = os.path.join(file_path, \"__init__.py\")\n spec = importlib.util.spec_from_file_location(name, file_path)\n foo = importlib.util.module_from_spec(spec)\n spec.loader.exec_module(foo)\n return foo", "def import_file(path: Union[PurePath, str]) -> Generator[ModuleType, None, None]:\n\n pathdir = os.path.dirname(path)\n if pathdir in sys.path:\n added_to_sys_path = False\n else:\n sys.path.insert(0, pathdir)\n added_to_sys_path = True\n try:\n name = os.path.basename(path).split(\".\")[0]\n spec = spec_from_file_location(name, str(path))\n assert isinstance(spec, ModuleSpec)\n module = module_from_spec(spec)\n assert isinstance(spec.loader, Loader)\n loader: Loader = spec.loader\n try:\n loader.exec_module(module)\n except Exception as error:\n log.bad(error)\n raise\n yield module\n finally:\n if added_to_sys_path:\n sys.path.remove(pathdir)", "def import_pymodule(scheme):\n if not SchModule._ready:\n raise ValueError(u\"not mounted\")\n\n p = SchModule.DIR.hpath(scheme)\n p = path.join(p, SchModule.PYMODULE)\n p = p.encode(sys.getfilesystemencoding())\n # In load_source(name, path): name is name of module (without extension),\n # path is full path to the file of module\n return imp.load_source(path.splitext(SchModule.PYMODULE)[0], p)", "def python_like_mod_finder(import_line, alt_path=None,\r\n stop_token=None):\r\n if stop_token and '.' 
in stop_token:\r\n stop_token = stop_token.split('.')[-1]\r\n tokens = re.split(r'\\W', import_line)\r\n if tokens[0] in ['from', 'import']:\r\n # find the base location\r\n try:\r\n _, path, _ = imp.find_module(tokens[1])\r\n except ImportError:\r\n if alt_path:\r\n path = osp.join(alt_path, tokens[1])\r\n else:\r\n path = None\r\n if path:\r\n path = osp.realpath(path)\r\n if not tokens[1] == stop_token:\r\n for part in tokens[2:]:\r\n if part in ['import', 'cimport', 'as']:\r\n break\r\n path = osp.join(path, part)\r\n if part == stop_token:\r\n break\r\n # from package import module\r\n if stop_token and not stop_token in path:\r\n for ext in python_like_exts():\r\n fname = '%s%s' % (stop_token, ext)\r\n if osp.exists(osp.join(path, fname)):\r\n return osp.join(path, fname)\r\n # from module import name\r\n for ext in python_like_exts():\r\n fname = '%s%s' % (path, ext)\r\n if osp.exists(fname):\r\n return fname\r\n # if it is a file, return it\r\n if osp.exists(path) and not osp.isdir(path):\r\n return path\r\n # default to the package file\r\n path = osp.join(path, '__init__.py')\r\n if osp.exists(path):\r\n return path", "def f_import(self, name, *args, **kwargs):\r\n import types\r\n if self.locals_ptr is not None and name in self.locals_ptr and isinstance(self.locals_ptr[name], types.ModuleType):\r\n return self.locals_ptr[name]\r\n else:\r\n raise ImportError(\"import not allowed in pseudo-sandbox; try to import '%s' yourself and pass it to the sandbox/template\" % name)", "def load_module(name_or_path):\n if os.path.exists(name_or_path):\n path = name_or_path.rstrip(\"/\")\n modname = os.path.splitext(os.path.basename(path))[0]\n if os.path.isdir(path):\n path = os.path.join(path, \"__init__.py\")\n spec = importlib.util.spec_from_file_location(modname, path)\n mod = importlib.util.module_from_spec(spec)\n spec.loader.exec_module(mod)\n else:\n mod = importlib.import_module(name_or_path)\n try:\n path = mod.__path__[0]\n except AttributeError:\n path = mod.__file__\n return mod, path", "def import_file(filename, context=None):\n\n # First thing to try: see if this is a module and not a file\n if not filename.endswith('.py'):\n module = None\n try:\n # is the module already imported?\n module = sys.modules[filename]\n except KeyError:\n try:\n module = __import__(filename)\n except ImportError:\n pass\n if module is not None:\n if not context is None:\n context[name] = module\n return module\n\n #\n # Parse the filename to get the name of the module to be imported\n #\n if '/' in filename:\n name = (filename).split(\"/\")[-1]\n elif '\\\\' in filename:\n name = (filename).split(\"\\\\\")[-1]\n else:\n name = filename\n\n # NB: endswith accepts tuples of strings starting in python 2.5.\n # For 2.4 compatibility we will call endswith() twice.\n if name.endswith('.py') or name.endswith('.pyc'):\n name = name.rsplit('.', 1)[0]\n if '.' in name:\n raise RuntimeError(\"Invalid python module name '%s'. 
The head of the filename cannot contain a period.\" % filename)\n\n #\n # Get the module if it already exists, and otherwise\n # import it\n #\n try:\n module = sys.modules[name]\n except KeyError:\n dirname = os.path.dirname( os.path.abspath(filename) )\n sys.path.insert( 0, dirname )\n try:\n module = imp.load_source( name, filename )\n except Exception:\n e = sys.exc_info()[1]\n import logging\n logger = logging.getLogger('pyutilib.misc')\n logger.error(\"Failed to load python module=\"+str(filename)+\\\n \":\\n\" + str(e))\n raise\n except:\n import logging\n logger = logging.getLogger(\"pyutilib.misc\")\n logger.error(\"Failed to load python module=\"+str(filename))\n raise\n finally:\n sys.path.remove( dirname )\n #\n # Add module to the give context\n #\n if not context is None:\n context[name] = module\n return module", "def load_module(module_name, file_name):\n from importlib.machinery import SourceFileLoader\n home_dir = os.path.expanduser(\"~\")\n valid_paths = [\n os.path.join(home_dir, \"Google Drive\"),\n os.path.join(home_dir, \"GoogleDrive\"),\n os.path.join(os.path.join(home_dir, \"Desktop\"), \"Google Drive\"),\n os.path.join(os.path.join(home_dir, \"Desktop\"), \"GoogleDrive\"),\n os.path.join(\"C:/\", \"GoogleDrive\"),\n os.path.join(\"C:/\", \"Google Drive\"),\n os.path.join(\"D:/\", \"GoogleDrive\"),\n os.path.join(\"D:/\", \"Google Drive\"),\n ]\n\n drive_path = None\n for path in valid_paths:\n if os.path.isdir(path):\n drive_path = path\n break\n\n if drive_path is None:\n raise Exception(\"Couldn't find google drive folder!\")\n\n utils_path = os.path.join(drive_path, \"_pyutils\")\n print(\"Loading [{}] package...\".format(os.path.join(utils_path,file_name)),flush = True)\n logger_lib = SourceFileLoader(module_name, os.path.join(utils_path, file_name)).load_module()\n print(\"Done loading [{}] package.\".format(os.path.join(utils_path,file_name)),flush = True)\n\n return logger_lib", "def _import(self, module_name):\n # load keywords\n kw = __import__('keywords')\n # set real rpc proxy\n kw.var_cache['proxy'] = device_proxy\n kw.var_cache['reflection'] = reflection_proxy\n kw.var_cache['local'] = local_proxy\n # load script\n __import__(module_name)\n # register all kw func from keywords.kw_func\n self.kw_func.update(kw.kw_func)", "def import_provider_directory():\n for fl in os.listdir(os.path.dirname(__file__)):\n if os.path.basename(fl).endswith('.py') and not os.path.basename(fl).startswith('__'):\n importlib.import_module('.' 
+ os.path.basename(fl)[:-3], package=__package__)", "def load_module(module_name: str, module_path: str) -> object:\n spec = module_util.spec_from_file_location(module_name, module_path)\n module = module_util.module_from_spec(spec)\n spec.loader.exec_module(module) # type: ignore\n return module", "def import_package(name):\r\n mod = __import__(name)\r\n components = name.split('.')\r\n for comp in components[1:]:\r\n mod = getattr(mod, comp)\r\n return mod", "def _load_module(module_name):\n last_dot = module_name.rfind('.')\n if last_dot == -1:\n return __import__(module_name, globals(), locals())\n from_module = module_name[:last_dot]\n import_module = module_name[last_dot+1:]\n m = __import__(from_module, globals(), locals(), [import_module])\n return getattr(m, import_module)", "def _import_string(import_name):\n if \".\" in import_name:\n module, obj = import_name.rsplit(\".\", 1)\n else:\n return importlib.import_module(import_name)\n return getattr(importlib.import_module(module), obj)", "def mocked_import(*args, **kwargs):\n try:\n return import_module(*args, **kwargs)\n except Exception:\n mocked_module = mock.MagicMock()\n mocked_module.__name__ = args[0]\n return mocked_module" ]
[ "0.7085891", "0.698463", "0.698463", "0.698463", "0.69600534", "0.69119257", "0.6854556", "0.6701928", "0.6670122", "0.6668526", "0.6647451", "0.66429377", "0.66402143", "0.6558814", "0.6548901", "0.652248", "0.6475477", "0.6475477", "0.6475477", "0.6468391", "0.6468391", "0.646328", "0.64550793", "0.64433074", "0.6435672", "0.64285463", "0.6390404", "0.63809305", "0.63442934", "0.6311481", "0.6303931", "0.62921286", "0.62861365", "0.6250831", "0.62389183", "0.6218885", "0.6211747", "0.61864984", "0.6186384", "0.6180358", "0.6180269", "0.6157606", "0.6147549", "0.6146831", "0.61271983", "0.61259806", "0.61037457", "0.6096318", "0.60897475", "0.60894734", "0.60537237", "0.6037214", "0.60151213", "0.600815", "0.6003145", "0.6002727", "0.59960186", "0.5982683", "0.5973565", "0.59592307", "0.59368175", "0.59313726", "0.5894346", "0.58728606", "0.58695877", "0.586822", "0.5818832", "0.58018017", "0.5799558", "0.579267", "0.57844234", "0.57420623", "0.5737138", "0.5732118", "0.57280225", "0.5715935", "0.57138246", "0.5710207", "0.5705979", "0.5697342", "0.56970507", "0.56878716", "0.5644915", "0.5640694", "0.56387526", "0.56076366", "0.56058234", "0.56036675", "0.55967534", "0.5590476", "0.55888385", "0.55815333", "0.555496", "0.55546427", "0.55522716", "0.554722", "0.5531774", "0.5524662", "0.55237085", "0.5517564" ]
0.73028684
0
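The positive document in the record above resolves a dotted path by splitting once from the right, importing the module part, and looking up the attribute on it. A minimal usage sketch, assuming import_module comes from the standard importlib package; the json.dumps target is only an illustration:

from importlib import import_module

def import_from(full_name):
    # "package.module.attr" -> ("package.module", "attr")
    module_name, function_name = full_name.rsplit('.', 1)
    mod = import_module(module_name)
    return getattr(mod, function_name)

dumps = import_from('json.dumps')   # resolves the stdlib function json.dumps
print(dumps({'ok': True}))          # -> {"ok": true}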
Creates a new RedisBloom client.
def __init__(self, *args, **kwargs): Redis.__init__(self, *args, **kwargs) # Set the module commands' callbacks MODULE_CALLBACKS = { self.BF_RESERVE : bool_ok, #self.BF_ADD : spaceHolder, #self.BF_MADD : spaceHolder, #self.BF_INSERT : spaceHolder, #self.BF_EXISTS : spaceHolder, #self.BF_MEXISTS : spaceHolder, #self.BF_SCANDUMP : spaceHolder, #self.BF_LOADCHUNK : spaceHolder, self.BF_INFO : BFInfo, self.CF_RESERVE : bool_ok, #self.CF_ADD : spaceHolder, #self.CF_ADDNX : spaceHolder, #self.CF_INSERT : spaceHolder, #self.CF_INSERTNX : spaceHolder, #self.CF_EXISTS : spaceHolder, #self.CF_DEL : spaceHolder, #self.CF_COUNT : spaceHolder, #self.CF_SCANDUMP : spaceHolder, #self.CF_LOADCHUNK : spaceHolder, self.CF_INFO : CFInfo, self.CMS_INITBYDIM : bool_ok, self.CMS_INITBYPROB : bool_ok, #self.CMS_INCRBY : spaceHolder, #self.CMS_QUERY : spaceHolder, self.CMS_MERGE : bool_ok, self.CMS_INFO : CMSInfo, self.TOPK_RESERVE : bool_ok, self.TOPK_ADD : parseToList, #self.TOPK_QUERY : spaceHolder, #self.TOPK_COUNT : spaceHolder, self.TOPK_LIST : parseToList, self.TOPK_INFO : TopKInfo, } for k, v in six.iteritems(MODULE_CALLBACKS): self.set_response_callback(k, v)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_rbclient(self):\n return RBClient(url=self.TEST_SERVER_URL,\n transport_cls=URLMapTransport)", "def create_connection():\n # REDIS_URL is defined in .env and loaded into the environment by Honcho\n redis_url = os.getenv('REDIS_URL')\n # If it's not defined, use the Redis default\n if not redis_url:\n redis_url = 'redis://localhost:6379'\n urlparse.uses_netloc.append('redis')\n url = urlparse.urlparse(redis_url)\n return redis.StrictRedis(\n host=url.hostname,\n port=url.port,\n db=0,\n password=url.password\n )", "def _CreateRedisClient(self):\n try:\n redis_client = redis.from_url(self._REDIS_URL, socket_timeout=60)\n redis_client.ping()\n except redis.exceptions.ConnectionError:\n redis_client = fakeredis.FakeStrictRedis()\n\n return redis_client", "def _CreateRedisClient(self):\n try:\n redis_client = redis.from_url(self._REDIS_URL, socket_timeout=60)\n redis_client.ping()\n except redis.exceptions.ConnectionError:\n redis_client = fakeredis.FakeStrictRedis()\n\n return redis_client", "def get_redis_client(self):\n\n client = Client(\n #connection_pool=connection_pool,\n host=self.backend_settings.get('HOST', 'localhost'),\n port=int(self.backend_settings.get('PORT', 6379)),\n io_loop=self.io_loop,\n password=self.backend_settings.get('PASSWORD', None),\n selected_db=int(self.backend_settings.get('DB', 0)),\n reconnect_callback=self.listen)\n\n return client", "def client():\n\n client = Client()\n return client", "def create_client(self) -> None:\n pass", "def create_redis_connection(app=None):\n\n if app:\n app.logger.info('Instantiated new redis connection.')\n\n redis_connection = redis.StrictRedis(\n host=\"localhost\",\n port=6379,\n db=0\n )\n\n if not redis_connection.exists('last_queue_idx'):\n redis_connection.set('last_queue_idx', 0)\n\n return redis_connection", "def init_redis_client(\n experiment_secrets: Secrets) -> RedisManagementClient:\n return __azure_client_factory(\"RedisManagementClient\", Secrets)", "def __init__(self, dbname='', client=None, client_args={}):\n assert safechar_re.match(dbname)\n if client is None:\n client = redis.Redis(**client_args)\n self.client = client\n self.schema = schema.Schema()\n self.dbprefix = dbname + ':'\n self.cache_timeout = 1000000 # Number of seconds cached items are kept", "def create_client(self):\n client = iperf3.Client()\n client.duration = self._host[CONF_DURATION]\n client.server_hostname = self._host[CONF_HOST]\n client.port = self._host[CONF_PORT]\n client.num_streams = self._host[CONF_PARALLEL]\n client.protocol = self._host[CONF_PROTOCOL]\n client.verbose = False\n return client", "def __init__(self, host, port):\n self.r = redis.StrictRedis(host=host, port=port)", "def create_client(self, version=None, unstable=False, **kwargs):\n version_data = self._calculate_version(version, unstable)\n return self._create_client(version_data, **kwargs)", "def __init__(self, config):\n self.r = redis.StrictRedis(host=config['REDIS_HOST'],\n port=config['REDIS_PORT'],\n db=config['REDIS_DB'])", "def __init__(self):\n self._redis = redis.Redis(host=\"localhost\", port=6379)\n self._redis.flushdb()", "def configure_client(self):\n self.client = self.get_redis_client()\n return self.client", "def new(\n host: str = \"localhost\",\n port: int = 4110,\n user: str = \"pyserval\",\n passwd: str = \"pyserval\",\n ):\n connection = RestfulConnection(host=host, port=port, user=user, passwd=passwd)\n return LowLevelClient(connection=connection)", "def create_client(name):\n client = Client(name=name)\n 
print(client.client_secret)\n db.session.add(client)\n db.session.commit()\n return client", "def get_redis_client(host='localhost', port=6379, db=0):\n host = os.environ.get('REDIS_HOST') or host\n port = os.environ.get('REDIS_PORT') or port\n return StrictRedis(host=host, port=port, db=db)", "def redis_client(self) -> Redis:\n if self._redis_client is None:\n redis_client = Redis(connection_pool=self.redis_conn_pool)\n\n self._redis_client = redis_client\n\n self._logger.debug(\n \"[%s]: Initialized Redis client: %s\", self.__name__, self._redis_client\n )\n\n return self._redis_client", "def client():\n return IceCubedSyncClient(\"api_key\", \"secret\")", "def mock_redis_client(**kwargs):\n return MockRedis(**kwargs)", "def __init__(self, host=REDIS_HOST, port=REDIS_PORT, password=REDIS_PASSWORD):\n self.db = redis.StrictRedis(host=host, port=port, password=password, decode_responses=True)", "def get_client():\n client_class = _import_by_path(settings.REDISIO_CLIENT_CLASS)\n return client_class(host=settings.REDISIO_HOST,\n port=settings.REDISIO_PORT,\n db=settings.REDISIO_DB)", "def copy(self):\n raise NotImplementedError(\"RedisLocalBloomFilter not support copy\")", "def _getMemcacheClient(self, refresh=False):\n if refresh or not hasattr(self, \"memcacheClient\"):\n\n if config.Memcached.Pools.Default.MemcacheSocket:\n client_addr = \"unix:{}\".format(config.Memcached.Pools.Default.MemcacheSocket)\n else:\n client_addr = \"{}:{}\".format(\n config.Memcached.Pools.Default.BindAddress,\n config.Memcached.Pools.Default.Port,\n )\n self.memcacheClient = ClientFactory.getClient([client_addr], debug=0, pickleProtocol=2)\n return self.memcacheClient", "def make_client(service_key, constructor=None, options=None, **kwargs):\n cloud = get_config(service_key=service_key, options=options, **kwargs)\n if not constructor:\n constructor = cloud_config._get_client(service_key)\n return cloud.get_legacy_client(service_key, constructor)", "def __init__(self, config):\n params = config.get('ccc', {})\n thin = params.get('thin', True)\n self.testnet = config.get('testnet', False)\n\n if thin:\n color_data_class = ThinColorData\n color_data_builder = AidedColorDataBuilder\n else:\n color_data_class = ThickColorData\n color_data_builder = FullScanColorDataBuilder\n\n if thin and not params.get('use_bitcoind', False):\n chromanode_url = params.get('chromanode_url', None)\n if not chromanode_url:\n if self.testnet:\n chromanode_url = \"http://chromanode-tn.bitcontracts.org\"\n else:\n chromanode_url = \"http://chromanode.bitcontracts.org\"\n self.blockchain_state = ChromaBlockchainState(\n chromanode_url,\n self.testnet)\n else:\n self.blockchain_state = BlockchainState.from_url(\n None, self.testnet)\n\n if not thin and not self.testnet:\n try:\n # try fetching transaction from the second block of\n # the bitcoin blockchain to see whether txindex works\n self.blockchain_state.bitcoind.getrawtransaction(\n \"9b0fc92260312ce44e74ef369f5c66bbb85848f2eddd5\"\n \"a7a1cde251e54ccfdd5\")\n except Exception as e:\n # use Electrum to request transactions\n self.blockchain_state = EnhancedBlockchainState(\n \"electrum.cafebitcoin.com\", 50001)\n\n self.store_conn = DataStoreConnection(\n params.get(\"colordb_path\", \"color.db\"))\n self.cdstore = ColorDataStore(self.store_conn.conn)\n self.metastore = ColorMetaStore(self.store_conn.conn)\n self.colormap = ColorMap(self.metastore)\n\n cdbuilder = ColorDataBuilderManager(\n self.colormap, self.blockchain_state, self.cdstore,\n self.metastore, color_data_builder)\n\n 
self.colordata = color_data_class(\n cdbuilder, self.blockchain_state, self.cdstore, self.colormap)", "def get_connection(self, params):\r\n return Redis(connection_pool=self.get_or_create_connection_pool(params))", "def get_client():\n return Client(__address, authkey='strumamor')", "def __init__(self, config):\n # Initialize key variables\n connection_string = (\n '{}:{}'\n ''.format(\n config.memcached_hostname(), config.memcached_port()))\n self.cache = memcache.Client([connection_string], debug=0)", "def make_client(self, context):\n return Client(self.settings['client_routing'], context=context)", "def client_rabbit(url, username, password):\n client = Client(url, username, password)\n return client", "def connect_to_redis():\n return Redis(host=redis_host, port=redis_port, db=0)", "def create_client():\n logger.debug(\"=====create_client fired...\")\n try:\n session = boto3.Session()\n client = session.client('dynamodb', region_name='us-east-1')\n return client\n except ClientError as err:\n logger.error(\n \"[BOTO3_ERROR]Failed to create boto3 client: %s\", str(err))", "def __init__(self):\n self._redis = redis.Redis()\n self._redis.flushdb()", "def create_client(self) -> None:\n self._client = discovery.build('ml', 'v1')", "def fake_db() -> Callable[[None], FakeRedis]:\n @lru_cache\n def wrapper() -> FakeRedis:\n db = FakeRedis(decode_responses=True)\n return db\n\n return wrapper", "def __init__(self):\n self.redis = RedisClient()\n self.crawlers = [crawler_cls() for crawler_cls in crawlers_cls]", "def gen_heat_client(self):\n\n print \"\\t* Generating heat client\"\n # request a new auth token from keystone\n keystone = ksclient.Client(auth_url=self.auth_url,\n username=self.username,\n password=self.password,\n tenant_name=self.tenant_name,\n region_name=self.region_name)\n auth_token = keystone.auth_token\n heat_url = 'http://%s:8004/v1/%s' % (self.ip, self.tenant_id)\n\n # instantiate client\n self.heatclient = hClient('1', endpoint=heat_url, token=auth_token)", "def client(db):\n client = ClientFactory()\n db.session.commit()\n return client", "def __init__(self, *args, **kwargs):\n Redis.__init__(self, *args, **kwargs)\n \n # Set the module commands' callbacks\n MODULE_CALLBACKS = {\n self.CREATE_CMD : bool_ok,\n self.ADD_CMD : int_or_none,\n self.INCRBY_CMD : bool_ok,\n self.DECRBY_CMD : bool_ok,\n self.CREATERULE_CMD : bool_ok,\n self.DELETERULE_CMD : bool_ok,\n self.RANGE_CMD : parse_range,\n self.MRANGE_CMD : parse_m_range,\n self.GET_CMD : lambda x: (int(x[0]), float(x[1])),\n self.INFO_CMD : parse_info,\n }\n for k, v in six.iteritems(MODULE_CALLBACKS):\n self.set_response_callback(k, v)", "def new(*args, **kwargs):\n return BinarySharedTensor(*args, **kwargs)", "def __new__(cls, host=None, user=None, client=None):\n cls.__check_parameters(host=host, user=user)\n if client is None:\n raise InvalidClientException(\"Integrated Client during connection creation can't be None\")\n return super(Connection, cls).__new__(cls, host=host, user=user, client=client)", "def _conn_redis(self) -> Redis:\n return Redis(host=self._REDIS_DB_HOST, port=self._REDIS_DB_PORT, db=0,decode_responses=True)", "def client(self,\n name,\n method=None,\n url=None,\n status_callback_event=None,\n status_callback_method=None,\n status_callback=None,\n **kwargs):\n return self.append(Client(\n name,\n method=method,\n url=url,\n status_callback_event=status_callback_event,\n status_callback_method=status_callback_method,\n status_callback=status_callback,\n **kwargs\n ))", "def __new__(\n cls, \n 
config: 'bittensor.Config' = None,\n name: str = None,\n hotkey: str = None,\n path: str = None,\n ) -> 'bittensor.Wallet':\n if config == None: \n config = wallet.config()\n config = copy.deepcopy( config )\n config.wallet.name = name if name != None else config.wallet.name\n config.wallet.hotkey = hotkey if hotkey != None else config.wallet.hotkey\n config.wallet.path = path if path != None else config.wallet.path\n wallet.check_config( config )\n return wallet_impl.Wallet(\n name = config.wallet.name, \n hotkey = config.wallet.hotkey, \n path = config.wallet.path\n )", "def client(self) -> 'BaseClient':\n return self", "def memcached_client(memcached_socket, memcached):\n mc = pytest.importorskip('pylibmc')\n return mc.Client([memcached_socket])", "def conn_redis(host, port, db=0):\r\n r = redis.Redis(host=host, port=port, db=db)\r\n return r", "def __init__(self, **kwargs):\n creator = kwargs.pop(\"creator\", None)\n if not creator:\n import MySQLdb\n creator = MySQLdb\n mincached = kwargs.pop(\"mincached\", 2)\n maxcached = kwargs.pop(\"maxcached\", 10)\n maxshared = kwargs.pop(\"maxshared\", 10)\n maxconnections = kwargs.pop(\"maxconnections\", 20)\n blocking = kwargs.pop(\"blocking\", 0)\n reset = kwargs.pop(\"reset\", True)\n maxusage = kwargs.pop(\"maxusage\", 0)\n setsession = kwargs.pop(\"setsession\", [\"set autocommit = 0\"])\n ping = kwargs.pop(\"ping\", 1)\n\n self._pool = PooledDB(creator=creator, mincached=mincached, maxcached=maxcached,\n maxshared=maxshared, maxconnections=maxconnections,\n blocking=blocking, maxusage=maxusage,reset=reset,\n setsession=setsession, ping=ping, **kwargs)", "def create_lock(self, resource, **kwargs):\n lock = DistLock(resource=resource, created_by_factory=True, **kwargs)\n lock.redis_nodes = self.redis_nodes\n lock.quorum = self.quorum\n lock.factory = self\n return lock", "def get_client() -> RabbitmqClient:\n # Replace the parameters with proper values for host, port, login and password\n # Change the value of exchange if needed.\n #\n # For any parameter that is not given here, the client tries to use a value from an environment variable\n # and most of the parameters also have a default value that is used if neither the constructor parameter\n # nor the environmental variable exist.\n # See tools/clients.py for details about the environmental variables and the default values.\n return RabbitmqClient(\n host=\"\",\n port=0,\n login=\"\",\n password=\"\",\n exchange=\"procem.examples_testing\",\n ssl=True,\n ssl_version=\"PROTOCOL_TLS\",\n exchange_autodelete=True,\n exchange_durable=False\n )", "def mock_strict_redis_client(**kwargs):\n return MockRedis(strict=True, **kwargs)", "def copy(self) -> \"WOQLClient\":\n return copy.deepcopy(self)", "def __init__(self, address=('', 50000), authkey=b'tradingbot'):\n self.id = 0\n _ClientBot.__init__(self, address=address, authkey=authkey)\n self.conn_tbm = ConnTradingBotManager(self.id)", "def copy(self):\n new_filter = BloomFilter(self.capacity, self.error_rate)\n new_filter.filter = self.filter.copy()\n return new_filter", "def buildProtocol(self, addr):\n return ClientConnection()", "def create_client(service_name: str, config_name: str = None, **client_args):\n session = get_session(config_name)\n return session.client(service_name, **client_args)", "def create_boto3_client(config, service):\n session = boto3.Session(profile_name=config.get('AWS_ACCESS', 'AWS_PROFILE'))\n return session.client(service, region_name=config.get('AWS_ACCESS', 'AWS_REGION'))", "def _get_conn(self):\n return 
redis.Redis(connection_pool=self.pool)", "def BMemcache(app, config, *args, **kwargs):\n if config.get('FRAGMENT_CACHING'):\n import bmemcached\n return bmemcached.Client(**{\n 'servers': config.get('FRAGMENT_MEMCACHED_SERVERS',\n config.get('CACHE_MEMCACHED_SERVERS', ('127.0.0.1:11211',))),\n 'username': config.get('FRAGMENT_MEMCACHED_USERNAME',\n config.get('CACHE_MEMCACHED_PASSWORD')),\n 'password': config.get('FRAGMENT_MEMCACHED_PASSWORD',\n config.get('CACHE_MEMCACHED_PASSWORD')),\n 'compression': Compressor()\n })\n return None", "def init_ne(cls, n, err):\n b = Bloom(n)\n b.err = err\n b.m = Bloom.get_m(n,err)\n b.k = Bloom.get_k(n, b.m)\n b.bits = bitarray(b.m)\n b.bits.setall(0)\n return b", "def __init__(self, *args, **kwargs):\n super(Client, self).__init__(role='c', *args, **kwargs)\n\n # Internal variables\n self._bulksize = None\n self._server_hostname = None\n self._port = None\n self._num_streams = None\n self._zerocopy = False", "def create_search_console_client(credentials):\n http_auth = httplib2.Http()\n http_auth = credentials.authorize(http_auth)\n service = build('webmasters', 'v3', http=http_auth)\n return service", "def new(cls, **kwargs):\n return cls(**kwargs)", "def make_client(db, hdfs_client=None):\n return ImpalaClient(db, hdfs_client=hdfs_client)", "def init(config_file_path=None, **kwargs):\n if __debug__:\n logger.debug(HEADER + \"Initializing the storage client.\")\n global redis_connection\n global hosts\n # If config_file_path is None we will assume that we only have localhost\n # as storage node\n if config_file_path is None:\n try:\n import StringIO as sio\n except ImportError:\n from io import StringIO as sio\n config_file_handler = sio.StringIO('localhost\\n')\n else:\n config_file_handler = open(config_file_path)\n # As accorded in the API standar, this file must contain all the hosts\n # names with no port, one per line\n hosts = [x.strip() for x in config_file_handler.readlines()]\n config_file_handler.close()\n # If we have more than one host then we will assume that our backend is a\n # Redis cluster. If not, we will assume that we are dealing with a Redis\n # standalone instance\n if len(hosts) > 1:\n # Given that cluster clients are capable to perform master\n # slave hierarchy discovery, we will simply connect to the first\n # node we got\n redis_connection = \\\n rediscluster.RedisCluster(host=hosts[0], port=REDIS_PORT)\n else:\n # We are in standalone mode\n redis_connection = \\\n redis.StrictRedis(host=hosts[0], port=REDIS_PORT)\n # StrictRedis is not capable to know if we had success when connecting by\n # simply calling the constructor. 
We need to perform an actual query to\n # the backend\n # If we had no success this first line should crash\n redis_connection.set('PYCOMPSS_TEST', 'OK')\n # Beware py2 vs py3 - b'string' works for both.\n assert redis_connection.get('PYCOMPSS_TEST') == b'OK'\n redis_connection.delete('PYCOMPSS_TEST')\n if __debug__:\n logger.debug(HEADER + \"Initialization finished successfully.\")", "def make_client(instance):\n network_client = utils.get_client_class(\n API_NAME,\n instance._api_version[API_NAME],\n API_VERSIONS)\n LOG.debug('Instantiating network client: %s', network_client)\n\n endpoint = instance.get_endpoint_for_service_type(\n API_NAME,\n region_name=instance._region_name,\n )\n\n return network_client(\n username=instance._username,\n tenant_name=instance._project_name,\n password=instance._password,\n region_name=instance._region_name,\n auth_url=instance._auth_url,\n endpoint_url=endpoint,\n token=instance.auth.get_token(instance.session),\n insecure=instance._insecure,\n ca_cert=instance._cacert,\n )", "def _get_shadow_client(self):\n self.shadowClient = AWSIoTMQTTClient(clientId)\n\n self.shadowClient.configureEndpoint(host, port)\n self.shadowClient.configureCredentials(rootCAPath,\n privateKeyPath,\n certificatePath)\n self.shadowClient.configureAutoReconnectBackoffTime(1, 32, 20)\n self.shadowClient.configureConnectDisconnectTimeout(10)\n self.shadowClient.configureMQTTOperationTimeout(1)\n \n return self.shadowClient", "def create(cls):\n pass\n return cls()", "def create_bq_client(credentials: str = None, project_id: str = None) -> bigquery.Client:\n if credentials is None and project_id is None:\n credentials, project_id = google.auth.default()\n\n return bigquery.Client(credentials=credentials,\n project=project_id)", "def create_client(self, module_name, version, client_class):\n # NOTE(kiennt): Get created client rather create a new one.\n # The key is the combination of module_name and version.\n # because we can create multiple clients of a module with\n # different versions.\n client = self.created_clients.get(module_name + version)\n if client:\n return client\n module_client = self._import_client(module_name)\n try:\n client = getattr(module_client, client_class)(\n version=version,\n session=self._sess)\n self.created_clients[module_name+version] = client\n return client\n except Exception as err:\n raise err", "def client():\n _, p, _ = docker_run_etcd_main()\n c = Client(host, p, protocol)\n yield c\n c.close()", "def create_client():\n hostname = \"localhost\"\n username = \"she393\"\n password = os.getenv(\"PASSWORD\")\n\n client = paramiko.SSHClient()\n client.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n client.connect(hostname=hostname, username=username, password=password)\n return client", "def getBuilder():\n if (MinicapStream.__instance == None):\n MinicapStream.__mutex.acquire()\n if (MinicapStream.__instance == None):\n MinicapStream.__instance = MinicapStream()\n MinicapStream.__mutex.release()\n return MinicapStream.__instance", "def get_rpc_client(id: int):\n try:\n return connkeeper[id]\n except KeyError:\n connkeeper[id] = shardRPC()\n return connkeeper[id]", "def get_redis():\n return redis.StrictRedis(host='redis', port=6379)", "def make_client(instance):\r\n neutron_client = utils.get_client_class(\r\n API_NAME,\r\n instance._api_version[API_NAME],\r\n API_VERSIONS,\r\n )\r\n instance.initialize()\r\n url = instance._url\r\n url = url.rstrip(\"/\")\r\n if '2.0' == instance._api_version[API_NAME]:\r\n client = 
neutron_client(username=instance._username,\r\n tenant_name=instance._tenant_name,\r\n password=instance._password,\r\n region_name=instance._region_name,\r\n auth_url=instance._auth_url,\r\n endpoint_url=url,\r\n token=instance._token,\r\n auth_strategy=instance._auth_strategy,\r\n insecure=instance._insecure,\r\n ca_cert=instance._ca_cert)\r\n return client\r\n else:\r\n raise exceptions.UnsupportedVersion(_(\"API version %s is not \"\r\n \"supported\") %\r\n instance._api_version[API_NAME])", "def get_redis_client():\n return redis.from_url(settings.REDIS_URI)", "def __init__(self):\n if Config.USEMEMCACHED is True:\n self.mc = MCache(server = Config.MEMCACHED_SERVER,\n username = Config.MEMCACHED_USERNAME,\n password = Config.MEMCACHED_PASSWORD)\n else:\n self.mc = None\n self.api = DozensApi()", "def client(self) -> WebClient:\n return WebClient(**self._get_conn_params())", "def build_client(module):\n return drac.DRACClient(module.params['address'],\n module.params['username'],\n module.params['password'])", "def get_redis(**kwargs):\n redis_cls = kwargs.pop('redis_cls', DEFAULT_REDIS_CLS)\n url = kwargs.pop('url', None)\n if url:\n return redis_cls.from_url(url, **kwargs)\n else:\n return redis_cls(**kwargs)", "def create_coin(self, symbol, name, price, bought, quantity):\n update_time = datetime.now(pytz.timezone('US/Eastern'))\n coin_obj = CoinHistory(update_time=update_time,\n symbol=symbol,\n name=name,\n price=price,\n bought=bought,\n quantity=quantity)\n return coin_obj", "def create_cloudwatch_client_instance():\n try:\n cloudwatch = boto3.client('cloudwatch')\n return cloudwatch\n except Exception, e:\n responseData = { 'Reason': 'Error creating CloudWatch Client instance'}\n send(event, context, FAILED, responseData, \"CustomResourcePhysicalID\") \n return 1", "def create_client():\n host_api_id = Config.api_id\n host_api_hash = Config.api_hash\n host_user_id = Config.user_id\n host_phone = Config.phone\n\n client = TelegramClient(host_user_id, host_api_id, host_api_hash)\n client.connect()\n if not client.is_user_authorized():\n client.send_code_request(host_phone)\n client.sign_in(host_phone, input('Enter code sent to your telegram: '))\n return client", "def create_instance(c_instance):\n return RpycHost(c_instance)", "def __init__(self, config, queue_name):\n self.work_queue_client = WorkQueueClient(config, queue_name)", "def __init__(self, config, queue_name):\n self.work_queue_client = WorkQueueClient(config, queue_name)", "def transport_factory(conf):\n\n return MemcachedTransport(conf)", "def buildProtocol(self, addr):\n if hasattr(settings, \"DISCORD_SESSION_CLASS\"):\n protocol_class = class_from_module(\n settings.DISCORD_SESSION_CLASS, fallback=DiscordClient\n )\n protocol = protocol_class()\n else:\n protocol = DiscordClient()\n\n protocol.factory = self\n protocol.sessionhandler = self.sessionhandler\n return protocol", "def __init__(self, client):\n self.client = client", "def create_client(wsdl: str, raw_response: bool = True) -> CachingClient:\n # We want the raw response as there is an error when Zeep parses the XML\n settings: Settings = Settings(raw_response=raw_response)\n\n # Client that caches the WSDL\n client: CachingClient = CachingClient(\n wsdl=wsdl,\n # TODO: Store PW encrypted\n wsse=UsernameToken(\"n00394gz\", \"g427Ix19LMB\"),\n settings=settings,\n )\n logger.debug(f\"Client created\")\n\n return client", "def __init__(self, client):\n\n self.client = client", "def create(self, **kargs):\n return self(**kargs)", "def newBlock(preBlock, 
remitter, number, payee):\r\n index = preBlock.index + 1\r\n timestamp = int(round(time.time() * 1000))\r\n data = (remitter, number, payee).__str__()\r\n previousHash = preBlock.hash\r\n nounce = 0\r\n return Blockchain(index, data, timestamp, nounce, previousHash)", "def run_redis_example():\n\n try:\n r = redis.StrictRedis(host=host, port=port, password=pw,\n decode_responses=True)\n except Exception as e:\n print(f'Error connecting to Redis DB: {e}')\n\n return r", "def __init__( self, **params ):\n \n host = custom( CPULimitedHost, cpu=cpuShare() ) \n link = custom( TCLink, bw=args.bandwidth, delay=delay() )\n \n Mininet.__init__(\n self,\n topo=BarrierTransactionTopo( **params ),\n host=host,\n link=link )", "def __init__(self, redis_connection=None):\n self._redis_connection = redis_connection or get_websocket_redis_connection()" ]
[ "0.57075137", "0.5469258", "0.5429889", "0.5429889", "0.5411458", "0.5335622", "0.5313152", "0.5284318", "0.52696276", "0.52184993", "0.51899725", "0.5167407", "0.5128039", "0.51219803", "0.5082389", "0.50596845", "0.5056017", "0.505512", "0.5052801", "0.50405574", "0.5034328", "0.5025915", "0.50166714", "0.49417877", "0.49273184", "0.49266714", "0.49229008", "0.49086487", "0.490407", "0.4881542", "0.48802686", "0.4880142", "0.48674172", "0.48645678", "0.48506197", "0.48487595", "0.48428237", "0.4823011", "0.47983706", "0.47967723", "0.4787709", "0.47770444", "0.47729364", "0.47655353", "0.4754024", "0.4748551", "0.4738045", "0.47350925", "0.47282618", "0.47093567", "0.46982157", "0.46709445", "0.46699983", "0.4655236", "0.46537238", "0.463965", "0.46390763", "0.4638191", "0.46371534", "0.4634411", "0.4634381", "0.46329662", "0.46297956", "0.4626547", "0.46204814", "0.46116334", "0.46101284", "0.46090287", "0.4608303", "0.460509", "0.46046266", "0.45979536", "0.45959467", "0.45925182", "0.4584386", "0.45760792", "0.4561", "0.4558358", "0.45577776", "0.4557279", "0.4549281", "0.4541226", "0.45365348", "0.45246577", "0.45194012", "0.45164156", "0.45159754", "0.45155632", "0.45096", "0.45096", "0.45024568", "0.44987234", "0.44972762", "0.44936484", "0.44926605", "0.44864357", "0.44790962", "0.4474557", "0.44732568", "0.4473096" ]
0.50316715
21
Creates a new Bloom Filter ``key`` with the desired probability of false positives ``errorRate`` and the expected number of entries to be inserted as ``capacity``. The default expansion value is 2. By default, the filter is autoscaling.
def bfCreate(self, key, errorRate, capacity, expansion=None, noScale=None): params = [key, errorRate, capacity] self.appendExpansion(params, expansion) self.appendNoScale(params, noScale) return self.execute_command(self.BF_RESERVE, *params)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cfCreate(self, key, capacity, expansion=None, bucket_size=None, max_iterations=None):\n params = [key, capacity]\n self.appendExpansion(params, expansion)\n self.appendBucketSize(params, bucket_size)\n self.appendMaxIterations(params, max_iterations)\n\n return self.execute_command(self.CF_RESERVE, *params)", "def __init__(self, server, bfkeypreffix, capacity, error_rate=0.001):\n if not (0 < error_rate < 1):\n raise ValueError(\"Error_Rate must be between 0 and 1.\")\n if not capacity > 0:\n raise ValueError(\"Capacity must be > 0\")\n # given M = num_bits, k = num_slices, P = error_rate, n = capacity\n # k = log2(1/P)\n # solving for m = bits_per_slice\n # n ~= M * ((ln(2) ** 2) / abs(ln(P)))\n # n ~= (k * m) * ((ln(2) ** 2) / abs(ln(P)))\n # m ~= n * abs(ln(P)) / (k * (ln(2) ** 2))\n num_slices = int(math.ceil(math.log(1.0 / error_rate, 2)))\n bits_per_slice = int(math.ceil(\n (capacity * abs(math.log(error_rate))) /\n (num_slices * (math.log(2) ** 2))))\n if bits_per_slice > MAX_PER_SLICE_SIZE:\n raise ValueError(\"Capacity and error_rate make per slice size extended, MAX_PER_SLICE_SIZE is %s\" % (MAX_PER_SLICE_SIZE))\n self._setup(error_rate, num_slices, bits_per_slice, capacity, 0, server, bfkeypreffix)", "def bfInsert(self, key, items, capacity=None, error=None, noCreate=None, expansion=None, noScale=None):\n params = [key]\n self.appendCapacity(params, capacity)\n self.appendError(params, error)\n self.appendExpansion(params, expansion)\n self.appendNoCreate(params, noCreate)\n self.appendNoScale(params, noScale)\n self.appendItems(params, items)\n\n return self.execute_command(self.BF_INSERT, *params)", "def __init__(self, capacity):\n self.capacity = capacity # Number of buckets in the hash table\n self.storage = [None] * capacity\n self.key_count = 0", "def __init__(self, capacity=100):\n \n self.capacity = capacity\n self.size = 0\n self._keys = []\n self._entry = [[] for _ in range(capacity)]", "def __init__(\n self, capacity: int, operation: Any, neutral_element: Optional[Any] = None\n ):\n\n assert (\n capacity > 0 and capacity & (capacity - 1) == 0\n ), \"Capacity must be positive and a power of 2!\"\n self.capacity = capacity\n if neutral_element is None:\n neutral_element = (\n 0.0\n if operation is operator.add\n else float(\"-inf\")\n if operation is max\n else float(\"inf\")\n )\n self.neutral_element = neutral_element\n self.value = [self.neutral_element for _ in range(2 * capacity)]\n self.operation = operation", "def __init__(self, capacity, operation, neutral_element):\n assert (\n capacity > 0 and capacity & (capacity - 1) == 0\n ), \"capacity must be positive and a power of 2.\"\n self._capacity = capacity\n self._value = [neutral_element for _ in range(2 * capacity)]\n self._operation = operation\n self.neutral_element = neutral_element", "def __init__(self, capacity, operation, neutral_element):\n assert capacity > 0 and capacity & (capacity - 1) == 0, \"capacity must be positive and a power of 2.\"\n self._capacity = capacity\n self._value = [neutral_element for _ in range(2 * capacity)]\n self._operation = operation", "def __init__(self, capacity, operation, neutral_element):\n assert capacity > 0 and capacity & (capacity - 1) == 0, \"capacity must be positive and a power of 2.\"\n self._capacity = capacity\n self._value = [neutral_element for _ in range(2 * capacity)]\n self._operation = operation", "def __init__(self, capacity, operation, neutral_element):\n assert capacity > 0 and capacity & (capacity - 1) == 0, \"capacity must be positive and a 
power of 2.\"\n self._capacity = capacity\n self._value = [neutral_element for _ in range(2 * capacity)]\n self._operation = operation", "def __init__(self, capacity, alpha, beta_i, beta_f, beta_anneal,\n weight_offset):\n self.weight_offset = weight_offset\n self.alpha = alpha\n\n assert beta_i < beta_f, \"Beta update assumes beta_i < beta_f\"\n self.beta = beta_i\n self.beta_f = beta_f\n self.beta_update = (beta_f - beta_i) / beta_anneal\n\n self.experiences = WeightedRingBuf(capacity)\n # ids of experiences that haven't been used for training yet.\n self.unplayed_experiences = deque(maxlen=capacity)", "def bf_counter(file_name, k, n, capacity, error_rate, verbose=False):\n if verbose:\n start = time.time()\n print('BFCounter started.')\n\n heap = []\n for i in range(n):\n heap.append((0, ''))\n\n bf = BloomFilter(capacity, error_rate, 'kmer_bf')\n\n kmer_counter = defaultdict(lambda: 1)\n\n # Assign functions to local variables for performance improvement\n add_to_bf = bf.add\n heap_pushpop = heapq.heappushpop\n\n with open(file_name, 'r') as f:\n line_num = 0\n for line in f:\n if line_num % 4 == 1: # dna sequence\n kmer_count = len(line) - k\n for i in range(kmer_count):\n kmer = line[i:i + k]\n if kmer not in bf: # not in Bloom Filter\n add_to_bf(kmer)\n else: # in Bloom Filter\n kmer_counter[kmer] += 1\n line_num += 1\n if verbose:\n end_hash = time.time()\n hash_table_size = sys.getsizeof(kmer_counter) / (1024 ** 2)\n print('Hash table is created in {:.2f} seconds.'.format(\n end_hash - start))\n print('Hash table size: {:.2f} MB.'.format(hash_table_size))\n start_populate = time.time()\n print('Populating the heap...')\n\n for count, kmer in kmer_counter.items():\n # insert to the heap if count is bigger than minimum\n if count > heap[0][0]:\n heap_pushpop(heap, (count, kmer))\n\n if verbose:\n end_populate = time.time()\n print('Heap is populated in {:.2f} seconds.'.format(\n end_populate - start_populate\n ))\n\n os.remove('kmer_bf')\n if verbose:\n end = time.time()\n print('BFCounter is completed in {:.2f} seconds.'.format(end - start))\n\n return heap", "def __init__(self, capacity, initial):\n\t\tself.capacity = capacity\n\t\tself.amount = initial", "def __init__(self, capacity, fillValue=None):\r\n self._items = list()\r\n for count in range(capacity):\r\n self._items.append(fillValue)", "def __init__(self, capacity: int, function) -> None:\n self.buckets = DynamicArray()\n for _ in range(capacity):\n self.buckets.append(LinkedList())\n self.capacity = capacity\n self.hash_function = function\n self.size = 0", "def __init__(self, capacity, fillValue = None):\n \n self._items = list() \n self._fillValue = fillValue\n self._DEFAULT_CAPACITY = capacity\n self._logicalSize = 0 #as required by exercise 1\n \n \n for count in xrange(capacity):\n self._items.append(self._fillValue)", "def knapsack(items, capacity):\r\n pass", "def __init__(self, capacity, fillValue=None):\r\n self._items = list()\r\n self._logicalSize = 0\r\n # Track the capacity and fill value for adjustments later\r\n self._capacity = capacity\r\n self._fillValue = fillValue\r\n for count in range(capacity):\r\n self._items.append(fillValue)", "def create_capacity_limiter(total_tokens: float) -> abc.CapacityLimiter:\n return get_asynclib().CapacityLimiter(total_tokens)", "def cfInsert(self, key, items, capacity=None, nocreate=None):\n params = [key]\n self.appendCapacity(params, capacity)\n self.appendNoCreate(params, nocreate)\n self.appendItems(params, items)\n\n return self.execute_command(self.CF_INSERT, 
*params)", "def expandable_capacity(self, expandable_capacity):\n\n self._expandable_capacity = expandable_capacity", "def _generate_table(self):\n for i in xrange(32):\n self._table.append(\n BloomFilter(\n capacity=self.__capacity,\n error_rate=self.__error_rate\n )\n )", "def __init__(self, capacity, units=0):\n self.capacity = capacity\n self.units = units", "def FixedWidthBucketer(width, num_finite_buckets=100):\n return Bucketer(width=width, growth_factor=0.0,\n num_finite_buckets=num_finite_buckets)", "def __init__(self, width, growth_factor, num_finite_buckets):\n\n if num_finite_buckets < 0:\n raise ValueError('num_finite_buckets must be >= 0 (was %d)' %\n num_finite_buckets)\n\n self.width = width\n self.growth_factor = growth_factor\n self.num_finite_buckets = num_finite_buckets\n self.total_buckets = num_finite_buckets + 2\n self.underflow_bucket = 0\n self.overflow_bucket = self.total_buckets - 1\n\n self._lower_bounds = list(self._generate_lower_bounds())", "def __init__(self, bucket_size, bucket_fill_rate, current_time=None):\n self.__bucket_contents = bucket_size\n self.__bucket_size = bucket_size\n self.__bucket_fill_rate = bucket_fill_rate\n\n if current_time is None:\n current_time = time.time()\n\n self.__last_bucket_fill_time = current_time", "def capacity_factor(self, value: float) -> None:\n # State S, I, E, SE, or EE\n self._capacity_factor = value", "def __init__(self, capacity=4):\n self.capacity = capacity\n self.size = 0\n self.table = [None] * capacity", "def new_capacity_rule(mod, prj, prd):\n return 0", "def __init__(__self__, *,\n capacity: Optional[int] = None,\n name: Optional[str] = None):\n if capacity is not None:\n pulumi.set(__self__, \"capacity\", capacity)\n if name is not None:\n pulumi.set(__self__, \"name\", name)", "def __init__(self, n, m, k=2):\n # expecting to hold n elements\n self.n = n\n if m%4: m += (4-m%4)\n self.m = m*8\n print \"bit map size set to %d (%d bytes)after round up to 32bits\"%(self.m, self.m/8)\n self.bm = BitVector(size=self.m, intVal=0)\n if k in BloomFilter.KRange:\n self.k = k\n else:\n self.k = BloomFilter.KRange[-1]\n # round k to closest allowed value\n for i in range(len(BloomFilter.KRange)-1):\n if k < BloomFilter.KRange[i]:\n self.k = BloomFilter.KRange[i]\n break\n elif k < BloomFilter.KRange[1+i]:\n if (BloomFilter.KRange[+i]-k) >= k-BloomFilter.KRange[1+i]:\n self.k = BloomFilter.KRange[i]\n else:\n self.k = BloomFilter.KRange[i+1]\n break\n print \"k set to %d after validation\"%(self.k)\n p=BloomFilter.calPFP(self.n, self.m, self.k)\n print \"false positive probability will be %f when filtering %d elements\"%(p, self.n)\n #slice bitmap into k slices\n self.ms = self.m/self.k\n self.hashf = MurmurHash3_x86_32", "def _create_capacity(self, m, comp, prod_name):\n name = comp.name\n cap_res = comp.get_capacity_var() # name of resource that defines capacity\n r = m.resource_index_map[comp][cap_res] # production index of the governing resource\n # production is always lower than capacity\n ## NOTE get_capacity returns (data, meta) and data is dict\n ## TODO does this work with, e.g., ARMA-based capacities?\n ### -> \"time\" is stored on \"m\" and could be used to correctly evaluate the capacity\n cap = comp.get_capacity(None, None, None, None)[0][cap_res] # value of capacity limit (units of governing resource)\n rule = partial(self._capacity_rule, prod_name, r, cap)\n constr = pyo.Constraint(m.T, rule=rule)\n setattr(m, '{c}_{r}_capacity_constr'.format(c=name, r=cap_res), constr)\n # minimum production\n 
print('DEBUGG dispatchable?', comp.name, comp.is_dispatchable())\n if comp.is_dispatchable() == 'fixed':\n minimum = cap\n var = getattr(m, prod_name)\n values = var.get_values()\n for k in values:\n values[k] = cap\n var.set_values(values)\n else:\n minimum = 0 # -> for now just use 0, but fix this! XXX\n print('DEBUGG ... min:', minimum)\n rule = partial(self._min_prod_rule, prod_name, r, cap, minimum)\n constr = pyo.Constraint(m.T, rule=rule)\n setattr(m, '{c}_{r}_minprod_constr'.format(c=name, r=cap_res), constr)", "def resize_table(self, capacity):\r\n\t\t# make a new HashMap with the desired capacity\r\n\t\tnew_map = HashMap(capacity, self._hash_function)\r\n\r\n\t\t# for each linked list in the map, rehash and add to new_map\r\n\t\tfor i in self._buckets:\r\n\t\t\tcur_node = i.head\r\n\t\t\twhile cur_node is not None:\r\n\t\t\t\tindex = self._hash_function(cur_node.key) % capacity # get the new index for the key\r\n\t\t\t\tnew_map._buckets[index].add_front(cur_node.key, cur_node.value) # add the key-value pair to the new map\r\n\t\t\t\tcur_node = cur_node.next # move to the next node in the linked list\r\n\t\tself._buckets = new_map._buckets # reassign self._buckets to the new_map\r\n\t\tself.capacity = capacity", "def __init__(self, key):\n self.key = key\n self.BLOCK_SIZE = 16", "def __init__(self, k: int):\n self.capacity = k\n self.head = None\n self.tail = None\n self.count = 0", "def __init__(self, capacity):\n self.experiences = RingBuf(capacity)", "def __init__(self, k):\n self.capacity = k\n self.head = None\n self.tail = None\n self.count = 0", "def __init__(self, knapsack_size, items):\n self.knapsack_size = knapsack_size\n self.items = items\n self._cache = dict()\n # fill-in the cache with base cases' (subproblems') solutions\n for size in range(knapsack_size + 1):\n # if there are no items, the max value is 0\n self._cache[(0, size)] = 0\n for end in range(len(items) + 1):\n # if the knapsack's size is 0 no items fit, the max value is 0\n self._cache[(end, 0)] = 0", "def new_capacity_rule(mod, g, p):\n return 0", "def __init__(self, capacity=2):\r\n self._capacity = capacity\r\n self._data = [0] * self._capacity\r\n self._size = 0", "def resize(self, new_capacity):\n # Your code here\n self.capacity = new_capacity\n\n # make new array to store the current self.hash_table\n # update self.hash_table to be array of size new_capacity\n # for each item in our copy array\n # self.put(item) in our newly size self.hash_table\n # if item.next is not None\n # make sure to self.put(item.next) to get all chained nodes\n\n old_storage = self.hash_table\n self.hash_table = [None] * new_capacity\n\n for i, el in enumerate(old_storage):\n if el is not None:\n self.put(el.key, el.value)\n\n curr_node = el\n\n if curr_node is not None:\n # add all chained nodes\n while curr_node.next is not None:\n curr_node = curr_node.next\n if curr_node is not None:\n self.put(curr_node.key, curr_node.value)", "def capacity(self, capacity):\n\n self._capacity = capacity", "def __init__(self, capacity=10):\n\n self._board = [None] * capacity # list of 10 None elements\n self._n = 0 # number of actual entries", "def clamp(self, key):\n\t\treturn DiscreteDistribution({ k : 0. if k != key else 1. 
for k in self.keys() })", "def capacity_enlarge(self, k):\n count = 0\n idx = self.capacity - 1\n while count < k:\n left = self.tree[idx]\n right = priorityNode(0, None)\n insert_pos = self.tree.shape[0]\n self.tree = np.insert(self.tree, insert_pos, [left,right])\n idx += 1\n count += 1\n\n self.last_capacity = self.capacity # mark down the last capacity for adding operation\n self.capacity += k # Update the value of capacity", "def grow(self):\n self.capacity = self.capacity * 2\n self.rehash()", "def __init__(self, capacity):\n self.capacity = capacity\n self.map = {}\n self.head = self.Node(0, 0)\n self.tail = self.Node(0, 0)\n self.head.next = self.tail\n self.tail.pre = self.head\n self.cnt = 0", "def __init__(__self__, *,\n capacity: Optional[pulumi.Input[int]] = None,\n family: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None):\n if capacity is not None:\n pulumi.set(__self__, \"capacity\", capacity)\n if family is not None:\n pulumi.set(__self__, \"family\", family)\n if name is not None:\n pulumi.set(__self__, \"name\", name)", "def __init__(self, capacity: int):\n self._pax_with_carry_on = PaxStack()\n self._pax_without_carry_on = PaxStack()\n self._capacity = capacity\n self._current_pax = 0", "def __init__(self, iterable_input, batch_size, buckets, pad_index, only_full=False, field=None,\n shuffle=False, buffer_size=None, name='Bucket Batch', verbose=True):\n super().__init__(iterable_input=iterable_input, name=name, verbose=verbose)\n self.batch_size = batch_size\n self.buckets = buckets\n self.max_length = buckets[-1]\n self.pad_index = pad_index\n self.only_full = only_full\n self.field = field\n self.shuffle = shuffle\n self.buffer_size = self.batch_size if buffer_size is None else buffer_size", "def capacity_factor(self, update=False,\n min_cap_fact=None, max_cap_fact=None):\n if update or self._dfs['capacity_factor'] is None:\n self._dfs['capacity_factor'] = pudl.analysis.mcoe.capacity_factor(\n self, min_cap_fact=min_cap_fact, max_cap_fact=max_cap_fact)\n return self._dfs['capacity_factor']", "def __init__(self, key):\n self.bs = 16\n self.key = hashlib.sha256(key.encode()).digest()", "def test_create_bucket(self):\n bucket = pmp.utils.create_bucket(3, 5.0)\n self.assertIsInstance(bucket, pmp.Bucket)\n\n POS_INF = float(\"inf\")\n bucket = pmp.utils.create_bucket(0, POS_INF)\n self.assertIsInstance(bucket, pmp.Bucket)", "def __init__(__self__, *,\n active_capacity: int,\n capacity: Optional[int] = None,\n scale_type: Optional[str] = None):\n pulumi.set(__self__, \"active_capacity\", active_capacity)\n if capacity is not None:\n pulumi.set(__self__, \"capacity\", capacity)\n if scale_type is not None:\n pulumi.set(__self__, \"scale_type\", scale_type)", "def topkReserve(self, key, k, width, depth, decay):\n params = [key, k, width, depth, decay]\n \n return self.execute_command(self.TOPK_RESERVE, *params)", "def resize_table(self, capacity):\n old_buckets = self._buckets # save current buckets with variable\n\n self._buckets = [] # initialize new empty buckets\n self.capacity = capacity\n for i in range(capacity):\n self._buckets.append(LinkedList())\n self.size = 0\n\n for bucket in old_buckets: # loops through old hashmap and adds key/value pairs to new hashmap\n if bucket.size == 0:\n pass\n else:\n while bucket.head is not None:\n self.put(bucket.head.key, bucket.head.value)\n bucket.remove(bucket.head.key)", "def set_capacity(self, cap):\n self._capacity.type = 'value'\n self._capacity._value = float(cap) # TODO getter/setter", "def 
__init__(self, k: int) -> None:\n\n assert k > 2, \"for k = 2 use Bernoulli distribution.\"\n\n self.k = k", "def __init__(self, capacity):\n assert isinstance(capacity, int)\n if capacity <= 0:\n raise ValueError(\n 'Sum tree capacity should be positive. Got: {}'.format(capacity))\n\n self.nodes = []\n self.depth = int(np.ceil(np.log2(capacity)))\n self.low_idx = (2**self.depth) - 1 # pri_idx + low_idx -> tree_idx\n self.high_idx = capacity + self.low_idx\n self.nodes = np.zeros(2**(self.depth + 1) - 1) # Double precision.\n self.capacity = capacity\n\n self.highest_set = 0\n\n self.max_recorded_priority = 1.0", "def copy(self):\n new_filter = BloomFilter(self.capacity, self.error_rate)\n new_filter.filter = self.filter.copy()\n return new_filter", "def __init__(self, color='brown', name='knapsack', max_size=3):\n \n Backpack.__init__(self, color, name, max_size)\n self.closed = True", "def __init__(self, k, num_buckets, fp_size, bucket_size, max_iter):\n self.children: List[Node] = []\n self.parent: Optional[Node] = None\n self.filter = CuckooFilterBit(num_buckets, fp_size, bucket_size, max_iter)\n\n self.dataset_id: Optional[str] = None\n self.k = k", "def __init__(self, alpha=80, beta=13, gamma=3, spatial_ker_weight=3, bilateral_ker_weight=10):\n self.alpha = alpha\n self.beta = beta\n self.gamma = gamma\n self.spatial_ker_weight = spatial_ker_weight\n self.bilateral_ker_weight = bilateral_ker_weight", "def set_capacity(self, capacity):\r\n params = {\r\n 'AutoScalingGroupName' : self.name,\r\n 'DesiredCapacity' : capacity,\r\n }\r\n req = self.connection.get_object('SetDesiredCapacity', params,\r\n Request)\r\n self.connection.last_request = req\r\n return req", "def __init__(self):\n self.capacity = 10000\n self.table = [[] for _ in range(self.capacity)]", "def __init__(self, k: int):\n self.queue = [0]*k\n self.headIndex = 0\n self.count = 0\n self.capacity = k", "def ffa(items_list, bin_capacity):\n bins =[]\n randomised_np_list = np.random.permutation(items_list) # list containing initial items in a random order\n items_list = randomised_np_list.tolist() \n \n for item in items_list:\n # foeach item we search if there's an open bin where it can fit\n for bin in bins:\n if bin.total_weight + item <= bin_capacity: #if it fits\n bin.add_item(item) #we add the item in the bin\n break\n else:\n # there is no open bin where the item can fit\n #so we open a new bin and add the item in it\n bin = Bin()\n bin.add_item(item)\n bins.append(bin)\n\n return bins", "def __init__(self, k: int):\r\n self.capacity = k\r\n self.frontIndex = 0\r\n self.lastIndex = 1\r\n self.deque = [0] * self.capacity\r\n self.size = 0 # current size\r", "def __init__(self, key):\n\n def keys(key, num_rounds):\n \"\"\"Yields the permuted key bitstring for i = 1..num_rounds\"\"\"\n C, D = key[:28], key[28:]\n # Rounds are 1-indexed, so shift array over by one\n left_shifts = [None, 1, 1, 2, 2, 2, 2, 2, 2, 1, 2, 2, 2, 2, 2, 2, 1]\n for i in range(1, num_rounds + 1):\n # Negate each rotation to rotate left.\n C, D = rotate(C, -left_shifts[i]), rotate(D, -left_shifts[i])\n yield self.permute(C + D, self._CD_permutation)\n\n self.key = list(bits_of(key, 64))\n # Permute the key. The permutation discards the parity bits...\n self.key = self.permute(self.key, self._key_permutation)\n self.number_of_rounds = 16\n # A list of the 16 keys K1 .. 
K16, shifted over by one to allow 1-indexing.\n self.keys = [None] + list(keys(self.key, self.number_of_rounds))", "def __init__(self, k):\n self.queue = [0]*k\n self.headIndex = 0\n self.count = 0\n self.capacity = k", "def cfInsertNX(self, key, items, capacity=None, nocreate=None):\n params = [key]\n self.appendCapacity(params, capacity)\n self.appendNoCreate(params, nocreate)\n self.appendItems(params, items)\n\n return self.execute_command(self.CF_INSERTNX, *params)", "def __init__(self,key):\n self.block_size = 32\n self.key = hashlib.sha256(key).digest()", "def __init__(self):\n self.buckets = [-1] * 10\n self.length = len(self.buckets)", "def set_baggage_item(self, key, value):\n return self", "def resize_table(self, capacity):\n\n temp = HashMap(capacity, self._hash_function) # Temporary hash map to store new values\n\n for index in range(self.capacity):\n if self._buckets[index].head is not None:\n node = self._buckets[index].head\n while node is not None:\n temp.put(node.key, node.value) # Iterate over the values and re-hash them into the temp table\n node = node.next\n\n self._buckets = temp._buckets # Update the hash map to use the new buckets\n self.capacity = capacity", "def __init__(self):\n self.size = 1000\n self.bucket = [None] * self.size", "def add(self, key):\n if key in self:\n return True\n if not self.filters:\n filter = RedisLocalBloomFilter(\n server=self.server, \n bfkeypreffix = self.FILTER_KEY_FMT % (self.bfkeypreffix, self.filter_count),\n capacity=self.initial_capacity,\n error_rate=self.error_rate * (1.0 - self.ratio))\n self.filter_count += 1\n self.filters.append(filter)\n else:\n filter = self.filters[-1]\n if filter.count >= filter.capacity:\n capacity = filter.capacity * self.scale\n if capacity > MAX_PER_SLICE_SIZE:\n capacity = MAX_PER_SLICE_SIZE\n filter = RedisLocalBloomFilter(\n server=self.server,\n bfkeypreffix = self.FILTER_KEY_FMT % (self.bfkeypreffix, self.filter_count),\n capacity=capacity,\n error_rate=self.error_rate * (1.0 - self.ratio))\n self.filter_count += 1\n self.filters.append(filter)\n if self.max_filters > 0 and len(self.filters) >= self.max_filters:\n f = self.filters[0]\n f.clear()\n del self.filters[0]\n filter.add(key, skip_check=True)\n return False", "def capacity(self):\n raise NotImplementedError()", "def resize(self, new_capacity):\n # Your code here \n self.capacity= new_capacity\n new_data= [LinkedList()]* new_capacity \n \n # resizing of storage needed if loadfactor >0.7 or <0.2\n if self.get_load_factor() > 0.7: \n #iterate through all the items in the orginal data\n for i in self.data:\n cur = i.head\n while cur: \n # rehash the key/value pairs of data with new_capacity and get the new index\n \n index = self.hash_index(cur.key)\n\n # now add all the items to the new list\n if new_data[index].head is None:\n #make new node the head\n new_data[index].head= HashTableEntry(cur.key, cur.value)\n else:\n new_node = HashTableEntry(cur.key, cur.value)\n print(\"Keys\",cur.key) \n print(\"Values\",cur.value)\n # add new_node to the head and shift the head pointers \n new_node.next = new_data[index].head\n new_data[index].head = new_node\n # repeat till all the nodes have been added to new storage \n cur = cur.next \n # Once all the nodes have been added to new_data: self.data== new_data\n self.data= new_data", "def __init__(__self__, *,\n capacity: Optional[pulumi.Input[int]] = None,\n name: Optional[pulumi.Input[str]] = None,\n tier: Optional[pulumi.Input[str]] = None):\n if capacity is not None:\n pulumi.set(__self__, 
\"capacity\", capacity)\n if name is not None:\n pulumi.set(__self__, \"name\", name)\n if tier is not None:\n pulumi.set(__self__, \"tier\", tier)", "def set_capacity(self, cap):\n return self.get_interaction().set_capacity(cap)", "def __init__(\n self, dimension=128, init_gap=10, init_max_distance=7, trainable=False\n ):\n super(RBFExpansion, self).__init__()\n self.init_gap = init_gap\n self.init_max_distance = init_max_distance\n self.dimension = dimension\n self.trainable = trainable", "def set_limit(self, key: str, max_hits: int, window_seconds: float) -> None:\n assert (window_seconds > 0)\n self.keys[key] = RateLimiterLimit(max_hits, window_seconds)", "def testBucketSize(self):\n b = SomeBucket()\n fit = b.add(1000)\n self.assertEqual(100, fit)", "def __init__(self, name, budget, bid_increase_perc):\n self._name = name\n self._budget = budget\n self._bid_probability = random.random()\n self._bid_increase_perc = bid_increase_perc\n self._highest_bid = 0", "def __init__(self, capacity):\n self.memory = deque([], maxlen=capacity)", "def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n account_id: Optional[pulumi.Input[str]] = None,\n action_threshold: Optional[pulumi.Input[pulumi.InputType['BudgetActionActionThresholdArgs']]] = None,\n action_type: Optional[pulumi.Input[str]] = None,\n approval_model: Optional[pulumi.Input[str]] = None,\n budget_name: Optional[pulumi.Input[str]] = None,\n definition: Optional[pulumi.Input[pulumi.InputType['BudgetActionDefinitionArgs']]] = None,\n execution_role_arn: Optional[pulumi.Input[str]] = None,\n notification_type: Optional[pulumi.Input[str]] = None,\n subscribers: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['BudgetActionSubscriberArgs']]]]] = None,\n __props__=None):\n ...", "def __init__(self):\n self.m = 1000\n self.bucket = [None] * 1000", "def put(self, key, value):\n while len(self.buckets) <= key:\n self.buckets = self.buckets + [-1] * len(self.buckets)\n self.length = self.length * 2\n self.buckets[key] = value", "def __init__(self, voltage=6, capacity=220, stateBounds = np.array([0.2,0.9]), storedEnergy = None):\n\t\tself.energyCapacity = voltage*capacity\n\t\tself.storedEnergy = storedEnergy or self.energyCapacity/2.0\n\t\tself.minState = stateBounds[0]\n\t\tself.maxState = stateBounds[1]", "def test_capacity_cannot_be_empty(self):\n with self.assertRaises(Exception) as context:\n self.client.post(\n url_for('teams'),\n data={\n 'name': 'team',\n 'capacity': 'hello',\n 'number_players': '1',\n 'pitch_postcode': 'E1 6LT',\n 'time': '2019-01-01 13:00'\n }\n )\n self.assertTrue('Capacity must be a number' in context.exception)\n self.assertEqual(db.session.query(Team).count(), 0)", "def __init__(self):\n self.bucket_length = 997\n self.bucket_array = [Bucket() for i in range(self.bucket_length)]", "def add_hit(self, key: str, weight: int = 1) -> bool:\n assert self.reactor is not None\n\n if key not in self.keys:\n return True\n max_hits, window_seconds = self.keys[key]\n\n if key not in self.hits:\n self.hits[key] = RateLimiterLimit(weight, self.reactor.seconds())\n return True\n\n hits, latest_time = self.hits[key]\n\n dt = self.reactor.seconds() - latest_time\n\n # rate = max_hits / window_seconds (hits per second)\n # x = dt * rate\n # leaked_hits = floor(x) (hits obtained after dt seconds)\n leaked_hits, remainder = divmod(dt * max_hits, window_seconds)\n\n # leaked_hits * window_seconds + remainder = dt * max_hits\n # dt - remainder / max_hits = leaked_hits / rate\n 
new_time: float = latest_time + dt - remainder / float(max_hits)\n\n # First, update the bucket subtracting the leakage amount.\n new_hits: int = hits - int(leaked_hits)\n if new_hits < 0:\n new_hits = 0\n\n # Then, add the new hits and check if it overflows.\n new_hits += weight\n allowance = True\n if new_hits > max_hits:\n allowance = False\n new_hits = max_hits\n\n self.hits[key] = RateLimiterLimit(new_hits, new_time)\n return allowance", "def __init__(self):\n self.bucket_of_keys = {}\n self.buckets = LinkedList()", "def __init__(self, key):\n self._block_size = AES.block_size\n self._key = hashlib.sha256(get_as_bytes(key)).digest()", "def __init__(self, k):\r\n self.maxlen = k\r\n self.queue = []", "def capacity(self, value: typing.Union[str, int, None]):\n self._properties[\"capacity\"] = _types.integer_or_string(value)", "def key_gt(self, key_gt):\n\n self._key_gt = key_gt", "def __init__(self, b=['_'] * 20, f=[1] * 20, extra_pop=0):\n self.b = b\n self.f = f\n self.extra_pop = extra_pop", "def __init__(self, key: bytearray):\n self.__key = key\n self.__KSA(bytearray([i for i in range(256)]))" ]
[ "0.6714165", "0.6105351", "0.6015295", "0.573277", "0.5619034", "0.55716175", "0.55599296", "0.5556067", "0.5556067", "0.5556067", "0.5469874", "0.5467277", "0.54228985", "0.5409745", "0.54063326", "0.5370435", "0.53439593", "0.52427536", "0.5227065", "0.52156657", "0.51400864", "0.50792253", "0.50605166", "0.5004958", "0.49932584", "0.497809", "0.49667922", "0.49578026", "0.49486753", "0.49344996", "0.49331588", "0.49234542", "0.48837698", "0.48725054", "0.48467216", "0.48440552", "0.48391536", "0.4823034", "0.4819518", "0.47972158", "0.4774119", "0.4764172", "0.47539893", "0.47533306", "0.47419503", "0.47310314", "0.47248495", "0.4719845", "0.47158822", "0.47108647", "0.46847016", "0.466962", "0.46515965", "0.46486098", "0.46377763", "0.46362904", "0.4625676", "0.4617005", "0.4580404", "0.45697668", "0.4569643", "0.45456284", "0.45367113", "0.45287234", "0.45196256", "0.45090982", "0.4504508", "0.45044917", "0.44995382", "0.44921368", "0.44841108", "0.4475064", "0.44668323", "0.44626498", "0.445969", "0.44548056", "0.44512972", "0.44448724", "0.4441685", "0.44383818", "0.44142857", "0.44092718", "0.44060344", "0.43995464", "0.43962684", "0.43956542", "0.43941164", "0.43895224", "0.43883845", "0.43800828", "0.43789396", "0.4374479", "0.43727732", "0.43709046", "0.4356924", "0.43545777", "0.4351784", "0.43475848", "0.43335575", "0.43325925" ]
0.7681637
0
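The ``bfCreate`` document above wraps what the snippet's ``BF_RESERVE`` constant suggests is RedisBloom's ``BF.RESERVE`` command. The sketch below is an editor-added illustration, not a dataset row: the stand-in client, the stubbed ``execute_command``, and the behaviour of the ``appendExpansion``/``appendNoScale`` helpers (EXPANSION and NONSCALING flags) are assumptions inferred from the record, included only so the argument handling can be run without a Redis server.

```python
# Illustrative only -- not part of the dataset. A minimal stand-in client that
# mirrors the bfCreate wrapper shown above; execute_command is stubbed to echo
# the command, and the helper behaviour is an assumption based on the
# BF.RESERVE command syntax rather than code taken from the source library.
class StubBloomClient:
    BF_RESERVE = "BF.RESERVE"

    def appendExpansion(self, params, expansion):
        # Assumed helper: only add the EXPANSION flag when a value is given.
        if expansion is not None:
            params.extend(["EXPANSION", expansion])

    def appendNoScale(self, params, noScale):
        # Assumed helper: add the NONSCALING flag when requested.
        if noScale:
            params.append("NONSCALING")

    def execute_command(self, *args):
        # Stand-in for the real Redis round trip: just echo the command.
        print("->", " ".join(str(a) for a in args))
        return True

    def bfCreate(self, key, errorRate, capacity, expansion=None, noScale=None):
        params = [key, errorRate, capacity]
        self.appendExpansion(params, expansion)
        self.appendNoScale(params, noScale)
        return self.execute_command(self.BF_RESERVE, *params)


StubBloomClient().bfCreate("seen:urls", 0.001, 100000, noScale=True)
# prints: -> BF.RESERVE seen:urls 0.001 100000 NONSCALING
```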
Adds to a Bloom Filter ``key`` an ``item``.
def bfAdd(self, key, item): params = [key, item] return self.execute_command(self.BF_ADD, *params)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cfAdd(self, key, item):\n params = [key, item]\n \n return self.execute_command(self.CF_ADD, *params)", "def add(self, key):\n\t\t#super(CountingBloomFilter, self).add(key)\n\t\t#super(CountingBloomFilter, self).generateStats()\n\t\tfor i in self.getBitArrayIndices(key):\n\t\t\tself.ba[i] += 1\n\t\tself.n += 1", "def add(self, item):\n self.num_item += 1\n indexs = self.__get_indexs(item)\n for index in indexs:\n self.filter_bitarray[index] = True", "def add_item(self, key, item):\n self[key].add(item)\n try:\n self._reverse_store[item].add(key)\n except KeyError:\n self._reverse_store[item] = set([key])", "def append(self, item):\n # FIXME: this is only append if the key isn't already present\n key, value = item\n self._main[key] = value", "def add(self, item):\n self._dict[item] = item", "def add(self, key):\n if key in self:\n return True\n if not self.filters:\n filter = RedisLocalBloomFilter(\n server=self.server, \n bfkeypreffix = self.FILTER_KEY_FMT % (self.bfkeypreffix, self.filter_count),\n capacity=self.initial_capacity,\n error_rate=self.error_rate * (1.0 - self.ratio))\n self.filter_count += 1\n self.filters.append(filter)\n else:\n filter = self.filters[-1]\n if filter.count >= filter.capacity:\n capacity = filter.capacity * self.scale\n if capacity > MAX_PER_SLICE_SIZE:\n capacity = MAX_PER_SLICE_SIZE\n filter = RedisLocalBloomFilter(\n server=self.server,\n bfkeypreffix = self.FILTER_KEY_FMT % (self.bfkeypreffix, self.filter_count),\n capacity=capacity,\n error_rate=self.error_rate * (1.0 - self.ratio))\n self.filter_count += 1\n self.filters.append(filter)\n if self.max_filters > 0 and len(self.filters) >= self.max_filters:\n f = self.filters[0]\n f.clear()\n del self.filters[0]\n filter.add(key, skip_check=True)\n return False", "def add(self, key, value):", "def put(self, key, item):\n # Do nothing if key is NoneType or item is NoneType\n if (key is None or item is None):\n return\n\n # If Key exists, update Cache Item\n if key in self.cache_data:\n for ci in self.LFU:\n if ci.key is key:\n ci.updateItem(item)\n else:\n # Length will be longer than max capacity, make room\n if len(self.cache_data) == self.MAX_ITEMS:\n\n # Determine discarded cache item\n discard = self.LFU[0]\n for x in self.LFU:\n if x.freq < discard.freq or \\\n x.freq == discard.freq and x.age > discard.age:\n discard = x\n\n # Discard Cache Item\n print(\"DISCARD: {}\".format(discard.key))\n del self.cache_data[discard.key]\n self.LFU.remove(discard)\n\n # Add new Cache Item\n data = LFUCacheItem(key, item, 0, 0)\n self.LFU.append(data)\n\n # increase age of all items\n for x in self.LFU:\n x.age += 1\n\n self.cache_data[key] = item", "def add(self, key, value):\n\t\tself.__add_key_to_bt(key)[3] = self.__add_key_value_to_ll(key, value)", "def add_item (self, item):\n new_item = CacheItem (item)\n cached = self.cache.get(hash(item))\n if cached is None:\n self.evict_or_add (new_item)\n cached.hits += 1", "def add_item(self, key, data):\n hash_key = self.count_hash(key, len(self.slots))\n\n if self.slots[hash_key] is None:\n self.slots[hash_key] = key\n self.data[hash_key] = data\n else:\n if self.slots[hash_key] == key:\n self.data[hash_key] = data\n elif isinstance(self.slots[hash_key], int):\n self.slots[hash_key] = (self.slots[hash_key], key,)\n self.data[hash_key] = (self.data[hash_key], data,)\n elif len(self.slots[hash_key]) > 1:\n list_slot = list(self.slots[hash_key])\n list_data = list(self.data[hash_key])\n list_slot.append(key)\n list_data.append(data)\n self.slots[hash_key] = 
tuple(list_slot)\n self.data[hash_key] = tuple(list_data)", "def put(self, key, item):\n if key and item:\n self.cache_data[key] = item", "def put(self, key, item):\n if key and item:\n self.cache_data[key] = item", "def add(self, item):", "def add_to_bag(self, item):\n self._bag.append(item)", "def put(self, key, item):\n if key or item:\n self.cache_data[key] = item\n if len(self.cache_data) > BaseCaching.MAX_ITEMS:\n print(\"DISCARD: {}\".format(self.last))\n del self.cache_data[self.last]\n self.last = key", "def __setitem__(self, key, item):\n self.set_field(key, item)", "def insert(self, item):\n for h_num in xrange(self.k):\n val = self.hash_value(item, h_num)\n self.arr[val] = True", "def add(self, key, value):\n self._data.add_last(self._Item(key, value))", "def put(self, key, item):\n if key and item:\n if key in self.cache_data.keys():\n self.cache_data[key] = item\n self.stack.append(key)\n else:\n if len(self.cache_data.keys()) < self.MAX_ITEMS:\n self.cache_data[key] = item\n self.stack.append(key)\n elif len(self.cache_data.keys()) == self.MAX_ITEMS:\n k = self.stack.pop(-1)\n self.cache_data.pop(k)\n print(\"DISCARD: {}\".format(k))\n self.cache_data[key] = item\n self.stack.append(key)", "def add(self, key, value):\n self._data.append(self._Item(key, value))\n self._upheap(len(self._data) - 1) # upheap newly added position", "def add(self, key, value):\n self._data.append(self._Item(key, value))\n self._upheap(len(self._data) - 1) # upheap newly added position", "def add(self, key, value):\n self.data.append((key, value))", "def add(self, key, skip_check=False):\n bits_per_slice = self.bits_per_slice\n hashes = self.make_hashes(key)\n found_all_bits = True\n if self.count > self.capacity:\n raise IndexError(\"RedisLocalBloomFilter is at capacity\")\n pipe = self.server.pipeline(transaction=False) \n sliceIdx = 0\n for k in hashes:\n sliceKey = self.SLICE_KEY_FMT % (self.bfkeypreffix, sliceIdx)\n pipe.setbit(sliceKey, k, 1)\n sliceIdx += 1\n pipeResults = pipe.execute()\n if not skip_check:\n for pipeResult in pipeResults:\n if not pipeResult:\n found_all_bits = False\n break\n if skip_check:\n self.count += 1\n return False\n elif not found_all_bits:\n self.count += 1\n return False\n else:\n return True", "def _add_item_by_item(self, item):\n self.item_list[item.call_number] = item", "def add(self, item: Mapping[Hashable, Any], **kwargs: Any) -> None:\n self.contents.update(item, **kwargs)\n return", "def add(self, key, value):\n newest = self._Item(key, value) # make new item instance\n walk = self._data.last() # walk backward looking for smaller key\n while walk is not None and newest < walk.element():\n walk = self._data.before(walk)\n if walk is None:\n self._data.add_first(newest) # new key is smallest\n else:\n self._data.add_after(walk, newest) # newest goes after walk", "def _single_setitem(self, key, item):\n self._dict[key] = item", "def put(self, key, item):\n raise NotImplementedError(\"put must be implemented in your cache class\")", "def put(self, key, item):\n if key is not None and item is not None:\n self.cache_data[key] = item\n if key not in self.cache_list:\n self.cache_list.append(key)\n if len(self.cache_data) > self.MAX_ITEMS:\n popped_key = self.cache_list.pop(0)\n print(f\"DISCARD: {popped_key}\")\n del self.cache_data[popped_key]", "def put(self, key, item):\n if key is None or item is None:\n return\n if key in self.key_tracker.keys():\n self.key_tracker.pop(key)\n if len(self.key_tracker) >= BaseCaching.MAX_ITEMS:\n x = 
self.most_recent_use_key()\n print(\"DISCARD: {}\".format(x))\n self.key_tracker.pop(x)\n self.cache_data.pop(x)\n self.cache_data.update({key: item})\n self.key_tracker.update({key: self.count})\n self.count += 1", "def addItem(self, item):\n self.items.append(item)\n self.totalWeight += item", "def add_item(self, item):\n self.items_with_price.update(item)", "def add(self, key, value):\r\n newest = Item(key, value) # make new item instance\r\n walk = self.data.last() # walk backward looking for smaller key\r\n while walk is not None and newest < walk.element():\r\n walk = self.data.before(walk)\r\n if walk is None:\r\n self.data.add_first(newest) # new key is smallest\r\n else:\r\n self.data.add_after(walk, newest) # newest goes after walk\r", "def __setitem__(self, key, item):\n assert isinstance(key,list) and isinstance(item,list) and len(key)==2 and len(item)==2\n self._data[self.__ptBin(key[0])][self.__etaBin(key[1])] = item", "def add(self, key, value):\n self._data.append(self._Item(key, value))\n self._up_heap(len(self) - 1)", "def append(self, item):\n\t\theapq.heappush(self.heap, (self.f(item), item))", "def add(self, item):\n self.update(set([item]))", "def put(self, key, item):\n if key is None or item is None:\n return\n self.cache_data[key] = item", "def put(self, key, item):\n if key is not None and item is not None:\n # modify the time and change the next newer value\n self.timesKey[key] = self.time\n self.time += 1\n\n # add the new item\n self.cache_data[key] = item\n\n if len(self.cache_data) > BaseCaching.MAX_ITEMS:\n discard_key = None\n newer = self.time - 2\n\n for _key, _value in self.timesKey.items():\n if newer == _value:\n discard_key = _key\n break\n\n # del key in time and cache data\n del self.cache_data[discard_key]\n del self.timesKey[discard_key]\n\n print(\"DISCARD: {}\".format(discard_key))", "def __setitem__(self, key, value):\n index=self._index(key)\n if index==-1:\n self._item.append(Item(key,value))\n self._size+=1\n else:\n self._item[index].value=value", "def bfExists(self, key, item):\n params = [key, item]\n \n return self.execute_command(self.BF_EXISTS, *params)", "def put(self, key, item):\n # Do nothing if key is NoneType or item is NoneType\n if (key is None or item is None):\n return\n\n if key in self.cache_data:\n self.LRU = [ci for ci in self.LRU if ci.key != key]\n\n # increase age of all items\n for x in self.LRU:\n x.age += 1\n\n self.cache_data[key] = item\n data = LRUCacheItem(key, item, 0)\n self.LRU.append(data)\n\n # Length is longer than max capacity, make room\n if len(self.cache_data) > self.MAX_ITEMS:\n discard = self.LRU[0]\n for x in self.LRU:\n if x.age > discard.age:\n discard = x\n print(\"DISCARD: {}\".format(discard.key))\n del self.cache_data[discard.key]\n self.LRU.remove(discard)", "def add_item(self, item):\r\n bag_res = consts.BAG_PUT_FAILED\r\n for i in range(len(self._items)):\r\n res = self.put_item_at(i, item, allow_switch=False)\r\n if res == consts.PUT_FORBIDDEN:\r\n return consts.BAG_PUT_FAILED\r\n if res == consts.PUT_SWITCH or \\\r\n res == consts.PUT_INTO_EMPTY or \\\r\n res == consts.PUT_MERGE_TOTALLY:\r\n return consts.BAG_PUT_TOTALLY\r\n if res == consts.PUT_MERGE_PARTIALLY:\r\n bag_res = consts.BAG_PUT_PARTIALLY\r\n continue\r\n if res == consts.PUT_MERGE_FAILED or \\\r\n res == consts.PUT_SWITCH_FORBIDDEN:\r\n continue\r\n return bag_res", "def add(self, key, value):\n new = self._Item(key, value)\n\n if self.is_empty():\n self._data.append(new)\n else:\n for i, item in enumerate(self._data):\n if 
new <= item:\n self._data.insert(i, new)\n break\n if i == len(self) - 1:\n self._data.append(new)\n break", "def cfAddNX(self, key, item):\n params = [key, item]\n \n return self.execute_command(self.CF_ADDNX, *params)", "def add_item(self, item: _T) -> None:\n if item not in self.item_to_index:\n self.item_to_index[item] = len(self.index_to_item)\n self.index_to_item.append(item)", "def add(self, item):\n item = self._prepare_item(len(self), item)\n if item not in self._data:\n self._data.append(item)\n self.__log__.append(SetAdd(value=item))", "def put(self, item): \n self.__db.rpush(self.key, item)", "def set_baggage_item(self, key, value):\n return self", "def add(self, key, val):\n self.obtain(key).append(val)", "def add_new(self, item, key):\n if key in self._items:\n raise DuplicateListHeapItemException(key)\n if len(self._items) >= self._max_limit:\n raise MaxItemLimitReachedException()\n self._items[key] = item\n self._listbox.insert(END, key)", "def add_item(self, item):\n self.items.append(item)", "def add_item(self, item):\n self.items.append(item)\n self.length += 1", "def __setitem__(self, key, value):\n\n bucket_key = self.key_for_bucket(key)\n self.buckets[bucket_key][key] = value", "def add_item(dct,item):\r\n if item not in dct[0]:\r\n print \"itemNotFound \",str(item)\r\n return False\r\n\r\n num=len(item)\r\n if num in dct:\r\n if item in dct[num]:\r\n return False\r\n else:\r\n dct[num].append(item)\r\n return True\r\n else:\r\n dct[num]=[item]\r\n return True", "def append(self, item, **data):\n self._items.append(item)\n if data:\n self._data[item] = data", "def add(self, item: Any) -> None:\n pass", "def add_to_inventory(self, item, quantity):\n\t\tincreaseQuantity = None\n\t\taddToDict = True\n\t\tfor key in self.inventoryDictionary:\n\t\t\tif key.name == item.name:\n\t\t\t\taddToDict = False\n\t\t\t\tincreaseQuantity = key\n\t\t\t\tbreak\n\t\t\t\t\n\n\t\t\telse:\n\t\t\t\taddToDict = True\n\t\t\t\t\n\n\t\tif addToDict:\n\t\t\tself.inventoryDictionary[item] = quantity\n\t\telse:\n\t\t\tself.inventoryDictionary[increaseQuantity] += quantity", "def add(self, item):\n if not (item in self.set):\n self.set[item] = True\n heapq.heappush(self.heap, item)", "def add(self, item):\n\n if item not in self:\n self._index_map[item] = len(self._list)\n self._list.append(item)", "def add_item(self, item):\n self.items.append(item)\n self.item_count += 1\n self.max_length = max(self.max_length, len(item.samples))", "def filter_matches_add(self, key, value):\n\t\tif key in self.filter_matches.keys():\n\t\t\tself.filter_matches[key].append(value)\n\t\telse:\n\t\t\tself.filter_matches[key]=[value]", "def put(self, key, value):\n hashv = self.hash(key)\n bucket=self.hashmap[hashv]\n for i,(k,v) in enumerate(bucket):\n if k==key:\n bucket[i]=(key,value)\n return\n bucket.append((key,value))", "def __setitem__(self, key, item):\n if key>=len(self.trained_rqrmi):\n raise KeyError('Stage index invalid')\n self.trained_rqrmi[key]=item\n self.rqrmi_state_changed=True", "def add_item(self, item):\n item_exists = self.get_item(item.id)\n\n if item_exists:\n item_exists._increment_quantity(item.quantity)\n else:\n self.items.append(item)", "def addItem(self, item, rank):\n with self.lock:\n if self.ItemHashList.get(item, -1) == -1:\n self.ItemHashList[item] = None\n if rank < 0:\n rank = 0\n heapq.heappush(self.ItemList, (rank, item))", "def put(self, item):\n if self.closed:\n print \"Knapsack closed!\"\n else:\n Backpack.put(self, item)", "def add_item(self, item_to_append):\n 
self.items.append(item_to_append)", "def add(self, key):\r\n if key not in self.map:\r\n end = self.end\r\n curr = end[PREV]\r\n curr[NEXT] = end[PREV] = self.map[key] = [key, curr, end]\r\n if self.emitter:\r\n self.emitter.emit()", "def add(self, item, issue):\n if self.has_key(item):\n self[item].append(issue)\n else:\n self[item] = [issue]\n return 1", "def add_item(self, item, index):\n if index in self.d_buffer.keys():\n return True\n elif len(self) < self._size:\n self.d_buffer.update({index: item})\n return True\n else:\n return False", "def add(self, key, value):\n raise NotImplementedError('must be implemented by subclass')", "def add(self, key, value):\n raise NotImplementedError('must be implemented by subclass')", "def addItem(*args):", "def addItem(*args):", "def addItem(*args):", "def append(self, item):\n self.update([item])", "def add(self, key, value):\n self._store[key] = value", "def _insert_item(self, key: _KT, value: _VT) -> None:\n dict.__setitem__(self, key, value)", "def carry(self, item):\r\n\r\n # If you can add with the tier,\r\n # you have to check that its viable to carry\r\n if self.add is True:\r\n\r\n # This takes the new item and makes it your current item\r\n if item.size is True:\r\n self.item = item", "def add(self, key, obj):\n with self._lock:\n slot = self._dict.get(key, None)\n if slot is None:\n slot = [obj, 0]\n else:\n slot[1] += 1\n self._dict[key] = slot", "def __setitem__(self, key, obj):\n self.add(key, obj, self._mode)", "def put(self, key: int, value: int) -> None:\n t = key % 20011\n for item in self.hash[t]:\n if item[0] == key:\n item[1] = value\n return\n self.hash[t].append([key, value])", "def add(self, item):\n if self.has_item(item):\n return\n\n self.cache.append(item)\n\n if self.size() > self.max_size:\n self.cache.popleft()", "def add_to_basket(self, item):\n self._products.append(item)", "def put(self, key, value):\r\n\r\n\r\n\t\tindex = self.get_index(key) # get the index\r\n\t\tcur_list = self._buckets[index] # this is the linked list\r\n\r\n\t\t# remove the key and assign the returned boolean in removed\r\n\t\tremoved = cur_list.remove(key)\r\n\t\tcur_list.add_front(key, value) # re-add the key with updated value\r\n\r\n\t\t# if removed is false, then a new key was added so increase size by 1\r\n\t\tif not removed:\r\n\t\t\tself.size += 1", "def add(self, item):\n if item in self:\n self._set(item, self._get(item) + 1)\n else:\n self._set(item, 1)", "def add(self, item):\n self._set(item, None)", "def add_item(self,itm,qty=1):\n inv = self.get_inventory()\n s = str(itm)\n inv[s] = inv.get(s, 0) + qty\n self.put_inventory(inv)", "def add(self, key):\r\n if key not in self.map:\r\n end = self.end\r\n curr = end[PREV]\r\n curr[NEXT] = end[PREV] = self.map[key] = [key, curr, end]", "def addItem(self, key):\n if key in self.dictionary:\n raise Exception(\"Key already exist in dictionary\")\n self.dictionary[key] = WordInformation(self.MAX_RATING)", "def append(self, item):\n self.items.append(item)", "def _hash(self, item):\r\n pass # TODO\r", "def append_to(self, key, value):\n self.get_data()[key].append(value.get_data())", "def push(self, item):\n pass", "def push(self, item):\n\t\tself.items.append(item)", "def add_item(product, price):\n ADD_PRODUCTS[product] = price", "def __setitem__(self, key, item):\n self.attrib[key] = item" ]
[ "0.7277136", "0.7030493", "0.6990854", "0.6836308", "0.66995275", "0.66974354", "0.66621774", "0.66100615", "0.65638524", "0.6553088", "0.6477446", "0.6454946", "0.640302", "0.640302", "0.63999856", "0.63677585", "0.63595116", "0.6356604", "0.63035226", "0.6302042", "0.6256967", "0.62406826", "0.6236075", "0.6231482", "0.62213266", "0.6210293", "0.62102187", "0.61980146", "0.61943066", "0.6194135", "0.61937606", "0.6179238", "0.6167675", "0.6146766", "0.6141641", "0.61341286", "0.61237806", "0.6112425", "0.61123496", "0.61070865", "0.6094295", "0.6093903", "0.60768664", "0.6058082", "0.60557413", "0.6035462", "0.60216945", "0.6015179", "0.6012418", "0.59960526", "0.59677416", "0.59639734", "0.5961572", "0.5957875", "0.59519887", "0.59249467", "0.5923214", "0.5921751", "0.590098", "0.5897045", "0.5876309", "0.5874343", "0.5871295", "0.5844242", "0.58351654", "0.5834547", "0.5833827", "0.5818586", "0.5816933", "0.5813084", "0.57941246", "0.57896036", "0.5782607", "0.5770214", "0.5770214", "0.5760893", "0.5760893", "0.5760893", "0.5758901", "0.5747695", "0.57345116", "0.5734171", "0.5733015", "0.5729901", "0.5728527", "0.572457", "0.5723529", "0.5716404", "0.57029647", "0.56966144", "0.56923604", "0.56714904", "0.5667964", "0.56457376", "0.56282127", "0.5618555", "0.5616277", "0.56162703", "0.56034756", "0.5592233" ]
0.8320729
0
Adds multiple ``items`` to a Bloom Filter ``key``.
def bfMAdd(self, key, *items):
    params = [key]
    params += items

    return self.execute_command(self.BF_MADD, *params)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def bfAdd(self, key, item):\n params = [key, item]\n \n return self.execute_command(self.BF_ADD, *params)", "def add(self, key):\n\t\t#super(CountingBloomFilter, self).add(key)\n\t\t#super(CountingBloomFilter, self).generateStats()\n\t\tfor i in self.getBitArrayIndices(key):\n\t\t\tself.ba[i] += 1\n\t\tself.n += 1", "def add(self, item):\n self.num_item += 1\n indexs = self.__get_indexs(item)\n for index in indexs:\n self.filter_bitarray[index] = True", "def add(self, *items):", "def add(self, *items):\n for item in items:\n self.unsorted.append(item)\n key = item[0]\n self.index[key] = item\n return self", "def cfAdd(self, key, item):\n params = [key, item]\n \n return self.execute_command(self.CF_ADD, *params)", "def update(self, *items):\n for item in items:\n self.add(item)", "def add(self, key):\n if key in self:\n return True\n if not self.filters:\n filter = RedisLocalBloomFilter(\n server=self.server, \n bfkeypreffix = self.FILTER_KEY_FMT % (self.bfkeypreffix, self.filter_count),\n capacity=self.initial_capacity,\n error_rate=self.error_rate * (1.0 - self.ratio))\n self.filter_count += 1\n self.filters.append(filter)\n else:\n filter = self.filters[-1]\n if filter.count >= filter.capacity:\n capacity = filter.capacity * self.scale\n if capacity > MAX_PER_SLICE_SIZE:\n capacity = MAX_PER_SLICE_SIZE\n filter = RedisLocalBloomFilter(\n server=self.server,\n bfkeypreffix = self.FILTER_KEY_FMT % (self.bfkeypreffix, self.filter_count),\n capacity=capacity,\n error_rate=self.error_rate * (1.0 - self.ratio))\n self.filter_count += 1\n self.filters.append(filter)\n if self.max_filters > 0 and len(self.filters) >= self.max_filters:\n f = self.filters[0]\n f.clear()\n del self.filters[0]\n filter.add(key, skip_check=True)\n return False", "def add_items(self, items):\n for item in items:\n self.add(item)", "def add(self, key, value):\n\t\tself.__add_key_to_bt(key)[3] = self.__add_key_value_to_ll(key, value)", "def add(self, key, value):", "def add(self, key, skip_check=False):\n bits_per_slice = self.bits_per_slice\n hashes = self.make_hashes(key)\n found_all_bits = True\n if self.count > self.capacity:\n raise IndexError(\"RedisLocalBloomFilter is at capacity\")\n pipe = self.server.pipeline(transaction=False) \n sliceIdx = 0\n for k in hashes:\n sliceKey = self.SLICE_KEY_FMT % (self.bfkeypreffix, sliceIdx)\n pipe.setbit(sliceKey, k, 1)\n sliceIdx += 1\n pipeResults = pipe.execute()\n if not skip_check:\n for pipeResult in pipeResults:\n if not pipeResult:\n found_all_bits = False\n break\n if skip_check:\n self.count += 1\n return False\n elif not found_all_bits:\n self.count += 1\n return False\n else:\n return True", "def append(self, items):\n self.__add__(items)", "def addItems(*args):", "def filter_matches_add(self, key, value):\n\t\tif key in self.filter_matches.keys():\n\t\t\tself.filter_matches[key].append(value)\n\t\telse:\n\t\t\tself.filter_matches[key]=[value]", "def add(self, key, value):\n self.data.append((key, value))", "def add(self, keys: List[Tuple[int, int]], vectors: np.ndarray, weights: List[float], *args, **kwargs):\n pass", "def __setitem__(self, key, item):\n assert isinstance(key,list) and isinstance(item,list) and len(key)==2 and len(item)==2\n self._data[self.__ptBin(key[0])][self.__etaBin(key[1])] = item", "def topkAdd(self, key, *items):\n params = [key]\n params += items\n \n return self.execute_command(self.TOPK_ADD, *params)", "def add(self, key, values):\n self.watchlists[key] = list(enumerate(values))", "def add_item(self, key, data):\n hash_key = 
self.count_hash(key, len(self.slots))\n\n if self.slots[hash_key] is None:\n self.slots[hash_key] = key\n self.data[hash_key] = data\n else:\n if self.slots[hash_key] == key:\n self.data[hash_key] = data\n elif isinstance(self.slots[hash_key], int):\n self.slots[hash_key] = (self.slots[hash_key], key,)\n self.data[hash_key] = (self.data[hash_key], data,)\n elif len(self.slots[hash_key]) > 1:\n list_slot = list(self.slots[hash_key])\n list_data = list(self.data[hash_key])\n list_slot.append(key)\n list_data.append(data)\n self.slots[hash_key] = tuple(list_slot)\n self.data[hash_key] = tuple(list_data)", "def add(self, item):\n self.update(set([item]))", "def add_item(self, key, item):\n self[key].add(item)\n try:\n self._reverse_store[item].add(key)\n except KeyError:\n self._reverse_store[item] = set([key])", "def addItem(*args):", "def addItem(*args):", "def addItem(*args):", "def add(self, key, value):\n self._data.add_last(self._Item(key, value))", "def add_items(self, items):\n for item in items:\n self.addItem(item)\n # end for item in items", "def insert_many(self, conn, key, **kwargs):\n conn.zadd(key, **kwargs)", "def append(self, item):\n # FIXME: this is only append if the key isn't already present\n key, value = item\n self._main[key] = value", "def add(self, key, val):\n self.obtain(key).append(val)", "def add(self, items):\n if isinstance(items, list):\n self.items.extend(items)\n else:\n self.items.append(items)", "def add_items(self, items: Iterable[_T]) -> None:\n for item in items:\n self.add_item(item)", "def add(self, item):", "def update(self, items: Mapping[Any, Any]) -> None:\n self.extend(list(items.values()))\n return", "def append(self, *items: BOSminer) -> None:\n for item in items:\n self.miners[item.ip] = item", "def add(self, key, value):\n new = self._Item(key, value)\n\n if self.is_empty():\n self._data.append(new)\n else:\n for i, item in enumerate(self._data):\n if new <= item:\n self._data.insert(i, new)\n break\n if i == len(self) - 1:\n self._data.append(new)\n break", "def add_to_bag(self, item):\n self._bag.append(item)", "def append_to(self, key, value):\n self.get_data()[key].append(value.get_data())", "def bfMExists(self, key, *items):\n params = [key]\n params += items\n\n return self.execute_command(self.BF_MEXISTS, *params)", "def addItem(self, item):\n self.items.append(item)\n self.totalWeight += item", "def add(self, item):\n self._dict[item] = item", "def __setitem__(self, key, value):\r\n key = self.key(key)\r\n if key in self.data_with_same_key:\r\n self.data_with_same_key[key] += [self.data[key]]\r\n elif key in self.data:\r\n self.data_with_same_key[key] = [self.data[key]]\r\n self.data[key] = value", "def add_items(self, library_items):\n for item in library_items:\n self._all_items.append(item)", "def _add_item_by_item(self, item):\n self.item_list[item.call_number] = item", "def add(self, item):\n item = self._prepare_item(len(self), item)\n if item not in self._data:\n self._data.append(item)\n self.__log__.append(SetAdd(value=item))", "def add(self, key, value):\n newest = self._Item(key, value) # make new item instance\n walk = self._data.last() # walk backward looking for smaller key\n while walk is not None and newest < walk.element():\n walk = self._data.before(walk)\n if walk is None:\n self._data.add_first(newest) # new key is smallest\n else:\n self._data.add_after(walk, newest) # newest goes after walk", "def bfInsert(self, key, items, capacity=None, error=None, noCreate=None, expansion=None, noScale=None):\n params = [key]\n 
self.appendCapacity(params, capacity)\n self.appendError(params, error)\n self.appendExpansion(params, expansion)\n self.appendNoCreate(params, noCreate)\n self.appendNoScale(params, noScale)\n self.appendItems(params, items)\n\n return self.execute_command(self.BF_INSERT, *params)", "def add(self, keys: Iterator[int], values: Iterator[bytes], *args, **kwargs):\n redis_docs = [{'_id': i, 'values': j} for i, j in zip(keys, values)]\n\n with self.get_add_handler() as redis_handler:\n for k in redis_docs:\n redis_handler.set(k['_id'], k['values'])", "def add(self, key, value):\r\n newest = Item(key, value) # make new item instance\r\n walk = self.data.last() # walk backward looking for smaller key\r\n while walk is not None and newest < walk.element():\r\n walk = self.data.before(walk)\r\n if walk is None:\r\n self.data.add_first(newest) # new key is smallest\r\n else:\r\n self.data.add_after(walk, newest) # newest goes after walk\r", "def insert(self, item):\n for h_num in xrange(self.k):\n val = self.hash_value(item, h_num)\n self.arr[val] = True", "def add_set(self, elts):\n self.bloom.add_set([x.filter_type for x in elts])", "def add(self, item: Mapping[Hashable, Any], **kwargs: Any) -> None:\n self.contents.update(item, **kwargs)\n return", "def add(self, key, value):\n self._data.append(self._Item(key, value))\n self._up_heap(len(self) - 1)", "def add(self, x):\n for i in range(self.k):\n self.bits[mmh3.hash(x,i) % self.m] = True", "def __setitem__(self, key, value) :\n attributeslist = getattr(self.request, \"_%s_attributes\" % self.name)\n for i in range(len(attributeslist)) :\n attribute = attributeslist[i]\n for j in range(len(attribute)) :\n (attrname, attrvalue) = attribute[j]\n if attrname == key :\n attribute[j][1].append(value)\n return\n attribute.append((key, [value]))", "def add(self,**kwargs):\n if self._extract:\n raise RuntimeError('This archive is write-only!')\n\n items = kwargs.iteritems() if PY2 else kwargs.items()\n for key,value in items:\n self._setitem(key,value)", "def add_toolbar_items(self, *toolbar_items):\n self.items += [self._map_item(item) for item in toolbar_items]", "def add(self, key):\r\n if key not in self.map:\r\n end = self.end\r\n curr = end[PREV]\r\n curr[NEXT] = end[PREV] = self.map[key] = [key, curr, end]\r\n if self.emitter:\r\n self.emitter.emit()", "def add_to_queue(self, items):\n\n for i in items:\n self.r.rpush(self.joblist, i)", "def add(self, key, value):\n self._data.append(self._Item(key, value))\n self._upheap(len(self._data) - 1) # upheap newly added position", "def __setitem__(self, key, value):\n self.setdefault(key, []).append(value)", "def __setitem__(self, key, value):\n self.setdefault(key, []).append(value)", "def append(self, item):\n self.update([item])", "def add(self, key, value):\n self._data.append(self._Item(key, value))\n self._upheap(len(self._data) - 1) # upheap newly added position", "def add_items(self, items: typing.Iterable[str]) -> None:\n for item in items:\n self.add_item(item)", "def test_neg_operate_append_items_not_a_list(self):\n key = (\"test\", \"demo\", \"list_key\")\n\n list = [\n {\"op\": aerospike.OP_LIST_APPEND_ITEMS, \"bin\": \"int_bin\", \"val\": 7},\n ]\n\n try:\n key, _, bins = self.as_connection.operate(key, list)\n except e.ParamError as exception:\n assert exception.code == -2", "def __setitem__(self, key, value):\r\n self.setdefault(key, []).append(value)", "def extend(self, items):\n\t\tfor item in items:\n\t\t\tself.append(item)", "def add_all(self, *values):\n for value in values:\n 
self.add(value)", "def add_lists(self, key, value, pos):\n if pos == 'r':\n return self.redis.rpush(key, value)\n else:\n return self.redis.lpush(key, value)", "def process_new_items(self, new_items):\n self.items_hat = np.hstack([self.items_hat, new_items])", "def append(self, item, **data):\n self._items.append(item)\n if data:\n self._data[item] = data", "def add_to_group(self,item):\n self.items.append(item)\n self.n += 1", "def multi_set(self, items, no_update_log=False):\n opts = (no_update_log and TyrantProtocol.RDBMONOULOG or 0)\n lst = []\n for k, v in items.iteritems():\n if isinstance(v, (dict)):\n new_v = []\n for kk, vv in v.items():\n new_v.append(kk)\n new_v.append(vv)\n v = new_v\n if isinstance(v, (list, tuple)):\n assert self.separator, \"Separator is not set\"\n\n v = self.separator.join(v)\n lst.extend((k, v))\n\n wait(self.proto.misc(\"putlist\", lst, opts))", "def add(self, key):\r\n if key not in self.map:\r\n end = self.end\r\n curr = end[PREV]\r\n curr[NEXT] = end[PREV] = self.map[key] = [key, curr, end]", "def add_item(self, item):\r\n bag_res = consts.BAG_PUT_FAILED\r\n for i in range(len(self._items)):\r\n res = self.put_item_at(i, item, allow_switch=False)\r\n if res == consts.PUT_FORBIDDEN:\r\n return consts.BAG_PUT_FAILED\r\n if res == consts.PUT_SWITCH or \\\r\n res == consts.PUT_INTO_EMPTY or \\\r\n res == consts.PUT_MERGE_TOTALLY:\r\n return consts.BAG_PUT_TOTALLY\r\n if res == consts.PUT_MERGE_PARTIALLY:\r\n bag_res = consts.BAG_PUT_PARTIALLY\r\n continue\r\n if res == consts.PUT_MERGE_FAILED or \\\r\n res == consts.PUT_SWITCH_FORBIDDEN:\r\n continue\r\n return bag_res", "def add_key(self, key_list: list) -> None:\n\n for key, funct, desc in key_list:\n # Force keys to be lowercase\n key = key.lower()\n \n self.key_functs[key] = funct\n self.key_satified[key] = False\n self.key_description[key] = desc\n self.key_values[key] = None", "def add_bag(self, bag, quantity):\n self.bags.append((bag, quantity))", "def __setitem__(self, key, value):\n index=self._index(key)\n if index==-1:\n self._item.append(Item(key,value))\n self._size+=1\n else:\n self._item[index].value=value", "def add(self, key, value):\n raise NotImplementedError('must be implemented by subclass')", "def add(self, key, value):\n raise NotImplementedError('must be implemented by subclass')", "def _append_row(self, key, value, item):\n self._items.append(item)\n self.key_listbox.insert(tk.END, key)\n self.value_listbox.insert(tk.END, value)", "def add(self, **kwargs):\n for key, value in kwargs.items():\n if key not in self.keys:\n self.keys.append(key)\n # making 'key' new method of class = an empty listh\n setattr(self, key, [])\n if isinstance(value, torch.Tensor):\n value = value.detach().numpy()\n # value = np.nan_to_num(value) #was converting list to arrays :S\n if isinstance(value, np.float32):\n value = float(value)\n if isinstance(value, np.int64):\n value = int(value)\n # calling method key of class and since a list\n getattr(self, key).append(value)\n # appending new val", "def add(self, item: Any) -> None:\n pass", "def append(self, data):\n if self._expand_mode:\n new_keys = set(data.keys()) - self.keys & self._keys\n self._expand(new_keys)\n self._cubas = None\n self._append(data)\n self._number_of_items += 1", "def add_item(self, item):\n self.items.append(item)\n self.item_count += 1\n self.max_length = max(self.max_length, len(item.samples))", "def append_all_agent_batch_to_update_buffer(\n self,\n update_buffer: AgentBuffer,\n key_list: List[str] = None,\n batch_size: int = 
None,\n training_length: int = None,\n ) -> None:\n for agent_id in self.keys():\n self.append_to_update_buffer(\n update_buffer, agent_id, key_list, batch_size, training_length\n )", "def add_many(self, count, *args, **kwargs):\n for idx in range(count):\n kw = {k: v[idx] for k, v in kwargs.items()}\n arg = () if not len(args) else args[idx]\n self.add(*arg, **kw)", "def cfAddNX(self, key, item):\n params = [key, item]\n \n return self.execute_command(self.CF_ADDNX, *params)", "def add_item(self, item):\n self.items_with_price.update(item)", "def add(self, el):\n for i in range(self.k):\n self.bm[self.hashf(el,i)%self.ms + self.ms*i] = 1", "def define(self, *keys):\n for key in keys:\n self.add(key)\n self._frozen = True", "def __setitem__(self, key, item):\n self.set_field(key, item)", "def add_item(self, item_to_append):\n self.items.append(item_to_append)", "def _update_append_key(self):\n self.append_key += 1", "def Add(self, key, *args):\n temp_error = Errors()\n if ErrMsg.isValidKey(key, ErrMsg._MsgKey__class, temp_error):\n if key.argcount != len(args):\n if not self._keychainExists(key):\n self._keychainExists(key, True)\n exception = self._validateException(key.exception)\n if exception:\n self.Raise(exception, key, args)\n else:\n self._add(key, args)\n else:\n self.Add(ErrMsg.Error.Add.Invalid_Msgformat, key.message, args)\n\n elif ErrMsg.isValidKey(key, None, temp_error):\n # Assume GENERIC status\n\n key = ErrMsg._defaultKeyChain(key, temp_error)\n if temp_error:\n pass\n else:\n self.Add(key, args)\n else:\n self.Add(ErrMsg.Error.Add.Invalid_Errorkey, key.message, args)", "def append_to_update_buffer(\n self,\n update_buffer: AgentBuffer,\n agent_id: Union[int, str],\n key_list: List[str] = None,\n batch_size: int = None,\n training_length: int = None,\n ) -> None:\n if key_list is None:\n key_list = self[agent_id].keys()\n if not self[agent_id].check_length(key_list):\n raise BufferException(\n \"The length of the fields {0} for agent {1} were not of same length\".format(\n key_list, agent_id\n )\n )\n for field_key in key_list:\n update_buffer[field_key].extend(\n self[agent_id][field_key].get_batch(\n batch_size=batch_size, training_length=training_length\n )\n )", "def add_item(dct,item):\r\n if item not in dct[0]:\r\n print \"itemNotFound \",str(item)\r\n return False\r\n\r\n num=len(item)\r\n if num in dct:\r\n if item in dct[num]:\r\n return False\r\n else:\r\n dct[num].append(item)\r\n return True\r\n else:\r\n dct[num]=[item]\r\n return True", "def add_to_heap(self, key, count=0):\n entry = [1 + count, next(self.counter), key, HeapItemStatus.ACTIVE]\n self.map[key] = entry\n heappush(self.heap, entry)" ]
[ "0.75810397", "0.6952061", "0.6553019", "0.6400322", "0.6362373", "0.63382643", "0.6300669", "0.62907505", "0.62171847", "0.6120615", "0.6111401", "0.61021876", "0.6070599", "0.6009563", "0.59573513", "0.5943457", "0.59371245", "0.5929365", "0.59166753", "0.59138656", "0.58661884", "0.5822714", "0.5806336", "0.5801251", "0.5801251", "0.5801251", "0.5776631", "0.5773301", "0.5770413", "0.5759221", "0.5729419", "0.5682418", "0.5657775", "0.56534", "0.5633782", "0.56326437", "0.55657965", "0.5548693", "0.5540494", "0.5532926", "0.55185866", "0.5514787", "0.5473642", "0.547226", "0.54601777", "0.54388523", "0.5430785", "0.5424556", "0.5418512", "0.54161006", "0.54048353", "0.5399717", "0.53992325", "0.5390991", "0.5379317", "0.53736395", "0.53563607", "0.5350343", "0.53500015", "0.5348082", "0.5341145", "0.5338471", "0.5338471", "0.53359175", "0.5331865", "0.5321067", "0.5308007", "0.5305774", "0.5304302", "0.5302939", "0.5302462", "0.5293666", "0.5288746", "0.5283975", "0.5281428", "0.52753395", "0.52544457", "0.525357", "0.5249646", "0.524748", "0.5233114", "0.5233114", "0.52323174", "0.52315664", "0.5229082", "0.52231133", "0.5222723", "0.5214405", "0.52142507", "0.521297", "0.5211569", "0.5202014", "0.5201585", "0.5199091", "0.5195786", "0.5193786", "0.51934564", "0.5183309", "0.51696324", "0.51680696" ]
0.588327
20
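A minimal usage sketch for the bfMAdd document in the record above, assuming the redisbloom Python client and a Redis server with the RedisBloom module loaded; the import path, constructor arguments, and key/item names are illustrative assumptions, while bfCreate and bfMAdd themselves appear in the snippets of this section.

    from redisbloom.client import Client  # assumed import path for the client class

    rb = Client(host='localhost', port=6379)   # assumed connection settings
    rb.bfCreate('urls', 0.01, 1000)            # reserve a filter: 1% error rate, ~1000 expected items
    added = rb.bfMAdd('urls', 'a.com', 'b.com', 'c.com')  # BF.MADD urls a.com b.com c.com
    print(added)  # typically [1, 1, 1]; 1 means the item was newly added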
Adds multiple ``items`` to a Bloom Filter ``key``. If ``noCreate`` remains ``None`` and ``key`` does not exist, a new Bloom Filter ``key`` will be created with the desired probability of false positives ``error`` and the expected number of entries to be inserted as ``capacity``.
def bfInsert(self, key, items, capacity=None, error=None, noCreate=None, expansion=None, noScale=None):
    params = [key]
    self.appendCapacity(params, capacity)
    self.appendError(params, error)
    self.appendExpansion(params, expansion)
    self.appendNoCreate(params, noCreate)
    self.appendNoScale(params, noScale)
    self.appendItems(params, items)

    return self.execute_command(self.BF_INSERT, *params)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def bfAdd(self, key, item):\n params = [key, item]\n \n return self.execute_command(self.BF_ADD, *params)", "def add(self, key):\n\t\t#super(CountingBloomFilter, self).add(key)\n\t\t#super(CountingBloomFilter, self).generateStats()\n\t\tfor i in self.getBitArrayIndices(key):\n\t\t\tself.ba[i] += 1\n\t\tself.n += 1", "def add(self, key):\n if key in self:\n return True\n if not self.filters:\n filter = RedisLocalBloomFilter(\n server=self.server, \n bfkeypreffix = self.FILTER_KEY_FMT % (self.bfkeypreffix, self.filter_count),\n capacity=self.initial_capacity,\n error_rate=self.error_rate * (1.0 - self.ratio))\n self.filter_count += 1\n self.filters.append(filter)\n else:\n filter = self.filters[-1]\n if filter.count >= filter.capacity:\n capacity = filter.capacity * self.scale\n if capacity > MAX_PER_SLICE_SIZE:\n capacity = MAX_PER_SLICE_SIZE\n filter = RedisLocalBloomFilter(\n server=self.server,\n bfkeypreffix = self.FILTER_KEY_FMT % (self.bfkeypreffix, self.filter_count),\n capacity=capacity,\n error_rate=self.error_rate * (1.0 - self.ratio))\n self.filter_count += 1\n self.filters.append(filter)\n if self.max_filters > 0 and len(self.filters) >= self.max_filters:\n f = self.filters[0]\n f.clear()\n del self.filters[0]\n filter.add(key, skip_check=True)\n return False", "def cfInsert(self, key, items, capacity=None, nocreate=None):\n params = [key]\n self.appendCapacity(params, capacity)\n self.appendNoCreate(params, nocreate)\n self.appendItems(params, items)\n\n return self.execute_command(self.CF_INSERT, *params)", "def add_new(self, item, key):\n if key in self._items:\n raise DuplicateListHeapItemException(key)\n if len(self._items) >= self._max_limit:\n raise MaxItemLimitReachedException()\n self._items[key] = item\n self._listbox.insert(END, key)", "def cfAdd(self, key, item):\n params = [key, item]\n \n return self.execute_command(self.CF_ADD, *params)", "def add(self, key, skip_check=False):\n bits_per_slice = self.bits_per_slice\n hashes = self.make_hashes(key)\n found_all_bits = True\n if self.count > self.capacity:\n raise IndexError(\"RedisLocalBloomFilter is at capacity\")\n pipe = self.server.pipeline(transaction=False) \n sliceIdx = 0\n for k in hashes:\n sliceKey = self.SLICE_KEY_FMT % (self.bfkeypreffix, sliceIdx)\n pipe.setbit(sliceKey, k, 1)\n sliceIdx += 1\n pipeResults = pipe.execute()\n if not skip_check:\n for pipeResult in pipeResults:\n if not pipeResult:\n found_all_bits = False\n break\n if skip_check:\n self.count += 1\n return False\n elif not found_all_bits:\n self.count += 1\n return False\n else:\n return True", "def add(self, key, value):\n new = self._Item(key, value)\n\n if self.is_empty():\n self._data.append(new)\n else:\n for i, item in enumerate(self._data):\n if new <= item:\n self._data.insert(i, new)\n break\n if i == len(self) - 1:\n self._data.append(new)\n break", "def topkAdd(self, key, *items):\n params = [key]\n params += items\n \n return self.execute_command(self.TOPK_ADD, *params)", "def add_to_items(items, name, size, price):\n index = items_contains_name(items, name)\n if index == 0:\n temp = {'name': name, 'size': size, 'count': 1, 'price': price}\n items.append(temp)\n else:\n items[index]['count'] = items[index]['count'] + 1\n return items", "def Add(self, key, *args):\n temp_error = Errors()\n if ErrMsg.isValidKey(key, ErrMsg._MsgKey__class, temp_error):\n if key.argcount != len(args):\n if not self._keychainExists(key):\n self._keychainExists(key, True)\n exception = self._validateException(key.exception)\n if exception:\n 
self.Raise(exception, key, args)\n else:\n self._add(key, args)\n else:\n self.Add(ErrMsg.Error.Add.Invalid_Msgformat, key.message, args)\n\n elif ErrMsg.isValidKey(key, None, temp_error):\n # Assume GENERIC status\n\n key = ErrMsg._defaultKeyChain(key, temp_error)\n if temp_error:\n pass\n else:\n self.Add(key, args)\n else:\n self.Add(ErrMsg.Error.Add.Invalid_Errorkey, key.message, args)", "def bfCreate(self, key, errorRate, capacity, expansion=None, noScale=None):\n params = [key, errorRate, capacity]\n self.appendExpansion(params, expansion)\n self.appendNoScale(params, noScale)\n\n return self.execute_command(self.BF_RESERVE, *params)", "def add(self, key, value):\n newest = self._Item(key, value) # make new item instance\n walk = self._data.last() # walk backward looking for smaller key\n while walk is not None and newest < walk.element():\n walk = self._data.before(walk)\n if walk is None:\n self._data.add_first(newest) # new key is smallest\n else:\n self._data.add_after(walk, newest) # newest goes after walk", "def test_sample_container_add_exceeds_limit(self):\n self.assertEqual(self.container._data, defaultdict(list))\n\n retval = self.container.add(\"key1\", [\"1\", \"2\", \"3\", ], 2)\n\n self.assertEqual(retval, [\"1\", \"2\", \"3\", ])\n self.assertEqual([], self.container._data[\"key1\"])", "def add_item(self, item):\n self.items.append(item)\n self.item_count += 1\n self.max_length = max(self.max_length, len(item.samples))", "def addItem(self, key):\n if key in self.dictionary:\n raise Exception(\"Key already exist in dictionary\")\n self.dictionary[key] = WordInformation(self.MAX_RATING)", "def add_item(self, item, index):\n if index in self.d_buffer.keys():\n return True\n elif len(self) < self._size:\n self.d_buffer.update({index: item})\n return True\n else:\n return False", "def add(self, key, value):\n self._data.append(self._Item(key, value))\n self._upheap(len(self._data) - 1) # upheap newly added position", "def cfInsertNX(self, key, items, capacity=None, nocreate=None):\n params = [key]\n self.appendCapacity(params, capacity)\n self.appendNoCreate(params, nocreate)\n self.appendItems(params, items)\n\n return self.execute_command(self.CF_INSERTNX, *params)", "def add(self, key, value):\n self._data.append(self._Item(key, value))\n self._upheap(len(self._data) - 1) # upheap newly added position", "def append ( self , item ) :\n self.cond.acquire()\n try:\n if self.closed :\n raise Exception( \"Trying to append to a closed queue\" )\n else :\n self.weight += int( item['size'] )\n self.push( item )\n self.cond.notify()\n finally:\n self.cond.release()", "def appenddictitemsize(self, key, numents):\n self._dentsvertsdata[key].appendsize(numents * self._multFactor)", "def add(self, key, value):\r\n newest = Item(key, value) # make new item instance\r\n walk = self.data.last() # walk backward looking for smaller key\r\n while walk is not None and newest < walk.element():\r\n walk = self.data.before(walk)\r\n if walk is None:\r\n self.data.add_first(newest) # new key is smallest\r\n else:\r\n self.data.add_after(walk, newest) # newest goes after walk\r", "def add(self, item):\n self.num_item += 1\n indexs = self.__get_indexs(item)\n for index in indexs:\n self.filter_bitarray[index] = True", "def add(self, key, value):\n self._data.append(self._Item(key, value))\n self._up_heap(len(self) - 1)", "def cfAddNX(self, key, item):\n params = [key, item]\n \n return self.execute_command(self.CF_ADDNX, *params)", "def insert(self, key, value):\n # Resize array here if 
necessary.\n if key < 0: key = 0\n elif key > len(self): key = len(self)\n if key < len(self):\n for j in range(len(self), key, -1):\n self._items[j] = self._items[j - 1]\n self._items[key] = value\n self._size += 1\n self.incModCount()", "def add_item(self):\n item = LibGen.create_item()\n if not self.item_exists(item.call_number):\n self.item_list[item.call_number] = item\n print(f\"Item({item.call_number}) bas been added.\")\n else:\n print(\"This item already exists.\")", "def _additems(self, w,h):\n for idx in range(len(self.data['items'])):\n default={\n 'color': self.data['itemscolor'],\n 'textscale': self.data['itemsscale'],\n 'textfont': self.data['textfont'],\n 'width': w-(self.data['margin'][0]*2.),\n }\n self.data['items'][idx].update(default)\n self.addItem(idx, **self.data['items'][idx])", "def add_item(self, item):\r\n bag_res = consts.BAG_PUT_FAILED\r\n for i in range(len(self._items)):\r\n res = self.put_item_at(i, item, allow_switch=False)\r\n if res == consts.PUT_FORBIDDEN:\r\n return consts.BAG_PUT_FAILED\r\n if res == consts.PUT_SWITCH or \\\r\n res == consts.PUT_INTO_EMPTY or \\\r\n res == consts.PUT_MERGE_TOTALLY:\r\n return consts.BAG_PUT_TOTALLY\r\n if res == consts.PUT_MERGE_PARTIALLY:\r\n bag_res = consts.BAG_PUT_PARTIALLY\r\n continue\r\n if res == consts.PUT_MERGE_FAILED or \\\r\n res == consts.PUT_SWITCH_FORBIDDEN:\r\n continue\r\n return bag_res", "def add(self, key: str, value: str) -> Optional[None]:\n threshhold = self.capacity * 0.75\n if self.length >= threshhold:\n self._increase_size()\n\n hashkey = self._gethash(key)\n if not self.HashMap[hashkey]:\n # The key does not exist so add it\n value_to_store = [key, value]\n self.HashMap[hashkey] = value_to_store\n self.length += 1\n elif self.HashMap[hashkey] and key not in self.HashMap[hashkey]:\n # There is a hashclash append to the location\n self.HashMap[hashkey].extend([key, value])\n self.length += 1\n else:\n # The key exists and matches so the value gets overlayed\n self.HashMap[hashkey] = [key, value]", "def insert(self, item):\r\n if not self.is_full():\r\n for i in range(1,len(self.items)):\r\n if self.items[i] is None:\r\n self.items[i] = item\r\n self.size += 1\r\n self.perc_up(i)\r\n return True\r\n return False", "def test_neg_operate_append_items_not_a_list(self):\n key = (\"test\", \"demo\", \"list_key\")\n\n list = [\n {\"op\": aerospike.OP_LIST_APPEND_ITEMS, \"bin\": \"int_bin\", \"val\": 7},\n ]\n\n try:\n key, _, bins = self.as_connection.operate(key, list)\n except e.ParamError as exception:\n assert exception.code == -2", "def put(self, key, value):\n while len(self.buckets) <= key:\n self.buckets = self.buckets + [-1] * len(self.buckets)\n self.length = self.length * 2\n self.buckets[key] = value", "def put(self, key, item):\n # Do nothing if key is NoneType or item is NoneType\n if (key is None or item is None):\n return\n\n # If Key exists, update Cache Item\n if key in self.cache_data:\n for ci in self.LFU:\n if ci.key is key:\n ci.updateItem(item)\n else:\n # Length will be longer than max capacity, make room\n if len(self.cache_data) == self.MAX_ITEMS:\n\n # Determine discarded cache item\n discard = self.LFU[0]\n for x in self.LFU:\n if x.freq < discard.freq or \\\n x.freq == discard.freq and x.age > discard.age:\n discard = x\n\n # Discard Cache Item\n print(\"DISCARD: {}\".format(discard.key))\n del self.cache_data[discard.key]\n self.LFU.remove(discard)\n\n # Add new Cache Item\n data = LFUCacheItem(key, item, 0, 0)\n self.LFU.append(data)\n\n # increase age of all items\n 
for x in self.LFU:\n x.age += 1\n\n self.cache_data[key] = item", "def process(self, key, value):\n if key in self.elements:\n raise Exception(\"This implementation works only for aggregated data\")\n seed = np.random.exponential(1.0 / (value**self.sample_p))\n self.elements[key] = (seed, value)\n\n # Optimization: instead of removing excess elements from the sample\n # every time its size reaches k+1, we only remove elements after the\n # number of elements in the sample exceeds 2k.\n if len(self.elements) > 2 * self.k:\n self._remove_additional_elements()", "def insert(self, item):\n for h_num in xrange(self.k):\n val = self.hash_value(item, h_num)\n self.arr[val] = True", "def test_fail_repeated_buckelist_item(self):\r\n user = User.query.filter_by(email=\"[email protected]\").first()\r\n bucketlist = BucketList.query.filter_by(user_id=user.id, name=\"test bucketlist\").first()\r\n item_no = BucketListItem.query.filter_by(bucketlist_id=bucketlist.id).count()\r\n response = self.add_bucketlist_item(\"[email protected]\", \"test\", bucketlist.id, \"test item\")\r\n result = json.loads(response.data.decode('utf-8'))\r\n self.assertEqual(response.status, '409 CONFLICT')\r\n self.assertEqual(result['message'], 'Bucketlist Item Exists')\r\n new_item_no = BucketListItem.query.filter_by(bucketlist_id=bucketlist.id).count()\r\n self.assertEqual(item_no, new_item_no)", "def add_to_shoppingbag(request, item_id):\n\n quantity = int(request.POST.get('quantity'))\n redirect_url = request.POST.get('redirect_url')\n size = None\n if 'merchandise_size' in request.POST:\n size = request.POST['merchandise_size']\n shoppingbag = request.session.get('shoppingbag', {})\n\n if size:\n if item_id in list(shoppingbag.keys()):\n if size in shoppingbag[item_id]['items_by_size'].keys():\n shoppingbag[item_id]['items_by_size'][size] += quantity\n else:\n shoppingbag[item_id]['items_by_size'][size] = quantity\n else:\n shoppingbag[item_id] = {'items_by_size': {size: quantity}}\n else:\n if item_id in list(shoppingbag.keys()):\n shoppingbag[item_id] += quantity\n else:\n shoppingbag[item_id] = quantity\n\n request.session['shoppingbag'] = shoppingbag\n return redirect(redirect_url)", "def add(self, key):\n addition_idx = self._reduce(self._hash(key))\n\n if self.table[addition_idx] != \"_\":\n # collision\n new_idx = self._resolve_collision(addition_idx)\n if new_idx == addition_idx:\n # table is full; do not insert\n print(\"Did not add key: hash table is full!\")\n else:\n # found a new\n self.table[new_idx] = key\n else:\n # no collision; place value at index\n self.table[addition_idx] = key", "def add_item(self, item):\n self.items.append(item)\n self.length += 1", "def add(self, *items):\n for item in items:\n self.unsorted.append(item)\n key = item[0]\n self.index[key] = item\n return self", "def add_item(self, key, data):\n hash_key = self.count_hash(key, len(self.slots))\n\n if self.slots[hash_key] is None:\n self.slots[hash_key] = key\n self.data[hash_key] = data\n else:\n if self.slots[hash_key] == key:\n self.data[hash_key] = data\n elif isinstance(self.slots[hash_key], int):\n self.slots[hash_key] = (self.slots[hash_key], key,)\n self.data[hash_key] = (self.data[hash_key], data,)\n elif len(self.slots[hash_key]) > 1:\n list_slot = list(self.slots[hash_key])\n list_data = list(self.data[hash_key])\n list_slot.append(key)\n list_data.append(data)\n self.slots[hash_key] = tuple(list_slot)\n self.data[hash_key] = tuple(list_data)", "def add(self, key, value):", "def add_to_heap(self, key, count=0):\n entry = 
[1 + count, next(self.counter), key, HeapItemStatus.ACTIVE]\n self.map[key] = entry\n heappush(self.heap, entry)", "def _newKey(self, key):\n self._testKeySubNsAdd()\n self._getKeyList().append(key)", "def add_item(items, coder, tag, start, n):\n if start is not None:\n # close opened items\n add_zero_item(items, coder, tag, start) # default tag\n items[tag][coder].append(item(b=start, l=n-start, v=1)) # found tag", "def add_to_group(self,item):\n self.items.append(item)\n self.n += 1", "def add(self,key,value):\r\n try:\r\n self._data.append(self.Item(key,value)) # insert as Item class object with key value.\r\n self._heapify_after_add(len(self._data) - 1 )\r\n return self._data\r\n except Exception, e:\r\n print \"Error occurred in HeapDistance: add method\", e", "def create_add_new_item(self, name, count=1):\r\n item = Item.create_by_name(name)\r\n stackable = item.get_stackable()\r\n if stackable:\r\n stackable.set_count(count)\r\n else:\r\n assert count == 1, 'cannot set count attributes on non-stackable item %s' % name\r\n return self.add_item(item)", "def add(self, key, value):\n\t\tself.__add_key_to_bt(key)[3] = self.__add_key_value_to_ll(key, value)", "def add(self, item):\r\n if len(self.buff)==self.size: self.buff.pop(0)\r\n self.buff.append(item)", "def _append(self, key):\n \"\"\" Returns: The index at which the new value is appended \"\"\"\n if len(self.queue) > self.size:# There is still space left in queue\n self.queue[self.size] = key\n # No space left in queue\n self.queue.append(key)\n atIndex = self.size\n self.size += 1\n return atIndex", "def add(self, *items):", "def appendsize(self, numents):\n pass", "def process_new_items(self, new_items):\n self.items_hat = np.hstack([self.items_hat, new_items])", "def add(self, key: keyType, value: valueType) -> None:\n\n self.validate(key, value)\n hash_address = self.get_hash_address(key)\n head_node = self.hashTable[hash_address]\n\n # To uniform form of key\n uniform_key = key\n if isinstance(key, (list, set)):\n uniform_key = tuple(key)\n # else:\n # uniform_key = key\n # Create a new node and assign values.\n node_new = ChainNode()\n node_new.key = uniform_key\n node_new.values.append(value)\n\n # 'head_node.count == 0' means that there is no collision.\n if head_node.count == 0:\n head_node.singlyLinkedList.append(node_new)\n head_node.count = 1\n head_node.keys.append(uniform_key)\n else:\n # To deal with collision.\n if uniform_key not in head_node.keys:\n head_node.singlyLinkedList.append(node_new)\n head_node.keys.append(uniform_key)\n head_node.count = head_node.count + 1\n else:\n # For the same 'key', determine whether 'value' already exists. 
If not, then store.\n for index in range(len(head_node.singlyLinkedList)):\n if uniform_key == head_node.singlyLinkedList[index].key:\n if value not in head_node.singlyLinkedList[index].values:\n head_node.singlyLinkedList[index].values.append(value)\n head_node.count = head_node.count + 1\n break\n logger.info(\"Successfully add a new element.\")", "def bfMExists(self, key, *items):\n params = [key]\n params += items\n\n return self.execute_command(self.BF_MEXISTS, *params)", "def addItem(self, item):\n self.items.append(item)\n self.totalWeight += item", "def construct_score_book(self, items_and_size: List[Tuple[str, float]]) -> None:\n self.score_book = {}\n\n for item, size in items_and_size:\n self.score_book[item] = size", "def process(self, key, value):\n if key in self.elements:\n seed, count = self.elements[key]\n self.elements[key] = (seed, count + value)\n else:\n pred = self.advice_obj.predict(key)\n # The seed hash(key) is drawn from the exponential distribution,\n # with parameter that is the predicted frequency raised to the p-th\n # power.\n seed = self.hash_func(key) / float(self.func_of_freq(pred))\n self.elements[key] = (seed, value)\n\n # Optimization: instead of removing excess elements from the sample\n # every time its size reaches k+1, we only remove elements after\n # the number of elements in the sample exceeds 2k.\n if len(self.elements) > 2 * self.k:\n self._remove_additional_elements()", "def appendsize(self, numents):\n self._numents += numents", "def insert(self, key, value):\r\n hash_val = Hash(key, value)\r\n self.hash_table[self.horner_hash(key)] = hash_val\r\n self.num_items += 1\r\n\r\n if self.get_load_factor() > 0.5:\r\n prev = HashTable(self.table_size)\r\n prev.num_items = self.num_items\r\n prev.hash_table = self.hash_table\r\n prev.table_size = self.table_size\r\n\r\n self.table_size = self.table_size * 2 + 1\r\n self.num_items = 0\r\n self.hash_table = [None] * self.table_size\r\n\r\n for i in range(prev.table_size):\r\n if prev.hash_table[i] is not None:\r\n self.insert(prev.hash_table[i].key, prev.hash_table[i].value)", "def __setitem__(self, key, value):\n index=self._index(key)\n if index==-1:\n self._item.append(Item(key,value))\n self._size+=1\n else:\n self._item[index].value=value", "def add_to_bag(request, item_id):\n\n product = get_object_or_404(Product, pk=item_id)\n\n quantity = int(request.POST.get('quantity'))\n redirect_url = request.POST.get('redirect_url')\n\n size = None\n if 'product_size' in request.POST:\n size = request.POST['product_size']\n\n # Get the bag variable if it exists in the session, or\n # create it as a new dictionary if it doesn't\n bag = request.session.get('bag', {})\n\n # Check if a product with sizes is being added\n if size:\n\n # If the item is already in the bag\n if item_id in list(bag.keys()):\n # Check if another item of the same id and size exists,\n # and if so, increment the quantity for that size.\n if size in bag[item_id]['items_by_size'].keys():\n bag[item_id]['item_by_size'][size] += quantity\n messages.success(request, f'Updated size {size.upper()} {product.name} quantity to {bag[item_id][\"items_by_size\"][size]}')\n # Otherwise set it equal to the quantity since the item\n # already exists in the bag, but this is a new size\n # for that item.\n else:\n bag[item_id]['items_by_size'][size] = quantity\n messages.success(request, f'Added size {size.upper()} {product.name} to your bag')\n\n # If the items are not already in the bag then add it, but we're\n # going to do it as a dictionary with a 
key of 'items_by_size'\n # since we may have multiple items with this item id,\n # but with different sizes.\n # This allows us to have a single item id for each item,\n # but still track multiple sizes.\n else:\n bag[item_id] = {'items_by_size': {size: quantity}}\n messages.success(request, f'Added size {size.upper()} {product.name} to your bag')\n\n # If there's no size\n else:\n # Add item to bag or update quantity if it already exists\n if item_id in list(bag.keys()):\n bag[item_id] += quantity\n messages.success(request, f'Updated {product.name} quantity to {bag[item_id]}')\n else:\n bag[item_id] = quantity\n messages.success(request, f'Added {product.name} to your bag')\n\n # Overwrite the variable in the session with the updated version\n request.session['bag'] = bag\n return redirect(redirect_url)", "def add(self, key, value):\n self.data.append((key, value))", "def add(self, item):\n if self.has_item(item):\n return\n\n self.cache.append(item)\n\n if self.size() > self.max_size:\n self.cache.popleft()", "def put(self, key, value):\r\n\r\n\r\n\t\tindex = self.get_index(key) # get the index\r\n\t\tcur_list = self._buckets[index] # this is the linked list\r\n\r\n\t\t# remove the key and assign the returned boolean in removed\r\n\t\tremoved = cur_list.remove(key)\r\n\t\tcur_list.add_front(key, value) # re-add the key with updated value\r\n\r\n\t\t# if removed is false, then a new key was added so increase size by 1\r\n\t\tif not removed:\r\n\t\t\tself.size += 1", "def put(self, key, value):\n hash_key = self._hash_function(key) % self.capacity # finds index for new key/value pair\n\n if self._buckets[hash_key].size == 0: # if bucket is empty, add new key/value pair\n self.size += 1\n self._buckets[hash_key].add_front(key, value)\n else:\n if self._buckets[hash_key].contains(key):\n self._buckets[hash_key].remove(key) # if key already exists, delete old key/value pair\n self.size -= 1\n self._buckets[hash_key].add_front(key, value) # add new key/value pair\n self.size += 1", "def add(self, key, value):\n self._data.add_last(self._Item(key, value))", "def addItem(self, item, rank):\n with self.lock:\n if self.ItemHashList.get(item, -1) == -1:\n self.ItemHashList[item] = None\n if rank < 0:\n rank = 0\n heapq.heappush(self.ItemList, (rank, item))", "def encounter(self,item,count=1,size=0,add=True):\n sig = str(item)\n try:\n idx = self.index[sig]\n self.counts[idx] += count\n self.sizes[idx] = max(self.sizes[idx],size)\n except:\n idx = len(self.items)\n self.sigs.append(sig)\n self.items.append(item)\n self.counts = np.append(self.counts,count)\n self.sizes = np.append(self.sizes,size)\n self.index[sig] = idx\n return idx", "def pad_keys(items, keys):\n for key in keys:\n if key not in items:\n items[key] = EmptySignature()\n return items", "def insert_many(self, conn, key, **kwargs):\n conn.zadd(key, **kwargs)", "def NewItems(self) -> _n_1_t_7:", "def add(self, key, val, expiry_time=0, min_compress_len=0):\n\t\treturn self._set(\"add\", key, val, expiry_time, min_compress_len)", "def test_add_items():\n ngrams = NgramFrequencies()\n assert \"the\" not in ngrams.unigrams_dic\n ngrams.add_item(\"the\", ngrams.unigrams_dic)\n assert ngrams.unigrams_dic[\"the\"] == 1\n ngrams.add_item(\"the\", ngrams.unigrams_dic)\n assert ngrams.unigrams_dic[\"the\"] == 2\n assert ngrams.unigrams_dic[\"COUNT\"] == 2", "def add_item(dct,item):\r\n if item not in dct[0]:\r\n print \"itemNotFound \",str(item)\r\n return False\r\n\r\n num=len(item)\r\n if num in dct:\r\n if item in dct[num]:\r\n return 
False\r\n else:\r\n dct[num].append(item)\r\n return True\r\n else:\r\n dct[num]=[item]\r\n return True", "def create(self, good, quantity):\n self._haves[good] += quantity", "def add_hit(self, key: str, weight: int = 1) -> bool:\n assert self.reactor is not None\n\n if key not in self.keys:\n return True\n max_hits, window_seconds = self.keys[key]\n\n if key not in self.hits:\n self.hits[key] = RateLimiterLimit(weight, self.reactor.seconds())\n return True\n\n hits, latest_time = self.hits[key]\n\n dt = self.reactor.seconds() - latest_time\n\n # rate = max_hits / window_seconds (hits per second)\n # x = dt * rate\n # leaked_hits = floor(x) (hits obtained after dt seconds)\n leaked_hits, remainder = divmod(dt * max_hits, window_seconds)\n\n # leaked_hits * window_seconds + remainder = dt * max_hits\n # dt - remainder / max_hits = leaked_hits / rate\n new_time: float = latest_time + dt - remainder / float(max_hits)\n\n # First, update the bucket subtracting the leakage amount.\n new_hits: int = hits - int(leaked_hits)\n if new_hits < 0:\n new_hits = 0\n\n # Then, add the new hits and check if it overflows.\n new_hits += weight\n allowance = True\n if new_hits > max_hits:\n allowance = False\n new_hits = max_hits\n\n self.hits[key] = RateLimiterLimit(new_hits, new_time)\n return allowance", "def _add_error(self, key, message):\n if key not in self._error_key_list:\n self._error_key_list.append(key)\n self.add_error(key, str(message))", "def add(self, item_name, number=1):\n if self.get(item_name):\n self.storage[item_name] = self.storage[item_name] + number\n else:\n self.storage[item_name] = number", "def append(self, data):\n if self._expand_mode:\n new_keys = set(data.keys()) - self.keys & self._keys\n self._expand(new_keys)\n self._cubas = None\n self._append(data)\n self._number_of_items += 1", "def add_item(self, new_value):\n\n # Allocate more memory if necessary\n # This keeps add_item to O(1), generally.\n # Otherwise, have to duplicate ndarray every time\n # last_item is an index, heap_size is a limit (index + 1)\n if self.last_item >= self.heap_size - 1:\n # Allocate double the memory\n new_heap_list = self.heap.tolist() + [0] * self.heap_size\n self.heap = np.array(new_heap_list, dtype=np.int)\n self.heap_size *= 2\n\n # Add item index and value\n # to already allocated memory\n self.last_item = self.last_item + 1\n self.heap[self.last_item] = new_value\n\n # Update heap level\n self.level = np.floor(np.log(self.last_item + 1) / np.log(2))", "def put(self, item): \n if len(self.contents) < self.max_size:\n self.contents.append(item)\n elif len(self.contents) >= self.max_size:\n print \"Backpack Full.\"", "def test_adding_item_to_list(create_shopping_item, create_shopping_list):\n shopping_list = create_shopping_list\n items_before = shopping_list.items.values_list().count()\n new_item = create_shopping_item\n shopping_list.items.add(new_item)\n items_after = shopping_list.items.values_list().count()\n assert items_after > items_before\n assert items_before == 0\n assert items_after == 1", "def add_new_bitfinex_item(new_type, new_price, new_count):\n new_item = Book_Item(exchange=\"Bitfinex\", pairname=\"BTCUSD\", type=new_type, price=new_price, count=new_count)\n return new_item", "def add(self, keys: List[int], docs: List['gnes_pb2.Document'], *args, **kwargs):\n pass", "def put(self, key, item):\n # Do nothing if key is NoneType or item is NoneType\n if (key is None or item is None):\n return\n\n if key in self.cache_data:\n self.LRU = [ci for ci in self.LRU if ci.key != 
key]\n\n # increase age of all items\n for x in self.LRU:\n x.age += 1\n\n self.cache_data[key] = item\n data = LRUCacheItem(key, item, 0)\n self.LRU.append(data)\n\n # Length is longer than max capacity, make room\n if len(self.cache_data) > self.MAX_ITEMS:\n discard = self.LRU[0]\n for x in self.LRU:\n if x.age > discard.age:\n discard = x\n print(\"DISCARD: {}\".format(discard.key))\n del self.cache_data[discard.key]\n self.LRU.remove(discard)", "def append(self, item):\n\t\theapq.heappush(self.heap, (self.f(item), item))", "def put(self, key, item):\n if key is not None and item is not None:\n self.cache_data[key] = item\n if key not in self.cache_list:\n self.cache_list.append(key)\n if len(self.cache_data) > self.MAX_ITEMS:\n popped_key = self.cache_list.pop(0)\n print(f\"DISCARD: {popped_key}\")\n del self.cache_data[popped_key]", "def create_and_add_item(self, word, samples):\n item = LibraryItem(word, samples)\n self.items.append(item)\n self.item_count += 1\n self.max_length = max(self.max_length, len(samples))", "def append(self, item):\n # FIXME: this is only append if the key isn't already present\n key, value = item\n self._main[key] = value", "def put(self, key, item):\n if key and item:\n if key in self.cache_data.keys():\n self.cache_data[key] = item\n self.stack.append(key)\n else:\n if len(self.cache_data.keys()) < self.MAX_ITEMS:\n self.cache_data[key] = item\n self.stack.append(key)\n elif len(self.cache_data.keys()) == self.MAX_ITEMS:\n k = self.stack.pop(-1)\n self.cache_data.pop(k)\n print(\"DISCARD: {}\".format(k))\n self.cache_data[key] = item\n self.stack.append(key)", "def knapsack(items, capacity):\r\n pass", "def add(self, items: Iterable[requests.LeaseRequest]) -> None:\n with self._add_remove_lock:\n for item in items:\n # Add the ack ID to the set of managed ack IDs, and increment\n # the size counter.\n if item.ack_id not in self._leased_messages:\n self._leased_messages[item.ack_id] = _LeasedMessage(\n sent_time=float(\"inf\"),\n size=item.byte_size,\n ordering_key=item.ordering_key,\n )\n self._bytes += item.byte_size\n else:\n _LOGGER.debug(\"Message %s is already lease managed\", item.ack_id)", "def set_new(self, new_key, new_value):\r\n hashed_key = self.hash_key(new_key) \r\n\r\n self._items.insert(hashed_key, new_value)\r\n \r\n return self.get_items()", "def _append_or_create(dict_, key_, value):\n if key_ not in dict_:\n dict_[key_] = [value]\n else:\n assert isinstance(dict_[key_], list)\n dict_[key_].append(value)\n return dict_", "def add(self, item):\n item = self._prepare_item(len(self), item)\n if item not in self._data:\n self._data.append(item)\n self.__log__.append(SetAdd(value=item))", "def add(self, key, value):\n if not key in self:\n self.keys.append(key)\n self.dict[key] = value" ]
[ "0.6203647", "0.6000729", "0.5843988", "0.57451206", "0.5711649", "0.56672007", "0.5634117", "0.5580528", "0.54817045", "0.541424", "0.5405912", "0.53877556", "0.53802043", "0.5330853", "0.53062", "0.52977407", "0.5295973", "0.5289593", "0.52855736", "0.5276881", "0.5263222", "0.5257719", "0.52493656", "0.52452296", "0.5184029", "0.5172518", "0.51582766", "0.511162", "0.50999177", "0.5088622", "0.50885355", "0.5030734", "0.50207704", "0.5018073", "0.50014734", "0.4989744", "0.49567974", "0.49549598", "0.49443588", "0.49384475", "0.4933958", "0.49288884", "0.4917151", "0.490649", "0.49059746", "0.49036822", "0.4894755", "0.48923331", "0.4885465", "0.4867801", "0.48496047", "0.48492664", "0.48444733", "0.48425856", "0.48351005", "0.48338574", "0.4832597", "0.48053175", "0.4804859", "0.4800718", "0.47953814", "0.47895956", "0.47857592", "0.478342", "0.47800827", "0.47797284", "0.47776955", "0.47745708", "0.47724319", "0.47713923", "0.476809", "0.47660995", "0.47649932", "0.47581413", "0.47545335", "0.47541064", "0.4748588", "0.47480232", "0.47331464", "0.4724732", "0.47183758", "0.47148928", "0.47148827", "0.47146967", "0.47133306", "0.4711478", "0.4710853", "0.47101882", "0.46987465", "0.4696963", "0.46950266", "0.46919262", "0.46916696", "0.4689504", "0.4683288", "0.4682125", "0.46799356", "0.46762094", "0.46700865", "0.46680743" ]
0.6028828
1
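A minimal usage sketch for the bfInsert document in the record above, under the same assumptions about the client package and a running RedisBloom server; the keyword arguments mirror the signature shown in this record, and the key and item values are made up for illustration.

    from redisbloom.client import Client  # assumed import path for the client class

    rb = Client()  # assumed default localhost connection
    # Because noCreate stays None, BF.INSERT creates 'seen-ids' on first use
    # with the requested error rate and expected capacity, then adds both items.
    rb.bfInsert('seen-ids', ['id-1', 'id-2'], capacity=5000, error=0.001)
    # With noCreate set, the call is expected to fail if the key does not already exist:
    # rb.bfInsert('missing-key', ['x'], noCreate=True)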
Checks whether an ``item`` exists in Bloom Filter ``key``.
def bfExists(self, key, item):
    params = [key, item]

    return self.execute_command(self.BF_EXISTS, *params)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __contains__(self, item, key):\n ndx = self._findPostion(key)\n return ndx is not None", "def item_has_key(self, item, key):\n if key in self._reverse_store[item]:\n return True\n else:\n return False", "def contains(self, item):\n for h_num in xrange(self.k):\n val = self.hash_value(item, h_num)\n if not self.arr[val]:\n return False\n else:\n return True", "def contains(self, item):\n return self._dict.has_key(item)\n\n self.__contains__ = contains", "def item_exists(item_id):\n return item_id in all_items", "def has_item(self, item):\n return item in self.set", "def has_item(self, item):\n return item in self.set", "def cfExists(self, key, item):\n params = [key, item]\n \n return self.execute_command(self.CF_EXISTS, *params)", "def has_item(self, item):\n return item in self.cache", "def exista(self, item):\n if item not in self._items:\n return False\n for x in self._items:\n if x == item:\n return True", "def __contains__(self, item):\n try:\n self[item]\n return True\n except KeyError:\n return False", "def has_item(self, usage_key):\r\n try:\r\n self._find_one(usage_key)\r\n return True\r\n except ItemNotFoundError:\r\n return False", "def contains(self, key):\n # TODO: Check if the given key exists in a bucket\n hash_key = self._bucket_index(key) # Gets the index of the key\n if self.buckets[hash_key].is_empty() is False: # If the hask_key exists\n for key_value_pair in self.buckets[hash_key]: # Iteratre through the value pair\n if key_value_pair[0] is key: # If the key matches\n return True\n return False", "def contains(self, key: int) -> bool:\n _hash = self.get_hash(key)\n return self.bucket_array[_hash].exist(key)", "def __contains__(self, item):\n try:\n hdu = self[item] # noqa\n return True\n except Exception:\n return False", "def __contains__(self, item: object) -> bool:\n val = conv_kv(item) # type: ignore\n for fixup in self._mapping._fixup.values():\n if fixup.value == val:\n return True\n return False", "def array_key_exists(name, item):\n return item.has_key(name);", "def has(cls, item):\n return item in cls.values()", "def __contains__(self, item):\n\n if self[item]:\n return True\n return False", "def bfMExists(self, key, *items):\n params = [key]\n params += items\n\n return self.execute_command(self.BF_MEXISTS, *params)", "def __contains__(self, item):\n return item in self._data", "def has(self, item):\n return item in self.mut", "def contains(self, key: int) -> bool:\n return self._find_key(key, find_empty=False) >= 0", "def _has(self, key):\n path = self._get_key_path(key)\n return exists(path)", "def item_exists(self, call_number):\n return call_number in self.item_list.keys()", "def contains(self, key: int) -> bool:\n lv1, lv2 = self.hashing(key)\n \n for item in self.cont[lv1][lv2]:\n if item==key:\n return True\n \n return False", "def __contains__(self, key):\n return self.keys[self._linear_probe(key, \"contains\")] is not None", "def has_item(self, item_name):\n if item_name in self.item_list:\n return True\n return False", "def __contains__(self, item):\n\t\treturn item in self.__dict__.values()", "def has(self, key):", "def contains(self, item):\n if isinstance(item, dict):\n return _(item).all(lambda key: self._.get(key) == item[key])\n return item in self", "def has_item(self, item: Inventory) -> bool:\n return (item.pk,) in self.orderitem_set.values_list('item')", "def has_key(self, key):\n return self.contains(key)", "def contains(self, key):\n bus=key%100000\n pos=key//100000\n return self.li[bus][pos]==1", "def contains(self, key: int) -> 
bool:\n hash1, hash2 = self._hash(key)\n if not self.buckets[hash1]:\n return False\n return self.buckets[hash1][hash2]", "def __contains__(self, item):\n # return item in self._items\n # leverage improved performance index() function\n try:\n self.index(item)\n return True\n except ValueError:\n return False", "def has_item(self, usage_key):\r\n store = self._get_modulestore_for_courseid(usage_key.course_key)\r\n return store.has_item(usage_key)", "def contains(self, key):\n h = self.hash_value(key)\n return key in self.hs[h]", "def contains_key(self, key):\r\n\t\t# call the linked list contains() method for each bucket\r\n\t\tfor i in self._buckets:\r\n\t\t\tif i.contains(key):\r\n\t\t\t\treturn True\r\n\t\treturn False", "def existsitem(self,item,listwidgets):\n exists = listwidgets.findItems(item, Qt.MatchExactly)\n if exists:\n return True\n else:\n return False", "def __contains__(self, item):\n return item in self.__keys or item in self.__vals", "def key_exists(key, value):\n\n response = table.query(\n KeyConditionExpression = Key(key).eq(value)\n )\n\n if response['Items']:\n return True\n\n return False", "def __contains__(self, item):\n return item in self._fetch()", "def contains(self, key):\n if key in self.key_list:\n return True\n return False", "def contains(self, key):\n hashkey = self.hash(key)\n return key in self.table[hashkey]", "def contains_key(self, key):\n\n index = self._hash_function(key) % self.capacity\n li = self._buckets[index]\n if li.contains(key) is not None:\n return True\n return False", "def has_key(self, key):\n return key in self", "def __contains__(self, key):\n found = True\n try:\n self.__getitem__(key)\n except:\n found = False\n return found", "def __contains__(self, key):\n bits_per_slice = self.bits_per_slice\n hashes = self.make_hashes(key)\n pipe = self.server.pipeline(transaction=False) \n sliceIdx = 0\n for k in hashes:\n sliceKey = self.SLICE_KEY_FMT % (self.bfkeypreffix, sliceIdx)\n pipe.getbit(sliceKey, k)\n sliceIdx += 1\n getbits = pipe.execute() \n for bit in getbits:\n if not bit:\n return False\n return True", "def hasItem(self, path): \n\t\treturn (path in self.items and self.items[path])", "def has(self, key):\n return key in self._store", "def contains(self, key: int) -> bool:\n if key >= len(self.b):\n return False\n return self.b[key]", "def contains(self, key):\n try:\n self.keyvaluepair_set.get(key=key)\n return True\n except KeyValuePair.DoesNotExist:\n return False", "def contains(self, key: int) -> bool:\n if key in self.d:\n return True\n else:\n return False", "def __contains__(self, item: object) -> bool:\n return item in self._used", "def __contains__(self, key):\n return self._lookup(key).value is not None", "def has_item(self, item):\n if item in self._reverse_store:\n return True\n else:\n return False", "def contains(self, key):\n\t\tfor i in self.getBitArrayIndices(key):\n\t\t\tif self.ba[i] <= 0:\n\t\t\t\treturn False\n\t\treturn True", "def is_in_bag(self, item):\n return item in self._bag", "def contains(self, key):\n\n return key in self.keys()", "def __contains__(self, key):\n try:\n if self[key]:\n return True\n except KeyError:\n return False", "def has(self, key):\n return False", "def has_item(self, usage_key):\r\n return usage_key in self.modules[usage_key.course_key]", "def __contains__(self, key):\n return key in self._index", "def has_key(self, key):\n return key in self.db", "def __contains__(self, item):\n return item.upper() in self.keys", "def __contains__(self, item):\n return item in 
self._index_map", "def contains(bank, key):\n try:\n c_key = \"{}/{}\".format(bank, key or \"\")\n _, value = api.kv.get(c_key, keys=True)\n except Exception as exc: # pylint: disable=broad-except\n raise SaltCacheError(f\"There was an error getting the key, {c_key}: {exc}\")\n return value is not None", "def contains(self, item):\n # Find a node with the given item, if any\n node = self._find_node(item)\n # Return True if a node was found, or False\n return node is not None", "def __contains__(self, item):\n for _, _, _, cur_item in self.queue:\n if cur_item == item:\n return True\n return False", "def contains(self, key):\n # Find bucket where given key belongs\n # Check if key-value entry exists in bucket\n key_bucket = self._bucket_index(key)\n in_hash_table = False\n\n for key_value_tuple in self.buckets[key_bucket].items():\n if key_value_tuple[0] is key:\n in_hash_table = True\n\n return in_hash_table", "def __contains__(self, item: Any) -> bool:\n return item in self.item_to_index", "def contains(self, key: int) -> bool:\n hashedVal = self.hashValue(key)\n head = self.array[hashedVal] \n while(head != None): \n if head.val == key:\n return True\n head = head.next\n return False", "async def contains(self, key: str) -> bool:", "def tag_key_exists(self, key):\n return key in self.map", "def __contains__(self, key):\n try:\n self._get(key)\n return True\n except Exception:\n return False", "def check_item_in(self, url):\n item_hash = tools.url_hash(url)\n if item_hash not in self.__items:\n self.__item_lock.acquire()\n self.__items.add(item_hash)\n self.__item_lock.release()\n return False\n else:\n return True", "def hasKey(self,\n key):\n return self.__keyCount.has_key(key)", "def contains(self, key):\n index = self.key_to_index(key)\n node = self.hash_set[index]\n\n while node:\n if node.key == key:\n return True\n\n node = node.next\n\n return False", "def contains(self, key):\n return key in self.hashset[key % self.N]", "def __contains__(self, key):\n self._remove_expired()\n\n log.debug(\"__contains__: {}\".format(key))\n return key in self._d", "def contains(self, key):\n return self.__db.contains(key)", "def __contains__(self, key):\n return (key in self.index)", "def __contains__(self, item):\n index = bisect_left(self.sequence, item)\n if (len(self.sequence) != index) and (self.sequence[index] == item):\n return True\n return False", "def __contains__(self, key):\n position = self.hash(key)\n\n for _ in range(self.table_capacity):\n if self.array[position] is None:\n return False\n elif self.array[position][0] == key:\n return True\n else:\n position = (position + 1) % self.table_capacity\n return False", "def containsKey(self, key):\n return get(key) != None", "def check_item(self, item, key, db):\n data = [record for record in db if record[key] == item]\n return data", "def item_exists(item_pid):\n raise NotImplementedConfigurationError(\n config_variable=\"CIRCULATION_ITEM_EXISTS\"\n )", "def containskey(self, essid, key):\n return self.cli.essids.containskey(essid, key)", "def check_container_contains_item(context, container, item):\n assert_true(context.uuid[item] in get_container(context, container)[f\"{item}s\"])", "def __contains__(self, key):\n\t\treturn any([item == key for _, item in self.heap])", "def __contains__(self, item: Any) -> bool:\n curr = self._first\n\n while curr is not None:\n if curr.item == item:\n return True\n\n curr = curr.next\n\n return False", "def exists(self, key, predicate=None):\n with self._cv:\n return self._has(key, predicate)", "def 
__contains__(self, key):\n return key in self.keys", "async def _exists(self, key):\n return key in SimpleMemoryBackend._cache", "def __contains__(self, key):\n\n return key in self.keys_set", "def _item_exists(self, item):\n cursor = self.conn.cursor()\n cursor.execute(\n 'SELECT * FROM Members where first_name = ?;',\n (item['first_name'])\n )\n return True if len(cursor.fetchall()) else False", "def __contains__(self, key):\n\t\treturn key in self.__dStore", "def __contains__(self, item): # __iter__ would do this job by itself\n return (item in self.__values)", "def contains(self, key: int) -> bool:\n return key in self.res" ]
[ "0.77040607", "0.76314753", "0.75677925", "0.7561435", "0.75477517", "0.7416592", "0.7416592", "0.7373087", "0.73644096", "0.7320269", "0.7269573", "0.71763974", "0.7118146", "0.70699257", "0.7025244", "0.6994611", "0.6993277", "0.6964525", "0.6963223", "0.6961354", "0.6954093", "0.6912245", "0.6889395", "0.68624604", "0.68619245", "0.6860283", "0.6854506", "0.68280953", "0.68246704", "0.6814358", "0.680378", "0.6798623", "0.67956287", "0.6787672", "0.6775903", "0.67712253", "0.676514", "0.67594934", "0.6758538", "0.6750646", "0.67493176", "0.67490804", "0.6748112", "0.6742399", "0.67248183", "0.67084426", "0.66963804", "0.66959494", "0.6669972", "0.66450083", "0.6617", "0.66030985", "0.65982234", "0.65954995", "0.65723854", "0.65627533", "0.65597993", "0.6551974", "0.6538145", "0.65346676", "0.6533196", "0.65305614", "0.65260684", "0.65178627", "0.65161186", "0.65106815", "0.65104115", "0.6507318", "0.6505253", "0.6500579", "0.6498496", "0.64971954", "0.6497069", "0.64933455", "0.6487224", "0.6482612", "0.64741427", "0.64691496", "0.64682466", "0.6467317", "0.6465897", "0.64617616", "0.64507973", "0.6436762", "0.64204603", "0.6409257", "0.63998544", "0.6391322", "0.63866585", "0.6384331", "0.6379805", "0.63690174", "0.6367702", "0.63676786", "0.6367289", "0.63661575", "0.63659936", "0.63642967", "0.6363119", "0.63588274" ]
0.83642733
0
Checks whether ``items`` exist in Bloom Filter ``key``.
def bfMExists(self, key, *items): params = [key] params += items return self.execute_command(self.BF_MEXISTS, *params)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def bfExists(self, key, item):\n params = [key, item]\n \n return self.execute_command(self.BF_EXISTS, *params)", "def item_exists(item_id):\n return item_id in all_items", "def __contains__(self, items):\n if type(items) != list:\n raise PJFInvalidType(items, list)\n ret = 0\n for item in items:\n for key in self.__dict__:\n if isinstance(self.__dict__[key], JsonFactory):\n ret += item in self.__dict__[key]\n elif item == key:\n ret += 1\n return len(items) == ret", "def contains(self, item):\n for h_num in xrange(self.k):\n val = self.hash_value(item, h_num)\n if not self.arr[val]:\n return False\n else:\n return True", "def __contains__(self, item, key):\n ndx = self._findPostion(key)\n return ndx is not None", "def item_has_key(self, item, key):\n if key in self._reverse_store[item]:\n return True\n else:\n return False", "def has_item(self, item):\n return item in self.set", "def has_item(self, item):\n return item in self.set", "def has_items(self):\r\n return self.orderitem_set.exists() # pylint: disable=E1101\r", "def contains(self, item):\n return self._dict.has_key(item)\n\n self.__contains__ = contains", "def array_key_exists(name, item):\n return item.has_key(name);", "def item_exists(self, call_number):\n return call_number in self.item_list.keys()", "def exista(self, item):\n if item not in self._items:\n return False\n for x in self._items:\n if x == item:\n return True", "def has_item(self, usage_key):\r\n try:\r\n self._find_one(usage_key)\r\n return True\r\n except ItemNotFoundError:\r\n return False", "def has(cls, item):\n return item in cls.values()", "def contains(self, key):\n\t\tfor i in self.getBitArrayIndices(key):\n\t\t\tif self.ba[i] <= 0:\n\t\t\t\treturn False\n\t\treturn True", "def contains(self, key: int) -> bool:\n _hash = self.get_hash(key)\n return self.bucket_array[_hash].exist(key)", "def __contains__(self, item):\n return item in self.__keys or item in self.__vals", "def contains(self, key):\n if key in self.key_list:\n return True\n return False", "def has_item(self, usage_key):\r\n store = self._get_modulestore_for_courseid(usage_key.course_key)\r\n return store.has_item(usage_key)", "def has_item(self, item_name):\n if item_name in self.item_list:\n return True\n return False", "def contains(self, key: int) -> bool:\n return self._find_key(key, find_empty=False) >= 0", "def check_item_in(self, url):\n item_hash = tools.url_hash(url)\n if item_hash not in self.__items:\n self.__item_lock.acquire()\n self.__items.add(item_hash)\n self.__item_lock.release()\n return False\n else:\n return True", "def has_item(self, item):\n return item in self.cache", "def contains(self, item):\n if isinstance(item, dict):\n return _(item).all(lambda key: self._.get(key) == item[key])\n return item in self", "def __contains__(self, item):\n\t\treturn item in self.__dict__.values()", "def has_item(self, usage_key):\r\n return usage_key in self.modules[usage_key.course_key]", "def __contains__(self, key):\n bits_per_slice = self.bits_per_slice\n hashes = self.make_hashes(key)\n pipe = self.server.pipeline(transaction=False) \n sliceIdx = 0\n for k in hashes:\n sliceKey = self.SLICE_KEY_FMT % (self.bfkeypreffix, sliceIdx)\n pipe.getbit(sliceKey, k)\n sliceIdx += 1\n getbits = pipe.execute() \n for bit in getbits:\n if not bit:\n return False\n return True", "def __contains__(self, key):\n\n return key in self.keys_set", "def cfExists(self, key, item):\n params = [key, item]\n \n return self.execute_command(self.CF_EXISTS, *params)", "def __contains__(self, item):\n 
return item in self._data", "def __contains__(self, key):\n return key in self.keys", "def has(self, key):\n return key in self._store", "def key_exists(key, value):\n\n response = table.query(\n KeyConditionExpression = Key(key).eq(value)\n )\n\n if response['Items']:\n return True\n\n return False", "def contains(self, key):\n\n return key in self.keys()", "def hasItem(self, path): \n\t\treturn (path in self.items and self.items[path])", "def has(self, key):", "def __contains__(self, key, *args, **kwargs):\n if key in self._list(*args, **kwargs):\n return True\n return False", "def contains(self, key):\n # TODO: Check if the given key exists in a bucket\n hash_key = self._bucket_index(key) # Gets the index of the key\n if self.buckets[hash_key].is_empty() is False: # If the hask_key exists\n for key_value_pair in self.buckets[hash_key]: # Iteratre through the value pair\n if key_value_pair[0] is key: # If the key matches\n return True\n return False", "def contains(self, key: int) -> bool:\n lv1, lv2 = self.hashing(key)\n \n for item in self.cont[lv1][lv2]:\n if item==key:\n return True\n \n return False", "def __contains__(self, key):\n return self.keys[self._linear_probe(key, \"contains\")] is not None", "def __contains__(self, item):\n return item.upper() in self.keys", "def has_item(self, item: Inventory) -> bool:\n return (item.pk,) in self.orderitem_set.values_list('item')", "def contains_key(self, key):\r\n\t\t# call the linked list contains() method for each bucket\r\n\t\tfor i in self._buckets:\r\n\t\t\tif i.contains(key):\r\n\t\t\t\treturn True\r\n\t\treturn False", "def has_key(self, key):\n return key in self", "def __contains__(self, key):\n\t\treturn key in self.__dStore", "def has(self, item):\n return item in self.mut", "def is_in_bag(self, item):\n return item in self._bag", "def contains(self, key):\n bus=key%100000\n pos=key//100000\n return self.li[bus][pos]==1", "def __contains__(self, item): # __iter__ would do this job by itself\n return (item in self.__values)", "def __contains__(self, item):\n return item in self._fetch()", "def __contains__(self, item: object) -> bool:\n return item in self._used", "def contains_all(self, *items):\n return all(item in self for item in items)", "def existsitem(self,item,listwidgets):\n exists = listwidgets.findItems(item, Qt.MatchExactly)\n if exists:\n return True\n else:\n return False", "def __contains__(self, item):\n try:\n self[item]\n return True\n except KeyError:\n return False", "def checkitems(f):\n if hasattr(f, 'keys'):\n for key in f.keys():\n checkitems(f[key])", "def _in_keys(self, key, keys):\n # sorting required for comparison\n key.sort()\n return key in keys", "def __contains__(self, key):\n return key in self._index", "def contains(self, key):\n return key in self.hashset[key % self.N]", "def __contains__(self, key):\n self._remove_expired()\n\n log.debug(\"__contains__: {}\".format(key))\n return key in self._d", "def _has(self, key):\n path = self._get_key_path(key)\n return exists(path)", "async def contains(self, key: datastore.Key) -> bool:\n\t\tfor store in self._stores:\n\t\t\tif await store.contains(key):\n\t\t\t\treturn True\n\t\treturn False", "def has_key(self, key):\n return self.contains(key)", "def __contains__(self, key):\n return key in self._get_storage()", "def contains(self, key):\n h = self.hash_value(key)\n return key in self.hs[h]", "def _key_check(self, key_list, chk_dict=None):\n exists = False\n if chk_dict is None:\n chk_dict = self._e_dict\n for key in key_list:\n exists = key in 
chk_dict.keys()\n if exists:\n chk_dict = chk_dict[key]\n else:\n break\n return exists", "def __contains__(self, item: object) -> bool:\n val = conv_kv(item) # type: ignore\n for fixup in self._mapping._fixup.values():\n if fixup.value == val:\n return True\n return False", "def has(key):\n return not not (key in current().values)", "def __contains__(self, key):\n keys = list(self._indexer(key))\n if len(keys) == 1:\n return keys[0] in self._data\n return [k in self._data for k in keys]", "def contains(self, key: int) -> bool:\n return key in self.res", "def contains(self, key):\n hashkey = self.hash(key)\n return key in self.table[hashkey]", "def contains(self, key: int) -> bool:\n if key >= len(self.b):\n return False\n return self.b[key]", "def hasKey(self,\n key):\n return self.__keyCount.has_key(key)", "async def contains(self, key: str) -> bool:", "def contains(self, key: int) -> bool:\n if key in self.d:\n return True\n else:\n return False", "def has(self, key):\n return False", "def tag_key_exists(self, key):\n return key in self.map", "def dict_has_items(obj, items):\n has_items = False\n if isinstance(obj, basestring):\n obj = json.loads(obj)\n for item in items:\n for lookup_key, lookup_val in item.iteritems():\n if lookup_key in obj and obj[lookup_key] == lookup_val:\n has_items = True\n else:\n return False\n return has_items", "def contains(self, key: int) -> bool:\n hash1, hash2 = self._hash(key)\n if not self.buckets[hash1]:\n return False\n return self.buckets[hash1][hash2]", "def __contains__(self, key):\n return (key in self.index)", "def has_key(self, key):\n return key in self.db", "def assert_keys_exist(self, caller, *keys):\n assert keys, (\"*keys parameter must be specified.\")\n for key in keys:\n self.assert_key_exists(key, caller)", "async def _exists(self, key):\n return key in SimpleMemoryBackend._cache", "def __contains__(self, item):\n\n if self[item]:\n return True\n return False", "def contains(self, key: int) -> bool:\n hashedVal = self.hashValue(key)\n head = self.array[hashedVal] \n while(head != None): \n if head.val == key:\n return True\n head = head.next\n return False", "def contains(self, key):\n return self.__db.contains(key)", "def __contains__(self, key):\n found = True\n try:\n self.__getitem__(key)\n except:\n found = False\n return found", "def __contains__(self, key):\n for f in reversed(self.filters):\n if key in f:\n return True\n return False", "def oneof(item_list, items):\n for i in item_list:\n if type(i) == type(list()) or type(i) == type(dict()):\n if sublist_in(item_list, i):\n return True\n else:\n if i in items: return True\n\n return False", "def __contains__(self, key):\n\t\treturn key in self.cache", "def __contains__(self, item):\n try:\n hdu = self[item] # noqa\n return True\n except Exception:\n return False", "def keys_exist(self, *keys):\n return tuple(key in self.keys() for key in keys)", "def __contains__(self, item):\n # return item in self._items\n # leverage improved performance index() function\n try:\n self.index(item)\n return True\n except ValueError:\n return False", "def __contains__(self, item):\n return item in self._terms", "def __contains__(self, item):\n return item in self._index_map", "def contains_key(self, key):\n\n index = self._hash_function(key) % self.capacity\n li = self._buckets[index]\n if li.contains(key) is not None:\n return True\n return False", "def contains(self, key):\n if key in self.nums:\n return True\n return False", "def has_key(self, name, *args, **kwargs):\n if not name in 
self._list(*args, **kwargs):\n return False\n return True", "def has_item(self, usage_key):\r\n if usage_key.block_id is None:\r\n raise InsufficientSpecificationError(usage_key)\r\n try:\r\n course_structure = self._lookup_course(usage_key)['structure']\r\n except ItemNotFoundError:\r\n # this error only occurs if the course does not exist\r\n return False\r\n\r\n return self._get_block_from_structure(course_structure, usage_key.block_id) is not None", "def contains(self, key):\n # Find bucket where given key belongs\n # Check if key-value entry exists in bucket\n key_bucket = self._bucket_index(key)\n in_hash_table = False\n\n for key_value_tuple in self.buckets[key_bucket].items():\n if key_value_tuple[0] is key:\n in_hash_table = True\n\n return in_hash_table" ]
[ "0.73868114", "0.73612565", "0.7006057", "0.6978514", "0.6682587", "0.6636079", "0.6593363", "0.6593363", "0.6583643", "0.65689707", "0.6546252", "0.6544462", "0.6530537", "0.6525098", "0.6517376", "0.65107256", "0.64853024", "0.6446855", "0.6445891", "0.64283794", "0.64225173", "0.6404468", "0.64029586", "0.6397683", "0.6396188", "0.63960636", "0.637843", "0.63750595", "0.63697594", "0.6350113", "0.63419414", "0.63227165", "0.63198066", "0.63016206", "0.6300828", "0.6292385", "0.6292208", "0.62884045", "0.6276599", "0.62609136", "0.6233757", "0.62329805", "0.62072396", "0.61953396", "0.61889726", "0.6174415", "0.6169045", "0.61671567", "0.61565006", "0.61548096", "0.6145549", "0.61381215", "0.6135011", "0.6127766", "0.61244714", "0.61185706", "0.6112143", "0.6102439", "0.6093851", "0.60922015", "0.60910445", "0.60891956", "0.6082543", "0.6081488", "0.607997", "0.607017", "0.6058614", "0.60562426", "0.6055903", "0.60531896", "0.60465205", "0.6042578", "0.60258377", "0.60092115", "0.6002384", "0.6002156", "0.6002037", "0.59913534", "0.59700084", "0.59575963", "0.5950673", "0.59449136", "0.592925", "0.5925755", "0.59151185", "0.5914915", "0.5914382", "0.59138316", "0.5913799", "0.59104025", "0.5909788", "0.58963615", "0.5885914", "0.5884746", "0.5880516", "0.587966", "0.58776134", "0.5873808", "0.587142", "0.5847915" ]
0.77390194
0
Begins an incremental save of the bloom filter ``key``. This is useful for large bloom filters which cannot fit into the normal SAVE and RESTORE model. The first time this command is called, the value of ``iter`` should be 0. This command will return successive (iter, data) pairs until (0, NULL) to indicate completion.
def bfScandump(self, key, iter): params = [key, iter] return self.execute_command(self.BF_SCANDUMP, *params)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cfScandump(self, key, iter):\n params = [key, iter]\n \n return self.execute_command(self.CF_SCANDUMP, *params)", "def save(self) -> None:\n self._bin_iter.save()", "def save(self) -> dict:\n for pair in self._buffer:\n yield pair.save()", "def _iter(self, key, count, increment=1):\n key %= self.size\n while count > 0:\n try:\n yield self.db[key]\n except KeyError:\n # This shouldn't happen, but there's really nothing we can do if it does.\n # Skip over the damaged part of our database, ignoring the missing item.\n pass\n key = (key + increment) % self.size\n count -= 1", "def bfLoadChunk(self, key, iter, data):\n params = [key, iter, data]\n \n return self.execute_command(self.BF_LOADCHUNK, *params)", "def __iter__(self):\n try:\n i = self.db[self._headKey]\n while True:\n yield i\n i = self.db[self._getNextKey(i)]\n except KeyError:\n pass", "def __iter__(self):\r\n for item in self._data:\r\n yield item # yield the KEY\r", "def add(self, key):\n\t\t#super(CountingBloomFilter, self).add(key)\n\t\t#super(CountingBloomFilter, self).generateStats()\n\t\tfor i in self.getBitArrayIndices(key):\n\t\t\tself.ba[i] += 1\n\t\tself.n += 1", "def save(self, key=None):\n\n # we can override our key by passing one in explicitly\n if key: self._key = key\n\n # now save in the db\n if self._key:\n self._dbag[self._key] = self.to_python()\n else:\n self._key = self._dbag.add(self.to_python())\n return self._key", "def __iter__(self):\n if not self.loading:\n self.reset_loading()\n self.current_batch_index = 0\n return self", "def keys_fetch(self):\n with self.env.begin(write=False) as txn:\n cursor = txn.cursor()\n tot = txn.stat()['entries']\n i = 0\n\n path = self.db_path\n base_name = self.base_path\n cache_file_path = os.path.join(path, '_cache_' + base_name + '.pkl')\n print('cache_file_path = ', cache_file_path) # DEBUG\n\n if os.path.isfile(cache_file_path):\n self.keys = pickle.load(open(cache_file_path, 'rb'))\n self._num_examples = tot\n else:\n keys = []\n for key, _ in cursor:\n i += 1\n if i % 1000 == 0 or i == tot:\n print('Fetching {:>8d} /{:>8d} keys'.format(i, tot),\n end='\\r')\n keys.append(key)\n print('\\nDone.')\n self._num_examples = tot\n self.keys = np.asarray(keys)\n pickle.dump(self.keys, open(cache_file_path, 'wb'))", "def __iter__(self):\n # This could be as simple as \"return self._getKeyList().__iter__()\"\n # but this performs some extra consistency checking to make sure the\n # key we iterate to actually exists, to keep us from crashing if\n # our db is a little out of sync with itself.\n\n # This is a nasty hack because our db seems prone to circular links\n nItems = 0\n for item in self._getKeyList():\n if item in self:\n yield item\n nItems += 1\n # NASTY HACK!\n if nItems > 1000:\n self.reindex()\n raise Exception(\"Circular link corrected, try again\")\n else:\n self._delKey(item)", "def add(self, key, skip_check=False):\n bits_per_slice = self.bits_per_slice\n hashes = self.make_hashes(key)\n found_all_bits = True\n if self.count > self.capacity:\n raise IndexError(\"RedisLocalBloomFilter is at capacity\")\n pipe = self.server.pipeline(transaction=False) \n sliceIdx = 0\n for k in hashes:\n sliceKey = self.SLICE_KEY_FMT % (self.bfkeypreffix, sliceIdx)\n pipe.setbit(sliceKey, k, 1)\n sliceIdx += 1\n pipeResults = pipe.execute()\n if not skip_check:\n for pipeResult in pipeResults:\n if not pipeResult:\n found_all_bits = False\n break\n if skip_check:\n self.count += 1\n return False\n elif not found_all_bits:\n self.count += 1\n return False\n else:\n return 
True", "def add(self, key):\n if key in self:\n return True\n if not self.filters:\n filter = RedisLocalBloomFilter(\n server=self.server, \n bfkeypreffix = self.FILTER_KEY_FMT % (self.bfkeypreffix, self.filter_count),\n capacity=self.initial_capacity,\n error_rate=self.error_rate * (1.0 - self.ratio))\n self.filter_count += 1\n self.filters.append(filter)\n else:\n filter = self.filters[-1]\n if filter.count >= filter.capacity:\n capacity = filter.capacity * self.scale\n if capacity > MAX_PER_SLICE_SIZE:\n capacity = MAX_PER_SLICE_SIZE\n filter = RedisLocalBloomFilter(\n server=self.server,\n bfkeypreffix = self.FILTER_KEY_FMT % (self.bfkeypreffix, self.filter_count),\n capacity=capacity,\n error_rate=self.error_rate * (1.0 - self.ratio))\n self.filter_count += 1\n self.filters.append(filter)\n if self.max_filters > 0 and len(self.filters) >= self.max_filters:\n f = self.filters[0]\n f.clear()\n del self.filters[0]\n filter.add(key, skip_check=True)\n return False", "def inc(self, key: str) -> None:\n if key not in self.bucket_of_keys:\n self.bucket_of_keys[key] = self.buckets.insert(self.buckets.begin(), Node(0, {key}))\n bucket, next_bucket = self.bucket_of_keys[key], self.bucket_of_keys[key].next\n if next_bucket is self.buckets.end() or next_bucket.value > bucket.value + 1:\n next_bucket = self.buckets.insert(next_bucket, Node(bucket.value + 1, set()))\n next_bucket.keys.add(key)\n self.bucket_of_keys[key] = next_bucket\n\n bucket.keys.remove(key)\n if not bucket.keys:\n self.buckets.erase(bucket)", "def _save(self, itr):\n # using keep_checkpoint_every_n_hours as proxy for iterations between saves\n if self.saver and (itr + 1) % self.saver._keep_checkpoint_every_n_hours == 0:\n\n # collect params (or stuff to keep in general)\n params = dict()\n params['critic'] = self.critic.network.get_param_values()\n\n # if the environment is wrapped in a normalizing env, save those stats\n normalized_env = hgail.misc.utils.extract_normalizing_env(self.env)\n if normalized_env is not None:\n params['normalzing'] = dict(\n obs_mean=normalized_env._obs_mean,\n obs_var=normalized_env._obs_var\n )\n\n # save hierarchy\n for i, level in enumerate(self.hierarchy):\n params[i] = dict()\n params[i]['policy'] = level.algo.policy.get_param_values()\n \n # save params \n save_dir = os.path.split(self.saver_filepath)[0]\n hgail.misc.utils.save_params(save_dir, params, itr+1, max_to_keep=50)", "def inc(self, key: str) -> None:\n if key not in self.mapping:\n cur_block = self.head\n else:\n cur_block = self.mapping[key]\n cur_block.keys.remove(key)\n\n if cur_block.val + 1 != cur_block.next.val:\n new_block = Block(cur_block.val + 1)\n cur_block.insert_after(new_block)\n else:\n new_block = cur_block.next\n new_block.keys.add(key)\n self.mapping[key] = new_block\n\n if not cur_block.keys and cur_block.val != 0:\n cur_block.remove()", "def inc(self, key):\n if key in self.keyCountMap:\n self._updateCount(key, 1)\n else:\n self.keyCountMap[key] = 1\n if self.head.next.count != 1:\n self._addBucketAfter(Bucket(1), self.head)\n self.head.next.keySet.add(key)\n self.countBucketMap[1] = self.head.next", "def fisher_iterate(\n self,\n cbl,\n map_tag=None,\n iter_max=200,\n converge_criteria=0.005,\n qb_start=None,\n transfer_run=False,\n save_iters=False,\n null_first_cmb=False,\n delta_beta_prior=None,\n cond_noise=None,\n cond_criteria=None,\n like_profiles=False,\n like_profile_sigma=3.0,\n like_profile_points=100,\n file_tag=None,\n ):\n\n save_name = \"transfer\" if transfer_run else \"bandpowers\"\n\n if 
transfer_run:\n null_first_cmb = False\n\n # previous fqb iterations to monitor convergence and adjust conditioning\n prev_fqb = []\n cond_adjusted = False\n\n if qb_start is None:\n qb = OrderedDict()\n for k, v in self.bin_def.items():\n if transfer_run:\n if \"cmb\" not in k or \"eb\" in k or \"tb\" in k:\n continue\n if k == \"delta_beta\":\n # qb_delta beta is a coefficient on the change from beta,\n # so expect that it should be small if beta_ref is close\n # (zeroes cause singular matrix problems)\n qb[k] = [self.delta_beta_fix]\n elif k.startswith(\"res_\") or k.startswith(\"fg_\"):\n # res qb=0 means noise model is 100% accurate.\n qb[k] = 1e-5 * np.ones(len(v))\n else:\n # start by assuming model is 100% accurate\n qb[k] = np.ones(len(v))\n else:\n qb = qb_start\n\n obs, nell, debias = self.get_data_spectra(\n map_tag=map_tag, transfer_run=transfer_run\n )\n\n bin_index = pt.dict_to_index(self.bin_def)\n\n success = False\n for iter_idx in range(iter_max):\n self.log(\n \"Doing Fisher step {}/{}...\".format(iter_idx + 1, iter_max), \"info\"\n )\n\n qb_new, inv_fish = self.fisher_calc(\n qb,\n cbl,\n obs,\n cls_noise=nell,\n cls_debias=debias,\n cond_noise=cond_noise,\n delta_beta_prior=delta_beta_prior,\n cond_criteria=cond_criteria,\n null_first_cmb=null_first_cmb,\n )\n\n qb_arr = pt.dict_to_arr(qb, flatten=True)\n qb_new_arr = pt.dict_to_arr(qb_new, flatten=True)\n dqb = qb_new_arr - qb_arr\n fqb = dqb / qb_arr\n max_fqb = np.nanmax(np.abs(fqb))\n\n prev_fqb.append(max_fqb)\n\n fnan = np.isnan(fqb)\n if fnan.any():\n (nanidx,) = np.where(fnan)\n self.log(\n \"Iter {}: Ignoring {} bins with fqb=nan: bins={}, qb_new={}, \"\n \"qb={}\".format(\n iter_idx,\n len(nanidx),\n nanidx,\n qb_new_arr[nanidx],\n qb_arr[nanidx],\n ),\n \"warning\",\n )\n\n self.log(\"Max fractional change in qb: {}\".format(max_fqb), \"info\")\n\n # put qb_new in original dict\n qb = copy.deepcopy(qb_new)\n cls_model = self.get_model_spectra(\n qb, cbl, delta=True, cls_noise=nell, cond_noise=None\n )\n\n if \"delta_beta\" in qb:\n # get beta fit and beta error\n beta_fit = qb[\"delta_beta\"][0] + self.beta_ref\n db_idx = slice(*bin_index[\"delta_beta\"])\n beta_err = np.sqrt(np.diag(inv_fish[db_idx, db_idx]))[0]\n else:\n beta_fit = None\n beta_err = None\n\n if save_iters:\n # save only the quantities that change with each iteration\n out = dict(\n map_tag=map_tag,\n map_tags=self.map_tags,\n iter_index=iter_idx,\n bin_def=self.bin_def,\n bin_weights=self.bin_weights,\n cls_shape=self.cls_shape,\n cls_obs=obs,\n qb=qb,\n fqb=fqb,\n inv_fish=inv_fish,\n cls_model=cls_model,\n cbl=cbl,\n map_freqs=self.map_freqs,\n cls_signal=self.cls_signal,\n cls_noise=self.cls_noise,\n Dmat_obs=self.Dmat_obs,\n gmat_ell=self.gmat_ell,\n extra_tag=file_tag,\n )\n\n if \"fg_tt\" in self.bin_def:\n out.update(\n beta_fit=beta_fit,\n beta_err=beta_err,\n ref_freq=self.ref_freq,\n beta_ref=self.beta_ref,\n )\n\n self.save_data(save_name, bp_opts=not transfer_run, **out)\n\n (nans,) = np.where(np.isnan(qb_new_arr))\n if len(nans):\n msg = \"Found NaN values in qb bins {} at iter {}\".format(nans, iter_idx)\n break\n\n if fnan.all():\n msg = (\n \"All bins have fqb=NaN at iter {}, \"\n \"something has gone horribly wrong.\".format(iter_idx)\n )\n break\n\n negs = np.where(np.diag(inv_fish) < 0)[0]\n if len(negs):\n self.log(\n \"Iter {}: Found negatives in inv_fish diagonal at locations \"\n \"{}\".format(iter_idx, negs),\n \"warning\",\n )\n\n if np.nanmax(np.abs(fqb)) < converge_criteria:\n if not transfer_run:\n # Calculate 
final fisher matrix without conditioning\n self.log(\"Calculating final Fisher matrix.\", \"info\")\n _, inv_fish = self.fisher_calc(\n qb,\n cbl,\n obs,\n cls_noise=nell,\n cls_debias=debias,\n cond_noise=None,\n delta_beta_prior=delta_beta_prior,\n null_first_cmb=null_first_cmb,\n )\n\n # If any diagonals of inv_fisher are negative, something went wrong\n negs = np.where(np.diag(inv_fish) < 0)[0]\n if len(negs):\n self.log(\n \"Found negatives in inv_fish diagonal at locations \"\n \"{}\".format(negs),\n \"warning\",\n )\n\n success = True\n break\n\n else:\n msg = \"{} {} did not converge in {} iterations\".format(\n \"Multi-map\" if map_tag is None else \"Map {}\".format(map_tag),\n \"transfer function\" if transfer_run else \"spectrum\",\n iter_max,\n )\n # Check the slope of the last ten fqb_maxpoints.\n # If there's not a downward trend, adjust conditioning\n # criteria to help convergence.\n if len(prev_fqb) <= 10 or transfer_run:\n continue\n m, b = np.polyfit(np.arange(10), prev_fqb[-10:], 1)\n if m > 0: # Not converging\n # First, start from very little conditioning\n if not cond_adjusted:\n cond_criteria = 5e3\n cond_adjusted = True\n self.log(\n \"Iter {}: Not converging. Setting cond_criteria={}\".format(\n iter_idx, cond_criteria\n ),\n \"warning\",\n )\n\n elif cond_criteria > 100:\n cond_criteria /= 2.0\n self.log(\n \"Iter {}: Tightening condition criteria to help convergence. \"\n \"cond_criteria={}\".format(iter_idx, cond_criteria),\n \"warning\",\n )\n else:\n self.log(\n \"Iter {}: Can't reduce cond_criteria any more.\".format(\n iter_idx\n ),\n \"warning\",\n )\n # give it ten tries to start converging\n prev_fqb = []\n\n # save and return\n out = dict(\n qb=qb,\n inv_fish=inv_fish,\n fqb=fqb,\n bin_def=self.bin_def,\n bin_weights=self.bin_weights,\n iters=iter_idx,\n success=success,\n map_tags=self.map_tags,\n map_freqs=self.map_freqs,\n converge_criteria=converge_criteria,\n cond_noise=cond_noise,\n cond_criteria=cond_criteria,\n null_first_cmb=null_first_cmb,\n apply_gcorr=self.apply_gcorr,\n weighted_bins=self.weighted_bins,\n )\n\n if \"fg_tt\" in self.bin_def:\n out.update(\n delta_beta_prior=delta_beta_prior,\n beta_fit=beta_fit,\n beta_err=beta_err,\n ref_freq=self.ref_freq,\n beta_ref=self.beta_ref,\n )\n\n if self.debug:\n out.update(\n cbl=cbl,\n cls_obs=obs,\n cls_signal=self.cls_signal,\n cls_noise=self.cls_noise,\n cls_model=cls_model,\n cls_shape=self.cls_shape,\n cond_noise=cond_noise,\n Dmat_obs=self.Dmat_obs,\n )\n\n if not transfer_run:\n out.update(qb_transfer=self.qb_transfer)\n if self.template_cleaned:\n out.update(template_alpha=self.template_alpha)\n\n if success and not transfer_run:\n # do one more fisher calc that doesn't include sample variance\n # set qb=very close to 0. 
0 causes singular matrix problems.\n # don't do this for noise residual bins\n self.log(\"Calculating final Fisher matrix without sample variance.\", \"info\")\n qb_zeroed = copy.deepcopy(qb)\n qb_new_ns = copy.deepcopy(qb)\n for comp in [\"cmb\", \"fg\"]:\n for spec in self.specs:\n stag = \"{}_{}\".format(comp, spec)\n if stag not in qb_zeroed:\n continue\n qb_zeroed[stag][:] = 1e-20\n qb_new_ns[stag][:] = 1.0\n if \"delta_beta\" in qb:\n qb_zeroed[\"delta_beta\"][:] = 1e-20\n qb_new_ns[\"delta_beta\"][:] = 0\n\n _, inv_fish_ns = self.fisher_calc(\n qb_zeroed,\n cbl,\n obs,\n cls_noise=nell,\n cls_debias=debias,\n cond_noise=None,\n delta_beta_prior=None,\n null_first_cmb=null_first_cmb,\n )\n\n out.update(\n invfish_nosampvar=inv_fish_ns,\n )\n\n # compute window functions for CMB bins\n self.log(\"Calculating window functions for CMB bins\", \"info\")\n wbl_qb = self.fisher_calc(\n qb,\n cbl,\n obs,\n cls_noise=nell,\n cls_debias=None,\n cond_noise=None,\n delta_beta_prior=delta_beta_prior,\n null_first_cmb=null_first_cmb,\n windows=True,\n inv_fish=inv_fish,\n )\n out.update(wbl_qb=wbl_qb)\n\n # compute bandpowers and covariances\n cb, dcb, ellb, cov, qb2cb, wbl_cb = self.do_qb2cb(qb, inv_fish, wbl_qb)\n _, dcb_ns, _, cov_ns, _, _ = self.do_qb2cb(qb, inv_fish_ns, wbl_qb)\n\n out.update(\n cb=cb,\n dcb=dcb,\n ellb=ellb,\n cov=cov,\n qb2cb=qb2cb,\n wbl_cb=wbl_cb,\n dcb_nosampvar=dcb_ns,\n cov_nosampvar=cov_ns,\n )\n\n if like_profiles:\n # compute bandpower likelihoods\n self.log(\"Calculating bandpower profile likelihoods\", \"info\")\n max_like = self.fisher_calc(\n qb,\n cbl,\n obs,\n cls_noise=nell,\n cond_noise=None,\n delta_beta_prior=delta_beta_prior,\n null_first_cmb=null_first_cmb,\n likelihood=True,\n )\n\n dqb = pt.arr_to_dict(np.sqrt(np.abs(np.diag(inv_fish))), qb)\n qb_like = OrderedDict()\n\n for stag, qbs in qb.items():\n qb_like[stag] = np.zeros(\n (len(qbs), 2, like_profile_points), dtype=float\n )\n\n for ibin, q in enumerate(qbs):\n qb1 = copy.deepcopy(qb)\n dq = dqb[stag][ibin] * like_profile_sigma\n q_arr = np.linspace(q - dq, q + dq, like_profile_points)\n like_arr = np.zeros_like(q_arr)\n\n for iq, q1 in enumerate(q_arr):\n qb1[stag][ibin] = q1\n try:\n like = self.fisher_calc(\n qb1,\n cbl,\n obs,\n cls_noise=nell,\n cond_noise=None,\n delta_beta_prior=delta_beta_prior,\n null_first_cmb=null_first_cmb,\n likelihood=True,\n )\n except np.linalg.LinAlgError:\n like = np.nan\n\n like_arr[iq] = like\n\n self.log(\n \"{} bin {} delta qb {} delta like: {}\".format(\n stag, ibin, q1 - q, like - max_like\n ),\n \"debug\",\n )\n\n qb_like[stag][ibin] = np.vstack([q_arr, like_arr])\n\n out.update(max_like=max_like, qb_like=qb_like)\n\n if not success:\n save_name = \"ERROR_{}\".format(save_name)\n self.log(msg, \"error\")\n self.warn(msg)\n\n return self.save_data(\n save_name, map_tag=map_tag, bp_opts=True, extra_tag=file_tag, **out\n )", "def cfLoadChunk(self, key, iter, data):\n params = [key, iter, data]\n \n return self.execute_command(self.CF_LOADCHUNK, *params)", "def store(self,key,start,end,data):\n\n pass", "def knapsack_iterate_back(save):\n pass", "def __iter__(self):\n cursor=0\n while cursor<len(self):\n yield self._item[cursor].key\n cursor+=1", "def train_bloom_filter(self, train_data):\n for val in train_data:\n if self.debug:\n print('val: ', val)\n for i in range(0, self.hash_size):\n k = self.hashes[i](val[0])\n if self.debug:\n print('k: ', k)\n self.bitarray[k] = 1\n if self.debug:\n print('___end training____')", "def train_callback(self, model, 
iteration):\n if (self.rewind_it == iteration and self.rewind_state_dict is None):\n # Save the current model weights\n self.rewind_state_dict = copy.deepcopy(model.state_dict())", "def put(self, key, processed_query):\n data = json.dumps(processed_query.to_cache())\n\n def commit_to_db(connection):\n cursor = connection.cursor()\n cursor.execute(\"\"\"\n INSERT OR IGNORE into queries values (?, ?, ?, ?, ?);\n \"\"\", (key,\n data,\n processed_query.query.text,\n processed_query.domain,\n processed_query.intent,\n ))\n connection.commit()\n\n if self.memory_connection:\n commit_to_db(self.memory_connection)\n rowid = self.key_to_row_id(key)\n self.batch_writes.append(str(rowid))\n if len(self.batch_writes) == self.batch_write_size:\n self.flush_to_disk()\n else:\n commit_to_db(self.disk_connection)\n\n return self.key_to_row_id(key)", "def save (self):\n if self.newobj:\n using_sequence = self.sequence ()\n self.keyvals['id'] = using_sequence\n self.seq = using_sequence\n else:\n using_sequence = self.seq\n for key, val in self.keyvals.items ():\n r_key = self.prepare_key (key, using_sequence)\n r.set (r_key, val)\n self.keyvals = {}\n self.newobj = False", "def _set(self, cmd, key, val, expiry_time, min_compress_len = 0):\n\t\tcheck_key(key)\n\t\tserver, key = yield self._get_server_for(key)\n\t\tif not server:\n\t\t\traise StopIteration(False)\n\n\t\tstored_info = self._value_to_stored(val, min_compress_len)\n\t\tif stored_info is None:\n\t\t\t# If it's not storable due to length, just return.\n\t\t\traise StopIteration(True)\n\t\tflags, stored = stored_info\n\t\t\n\n\t\tfull_cmd = \"%s %s %d %d %d\\r\\n%s\\r\\n\" % (cmd, key, flags, expiry_time, len(stored), stored)\n\n\t\ttry:\n\t\t\tyield server.sendall(full_cmd)\n\t\t\tres = yield server.read_line()\n\t\t\traise StopIteration(res == \"STORED\")\n\n\t\texcept tcp.ConnectionClosedException:\n\t\t\tserver.mark_dead()\n\n\t\traise StopIteration(False)", "def inc(self, key):\n if key in self.cache:\n curr_freq = self.cache[key]\n self.freq[curr_freq].remove(key)\n\n if len(self.freq[curr_freq]) == 0:\n del self.freq[curr_freq]\n\n curr_freq += 1\n self.freq[curr_freq].add(key)\n self.cache[key] = curr_freq\n\n else:\n self.cache[key] = 1\n self.freq[1].add(key)", "def __iter__(self):\n while True:\n if self.stop:\n return\n for item in self.get_next_batch():\n yield item", "def _update_append_key(self):\n self.append_key += 1", "def run(self, iter: int = -1):\n try:\n while iter != 0:\n self.iteration_count += 1\n iso = self._iso_observe()\n self._propagate(iso)\n iter -= 1\n except _FinishedObserving:\n return True\n except _Contradiction:\n return False", "def begin_ga(key):\n _population = cache_get(key)\n population = [_population[idx] for idx in _population]\n base_key = cache_get('settings')['base_key']\n next_generation = population[0]['generation'] + 1\n name = '{}:{}'.format(base_key, next_generation)\n\n # need to convert the population dictionary to a tuple of tuples so we can\n # take the set of it. 
even though notes are a list of lists, python throws\n # and unhasable error if everything isnt of the same type\n for individual in population:\n individual['notes'] = tuple(tuple(x) for x in individual['notes'])\n\n _future_population = m_pipe(population, tournament, crossover, mutation)\n for idx, notes in enumerate(_future_population, start=1):\n individual = render_individual(notes=notes, _id=idx, generation=next_generation)\n logger.debug(\"Individual << %s >> for generation << %s >>:\\n%s\", idx, next_generation, individual)\n cache_set(name, idx, individual, serialize=True)\n return next_generation", "def process(self, data_itr):\n for data in data_itr:\n self.update(data)\n while True:\n try:\n out = self.next()\n yield out\n except StopIteration:\n break", "def _key_generated(self, key, index):\n self.keys[self.get_address(key)] = key\n self.last_generated_index = index", "def post_prepared_commit(self, key, prepared):\n docs = self.__splitprepared(prepared)\n docs[0][\"key\"] = key\n return self.client.post_commit(docs[0], docs[1])", "def __getitem__(self, key):\n with open(self._get_path(key), 'rb') as f:\n unpickler = pickle.Unpickler(f)\n while f.peek(1):\n yield unpickler.load()", "def iterkeys(self):\n self.proto.iterinit()\n try:\n while True:\n yield wait(self.proto.iternext())\n except TyrantError:\n pass", "def _put(self, key, data):\n path = self._get_key_path(key)\n with open(path, \"wb\") as pickle_file:\n pickle.dump(data, pickle_file)", "def key_lookup_batch(self, batchiter):\n pass", "def save(self) -> None:\n self._save_marker = self.get_next()", "def iterator(self):\n return self.KeyIterator()", "def snapshot(self, sess, iter, epoch=0):\n\n if not os.path.exists(self.output_dir):\n os.makedirs(self.output_dir)\n\n infix = ('_' + cfg.TRAIN.SNAPSHOT_INFIX\n if cfg.TRAIN.SNAPSHOT_INFIX != '' else '')\n filename = (cfg.TRAIN.SNAPSHOT_PREFIX + infix + '_iter_{:d}_epoch_{:d}'.format(iter + 1, epoch + 1) + '.ckpt')\n filename = os.path.join(self.output_dir, filename)\n\n # self.saver.save(sess, filename, write_meta_graph=False)\n self.saver.save(sess, filename)\n print 'Wrote snapshot to: {:s}'.format(filename)", "def save(self, key, data):\n overloads = self._load_index()\n try:\n # If key already exists, we will overwrite the file\n data_name = overloads[key]\n except KeyError:\n # Find an available name for the data file\n existing = set(overloads.values())\n for i in itertools.count(1):\n data_name = self._data_name(i)\n if data_name not in existing:\n break\n overloads[key] = data_name\n self._save_index(overloads)\n self._save_data(data_name, data)", "def addSequence(self, key):\r\n self.is_empty = False\r\n current = self.root\r\n i = 0\r\n highest_leaf = self.addSequence_aux(current, key, i)\r\n # Updating the root\r\n # If it is the first element that is added to the database\r\n if highest_leaf.highest_freq is None:\r\n current.highest_freq = highest_leaf\r\n current.frequency = highest_leaf.frequency\r\n current.index_next = highest_leaf.index\r\n else:\r\n # Compare the frequency if it is not the first element on the database\r\n if current.frequency < highest_leaf.frequency:\r\n current.frequency = highest_leaf.frequency\r\n current.highest_freq = highest_leaf.highest_freq\r\n current.index_next = highest_leaf.index\r\n # If the frequency is equal then compare the lexicographical order\r\n elif current.frequency == highest_leaf.frequency:\r\n if current.index_next >= highest_leaf.index:\r\n current.frequency = highest_leaf.frequency\r\n 
current.highest_freq = highest_leaf.highest_freq\r\n current.index_next = highest_leaf.index", "def touchKBucket(self, key):", "def writeMuchData(self, len, start, delta, context):\n print(\"start: {}\".format(start))\n print(\"len: {}\".format(len))\n print(\"delta: {}\".format(delta))\n\n for i in range(start, start + len):\n key = \"key_{}\".format(i)\n address = _make_benchcontract_address(key)\n print(\"address: {}\".format(address))\n value = i + delta\n print(\"value: {}\".format(value))\n value = \"{}\".format(value)\n print(\"value: {}\".format(value))\n value_encoded = value.encode()\n print(\"encoded value: {}\".format(value_encoded))\n context.set_state({address: value_encoded}, timeout=self.timeout)\n print(\"writeMuchData stored {} --> {} to state\".format(key, value))\n return 0", "def cb(xk):\n self.iteration += 1\n t_current = time.time()\n t_elapsed = t_current - self.t_store\n self.t_store = t_current\n \n self.of_list.append(self.of_last)\n self.params = xk\n self._disp(t_elapsed)\n\n # Call the custom callback function if any\n if callback is not None:\n callback(self)", "def save_all(self):\r\n for index in range(self.count()):\r\n self.save(index)", "def __iter__(self):\n # Return an iterator for the keys in the underlying dictionary.\n return iter(self.data)", "def save_to_kvstore(helper, name, entries, stats):\n helper.log_info('Saving {0} entries for MineMeld feed \"{1}\"'.format(\n len(entries), name))\n url = '{0}/batch_save'.format(_uri(helper))\n\n # We need to batch in groups of 500, the default.\n for i in range(0, len(entries), 500):\n resp = helper.send_http_request(\n url=url,\n headers=_headers(helper),\n method='POST',\n verify=False,\n payload=entries[i:i+500])\n resp.raise_for_status()", "def compress_pending_metadata_updates(key):\n instance = key.get()\n if not instance:\n logging.warning('Instance does not exist: %s', key)\n return\n\n if instance.active_metadata_update:\n logging.warning('Instance already has active metadata update: %s', key)\n return\n\n if not instance.pending_metadata_updates:\n return\n\n instance.active_metadata_update = compress_metadata_updates(\n instance.pending_metadata_updates)\n instance.pending_metadata_updates = []\n instance.put()", "def _write_current_buffer_for_group_key(self, key):\n write_info = self.write_buffer.pack_buffer(key)\n self.write(write_info.get('file_path'),\n self.write_buffer.grouping_info[key]['membership'])\n self.write_buffer.clean_tmp_files(write_info)\n self.write_buffer.add_new_buffer_for_group(key)", "def _do_flush(self, cache):\n try:\n while cache and not self._stop_flushing:\n key, value = cache.popitem()\n self._shelf[self._encode_key(key)] = value\n if cache:\n cache.clear()\n except BaseException as exception:\n self._flush_exception = exception", "def next_window(self) -> Iterator[Optional[np.ndarray]]:\n while self._count >= self._window_width:\n # Preserve what we want to return by copying it.\n p1 = np.copy(self._data_store[:self._window_width, :])\n\n # Remove the data we don't need any more from the front of the buffer.\n frames_to_keep = self._count - self._window_step\n self._data_store[:frames_to_keep,\n :] = self._data_store[self._window_step:self._count, :]\n self._count -= self._window_step\n yield p1", "def test02StoreRefresh(self):\n s = utils.FastStore(max_size=5)\n keys = []\n for i in range(0, 5):\n keys.append(s.Put(i, i))\n\n # This should not raise because keys[0] should be refreshed each time its\n # gotten\n for i in range(0, 1000):\n s.Get(keys[0])\n s.Put(i, 
i)", "def inc(self, key):\n if key in self.key_dict:\n self.increase(key)\n return\n self.key_dict[key] = key_node = KeyNode(key, 1)\n value_node = self.value_dict.get(1)\n if value_node is None:\n self.value_dict[1] = value_node = ValueNode(1, None, self.head)\n if self.head:\n self.head.prev = value_node\n self.head = value_node\n if self.last is None:\n self.last = value_node\n self.insert_key_node(key_node)", "def key_upload(self, key=None):\n raise NotImplementedError", "def commit(self):\n if self.keys:\n # If there used to be some keys, there must exist an old dictionary blob somewhere in the database. It should be deallocated after a successful commit to disk.\n self.file.seek(0)\n headerdump = self.file.read(16)\n if sum(headerdump):\n dictat,dictlen = struct.unpack(\"<QQ\", headerdump)\n self.awaitingpunch.append((dictat,dictlen))\n\n # All buffered (modified but uncommited) values get serialized and sent to disk.\n for key,value in self.buffered.items():\n valuedump = pickle.dumps(value)\n valueat = self.file.safeappend(valuedump, 16)\n self.keys[key] = (valueat,len(valuedump))\n self.buffered.clear()\n\n # A new dictionary blob gets serialized and sent to disk.\n dictdump = pickle.dumps(self.keys)\n dictat = self.file.safeappend(dictdump, 16)\n\n # Finally, the header gets overwritten atomically and orderly.\n headerdump = struct.pack(\"<QQ\", dictat, len(dictdump))\n self.file.fsync()\n self.file.writep(0, headerdump)\n self.file.fsync()\n\n # Whatever value blobs and dictionary blobs are no longer being pointed to, they can be safely deallocated.\n for (punchat,punchlen) in self.awaitingpunch:\n self.file.fallocate(2|1, punchat, punchlen)\n self.awaitingpunch.clear()\n\n self.buffered.clear()\n\n else:\n self.awaitingpunch.clear()\n self.file.fsync()\n self.file.truncate(0)\n self.file.fsync()", "def __iter__(self) :\n for s in self._samples_to_cache :\n yield s", "def _save_input(self, mod, i):\n if mod.training:\n self.state[mod][\"x\"] = i[0]", "def insert(self, i, key):\n if self.contains(i):\n raise IndexError(\"index is already in pq\")\n self.__n += 1\n self.__pq[self.__n] = i\n self.__qp[i] = self.__n\n self.__keys[i] = key\n self.__swim(self.__n)", "def __iter__(self):\n self.current_epoch = self.current_epoch + 1\n self.num_samples_yield = -1\n return self", "def __iter__(self):\n self.current_epoch = self.current_epoch + 1\n self.num_samples_yield = -1\n return self", "def __iter__(self):\n\t\treturn iter(self.__dStore)", "async def _put(self, key: datastore.Key, value: RT, **kwargs: typing.Any) -> None:\n\t\tresult_stream: typing.Union[\n\t\t\tdatastore.core.util.stream.TeeingReceiveStream,\n\t\t\tdatastore.core.util.stream.TeeingReceiveChannel[T_co]\n\t\t]\n\t\tif isinstance(self, datastore.abc.BinaryDatastore):\n\t\t\tresult_stream = datastore.core.util.stream.TeeingReceiveStream(value)\n\t\telif isinstance(self, datastore.abc.ObjectDatastore):\n\t\t\tresult_stream = datastore.core.util.stream.TeeingReceiveChannel(value)\n\t\telse:\n\t\t\tassert False\n\t\t\n\t\ttry:\n\t\t\tfor store in self._stores:\n\t\t\t\tif store is self._stores[-1]:\n\t\t\t\t\tbreak # Last store drives this `TeeingReceiveStream`\n\t\t\t\tresult_stream.start_task_soon(run_put_task, store, key, kwargs)\n\t\t\tawait self._stores[-1]._put(key, result_stream, **kwargs) # type: ignore[arg-type]\n\t\texcept BaseException:\n\t\t\t# Ensure the other tasks are immediately canceled if the final\n\t\t\t# store's put raises an exception\n\t\t\t#\n\t\t\t# Without this the nursery and its attached tasks 
will stay open\n\t\t\t# and cause an undecipherable “the init task should be the last\n\t\t\t# task to exit” error on loop exit.\n\t\t\tawait result_stream.aclose()\n\t\t\traise", "def change_key(self, i, key):\n self.__keys[i] = key\n self.__swim(self.__qp[i])\n self.__sink(self.__qp[i])", "def run(self, iteration_key):\n record_provider = SqlDocumentProvider(iteration_key, self.case_accessor())\n processor = BulkDocProcessor(record_provider, self.doc_processor(self.domain))\n return processor.run()", "def encode_recent_observation(self):\n assert self.num_in_buffer > 0\n return self._encode_observation((self.next_idx - 1) % self.size)", "def delete(self, key):\n self._cur_batch.delete(key)\n self._num_mutations += 1\n if self._num_mutations >= MAX_MUTATIONS_IN_BATCH:\n self.commit()\n self.begin()", "def __iter__(self):\n self.iterator = 0\n return self", "def __iter__(self):\n self.iterator = 0\n return self", "def __iter__(self):\n self.iterator = 0\n return self", "def __iter__(self):\n self.iterator = 0\n return self", "def run(self, iteration_key):\n record_provider = SqlDocumentProvider(iteration_key, self.case_accessor())\n logger = SQLBasedProgressLogger(iteration_key)\n processor = BulkDocProcessor(record_provider, self.doc_processor(self.domain),\n progress_logger=logger)\n processed, skipped = processor.run()\n return processed, skipped, logger.logs", "def store(key):\n\n @node(name=\"store('{}')\".format(key))\n @bind_call_state\n def storer(call_state, val):\n call_state.data[key] = val\n return val\n\n return storer", "def __init__(self):\n super(KeyIterator, self).__init__()\n self.iterator = self.ValueIterator()", "def shard(self, dataset_iter):\n return dataset_iter", "def iterkeys(self):\r\n for wr in self.data.iterkeys():\r\n obj = wr()\r\n if obj is not None:\r\n yield obj", "def iterkeys(self):\r\n for wr in self.data.iterkeys():\r\n obj = wr()\r\n if obj is not None:\r\n yield obj", "def commit(self):\n self._cur_batch.commit()\n self._cur_batch = None\n self._num_mutations = 0", "def successor(self, key):\r\n index = self.locate_successor(key)\r\n self.keys[index] if index < self.num_keys() else None", "def put(self, key, item):\n if key or item:\n self.cache_data[key] = item\n if len(self.cache_data) > BaseCaching.MAX_ITEMS:\n print(\"DISCARD: {}\".format(self.last))\n del self.cache_data[self.last]\n self.last = key", "def _process(self, start_key, batch_size):\n\n query = self.MODEL.all()\n if start_key:\n query.filter('__key__ > ', start_key)\n\n try:\n entities = query.fetch(batch_size)\n\n if not entities:\n # all entities has already been processed\n return\n\n for entity in entities:\n try:\n self._processEntity(entity)\n except db.Error, e:\n import logging\n logging.exception(e)\n logging.error(\"Broke on %s: %s\" % (entity.key().name(), self.MODEL))\n\n # process the next batch of entities\n start_key = entities[-1].key()\n deferred.defer(self._process, start_key, batch_size)\n except DeadlineExceededError:\n # here we should probably be more careful\n deferred.defer(self._process, start_key, batch_size)", "def inc(self, key: str) -> None:\n if key in self.keyCnt:\n self.changeKey(key, 1)\n else:\n self.keyCnt[key] = 1\n # 说明没有计数为1的节点,在self.head后面加入\n if self.head.next.cnt != 1:\n self.addNodeAfter(Node(1), self.head)\n self.head.next.keySet.add(key)\n self.cntKey[1] = self.head.next", "def flush_to_disk(self):\n logger.info(\"Flushing %s queries from in-memory cache to disk\", len(self.batch_writes))\n rows = self.memory_connection.execute(f\"\"\"\n 
SELECT hash_id, query, raw_query, domain, intent FROM queries\n WHERE rowid IN ({\",\".join(self.batch_writes)});\n \"\"\")\n self.disk_connection.executemany(\"\"\"\n INSERT OR IGNORE into queries values (?, ?, ?, ?, ?);\n \"\"\", rows)\n self.disk_connection.commit()\n self.batch_writes = []", "def put(self, key, item):\n if key is None or item is None:\n return\n if key in self.key_tracker.keys():\n self.key_tracker.pop(key)\n if len(self.key_tracker) >= BaseCaching.MAX_ITEMS:\n x = self.most_recent_use_key()\n print(\"DISCARD: {}\".format(x))\n self.key_tracker.pop(x)\n self.cache_data.pop(x)\n self.cache_data.update({key: item})\n self.key_tracker.update({key: self.count})\n self.count += 1", "def self_insert():\r\n insert_char(last_key())", "def batchstore(self, reward, next_obs):\n self.count_oa[self.current_obs, self.current_act] += 1\n self.count_oao[self.current_obs, self.current_act, next_obs] += 1\n self.reward_oa[self.current_obs, self.current_act] += reward\n \n # updating the value table, estiamting the current state-action values\n self.valQoa[self.current_obs, self.current_act]\\\n += self.alpha * ((1-self.gamma) * reward\\\n + self.gamma * np.dot(self.X[next_obs], self.valQoa[next_obs])\\\n - self.valQoa[self.current_obs, self.current_act])\n\n self.next_obs = next_obs # just for consistency checking\n \n self.ret = (1-self.gamma)*reward + self.gamma * self.ret\n self.batch_step += 1\n self.total_step += 1", "def put(self, entity):\n self._cur_batch.put(entity)\n self._num_mutations += 1\n if self._num_mutations >= MAX_MUTATIONS_IN_BATCH:\n self.commit()\n self.begin()", "def writeData(self, key, value, context):\n print(\"key_{}\".format(key))\n print(\"val_{}\".format(value))\n\n address = _make_benchcontract_address(\"key_{}\".format(key))\n print(\"address: {}\".format(address))\n value_encoded = (\"{}\".format(value)).encode()\n print(\"encoded value: {}\".format(value_encoded))\n context.set_state(\n {address: value_encoded},\n timeout=self.timeout)\n print(\"writeData stored {} --> {} to state\".format(key, value))\n return 0", "def compress(key):\n instance = key.get()\n if not instance:\n logging.warning('Instance does not exist: %s', key)\n return\n\n if instance.active_metadata_update:\n logging.warning('Instance already has active metadata update: %s', key)\n return\n\n if not instance.pending_metadata_updates:\n return\n\n compress_pending_metadata_updates(key)\n metrics.send_machine_event('METADATA_UPDATE_READY', instance.hostname)", "def __getitem__(self, key):\n if isinstance(key, int):\n if key < 0:\n raise IndexError('Cannot index less than 0')\n offset = key % BLOCK_SIZE\n index = (key/BLOCK_SIZE) % CACHE_ENTRIES\n tag = self.start + ((((key/BLOCK_SIZE)*BLOCK_SIZE)/SAMPLE_RATE)*qdf.SECOND)\n if self.cache[index][CACHE_INDEX_TAG] == None:\n #cache entry is empty\n yield self._query_data(index, tag)\n elif self.cache[index][CACHE_INDEX_TAG] != tag:\n #cache miss\n yield self._query_data(index, tag)\n datapoint = self.cache[index][CACHE_INDEX_DATA][offset]\n if datapoint == None:\n defer.returnValue(datapoint)\n if datapoint.time > self.end:\n raise IndexError('Requested date past end-date:\\n'+\n 'End-Date: '+str(self.end)+'\\n'+\n 'Requested-Date: '+str(self.cache[index][offset].time))\n defer.returnValue(datapoint)\n\n elif isinstance(key, slice):\n #not implemented yet\n raise TypeError('list indices must be integers, not '+type(key))\n else: #slice error\n raise TypeError('list indices must be integers, not '+type(key))", "def sample_states(self, 
key):\n states = tf.TensorArray(tf.int32, size=0, dynamic_size=True)\n key, subkey = split_key(key)\n state = self.sample_state(subkey, 0) # pytype: disable=wrong-arg-types # dynamic-method-lookup\n len_x, len_y = 0, 0\n i = 0\n while state != self.END and tf.maximum(len_x, len_y) < self._max_len - 1:\n states = states.write(i, state)\n len_x += self._delta_len_x[state]\n len_y += self._delta_len_y[state]\n i += 1\n key, subkey = split_key(key)\n state = self.sample_state(subkey, state)\n return states.stack()", "def filter(self, key):\n with suppress(KeyError):\n yield from self.data[key]", "def iter_batch(self):\n\n # model initialization\n self._set_train()\n\n if not self.batch_process:\n self.batch_process = self._train_batch()\n return self.batch_process.__next__()\n else:\n try:\n return self.batch_process.__next__()\n except StopIteration:\n # update the state if StopIteration\n if self.info:\n print(f\"\\rEpoch: { self.epoch } | Average loss: { self.epoch_loss.avg }\")\n\n # update epoch and reset the epoch_loss\n self.epoch_loss.reset()\n self.epoch += 1\n\n # reset the batch process\n del self.batch_process\n self.batch_process = self._train_batch()\n return self.batch_process.__next__()", "def optimise_for(self, key):\n\n self.optimisation_keys.append(key)\n self.entry.incoming_node_refs_list.add_optimisation_key(key)\n self.entry.outgoing_node_refs_list.add_optimisation_key(key)\n if key not in self.entry.data:\n self.entry.update_data(key, 0)", "def __iter__(self):\n\n return iter([key for key in self._data.keys()])", "def deepmd_update_checkpoint(iter_index: int):\n # Now update will preserve the previous steps information\n with open('generator_checkpoint.json', 'r') as generate_ckpt:\n ckpt = json.load(generate_ckpt)\n ckpt['status'] = 'deepmd'\n ckpt['config_index'] = 0 # multiprocessing don't need graph_index\n ckpt['set_index'] = 0\n ckpt['iter_index'] = iter_index\n ckpt['dump_to_poscar'] = False\n\n # Dump to the same file and erase the former\n with open('generator_checkpoint.json', 'w') as generate_ckpt:\n json.dump(ckpt, generate_ckpt, indent=2)", "def __iter__(self):\n self.count = 0\n return self" ]
[ "0.5560349", "0.5399405", "0.5302348", "0.5262011", "0.5024422", "0.50054467", "0.49560073", "0.49460107", "0.47173524", "0.4702185", "0.4692565", "0.46744853", "0.46606264", "0.4658389", "0.46374193", "0.45671514", "0.45634228", "0.45548865", "0.45259008", "0.45051858", "0.447539", "0.4470084", "0.44618055", "0.44412106", "0.44157273", "0.44156754", "0.4412035", "0.43980506", "0.43676764", "0.43339434", "0.43333077", "0.42928973", "0.42928937", "0.42904833", "0.42723444", "0.425927", "0.42580375", "0.42539468", "0.4248059", "0.422918", "0.42278197", "0.4224785", "0.4213798", "0.42026013", "0.41970232", "0.41888562", "0.41850737", "0.41757247", "0.41742077", "0.4165643", "0.41611937", "0.41581306", "0.41555744", "0.41534796", "0.4139965", "0.41327432", "0.41275015", "0.41210926", "0.41206884", "0.41201288", "0.41128546", "0.4110076", "0.40966025", "0.40966025", "0.40960345", "0.40943858", "0.40941313", "0.408895", "0.408886", "0.40825865", "0.40791592", "0.40791592", "0.40791592", "0.40791592", "0.40746215", "0.4069159", "0.40689743", "0.406704", "0.40596598", "0.40596598", "0.40500557", "0.40483612", "0.4044782", "0.40422812", "0.40413317", "0.40374002", "0.40305504", "0.4028531", "0.4021021", "0.4020863", "0.4014852", "0.40138668", "0.40103054", "0.40068159", "0.4006468", "0.40061906", "0.40057066", "0.40040433", "0.4003639", "0.39998242" ]
0.6080377
0
Restores a filter previously saved using SCANDUMP. See the SCANDUMP command for example usage. This command will overwrite any bloom filter stored under key. Ensure that the bloom filter will not be modified between invocations.
def bfLoadChunk(self, key, iter, data):
    params = [key, iter, data]
    return self.execute_command(self.BF_LOADCHUNK, *params)
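A minimal usage sketch of the SCANDUMP/LOADCHUNK round trip described above. It assumes ``client`` is an instance of the class these wrappers belong to (including the ``bfScandump`` wrapper that appears later in this record) and that each ``bfScandump`` reply unpacks into an ``(iterator, data)`` pair; the key names are illustrative.

def copy_bloom_filter(client, src_key, dst_key):
    # Walk the SCANDUMP iterator: start at 0, stop when the server returns 0.
    chunks = []
    it = 0
    while True:
        it, data = client.bfScandump(src_key, it)
        if it == 0:
            break
        chunks.append((it, data))
    # Replay every chunk under the destination key. LOADCHUNK overwrites any
    # filter already stored there, and the source filter must not be modified
    # between the dump and the load.
    for it, data in chunks:
        client.bfLoadChunk(dst_key, it, data)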
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def removeAutoSaveRestoreFilter(filter):", "def addAutoSaveRestoreFilter(filter):", "def highpass_filter_reset(self, data):\n zi = scipy.signal.sosfilt_zi(self._highpass_sos)\n print('Zi shape: ', zi.shape, data.shape)\n self._highpass_state = data[0, :] * np.repeat(zi[:, :, np.newaxis],\n data.shape[1], axis=2)\n logging.info('Resetting the high-pass filter state.')", "def lowpass_filter_reset(self, data):\n zi = scipy.signal.sosfilt_zi(self._lowpass_sos)\n self._lowpass_state = data[0, :] * np.repeat(zi[:, :, np.newaxis],\n data.shape[1], axis=2)\n logging.info('Resetting the low-pass filter state.')", "def removeAutoSaveFilter(filter):", "def copy(self):\n new_filter = BloomFilter(self.capacity, self.error_rate)\n new_filter.filter = self.filter.copy()\n return new_filter", "def copy(self):\n raise NotImplementedError(\"RedisLocalBloomFilter not support copy\")", "def do_reset(self, args):\n\t\tself.parent.filter = {}\n\t\tself.apply_filter()\n\t\tself._update_prompts()", "def autoSaveRestoreFilter(filename):\n return _doAutoSaveCallbacks( autoSaveRestoreFilters, filename )", "def restore(self):\n\n self.dispersion = self.raw_dispersion\n self.flux = self.raw_flux\n self.flux_err = self.raw_flux_err\n self.reset_mask()", "def ResetAvgFilter(self):\n self.k = 1\n self.prevAvg = 0", "def reset_filter(self):\n arlen = len(self.variant_list)\n self.filter = np.zeros((arlen, arlen)) == 0", "def restore(self):\n self.weight = self._backup_weight", "def restore(self, key, history):\n self.goal, used = key\n self._used = []\n for row in used:\n self._used.append(list(row))\n self.history = list(history)", "def apply_filters(self, new_filters):\n\t\tself.filters = new_filters", "def addAutoSaveFilter(filter):", "def load(self, source: Union[str, Any], key: str) -> None: # type: ignore\n self._logger.info(f\"Loading filter policy model from {source} to {key}\")\n if 'torch' in key:\n model = load_torch_model(source,'filter',device=self._config.device)\n else:\n model = load_model(source, key, self._config.use_remote_models)\n self._items[key] = {\n \"model\": model\n }", "def apply_filter(self, inplace=True):\n\n if self.filter is None:\n if not inplace:\n return copy.deepcopy(self)\n else:\n return None\n\n x = copy.copy(self.__dict__)\n x['data'] = self.get_data()\n x['locs'] = self.get_locs()\n\n if self.filter == 'kurtosis':\n x['kurtosis'] = x['kurtosis'][x['kurtosis'] <= x['kurtosis_threshold']]\n\n for key in ['n_subs', 'n_elecs', 'n_sessions', 'dur', 'filter_inds', 'nifti_shape']:\n if key in x.keys():\n x.pop(key)\n\n boc = Brain(**x)\n boc.filter = None\n boc.update_info()\n if inplace:\n self.__init__(boc)\n else:\n return boc", "def _initial_blur(self):\n if self.init_sigma > self.cur_sigma:\n sigma = sqrt(self.init_sigma * self.init_sigma - self.cur_sigma * self.cur_sigma)\n self.data = gaussian_filter(self.raw, sigma)\n else:\n self.data = self.raw", "def save(self):\n saved_filter = SavedFilterIterator()\n source_field = self._source.serialized_name() + '_source'\n getattr(saved_filter, source_field).CopyFrom(self._source.save())\n saved_filter.expression = self._raw_expression\n if self._mu is not None:\n pyDict_to_protoDict(self._mu, saved_filter.mu)\n return saved_filter", "def _restore(self):\n self._logger = LOGGER\n self._param_store = pyro.get_param_store()\n self.set_state(self.best_params)\n self._alpha_guide_prior_params = dict(\n self._param_store.named_parameters()\n )", "def reset_filters():\n logger.info(\"reset filters\")\n global filter_item\n filter_item = -1\n 
filter_topics_table.view.filters = [IndexFilter()]\n filter_custom_table.view.filters = [IndexFilter()]\n filter_label.text = \"\"", "def prepend_crds_filter(self, filter):\n if filter not in self.filters:\n self.filters = [filter] + self.filters", "def autoSaveFilter(filename):", "def update_filters(self, **kwargs):\n self._FILTERS = kwargs", "def reinitialize_level_set_image_filter(*args, **kwargs):\n import itk\n instance = itk.ReinitializeLevelSetImageFilter.New(*args, **kwargs)\n return instance.__internal_call__()", "def restore(self):\n self.u = self.ub.copy()\n self.w = self.wb.copy()\n self.v = self.vb.copy()\n if self.en_bias: self.b = self.bb.copy()", "def filterToSat( bmp, savefile = '' ):\n for h in range(bmp.height):\n for w in range(bmp.width):\n HSL = RGBtoHSL( bmp.pixels[h][w] )\n sat = int(255*HSL[1]) # convert to 0-255 range\n bmp.pixels[h][w] = (sat,sat,sat)\n if( savefile != '' ):\n bmp.save(savefile)\n return bmp", "def use_effect(self):\n if self.preview_name in FILTERS:\n photo = Image.open(self.path.url[1:])\n preview = photo.filter(FILTERS.get(self.preview_name))\n preview.save(self.path.url[1:])", "def xor_folding(self):\n lbs = round(self.bit_size / 2)\n cap = round(self.capacity / 2)\n\n fold_pos = round( len(self.filter) / 2 )\n\n a = self.filter[0:fold_pos]\n b = self.filter[fold_pos:]\n # print('======>',self.bit_size,'#',len(a),len(b))\n\n r = BloomFilter(cap=cap, fpr=self.false_positive_rate, bfpower=self.potencia, bs=lbs)\n r.filter = a.__ixor__(b)\n return r", "def restore_full_state(self, state):\n state_ref = self.ale.decodeState(state)\n self.ale.restoreSystemState(state_ref)\n self.ale.deleteState(state_ref)", "def restore(self, restore):\n self._restore = restore", "def filter(self, filter):\n self._filter = filter", "def filter(self, data):\n self.data = pysap.Image(data=self.flt.filter(data))", "def save_filter(self, filename, overwrite=False):\n hdu = fits.PrimaryHDU(self.filter, self.header)\n hdu.writeto(filename, clobber=overwrite)\n fits.append(filename, self.approx, self.header)\n fits.append(filename, self.filter + self.approx, self.header)\n fits.append(filename, self.max_scale_image(), self.header)", "def union(self, other):\n if self.capacity != other.capacity or \\\n self.error_rate != other.error_rate:\n raise ValueError(\"Unioning filters requires both filters to have both the same capacity and error rate\")\n new_bloom = self.copy()\n new_bloom.filter = new_bloom.filter | other.filter\n return new_bloom", "def restore(self) -> 'BaseImage':\n self._surface = self._original_surface.copy()\n return self", "def reload(self):\n self.restore()", "def dbtrace_filter_off_change(new_state):\n\n pass", "def maybe_outfeed(self, key, value):\n if self._filters is not None:\n if any(f in key for f in self._filters):\n self._vals[key] = value\n else:\n self._vals[key] = value", "def reset_stash(self):\n for particle in self.get_active_particles():\n self.stash_particle(particle)\n\n self._stashed_particles.clear()\n self._stashed_particles.extend(self._all_particles)", "def restore_fast_weights(self):\n for group in self.optimizer.param_groups:\n for p in group[\"params\"]:\n param_state = self.state[p]\n p.data.copy_(param_state[\"backup_params\"])\n del param_state[\"backup_params\"]", "def add(self, key):\n if key in self:\n return True\n if not self.filters:\n filter = RedisLocalBloomFilter(\n server=self.server, \n bfkeypreffix = self.FILTER_KEY_FMT % (self.bfkeypreffix, self.filter_count),\n capacity=self.initial_capacity,\n 
error_rate=self.error_rate * (1.0 - self.ratio))\n self.filter_count += 1\n self.filters.append(filter)\n else:\n filter = self.filters[-1]\n if filter.count >= filter.capacity:\n capacity = filter.capacity * self.scale\n if capacity > MAX_PER_SLICE_SIZE:\n capacity = MAX_PER_SLICE_SIZE\n filter = RedisLocalBloomFilter(\n server=self.server,\n bfkeypreffix = self.FILTER_KEY_FMT % (self.bfkeypreffix, self.filter_count),\n capacity=capacity,\n error_rate=self.error_rate * (1.0 - self.ratio))\n self.filter_count += 1\n self.filters.append(filter)\n if self.max_filters > 0 and len(self.filters) >= self.max_filters:\n f = self.filters[0]\n f.clear()\n del self.filters[0]\n filter.add(key, skip_check=True)\n return False", "def full_reset(self):\n self.at_cmd('CFUN=1')", "def test_restore_with_filter_regex(self):\n key_name = \"ent-backup\"\n if self.backupset.random_keys:\n key_name = \"random_keys\"\n self.validate_keys = self.input.param(\"validate_keys\", False)\n if self.validate_keys:\n gen = BlobGenerator(key_name, \"ent-backup-\", self.value_size,\n end=self.num_items)\n else:\n gen = DocumentGenerator('random_keys', '{{\"age\": {0}}}', list(range(100)),\n start=0, end=self.num_items)\n\n self._load_all_buckets(self.master, gen, \"create\", 0)\n self.log.info(\"Start backup\")\n self.backup_create()\n self.backup_cluster()\n self.backup_restore()\n self.merged = False\n regex_check = self.backupset.filter_keys\n if not self.backupset.filter_keys:\n regex_check = self.backupset.filter_values\n self.validate_backup_data(self.backupset.backup_host,\n [self.backupset.restore_cluster_host],\n key_name, False, False, \"memory\",\n self.num_items, None,\n validate_keys=self.validate_keys,\n regex_pattern=regex_check)", "def filter_op(self, filter_option):\n print(\"===||| Initiating Filter-Resize Fill Operation |||===\")\n fill_op = shmops.Fill_Operation(id='4321')\n\n with Image.open(self.path) as map_img:\n map_img.thumbnail((self.map_major_dim, self.map_major_dim),resample=filter_option)\n pixels = map_img.convert('RGB').load()\n for x in progress_bar.progress_bar(range(map_img.width), \"Processing: \",width=36):\n for y in range(map_img.height):\n r,g,b = pixels[x,y]\n fill_op.add_fill(x,y,palette.rgb_to_hex(r,g,b))\n return fill_op", "def write_filter_cache_scratch(filter_cache, cache_dir=None, skip_keys=None):\n if skip_keys is None:\n skip_keys = []\n # if the keys_before instantiation wasn't a list, then\n # keys_before would just be the current keys of cache and we\n # wouldn't have any new keys.\n new_filters = {k: filter_cache[k] for k in filter_cache if k not in skip_keys}\n if len(new_filters) > 0:\n # generate new file name\n if cache_dir is None:\n cache_dir = os.getcwd()\n cache_file_name = '%032x' % random.getrandbits(128) + '.filter_cache'\n cfile = open(os.path.join(cache_dir, cache_file_name), 'ab')\n pickle.dump(new_filters, cfile)\n else:\n warnings.warn(\"No new keys provided. 
No cache file written.\")", "def clear_crossfilter2(self):\n print ('Trigger clear')\n self.query_dict = {}\n self.plot_data = None\n self.create_figure_new()\n layout_doc.children[4].children[1] = self.p", "def restore_speedsters(apps, schema_editor):\n\n Pokemon = apps.get_model(\"stats\", \"Pokemon\")\n Pokemon.objects.filter(id__in=[\"ZERAORA\", \"TALONFLAME\", \"ABSOL\", \"GENGAR\"]).update(category=\"SS\")", "def filterWithSITK(self):\n #research\n profbox()\n backgroundNode = slicer.app.layoutManager().sliceWidget(\"Red\").sliceLogic().GetBackgroundLayer().GetVolumeNode()\n backgroundNodeName = backgroundNode.GetName()\n backgroundImage = sitk.ReadImage( sitkUtils.GetSlicerITKReadWriteAddress( backgroundNodeName ) )\n filterImage = sitk.GradientMagnitudeRecursiveGaussian( backgroundImage, float(2) );\n del backgroundImage\n sitk.WriteImage( filterImage, sitkUtils.GetSlicerITKReadWriteAddress( backgroundNodeName ) )\n \n # notify\n backgroundNode.GetImageData().Modified()\n backgroundNode.Modified()", "def use_effect(effect, photo_edit):\n if effect in FILTERS:\n photo = Image.open(photo_edit.upload)\n photo = photo.filter(FILTERS.get(effect))\n\n photo.save(photo_edit.upload.url[1:])", "def replicate(self, filter_state: FilterResult) -> \"FilterAlgorithmState\":\n\n return FilterAlgorithmState(filter_state)", "def swarpfilter(d, dir, directory, images, keys, filter, lamp, camera, done, output, type):\n filt = images.files_filtered(FWINAME=filter, FLSPECTR=lamp, CAMNAME=camera, HISTORY=done)\n files = [d + x for x in filt.tolist()]\n print(files)\n if files:\n swarp(files, output=directory + '/' + output + '.fits', celestial_type=type)", "def setSelectionfilter(self, scenefilter):\n self._selectionFilter = scenefilter\n sceneviewerfilter = self._sceneviewer.getScenefilter()\n if self._selectionFilter is not None:\n scenefiltermodule = self._context.getScenefiltermodule()\n scenefilter = scenefiltermodule.createScenefilterOperatorAnd()\n scenefilter.appendOperand(sceneviewerfilter)\n if self._selectionFilter is not None:\n scenefilter.appendOperand(self._selectionFilter)\n else:\n scenefilter = sceneviewerfilter\n self._scenepicker.setScenefilter(scenefilter)", "def filter_clear(client, args):\n client.context.set_query([])", "def apply_filters(\n isovar_result,\n filter_thresholds={},\n filter_flags=[]):\n filter_values = OrderedDict(isovar_result.filter_values.items())\n new_filter_values = evaluate_filters(\n isovar_result,\n filter_thresholds=filter_thresholds,\n filter_flags=filter_flags)\n filter_values.update(new_filter_values)\n return isovar_result.clone_with_updates(filter_values=filter_values)", "def remove_crds_filter(self, filter):\n if filter in self.filters:\n self.filters.remove(filter)", "def reset_data():\n shutil.copy2(\n 'data/one_producer_many_consumers.ORIG.json',\n 'data/one_producer_many_consumers.json'\n )", "def bfScandump(self, key, iter):\n params = [key, iter]\n \n return self.execute_command(self.BF_SCANDUMP, *params)", "def clear_crossfilter1(self):\n print ('Trigger clear')\n self.query_dict = {}\n self.plot_data = None\n self.create_figure_new()\n layout_doc.children[4].children[0] = self.p", "def update_filter_data(cache_dir=CACHE_DIR):\n # Obtain all filter IDs from cache as old_filters\n old_index = load_local_filters_index(cache_dir)\n old_filters = np.array(old_index)\n\n # Obtain all filter IDs from SVO FPS as new_filters\n logger.info(\"Fetching latest index of all filters at SVO (in batches) ...\")\n download_svo_filters_index(cache_dir)\n 
new_index = load_svo_filters_index(cache_dir)\n new_filters = new_index[\"filterID\"].to_numpy()\n\n # Check whether there is need to update\n if np.array_equal(old_filters, new_filters):\n logger.info('Filter data is already up-to-date!')\n set_cache_updation_date()\n return False\n\n # Iterate & remove (old_filters - new_filters) from cache\n filters_to_remove = np.setdiff1d(old_filters, new_filters)\n logger.info(\"Removing outdated filters ...\")\n for filter_id in filters_to_remove:\n facility, instrument, filter_name = re.split('/|\\.', filter_id)\n filter_file = os.path.join(cache_dir, facility, instrument,\n '{0}.vot'.format(filter_name))\n if os.path.exists(filter_file):\n os.remove(filter_file)\n remove_empty_dirs(cache_dir)\n\n # Iterate & download (new_filters - old_filters) into cache\n filters_to_add = np.setdiff1d(new_filters, old_filters)\n logger.info(\"Caching new filters ...\")\n iterative_download_transmission_data(filters_to_add, cache_dir)\n\n # Save in config that all filters were updated successfully\n set_cache_updation_date()\n return True", "def removeAutoSaveDeleteFilter(filter):", "def filter(data_raw: dict, sigma: int=1) -> dict:\n data = Filter.__band_filter(data_raw, lowFreq=2, highFreq=70, filterType='bandstop')\n data = Filter.__laplacian_filter(data,sigma) #Need to write test for this once its complete\n return data", "def copy(self) -> \"FilterAlgorithmState\":\n\n # NB: This is untested and might not be optimal tbh\n return deepcopy(self)", "def filter(self, filter_dict):\n pass", "def filterWithSITK(self):\r\n # research\r\n profbox()\r\n backgroundNode = slicer.app.layoutManager().sliceWidget(\"Red\").sliceLogic().GetBackgroundLayer().GetVolumeNode()\r\n backgroundNodeName = backgroundNode.GetName()\r\n backgroundImage = sitk.ReadImage(sitkUtils.GetSlicerITKReadWriteAddress(backgroundNodeName))\r\n filterImage = sitk.GradientMagnitudeRecursiveGaussian(backgroundImage, float(2));\r\n del backgroundImage\r\n sitk.WriteImage(filterImage, sitkUtils.GetSlicerITKReadWriteAddress(backgroundNodeName))\r\n\r\n # notify\r\n backgroundNode.GetImageData().Modified()\r\n backgroundNode.Modified()", "def reset(self):\n self.current_exposure = None\n self.scores = {}", "def apply_filter(self, image):\n pass", "def _backup_and_load_cache(self):\n for group in self.optimizer.param_groups:\n for p in group['params']:\n param_state = self.state[p]\n param_state['backup_params'] = torch.zeros_like(p.data)\n param_state['backup_params'].copy_(p.data)\n p.data.copy_(param_state['cached_params'])", "def _backup_and_load_cache(self):\n for group in self.optimizer.param_groups:\n for p in group['params']:\n param_state = self.state[p]\n param_state['backup_params'] = torch.zeros_like(p.data)\n param_state['backup_params'].copy_(p.data)\n p.data.copy_(param_state['cached_params'])", "def get_filters(self, saving):\n self.filter_entry_dict.clear()\n\n for entry, var in self.filter_entries_list:\n if (entry.get() != \"\") and (var.get() != \"\") and (not saving):\n self.filter_entry_dict[var.get()] = entry.get()\n elif saving and var.get() != \"\":\n self.filter_entry_dict[var.get()] = entry.get()", "def reset(self):\n for item in TextChannelFilterItem.objects(channel_filter=self):\n item.delete()\n self.reset_counters()\n self.retrain()", "def restore(self):\n raise NotImplementedError", "def pop_and_restore(hsh, key, default=None):\n if key in hsh:\n value = hsh.pop(key)\n was_there = True\n else:\n value = default\n was_there = False\n\n yield value\n\n if was_there:\n 
hsh[key] = value\n else:\n hsh.pop(key, None)", "def filter_keys(self):\n filters = self.args.keyfilter.split('.')\n self.logger.info(u'Filtering with:{f}'.format(f=filters))\n data = self.inputdata\n newdata = {}\n for key, value in data.items():\n self.logger.info(u'\\nProcessing Key:{k}'.format(k=key))\n returned_data = dict_key_filter(key, value, filters, self.logger)\n if bool(returned_data):\n newdata[key] = returned_data\n self.logger.info(u'Data After filter:{d}'.format(d=newdata))\n self.outputdata = newdata", "def remove_baseline(self):\n\n print(\" \\t Apply Savitzky-Golay filter \\t %d\" %self.nwin)\n base_savgol = signal.savgol_filter(self.input, self.nwin, 1)\n self.input_nobase = self.input - base_savgol", "def blurImage(self):\n\n print (\"--Blurring Main Image--\")\n self.blurButton.setDown(True)\n im = Image.open(self.ActivePhoto)\n blurred_image = im.filter(ImageFilter.GaussianBlur(1))\n blurred_image.save(self.ActivePhoto)\n self.photo.setPixmap(QtGui.QPixmap(self.ActivePhoto))", "def append_crds_filter(self, filter):\n if filter not in self.filters:\n self.filters = self.filters + [filter]", "def _restore_default(self):\n self._data = self._default", "def filters(self, value):\n if not isinstance(value, dict):\n raise TypeError(\"input must be a dictionary\")\n\n self._filters = value", "def resetFilter(self, column):\n if self.hasFilter(column):\n column_name = self._dataframe.columns[column]\n del self._filters[column_name]\n self._applyFilters()", "def restore(self, event):\n\n self.undo_add()\n\n key_list = list(self.patch.engine.misc_data.keys())\n key = key_list[self.selected_index]\n self.patch.misc[key] = copy.copy(self.patch.engine.misc[key])\n\n self.misclist_update_row(self.selected_index)\n self.update_properties()", "def rfilter(self,state0):\n ok,tchi2 = True,0.\n state = state0.copy()\n ks = range(len(self.nodes))\n ks.reverse() \n for k in ks:\n node = self.nodes[k]\n zrun = node.zrun\n ok, state,F,Q = self.model.propagate(state,zrun)\n if (not ok):\n warning(\"kfilter.rfilter not possible to rfilter \",zrun)\n debug(\"kfilter.rfilter ok,chi2 \",(ok,tchi2))\n return ok,tchi2\n node.Fr = F\n node.Qr = Q\n node.setstate('rpred',state)\n fstate,fchi2 = node.predict(state)\n node.setstate('rfilter',fstate)\n node.setchi2('rfilter',fchi2)\n tchi2+=fchi2\n self.status='rfilter'\n debug(\"kfilter.rfilter ok,chi2 \",(ok,tchi2))\n return ok,tchi2", "def restore(self, checkpoint):\n raise NotImplementedError", "def filter(self, op=GaussianFilter):\n\n if self._verbose > 0:\n print(\"Filtering...\")\n\n # Import from utils specified params.\n params = get_filtering_params()\n\n negative = self.image_raw - op(sigma=params['sigma_bgd']).convolve(self.image_raw)\n\n self.image_filtered = op(sigma=params['sigma_spots']).convolve(negative)", "def _restore(self, checkpoint):\n checkpoint_path = os.path.join(checkpoint, \"model_weights\")\n self.model.load_weights(checkpoint_path)", "def filter_SF(commande,indexSF,min,max):\n commande+=\" -set_active_sf \"+str(indexSF)+\" -filter_sf \"+str(min)+\" \"+str(max)+\" -save_clouds \" \n subprocess.call(commande)\n return", "def switch_off():\n app.redis.flushall()\n colour = [0, 0, 0]\n app.data = {\"colour\": colour, \"mode\": \"sector-diverge\"}\n return enqueue_and_return(app.data)", "def reset_state(self):\n for name in self._buffers:\n self._buffers[name] = self._defaults[name]", "def absorb_one(self, fname, snver=1):\n\n gain = Gains()\n gain.load(fname, snver=snver)\n\n # if no any gains in\n if 
self._absorbed_gains._data is None:\n self._absorbed_gains = gain\n else:\n self._absorbed_gains = self._absorbed_gains * gain\n\n self.fnames.append(fname)", "def apply(self,src,dst):\n cv2.filter2D(src,-1,self._kernel,dst) #The second argument specifies the per-channel depth of the destination image\n #(such as cv2.CV_8U for 8 bits per channel). A negative value (as used here) means\n #that the destination image has the same depth as the source image.", "def butter_filter(datalist):\n fs = 200.00\n fHigh = 50.00\n fLow = 5.00\n N=4\n [b,a]=sg.butter(N,[fLow/fs, fHigh/fs], btype='band')\n global filtered\n #IIR filter\n return sg.filtfilt(b,a,datalist)", "def filterToLight( bmp, savefile = '' ):\n for h in range(bmp.height):\n for w in range(bmp.width):\n HSL = RGBtoHSL( bmp.pixels[h][w] )\n lit = int(255*HSL[2]) # convert to 0-255 range\n bmp.pixels[h][w] = (lit,lit,lit)\n if( savefile != '' ):\n bmp.save(savefile)\n return bmp", "def restore_from_snapshot(SnapshotId=None):\n pass", "def restore(self, weights_file):\r\n\r\n self.model.load_weights(weights_file, by_name=True)", "def end_filter_selection(update, context):\n context.user_data['START_OVER'] = True\n start(update, context)\n\n logger.info(\"User [%s] selected to return to previous menu, [Main Menu], \"\n \"from [Add Filter / Show Filters]\",\n update.callback_query.message.chat.first_name)\n return SELECT_ACTION", "def restore_model(self, file_name: str, only_load_processor: bool = False):\n path = os.path.join(self.checkpoint_path, file_name)\n with open(path, 'rb') as f:\n restored_state = pickle.load(f)\n if only_load_processor:\n restored_params = _filter_processor(restored_state['params'])\n else:\n restored_params = restored_state['params']\n self.params = hk.data_structures.merge(self.params, restored_params)\n self.opt_state = restored_state['opt_state']", "def restore(effect: list, target: \"PlayerCharacter or Monster\"):\n heal = effect[1]\n if target.ko:\n return\n if \"ALL\" in effect or \"HP\" in effect:\n if target.hp + heal > target.stats[\"MAXHP\"]:\n target.hp = target.stats[\"MAXHP\"]\n else:\n target.hp += heal\n if \"ALL\" in effect or \"MP\" in effect:\n if target.mp + heal > target.stats[\"MAXMP\"]:\n target.mp = target.stats[\"MAXMP\"]\n else:\n target.mp += heal", "def truncateHairCache(*args, q=True, query=True, e=True, edit=True, **kwargs)->Union[None,\n Any]:\n pass", "def reset_instances_filter(self):\n page_instances = self.page_instances()\n page_instances.field_filter_instances.value = ''\n page_instances.button_filter_instances.click()", "def _restore(self, a_path):\n super(RDPAnalyzer, self)._restore(a_path)\n self._model._restore()" ]
[ "0.5884355", "0.57030326", "0.5571721", "0.5458497", "0.54263365", "0.5365855", "0.53208745", "0.5198635", "0.50720936", "0.5028035", "0.5008022", "0.49658895", "0.4942818", "0.4931641", "0.48837966", "0.4855681", "0.4792813", "0.47841853", "0.47665617", "0.4763505", "0.46963236", "0.4681879", "0.46758863", "0.46728206", "0.4636246", "0.46359146", "0.46218073", "0.4608314", "0.46040952", "0.46031442", "0.45764622", "0.45578352", "0.45403206", "0.45363146", "0.44983244", "0.4496909", "0.44776177", "0.4464035", "0.44611457", "0.44494566", "0.44390154", "0.443866", "0.4431457", "0.4419995", "0.44176483", "0.44130453", "0.43973163", "0.4394511", "0.43810648", "0.43804145", "0.4360482", "0.43593782", "0.43569866", "0.43519622", "0.43487653", "0.43483925", "0.43390208", "0.43272507", "0.43253598", "0.43229324", "0.4291433", "0.42824447", "0.4279059", "0.4274247", "0.42709488", "0.42672947", "0.42544788", "0.42539564", "0.42517987", "0.42517987", "0.42478645", "0.4247635", "0.42359567", "0.42359045", "0.42355904", "0.42288014", "0.4223813", "0.42216504", "0.42203397", "0.42162284", "0.42143035", "0.42114192", "0.42095688", "0.420546", "0.4193259", "0.41904306", "0.41845122", "0.41822898", "0.4179132", "0.41682848", "0.41675878", "0.4166475", "0.4165475", "0.41521862", "0.41520712", "0.41514018", "0.4148514", "0.41473305", "0.41459236", "0.41445833", "0.41341078" ]
0.0
-1
Returns capacity, size, number of filters, number of items inserted, and expansion rate.
def bfInfo(self, key):
    return self.execute_command(self.BF_INFO, key)
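A hedged sketch of consuming that reply. It assumes ``client`` exposes the ``bfInfo`` wrapper above and that the raw reply arrives as a flat ``[field, value, field, value, ...]`` list; the key name is illustrative.

raw = client.bfInfo("crawled:urls")
# Pair field names with values: Capacity, Size, Number of filters,
# Number of items inserted, Expansion rate.
stats = {k.decode() if isinstance(k, bytes) else k: v
         for k, v in zip(raw[::2], raw[1::2])}
print(stats.get("Number of items inserted"), "items across",
      stats.get("Number of filters"), "sub-filter(s)")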
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def capacity(self):\n return sum(f.capacity for f in self.filters)", "def capacity(self):\n raise NotImplementedError()", "def capacity(self):\n return self.buffer_capacity.mean(dim=1)", "def Capacity(self) -> int:", "def get_capacity():\n fs.get_capacity()", "def capacity_used(self):\n raise NotImplementedError()", "def capacity(self):\n return self._capacity", "def capacity(self):\n return self._capacity", "def capacity(self):\n return self._cap", "def get_load_factor(self):\n # Your code here\n return self.total_items / self.capacity", "def get_num_slots(self):\n # Your code here\n return self.capacity", "def test_capacity_factor(pudl_out_eia):\n print(\"\\nCalculating generator capacity factors...\")\n cf = pudl_out_eia.capacity_factor()\n print(f\" capacity_factor: {len(cf)} records\")", "def knapsack(items, capacity):\r\n pass", "def __len__(self):\n return self.capacity", "def capacity(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"capacity\")", "def capacity(self):\r\n return len(self.frames)", "def compute_capacity(self, totalflow, i):\r\n self.capacity[i] = self.phi * self.flow[i] / (self.b[i] \\\r\n - self.beta*(self.flow[i]**self.theta) + np.log(1-totalflow) - np.log(self.flow[i]))", "def total_sdram_requirements(self):", "def capacity(self):\n capacity = {}\n resources = self.nodes[0].capacity.keys()\n for r in resources:\n values = [n.capacity[r] for n in self.nodes]\n capacity[r] = mean(values) if len(values) > 0 else 0.0\n return capacity", "def getCapacity(self):\n\n return self._dfa._capacity", "def available_capacity(self):\r\n return self.capacity - len(self.passengers)", "def _get_free_capacity(self):\n\n capacity = np.ones(len(self.grid.T)) * len(self.grid)\n capacity -= np.count_nonzero(self.grid, axis=0)\n return capacity", "def size(self):\r\n if self.full():\r\n return self.capacity()\r\n else:\r\n size = self._read_index - self._write_index\r\n if size < 0:\r\n return self.capacity() + size # wrap around\r\n else:\r\n return size", "def capacity(self):\n return str(int(self._properties.get('capacity')) * 1073741824)", "def size(self):\n return self.num_item", "def get_size(self):\n return (\n sys.getsizeof(self.children) +\n sys.getsizeof(self.parent) +\n sys.getsizeof(self.dataset_id) +\n sys.getsizeof(self.k) +\n self.filter.get_size()\n )", "def capacity(self):\n return self._ndef_file_size - 2", "def get_load_factor(self):\r\n return self.num_items / self.table_size", "def fsizes(self):\n return self._cache.fsizes", "def itemsize(self):\n return self.initial_value.itemsize", "def __init__(self, capacity, initial):\n\t\tself.capacity = capacity\n\t\tself.amount = initial", "def size(self):\n return self.N # Number of items in the stack", "def __init__(self, capacity, units=0):\n self.capacity = capacity\n self.units = units", "def bandwidth(self):\n self._filter()\n return 1. 
* self._aggregate / self._window", "def __init__(self, capacity=100):\n \n self.capacity = capacity\n self.size = 0\n self._keys = []\n self._entry = [[] for _ in range(capacity)]", "def capacity(self) -> Capacity:\n raw = self._call('GET', 'capacity')\n return Capacity.parse_raw(raw)", "def _numQueuedTotal(self):\n queueSize = len(self.__queue) + len(self.__clientQueue)\n return queueSize", "def usedspace(self):\n self.log.info(\"freespace\")\n nbytes = 0\n keys = list(self.downloads.keys())\n keys.sort()\n for key in keys:\n download = self.downloads[key]\n nbytes += download['size']\n self.log.info(\"returning:\" + str(nbytes))\n return nbytes", "def __init__(self, capacity=4):\n self.capacity = capacity\n self.size = 0\n self.table = [None] * capacity", "def _load_factor(self):\n return self.size / len(self.buckets)", "def return_item_collection_metrics_size(self):\n return self.__return_item_collection_metrics.size()", "def __len__(self):\n return sum(f.count for f in self.filters)", "def qsize(self) -> int:\n pass", "def size(self):\n return self._N", "def size(self) -> int:\n return self.num_items", "def __init__(self, capacity, fillValue=None):\r\n self._items = list()\r\n self._logicalSize = 0\r\n # Track the capacity and fill value for adjustments later\r\n self._capacity = capacity\r\n self._fillValue = fillValue\r\n for count in range(capacity):\r\n self._items.append(fillValue)", "def fs_percent_used_capacity(self):\n return self._fs_percent_used_capacity", "def __init__(self, capacity):\n self.capacity = capacity # Number of buckets in the hash table\n self.storage = [None] * capacity\n self.key_count = 0", "def CacheCapacity(self):\n if self.force_auto_sync:\n self.get('CacheCapacity')\n return self._CacheCapacity", "def test_bound_size_of_output_queue_size_reader(synthetic_dataset):\n TIME_TO_GET_TO_STATIONARY_STATE = 0.5\n\n with make_reader(synthetic_dataset.url, reader_pool_type='process', workers_count=1) as reader:\n assert 0 == reader.diagnostics['items_produced']\n next(reader)\n # Verify that we did not consume all rowgroups (should be 10) and ventilator throttles number of ventilated\n # items\n sleep(TIME_TO_GET_TO_STATIONARY_STATE)\n assert reader.diagnostics['items_consumed'] < 5\n assert reader.diagnostics['items_inprocess'] < 5", "def __init__(self, capacity=2):\r\n self._capacity = capacity\r\n self._data = [0] * self._capacity\r\n self._size = 0", "def get_num_items(self):\r\n return self.num_items", "def numberConsumed(self):\n\n\t\treturn len([bottle for bottle in self.bottles if bottle.consumption != None])", "def _grow(self):\n self.capacity *= self.factor\n temp = [None] * self.capacity\n for i in range(self.size):\n temp[i] = self.store[i]\n self.store = temp", "def __init__(self, capacity, fillValue = None):\n \n self._items = list() \n self._fillValue = fillValue\n self._DEFAULT_CAPACITY = capacity\n self._logicalSize = 0 #as required by exercise 1\n \n \n for count in xrange(capacity):\n self._items.append(self._fillValue)", "def capacity(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"capacity\")", "def capacity(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"capacity\")", "def capacity(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"capacity\")", "def capacity(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"capacity\")", "def capacity(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"capacity\")", "def capacity(self) -> Optional[pulumi.Input[int]]:\n return 
pulumi.get(self, \"capacity\")", "def capacity(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"capacity\")", "def totalsize(self):\n return sum([sz for sz in self.iterate()])", "def get_size(self):\n cum_size = 0\n for stream in self.__streams.values():\n cum_size += sys.getsizeof(stream)\n for trace in stream:\n cum_size += sys.getsizeof(trace)\n cum_size += sys.getsizeof(trace.stats)\n cum_size += sys.getsizeof(trace.stats.__dict__)\n cum_size += sys.getsizeof(trace.data)\n cum_size += trace.data.nbytes\n # Add one percent buffer just in case.\n return cum_size * 1.01", "def weighted_capacity(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"weighted_capacity\")", "def itemsize(self):\n return self.dtype.base.itemsize", "def itemsize(self):\n return self.dtype.base.itemsize", "def itemsize(self):\n return self.dtype.base.itemsize", "def itemsize(self):\n return self.dtype.base.itemsize", "def consumed_spice_capacity_in_bytes(self) -> pulumi.Output[float]:\n return pulumi.get(self, \"consumed_spice_capacity_in_bytes\")", "def size(self):\n\t\treturn self._count", "def get_capacity_var(self):\n return self._capacity_var", "def stats(self):\n nqbits = self.operator.num_qubits", "def get_capacity_param(self):\n intr = self.get_interaction()\n return intr.get_capacity(None, None, None, None, raw=True)", "def memory_usage(self):\n\n def multiply_iter(iterable):\n res = 1\n for x in iterable:\n res *= x\n return res\n\n def add_params(parameter):\n res = 0\n for x in parameter:\n res += multiply_iter(x.shape)\n return res\n\n feat = add_params(self.features.parameters())\n clsf = add_params(self.classifier.parameters())\n total = feat + clsf\n\n mb_f = 4 / 1024 ** 2\n\n print(\"Conv : {0}\".format(feat))\n print(\"FC : {0}\".format(clsf))\n print(\"-----------------\")\n print(\"Total : {0}\".format(total))\n print(\"Memory : {0:.2f}MB\".format(total * mb_f))\n print(\"\")", "def inventoryCapacity(self):\n # TODO: Worry about how +Strength and +Capacity gear could allow you to carry more than your capacity.\n if self.totalStrength <= 15:\n return int(6 * self.totalStrength + self._baseInventoryCapacity + self._equipmentCarryingCapacity)\n else:\n return int(90 + (self.totalStrength - 15) * 9 + self._baseInventoryCapacity + self._equipmentCarryingCapacity)", "def get_space_used():\n fs.get_space_used()", "def size(self): \n return self.qSize", "def capacity(self) -> Optional[int]:\n return pulumi.get(self, \"capacity\")", "def capacity(self) -> Optional[int]:\n return pulumi.get(self, \"capacity\")", "def capacity(self) -> Optional[int]:\n return pulumi.get(self, \"capacity\")", "def capacity(self) -> Optional[int]:\n return pulumi.get(self, \"capacity\")", "def capacity_step_size_gb(self) -> str:\n return pulumi.get(self, \"capacity_step_size_gb\")", "def __len__(self):\n return self._used - self._deleted", "def get_space_used():\n files = jobtracker.query(\"SELECT * FROM files \" \\\n \"WHERE status IN ('added', 'downloaded', 'unverified')\")\n\n total_size = 0\n for file in files:\n total_size += int(file['size'])\n return total_size", "def size(self): #returns the size or number of items in the stack\n if self.is_empty():\n return 0\n else:\n return self.num_items", "def update_size(self):\n return 3 + self.memory_unit_size", "def __init__(self):\n INIT_CAPACITY = 8\n LOAD_FACTOR = 2 / 3\n self.capacity = INIT_CAPACITY\n self.size = 0\n self.slots = [None] * INIT_CAPACITY\n self.load_factor = LOAD_FACTOR", "def get_capacity(self, meta, raven_vars, dispatch, t, 
raw=False):\n return self.get_interaction().get_capacity(meta, raven_vars, dispatch, t, raw=raw)", "def overall_reduction(self):\n return 84", "def __init__(self, capacity):\n self.capacity = capacity #this is example for list implementation\n self.head = [None] * capacity #this is example for list implementation\n self.num_items = 0 #this is example for list implementation", "def load_factor(self):\n return self.__num_records / self.__size", "def get_additional_ball_capacity(self):\n return 999", "def trace_buffer_capacity(self):\n cmd = enums.JLinkTraceCommand.GET_CONF_CAPACITY\n data = ctypes.c_uint32(0)\n res = self._dll.JLINKARM_TRACE_Control(cmd, ctypes.byref(data))\n if (res == 1):\n raise errors.JLinkException('Failed to get trace buffer size.')\n return data.value", "def available_sizes(self, mode='normal', state='on'):\n raise NotImplementedError", "def spill_size(self):\n return self._spill_size", "def calculate_cache_size(self):\n cache_size = self._total_chunk_size_left()\n N_l = self.N_l // self.conv_factor\n cache_sizes = []\n for lth in range(self.n_layers):\n cache_sizes.append(cache_size)\n if self.lc_bidir:\n cache_size = max(0, cache_size - N_l)\n N_l //= self.subsample_factors[lth]\n cache_size //= self.subsample_factors[lth]\n return cache_sizes", "def search_space_size(self):", "def items_num(self):\n\t\treturn len(self.items)", "def items_num(self):\n\t\treturn len(self.items)", "def size(self):\n return len(self._queue_items)" ]
[ "0.75668544", "0.6732171", "0.6672159", "0.6671824", "0.6568066", "0.6558056", "0.64776075", "0.6394633", "0.62894094", "0.6270864", "0.6231363", "0.6148819", "0.6145135", "0.6037437", "0.6036127", "0.59975326", "0.5950704", "0.59222466", "0.59161794", "0.5884257", "0.5837646", "0.5831668", "0.57741886", "0.5770942", "0.57362926", "0.571444", "0.57008183", "0.5695673", "0.56929874", "0.56813246", "0.5659317", "0.56578827", "0.5650556", "0.5585062", "0.5582291", "0.5581234", "0.55769175", "0.5571557", "0.5560189", "0.5559186", "0.55580413", "0.55467963", "0.5541247", "0.5523646", "0.55194396", "0.5509775", "0.549826", "0.5482996", "0.54808754", "0.5478942", "0.5477677", "0.5477555", "0.5473201", "0.5458015", "0.54551697", "0.5453125", "0.5453125", "0.5453125", "0.5453125", "0.5453125", "0.5453125", "0.5453125", "0.54478544", "0.54195416", "0.5417873", "0.54090893", "0.54090893", "0.54090893", "0.54090893", "0.5403726", "0.5377591", "0.5375748", "0.53672624", "0.5365633", "0.53635377", "0.53617185", "0.5356588", "0.53482455", "0.5344981", "0.5344981", "0.5344981", "0.5344981", "0.5344293", "0.5343975", "0.5343506", "0.53430957", "0.5341137", "0.53382105", "0.53314495", "0.5329013", "0.53155893", "0.5311137", "0.5310114", "0.53094816", "0.5307613", "0.53035617", "0.53033465", "0.53006583", "0.5296715", "0.5296715", "0.5293226" ]
0.0
-1
Creates a new Cuckoo Filter ``key`` with an initial ``capacity`` of items.
def cfCreate(self, key, capacity, expansion=None, bucket_size=None, max_iterations=None):
    params = [key, capacity]
    self.appendExpansion(params, expansion)
    self.appendBucketSize(params, bucket_size)
    self.appendMaxIterations(params, max_iterations)
    return self.execute_command(self.CF_RESERVE, *params)
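A hedged sketch of reserving and probing a cuckoo filter through this wrapper. ``client`` is again an assumed instance of the class above, the key and sizing values are illustrative, and the CF.ADD/CF.EXISTS calls go through the generic ``execute_command`` because no dedicated wrappers appear in this record.

# Reserve space for roughly one million items; the optional arguments tune the
# sub-filter growth factor, slots per bucket, and cuckoo-kick attempts.
client.cfCreate("seen:urls", 1_000_000, expansion=2, bucket_size=4, max_iterations=20)

client.execute_command("CF.ADD", "seen:urls", "https://example.com/a")
# The server replies 1 for a (possible) hit, 0 for a definite miss.
assert client.execute_command("CF.EXISTS", "seen:urls", "https://example.com/a") == 1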
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, capacity=100):\n \n self.capacity = capacity\n self.size = 0\n self._keys = []\n self._entry = [[] for _ in range(capacity)]", "def __init__(self, capacity):\n self.capacity = capacity # Number of buckets in the hash table\n self.storage = [None] * capacity\n self.key_count = 0", "def bfCreate(self, key, errorRate, capacity, expansion=None, noScale=None):\n params = [key, errorRate, capacity]\n self.appendExpansion(params, expansion)\n self.appendNoScale(params, noScale)\n\n return self.execute_command(self.BF_RESERVE, *params)", "def __init__(self, capacity, fillValue=None):\r\n self._items = list()\r\n for count in range(capacity):\r\n self._items.append(fillValue)", "def __init__(self, capacity, fillValue = None):\n \n self._items = list() \n self._fillValue = fillValue\n self._DEFAULT_CAPACITY = capacity\n self._logicalSize = 0 #as required by exercise 1\n \n \n for count in xrange(capacity):\n self._items.append(self._fillValue)", "def __init__(self, capacity, initial):\n\t\tself.capacity = capacity\n\t\tself.amount = initial", "def __init__(self, capacity, fillValue=None):\r\n self._items = list()\r\n self._logicalSize = 0\r\n # Track the capacity and fill value for adjustments later\r\n self._capacity = capacity\r\n self._fillValue = fillValue\r\n for count in range(capacity):\r\n self._items.append(fillValue)", "def __init__(self, key):\n self.key = key\n self.BLOCK_SIZE = 16", "def __init__(self, key):\n\n def keys(key, num_rounds):\n \"\"\"Yields the permuted key bitstring for i = 1..num_rounds\"\"\"\n C, D = key[:28], key[28:]\n # Rounds are 1-indexed, so shift array over by one\n left_shifts = [None, 1, 1, 2, 2, 2, 2, 2, 2, 1, 2, 2, 2, 2, 2, 2, 1]\n for i in range(1, num_rounds + 1):\n # Negate each rotation to rotate left.\n C, D = rotate(C, -left_shifts[i]), rotate(D, -left_shifts[i])\n yield self.permute(C + D, self._CD_permutation)\n\n self.key = list(bits_of(key, 64))\n # Permute the key. The permutation discards the parity bits...\n self.key = self.permute(self.key, self._key_permutation)\n self.number_of_rounds = 16\n # A list of the 16 keys K1 .. 
K16, shifted over by one to allow 1-indexing.\n self.keys = [None] + list(keys(self.key, self.number_of_rounds))", "def __init__(self, capacity, units=0):\n self.capacity = capacity\n self.units = units", "def __init__(self, key=None):\n self.key = key", "def __init__(self, k: int):\r\n self.capacity = k\r\n self.frontIndex = 0\r\n self.lastIndex = 1\r\n self.deque = [0] * self.capacity\r\n self.size = 0 # current size\r", "def __init__(self, key):\n self.key = key", "def new_key(self, key_name=None):\r\n return self.key_class(self, key_name)", "def __init__(self, key: bytearray):\n self.__key = key\n self.__KSA(bytearray([i for i in range(256)]))", "def __init__(self, key, default=NOT_GIVEN):\n self.key = adapt(key,IComponentKey)\n self.default = default", "def __init__(self, knapsack_size, items):\n self.knapsack_size = knapsack_size\n self.items = items\n self._cache = dict()\n # fill-in the cache with base cases' (subproblems') solutions\n for size in range(knapsack_size + 1):\n # if there are no items, the max value is 0\n self._cache[(0, size)] = 0\n for end in range(len(items) + 1):\n # if the knapsack's size is 0 no items fit, the max value is 0\n self._cache[(end, 0)] = 0", "def __init__(self, capacity, operation, neutral_element):\n assert capacity > 0 and capacity & (capacity - 1) == 0, \"capacity must be positive and a power of 2.\"\n self._capacity = capacity\n self._value = [neutral_element for _ in range(2 * capacity)]\n self._operation = operation", "def __init__(self, capacity, operation, neutral_element):\n assert capacity > 0 and capacity & (capacity - 1) == 0, \"capacity must be positive and a power of 2.\"\n self._capacity = capacity\n self._value = [neutral_element for _ in range(2 * capacity)]\n self._operation = operation", "def __init__(self, capacity, operation, neutral_element):\n assert capacity > 0 and capacity & (capacity - 1) == 0, \"capacity must be positive and a power of 2.\"\n self._capacity = capacity\n self._value = [neutral_element for _ in range(2 * capacity)]\n self._operation = operation", "def __init__(self, k: int):\n self.capacity = k\n self.head = None\n self.tail = None\n self.count = 0", "def __init__(self, capacity, operation, neutral_element):\n assert (\n capacity > 0 and capacity & (capacity - 1) == 0\n ), \"capacity must be positive and a power of 2.\"\n self._capacity = capacity\n self._value = [neutral_element for _ in range(2 * capacity)]\n self._operation = operation\n self.neutral_element = neutral_element", "def __init__(self, server, bfkeypreffix, capacity, error_rate=0.001):\n if not (0 < error_rate < 1):\n raise ValueError(\"Error_Rate must be between 0 and 1.\")\n if not capacity > 0:\n raise ValueError(\"Capacity must be > 0\")\n # given M = num_bits, k = num_slices, P = error_rate, n = capacity\n # k = log2(1/P)\n # solving for m = bits_per_slice\n # n ~= M * ((ln(2) ** 2) / abs(ln(P)))\n # n ~= (k * m) * ((ln(2) ** 2) / abs(ln(P)))\n # m ~= n * abs(ln(P)) / (k * (ln(2) ** 2))\n num_slices = int(math.ceil(math.log(1.0 / error_rate, 2)))\n bits_per_slice = int(math.ceil(\n (capacity * abs(math.log(error_rate))) /\n (num_slices * (math.log(2) ** 2))))\n if bits_per_slice > MAX_PER_SLICE_SIZE:\n raise ValueError(\"Capacity and error_rate make per slice size extended, MAX_PER_SLICE_SIZE is %s\" % (MAX_PER_SLICE_SIZE))\n self._setup(error_rate, num_slices, bits_per_slice, capacity, 0, server, bfkeypreffix)", "def __init__(self, capacity=4):\n self.capacity = capacity\n self.size = 0\n self.table = [None] * capacity", "def 
knapsack(items, capacity):\r\n pass", "def __init__(self, capacity=2):\r\n self._capacity = capacity\r\n self._data = [0] * self._capacity\r\n self._size = 0", "def __init__(__self__, *,\n capacity: Optional[int] = None,\n name: Optional[str] = None):\n if capacity is not None:\n pulumi.set(__self__, \"capacity\", capacity)\n if name is not None:\n pulumi.set(__self__, \"name\", name)", "def __init__(self, key):\n self.bs = 16\n self.key = hashlib.sha256(key.encode()).digest()", "def __init__(self, k, num_buckets, fp_size, bucket_size, max_iter):\n self.children: List[Node] = []\n self.parent: Optional[Node] = None\n self.filter = CuckooFilterBit(num_buckets, fp_size, bucket_size, max_iter)\n\n self.dataset_id: Optional[str] = None\n self.k = k", "def __init__(self, key):\n self.key = [int_mapping(k) for k in key]", "def __init__(self, k):\n self.capacity = k\n self.head = None\n self.tail = None\n self.count = 0", "def __init__(self, key):\n self.block_size = 16\n self.cipher = Cipher(algorithms.AES(key), modes.ECB(), default_backend())", "def __init__(\n self, capacity: int, operation: Any, neutral_element: Optional[Any] = None\n ):\n\n assert (\n capacity > 0 and capacity & (capacity - 1) == 0\n ), \"Capacity must be positive and a power of 2!\"\n self.capacity = capacity\n if neutral_element is None:\n neutral_element = (\n 0.0\n if operation is operator.add\n else float(\"-inf\")\n if operation is max\n else float(\"inf\")\n )\n self.neutral_element = neutral_element\n self.value = [self.neutral_element for _ in range(2 * capacity)]\n self.operation = operation", "def __init__(self,key):\n self.block_size = 32\n self.key = hashlib.sha256(key).digest()", "def _newKey(self, key):\n pass", "def __init__(self):\n self.hashmap = [[] for _ in range(self._cap)]", "def __init__(self, key):\n self._block_size = AES.block_size\n self._key = hashlib.sha256(get_as_bytes(key)).digest()", "def __init__(self, capacity):\n self.capacity = capacity\n self.map = {}\n self.head = self.Node(0, 0)\n self.tail = self.Node(0, 0)\n self.head.next = self.tail\n self.tail.pre = self.head\n self.cnt = 0", "def __init__(self, capacity: int, function) -> None:\n self.buckets = DynamicArray()\n for _ in range(capacity):\n self.buckets.append(LinkedList())\n self.capacity = capacity\n self.hash_function = function\n self.size = 0", "def __init__(self, key):\n if len(key) > KEY_SIZE:\n raise ParameterError(\"Key must be <%d bytes\" % (KEY_SIZE))\n\n self.key = key.ljust(KEY_SIZE, b\"\\xff\")\n self.encryptIV = b\"\\xff\" * BLOCK_SIZE\n self.decryptIV = b\"\\xff\" * BLOCK_SIZE\n self.remainingData = b\"\"\n self.oldDecrypt = b\"\"", "def __init__(self, key):\n try:\n # Python 2\n if not isinstance(key, (unicode, str)):\n raise TypeError('key is not of type unicode or str.')\n except NameError:\n # Python 3\n if not isinstance(key, str):\n raise TypeError('key is not of type str.')\n\n self.key = key\n\n logger.debug('WU(key: %s)', self.key)", "def cfInsert(self, key, items, capacity=None, nocreate=None):\n params = [key]\n self.appendCapacity(params, capacity)\n self.appendNoCreate(params, nocreate)\n self.appendItems(params, items)\n\n return self.execute_command(self.CF_INSERT, *params)", "def __init__(self, k: int):\n self.queue = [0]*k\n self.headIndex = 0\n self.count = 0\n self.capacity = k", "def __init__(\n self,\n key, # type: Key\n exclude_from_indexes=() # type: Iterable[str]\n ):\n self.key = key\n self.exclude_from_indexes = set(exclude_from_indexes)\n self.properties = {}", "def __init__(self, key, 
value):\n self.key = key\n self.value = value", "def __init__(self, k: int):\n self.front = 0\n self.rear = 0\n self.capacity = k + 1\n self.arr = [0 for _ in range(self.capacity)]", "def new(key,mode=MODE_ECB,IV=None,counter=None,segment_size=None):\n return AES(key,mode,IV,counter,segment_size)", "def __init__(__self__, *,\n capacity: Optional[pulumi.Input[int]] = None,\n family: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None):\n if capacity is not None:\n pulumi.set(__self__, \"capacity\", capacity)\n if family is not None:\n pulumi.set(__self__, \"family\", family)\n if name is not None:\n pulumi.set(__self__, \"name\", name)", "def __init__(self, aKey):\n self.key = aKey\n\n # CRC can be used to validate a key (very roughly)\n # if you store the CRC from a previous keyword\n # and then compare with a newly generated one and\n # they are the same then chances are the keyword\n # is correct - only a single byte so not that reliable\n self.crc = 0 \n for x in self.key:\n intX = ord(x)\n self.crc = self.crc ^ intX", "def __init__(self, k):\n self.queue = [0]*k\n self.headIndex = 0\n self.count = 0\n self.capacity = k", "def __init__(__self__, *,\n capacity: Optional[pulumi.Input[int]] = None,\n name: Optional[pulumi.Input[str]] = None,\n tier: Optional[pulumi.Input[str]] = None):\n if capacity is not None:\n pulumi.set(__self__, \"capacity\", capacity)\n if name is not None:\n pulumi.set(__self__, \"name\", name)\n if tier is not None:\n pulumi.set(__self__, \"tier\", tier)", "def __init__(self, capacity: int):\n self._pax_with_carry_on = PaxStack()\n self._pax_without_carry_on = PaxStack()\n self._capacity = capacity\n self._current_pax = 0", "def __init__(self, iterable_input, batch_size, buckets, pad_index, only_full=False, field=None,\n shuffle=False, buffer_size=None, name='Bucket Batch', verbose=True):\n super().__init__(iterable_input=iterable_input, name=name, verbose=verbose)\n self.batch_size = batch_size\n self.buckets = buckets\n self.max_length = buckets[-1]\n self.pad_index = pad_index\n self.only_full = only_full\n self.field = field\n self.shuffle = shuffle\n self.buffer_size = self.batch_size if buffer_size is None else buffer_size", "def __init__(self):\n # better to be a prime number, less collision\n self.key_space = 2069\n self.hash_table = [Bucket() for i in range(self.key_space)]", "def __init__(self):\n self.bucket_of_keys = {}\n self.buckets = LinkedList()", "def __init__(self):\n self.capacity = 10000\n self.table = [[] for _ in range(self.capacity)]", "def __init__(self, key, value, *args, **options):\n\n super().__init__()\n\n self._created_on = time.time() * 1000\n self._key = key\n self._value = self._prepare_cache(value)", "def clamp(self, key):\n\t\treturn DiscreteDistribution({ k : 0. if k != key else 1. 
for k in self.keys() })", "def initialize(self):\n self.keys = [None] * BUCKET_SIZE\n self.values = [None] * BUCKET_SIZE", "def __init__(__self__,\n resource_name: str,\n args: CryptoKeyArgs,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...", "def __init__(self, key, value):\n self._key = key\n self._value = value", "def __init__(self, capacity=10):\n\n self._board = [None] * capacity # list of 10 None elements\n self._n = 0 # number of actual entries", "def __init__(self, key):\n Base.__init__(self, key)", "def __init__(self, key):\n Base.__init__(self, key)", "def __init__(self, items, ratings, features, K):\n\n self.items = items\n self.ratings = ratings\n self.features = features\n self.K = K", "def __init__(__self__, *,\n bucket: pulumi.Input[str],\n key: pulumi.Input[str]):\n pulumi.set(__self__, \"bucket\", bucket)\n pulumi.set(__self__, \"key\", key)", "def __init__(self, key: bytes):\n\n if len(key) != 32:\n raise ValueError('Key must be 32 bytes long')\n self.key = key", "def __init__(self, items: List[T], min_freq: int = 1):\n counter_ = Counter(items)\n unique_items = [x for x, freq in counter_.items() if freq >= min_freq]\n self._dict = {item: i + 1 for i, item in enumerate(unique_items)}\n self._items: List[Union[str, T]] = [\"UNK\"]\n self._items.extend(unique_items)", "def __init__(self):\n self.capacity = 1000\n self.data = [None]*self.capcity", "def new(num_buckets=256):\n aMap=[]", "def new(num_buckets=256):\n\taMap = [] #creating empty list aMap\n\tfor i in range(0, num_buckets):\n\t\taMap.append([]) #append num_buckets into aMap\n\treturn aMap", "def __init__(self, k: int):\n self.queue = [0]*k\n self.headIndex = 0\n self.count = 0\n self.capacity = k\n # the additional attribute to protect the access of our queue\n self.queueLock = Lock()", "def __init__(__self__, *,\n name: pulumi.Input[Union[str, 'SkuType']],\n capacity: Optional[pulumi.Input[int]] = None):\n pulumi.set(__self__, \"name\", name)\n if capacity is None:\n capacity = 1\n if capacity is not None:\n pulumi.set(__self__, \"capacity\", capacity)", "def __init__(self):\n self._key = ''", "def __init__(__self__, *,\n capacity: Optional[pulumi.Input[int]] = None,\n name: Optional[pulumi.Input[str]] = None,\n tier: Optional[pulumi.Input[str]] = None):\n if capacity is not None:\n pulumi.set(__self__, \"capacity\", capacity)\n if name is None:\n name = 'S0'\n if name is not None:\n pulumi.set(__self__, \"name\", name)\n if tier is None:\n tier = 'Standard'\n if tier is not None:\n pulumi.set(__self__, \"tier\", tier)", "def create_capacity_limiter(total_tokens: float) -> abc.CapacityLimiter:\n return get_asynclib().CapacityLimiter(total_tokens)", "def new_key(self, key_name=None, key_type=Key.KEY_REGULAR_FILE):\r\n if key_name == '-':\r\n return Key(self.name, '-', key_type=Key.KEY_STREAM_WRITABLE)\r\n else:\r\n dir_name = os.path.dirname(key_name)\r\n if dir_name and not os.path.exists(dir_name):\r\n os.makedirs(dir_name)\r\n fp = open(key_name, 'wb')\r\n return Key(self.name, key_name, fp)", "def new(num_buckets=256):\n\t#sets aMap variable to an empty list\n\t#then fills that list with the specified number of other empty lists ('buckets') \n\t#returns the new aMap\n\taMap = []\n\tfor i in range(0, num_buckets):\n\t\taMap.append([])\n\treturn aMap", "def new_simple(self, key, value):\n\n s = self._new_simple()\n s.key = key\n s.value = value\n return s", "def __init__(self, key=\"\", itype=INDEX_TYPE_ALP, level=1):\n self.key = key\n self.type = itype\n self.level = level", "def 
new(num_buckets=256):#用空列表初始化字典\n\taMap=[]\n\tfor i in range(num_buckets):\n\t\taMap.append([])\n\treturn aMap", "def __init__(__self__, *,\n key_data: pulumi.Input[str]):\n pulumi.set(__self__, \"key_data\", key_data)", "def __init__(self):\n super(KeyIterator, self).__init__()\n self.iterator = self.ValueIterator()", "def init_capacities(self, G):\n\n if self.capacity_function is not None:\n # Initialize all the capacities, either pass topo object or don't depending on signature\n if len(inspect.signature(self.capacity_function).parameters) == 2:\n for (u, v) in G.edges:\n G.edges[u, v]['capacity'] = self.capacity_function(u, v)\n elif len(inspect.signature(self.capacity_function).parameters) == 3:\n for (u, v) in G.edges:\n G.edges[u, v]['capacity'] = self.capacity_function(u, v, self)\n return G", "def __init__(__self__, *,\n name: str,\n capacity: Optional[int] = None,\n family: Optional[str] = None,\n size: Optional[str] = None,\n tier: Optional[str] = None):\n pulumi.set(__self__, \"name\", name)\n if capacity is not None:\n pulumi.set(__self__, \"capacity\", capacity)\n if family is not None:\n pulumi.set(__self__, \"family\", family)\n if size is not None:\n pulumi.set(__self__, \"size\", size)\n if tier is not None:\n pulumi.set(__self__, \"tier\", tier)", "def __init__(__self__, *,\n name: str,\n capacity: Optional[int] = None,\n family: Optional[str] = None,\n size: Optional[str] = None,\n tier: Optional[str] = None):\n pulumi.set(__self__, \"name\", name)\n if capacity is not None:\n pulumi.set(__self__, \"capacity\", capacity)\n if family is not None:\n pulumi.set(__self__, \"family\", family)\n if size is not None:\n pulumi.set(__self__, \"size\", size)\n if tier is not None:\n pulumi.set(__self__, \"tier\", tier)", "def __init__(self):\n self.size = 1000\n self.bucket = [None] * self.size", "def add(self, key):\n if key in self:\n return True\n if not self.filters:\n filter = RedisLocalBloomFilter(\n server=self.server, \n bfkeypreffix = self.FILTER_KEY_FMT % (self.bfkeypreffix, self.filter_count),\n capacity=self.initial_capacity,\n error_rate=self.error_rate * (1.0 - self.ratio))\n self.filter_count += 1\n self.filters.append(filter)\n else:\n filter = self.filters[-1]\n if filter.count >= filter.capacity:\n capacity = filter.capacity * self.scale\n if capacity > MAX_PER_SLICE_SIZE:\n capacity = MAX_PER_SLICE_SIZE\n filter = RedisLocalBloomFilter(\n server=self.server,\n bfkeypreffix = self.FILTER_KEY_FMT % (self.bfkeypreffix, self.filter_count),\n capacity=capacity,\n error_rate=self.error_rate * (1.0 - self.ratio))\n self.filter_count += 1\n self.filters.append(filter)\n if self.max_filters > 0 and len(self.filters) >= self.max_filters:\n f = self.filters[0]\n f.clear()\n del self.filters[0]\n filter.add(key, skip_check=True)\n return False", "def __init__(self, capacity):\n self.experiences = RingBuf(capacity)", "def __init__(self, k):\r\n self.maxlen = k\r\n self.queue = []", "def _create_capacity(self, m, comp, prod_name):\n name = comp.name\n cap_res = comp.get_capacity_var() # name of resource that defines capacity\n r = m.resource_index_map[comp][cap_res] # production index of the governing resource\n # production is always lower than capacity\n ## NOTE get_capacity returns (data, meta) and data is dict\n ## TODO does this work with, e.g., ARMA-based capacities?\n ### -> \"time\" is stored on \"m\" and could be used to correctly evaluate the capacity\n cap = comp.get_capacity(None, None, None, None)[0][cap_res] # value of capacity limit (units of governing 
resource)\n rule = partial(self._capacity_rule, prod_name, r, cap)\n constr = pyo.Constraint(m.T, rule=rule)\n setattr(m, '{c}_{r}_capacity_constr'.format(c=name, r=cap_res), constr)\n # minimum production\n print('DEBUGG dispatchable?', comp.name, comp.is_dispatchable())\n if comp.is_dispatchable() == 'fixed':\n minimum = cap\n var = getattr(m, prod_name)\n values = var.get_values()\n for k in values:\n values[k] = cap\n var.set_values(values)\n else:\n minimum = 0 # -> for now just use 0, but fix this! XXX\n print('DEBUGG ... min:', minimum)\n rule = partial(self._min_prod_rule, prod_name, r, cap, minimum)\n constr = pyo.Constraint(m.T, rule=rule)\n setattr(m, '{c}_{r}_minprod_constr'.format(c=name, r=cap_res), constr)", "def __new__(cls, *_args, **kwargs):\n if _args:\n if len(_args) == 1 and isinstance(_args[0], dict):\n if kwargs:\n raise TypeError('Key() takes no keyword arguments when a dict is the '\n 'the first and only non-keyword argument (for '\n 'unpickling).')\n kwargs = _args[0]\n else:\n if 'flat' in kwargs:\n raise TypeError('Key() with positional arguments '\n 'cannot accept flat as a keyword argument.')\n kwargs['flat'] = _args\n self = super(Key, cls).__new__(cls)\n # Either __reference or (__pairs, __app, __namespace) must be set.\n # Either one fully specifies a key; if both are set they must be\n # consistent with each other.\n if 'reference' in kwargs or 'serialized' in kwargs or 'urlsafe' in kwargs:\n (self.__reference,\n self.__pairs,\n self.__app,\n self.__namespace) = self._parse_from_ref(cls, **kwargs)\n elif 'pairs' in kwargs or 'flat' in kwargs:\n self.__reference = None\n (self.__pairs,\n self.__app,\n self.__namespace) = self._parse_from_args(**kwargs)\n else:\n raise TypeError('Key() cannot create a Key instance without arguments.')\n return self", "def __init__(self, capacity):\n self.capacity = capacity #this is example for list implementation\n self.head = [None] * capacity #this is example for list implementation\n self.num_items = 0 #this is example for list implementation", "def __init__(self, name, path, password=None, key_size=2048, **kwargs):\n self.key_size = key_size\n super().__init__(name, path, password)", "def _newKey(self, key):\n self._testKeySubNsAdd()\n self._getKeyList().append(key)", "def __init__(self):\n INIT_CAPACITY = 8\n LOAD_FACTOR = 2 / 3\n self.capacity = INIT_CAPACITY\n self.size = 0\n self.slots = [None] * INIT_CAPACITY\n self.load_factor = LOAD_FACTOR", "def key_gte(self, key_gte):\n\n self._key_gte = key_gte", "def __init__(__self__, *,\n name: pulumi.Input[Union[str, 'SkuName']],\n capacity: Optional[pulumi.Input[int]] = None):\n pulumi.set(__self__, \"name\", name)\n if capacity is not None:\n pulumi.set(__self__, \"capacity\", capacity)", "def __init__(__self__, *,\n active_capacity: int,\n capacity: Optional[int] = None,\n scale_type: Optional[str] = None):\n pulumi.set(__self__, \"active_capacity\", active_capacity)\n if capacity is not None:\n pulumi.set(__self__, \"capacity\", capacity)\n if scale_type is not None:\n pulumi.set(__self__, \"scale_type\", scale_type)", "def split_kbucket(self):\n new_kbucket = super(CachingKBucket, self).split_kbucket()\n\n cache_self, cache_new = util.partition(\n self._replacement_cache,\n self.contact_in_range\n )\n\n # Replacement caches are deques, so we can't directly assign\n # the values returned by partition.\n new_kbucket._replacement_cache.extend(cache_new)\n self._replacement_cache.clear()\n self._replacement_cache.extend(cache_self)\n\n self.fill_from_cache()\n 
new_kbucket.fill_from_cache()\n\n return new_kbucket" ]
[ "0.63611585", "0.6257026", "0.6124389", "0.6099812", "0.5895444", "0.5895253", "0.58910316", "0.58668506", "0.58037305", "0.5686704", "0.5662281", "0.5616573", "0.5591175", "0.5572409", "0.5540789", "0.5507857", "0.54945195", "0.5490829", "0.5490829", "0.5490829", "0.5427748", "0.54269576", "0.5423259", "0.54201984", "0.54197884", "0.5410173", "0.54084855", "0.5400501", "0.5374076", "0.5364273", "0.53638816", "0.5356164", "0.53344506", "0.5329512", "0.5327628", "0.53056324", "0.5291093", "0.52861816", "0.5282006", "0.5277882", "0.5274128", "0.52223444", "0.5220989", "0.5203331", "0.51793027", "0.51748425", "0.5171664", "0.51655793", "0.515555", "0.5147634", "0.51393837", "0.5137214", "0.50950557", "0.508413", "0.5081705", "0.50732636", "0.5071", "0.50671315", "0.5064427", "0.5057981", "0.50554585", "0.50512934", "0.50395083", "0.50395083", "0.5038021", "0.5027017", "0.50251585", "0.50176483", "0.5014096", "0.50069946", "0.49990475", "0.49965277", "0.49942744", "0.49875548", "0.49865004", "0.49819386", "0.49730384", "0.49693877", "0.4968084", "0.4967134", "0.49607474", "0.49574447", "0.4955572", "0.49416482", "0.49395227", "0.49395227", "0.49311602", "0.4929278", "0.49267834", "0.49255708", "0.49249747", "0.4920603", "0.4918189", "0.4911496", "0.4903546", "0.4888686", "0.4887327", "0.4881416", "0.48540735", "0.48451602" ]
0.675632
0
Adds an ``item`` to a Cuckoo Filter ``key``.
def cfAdd(self, key, item): params = [key, item] return self.execute_command(self.CF_ADD, *params)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def bfAdd(self, key, item):\n params = [key, item]\n \n return self.execute_command(self.BF_ADD, *params)", "def add_item(self, key, item):\n self[key].add(item)\n try:\n self._reverse_store[item].add(key)\n except KeyError:\n self._reverse_store[item] = set([key])", "def add(self, item):\n self._dict[item] = item", "def append(self, item):\n # FIXME: this is only append if the key isn't already present\n key, value = item\n self._main[key] = value", "def add(self, key, value):", "def filter_matches_add(self, key, value):\n\t\tif key in self.filter_matches.keys():\n\t\t\tself.filter_matches[key].append(value)\n\t\telse:\n\t\t\tself.filter_matches[key]=[value]", "def add(self, item):\n self.num_item += 1\n indexs = self.__get_indexs(item)\n for index in indexs:\n self.filter_bitarray[index] = True", "def _add_item_by_item(self, item):\n self.item_list[item.call_number] = item", "def cfAddNX(self, key, item):\n params = [key, item]\n \n return self.execute_command(self.CF_ADDNX, *params)", "def add(self, item):", "def add_item (self, item):\n new_item = CacheItem (item)\n cached = self.cache.get(hash(item))\n if cached is None:\n self.evict_or_add (new_item)\n cached.hits += 1", "def put(self, key, item):\n if key and item:\n self.cache_data[key] = item", "def put(self, key, item):\n if key and item:\n self.cache_data[key] = item", "def add_filter(self, filter):\n self._filters.append(filter.as_dict())", "def add_item(self, item):\n self.items.append(item)", "def add(self, key, value):\n self.data.append((key, value))", "def add(self, key):\n if key in self:\n return True\n if not self.filters:\n filter = RedisLocalBloomFilter(\n server=self.server, \n bfkeypreffix = self.FILTER_KEY_FMT % (self.bfkeypreffix, self.filter_count),\n capacity=self.initial_capacity,\n error_rate=self.error_rate * (1.0 - self.ratio))\n self.filter_count += 1\n self.filters.append(filter)\n else:\n filter = self.filters[-1]\n if filter.count >= filter.capacity:\n capacity = filter.capacity * self.scale\n if capacity > MAX_PER_SLICE_SIZE:\n capacity = MAX_PER_SLICE_SIZE\n filter = RedisLocalBloomFilter(\n server=self.server,\n bfkeypreffix = self.FILTER_KEY_FMT % (self.bfkeypreffix, self.filter_count),\n capacity=capacity,\n error_rate=self.error_rate * (1.0 - self.ratio))\n self.filter_count += 1\n self.filters.append(filter)\n if self.max_filters > 0 and len(self.filters) >= self.max_filters:\n f = self.filters[0]\n f.clear()\n del self.filters[0]\n filter.add(key, skip_check=True)\n return False", "def __setitem__(self, key, item):\n self.set_field(key, item)", "def add_item(dct,item):\r\n if item not in dct[0]:\r\n print \"itemNotFound \",str(item)\r\n return False\r\n\r\n num=len(item)\r\n if num in dct:\r\n if item in dct[num]:\r\n return False\r\n else:\r\n dct[num].append(item)\r\n return True\r\n else:\r\n dct[num]=[item]\r\n return True", "def add_to_inventory(self, item, quantity):\n\t\tincreaseQuantity = None\n\t\taddToDict = True\n\t\tfor key in self.inventoryDictionary:\n\t\t\tif key.name == item.name:\n\t\t\t\taddToDict = False\n\t\t\t\tincreaseQuantity = key\n\t\t\t\tbreak\n\t\t\t\t\n\n\t\t\telse:\n\t\t\t\taddToDict = True\n\t\t\t\t\n\n\t\tif addToDict:\n\t\t\tself.inventoryDictionary[item] = quantity\n\t\telse:\n\t\t\tself.inventoryDictionary[increaseQuantity] += quantity", "def add(self, key, value):\n self._data.add_last(self._Item(key, value))", "def add_item(self, item):\n self.items_with_price.update(item)", "def put(self, key, item):\n raise NotImplementedError(\"put must be implemented in 
your cache class\")", "def add(self, item: Mapping[Hashable, Any], **kwargs: Any) -> None:\n self.contents.update(item, **kwargs)\n return", "def add(self, key, val):\n self.obtain(key).append(val)", "def add(self, item, issue):\n if self.has_key(item):\n self[item].append(issue)\n else:\n self[item] = [issue]\n return 1", "def _single_setitem(self, key, item):\n self._dict[key] = item", "def add_item(self, item_to_append):\n self.items.append(item_to_append)", "def addItem(*args):", "def addItem(*args):", "def addItem(*args):", "def put(self, key, item):\n # Do nothing if key is NoneType or item is NoneType\n if (key is None or item is None):\n return\n\n # If Key exists, update Cache Item\n if key in self.cache_data:\n for ci in self.LFU:\n if ci.key is key:\n ci.updateItem(item)\n else:\n # Length will be longer than max capacity, make room\n if len(self.cache_data) == self.MAX_ITEMS:\n\n # Determine discarded cache item\n discard = self.LFU[0]\n for x in self.LFU:\n if x.freq < discard.freq or \\\n x.freq == discard.freq and x.age > discard.age:\n discard = x\n\n # Discard Cache Item\n print(\"DISCARD: {}\".format(discard.key))\n del self.cache_data[discard.key]\n self.LFU.remove(discard)\n\n # Add new Cache Item\n data = LFUCacheItem(key, item, 0, 0)\n self.LFU.append(data)\n\n # increase age of all items\n for x in self.LFU:\n x.age += 1\n\n self.cache_data[key] = item", "def add(self, key, value):\n\t\tself.__add_key_to_bt(key)[3] = self.__add_key_value_to_ll(key, value)", "def add(self, item: Any) -> None:\n pass", "def add(self, item):\n self.update(set([item]))", "def add_item(self, item):\n self.items.append(item)\n self.length += 1", "def add_item(self, item_id, item_title, score, filter_stopwords=False):\n with self._r.pipeline() as pipe:\n for prefix in self._prefixes(item_title, filter_stopwords=filter_stopwords):\n pipe.zadd(prefix, item_id, score)\n pipe.hset('$titles', item_id, item_title)\n pipe.execute()\n return True", "def add_item(self, key, data):\n hash_key = self.count_hash(key, len(self.slots))\n\n if self.slots[hash_key] is None:\n self.slots[hash_key] = key\n self.data[hash_key] = data\n else:\n if self.slots[hash_key] == key:\n self.data[hash_key] = data\n elif isinstance(self.slots[hash_key], int):\n self.slots[hash_key] = (self.slots[hash_key], key,)\n self.data[hash_key] = (self.data[hash_key], data,)\n elif len(self.slots[hash_key]) > 1:\n list_slot = list(self.slots[hash_key])\n list_data = list(self.data[hash_key])\n list_slot.append(key)\n list_data.append(data)\n self.slots[hash_key] = tuple(list_slot)\n self.data[hash_key] = tuple(list_data)", "def put(self, key, item):\n if key is None or item is None:\n return\n self.cache_data[key] = item", "def addItem(self, item):\n self.items.append(item)\n self.totalWeight += item", "def add(self, key):\n\t\t#super(CountingBloomFilter, self).add(key)\n\t\t#super(CountingBloomFilter, self).generateStats()\n\t\tfor i in self.getBitArrayIndices(key):\n\t\t\tself.ba[i] += 1\n\t\tself.n += 1", "def add_item(self, item: _T) -> None:\n if item not in self.item_to_index:\n self.item_to_index[item] = len(self.index_to_item)\n self.index_to_item.append(item)", "def add(self, key, value):\n newest = self._Item(key, value) # make new item instance\n walk = self._data.last() # walk backward looking for smaller key\n while walk is not None and newest < walk.element():\n walk = self._data.before(walk)\n if walk is None:\n self._data.add_first(newest) # new key is smallest\n else:\n self._data.add_after(walk, newest) # newest 
goes after walk", "def append(self, item, **data):\n self._items.append(item)\n if data:\n self._data[item] = data", "def add(self, item):\n item = self._prepare_item(len(self), item)\n if item not in self._data:\n self._data.append(item)\n self.__log__.append(SetAdd(value=item))", "def _insert_item(self, key: _KT, value: _VT) -> None:\n dict.__setitem__(self, key, value)", "def put(self, key, item):\n if key or item:\n self.cache_data[key] = item\n if len(self.cache_data) > BaseCaching.MAX_ITEMS:\n print(\"DISCARD: {}\".format(self.last))\n del self.cache_data[self.last]\n self.last = key", "def add(self, new_filter: Filter) -> None:\r\n self.filters.append(new_filter)", "def add(self, key, value):\r\n newest = Item(key, value) # make new item instance\r\n walk = self.data.last() # walk backward looking for smaller key\r\n while walk is not None and newest < walk.element():\r\n walk = self.data.before(walk)\r\n if walk is None:\r\n self.data.add_first(newest) # new key is smallest\r\n else:\r\n self.data.add_after(walk, newest) # newest goes after walk\r", "def add(self, key, value):\n raise NotImplementedError('must be implemented by subclass')", "def add(self, key, value):\n raise NotImplementedError('must be implemented by subclass')", "def add_to_group(self,item):\n self.items.append(item)\n self.n += 1", "def adauga(self, item):\n if item in self._items:\n raise RepoError(\"item deja existent!\\n\")\n self._items.append(item)", "def add_item(key, obj, dst):\n\n if key not in dst:\n dst[key] = []\n dst[key].append(obj)", "def add_item(product, price):\n ADD_PRODUCTS[product] = price", "def put(self, key, item):\n if key is not None and item is not None:\n self.cache_data[key] = item\n if key not in self.cache_list:\n self.cache_list.append(key)\n if len(self.cache_data) > self.MAX_ITEMS:\n popped_key = self.cache_list.pop(0)\n print(f\"DISCARD: {popped_key}\")\n del self.cache_data[popped_key]", "def add_item(self, item):\n item_exists = self.get_item(item.id)\n\n if item_exists:\n item_exists._increment_quantity(item.quantity)\n else:\n self.items.append(item)", "def add_key(mu_key):\n params['key'] = mu_key", "def add(self, item):\n\n if item not in self:\n self._index_map[item] = len(self._list)\n self._list.append(item)", "def add_new(self, item, key):\n if key in self._items:\n raise DuplicateListHeapItemException(key)\n if len(self._items) >= self._max_limit:\n raise MaxItemLimitReachedException()\n self._items[key] = item\n self._listbox.insert(END, key)", "def put(self, key, item):\n if key and item:\n if key in self.cache_data.keys():\n self.cache_data[key] = item\n self.stack.append(key)\n else:\n if len(self.cache_data.keys()) < self.MAX_ITEMS:\n self.cache_data[key] = item\n self.stack.append(key)\n elif len(self.cache_data.keys()) == self.MAX_ITEMS:\n k = self.stack.pop(-1)\n self.cache_data.pop(k)\n print(\"DISCARD: {}\".format(k))\n self.cache_data[key] = item\n self.stack.append(key)", "def add_item(self, item):\n self.items.append(item)\n self.item_count += 1\n self.max_length = max(self.max_length, len(item.samples))", "def append(self, item):\n self.items.append(item)", "def add(self, key, value):\n self._store[key] = value", "def put(self, key, item):\n # Do nothing if key is NoneType or item is NoneType\n if (key is None or item is None):\n return\n\n if key in self.cache_data:\n self.LRU = [ci for ci in self.LRU if ci.key != key]\n\n # increase age of all items\n for x in self.LRU:\n x.age += 1\n\n self.cache_data[key] = item\n data = LRUCacheItem(key, item, 0)\n 
self.LRU.append(data)\n\n # Length is longer than max capacity, make room\n if len(self.cache_data) > self.MAX_ITEMS:\n discard = self.LRU[0]\n for x in self.LRU:\n if x.age > discard.age:\n discard = x\n print(\"DISCARD: {}\".format(discard.key))\n del self.cache_data[discard.key]\n self.LRU.remove(discard)", "def __setitem__(self, key, obj):\n self.add(key, obj, self._mode)", "def add_item(self, item: Item):\n self.__items_list.append(item)", "def add(self, key, value):\n new = self._Item(key, value)\n\n if self.is_empty():\n self._data.append(new)\n else:\n for i, item in enumerate(self._data):\n if new <= item:\n self._data.insert(i, new)\n break\n if i == len(self) - 1:\n self._data.append(new)\n break", "def __setitem__(self, key, value):\n index=self._index(key)\n if index==-1:\n self._item.append(Item(key,value))\n self._size+=1\n else:\n self._item[index].value=value", "def add(self, key, value):\n if not key in self:\n self.keys.append(key)\n self.dict[key] = value", "def add_to_bag(self, item):\n self._bag.append(item)", "def add(self, key, value):\n self._data.append(self._Item(key, value))\n self._upheap(len(self._data) - 1) # upheap newly added position", "def addKey(self, time, name, value, view) -> None:\n ...", "def add(self, key, value):\n self._data.append(self._Item(key, value))\n self._upheap(len(self._data) - 1) # upheap newly added position", "def add(self, key):\r\n if key not in self.map:\r\n end = self.end\r\n curr = end[PREV]\r\n curr[NEXT] = end[PREV] = self.map[key] = [key, curr, end]\r\n if self.emitter:\r\n self.emitter.emit()", "def put(self, key, item):\n if key is not None and item is not None:\n # modify the time and change the next newer value\n self.timesKey[key] = self.time\n self.time += 1\n\n # add the new item\n self.cache_data[key] = item\n\n if len(self.cache_data) > BaseCaching.MAX_ITEMS:\n discard_key = None\n newer = self.time - 2\n\n for _key, _value in self.timesKey.items():\n if newer == _value:\n discard_key = _key\n break\n\n # del key in time and cache data\n del self.cache_data[discard_key]\n del self.timesKey[discard_key]\n\n print(\"DISCARD: {}\".format(discard_key))", "def put(self, key, item):\n if key is None or item is None:\n return\n if key in self.key_tracker.keys():\n self.key_tracker.pop(key)\n if len(self.key_tracker) >= BaseCaching.MAX_ITEMS:\n x = self.most_recent_use_key()\n print(\"DISCARD: {}\".format(x))\n self.key_tracker.pop(x)\n self.cache_data.pop(x)\n self.cache_data.update({key: item})\n self.key_tracker.update({key: self.count})\n self.count += 1", "def addItem(self, key):\n if key in self.dictionary:\n raise Exception(\"Key already exist in dictionary\")\n self.dictionary[key] = WordInformation(self.MAX_RATING)", "def append(self, item):\n self.update([item])", "def add(self, item, order = 'append'):\n try:\n item = item.items\n except AttributeError:\n item = [item]\n\n self.items = {\n 'prepend': item + self.items,\n 'append': self.items + item\n }[order]", "def add(self, key, value):\n self._data.append(self._Item(key, value))\n self._up_heap(len(self) - 1)", "def put(self, item): \n self.__db.rpush(self.key, item)", "def insert(self, key):\r\n index = self.search(key)\r\n self.keys.insert(index, key)", "def additem(d, key, value):\n if key in d:\n if not isinstance(d[key], list):\n d[key] = [d[key]]\n d[key].append(value)\n else:\n d[key] = value", "def push(self, new_item):\n self.items.append(new_item)", "def topkAdd(self, key, *items):\n params = [key]\n params += items\n \n return 
self.execute_command(self.TOPK_ADD, *params)", "def add_item(self,itm,qty=1):\n inv = self.get_inventory()\n s = str(itm)\n inv[s] = inv.get(s, 0) + qty\n self.put_inventory(inv)", "def addKey(self, key, val):\n self.dict[key].append(val)", "def add_to_inventory(self, item):\n\t\tif item in self.inventory:\n\t\t\tself.inventory[item] += 1\n\t\telse:\n\t\t\tself.inventory[item] = 1", "def _append_row(self, key, value, item):\n self._items.append(item)\n self.key_listbox.insert(tk.END, key)\n self.value_listbox.insert(tk.END, value)", "def add(self, key, val, expiry_time=0, min_compress_len=0):\n\t\treturn self._set(\"add\", key, val, expiry_time, min_compress_len)", "def internal_add(self,item,lookahead):\n assert isinstance(item, Item)\n assert isinstance(lookahead, LookaheadSet)\n index = item.reg_info.index\n assert isinstance(index,int)\n assert index not in self.id_to_item\n self.id_to_item[index] = item\n self.id_to_lookahead[index] = lookahead", "def add_value(self, key, value):\r\n if key in self:\r\n # We already have this key on the item.\r\n if not isinstance(self[key], list):\r\n # The key isn't already a list, take its current value and\r\n # convert it to a list with the only member being the\r\n # current value.\r\n self[key] = [self[key]]\r\n # Add the new value to the list.\r\n self[key].append(value)\r\n else:\r\n # This is a new attribute, just set it.\r\n self[key] = value", "def append_to(self, key, value):\n self.get_data()[key].append(value.get_data())", "def add(self, item):\n self._set(item, None)", "def addKey(self, time, value) -> None:\n ...", "def __iadd__(self,new_item):\n name = new_item.name\n # check for correct card type\n if self._card_class != None and not isinstance(new_item,self._card_class):\n raise TypeError('Submitted card not correct type!')\n # check for unique name\n if name in self._name2database:\n print name\n raise KeyError('Submitted card name already in use!')\n\n self._name2database[name] = new_item\n\n for category in self._categories:\n if not getattr(new_item,category) in self._category2id[category]:\n self._category2id[category][getattr(new_item,category)] = {}\n self._category2id[category][getattr(new_item,category)][name] = self.id\n\n self._id2database[self.id] = new_item\n\n new_item.db_link = self\n new_item.id = self.id\n\n self.id += 1\n return self", "def test_add_item_at_using_put(self):\n pass", "def __setitem__(self, key, value):\n self.insert(key, value)", "def __setitem__(self, key, item):\n self.attrib[key] = item" ]
[ "0.7453558", "0.6998339", "0.6921159", "0.69202787", "0.6884508", "0.6801688", "0.6676187", "0.655837", "0.65446305", "0.65341115", "0.64497375", "0.63943213", "0.63943213", "0.63741636", "0.63548404", "0.63431346", "0.6286365", "0.6278622", "0.62722087", "0.6255055", "0.62299794", "0.62193507", "0.61958456", "0.6178707", "0.61615276", "0.61432326", "0.61413556", "0.61286783", "0.6123936", "0.6123936", "0.6123936", "0.6122773", "0.6119166", "0.6115477", "0.6108297", "0.60719067", "0.60710186", "0.6067524", "0.6053758", "0.60510534", "0.6038668", "0.6036344", "0.6026398", "0.6014332", "0.5994055", "0.5987423", "0.5977117", "0.59500104", "0.5947381", "0.59447086", "0.59447086", "0.5940374", "0.59343654", "0.5928423", "0.5915321", "0.5904822", "0.58834195", "0.58794886", "0.58752894", "0.5858137", "0.584112", "0.58341736", "0.5828618", "0.5828522", "0.5826685", "0.58199483", "0.5808664", "0.58079034", "0.58046234", "0.5795943", "0.5776183", "0.5774913", "0.57727206", "0.57705545", "0.57662374", "0.5764518", "0.573587", "0.57354367", "0.57347006", "0.57200503", "0.57128716", "0.5710956", "0.5710018", "0.57081217", "0.56946486", "0.56925344", "0.5690209", "0.56786853", "0.56751454", "0.5669301", "0.5669242", "0.566894", "0.5660695", "0.56548446", "0.5635513", "0.5628865", "0.5628687", "0.561332", "0.561149", "0.55967456" ]
0.7630527
0
Adds an ``item`` to a Cuckoo Filter ``key`` only if the item does not yet exist. This command might be slower than ``cfAdd``.
def cfAddNX(self, key, item): params = [key, item] return self.execute_command(self.CF_ADDNX, *params)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cfAdd(self, key, item):\n params = [key, item]\n \n return self.execute_command(self.CF_ADD, *params)", "def bfAdd(self, key, item):\n params = [key, item]\n \n return self.execute_command(self.BF_ADD, *params)", "def add(self, key):\n if key in self:\n return True\n if not self.filters:\n filter = RedisLocalBloomFilter(\n server=self.server, \n bfkeypreffix = self.FILTER_KEY_FMT % (self.bfkeypreffix, self.filter_count),\n capacity=self.initial_capacity,\n error_rate=self.error_rate * (1.0 - self.ratio))\n self.filter_count += 1\n self.filters.append(filter)\n else:\n filter = self.filters[-1]\n if filter.count >= filter.capacity:\n capacity = filter.capacity * self.scale\n if capacity > MAX_PER_SLICE_SIZE:\n capacity = MAX_PER_SLICE_SIZE\n filter = RedisLocalBloomFilter(\n server=self.server,\n bfkeypreffix = self.FILTER_KEY_FMT % (self.bfkeypreffix, self.filter_count),\n capacity=capacity,\n error_rate=self.error_rate * (1.0 - self.ratio))\n self.filter_count += 1\n self.filters.append(filter)\n if self.max_filters > 0 and len(self.filters) >= self.max_filters:\n f = self.filters[0]\n f.clear()\n del self.filters[0]\n filter.add(key, skip_check=True)\n return False", "def add_item(self, key, item):\n self[key].add(item)\n try:\n self._reverse_store[item].add(key)\n except KeyError:\n self._reverse_store[item] = set([key])", "def put(self, key, item):\n if key is not None and item is not None:\n self.cache_data[key] = item\n if key not in self.cache_list:\n self.cache_list.append(key)\n if len(self.cache_data) > self.MAX_ITEMS:\n popped_key = self.cache_list.pop(0)\n print(f\"DISCARD: {popped_key}\")\n del self.cache_data[popped_key]", "def put(self, key, item):\n if key or item:\n self.cache_data[key] = item\n if len(self.cache_data) > BaseCaching.MAX_ITEMS:\n print(\"DISCARD: {}\".format(self.last))\n del self.cache_data[self.last]\n self.last = key", "def add_item(dct,item):\r\n if item not in dct[0]:\r\n print \"itemNotFound \",str(item)\r\n return False\r\n\r\n num=len(item)\r\n if num in dct:\r\n if item in dct[num]:\r\n return False\r\n else:\r\n dct[num].append(item)\r\n return True\r\n else:\r\n dct[num]=[item]\r\n return True", "def put(self, key, item):\n # Do nothing if key is NoneType or item is NoneType\n if (key is None or item is None):\n return\n\n # If Key exists, update Cache Item\n if key in self.cache_data:\n for ci in self.LFU:\n if ci.key is key:\n ci.updateItem(item)\n else:\n # Length will be longer than max capacity, make room\n if len(self.cache_data) == self.MAX_ITEMS:\n\n # Determine discarded cache item\n discard = self.LFU[0]\n for x in self.LFU:\n if x.freq < discard.freq or \\\n x.freq == discard.freq and x.age > discard.age:\n discard = x\n\n # Discard Cache Item\n print(\"DISCARD: {}\".format(discard.key))\n del self.cache_data[discard.key]\n self.LFU.remove(discard)\n\n # Add new Cache Item\n data = LFUCacheItem(key, item, 0, 0)\n self.LFU.append(data)\n\n # increase age of all items\n for x in self.LFU:\n x.age += 1\n\n self.cache_data[key] = item", "def add_item (self, item):\n new_item = CacheItem (item)\n cached = self.cache.get(hash(item))\n if cached is None:\n self.evict_or_add (new_item)\n cached.hits += 1", "def append(self, item):\n # FIXME: this is only append if the key isn't already present\n key, value = item\n self._main[key] = value", "def put(self, key, item):\n if key and item:\n self.cache_data[key] = item", "def put(self, key, item):\n if key and item:\n self.cache_data[key] = item", "def put(self, key, item):\n if key is None 
or item is None:\n return\n if key in self.key_tracker.keys():\n self.key_tracker.pop(key)\n if len(self.key_tracker) >= BaseCaching.MAX_ITEMS:\n x = self.most_recent_use_key()\n print(\"DISCARD: {}\".format(x))\n self.key_tracker.pop(x)\n self.cache_data.pop(x)\n self.cache_data.update({key: item})\n self.key_tracker.update({key: self.count})\n self.count += 1", "def put(self, key, item):\n if key and item:\n if key in self.cache_data.keys():\n self.cache_data[key] = item\n self.stack.append(key)\n else:\n if len(self.cache_data.keys()) < self.MAX_ITEMS:\n self.cache_data[key] = item\n self.stack.append(key)\n elif len(self.cache_data.keys()) == self.MAX_ITEMS:\n k = self.stack.pop(-1)\n self.cache_data.pop(k)\n print(\"DISCARD: {}\".format(k))\n self.cache_data[key] = item\n self.stack.append(key)", "def add_item(self, item, index):\n if index in self.d_buffer.keys():\n return True\n elif len(self) < self._size:\n self.d_buffer.update({index: item})\n return True\n else:\n return False", "def adauga(self, item):\n if item in self._items:\n raise RepoError(\"item deja existent!\\n\")\n self._items.append(item)", "def add(self, item):\n if self.has_item(item):\n return\n\n self.cache.append(item)\n\n if self.size() > self.max_size:\n self.cache.popleft()", "def add_item(self):\n item = LibGen.create_item()\n if not self.item_exists(item.call_number):\n self.item_list[item.call_number] = item\n print(f\"Item({item.call_number}) bas been added.\")\n else:\n print(\"This item already exists.\")", "def add(self, item):\n self._set(item, None)", "def add(self, key, skip_check=False):\n bits_per_slice = self.bits_per_slice\n hashes = self.make_hashes(key)\n found_all_bits = True\n if self.count > self.capacity:\n raise IndexError(\"RedisLocalBloomFilter is at capacity\")\n pipe = self.server.pipeline(transaction=False) \n sliceIdx = 0\n for k in hashes:\n sliceKey = self.SLICE_KEY_FMT % (self.bfkeypreffix, sliceIdx)\n pipe.setbit(sliceKey, k, 1)\n sliceIdx += 1\n pipeResults = pipe.execute()\n if not skip_check:\n for pipeResult in pipeResults:\n if not pipeResult:\n found_all_bits = False\n break\n if skip_check:\n self.count += 1\n return False\n elif not found_all_bits:\n self.count += 1\n return False\n else:\n return True", "def put(self, key, item):\n if key is None or item is None:\n return\n self.cache_data[key] = item", "def add(self, item):\n item = self._prepare_item(len(self), item)\n if item not in self._data:\n self._data.append(item)\n self.__log__.append(SetAdd(value=item))", "def add_item(self, item: _T) -> None:\n if item not in self.item_to_index:\n self.item_to_index[item] = len(self.index_to_item)\n self.index_to_item.append(item)", "def add(self, item):\n if not (item in self.set):\n self.set[item] = True\n heapq.heappush(self.heap, item)", "def put(self, key, item):\n if key is not None and item is not None:\n # modify the time and change the next newer value\n self.timesKey[key] = self.time\n self.time += 1\n\n # add the new item\n self.cache_data[key] = item\n\n if len(self.cache_data) > BaseCaching.MAX_ITEMS:\n discard_key = None\n newer = self.time - 2\n\n for _key, _value in self.timesKey.items():\n if newer == _value:\n discard_key = _key\n break\n\n # del key in time and cache data\n del self.cache_data[discard_key]\n del self.timesKey[discard_key]\n\n print(\"DISCARD: {}\".format(discard_key))", "def add_item(self, item_name):\n if not self.has_item(item_name):\n self.item_list.append(item_name)", "def add_item(self, item):\n item_exists = 
self.get_item(item.id)\n\n if item_exists:\n item_exists._increment_quantity(item.quantity)\n else:\n self.items.append(item)", "def __replace_or_add_item(self, item: ClientWorklistItem):\n # print('__replace_or_add_item: __items=', self.__items)\n for i in range(len(self.__items)):\n val = self.__items[i]\n if item.id == val.id:\n self.__items[i] = item\n return\n # not found above, append it\n self.__items.append(item)", "def add_item(self, item):\r\n bag_res = consts.BAG_PUT_FAILED\r\n for i in range(len(self._items)):\r\n res = self.put_item_at(i, item, allow_switch=False)\r\n if res == consts.PUT_FORBIDDEN:\r\n return consts.BAG_PUT_FAILED\r\n if res == consts.PUT_SWITCH or \\\r\n res == consts.PUT_INTO_EMPTY or \\\r\n res == consts.PUT_MERGE_TOTALLY:\r\n return consts.BAG_PUT_TOTALLY\r\n if res == consts.PUT_MERGE_PARTIALLY:\r\n bag_res = consts.BAG_PUT_PARTIALLY\r\n continue\r\n if res == consts.PUT_MERGE_FAILED or \\\r\n res == consts.PUT_SWITCH_FORBIDDEN:\r\n continue\r\n return bag_res", "def cfExists(self, key, item):\n params = [key, item]\n \n return self.execute_command(self.CF_EXISTS, *params)", "def _add_item_by_item(self, item):\n self.item_list[item.call_number] = item", "def bfExists(self, key, item):\n params = [key, item]\n \n return self.execute_command(self.BF_EXISTS, *params)", "def evict_or_add (self, item):", "def add_item(item):\n # Check first if the item already exists in the inventory\n for i in get_inventory():\n if i['name'] == item['name']:\n print(f\"[ERROR] item with name {i['name']} already exists\")\n break\n else:\n print(f'[INFO] Adding item {item}')\n INVENTORY.append(item)\n # mongo.collection().insert_one(item)", "def put(self, key, item):\n # Do nothing if key is NoneType or item is NoneType\n if (key is None or item is None):\n return\n\n if key in self.cache_data:\n self.LRU = [ci for ci in self.LRU if ci.key != key]\n\n # increase age of all items\n for x in self.LRU:\n x.age += 1\n\n self.cache_data[key] = item\n data = LRUCacheItem(key, item, 0)\n self.LRU.append(data)\n\n # Length is longer than max capacity, make room\n if len(self.cache_data) > self.MAX_ITEMS:\n discard = self.LRU[0]\n for x in self.LRU:\n if x.age > discard.age:\n discard = x\n print(\"DISCARD: {}\".format(discard.key))\n del self.cache_data[discard.key]\n self.LRU.remove(discard)", "def add(self, item):\n self.num_item += 1\n indexs = self.__get_indexs(item)\n for index in indexs:\n self.filter_bitarray[index] = True", "def add_item(self, new_item):\n [self.item_list.append(new_item) for item in self.item_list\n if new_item not in self.item_list]", "def add(self, item):\n self.update(set([item]))", "def add_to_cluster(self, item: str, c_id: Optional[str]) -> None:\n if item in self._clusters.keys(): # Check if conflicting add\n assert self._clusters[item] == c_id\n assert c_id is None or c_id in self._clusters.values() # Cluster already exists\n self._clusters[item] = c_id\n self.store()", "def insert_and_check(self, item) -> bool:\n with Monitor.acquire(self):\n if item in self:\n return False\n self.add(item)\n return True", "def filter_matches_add(self, key, value):\n\t\tif key in self.filter_matches.keys():\n\t\t\tself.filter_matches[key].append(value)\n\t\telse:\n\t\t\tself.filter_matches[key]=[value]", "def add(self, item: object, uid: str) -> None:\n if self.active:\n self._input[uid] = item\n # `NoRunpathPool` adds item after calling `_prepopulate_runnables`\n # so the following step is still needed\n if uid not in self.ongoing:\n self.ongoing.append(uid)", "def 
add(self, item):\n self._dict[item] = item", "def add(self, item):\n\n if item not in self:\n self._index_map[item] = len(self._list)\n self._list.append(item)", "def put(self, item: Any):\n has_item = True\n with self._lock:\n if item not in self._items:\n self._items.add(item)\n has_item = False\n if not has_item:\n self._queue.put(item)", "def add_new(self, item, key):\n if key in self._items:\n raise DuplicateListHeapItemException(key)\n if len(self._items) >= self._max_limit:\n raise MaxItemLimitReachedException()\n self._items[key] = item\n self._listbox.insert(END, key)", "def add(self, item):", "def insert(self, item):\r\n self.fetch()\r\n t = self.make_item_tuple(item)\r\n changed = False\r\n if t not in self.data:\r\n self.data.insert(0, t)\r\n changed = True\r\n\r\n if changed:\r\n query_cache.set(self.iden, self.data[:precompute_limit])", "def add(self, key, value, timeout=None):\n try:\n key = self.prepare_key(key)\n if self._cache.exists(key):\n return False\n return self.set(key, value, timeout)\n except Exception as err:\n return self.warn_or_error(err, False)", "def add_item(self, item):\n if item.price > 0:\n self.items.append(item)\n else:\n raise ItemPriceIsBad(\"zla cena\")", "def add(self, key, value):\n newest = self._Item(key, value) # make new item instance\n walk = self._data.last() # walk backward looking for smaller key\n while walk is not None and newest < walk.element():\n walk = self._data.before(walk)\n if walk is None:\n self._data.add_first(newest) # new key is smallest\n else:\n self._data.add_after(walk, newest) # newest goes after walk", "def add(self, item, issue):\n if self.has_key(item):\n self[item].append(issue)\n else:\n self[item] = [issue]\n return 1", "def add(self, key, value):\r\n newest = Item(key, value) # make new item instance\r\n walk = self.data.last() # walk backward looking for smaller key\r\n while walk is not None and newest < walk.element():\r\n walk = self.data.before(walk)\r\n if walk is None:\r\n self.data.add_first(newest) # new key is smallest\r\n else:\r\n self.data.add_after(walk, newest) # newest goes after walk\r", "def put(self, key, item):\n raise NotImplementedError(\"put must be implemented in your cache class\")", "def _SetItemIf(container, condition, item, value):\n if condition:\n container[item] = value\n\n return condition", "async def _exists(self, key):\n return await self.client.append(key, b'')", "def add(self, key, value):", "def adduniq(self, item, issue):\n if self.has_key(item):\n if issue in self[item]:\n return 0\n self.add(item, issue)\n return 1", "def push(self, item):\n if item not in self._items:\n self._items.append(item)", "def add(self, key, value):\n self._data.add_last(self._Item(key, value))", "def add_item(self, item):\n self.items.append(item)", "def add(self, item: Any) -> None:\n pass", "def take_item(self, item):\r\n if len(self.items) <= 2:\r\n self.items.append(item)\r\n if self.got_both():\r\n self.working = True", "def insert(self, item):\r\n if not self.is_full():\r\n for i in range(1,len(self.items)):\r\n if self.items[i] is None:\r\n self.items[i] = item\r\n self.size += 1\r\n self.perc_up(i)\r\n return True\r\n return False", "def add(self, key, value):\n if not key in self:\n self.keys.append(key)\n self.dict[key] = value", "def append(self, item, **data):\n self._items.append(item)\n if data:\n self._data[item] = data", "def add_item(self, key, data):\n hash_key = self.count_hash(key, len(self.slots))\n\n if self.slots[hash_key] is None:\n self.slots[hash_key] = key\n 
self.data[hash_key] = data\n else:\n if self.slots[hash_key] == key:\n self.data[hash_key] = data\n elif isinstance(self.slots[hash_key], int):\n self.slots[hash_key] = (self.slots[hash_key], key,)\n self.data[hash_key] = (self.data[hash_key], data,)\n elif len(self.slots[hash_key]) > 1:\n list_slot = list(self.slots[hash_key])\n list_data = list(self.data[hash_key])\n list_slot.append(key)\n list_data.append(data)\n self.slots[hash_key] = tuple(list_slot)\n self.data[hash_key] = tuple(list_data)", "def add(self, key: str, value: str) -> Optional[None]:\n threshhold = self.capacity * 0.75\n if self.length >= threshhold:\n self._increase_size()\n\n hashkey = self._gethash(key)\n if not self.HashMap[hashkey]:\n # The key does not exist so add it\n value_to_store = [key, value]\n self.HashMap[hashkey] = value_to_store\n self.length += 1\n elif self.HashMap[hashkey] and key not in self.HashMap[hashkey]:\n # There is a hashclash append to the location\n self.HashMap[hashkey].extend([key, value])\n self.length += 1\n else:\n # The key exists and matches so the value gets overlayed\n self.HashMap[hashkey] = [key, value]", "def add_item(self, item_id, item_title, score, filter_stopwords=False):\n with self._r.pipeline() as pipe:\n for prefix in self._prefixes(item_title, filter_stopwords=filter_stopwords):\n pipe.zadd(prefix, item_id, score)\n pipe.hset('$titles', item_id, item_title)\n pipe.execute()\n return True", "def test_add_item_at_using_put(self):\n pass", "def add_no_defer(self, key):\n with self._lock:\n self._no_defer.add(key)", "def add(self, key, value):\n new = self._Item(key, value)\n\n if self.is_empty():\n self._data.append(new)\n else:\n for i, item in enumerate(self._data):\n if new <= item:\n self._data.insert(i, new)\n break\n if i == len(self) - 1:\n self._data.append(new)\n break", "def add_item(self, product, price):\n if not product in self.items_in_cart:\n self.items_in_cart[product] = price\n print (product + \" added.\")\n else:\n print (product + \" is already in the cart.\")", "def insert(self, item):\n for h_num in xrange(self.k):\n val = self.hash_value(item, h_num)\n self.arr[val] = True", "def add(self, item: Mapping[Hashable, Any], **kwargs: Any) -> None:\n self.contents.update(item, **kwargs)\n return", "def add(self, key):\r\n if key not in self.map:\r\n end = self.end\r\n curr = end[PREV]\r\n curr[NEXT] = end[PREV] = self.map[key] = [key, curr, end]\r\n if self.emitter:\r\n self.emitter.emit()", "def add_item(self, item):\n self.items_with_price.update(item)", "def addItem(self, item, rank):\n with self.lock:\n if self.ItemHashList.get(item, -1) == -1:\n self.ItemHashList[item] = None\n if rank < 0:\n rank = 0\n heapq.heappush(self.ItemList, (rank, item))", "def add(self, key):\n addition_idx = self._reduce(self._hash(key))\n\n if self.table[addition_idx] != \"_\":\n # collision\n new_idx = self._resolve_collision(addition_idx)\n if new_idx == addition_idx:\n # table is full; do not insert\n print(\"Did not add key: hash table is full!\")\n else:\n # found a new\n self.table[new_idx] = key\n else:\n # no collision; place value at index\n self.table[addition_idx] = key", "async def add(self, category, key, value=None):\n await super(MemoryKVCache, self).add(category, key, value)\n\n if self.in_transaction:\n self.dirty_categories.add(category)", "def put(self, operation, item, date=None):\n try:\n self.queue.put({\"operation\": operation, \"item\": item, \"date\": date or datetime.utcnow()})\n self.flush()\n except Exception as e:\n logger.critical('unable to 
put an item in the queue :: {}'.format(e))\n return False\n else:\n return True", "def add(self, key, data):\n if key not in self.vertices:\n self.numberOfVertices += 1\n self.vertices[key] = Vertex(key, data)\n return True\n\n return False", "def addItem(*args):", "def addItem(*args):", "def addItem(*args):", "def add_item(self, item_to_append):\n self.items.append(item_to_append)", "def add_item(key, obj, dst):\n\n if key not in dst:\n dst[key] = []\n dst[key].append(obj)", "def add(self, key):\n\t\t#super(CountingBloomFilter, self).add(key)\n\t\t#super(CountingBloomFilter, self).generateStats()\n\t\tfor i in self.getBitArrayIndices(key):\n\t\t\tself.ba[i] += 1\n\t\tself.n += 1", "def add_item(todo_list, todo_new_item):\n check = True\n try:\n todo_list.append(todo_new_item)\n except todo_list:\n print(\"Could not add new item to todo list\")\n check = False\n\n return check", "async def add(self, category: str, key: str, value: any = None) -> None:\n async with self.lock:\n await self.storage.add(category, key, value)\n\n try:\n await self.cache.add(category, key, value)\n except KeyError:\n await self.set_category_cache(category)", "def add_to_inventory(self, item, quantity):\n\t\tincreaseQuantity = None\n\t\taddToDict = True\n\t\tfor key in self.inventoryDictionary:\n\t\t\tif key.name == item.name:\n\t\t\t\taddToDict = False\n\t\t\t\tincreaseQuantity = key\n\t\t\t\tbreak\n\t\t\t\t\n\n\t\t\telse:\n\t\t\t\taddToDict = True\n\t\t\t\t\n\n\t\tif addToDict:\n\t\t\tself.inventoryDictionary[item] = quantity\n\t\telse:\n\t\t\tself.inventoryDictionary[increaseQuantity] += quantity", "def _single_setitem(self, key, item):\n self._dict[key] = item", "def set_item(filename, item):\n with atomic_write(os.fsencode(str(filename))) as temp_file:\n with open(os.fsencode(str(filename))) as products_file:\n # load the JSON data into memory\n products_data = json.load(products_file)\n # check if UUID already exists\n uuid_list = [i for i in filter(\n lambda z: z[\"uuid\"] == str(item[\"uuid\"]), products_data)]\n if len(uuid_list) == 0:\n # add the new item to the JSON file\n products_data.append(item)\n # save the new JSON to the temp file\n json.dump(products_data, temp_file)\n return True\n return None # record already exists", "def add_to_bag(self, item):\n self._bag.append(item)", "def add_item(self, product, price):\r\n if not product in self.items_in_cart:\r\n self.items_in_cart[product] = price\r\n print(product + \" added.\")\r\n else:\r\n print(product + \" is already in the cart.\")", "def add(self, key, value):\n self._data.append(self._Item(key, value))\n self._upheap(len(self._data) - 1) # upheap newly added position", "def add(self, key, value):\n self._data.append(self._Item(key, value))\n self._upheap(len(self._data) - 1) # upheap newly added position", "async def _add(self, key, value, ttl=None):\n\n with await self._connect() as redis:\n was_set = await redis.set(key, value, expire=ttl, exist=redis.SET_IF_NOT_EXIST)\n if not was_set:\n raise ValueError(\n \"Key {} already exists, use .set to update the value\".format(key))\n return was_set", "def add_item(self, item):\n self.items.append(item)\n self.length += 1", "async def _add(self, key, value, ttl=0):\n ret = await self.client.add(key, str.encode(value), exptime=ttl or 0)\n if not ret:\n raise ValueError(\n \"Key {} already exists, use .set to update the value\".format(key))\n\n return True" ]
[ "0.7416731", "0.7239735", "0.6776708", "0.65681887", "0.65602005", "0.6497649", "0.64799464", "0.6416331", "0.6377396", "0.63700354", "0.6295288", "0.6295288", "0.6282548", "0.62538064", "0.62458456", "0.6231878", "0.6221746", "0.6212229", "0.61935914", "0.6185903", "0.61596084", "0.61460596", "0.61120886", "0.6093864", "0.6091784", "0.6056667", "0.6025139", "0.60221475", "0.60097337", "0.60053575", "0.5989661", "0.59728634", "0.59708446", "0.59650236", "0.5956699", "0.5934968", "0.5934375", "0.59149283", "0.59148645", "0.5914578", "0.59032667", "0.5887027", "0.5871957", "0.5865301", "0.5858321", "0.5821293", "0.57619953", "0.5755172", "0.57437575", "0.57436436", "0.5725218", "0.5723309", "0.5684233", "0.56814945", "0.5668419", "0.5666166", "0.5664802", "0.56560886", "0.5631627", "0.56206256", "0.5610954", "0.5604139", "0.5581357", "0.5577595", "0.55706614", "0.5565488", "0.5558403", "0.55064684", "0.5505143", "0.54998916", "0.54951835", "0.54934543", "0.5472647", "0.54703784", "0.54676664", "0.5463347", "0.54618394", "0.54573345", "0.5446011", "0.5428451", "0.54259026", "0.54188085", "0.5413484", "0.5413484", "0.5413484", "0.5408438", "0.5395882", "0.5395673", "0.5393159", "0.5392456", "0.5379607", "0.5379302", "0.53772795", "0.53761077", "0.5375127", "0.5375029", "0.53735965", "0.53734326", "0.53702396", "0.53657037" ]
0.6687297
3
Adds multiple ``items`` to a Cuckoo Filter ``key``, allowing the filter to be created with a custom ``capacity`` if it does not yet exist. ``items`` must be provided as a list.
def cfInsert(self, key, items, capacity=None, nocreate=None): params = [key] self.appendCapacity(params, capacity) self.appendNoCreate(params, nocreate) self.appendItems(params, items) return self.execute_command(self.CF_INSERT, *params)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cfAdd(self, key, item):\n params = [key, item]\n \n return self.execute_command(self.CF_ADD, *params)", "def bfAdd(self, key, item):\n params = [key, item]\n \n return self.execute_command(self.BF_ADD, *params)", "def add_items(self, items):\n for item in items:\n self.add(item)", "def addItems(c, items):\n\t\tcontainer.containersToSave[c['id_item_container']] = item.inventory.addItems(\n\t\t\titem.inventory.fromStr(c['items']),\n\t\t\titems\n\t\t)", "def bfInsert(self, key, items, capacity=None, error=None, noCreate=None, expansion=None, noScale=None):\n params = [key]\n self.appendCapacity(params, capacity)\n self.appendError(params, error)\n self.appendExpansion(params, expansion)\n self.appendNoCreate(params, noCreate)\n self.appendNoScale(params, noScale)\n self.appendItems(params, items)\n\n return self.execute_command(self.BF_INSERT, *params)", "def addItems(*args):", "def add(self, *items):\n for item in items:\n self.unsorted.append(item)\n key = item[0]\n self.index[key] = item\n return self", "def append(self, items):\n self.__add__(items)", "def topkAdd(self, key, *items):\n params = [key]\n params += items\n \n return self.execute_command(self.TOPK_ADD, *params)", "def add_items(self, items):\n for item in items:\n self.addItem(item)\n # end for item in items", "def append(self, *items: BOSminer) -> None:\n for item in items:\n self.miners[item.ip] = item", "def add(self, *items):", "def update(self, *items):\n for item in items:\n self.add(item)", "def add_items(self, items: typing.Iterable[str]) -> None:\n for item in items:\n self.add_item(item)", "def add(self, item):\n self.num_item += 1\n indexs = self.__get_indexs(item)\n for index in indexs:\n self.filter_bitarray[index] = True", "def extend(self, items):\n\t\tfor item in items:\n\t\t\tself.append(item)", "def add(self, items):\n if isinstance(items, list):\n self.items.extend(items)\n else:\n self.items.append(items)", "def update(self, items: Mapping[Any, Any]) -> None:\n self.extend(list(items.values()))\n return", "def addToWatchlist(self, items):\n if not isinstance(items, list):\n items = [items]\n\n for item in items:\n if self.onWatchlist(item):\n raise BadRequest(f'\"{item.title}\" is already on the watchlist')\n ratingKey = item.guid.rsplit('/', 1)[-1]\n self.query(f'{self.METADATA}/actions/addToWatchlist?ratingKey={ratingKey}', method=self._session.put)\n return self", "def add_items(self, items: Iterable[_T]) -> None:\n for item in items:\n self.add_item(item)", "def knapsack(items, capacity):\r\n pass", "def add_toolbar_items(self, *toolbar_items):\n self.items += [self._map_item(item) for item in toolbar_items]", "def add(self, key):\n if key in self:\n return True\n if not self.filters:\n filter = RedisLocalBloomFilter(\n server=self.server, \n bfkeypreffix = self.FILTER_KEY_FMT % (self.bfkeypreffix, self.filter_count),\n capacity=self.initial_capacity,\n error_rate=self.error_rate * (1.0 - self.ratio))\n self.filter_count += 1\n self.filters.append(filter)\n else:\n filter = self.filters[-1]\n if filter.count >= filter.capacity:\n capacity = filter.capacity * self.scale\n if capacity > MAX_PER_SLICE_SIZE:\n capacity = MAX_PER_SLICE_SIZE\n filter = RedisLocalBloomFilter(\n server=self.server,\n bfkeypreffix = self.FILTER_KEY_FMT % (self.bfkeypreffix, self.filter_count),\n capacity=capacity,\n error_rate=self.error_rate * (1.0 - self.ratio))\n self.filter_count += 1\n self.filters.append(filter)\n if self.max_filters > 0 and len(self.filters) >= self.max_filters:\n f = self.filters[0]\n f.clear()\n 
del self.filters[0]\n filter.add(key, skip_check=True)\n return False", "def cfInsertNX(self, key, items, capacity=None, nocreate=None):\n params = [key]\n self.appendCapacity(params, capacity)\n self.appendNoCreate(params, nocreate)\n self.appendItems(params, items)\n\n return self.execute_command(self.CF_INSERTNX, *params)", "def add_items(items, cities, img_path, mask_path, mask_postfix):\n\n for c in cities:\n c_items = [name.split('_leftImg8bit.png')[0] for name in\n os.listdir(os.path.join(img_path, c))]\n for it in c_items:\n item = (os.path.join(img_path, c, it + '_leftImg8bit.png'),\n os.path.join(mask_path, c, it + mask_postfix))\n items.append(item)", "def _additems(self, w,h):\n for idx in range(len(self.data['items'])):\n default={\n 'color': self.data['itemscolor'],\n 'textscale': self.data['itemsscale'],\n 'textfont': self.data['textfont'],\n 'width': w-(self.data['margin'][0]*2.),\n }\n self.data['items'][idx].update(default)\n self.addItem(idx, **self.data['items'][idx])", "def filter_matches_add(self, key, value):\n\t\tif key in self.filter_matches.keys():\n\t\t\tself.filter_matches[key].append(value)\n\t\telse:\n\t\t\tself.filter_matches[key]=[value]", "def apply_filters(filters, items):\n return scom.apply_filters(filters, items)", "def cache_db_items(self, key, items, item_key='id'):\n db_items = self._extension_data['db_items'].setdefault(key, {})\n for item in items:\n db_items[item[item_key]] = item", "def add_items(self, items):\n\n self.model.add_items(items)\n self.refreshed.emit()", "def addItem(*args):", "def addItem(*args):", "def addItem(*args):", "def add_to_queue(self, items):\n\n for i in items:\n self.r.rpush(self.joblist, i)", "def add(self, Filter=None, FilterItems=None, MaxWaitTime=None, Source=None, Stats=None):\r\n\t\treturn self._create(locals())", "def add_to_items(items, name, size, price):\n index = items_contains_name(items, name)\n if index == 0:\n temp = {'name': name, 'size': size, 'count': 1, 'price': price}\n items.append(temp)\n else:\n items[index]['count'] = items[index]['count'] + 1\n return items", "def build_heap(self, items):\n for key in items:\n self.insert(key)", "def callback_extend_list(item):\n fisher_contingency_pval_parallel_insertion.extend(item)", "def process_items_cfg(self, itemscfg):\n\n for item in itemscfg:\n key = list(item.keys())[0]\n if key not in self.aciitemcfg[\"aci_items\"]:\n self.aciitemcfg[\"aci_items\"][key] = []\n\n self.aciitemcfg[\"aci_items\"][key].append(item[key])", "async def setMany(self, items):\n\n # is caching enabled?\n if CACHE_DISABLED:\n return None\n\n # make sure pool exists\n init()\n\n # check the input\n if not isinstance(items, list):\n raise Exception('Invalid input type provided')\n for item in items:\n if 'key' not in item:\n raise Exception('Missing key in item')\n if 'val' not in item:\n raise Exception('Missing value in item')\n if any(k not in item['key'] for k in self._keys):\n raise Exception('Missing field in key')\n\n # store in db\n async with pool.put(\n config.CACHE_ENDPOINT + '/' + config.CACHE_PREFIX + self._name,\n json=[{'key': self.buildKey(item['key']), 'value': item['val']} for item in items],\n headers={\n \"Accept\": \"application/json\",\n }\n ) as resp:\n\n if resp.status == 200:\n # success\n return True\n else:\n # fail\n raise Exception( await resp.text() )", "def pad_keys(items, keys):\n for key in keys:\n if key not in items:\n items[key] = EmptySignature()\n return items", "def multi_set(self, items, no_update_log=False):\n opts = (no_update_log and 
TyrantProtocol.RDBMONOULOG or 0)\n lst = []\n for k, v in items.iteritems():\n if isinstance(v, (dict)):\n new_v = []\n for kk, vv in v.items():\n new_v.append(kk)\n new_v.append(vv)\n v = new_v\n if isinstance(v, (list, tuple)):\n assert self.separator, \"Separator is not set\"\n\n v = self.separator.join(v)\n lst.extend((k, v))\n\n wait(self.proto.misc(\"putlist\", lst, opts))", "def add_item(self, item):\n self.items.append(item)\n self.item_count += 1\n self.max_length = max(self.max_length, len(item.samples))", "def cfAddNX(self, key, item):\n params = [key, item]\n \n return self.execute_command(self.CF_ADDNX, *params)", "def _tattle_add_item(resource, item, resourcesalloweddict, resourcesuseddict):\n\n resourcesuseddict['fungible_locks'][resource].acquire()\n\n # always unlock as we exit...\n try: \n\n # It's already acquired. This is always allowed.\n if item in resourcesuseddict[resource]:\n return\n\n if len(resourcesuseddict[resource]) > resourcesalloweddict[resource]:\n raise InternalRepyError, \"Should not be able to exceed resource count\"\n\n if len(resourcesuseddict[resource]) == resourcesalloweddict[resource]:\n # it's clobberin time!\n raise ResourceExhaustedError(\"Resource '\"+resource+\"' limit exceeded!!\")\n\n # add the item to the list. We're done now...\n resourcesuseddict[resource].add(item)\n\n finally:\n resourcesuseddict['fungible_locks'][resource].release()", "def cmsIncrBy(self, key, items, increments):\n params = [key]\n self.appendItemsAndIncrements(params, items, increments)\n \n return self.execute_command(self.CMS_INCRBY, *params)", "def ffa(items_list, bin_capacity):\n bins =[]\n randomised_np_list = np.random.permutation(items_list) # list containing initial items in a random order\n items_list = randomised_np_list.tolist() \n \n for item in items_list:\n # foeach item we search if there's an open bin where it can fit\n for bin in bins:\n if bin.total_weight + item <= bin_capacity: #if it fits\n bin.add_item(item) #we add the item in the bin\n break\n else:\n # there is no open bin where the item can fit\n #so we open a new bin and add the item in it\n bin = Bin()\n bin.add_item(item)\n bins.append(bin)\n\n return bins", "def extend(self, items):\n self.work.extend(items)", "def __init__(self, items=[]):\n self.items = [*items]", "def bfMAdd(self, key, *items):\n params = [key]\n params += items\n\n return self.execute_command(self.BF_MADD, *params)", "def add_item(items, coder, tag, start, n):\n if start is not None:\n # close opened items\n add_zero_item(items, coder, tag, start) # default tag\n items[tag][coder].append(item(b=start, l=n-start, v=1)) # found tag", "def groupInputItem(name,items=[],**kargs):\n kargs['name'] = name\n kargs['items'] = items\n kargs['itemtype'] = 'group'\n return kargs", "def additemtoinventory(item):\n global ITEM_COUNT\n for i in range(0, 10): # first 10 items are weapons, (this code sux, need a better way of doing this)\n if ITEMTYPES[ITEM_LIST[ZERO_BASE_PLYR_POS]] == ITEMTYPES[i]: \n cur_weapon_strength = WEAPON_STRENGTHS[ITEMS[0]]\n new_weapon_strength = WEAPON_STRENGTHS[ITEMTYPES[i]]\n if new_weapon_strength > cur_weapon_strength:\n change_weapon(ITEMTYPES[i])\n ITEMS[0] = ITEMTYPES[i] # 'overwrite' the main weapon with the new one\n remove_item_from_map()\n return # exit here if item is weapon\n else:\n remove_item_from_map()\n return # remove the inferior weapon from the map and return\n ITEMS.append(ITEMTYPES[item])\n ITEM_COUNT = len(ITEMS)\n remove_item_from_map()", "def addCollectionItems_patch(self, target, 
items):\n if not items: return None\n auth_token = self.obtainSecurityToken()\n target_type = target._type\n item_types = [item._type for item in items]\n item_type = item_types[0]\n # Fix for _type in outliers\n outliers = [item for item in item_types if item != item_type]\n if outliers:\n raise RallyRESTAPIError(\"addCollectionItems: all items must be of the same type\")\n resource = \"%s/%s/%ss/add\" % (target_type, target.oid, item_type)\n collection_url = '%s/%s?fetch=Name&key=%s' % (self.service_url, resource, auth_token)\n payload = {\"CollectionItems\":[{'_ref' : \"%s/%s\" % (str(item._type), str(item.oid))}\n for item in items]}\n response = self.session.post(collection_url, data=json.dumps(payload), headers=RALLY_REST_HEADERS)\n context = self.contextHelper.currentContext()\n response = RallyRESTResponse(self.session, context, resource, response, \"shell\", 0)\n added_items = [str(item[u'Name']) for item in response.data[u'Results']]\n return response, added_items", "def put(self, key, item):\n # Do nothing if key is NoneType or item is NoneType\n if (key is None or item is None):\n return\n\n if key in self.cache_data:\n self.LRU = [ci for ci in self.LRU if ci.key != key]\n\n # increase age of all items\n for x in self.LRU:\n x.age += 1\n\n self.cache_data[key] = item\n data = LRUCacheItem(key, item, 0)\n self.LRU.append(data)\n\n # Length is longer than max capacity, make room\n if len(self.cache_data) > self.MAX_ITEMS:\n discard = self.LRU[0]\n for x in self.LRU:\n if x.age > discard.age:\n discard = x\n print(\"DISCARD: {}\".format(discard.key))\n del self.cache_data[discard.key]\n self.LRU.remove(discard)", "def addItem(self, item):\n self.items.append(item)\n self.totalWeight += item", "def cloudflare_waf_ip_list_item_create_command(client: Client, args: Dict[str, Any]) -> CommandResults:\n\n list_id = args['list_id']\n items = [{'ip': item} for item in argToList(args.get('items'))]\n\n response = client.cloudflare_waf_ip_list_item_create_request(list_id, items)\n output = response['result']\n\n return CommandResults(\n readable_output=f'Create items in the IP List {list_id} is executing',\n raw_response=output)", "def test_neg_operate_append_items_not_a_list(self):\n key = (\"test\", \"demo\", \"list_key\")\n\n list = [\n {\"op\": aerospike.OP_LIST_APPEND_ITEMS, \"bin\": \"int_bin\", \"val\": 7},\n ]\n\n try:\n key, _, bins = self.as_connection.operate(key, list)\n except e.ParamError as exception:\n assert exception.code == -2", "def __init__(self, items: List[T], min_freq: int = 1):\n counter_ = Counter(items)\n unique_items = [x for x, freq in counter_.items() if freq >= min_freq]\n self._dict = {item: i + 1 for i, item in enumerate(unique_items)}\n self._items: List[Union[str, T]] = [\"UNK\"]\n self._items.extend(unique_items)", "def add(self, key):\n\t\t#super(CountingBloomFilter, self).add(key)\n\t\t#super(CountingBloomFilter, self).generateStats()\n\t\tfor i in self.getBitArrayIndices(key):\n\t\t\tself.ba[i] += 1\n\t\tself.n += 1", "def reduce_task(*items):\n merged = dict()\n keys = set().union(*items)\n for key in keys:\n merged[key] = sum([x.get(key, 0) for x in items])\n return merged", "def append(self, item):\n # FIXME: this is only append if the key isn't already present\n key, value = item\n self._main[key] = value", "def add_item(self, item):\r\n bag_res = consts.BAG_PUT_FAILED\r\n for i in range(len(self._items)):\r\n res = self.put_item_at(i, item, allow_switch=False)\r\n if res == consts.PUT_FORBIDDEN:\r\n return consts.BAG_PUT_FAILED\r\n if res == 
consts.PUT_SWITCH or \\\r\n res == consts.PUT_INTO_EMPTY or \\\r\n res == consts.PUT_MERGE_TOTALLY:\r\n return consts.BAG_PUT_TOTALLY\r\n if res == consts.PUT_MERGE_PARTIALLY:\r\n bag_res = consts.BAG_PUT_PARTIALLY\r\n continue\r\n if res == consts.PUT_MERGE_FAILED or \\\r\n res == consts.PUT_SWITCH_FORBIDDEN:\r\n continue\r\n return bag_res", "def process_input_items(args):\n return dict(sum([Counter({sku: value * SCORES[k] for sku, value in\n Counter(args[k].split(',')).items()}) or Counter() for k in\n set(SCORES.keys()) & set(args.keys())], Counter()))", "def __init__(self, capacity, fillValue=None):\r\n self._items = list()\r\n for count in range(capacity):\r\n self._items.append(fillValue)", "def put(self, key, item):\n # Do nothing if key is NoneType or item is NoneType\n if (key is None or item is None):\n return\n\n # If Key exists, update Cache Item\n if key in self.cache_data:\n for ci in self.LFU:\n if ci.key is key:\n ci.updateItem(item)\n else:\n # Length will be longer than max capacity, make room\n if len(self.cache_data) == self.MAX_ITEMS:\n\n # Determine discarded cache item\n discard = self.LFU[0]\n for x in self.LFU:\n if x.freq < discard.freq or \\\n x.freq == discard.freq and x.age > discard.age:\n discard = x\n\n # Discard Cache Item\n print(\"DISCARD: {}\".format(discard.key))\n del self.cache_data[discard.key]\n self.LFU.remove(discard)\n\n # Add new Cache Item\n data = LFUCacheItem(key, item, 0, 0)\n self.LFU.append(data)\n\n # increase age of all items\n for x in self.LFU:\n x.age += 1\n\n self.cache_data[key] = item", "def add(self, item):\n self.update(set([item]))", "def batch_put_attributes(self, items, replace=True):\r\n return self.connection.batch_put_attributes(self, items, replace)", "def cloudflare_waf_ip_list_item_create_request(self, list_id: str, items: list) -> Dict[str, Any]:\n return self._http_request(\n method='POST',\n url_suffix=f'accounts/{self.account_id}/rules/lists/{list_id}/items',\n json_data=items)", "def add_items(self, library_items):\n for item in library_items:\n self._all_items.append(item)", "def add(self, key, values):\n self.watchlists[key] = list(enumerate(values))", "def process_new_items(self, new_items):\n self.items_hat = np.hstack([self.items_hat, new_items])", "def __init__(self, items = None):\n self._items = sorted(set(items)) if items is not None else []", "def __init__(self, items=None):\n self._items = sorted(set(items)) if items is not None else []", "def put(self, key, item):\n if key and item:\n if key in self.cache_data.keys():\n self.cache_data[key] = item\n self.stack.append(key)\n else:\n if len(self.cache_data.keys()) < self.MAX_ITEMS:\n self.cache_data[key] = item\n self.stack.append(key)\n elif len(self.cache_data.keys()) == self.MAX_ITEMS:\n k = self.stack.pop(-1)\n self.cache_data.pop(k)\n print(\"DISCARD: {}\".format(k))\n self.cache_data[key] = item\n self.stack.append(key)", "def test_add_item_adds_multiple_entries():\n sc.menu = sc.default_menu\n sc.current.add_item('Coffee', 2)\n sc.current.add_item('Coffee', 1)\n sc.current.add_item('Tea', 1)\n assert sc.current.receipt == {'subtotal': 6.36, 'Coffee': 3, 'Tea': 1}", "def cfCreate(self, key, capacity, expansion=None, bucket_size=None, max_iterations=None):\n params = [key, capacity]\n self.appendExpansion(params, expansion)\n self.appendBucketSize(params, bucket_size)\n self.appendMaxIterations(params, max_iterations)\n\n return self.execute_command(self.CF_RESERVE, *params)", "def add(self, key, skip_check=False):\n bits_per_slice = 
self.bits_per_slice\n hashes = self.make_hashes(key)\n found_all_bits = True\n if self.count > self.capacity:\n raise IndexError(\"RedisLocalBloomFilter is at capacity\")\n pipe = self.server.pipeline(transaction=False) \n sliceIdx = 0\n for k in hashes:\n sliceKey = self.SLICE_KEY_FMT % (self.bfkeypreffix, sliceIdx)\n pipe.setbit(sliceKey, k, 1)\n sliceIdx += 1\n pipeResults = pipe.execute()\n if not skip_check:\n for pipeResult in pipeResults:\n if not pipeResult:\n found_all_bits = False\n break\n if skip_check:\n self.count += 1\n return False\n elif not found_all_bits:\n self.count += 1\n return False\n else:\n return True", "def insert_many(self, conn, key, **kwargs):\n conn.zadd(key, **kwargs)", "def do_add(self, args):\n\t\tif len(args) == 0:\n\t\t\tself.parent.printErr(\"Missing argument(s)\")\n\t\t\treturn False\n\t\tdef try_add(ftype, fvalue):\n\t\t\tif ftype == \"has\" and value not in self.FILTER_HAS_ARGUMENTS:\n\t\t\t\tself.parent.printErr(\"Could not add '%s': Invalid filter argument\" % (fvalue))\n\t\t\t\treturn False\n\t\t\telif ftype not in self.FILTER_ARGUMENTS:\n\t\t\t\tself.parent.printErr(\"Could not add '%s': Invalid filter\" % (ftype))\n\t\t\t\treturn False\n\n\t\t\ttry:\n\t\t\t\tif value not in self.parent.filter[ftype]:\n\t\t\t\t\tself.parent.filter[ftype].append(fvalue)\n\t\t\t\telse:\n\t\t\t\t\tself.parent.printErr(\"Could not add '%s': Item already in filter\" % (fvalue))\n\t\t\t\t\treturn False\n\t\t\texcept KeyError:\n\t\t\t\tself.parent.filter[ftype] = [fvalue]\n\n\t\t\tself.apply_filter()\n\t\t\treturn True\n\n\t\targs = args.split()\n\t\tftype = args[0]\n\t\tvalues = args[1:]\n\n\t\tif len(values) == 0:\n\t\t\tself.parent.printErr(\"Could not add '%s': Filter expects arguments\" % (ftype))\n\n\t\tfor value in values:\n\t\t\ttry_add(ftype, value)\n\n\t\tself._update_prompts()", "def setitems(self, items):\n self.clear()\n # FIXME: this allows you to pass in an OrderedDict as well :-)\n self.update(items)", "def item_components(self, item_components):\n\n self._item_components = item_components", "def bfMExists(self, key, *items):\n params = [key]\n params += items\n\n return self.execute_command(self.BF_MEXISTS, *params)", "def __init__(self, items={}, strict=True):\n\n self.strict = strict\n self._names = []\n self._items = {}\n\n for name, value in items.iteritems():\n self[name] = value", "def add_item(self, item_to_append):\n self.items.append(item_to_append)", "def ffda(items_list, bin_capacity):\n decreased_list = sorted(items_list,reverse=True) #sorts the items list in a decreasing order\n bins =[]\n for item in decreased_list:\n # foeach item we search if there's an open bin where it can fit\n for bin in bins:\n if bin.total_weight + item <= bin_capacity: #if it fits\n bin.add_item(item) #we add the item in the bin\n break\n else:\n # there is no open bin where the item can fit\n #so we open a new bin and add the item in it\n bin = Bin()\n bin.add_item(item)\n bins.append(bin)\n\n return bins", "def add_new(self, item, key):\n if key in self._items:\n raise DuplicateListHeapItemException(key)\n if len(self._items) >= self._max_limit:\n raise MaxItemLimitReachedException()\n self._items[key] = item\n self._listbox.insert(END, key)", "def add(self, keys: List[Tuple[int, int]], vectors: np.ndarray, weights: List[float], *args, **kwargs):\n pass", "def append(self, item, **data):\n self._items.append(item)\n if data:\n self._data[item] = data", "def extend(self, in_items):\n\n items = self.list\n items.extend(in_items)\n self.value = 
self.__class__.SEPARATOR.join(items)", "def add_item(self, item):\n self.items.append(item)\n self.length += 1", "def NewItems(self) -> _n_1_t_7:", "def carry(self, item):\r\n\r\n # If you can add with the tier,\r\n # you have to check that its viable to carry\r\n if self.add is True:\r\n\r\n # This takes the new item and makes it your current item\r\n if item.size is True:\r\n self.item = item", "def _add_item_by_item(self, item):\n self.item_list[item.call_number] = item", "def add_to_group(self,item):\n self.items.append(item)\n self.n += 1", "def add_items(self,items,form,prefix=''):\n for item in items:\n\n if isinstance(item,list) or isinstance(item,tuple):\n warnings.warn(\"warn_deprecated_inputitem\")\n try:\n item = compatInputItem(*item)\n except:\n pass\n \n if isinstance(item,dict):\n\n itemtype = item.get('itemtype',None)\n \n if itemtype == 'tab':\n self.add_tab(form,prefix=prefix,**item)\n\n elif itemtype == 'group':\n self.add_group(form,prefix=prefix,**item)\n\n else:\n self.add_input(form,prefix=prefix,**item)\n\n form.last = itemtype\n \n elif isinstance(item,QtGui.QWidget):\n # this allows including widgets which are not\n # input fields\n form.addWidget(item)\n form.last = None\n \n else:\n raise ValueError,\"Invalid input item (type %s). Expected a dict or a QWidget.\" % type(item)", "def on_new_items(self, items_params, new_items):\n if new_items:\n self.point_vector_layer.startEditing()\n self.line_vector_layer.startEditing()\n self.polygon_vector_layer.startEditing()\n\n point_data_provider = self.point_vector_layer.dataProvider()\n line_data_provider = self.line_vector_layer.dataProvider()\n polygon_data_provider = self.polygon_vector_layer.dataProvider()\n\n if KEY_ESRI_GEOMETRY_POINT in self.geometries and self.geometries[KEY_ESRI_GEOMETRY_POINT].is_checked:\n point_data_provider.addFeatures(new_items[KEY_POINT])\n if KEY_ESRI_GEOMETRY_MULTI_POINT in self.geometries and \\\n self.geometries[KEY_ESRI_GEOMETRY_MULTI_POINT].is_checked:\n point_data_provider.addFeatures(new_items[KEY_MULTI_POINT])\n if KEY_ESRI_GEOMETRY_POLYLINE in self.geometries and self.geometries[KEY_ESRI_GEOMETRY_POLYLINE].is_checked:\n line_data_provider.addFeatures(new_items[KEY_LINE])\n if KEY_ESRI_GEOMETRY_POLYGON in self.geometries and self.geometries[KEY_ESRI_GEOMETRY_POLYGON].is_checked:\n polygon_data_provider.addFeatures(new_items[KEY_POLYGON])\n\n self.point_vector_layer.commitChanges()\n self.point_vector_layer.updateExtents()\n\n self.line_vector_layer.commitChanges()\n self.line_vector_layer.updateExtents()\n\n self.polygon_vector_layer.commitChanges()\n self.polygon_vector_layer.updateExtents()\n\n self.items[items_params.source][items_params.type_name] += new_items\n self.on_task_complete()", "def build_complex_list_params(self, params, items, label, names):\n for i, item in enumerate(items, 1):\n current_prefix = '%s.%s' % (label, i)\n for key, value in zip(names, item):\n full_key = '%s.%s' % (current_prefix, key)\n params[full_key] = value", "def add_to_inventory(self, item, quantity):\n\t\tincreaseQuantity = None\n\t\taddToDict = True\n\t\tfor key in self.inventoryDictionary:\n\t\t\tif key.name == item.name:\n\t\t\t\taddToDict = False\n\t\t\t\tincreaseQuantity = key\n\t\t\t\tbreak\n\t\t\t\t\n\n\t\t\telse:\n\t\t\t\taddToDict = True\n\t\t\t\t\n\n\t\tif addToDict:\n\t\t\tself.inventoryDictionary[item] = quantity\n\t\telse:\n\t\t\tself.inventoryDictionary[increaseQuantity] += quantity", "def __init__(self, capacity, fillValue = None):\n \n self._items = list() \n self._fillValue 
= fillValue\n self._DEFAULT_CAPACITY = capacity\n self._logicalSize = 0 #as required by exercise 1\n \n \n for count in xrange(capacity):\n self._items.append(self._fillValue)" ]
[ "0.60938436", "0.6008798", "0.5990414", "0.58841145", "0.58353513", "0.5737808", "0.5727663", "0.57137096", "0.565862", "0.56526506", "0.5644674", "0.56005716", "0.5593892", "0.5568903", "0.55669373", "0.55430824", "0.55396664", "0.55076975", "0.5490069", "0.54595166", "0.5393595", "0.53759456", "0.53673685", "0.5350819", "0.5266138", "0.5203471", "0.5194724", "0.51828015", "0.51543474", "0.5128592", "0.5125567", "0.5125567", "0.5125567", "0.5088306", "0.5061213", "0.50440425", "0.502373", "0.50215477", "0.500915", "0.50002164", "0.49990964", "0.49987885", "0.49980742", "0.49851775", "0.49767342", "0.49645162", "0.49587178", "0.49309444", "0.49269536", "0.49266633", "0.49237457", "0.49212986", "0.49121618", "0.49012148", "0.4883398", "0.48796597", "0.48690805", "0.48644003", "0.4859446", "0.48559082", "0.48546824", "0.48456645", "0.48416892", "0.48391378", "0.48382473", "0.48314357", "0.482559", "0.48255074", "0.47848618", "0.47778374", "0.47719705", "0.4770137", "0.4755708", "0.47527725", "0.47387186", "0.47350323", "0.47350124", "0.47345698", "0.47246084", "0.4717348", "0.4716718", "0.47164294", "0.47036642", "0.47030306", "0.46996483", "0.46973035", "0.46951", "0.46900046", "0.46839398", "0.4683565", "0.46826416", "0.4682437", "0.46793184", "0.46786407", "0.4674373", "0.46700686", "0.46678948", "0.46659505", "0.4665089", "0.46565807" ]
0.6239115
0
Adds multiple ``items`` to a Cuckoo Filter ``key`` only if they do not exist yet, allowing the filter to be created with a custom ``capacity`` if it does not yet exist. ``items`` must be provided as a list.
def cfInsertNX(self, key, items, capacity=None, nocreate=None): params = [key] self.appendCapacity(params, capacity) self.appendNoCreate(params, nocreate) self.appendItems(params, items) return self.execute_command(self.CF_INSERTNX, *params)
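A minimal usage sketch for the `cfInsertNX` method shown above. It is illustrative only: `client` is assumed to be an instance of the class this method belongs to, already connected to a Redis server with the Cuckoo Filter module available; the construction line is hypothetical.

# client = SomeRedisBloomClient(host='localhost', port=6379)  # hypothetical setup

# Add two items to cuckoo filter 'cf', letting the command create the filter
# with capacity 1000 if it does not exist yet. Items that are already present
# are not inserted a second time.
client.cfInsertNX('cf', items=['user:1', 'user:2'], capacity=1000)

# With nocreate set, a missing filter is not created automatically; the call
# is expected to fail instead of silently creating 'other-cf'.
client.cfInsertNX('other-cf', items=['user:3'], nocreate=True)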
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cfInsert(self, key, items, capacity=None, nocreate=None):\n params = [key]\n self.appendCapacity(params, capacity)\n self.appendNoCreate(params, nocreate)\n self.appendItems(params, items)\n\n return self.execute_command(self.CF_INSERT, *params)", "def bfInsert(self, key, items, capacity=None, error=None, noCreate=None, expansion=None, noScale=None):\n params = [key]\n self.appendCapacity(params, capacity)\n self.appendError(params, error)\n self.appendExpansion(params, expansion)\n self.appendNoCreate(params, noCreate)\n self.appendNoScale(params, noScale)\n self.appendItems(params, items)\n\n return self.execute_command(self.BF_INSERT, *params)", "def bfAdd(self, key, item):\n params = [key, item]\n \n return self.execute_command(self.BF_ADD, *params)", "def cfAdd(self, key, item):\n params = [key, item]\n \n return self.execute_command(self.CF_ADD, *params)", "def add(self, key):\n if key in self:\n return True\n if not self.filters:\n filter = RedisLocalBloomFilter(\n server=self.server, \n bfkeypreffix = self.FILTER_KEY_FMT % (self.bfkeypreffix, self.filter_count),\n capacity=self.initial_capacity,\n error_rate=self.error_rate * (1.0 - self.ratio))\n self.filter_count += 1\n self.filters.append(filter)\n else:\n filter = self.filters[-1]\n if filter.count >= filter.capacity:\n capacity = filter.capacity * self.scale\n if capacity > MAX_PER_SLICE_SIZE:\n capacity = MAX_PER_SLICE_SIZE\n filter = RedisLocalBloomFilter(\n server=self.server,\n bfkeypreffix = self.FILTER_KEY_FMT % (self.bfkeypreffix, self.filter_count),\n capacity=capacity,\n error_rate=self.error_rate * (1.0 - self.ratio))\n self.filter_count += 1\n self.filters.append(filter)\n if self.max_filters > 0 and len(self.filters) >= self.max_filters:\n f = self.filters[0]\n f.clear()\n del self.filters[0]\n filter.add(key, skip_check=True)\n return False", "def add_items(self, items):\n for item in items:\n self.add(item)", "def pad_keys(items, keys):\n for key in keys:\n if key not in items:\n items[key] = EmptySignature()\n return items", "def add(self, *items):\n for item in items:\n self.unsorted.append(item)\n key = item[0]\n self.index[key] = item\n return self", "def bfMExists(self, key, *items):\n params = [key]\n params += items\n\n return self.execute_command(self.BF_MEXISTS, *params)", "def topkAdd(self, key, *items):\n params = [key]\n params += items\n \n return self.execute_command(self.TOPK_ADD, *params)", "def addItems(c, items):\n\t\tcontainer.containersToSave[c['id_item_container']] = item.inventory.addItems(\n\t\t\titem.inventory.fromStr(c['items']),\n\t\t\titems\n\t\t)", "def update(self, items: Mapping[Any, Any]) -> None:\n self.extend(list(items.values()))\n return", "def append(self, *items: BOSminer) -> None:\n for item in items:\n self.miners[item.ip] = item", "def addToWatchlist(self, items):\n if not isinstance(items, list):\n items = [items]\n\n for item in items:\n if self.onWatchlist(item):\n raise BadRequest(f'\"{item.title}\" is already on the watchlist')\n ratingKey = item.guid.rsplit('/', 1)[-1]\n self.query(f'{self.METADATA}/actions/addToWatchlist?ratingKey={ratingKey}', method=self._session.put)\n return self", "def add(self, item):\n self.num_item += 1\n indexs = self.__get_indexs(item)\n for index in indexs:\n self.filter_bitarray[index] = True", "def append(self, items):\n self.__add__(items)", "def filter_matches_add(self, key, value):\n\t\tif key in self.filter_matches.keys():\n\t\t\tself.filter_matches[key].append(value)\n\t\telse:\n\t\t\tself.filter_matches[key]=[value]", "def 
cache_db_items(self, key, items, item_key='id'):\n db_items = self._extension_data['db_items'].setdefault(key, {})\n for item in items:\n db_items[item[item_key]] = item", "def knapsack(items, capacity):\r\n pass", "def update(self, *items):\n for item in items:\n self.add(item)", "def addItems(*args):", "def put(self, key, item):\n # Do nothing if key is NoneType or item is NoneType\n if (key is None or item is None):\n return\n\n if key in self.cache_data:\n self.LRU = [ci for ci in self.LRU if ci.key != key]\n\n # increase age of all items\n for x in self.LRU:\n x.age += 1\n\n self.cache_data[key] = item\n data = LRUCacheItem(key, item, 0)\n self.LRU.append(data)\n\n # Length is longer than max capacity, make room\n if len(self.cache_data) > self.MAX_ITEMS:\n discard = self.LRU[0]\n for x in self.LRU:\n if x.age > discard.age:\n discard = x\n print(\"DISCARD: {}\".format(discard.key))\n del self.cache_data[discard.key]\n self.LRU.remove(discard)", "def test_neg_operate_append_items_not_a_list(self):\n key = (\"test\", \"demo\", \"list_key\")\n\n list = [\n {\"op\": aerospike.OP_LIST_APPEND_ITEMS, \"bin\": \"int_bin\", \"val\": 7},\n ]\n\n try:\n key, _, bins = self.as_connection.operate(key, list)\n except e.ParamError as exception:\n assert exception.code == -2", "def add_items(self, items):\n for item in items:\n self.addItem(item)\n # end for item in items", "def extend(self, items):\n\t\tfor item in items:\n\t\t\tself.append(item)", "def put(self, key, item):\n if key is not None and item is not None:\n self.cache_data[key] = item\n if key not in self.cache_list:\n self.cache_list.append(key)\n if len(self.cache_data) > self.MAX_ITEMS:\n popped_key = self.cache_list.pop(0)\n print(f\"DISCARD: {popped_key}\")\n del self.cache_data[popped_key]", "def process_items_cfg(self, itemscfg):\n\n for item in itemscfg:\n key = list(item.keys())[0]\n if key not in self.aciitemcfg[\"aci_items\"]:\n self.aciitemcfg[\"aci_items\"][key] = []\n\n self.aciitemcfg[\"aci_items\"][key].append(item[key])", "def put(self, key, item):\n # Do nothing if key is NoneType or item is NoneType\n if (key is None or item is None):\n return\n\n # If Key exists, update Cache Item\n if key in self.cache_data:\n for ci in self.LFU:\n if ci.key is key:\n ci.updateItem(item)\n else:\n # Length will be longer than max capacity, make room\n if len(self.cache_data) == self.MAX_ITEMS:\n\n # Determine discarded cache item\n discard = self.LFU[0]\n for x in self.LFU:\n if x.freq < discard.freq or \\\n x.freq == discard.freq and x.age > discard.age:\n discard = x\n\n # Discard Cache Item\n print(\"DISCARD: {}\".format(discard.key))\n del self.cache_data[discard.key]\n self.LFU.remove(discard)\n\n # Add new Cache Item\n data = LFUCacheItem(key, item, 0, 0)\n self.LFU.append(data)\n\n # increase age of all items\n for x in self.LFU:\n x.age += 1\n\n self.cache_data[key] = item", "def _tattle_add_item(resource, item, resourcesalloweddict, resourcesuseddict):\n\n resourcesuseddict['fungible_locks'][resource].acquire()\n\n # always unlock as we exit...\n try: \n\n # It's already acquired. This is always allowed.\n if item in resourcesuseddict[resource]:\n return\n\n if len(resourcesuseddict[resource]) > resourcesalloweddict[resource]:\n raise InternalRepyError, \"Should not be able to exceed resource count\"\n\n if len(resourcesuseddict[resource]) == resourcesalloweddict[resource]:\n # it's clobberin time!\n raise ResourceExhaustedError(\"Resource '\"+resource+\"' limit exceeded!!\")\n\n # add the item to the list. 
We're done now...\n resourcesuseddict[resource].add(item)\n\n finally:\n resourcesuseddict['fungible_locks'][resource].release()", "def put(self, key, item):\n if key and item:\n if key in self.cache_data.keys():\n self.cache_data[key] = item\n self.stack.append(key)\n else:\n if len(self.cache_data.keys()) < self.MAX_ITEMS:\n self.cache_data[key] = item\n self.stack.append(key)\n elif len(self.cache_data.keys()) == self.MAX_ITEMS:\n k = self.stack.pop(-1)\n self.cache_data.pop(k)\n print(\"DISCARD: {}\".format(k))\n self.cache_data[key] = item\n self.stack.append(key)", "def add(self, *items):", "def add_items(self, items: typing.Iterable[str]) -> None:\n for item in items:\n self.add_item(item)", "def add_items(self, items: Iterable[_T]) -> None:\n for item in items:\n self.add_item(item)", "def add_items(items, cities, img_path, mask_path, mask_postfix):\n\n for c in cities:\n c_items = [name.split('_leftImg8bit.png')[0] for name in\n os.listdir(os.path.join(img_path, c))]\n for it in c_items:\n item = (os.path.join(img_path, c, it + '_leftImg8bit.png'),\n os.path.join(mask_path, c, it + mask_postfix))\n items.append(item)", "def reduce_task(*items):\n merged = dict()\n keys = set().union(*items)\n for key in keys:\n merged[key] = sum([x.get(key, 0) for x in items])\n return merged", "def add(self, key, skip_check=False):\n bits_per_slice = self.bits_per_slice\n hashes = self.make_hashes(key)\n found_all_bits = True\n if self.count > self.capacity:\n raise IndexError(\"RedisLocalBloomFilter is at capacity\")\n pipe = self.server.pipeline(transaction=False) \n sliceIdx = 0\n for k in hashes:\n sliceKey = self.SLICE_KEY_FMT % (self.bfkeypreffix, sliceIdx)\n pipe.setbit(sliceKey, k, 1)\n sliceIdx += 1\n pipeResults = pipe.execute()\n if not skip_check:\n for pipeResult in pipeResults:\n if not pipeResult:\n found_all_bits = False\n break\n if skip_check:\n self.count += 1\n return False\n elif not found_all_bits:\n self.count += 1\n return False\n else:\n return True", "def __init__(self, items: List[T], min_freq: int = 1):\n counter_ = Counter(items)\n unique_items = [x for x, freq in counter_.items() if freq >= min_freq]\n self._dict = {item: i + 1 for i, item in enumerate(unique_items)}\n self._items: List[Union[str, T]] = [\"UNK\"]\n self._items.extend(unique_items)", "def add(self, items):\n if isinstance(items, list):\n self.items.extend(items)\n else:\n self.items.append(items)", "def __init__(self, items = None):\n self._items = sorted(set(items)) if items is not None else []", "def additemtoinventory(item):\n global ITEM_COUNT\n for i in range(0, 10): # first 10 items are weapons, (this code sux, need a better way of doing this)\n if ITEMTYPES[ITEM_LIST[ZERO_BASE_PLYR_POS]] == ITEMTYPES[i]: \n cur_weapon_strength = WEAPON_STRENGTHS[ITEMS[0]]\n new_weapon_strength = WEAPON_STRENGTHS[ITEMTYPES[i]]\n if new_weapon_strength > cur_weapon_strength:\n change_weapon(ITEMTYPES[i])\n ITEMS[0] = ITEMTYPES[i] # 'overwrite' the main weapon with the new one\n remove_item_from_map()\n return # exit here if item is weapon\n else:\n remove_item_from_map()\n return # remove the inferior weapon from the map and return\n ITEMS.append(ITEMTYPES[item])\n ITEM_COUNT = len(ITEMS)\n remove_item_from_map()", "def apply_filters(filters, items):\n return scom.apply_filters(filters, items)", "def add_item(self, item):\r\n bag_res = consts.BAG_PUT_FAILED\r\n for i in range(len(self._items)):\r\n res = self.put_item_at(i, item, allow_switch=False)\r\n if res == consts.PUT_FORBIDDEN:\r\n return 
consts.BAG_PUT_FAILED\r\n if res == consts.PUT_SWITCH or \\\r\n res == consts.PUT_INTO_EMPTY or \\\r\n res == consts.PUT_MERGE_TOTALLY:\r\n return consts.BAG_PUT_TOTALLY\r\n if res == consts.PUT_MERGE_PARTIALLY:\r\n bag_res = consts.BAG_PUT_PARTIALLY\r\n continue\r\n if res == consts.PUT_MERGE_FAILED or \\\r\n res == consts.PUT_SWITCH_FORBIDDEN:\r\n continue\r\n return bag_res", "def filter_0_items(inventory):\r\n\r\n\tnew_list = [] # create an empty dictionary\r\n\tfor key in inventory: # iterate through the list\r\n\t\tif inventory[key] == 0: # check for key = 0, if it is then\r\n\t\t\tnew_list.append(key) # add it to a new list\r\n\r\n\tfor keys in new_list:#iterting through new_list\r\n\t\tdel inventory[keys]\r\n\r\n\treturn inventory", "def __init__(self, items=None):\n self._items = sorted(set(items)) if items is not None else []", "def bfMAdd(self, key, *items):\n params = [key]\n params += items\n\n return self.execute_command(self.BF_MADD, *params)", "def build_heap(self, items):\n for key in items:\n self.insert(key)", "def cfAddNX(self, key, item):\n params = [key, item]\n \n return self.execute_command(self.CF_ADDNX, *params)", "async def setMany(self, items):\n\n # is caching enabled?\n if CACHE_DISABLED:\n return None\n\n # make sure pool exists\n init()\n\n # check the input\n if not isinstance(items, list):\n raise Exception('Invalid input type provided')\n for item in items:\n if 'key' not in item:\n raise Exception('Missing key in item')\n if 'val' not in item:\n raise Exception('Missing value in item')\n if any(k not in item['key'] for k in self._keys):\n raise Exception('Missing field in key')\n\n # store in db\n async with pool.put(\n config.CACHE_ENDPOINT + '/' + config.CACHE_PREFIX + self._name,\n json=[{'key': self.buildKey(item['key']), 'value': item['val']} for item in items],\n headers={\n \"Accept\": \"application/json\",\n }\n ) as resp:\n\n if resp.status == 200:\n # success\n return True\n else:\n # fail\n raise Exception( await resp.text() )", "def add_item(self, new_item):\n [self.item_list.append(new_item) for item in self.item_list\n if new_item not in self.item_list]", "def _additems(self, w,h):\n for idx in range(len(self.data['items'])):\n default={\n 'color': self.data['itemscolor'],\n 'textscale': self.data['itemsscale'],\n 'textfont': self.data['textfont'],\n 'width': w-(self.data['margin'][0]*2.),\n }\n self.data['items'][idx].update(default)\n self.addItem(idx, **self.data['items'][idx])", "def ffa(items_list, bin_capacity):\n bins =[]\n randomised_np_list = np.random.permutation(items_list) # list containing initial items in a random order\n items_list = randomised_np_list.tolist() \n \n for item in items_list:\n # foeach item we search if there's an open bin where it can fit\n for bin in bins:\n if bin.total_weight + item <= bin_capacity: #if it fits\n bin.add_item(item) #we add the item in the bin\n break\n else:\n # there is no open bin where the item can fit\n #so we open a new bin and add the item in it\n bin = Bin()\n bin.add_item(item)\n bins.append(bin)\n\n return bins", "def add_toolbar_items(self, *toolbar_items):\n self.items += [self._map_item(item) for item in toolbar_items]", "def add_item(dct,item):\r\n if item not in dct[0]:\r\n print \"itemNotFound \",str(item)\r\n return False\r\n\r\n num=len(item)\r\n if num in dct:\r\n if item in dct[num]:\r\n return False\r\n else:\r\n dct[num].append(item)\r\n return True\r\n else:\r\n dct[num]=[item]\r\n return True", "def Filter(self, name, items):\n self.changed = True\n if name in 
self.ticker_lists:\n self.ticker_lists[name] = [\n t for t in self.ticker_lists[name] if t not in items]", "def add(self, item):\n self.update(set([item]))", "def put(self, key, item):\n if key or item:\n self.cache_data[key] = item\n if len(self.cache_data) > BaseCaching.MAX_ITEMS:\n print(\"DISCARD: {}\".format(self.last))\n del self.cache_data[self.last]\n self.last = key", "def multi_set(self, items, no_update_log=False):\n opts = (no_update_log and TyrantProtocol.RDBMONOULOG or 0)\n lst = []\n for k, v in items.iteritems():\n if isinstance(v, (dict)):\n new_v = []\n for kk, vv in v.items():\n new_v.append(kk)\n new_v.append(vv)\n v = new_v\n if isinstance(v, (list, tuple)):\n assert self.separator, \"Separator is not set\"\n\n v = self.separator.join(v)\n lst.extend((k, v))\n\n wait(self.proto.misc(\"putlist\", lst, opts))", "def put(self, key, item):\n if key is not None and item is not None:\n # modify the time and change the next newer value\n self.timesKey[key] = self.time\n self.time += 1\n\n # add the new item\n self.cache_data[key] = item\n\n if len(self.cache_data) > BaseCaching.MAX_ITEMS:\n discard_key = None\n newer = self.time - 2\n\n for _key, _value in self.timesKey.items():\n if newer == _value:\n discard_key = _key\n break\n\n # del key in time and cache data\n del self.cache_data[discard_key]\n del self.timesKey[discard_key]\n\n print(\"DISCARD: {}\".format(discard_key))", "def add_to_items(items, name, size, price):\n index = items_contains_name(items, name)\n if index == 0:\n temp = {'name': name, 'size': size, 'count': 1, 'price': price}\n items.append(temp)\n else:\n items[index]['count'] = items[index]['count'] + 1\n return items", "def filter(self, items, relative=True):\n if relative: items = self.items[items]\n self.items = np.intersect1d(self.items, items)", "def add_items_quantity_not_duplicates(request):\n all_items_no_duplicates = []\n\n for loop_index, item in enumerate(all_shopping_items(request)):\n item_dict = {\n 'item': item.item,\n 'quantity': item.quantity,\n 'category': item.category.category,\n 'id': item.id,\n 'user': {\n 'username': item.user.first_name\n }\n }\n\n if loop_index == 0:\n all_items_no_duplicates.append(item_dict)\n else:\n item_is_not_a_copy = True\n for list_item in all_items_no_duplicates:\n if list_item['item'] == item.item:\n item_is_not_a_copy = False\n list_item['quantity'] += item.quantity\n list_item['user']['username'] += ' / ' + item.user.first_name\n if item_is_not_a_copy:\n all_items_no_duplicates.append(item_dict)\n\n return all_items_no_duplicates", "def add(self, Filter=None, FilterItems=None, MaxWaitTime=None, Source=None, Stats=None):\r\n\t\treturn self._create(locals())", "def _stash_items(sender, **kwargs):\n json_values = kwargs[\"json_values\"]\n stash = kwargs[\"stash\"]\n\n if \"items\" not in json_values:\n return\n\n json_items = json_values[\"items\"]\n\n stash[\"updated_items\"] = []\n stash[\"new_items\"] = []\n\n # create the items\n for item in json_items:\n # put the item in either new or updated items\n if \"id\" in item:\n stash[\"updated_items\"].append(ItemStash(item))\n else:\n stash[\"new_items\"].append(ItemStash(item))", "def addCollectionItems_patch(self, target, items):\n if not items: return None\n auth_token = self.obtainSecurityToken()\n target_type = target._type\n item_types = [item._type for item in items]\n item_type = item_types[0]\n # Fix for _type in outliers\n outliers = [item for item in item_types if item != item_type]\n if outliers:\n raise 
RallyRESTAPIError(\"addCollectionItems: all items must be of the same type\")\n resource = \"%s/%s/%ss/add\" % (target_type, target.oid, item_type)\n collection_url = '%s/%s?fetch=Name&key=%s' % (self.service_url, resource, auth_token)\n payload = {\"CollectionItems\":[{'_ref' : \"%s/%s\" % (str(item._type), str(item.oid))}\n for item in items]}\n response = self.session.post(collection_url, data=json.dumps(payload), headers=RALLY_REST_HEADERS)\n context = self.contextHelper.currentContext()\n response = RallyRESTResponse(self.session, context, resource, response, \"shell\", 0)\n added_items = [str(item[u'Name']) for item in response.data[u'Results']]\n return response, added_items", "def add_items(self, items):\n\n self.model.add_items(items)\n self.refreshed.emit()", "def append(self, item):\n # FIXME: this is only append if the key isn't already present\n key, value = item\n self._main[key] = value", "def __init__(self, capacity, fillValue=None):\r\n self._items = list()\r\n for count in range(capacity):\r\n self._items.append(fillValue)", "def mask(self, item_or_items: Union[str, list]) -> None:\n if isinstance(item_or_items, str):\n self._masked_items.add(item_or_items)\n elif isinstance(item_or_items, list):\n for item in item_or_items:\n assert isinstance(item, str)\n self._masked_items.add(item)", "def cmsIncrBy(self, key, items, increments):\n params = [key]\n self.appendItemsAndIncrements(params, items, increments)\n \n return self.execute_command(self.CMS_INCRBY, *params)", "def process_input_items(args):\n return dict(sum([Counter({sku: value * SCORES[k] for sku, value in\n Counter(args[k].split(',')).items()}) or Counter() for k in\n set(SCORES.keys()) & set(args.keys())], Counter()))", "def put(self, key, item):\n if key is None or item is None:\n return\n if key in self.key_tracker.keys():\n self.key_tracker.pop(key)\n if len(self.key_tracker) >= BaseCaching.MAX_ITEMS:\n x = self.most_recent_use_key()\n print(\"DISCARD: {}\".format(x))\n self.key_tracker.pop(x)\n self.cache_data.pop(x)\n self.cache_data.update({key: item})\n self.key_tracker.update({key: self.count})\n self.count += 1", "def evict_or_add (self, item):", "def test_sample_container_add_exceeds_limit(self):\n self.assertEqual(self.container._data, defaultdict(list))\n\n retval = self.container.add(\"key1\", [\"1\", \"2\", \"3\", ], 2)\n\n self.assertEqual(retval, [\"1\", \"2\", \"3\", ])\n self.assertEqual([], self.container._data[\"key1\"])", "def add_item(item):\n # Check first if the item already exists in the inventory\n for i in get_inventory():\n if i['name'] == item['name']:\n print(f\"[ERROR] item with name {i['name']} already exists\")\n break\n else:\n print(f'[INFO] Adding item {item}')\n INVENTORY.append(item)\n # mongo.collection().insert_one(item)", "def do_add(self, args):\n\t\tif len(args) == 0:\n\t\t\tself.parent.printErr(\"Missing argument(s)\")\n\t\t\treturn False\n\t\tdef try_add(ftype, fvalue):\n\t\t\tif ftype == \"has\" and value not in self.FILTER_HAS_ARGUMENTS:\n\t\t\t\tself.parent.printErr(\"Could not add '%s': Invalid filter argument\" % (fvalue))\n\t\t\t\treturn False\n\t\t\telif ftype not in self.FILTER_ARGUMENTS:\n\t\t\t\tself.parent.printErr(\"Could not add '%s': Invalid filter\" % (ftype))\n\t\t\t\treturn False\n\n\t\t\ttry:\n\t\t\t\tif value not in self.parent.filter[ftype]:\n\t\t\t\t\tself.parent.filter[ftype].append(fvalue)\n\t\t\t\telse:\n\t\t\t\t\tself.parent.printErr(\"Could not add '%s': Item already in filter\" % (fvalue))\n\t\t\t\t\treturn False\n\t\t\texcept 
KeyError:\n\t\t\t\tself.parent.filter[ftype] = [fvalue]\n\n\t\t\tself.apply_filter()\n\t\t\treturn True\n\n\t\targs = args.split()\n\t\tftype = args[0]\n\t\tvalues = args[1:]\n\n\t\tif len(values) == 0:\n\t\t\tself.parent.printErr(\"Could not add '%s': Filter expects arguments\" % (ftype))\n\n\t\tfor value in values:\n\t\t\ttry_add(ftype, value)\n\n\t\tself._update_prompts()", "def add_new(self, item, key):\n if key in self._items:\n raise DuplicateListHeapItemException(key)\n if len(self._items) >= self._max_limit:\n raise MaxItemLimitReachedException()\n self._items[key] = item\n self._listbox.insert(END, key)", "def addItem(*args):", "def addItem(*args):", "def addItem(*args):", "def add_to_queue(self, items):\n\n for i in items:\n self.r.rpush(self.joblist, i)", "def empty_filter(item, *args, **kwargs):\n return True", "def ffda(items_list, bin_capacity):\n decreased_list = sorted(items_list,reverse=True) #sorts the items list in a decreasing order\n bins =[]\n for item in decreased_list:\n # foeach item we search if there's an open bin where it can fit\n for bin in bins:\n if bin.total_weight + item <= bin_capacity: #if it fits\n bin.add_item(item) #we add the item in the bin\n break\n else:\n # there is no open bin where the item can fit\n #so we open a new bin and add the item in it\n bin = Bin()\n bin.add_item(item)\n bins.append(bin)\n\n return bins", "def add(self, items):\n logging.debug(\"Adding items to %s: %s\" % (self.directory, items))\n files = [i for i in items if os.path.isfile(i)]\n directories = [i for i in items if os.path.isdir(i)]\n for file in files:\n shared_file = os.path.join(self.directory, os.path.basename(file))\n self._link_files(file, shared_file)\n for directory in directories:\n try:\n self._duplicate_as_linked_tree(directory)\n except FileExistsError as e:\n logging.debug(\"Directory %s already exists! 
Going to remove and re-add it!\" % e.filename)\n # It is already there, either by having been added before or within an update\n self._unshare_linked_tree(e.filename)\n self._duplicate_as_linked_tree(directory)\n for unhandled_item in set(items) - set(directories).union(set(files)):\n logging.error(\"Did not handle input item '%s'\" % unhandled_item)", "def process_new_items(self, new_items):\n self.items_hat = np.hstack([self.items_hat, new_items])", "def __init__(self, items=None):\n\n if items is None:\n items = []\n self.set = dict((item, []) for item in items)\n self.heap = list(self.set.keys())\n hpq.heapify(self.heap)\n self.counter = itertools.count()", "def add(self, key):\n\t\t#super(CountingBloomFilter, self).add(key)\n\t\t#super(CountingBloomFilter, self).generateStats()\n\t\tfor i in self.getBitArrayIndices(key):\n\t\t\tself.ba[i] += 1\n\t\tself.n += 1", "def items(self, value):\n if value is None:\n self._items = None\n self.active = None\n else:\n self._items = value\n self.active = [True] * len(self._items)", "def add_item(items, coder, tag, start, n):\n if start is not None:\n # close opened items\n add_zero_item(items, coder, tag, start) # default tag\n items[tag][coder].append(item(b=start, l=n-start, v=1)) # found tag", "def callback_extend_list(item):\n fisher_contingency_pval_parallel_insertion.extend(item)", "def groupInputItem(name,items=[],**kargs):\n kargs['name'] = name\n kargs['items'] = items\n kargs['itemtype'] = 'group'\n return kargs", "def mixed_train_items(train_items: List[JSONDict]) -> List[JSONDict]:\n train_items[1][\"categoryid\"] = 9107252648\n return train_items", "def cloudflare_waf_ip_list_item_create_request(self, list_id: str, items: list) -> Dict[str, Any]:\n return self._http_request(\n method='POST',\n url_suffix=f'accounts/{self.account_id}/rules/lists/{list_id}/items',\n json_data=items)", "def cloudflare_waf_ip_list_item_create_command(client: Client, args: Dict[str, Any]) -> CommandResults:\n\n list_id = args['list_id']\n items = [{'ip': item} for item in argToList(args.get('items'))]\n\n response = client.cloudflare_waf_ip_list_item_create_request(list_id, items)\n output = response['result']\n\n return CommandResults(\n readable_output=f'Create items in the IP List {list_id} is executing',\n raw_response=output)", "def __init__(self, capacity, fillValue = None):\n \n self._items = list() \n self._fillValue = fillValue\n self._DEFAULT_CAPACITY = capacity\n self._logicalSize = 0 #as required by exercise 1\n \n \n for count in xrange(capacity):\n self._items.append(self._fillValue)", "def __init__(self, items=[]):\n self.set = dict((item, True) for item in items)\n self.heap = self.set.keys()\n heapq.heapify(self.heap)", "def put_in(self, items):\n try:\n if items[0] not in self.items:\n print(\"you don't have a \" + str(items[0]))\n return self\n if items[2] not in self.items:\n print(\"you don't have a \" + str(items[1]))\n return self\n except IndexError:\n print('put ' + str(items[0]) + ' where')\n except TypeError:\n print('you don\\'t have anything')\n return self\n # implement", "def add_item(key, obj, dst):\n\n if key not in dst:\n dst[key] = []\n dst[key].append(obj)", "def exclusively(self, keys, lst=None):\n minimal = self.minimal() if lst is None else lst\n\n def make_exclusive(d, keys):\n dct = {}\n for k in keys:\n if k in d:\n dct[k] = d[k]\n else:\n dct[k] = -999\n return dct\n\n lst = []\n for d in minimal:\n dct = make_exclusive(d, keys)\n if len(dct) > 0:\n lst.append(dct)\n return lst", "def take(self, pitem):\n\n #if 
adding one more item is exceeding the max item carry , say no to add \n if self.max_items <= len(self.items):\n \n print('The player item list has been exceeded the maximum number of \\n items the player can carry')\n\n #if not add the item to the list \n else:\n self.items.append(pitem)", "def setitems(self, items):\n self.clear()\n # FIXME: this allows you to pass in an OrderedDict as well :-)\n self.update(items)" ]
[ "0.59240794", "0.5758097", "0.56953305", "0.566628", "0.5583825", "0.54426944", "0.5348594", "0.5331868", "0.53105265", "0.5299334", "0.5295137", "0.5279402", "0.5263114", "0.52315265", "0.5224183", "0.52107865", "0.52102387", "0.5190047", "0.51768357", "0.5145545", "0.513705", "0.51137596", "0.50994134", "0.50900924", "0.50776726", "0.5076613", "0.50726473", "0.5050566", "0.5025415", "0.5023811", "0.50232494", "0.5022723", "0.5011198", "0.5005076", "0.49994275", "0.49904263", "0.49749687", "0.49534807", "0.4938751", "0.4936802", "0.493511", "0.49311227", "0.49268517", "0.49216285", "0.49209747", "0.49172032", "0.49040172", "0.48956326", "0.48818558", "0.4877299", "0.48647732", "0.48615322", "0.48382685", "0.48170716", "0.4807459", "0.48073956", "0.47864616", "0.47790033", "0.477379", "0.47396028", "0.47388974", "0.473694", "0.47318602", "0.47175696", "0.47161776", "0.471606", "0.47116932", "0.4709916", "0.47096804", "0.4705211", "0.4693505", "0.46881467", "0.46734202", "0.46638328", "0.46574762", "0.46399054", "0.46391815", "0.46391815", "0.46391815", "0.46296376", "0.46263653", "0.46239787", "0.46136832", "0.45975605", "0.4595584", "0.45898363", "0.45887256", "0.45820308", "0.45812148", "0.45736483", "0.45609486", "0.4555606", "0.45499036", "0.4541962", "0.45357153", "0.45297363", "0.4527656", "0.4521267", "0.45165744", "0.4494046" ]
0.5301296
9
Checks whether an ``item`` exists in Cuckoo Filter ``key``.
def cfExists(self, key, item): params = [key, item] return self.execute_command(self.CF_EXISTS, *params)
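An illustrative membership check using the `cfExists` method above, under the same assumption of a pre-built `client` object. Like other approximate filters, a cuckoo filter can report false positives but not false negatives.

# Truthy reply: 'user:1' may be in the filter; falsy reply: it is definitely absent.
if client.cfExists('cf', 'user:1'):
    print('user:1 is probably in the filter')
else:
    print('user:1 is definitely not in the filter')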
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __contains__(self, item, key):\n ndx = self._findPostion(key)\n return ndx is not None", "def bfExists(self, key, item):\n params = [key, item]\n \n return self.execute_command(self.BF_EXISTS, *params)", "def item_has_key(self, item, key):\n if key in self._reverse_store[item]:\n return True\n else:\n return False", "def contains(self, item):\n return self._dict.has_key(item)\n\n self.__contains__ = contains", "def __contains__(self, item):\n try:\n self[item]\n return True\n except KeyError:\n return False", "def has_item(self, item):\n return item in self.cache", "def has_item(self, item):\n return item in self.set", "def has_item(self, item):\n return item in self.set", "def item_exists(item_id):\n return item_id in all_items", "def has_item(self, usage_key):\r\n try:\r\n self._find_one(usage_key)\r\n return True\r\n except ItemNotFoundError:\r\n return False", "def __contains__(self, item):\n\n if self[item]:\n return True\n return False", "def exista(self, item):\n if item not in self._items:\n return False\n for x in self._items:\n if x == item:\n return True", "def __contains__(self, item):\n try:\n hdu = self[item] # noqa\n return True\n except Exception:\n return False", "def __contains__(self, item: object) -> bool:\n val = conv_kv(item) # type: ignore\n for fixup in self._mapping._fixup.values():\n if fixup.value == val:\n return True\n return False", "def contains(self, item):\n if isinstance(item, dict):\n return _(item).all(lambda key: self._.get(key) == item[key])\n return item in self", "def __contains__(self, key):\n return self.keys[self._linear_probe(key, \"contains\")] is not None", "def __contains__(self, item):\n return item in self._data", "def has(self, key):", "def has(cls, item):\n return item in cls.values()", "def __contains__(self, key):\n found = True\n try:\n self.__getitem__(key)\n except:\n found = False\n return found", "def __contains__(self, item):\n\t\treturn item in self.__dict__.values()", "def array_key_exists(name, item):\n return item.has_key(name);", "def has_key(self, key):\n return self.contains(key)", "def has_item(self, usage_key):\r\n return usage_key in self.modules[usage_key.course_key]", "def __contains__(self, key):\n return self._lookup(key).value is not None", "def _has(self, key):\n path = self._get_key_path(key)\n return exists(path)", "def contains(self, key: int) -> bool:\n lv1, lv2 = self.hashing(key)\n \n for item in self.cont[lv1][lv2]:\n if item==key:\n return True\n \n return False", "def __contains__(self, item):\n return item in self.__keys or item in self.__vals", "def has_key(self, key):\n return key in self", "def __contains__(self, key):\n for f in reversed(self.filters):\n if key in f:\n return True\n return False", "def has_item(self, usage_key):\r\n store = self._get_modulestore_for_courseid(usage_key.course_key)\r\n return store.has_item(usage_key)", "def __contains__(self, key):\n try:\n if self[key]:\n return True\n except KeyError:\n return False", "def contains(self, item):\n for h_num in xrange(self.k):\n val = self.hash_value(item, h_num)\n if not self.arr[val]:\n return False\n else:\n return True", "def contains(self, key):\n # TODO: Check if the given key exists in a bucket\n hash_key = self._bucket_index(key) # Gets the index of the key\n if self.buckets[hash_key].is_empty() is False: # If the hask_key exists\n for key_value_pair in self.buckets[hash_key]: # Iteratre through the value pair\n if key_value_pair[0] is key: # If the key matches\n return True\n return False", "def contains(self, key):\n 
try:\n self.keyvaluepair_set.get(key=key)\n return True\n except KeyValuePair.DoesNotExist:\n return False", "def __contains__(self, item):\n return item.upper() in self.keys", "def contains(self, key):\n if key in self.key_list:\n return True\n return False", "def key_exists(key, value):\n\n response = table.query(\n KeyConditionExpression = Key(key).eq(value)\n )\n\n if response['Items']:\n return True\n\n return False", "def __contains__(self, item):\n return item in self._fetch()", "def contains(self, key: int) -> bool:\n return self._find_key(key, find_empty=False) >= 0", "def hasItem(self, path): \n\t\treturn (path in self.items and self.items[path])", "def existsitem(self,item,listwidgets):\n exists = listwidgets.findItems(item, Qt.MatchExactly)\n if exists:\n return True\n else:\n return False", "def item_exists(self, call_number):\n return call_number in self.item_list.keys()", "def has_item(self, item_name):\n if item_name in self.item_list:\n return True\n return False", "async def contains(self, key: str) -> bool:", "def __contains__(self, key):\n try:\n self._get(key)\n return True\n except Exception:\n return False", "def __contains__(self, key):\n self._remove_expired()\n\n log.debug(\"__contains__: {}\".format(key))\n return key in self._d", "def has(self, item):\n return item in self.mut", "def __contains__(self, key):\n return key in self._index", "def __contains__(self, item):\n # return item in self._items\n # leverage improved performance index() function\n try:\n self.index(item)\n return True\n except ValueError:\n return False", "def contains(self, key):\n\n return key in self.keys()", "def __contains__(self, key, *args, **kwargs):\n if key in self._list(*args, **kwargs):\n return True\n return False", "def containsKey(self, key):\n return get(key) != None", "def has(self, key):\n return self.collection.find_one({'_id': key}) is not None", "def contains(self, key: int) -> bool:\n if key in self.d:\n return True\n else:\n return False", "def __contains__(self, key):\n return (key in self.index)", "def contains(self, key: int) -> bool:\n _hash = self.get_hash(key)\n return self.bucket_array[_hash].exist(key)", "def has(self, key):\n return key in self._store", "def contains(self, key: int) -> bool:\n return key in self.res", "def __contains__(self, key):\n return self.contains(key)", "def has(self, key):\n return False", "def __contains__(self, item: Any) -> bool:\n return item in self.item_to_index", "def contains(self, key):\n h = self.hash_value(key)\n return key in self.hs[h]", "def has_item(self, item: Inventory) -> bool:\n return (item.pk,) in self.orderitem_set.values_list('item')", "def __contains__(self, key):\n return key in self.keys", "def tag_key_exists(self, key):\n return key in self.map", "def __contains__(self, item: object) -> bool:\n if isinstance(item, tuple) and len(item) == 2:\n var, value = item\n else:\n return False\n if isinstance(var, str):\n if var and var[0] == '$':\n var = var[1:]\n try:\n return self._mapping._fixup[var.casefold()].value == conv_kv(value)\n except KeyError:\n return False\n return False", "def exists(self, key, predicate=None):\n with self._cv:\n return self._has(key, predicate)", "def __contains__(self, key):\n\t\treturn key in self.cache", "def __contains__(self, key):\n keys = list(self._indexer(key))\n if len(keys) == 1:\n return keys[0] in self._data\n return [k in self._data for k in keys]", "def __contains__(self, key):\n\t\treturn key in self.__dStore", "def has_key(self, key):\n return key in self.db", "def 
__contains__(self, key):\n try:\n self[key]\n return True\n except:\n return False", "def __contains__(self, key):\n\n return key in self.keys_set", "def __contains__(self, item: object) -> bool:\n return item in self._used", "def _item_exists(self, item):\n cursor = self.conn.cursor()\n cursor.execute(\n 'SELECT * FROM Members where first_name = ?;',\n (item['first_name'])\n )\n return True if len(cursor.fetchall()) else False", "def contains(name, key):\n\n return get_component(CachingPackage.COMPONENT_NAME).contains(name, key)", "def contains(self, key):\n hashkey = self.hash(key)\n return key in self.table[hashkey]", "def __contains__(self, item):\n return item in self._index_map", "def has(self, key):\n return self.data.get(key, None) is not None", "def __contains__(self, key):\n return hasattr(self, key)", "def contains(self, key):\n return self.__db.contains(key)", "def contains(bank, key):\n try:\n c_key = \"{}/{}\".format(bank, key or \"\")\n _, value = api.kv.get(c_key, keys=True)\n except Exception as exc: # pylint: disable=broad-except\n raise SaltCacheError(f\"There was an error getting the key, {c_key}: {exc}\")\n return value is not None", "def contains(self, key):\n bus=key%100000\n pos=key//100000\n return self.li[bus][pos]==1", "def __contains__(self, item):\n return self.contains(item)", "def __contains__(self, key):\n return key in self._get_storage()", "def isin(self, item):\n return self.get(item) is not None", "def search_db(self, key, item):\n db = self.check_db()\n data = [record for record in db if record[key] == item]\n if data:\n return data[0]\n else:\n return False", "def check_item(self, item, key, db):\n data = [record for record in db if record[key] == item]\n return data", "def contains_key(self, key):\r\n\t\t# call the linked list contains() method for each bucket\r\n\t\tfor i in self._buckets:\r\n\t\t\tif i.contains(key):\r\n\t\t\t\treturn True\r\n\t\treturn False", "def __contains__(self, item):\n if item == self.profile_id:\n return True", "def __contains__(self, item):\n return self.settings.has(item)", "async def has(self, category: str, key: str, check_category: bool = False) -> bool:\n stmt = select(func.count()).select_from(self.model)\n\n if self.category_field:\n stmt = stmt.where(getattr(self.model, self.category_field) == category)\n\n if self.key_field:\n stmt = stmt.where(getattr(self.model, self.key_field) == key)\n\n result = self.session.execute(stmt)\n count = result.scalars().one()\n if count > 0:\n return True\n\n if not check_category:\n return False\n\n # Check if the category exists.\n if not self.category_field:\n return False\n\n stmt = select(func.count()).select_from(self.model).where(getattr(self.model, self.category_field) == category)\n result = self.session.execute(stmt)\n count = result.scalars().one()\n if count > 0:\n return False\n else:\n raise KeyError", "def has(self, key):\n return os.path.isfile(self._filename(key))", "def __contains__(self, item): # __iter__ would do this job by itself\n return (item in self.__values)", "def containskey(self, essid, key):\n return self.cli.essids.containskey(essid, key)", "def key_exists(self, bucket, key):\n\n return len(list(self._s3.Bucket(bucket).objects.filter(Prefix=key))) > 0", "def __contains__(self, key):\n with SessionContext(self.SessionClass) as session:\n q = session.query(PAW2_DBObject)\n return q.filter(PAW2_DBObject.key == key).count() == 1", "def has_key(self, key):\n return key.lower() in self._data", "async def _exists(self, key):\n return key in 
SimpleMemoryBackend._cache" ]
[ "0.77499324", "0.746354", "0.74272346", "0.73767465", "0.7316772", "0.7260363", "0.72131723", "0.72131723", "0.72055244", "0.7191276", "0.70799226", "0.7073482", "0.70224476", "0.701648", "0.6999903", "0.69994754", "0.6997449", "0.6991005", "0.6959211", "0.69314", "0.69124043", "0.690909", "0.6902491", "0.68843997", "0.688126", "0.6871489", "0.6857937", "0.6829655", "0.68289304", "0.6809889", "0.68098724", "0.6769976", "0.6767626", "0.6759754", "0.67501587", "0.6733152", "0.6731822", "0.67198735", "0.67130524", "0.67077076", "0.6705284", "0.6701175", "0.66973454", "0.6689328", "0.6681763", "0.6671351", "0.6670268", "0.6660396", "0.6653654", "0.6652041", "0.66439664", "0.6642412", "0.6632514", "0.66250247", "0.66158", "0.6603687", "0.6603452", "0.6596498", "0.6588206", "0.657472", "0.6573659", "0.65699804", "0.6565092", "0.656434", "0.6563222", "0.6557312", "0.6546339", "0.6527261", "0.6506742", "0.6499182", "0.6497926", "0.64956015", "0.6487531", "0.64868724", "0.6462194", "0.645284", "0.6452097", "0.64492923", "0.6447611", "0.6443695", "0.6441498", "0.6440193", "0.64328897", "0.6426662", "0.6420256", "0.6402806", "0.6402674", "0.6398435", "0.6387584", "0.6385393", "0.6362944", "0.6360352", "0.63542485", "0.6350593", "0.63423306", "0.63287866", "0.6324", "0.63191134", "0.63010484", "0.63005126" ]
0.76099366
1
Deletes ``item`` from ``key``.
def cfDel(self, key, item): params = [key, item] return self.execute_command(self.CF_DEL, *params)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def remove_item(self, key, item):\n self[key].remove(item)\n self._remove_reverse_mapping(item, key)", "def __delitem__(self, key):\n self.delete(key)", "def __delitem__(self, key):\n self.delete(key)", "def __delitem__(self, key):\n self.f_remove(key)", "def delete_item(self, key):\n deleted_slot = self.count_hash(key, len(self.slots))\n\n if self.slots[deleted_slot] == key:\n self.slots[deleted_slot] = None\n self.data[deleted_slot] = None\n elif isinstance(self.slots[deleted_slot], tuple):\n index_tuple = (self.slots[deleted_slot].index(key))\n list_slot = list(self.slots[deleted_slot])\n list_data = list(self.data[deleted_slot])\n list_slot.pop(index_tuple)\n list_data.pop(index_tuple)\n self.slots[deleted_slot] = tuple(list_slot)\n self.data[deleted_slot] = tuple(list_data)", "def __delitem__(self, key: T) -> None:\n self.delete(key)", "def __delitem__(self, key):\n with self.__lock:\n log.debug(\"__delitem__: {}\".format(key))\n del self._d[key]", "def __delitem__(self, key):\n del self.list[key]", "def __delitem__(self, key: tuple):\n s, a = key\n del self.store[s][a]", "def __delitem__(self, key):\r\n self.client.delete(id=key, ignore=[404], **self.kwargs)", "def __delitem__(self, key, *args, **kwargs):\n self._del(key, *args, **kwargs)", "def remove(self, item):\n del self._dict[item]", "def __delitem__(self, key):\n try:\n kvp = self.keyvaluepair_set.get(key=key)\n except KeyValuePair.DoesNotExist:\n raise KeyError\n else:\n kvp.delete()", "def __delitem__(self, key):\r\n key = self.key(key)\r\n if key in self.data_with_same_key:\r\n if len(self.data_with_same_key[key]) == 1:\r\n self.data[key] = self.data_with_same_key.pop(key)[0]\r\n else:\r\n self.data[key] = self.data_with_same_key[key].pop(-1)\r\n else:\r\n del self.data[key]", "def __delitem__(self, key):\n del self._data[key]", "def del_item(self, item):\n index = self.board[item.pos[0]][item.pos[1]].index(item)\n del self.board[item.pos[0]][item.pos[1]][index]", "def __delitem__(self, key):\n self.deleteAttributes([key])", "def remove_item(self, item):\r\n\r\n for key in self._inner_dict:\r\n if item in self._inner_dict[key]:\r\n idx = self._inner_dict[key].index(item)\r\n del self._inner_dict[key][idx]", "def __delitem__(self, key):\n pass", "def __delitem__(self, key):\n pass", "def __delitem__(self, key):\n del self._get_storage()[key]", "def __delitem__(self, key):\n\t\tdel self.__dStore[key]", "def __delitem__(self, key):\n if self._size > 1:\n node_to_delete = self._getItemHelper(key, self._root)\n if node_to_delete:\n self._delItemHelper(node_to_delete)\n self._size -= 1\n else:\n raise KeyError('Key is not in the tree.')\n elif self._size == 1 and self._root.key == key:\n self._root = None\n self._size -= 1\n else:\n raise KeyError('Key is not in the tree.')", "def __delitem__(self, key):\n del self.elements[key]", "def __delitem__(self, key):\n i, kv_pair = self._lookup(key, self._backing)\n if kv_pair and not kv_pair.value is Hashmap.absent:\n self._backing[i] = KeyValue(key, Hashmap.absent)\n self._deleted += 1\n\n size = len(self._backing)\n utilization = (self._used - self._deleted)/size \n if utilization < 0.16:\n self._resize(self._decr_size(size))\n else:\n raise KeyError('no such item!')", "def delete(self, item):\r\n self.fetch()\r\n t = self.make_item_tuple(item)\r\n changed = False\r\n while t in self.data:\r\n self.data.remove(t)\r\n changed = True\r\n \r\n if changed:\r\n query_cache.set(self.iden, self.data)", "def _delete(self, item):\n self.cv.delete(item)", "def __delitem__(self, key: 
Hashable) -> None:\n del self.contents[key]\n return", "def _map___delitem__(self, key):\n if not isinstance(key, self.keytype):\n raise KeyError('type of `key` should be ' + repr(self.keytype) + ' but got ' + repr(type(key)))\n if key not in self:\n raise KeyError('key not found')\n self.erase(self.find(key))\n return", "def __delitem__(self, key):\n\n bucket_key = self.key_for_bucket(key)\n del self.buckets[bucket_key][key]\n\n if not self.buckets[bucket_key]:\n del self.buckets[bucket_key]", "def remove_item_from_all_keys(self, item):\n for key in self._reverse_store[item]:\n self[key].remove(item)\n del self._reverse_store[item]", "def delete(self, key):\n try: \n self.pop(key)\n \n except KeyError: \n raise KeyError", "def __delitem__(self, key):\n if isinstance(key, types.SliceType):\n # FIXME: efficiency?\n keys = self._sequence[key]\n for entry in keys:\n dict.__delitem__(self, entry)\n del self._sequence[key]\n else:\n # do the dict.__delitem__ *first* as it raises\n # the more appropriate error\n dict.__delitem__(self, key)\n self._sequence.remove(key)", "def delete(self, key):", "def __delitem__(self, key):\n\n dict.__delitem__(self, key)\n self.changed()", "def delitem(self, key):\n\n key = build_block(key)\n\n if not isinstance(key, ResourceBlock):\n raise KeyError(\"Expected ResourceBlock, got %s\" % type(key))\n\n dict.__delitem__(self, key)", "def delete(self, item):\n # eg. node=item to attrs, telling item type to Graphviz._setattr\n self.graph._del(self.parent.handle, **{self.type: item})", "def remove(self, key: int | str):\n self.__delitem__(key)", "def _delete(self, key):\n path = self._get_key_path(key)\n remove(path)", "def delete(self, key):\n pass", "def delete(self, key):\n pass", "def __delitem__(self,key):\n if key in self.changed: self.changed.remove(key)\n if key not in self.deleted: self.deleted.append(key)\n del self.data[key]", "def delete(self, key):\r\n index = self.search(key)\r\n if self.contains_key_at(key, index):\r\n del self.keys[index]", "def __delitem__(self, key):\n\n # If key is in hash map\n if self.__contains__(key):\n\n # Get hashed key\n i = self.hash(key)\n\n # Get chain index of key value pair\n chain_idx = self.keys_ref[i].index(key)\n\n # Delete value associated with key in hash map\n del self.table[i][chain_idx]\n\n # Delete key from hash table\n del self.keys_ref[i][chain_idx]\n\n # Remove key from set of keys\n self.keys_set.remove(key)\n\n # Decrement size\n self.size -= 1\n\n # If key not in hash map\n else:\n\n # Raise error\n raise KeyError(key)", "def __delitem__(self,key):\n self.table.delItem(key,self.column)", "def delete(self, item):\n try:\n if \".\" in item:\n keys = item.split(\".\")\n else:\n del self.data[item]\n return\n self._delete_keys_from_dict(self.data, keys)\n except Exception as e:\n print(e)\n raise ValueError(\"unkown error\")", "def __delitem__(self, key):\n with SessionContext(self.SessionClass) as session:\n q = session.query(PAW2_DBObject)\n q = q.filter(PAW2_DBObject.key == key)\n assert q.delete(synchronize_session=False) == 1\n session.commit()", "def discard(self, item):\n try:\n self._del(item)\n except KeyError:\n pass", "def __delitem__(self, key: tuple):\n s, a = key\n if not isinstance(s, self.observation_space) or not isinstance(a, self.action_space):\n raise KeyError\n del self.store[s][a]", "def __delitem__(self, key):\n del self._ctx[key]", "def _bucket_delitem(self, j, k):\n pass", "def __delitem__(self, key):\n\n del self._vertices[key]", "def delete(self, key):\n raise NotImplementedError", 
"def delete(self, key):\n raise NotImplementedError", "def remove_item(self, idx_of_item):\n del self.items[idx_of_item]", "def __delitem__(self, key):\n self.deleteCurve(key)", "def delete(self, item):\n self._createAction(item, \"delete\")", "def delete(self, key):\n raise NotImplementedError()", "def delRepoItem(self, key):\n\n ACCESS_TOKEN = initZenodo(self.hostDefn['localhost']['localSettings']/'zenodoSettings.dat')\n r = requests.delete('https://zenodo.org/api/deposit/depositions/%s' % self.nbDetails[key]['repoInfo']['id'],\n params={'access_token': ACCESS_TOKEN})\n if r.ok:\n print(f\"Item {self.nbDetails[key]['title']} deleted from repo.\")\n self.nbDetails[key]['repoInfo'] = None\n self.nbDetails[key]['doi'] = None\n else:\n print(f\"Failed to remove item {self.nbDetails[key]['title']}, code: {r.status_code}\")", "def __delitem__(self, key):\n bucket = self._buckets[self._index(key)]\n for node in bucket.linked_list:\n bucket_object_key, bucket_object_value = node.value\n if bucket_object_key.load_value() == key:\n # remove objects from object -> list_node dict\n key_list_node = self._object_to_list_node.pop(bucket_object_key)\n value_list_node = self._object_to_list_node.pop(bucket_object_value)\n # remove list_node from in_memory and disk objects\n self._in_memory_objects.remove(key_list_node)\n self._in_memory_objects.remove(value_list_node)\n self._disk_objects.remove(key_list_node)\n self._disk_objects.remove(value_list_node)\n # remove node from bucket linked list\n assert bucket.linked_list.remove(node) == True\n self._balance()\n return\n raise KeyError(\"Key `{}` is not exists\".format(key))", "def __delitem__(self, key):\n try:\n del self._maps[0][key]\n except KeyError:\n raise KeyError(\n 'Key not found in the last mapping: {!r}'.format(key))", "def delete_item(self):\n\n\t\tdb.session.delete(self)\n\t\tdb.session.commit()", "def deleteKey(self, key):\n key.delete()", "def remove(self, item: T) -> None:\n index = self.index(item)\n self.delete_at_index(index)", "def remove(self, item: T) -> None:\n index = self.index(item)\n self.delete_at_index(index)", "def __delitem__(self, key):\n if not self._set:\n raise TypeError('This dict is read-only')\n return self._set(key, None)", "def __delitem__(self, key):\n if self.__pepth__ != 0:\n return plist.__getattr__(self, '__delitem__')(key)\n try:\n if (isinstance(key, list)\n and plist(key).all(isinstance, int)):\n for k in sorted(key, reverse=True):\n operator.__delitem__(self, k)\n else:\n # Handles slices and ints. 
Other key types will fail.\n list.__delitem__(self, key)\n except Exception as first_exception:\n try:\n if isinstance(key, list):\n for i, k in enumerate(key):\n operator.__delitem__(self[i], k)\n elif isinstance(key, tuple):\n try:\n for x in self:\n operator.__delitem__(x, key)\n except Exception:\n for x in self:\n for k in key:\n operator.__delitem__(x, k)\n else:\n for x in self:\n operator.__delitem__(x, key)\n except Exception as second_exception:\n raise TypeError('Failed to apply index to self or elements.\\nself exception: %s\\nelements exception: %s' % (str(first_exception), str(second_exception)))\n\n # Allow chaining of set ops when using apply('__delitem__', k) and apply(operators.__delitem__, k)\n return self", "def __delitem__(self, key):\n\n if key not in self:\n raise KeyError(key)\n\n if self.is_view:\n self._view.remove(key)\n\n # resolve orphan data pointers\n # TODO: this may be a performance bottle neck in large graphs\n for target_key, target_value in self._storage.items():\n if target_value.get(self._data_pointer_key) == key:\n\n self._storage[target_key].update(self._storage[key])\n del self._storage[target_key][self._data_pointer_key]\n\n del self._storage[key]", "def delete_item(item_id: uuid.UUID):\n coll_items = data_access.get_items_collection()\n\n item = coll_items.find_one({\"item_id\": item_id})\n if item is None:\n raise HTTPException(status.HTTP_404_NOT_FOUND,\n f\"Could not find the item with id {item_id}\")\n\n coll_items.delete_one({\"item_id\": item_id})", "def item_remove(self, item):\n\t\treturn self._modify_object(item=item, new_item=\"\")", "def _bucket_delitem(self, j, k):\n bucket = self._table[j]\n if bucket is None: # no match found\n raise KeyError(\"Key Error: \" + repr(k))\n del bucket[k]", "def _delKey(self, key):\n pass", "def remove(self, item):\n try:\n entry = self.set.pop(item)\n entry[-1] = self.REMOVED\n except KeyError:\n print(\"Can't remove a non-existing item\")", "def remove_item(self, item_id):\n self.items.pop(item_id)", "def delete(self, key: int) -> None:\n node = self.search(key)\n if node:\n self._delete(node)", "def deleteItem(list,item):\n print \"I deleted this item:\", item\n list.remove(item)", "def remove(self, item: Item) -> None:\n raise NotImplementedError(\"remove\")", "def delete(self, key: str):\n raise NotImplementedError", "def delete(self, key):\n\n hi = self.hash_index(key)\n\n # if that hi is empty ignore\n # if self.storage[hi] is None:\n # print(\"WARNING: no key\")\n # return\n\n current = self.storage[hi]\n prev = self.storage[hi]\n while current and current.key != key:\n prev = current\n current = current.next\n\n if (current and current.key == key):\n # if its the first link in the list\n if (current == self.storage[hi]):\n self.storage[hi] = current.next\n else:\n prev.next = current.next\n\n self.numberOfItems -= 1\n else:\n print(\"WARNING: no key\")\n\n self.calculateLoad()", "def __delitem__(self, key):\n\n if '.' 
in key:\n path = key.split('.', 1)\n self.parser.remove_option(path[0], path[1])\n else:\n raise KeyError", "def delete(self, key):\n hash_key = hash(key) % self.length\n bucket = self.array[hash_key]\n if not bucket:\n raise ValueError('Key does not exist')\n for key_val_pair in bucket:\n if key_val_pair[0] == key:\n bucket.remove(key_val_pair)", "def delete(self, key):\n node = self.search(key)\n if node:\n self.remove_node(node)", "def __delitem__(self, path):\n\n path = self.__check_path__(path)\n\n # d - dict\n def is_empty(d):\n if not d:\n return True\n return False\n\n # d - dict, p - path (keys sequence)\n def remove_key(d, p):\n k = p[0]\n\n if len(p) == 1:\n if not isinstance(d, dict):\n raise KeyError(k)\n del d[k]\n return is_empty(d)\n\n if not isinstance(d, dict):\n raise KeyError(k)\n if remove_key(d[k], p[1:]):\n del d[k]\n return is_empty(d)\n\n remove_key(self.__dict__, path)", "def delete(self, key):\n return None", "def delete(self, key):\n self.tree.delete(key)", "def decrease_key(self, old_item, new_item):", "def __delitem__(self, key: Union[Hashable, Sequence[Hashable]]) -> None:\n self.contents = {i: self.contents[i] for i in self.contents \n if i not in more_itertools.always_iterable(key)}\n return", "def remove(self, item):\n # self._probeCount = 0\n self._homeIndex = abs(self._hash(item)) % len(self._table)\n distance = 1\n index = self._homeIndex\n\n while (self._table[index] != HashTable.EMPTY or \\\n self._table[index] == HashTable.DELETED) and \\\n self._table[index] != item:\n\n if self._liner:\n increment = index + 1\n else:\n increment = self._homeIndex + distance ** 2\n distance += 1\n index = increment % len(self._table)\n self._probeCount += 1\n\n if self._table[index] == item:\n self._table[index] = HashTable.DELETED\n self._actualIndex = index\n self._size -= 1\n return index\n else:\n self._actualIndex = -1\n return -1", "def delete(self, key):\n self.map.pop(key, None)", "def delete(self, key) -> None:\n with self.__lock:\n if key not in self.__data:\n return # Ignore if key is non-existent\n del self.__data[key]\n self.flush()", "def _delKey(self, key):\n try:\n self._getKeyList().remove(key)\n except KeyError:\n # This shouldn't happen, but what do we do if it does?\n pass\n self._testKeySubNsDel()", "def _del_item(dic: dict, keys: list):\n\tdic = _get_item(dic, keys[:-1])\n\tdel dic[keys[-1]]", "def remove_item(self, item: tuple) -> None:\n self._antecedent.remove(item)\n self._is_updated = False", "def remove(self, item) -> None:\n entry = self.entry_finder.pop(item)\n entry[-1][0] = None", "def delete(self, mapitem_id: int):\n pass", "def delete_key(self, key):\n self.dest.delete(key)", "def remove(self, item):\n try:\n self._data.remove(item)\n except ValueError as exc:\n raise KeyError from exc\n else:\n self.__log__.append(SetRemove(value=item))", "def __delitem__(self, k):\n j = self._hash_function(k)\n self._bucket_delitem(j, k)\n self._n -= 1", "def __delitem__(self, k):\n if not self.is_empty():\n p = self._subtree_search(self.root(), k)\n if k == p.key():\n self.delete(p)\n return\n self._rebalance_access(p)\n raise KeyError('Key Error:' + repr(k))", "def _delete(self, key):\n return self._store.delete(key)" ]
[ "0.78592277", "0.7817014", "0.7817014", "0.7764789", "0.77646744", "0.7654799", "0.76423573", "0.75042117", "0.7504201", "0.74854916", "0.7483856", "0.7433788", "0.74314106", "0.73982894", "0.73960876", "0.7380565", "0.73796886", "0.7366733", "0.73262364", "0.73262364", "0.73161906", "0.7287357", "0.72806454", "0.720359", "0.719625", "0.71946526", "0.71944094", "0.71775186", "0.71710634", "0.7161939", "0.7146866", "0.7133994", "0.7124036", "0.7117396", "0.71113825", "0.7111117", "0.7105022", "0.7081658", "0.7078427", "0.7059889", "0.7059889", "0.7032785", "0.7026254", "0.70103645", "0.6977595", "0.6975285", "0.6974661", "0.69422305", "0.692952", "0.6919756", "0.6914492", "0.6905524", "0.68957", "0.68957", "0.68808824", "0.6862814", "0.6861362", "0.6841975", "0.68397534", "0.68392116", "0.6830959", "0.6823806", "0.68231153", "0.68174887", "0.68174887", "0.68165463", "0.6809968", "0.6780506", "0.6780506", "0.67569286", "0.67514396", "0.6748616", "0.6739859", "0.67271566", "0.67191195", "0.6718852", "0.67180425", "0.6709731", "0.6705988", "0.66814166", "0.66800237", "0.6679328", "0.66686326", "0.6667126", "0.6665176", "0.6663437", "0.6663204", "0.6646841", "0.6643563", "0.6637086", "0.663573", "0.66349685", "0.6613389", "0.6607478", "0.66063786", "0.6603387", "0.66033715", "0.6590978", "0.65838504", "0.6572925" ]
0.7879571
0
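The query/document pair in the row above wraps Redis's CF.DEL command in a thin client method. A minimal usage sketch follows, assuming a local Redis server with the RedisBloom module loaded and the `redisbloom` Python client installed; `cfCreate`, `cfAdd` and `cfExists` are companion methods of the same client and do not appear in the row above, and the key/item names are illustrative.

```python
# Minimal sketch: deleting an item from a Cuckoo filter via cfDel (CF.DEL).
# Assumes a local Redis server with the RedisBloom module loaded and the
# `redisbloom` client package installed.
from redisbloom.client import Client

client = Client(host="localhost", port=6379)

client.cfCreate("users:cf", 1000)   # reserve a Cuckoo filter with capacity 1000
client.cfAdd("users:cf", "alice")   # CF.ADD users:cf alice

# cfDel sends CF.DEL <key> <item>, removing one copy of the item if present.
print(client.cfDel("users:cf", "alice"))     # 1 if a copy was deleted, 0 otherwise
print(client.cfExists("users:cf", "alice"))  # 0 once the only copy is gone
```

Cuckoo filters support deletion because each item is stored as a removable fingerprint; deleting an item that was never added can introduce false negatives, so callers typically check existence before deleting.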
Returns the number of times an ``item`` may be in the ``key``.
def cfCount(self, key, item): params = [key, item] return self.execute_command(self.CF_COUNT, *params)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def count(self, item):\n if item in self: \n return self[item]\n else: \n return 0", "def getKeyCount(self,\n key):\n if (self.hasKey(key) == 1):\n return self.__keyCount[key]\n else:\n return 0", "def count(self, item):\n # TODO: complete this function!\n if item not in self:\n return 0\n else:\n num_occur = 0\n if self._first == item:\n num_occur += 1\n num_occur += self._rest.count(item)\n return num_occur", "def count(self, item: Any) -> int:\n curr = self._first\n count = 0\n\n while curr is not None:\n if curr.item == item:\n count += 1\n curr = curr.next\n\n return count", "def count(self, item):\n return _(self._.count(item))", "def getFreq(TDB,key):\n \"\"\" key is set of subitems to count frequancy of repeatation this key in the TDB \"\"\"\n freq = 0\n for items in TDB:\n exist = True\n for element in key:\n if element not in items:\n exist = False\n break\n if exist:\n freq+=1\n return freq", "def count(self, item):\n # type: (Any) -> int\n return list.count(self, self.ref(item))", "def count(item):\n return len(item)", "def __incKeyCount(self,\n key):\n if (self.__keyCount.has_key(key) == 0): self.__keyCount[key] = 0\n self.__keyCount[key] = self.__keyCount[key] + 1\n return self.__keyCount[key]", "def number_with_key(key):\n # good for checking proliferation of sort_key etc\n db = TinyDB(CARD_DATA_FILE)\n card_data = db.table('card_data')\n packs = card_data.all()\n total = 0\n with_key = 0\n for pack in packs:\n total += 1\n if key in pack:\n with_key += 1\n print('{} out of {} have sort keys'.format(with_key, total))", "def count_singlesA(key, alist):\n result = 0\n for x in range(len(alist)):\n if alist[x] == key:\n if not is_next_to(x, alist):\n result += 1\n return result", "def count(self, conn, key):\n return conn.llen(key)", "def count(self, item: Any) -> int:\n # If this recursive list is empty\n if self.is_empty():\n return 0\n # If there is a first and a rest.\n else:\n # Check if the first is equal and add the count on the rest of the list.\n return int(self._first == item) + self._rest.count(item)", "def size(self, key):\n return len(self[key])", "def index(self, key):\n count = 0\n for k in self.__ordered_keys:\n if k.lower() == key.lower():\n return count\n count = count + 1\n raise KeyError(key)", "def count_singlesB(key, alist):\n num = 0\n result = 0\n for element in alist:\n if num == 0 and element == key:\n result += 1\n num = 1\n elif num == 1 and element == key:\n result -= 1\n num += 1\n elif num > 1 and element == key:\n num += 1\n elif element == key:\n num = 1\n else:\n num = 0\n return result", "def count_hash(cls, key, size):\n return key%size", "def getCount(self, event):\n # Attempt 2: Still too slow\n count = 0\n \n for mEvent in self:\n if event.__st__(mEvent):\n count += 1\n \n return count\n \n # Attempt 1: Too slow\n #return reduce((lambda x, y: x+y),\n # map((lambda i: itemset <= i), self))", "def _index(self,key):\n index=0\n for item in self._item:\n if item.key==key:\n return index\n index+=1\n return -1", "def supportCk(ckItem, transactions):\n count = 0\n for trans in transactions:\n if ckItem.issubset(frozenset(trans['itemId'])):\n count += 1\n return count", "def total(my_list, item):\n return my_list.count(item)", "def __len__(self):\n return sum(1 for item in self.wishlist.values())", "def __len__(self):\n return sum(1 for item in self.wishlist.values())", "def get_num_values(self, item):\n\tnum_values = 1\n\t\n\t# Valor mas antiguo de la linked list\n\t# Siempre tiene valor, si no, no tenemos la entrada en el hashset\n\tvalue = 
item[\"tail\"][\"next\"]\n \twhile long(value) != 0:\n\t num_values += 1\n\t value = value[\"next\"]\n\n\treturn num_values", "def frequency(item, the_list):\n list_length = len(the_list)\n # initialising counters\n i = 0\n item_count = 0\n\n # looping through every item in the list\n while i < list_length:\n # if the item being checked in the list equals the item being searched for, increment the count\n if the_list[i] == item:\n item_count = item_count + 1\n i = i + 1\n\n # printing the result\n print(str(item) + ' appears ' + str(item_count) + ' times')\n\n return item_count", "def topkCount(self, key, *items):\n params = [key]\n params += items\n\n return self.execute_command(self.TOPK_COUNT, *params)", "def get_count(name, key):\n total = 0\n query = CounterShard.all().filter('name = ', name).filter('reference_key = ', key)\n for counter in query:\n total += counter.count\n \n return total", "def number_keys(a_dictionary):\n return(len(a_dictionary))", "def keycount(self, essid):\n if essid not in self.essids:\n raise KeyError(\"ESSID not in store.\")\n return len(self.essids[essid][1])", "def count(self, value):\n self.__validate_value(value)\n counter = 0\n for v in self.__list:\n if v == value:\n counter += 1\n return counter", "def getNumberOfKeys(self) -> int:\n ...", "def __len__(self):\n return sum(item['qty'] for item in self.basket.values()) # counts all the values of the key qty", "def locate_successor(self, key):\r\n index = 0\r\n while index < self.num_keys() and self.keys[index] <= key:\r\n index += 1\r\n return index", "def num_keys(self):\n return len(self.counter.keys())", "def increment_count(dictionary, key):\n if key:\n if key in dictionary:\n dictionary[key] += 1\n else:\n dictionary[key] = 1", "def getItemCount(self, ItemBase):\n Found = 0\n for CurrItem in self.List:\n if CurrItem.Base == ItemBase:\n Found = 1\n break\n\n if not Found: return 0\n else: return CurrItem.Count", "def hasKey(self,\n key):\n return self.__keyCount.has_key(key)", "def find_pos(self, _node, _key):\n for i, key in enumerate(_node.keys):\n if _key < key:\n return i\n \n return len(_node.pt)-1", "def get_number_of_items(self):\n return len(self.__item_map)", "def count_item(*, item : Any, list : Union[List[Any], ConduitVariable]) -> List[Any]:\n return list.count(item)", "def count(self, value):\n # Note: objects are never coerced into other types for comparison\n if type(value).__eq__ in _int__eq__s:\n return int(self._contains_int(value))\n # take the slow path, compare every single item\n return sum(1 for self_item in self if self_item == value)", "def count(self, key):\n self._metrics[key] += 1", "def linear_search(self, key):\r\n index = 0\r\n while index < self.num_keys() and self.keys[index] < key:\r\n index += 1\r\n return index", "def getNoOfKeys(self):\n return len(self.__keyList)", "def increment_count(count_dict, key):\n if key in count_dict:\n count_dict[key] += 1\n else:\n count_dict[key] = 1", "def size(self, key):\n with SessionContext(self.SessionClass) as session:\n q = session.query(PAW2_DBObject.numElems)\n return q.filter(PAW2_DBObject.key == key).one()[0]", "def keycount(self, essid):\n return self.cli.essids.keycount(essid)", "def countSubStringMatch(target,key):\n count = 0\n for i in range(0,len(target)-len(key)):\n if target[i:i+len(key)] == key:\n count += 1\n return count", "def count(self) -> int:\n if self._cached_items is not None:\n return len(self._cached_items)\n return self.items.count()", "def get_item_count(self):\n resp = self.app.get('/items')\n 
self.assertEqual(resp.status_code, status.HTTP_200_OK)\n data = json.loads(resp.data)\n return len(data)", "def item_count(self):\n return self.items.shape[0]", "def count(self):\n return len([i for i in self.iteritems()])", "def _increment_count(self, key):\n\n if not self._count.has_key(key):\n self._count[key] = 0\n\n self._count[key] += 1", "def _increment_count(self, key):\n\n if not self._count.has_key(key):\n self._count[key] = 0\n\n self._count[key] += 1", "def number_of_keys_with_foo_in_name(my_dict):\n z = []\n for x in my_dict:\n b = x.find(\"foo\", 0)\n if b is not -1:\n z.append(b)\n print len(z)\n return len(z)", "def number_in_set(c,s):\n return sum(v for k,v in c.items() if k in s)", "def numSprites(self, key):\n deleted = len([s for s in self.kill_list if key in s.stypes])\n if key in self.sprite_groups:\n return len(self.sprite_groups[key])-deleted\n else: \n return len([s for s in self if key in s.stypes])-deleted", "def count(self):\n return self.connection.llen(self.key)", "def count(self):\n return self.connection._llen(self.key)", "def count_if(self, criteria):\n # set count to 0\n count = 0\n # iterate through nodes in deque\n for item in self:\n # if the node's data meets the criteria passed,\n if criteria(item):\n # increment count\n count += 1\n # return the count\n return count", "def count(self, element):\n count = 0\n for i in range(self._length): # Increment count when equal value is found\n if self._arr[i] == element:\n count += 1\n return count", "def count_products(list_products):\n for each_item in ADD_PRODUCTS: #This iterates in the dictionary\n num_of_products = list_products.count(each_item) #This count each product\n if num_of_products > 0:\n price = ADD_PRODUCTS[each_item]\n print num_of_products, each_item + \"(s)\", \"a\", (\"Q%.2f c/u\") % price", "def item_count(item_id, arg):\n global database\n table = database.Tables.items\n upd = table.update(None).where(table.c.id == item_id).values(count=table.c.count+(int(arg)))\n database.conn.execute(upd)", "def countSynergies(item, polynomial_gains):\n\tpositive_syn=0\n\tnegative_syn=0\n\tfor k_poly in polynomial_gains.keys():\n\t\tif item in k_poly[1:-1].split(', '): \n\t\t\tif polynomial_gains[k_poly]>0:\n\t\t\t\tpositive_syn+=1\n\t\t\telse:\n\t\t\t\tnegative_syn+=1\n\t\treturn (positive_syn, negative_syn)", "def check(self, k, x):\n k = self._checkIndex(k)\n return bool(self.caches[k].count(x))", "def count(self, value: object) -> int:\n count = 0\n for _ in range(self.da.length()):\n if self.da[_] == value:\n count += 1\n return count", "def _dict_values_count_hashed(a_dict, count_this):\n counter = 0\n for value in a_dict.values():\n if value == count_this:\n counter += 1\n elif (\n isinstance(value, dict)\n and isinstance(count_this, dict)\n and \"hash\" in value\n and \"hash\" in count_this\n and \"size\" in value\n and \"size\" in count_this\n and value[\"hash\"] == count_this[\"hash\"]\n ):\n counter += 1\n \"hash\" in value and isinstance(count_this, dict) and \"hash\" in count_this\n return counter", "def count(self, i):\n return sum([1 for j in self if i==j])", "def count(self, elem):\n return self.iter.count(elem)", "def getNumberOfKeys(self, attr, view) -> int:\n ...", "def _bucket_index(self, key):\n # return hash(key) % len(self.buckets)\n hash_value = 0 # hash is set to 0\n for char in key: # iterates through as much as the number of characters in key\n hash_value += ord(char) # return the unicode value to make the number different everytime\n return hash_value % len(self.buckets) # 
returns a number that will never be greater than the length of the bucket", "def num_cached(self):\n return len(self._item_list)", "def __decKeyCount(self,\n key):\n if (self.__keyCount.has_key(key) == 0): self.__keyCount[key] = 0 \n self.__keyCount[key] = self.__keyCount[key] - 1\n return self.__keyCount[key]", "def items_count(self):\n return len(self.items)", "def num_keys(self):\r\n return len(self.keys)", "def get_number_of_useful_items(nodes, a: str, b: str) -> int:\n return sum(int(a <= item.key <= b) for node in nodes for item in node.elements)", "def contains(self, key):\n bus=key%100000\n pos=key//100000\n return self.li[bus][pos]==1", "def k_ary_support_count(itemset, tagnamesdict):\n X = itemset[0]\n x_list = tagnamesdict[X]\n inter = set(x_list)\n\n for i in range(1, len(itemset)):\n Y = itemset[i]\n y_list = tagnamesdict[Y]\n inter = inter.intersection(y_list)\n\n support_count = len(inter)\n return support_count", "def __setKeyCount(self,\n key,\n count):\n self.__keyCount[key] = count\n return self.__keyCount[key]", "def num_keys_command():\n # Get Args needed for the command\n incident = demisto.args().get('id', get_investigation_id())\n # Search Collection counting matching incident_id\n cursor = COLLECTION.find({})\n count = 0\n for i in cursor:\n if incident in i:\n count += 1\n return f'The count of the key/value pairs for the incident - {str(count)}', {}, {}", "def __len__(self):\n return self.keyvaluepair_set.count()", "def count_items(self):\n count = 0\n for o in self.order_lst:\n count += o.count()\n \n return count", "def counts(sequence):\n # initialize the countainer\n count = defaultdict(int)\n # iterates through sequence elements\n for item in sequence:\n # if element not in counts add 0\n # else add 1\n count[item] = count.get(item, 0) + 1\n return dict(count)", "def get_number_of_empty_pages(cls, pages, item_key):\n empty = [page for page in pages if page[item_key] == []]\n return len(empty)", "def count(self, value): # real signature unknown; restored from __doc__\n return 0", "def moreThanOne(dict, key):\n\treturn key in dict and dict[key] > 0", "def items_num(self):\n\t\treturn len(self.items)", "def items_num(self):\n\t\treturn len(self.items)", "def count(self, e):\n try:\n return self.vals[e]\n except:\n return 0", "def count(self):\n return sum(1 for _ in self)", "def countOccurrences(lst, x):\n res = 0\n for i in lst:\n if i == x:\n res += 1\n return res", "def search(self, key):\r\n left = 0 \r\n right = self.num_keys()\r\n while right > left:\r\n mid = (left + right)//2\r\n if self.keys[mid] >= key:\r\n right = mid\r\n else:\r\n left = mid + 1\r\n return left", "def keycount(self, essid):\n with SessionContext(self.SessionClass) as session:\n q = session.query(PAW2_DBObject.key)\n q = q.join(PYR2_DBObject).join(ESSID_DBObject)\n q = q.filter(ESSID_DBObject.essid == essid)\n return q.count()", "def _count_elements(mapping, iterable): # real signature unknown; restored from __doc__\n pass", "def score(item, fd, key):\n return fd.get(key(item), 0)", "def __contains__(self, item, key):\n ndx = self._findPostion(key)\n return ndx is not None", "def index(self, item: T) -> int:\n current = self.head\n index = 0\n while current is not None and current.item != item:\n current = current.link\n index += 1\n if current is None:\n raise ValueError(\"Item is not in list\")\n else:\n return index", "def __contains__(self, items):\n if type(items) != list:\n raise PJFInvalidType(items, list)\n ret = 0\n for item in items:\n for key in self.__dict__:\n if 
isinstance(self.__dict__[key], JsonFactory):\n ret += item in self.__dict__[key]\n elif item == key:\n ret += 1\n return len(items) == ret", "def countitems(self):\n count = 0\n sid = self.client.scannerOpen(self.table, '', ['f:s'])\n while 1:\n r = self.client.scannerGetList(sid, 1000)\n #r = self.client.scannerGet(sid)\n if not r: break\n count += len(r)\n logging.debug('%d %s', count, r[-1].row)\n self.scannerClose(sid)\n return count", "def count_occurrences(x):\r\n tmp_x = sorted(copy(x))\r\n ux = unique(x)\r\n return searchsorted(tmp_x, ux, 'right') - searchsorted(tmp_x, ux, 'left')" ]
[ "0.754235", "0.7415456", "0.7260023", "0.71076053", "0.7074687", "0.6912356", "0.6888656", "0.6855236", "0.6669268", "0.666577", "0.6609228", "0.65588135", "0.652157", "0.650688", "0.6440919", "0.6308882", "0.62916195", "0.6257994", "0.62517", "0.6243789", "0.6169287", "0.6159211", "0.6159211", "0.6146827", "0.61295086", "0.6125167", "0.6120678", "0.6115279", "0.60913575", "0.6087484", "0.6087129", "0.6068779", "0.60649866", "0.6044266", "0.60311", "0.6028833", "0.60244036", "0.6021284", "0.60200244", "0.59939885", "0.59721386", "0.59401923", "0.59391356", "0.5935818", "0.59248394", "0.5898334", "0.5883522", "0.5882171", "0.5868861", "0.5865016", "0.58625144", "0.5839197", "0.58350813", "0.58350813", "0.58034086", "0.57974845", "0.57759094", "0.57706493", "0.5754462", "0.5744916", "0.57425374", "0.5740246", "0.5732351", "0.57073766", "0.57004654", "0.5700096", "0.56975234", "0.5696522", "0.56925905", "0.5692255", "0.5689797", "0.56863475", "0.5686283", "0.5672959", "0.5670551", "0.566512", "0.56574196", "0.56554127", "0.56492937", "0.56459224", "0.5639001", "0.56313384", "0.5628615", "0.5627127", "0.5595601", "0.5579229", "0.5576631", "0.5576631", "0.55721694", "0.55704945", "0.5566467", "0.55643755", "0.5556639", "0.5544968", "0.55412203", "0.55378723", "0.5537782", "0.5535767", "0.5529212", "0.5526093" ]
0.67729896
8
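The cfCount row above wraps CF.COUNT, which reports how many copies of an item the Cuckoo filter may contain; the result is an upper-bound estimate, since unrelated items can share a fingerprint. A short sketch under the same assumptions as the CF.DEL example (RedisBloom module loaded, `redisbloom` Python client installed):

```python
# Minimal sketch of cfCount (CF.COUNT); same assumptions as the CF.DEL example.
from redisbloom.client import Client

client = Client(host="localhost", port=6379)

client.cfCreate("events:cf", 1000)
for _ in range(3):
    client.cfAdd("events:cf", "login")   # CF.ADD allows duplicate insertions

# CF.COUNT returns the number of times the item *may* be present -- an upper
# bound that can exceed the true count when fingerprints collide.
print(client.cfCount("events:cf", "login"))    # typically 3 here
print(client.cfCount("events:cf", "logout"))   # 0 when the item was never added
```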
Begins an incremental save of the Cuckoo filter ``key``. This is useful for large Cuckoo filters which cannot fit into the normal SAVE and RESTORE model. The first time this command is called, the value of ``iter`` should be 0. This command will return successive (iter, data) pairs until (0, NULL) to indicate completion.
def cfScandump(self, key, iter): params = [key, iter] return self.execute_command(self.CF_SCANDUMP, *params)
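The cfScandump method above is the client half of the incremental dump protocol spelled out in the query: call it first with iter=0, feed each returned iterator back in, and stop when the server answers (0, NULL). Below is a sketch of a full dump-and-restore loop that pairs it with cfLoadChunk. The second Redis instance on port 6380 is hypothetical, and the two-element [iterator, data] reply shape follows the RedisBloom command documentation, so adjust the unpacking if your client wraps the reply differently.

```python
# Minimal sketch of the CF.SCANDUMP / CF.LOADCHUNK round trip described above.
# Assumes two Redis servers with the RedisBloom module loaded; the target
# instance on port 6380 is a hypothetical stand-in for "another server".
from redisbloom.client import Client

src = Client(host="localhost", port=6379)
dst = Client(host="localhost", port=6380)

src.cfCreate("big:cf", 10_000)
for i in range(5_000):
    src.cfAdd("big:cf", f"item-{i}")

# First call uses iter=0; keep passing the returned iterator back in until
# the server replies with (0, NULL), which marks the end of the dump.
chunks = []
it = 0
while True:
    it, data = src.cfScandump("big:cf", it)
    if it == 0:
        break
    chunks.append((it, data))

# Replay the chunks, in order, against the same key on the target instance.
for it, data in chunks:
    dst.cfLoadChunk("big:cf", it, data)

print(dst.cfExists("big:cf", "item-123"))   # 1 after the restore completes
```

The same iterate-until-(0, NULL) loop applies to the Bloom-filter variants (bfScandump / bfLoadChunk) that appear among the negatives below.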
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def bfScandump(self, key, iter):\n params = [key, iter]\n \n return self.execute_command(self.BF_SCANDUMP, *params)", "def save(self) -> dict:\n for pair in self._buffer:\n yield pair.save()", "def __iter__(self):\r\n for item in self._data:\r\n yield item # yield the KEY\r", "def save(self) -> None:\n self._bin_iter.save()", "def _iter(self, key, count, increment=1):\n key %= self.size\n while count > 0:\n try:\n yield self.db[key]\n except KeyError:\n # This shouldn't happen, but there's really nothing we can do if it does.\n # Skip over the damaged part of our database, ignoring the missing item.\n pass\n key = (key + increment) % self.size\n count -= 1", "def __iter__(self):\n if not self.loading:\n self.reset_loading()\n self.current_batch_index = 0\n return self", "def __iter__(self):\n try:\n i = self.db[self._headKey]\n while True:\n yield i\n i = self.db[self._getNextKey(i)]\n except KeyError:\n pass", "def knapsack_iterate_back(save):\n pass", "def add(self, key):\n\t\t#super(CountingBloomFilter, self).add(key)\n\t\t#super(CountingBloomFilter, self).generateStats()\n\t\tfor i in self.getBitArrayIndices(key):\n\t\t\tself.ba[i] += 1\n\t\tself.n += 1", "def train_callback(self, model, iteration):\n if (self.rewind_it == iteration and self.rewind_state_dict is None):\n # Save the current model weights\n self.rewind_state_dict = copy.deepcopy(model.state_dict())", "def bfLoadChunk(self, key, iter, data):\n params = [key, iter, data]\n \n return self.execute_command(self.BF_LOADCHUNK, *params)", "def _save(self, itr):\n # using keep_checkpoint_every_n_hours as proxy for iterations between saves\n if self.saver and (itr + 1) % self.saver._keep_checkpoint_every_n_hours == 0:\n\n # collect params (or stuff to keep in general)\n params = dict()\n params['critic'] = self.critic.network.get_param_values()\n\n # if the environment is wrapped in a normalizing env, save those stats\n normalized_env = hgail.misc.utils.extract_normalizing_env(self.env)\n if normalized_env is not None:\n params['normalzing'] = dict(\n obs_mean=normalized_env._obs_mean,\n obs_var=normalized_env._obs_var\n )\n\n # save hierarchy\n for i, level in enumerate(self.hierarchy):\n params[i] = dict()\n params[i]['policy'] = level.algo.policy.get_param_values()\n \n # save params \n save_dir = os.path.split(self.saver_filepath)[0]\n hgail.misc.utils.save_params(save_dir, params, itr+1, max_to_keep=50)", "def cfLoadChunk(self, key, iter, data):\n params = [key, iter, data]\n \n return self.execute_command(self.CF_LOADCHUNK, *params)", "def __iter__(self):\n while True:\n if self.stop:\n return\n for item in self.get_next_batch():\n yield item", "def keys_fetch(self):\n with self.env.begin(write=False) as txn:\n cursor = txn.cursor()\n tot = txn.stat()['entries']\n i = 0\n\n path = self.db_path\n base_name = self.base_path\n cache_file_path = os.path.join(path, '_cache_' + base_name + '.pkl')\n print('cache_file_path = ', cache_file_path) # DEBUG\n\n if os.path.isfile(cache_file_path):\n self.keys = pickle.load(open(cache_file_path, 'rb'))\n self._num_examples = tot\n else:\n keys = []\n for key, _ in cursor:\n i += 1\n if i % 1000 == 0 or i == tot:\n print('Fetching {:>8d} /{:>8d} keys'.format(i, tot),\n end='\\r')\n keys.append(key)\n print('\\nDone.')\n self._num_examples = tot\n self.keys = np.asarray(keys)\n pickle.dump(self.keys, open(cache_file_path, 'wb'))", "def fisher_iterate(\n self,\n cbl,\n map_tag=None,\n iter_max=200,\n converge_criteria=0.005,\n qb_start=None,\n transfer_run=False,\n save_iters=False,\n 
null_first_cmb=False,\n delta_beta_prior=None,\n cond_noise=None,\n cond_criteria=None,\n like_profiles=False,\n like_profile_sigma=3.0,\n like_profile_points=100,\n file_tag=None,\n ):\n\n save_name = \"transfer\" if transfer_run else \"bandpowers\"\n\n if transfer_run:\n null_first_cmb = False\n\n # previous fqb iterations to monitor convergence and adjust conditioning\n prev_fqb = []\n cond_adjusted = False\n\n if qb_start is None:\n qb = OrderedDict()\n for k, v in self.bin_def.items():\n if transfer_run:\n if \"cmb\" not in k or \"eb\" in k or \"tb\" in k:\n continue\n if k == \"delta_beta\":\n # qb_delta beta is a coefficient on the change from beta,\n # so expect that it should be small if beta_ref is close\n # (zeroes cause singular matrix problems)\n qb[k] = [self.delta_beta_fix]\n elif k.startswith(\"res_\") or k.startswith(\"fg_\"):\n # res qb=0 means noise model is 100% accurate.\n qb[k] = 1e-5 * np.ones(len(v))\n else:\n # start by assuming model is 100% accurate\n qb[k] = np.ones(len(v))\n else:\n qb = qb_start\n\n obs, nell, debias = self.get_data_spectra(\n map_tag=map_tag, transfer_run=transfer_run\n )\n\n bin_index = pt.dict_to_index(self.bin_def)\n\n success = False\n for iter_idx in range(iter_max):\n self.log(\n \"Doing Fisher step {}/{}...\".format(iter_idx + 1, iter_max), \"info\"\n )\n\n qb_new, inv_fish = self.fisher_calc(\n qb,\n cbl,\n obs,\n cls_noise=nell,\n cls_debias=debias,\n cond_noise=cond_noise,\n delta_beta_prior=delta_beta_prior,\n cond_criteria=cond_criteria,\n null_first_cmb=null_first_cmb,\n )\n\n qb_arr = pt.dict_to_arr(qb, flatten=True)\n qb_new_arr = pt.dict_to_arr(qb_new, flatten=True)\n dqb = qb_new_arr - qb_arr\n fqb = dqb / qb_arr\n max_fqb = np.nanmax(np.abs(fqb))\n\n prev_fqb.append(max_fqb)\n\n fnan = np.isnan(fqb)\n if fnan.any():\n (nanidx,) = np.where(fnan)\n self.log(\n \"Iter {}: Ignoring {} bins with fqb=nan: bins={}, qb_new={}, \"\n \"qb={}\".format(\n iter_idx,\n len(nanidx),\n nanidx,\n qb_new_arr[nanidx],\n qb_arr[nanidx],\n ),\n \"warning\",\n )\n\n self.log(\"Max fractional change in qb: {}\".format(max_fqb), \"info\")\n\n # put qb_new in original dict\n qb = copy.deepcopy(qb_new)\n cls_model = self.get_model_spectra(\n qb, cbl, delta=True, cls_noise=nell, cond_noise=None\n )\n\n if \"delta_beta\" in qb:\n # get beta fit and beta error\n beta_fit = qb[\"delta_beta\"][0] + self.beta_ref\n db_idx = slice(*bin_index[\"delta_beta\"])\n beta_err = np.sqrt(np.diag(inv_fish[db_idx, db_idx]))[0]\n else:\n beta_fit = None\n beta_err = None\n\n if save_iters:\n # save only the quantities that change with each iteration\n out = dict(\n map_tag=map_tag,\n map_tags=self.map_tags,\n iter_index=iter_idx,\n bin_def=self.bin_def,\n bin_weights=self.bin_weights,\n cls_shape=self.cls_shape,\n cls_obs=obs,\n qb=qb,\n fqb=fqb,\n inv_fish=inv_fish,\n cls_model=cls_model,\n cbl=cbl,\n map_freqs=self.map_freqs,\n cls_signal=self.cls_signal,\n cls_noise=self.cls_noise,\n Dmat_obs=self.Dmat_obs,\n gmat_ell=self.gmat_ell,\n extra_tag=file_tag,\n )\n\n if \"fg_tt\" in self.bin_def:\n out.update(\n beta_fit=beta_fit,\n beta_err=beta_err,\n ref_freq=self.ref_freq,\n beta_ref=self.beta_ref,\n )\n\n self.save_data(save_name, bp_opts=not transfer_run, **out)\n\n (nans,) = np.where(np.isnan(qb_new_arr))\n if len(nans):\n msg = \"Found NaN values in qb bins {} at iter {}\".format(nans, iter_idx)\n break\n\n if fnan.all():\n msg = (\n \"All bins have fqb=NaN at iter {}, \"\n \"something has gone horribly wrong.\".format(iter_idx)\n )\n break\n\n negs = 
np.where(np.diag(inv_fish) < 0)[0]\n if len(negs):\n self.log(\n \"Iter {}: Found negatives in inv_fish diagonal at locations \"\n \"{}\".format(iter_idx, negs),\n \"warning\",\n )\n\n if np.nanmax(np.abs(fqb)) < converge_criteria:\n if not transfer_run:\n # Calculate final fisher matrix without conditioning\n self.log(\"Calculating final Fisher matrix.\", \"info\")\n _, inv_fish = self.fisher_calc(\n qb,\n cbl,\n obs,\n cls_noise=nell,\n cls_debias=debias,\n cond_noise=None,\n delta_beta_prior=delta_beta_prior,\n null_first_cmb=null_first_cmb,\n )\n\n # If any diagonals of inv_fisher are negative, something went wrong\n negs = np.where(np.diag(inv_fish) < 0)[0]\n if len(negs):\n self.log(\n \"Found negatives in inv_fish diagonal at locations \"\n \"{}\".format(negs),\n \"warning\",\n )\n\n success = True\n break\n\n else:\n msg = \"{} {} did not converge in {} iterations\".format(\n \"Multi-map\" if map_tag is None else \"Map {}\".format(map_tag),\n \"transfer function\" if transfer_run else \"spectrum\",\n iter_max,\n )\n # Check the slope of the last ten fqb_maxpoints.\n # If there's not a downward trend, adjust conditioning\n # criteria to help convergence.\n if len(prev_fqb) <= 10 or transfer_run:\n continue\n m, b = np.polyfit(np.arange(10), prev_fqb[-10:], 1)\n if m > 0: # Not converging\n # First, start from very little conditioning\n if not cond_adjusted:\n cond_criteria = 5e3\n cond_adjusted = True\n self.log(\n \"Iter {}: Not converging. Setting cond_criteria={}\".format(\n iter_idx, cond_criteria\n ),\n \"warning\",\n )\n\n elif cond_criteria > 100:\n cond_criteria /= 2.0\n self.log(\n \"Iter {}: Tightening condition criteria to help convergence. \"\n \"cond_criteria={}\".format(iter_idx, cond_criteria),\n \"warning\",\n )\n else:\n self.log(\n \"Iter {}: Can't reduce cond_criteria any more.\".format(\n iter_idx\n ),\n \"warning\",\n )\n # give it ten tries to start converging\n prev_fqb = []\n\n # save and return\n out = dict(\n qb=qb,\n inv_fish=inv_fish,\n fqb=fqb,\n bin_def=self.bin_def,\n bin_weights=self.bin_weights,\n iters=iter_idx,\n success=success,\n map_tags=self.map_tags,\n map_freqs=self.map_freqs,\n converge_criteria=converge_criteria,\n cond_noise=cond_noise,\n cond_criteria=cond_criteria,\n null_first_cmb=null_first_cmb,\n apply_gcorr=self.apply_gcorr,\n weighted_bins=self.weighted_bins,\n )\n\n if \"fg_tt\" in self.bin_def:\n out.update(\n delta_beta_prior=delta_beta_prior,\n beta_fit=beta_fit,\n beta_err=beta_err,\n ref_freq=self.ref_freq,\n beta_ref=self.beta_ref,\n )\n\n if self.debug:\n out.update(\n cbl=cbl,\n cls_obs=obs,\n cls_signal=self.cls_signal,\n cls_noise=self.cls_noise,\n cls_model=cls_model,\n cls_shape=self.cls_shape,\n cond_noise=cond_noise,\n Dmat_obs=self.Dmat_obs,\n )\n\n if not transfer_run:\n out.update(qb_transfer=self.qb_transfer)\n if self.template_cleaned:\n out.update(template_alpha=self.template_alpha)\n\n if success and not transfer_run:\n # do one more fisher calc that doesn't include sample variance\n # set qb=very close to 0. 
0 causes singular matrix problems.\n # don't do this for noise residual bins\n self.log(\"Calculating final Fisher matrix without sample variance.\", \"info\")\n qb_zeroed = copy.deepcopy(qb)\n qb_new_ns = copy.deepcopy(qb)\n for comp in [\"cmb\", \"fg\"]:\n for spec in self.specs:\n stag = \"{}_{}\".format(comp, spec)\n if stag not in qb_zeroed:\n continue\n qb_zeroed[stag][:] = 1e-20\n qb_new_ns[stag][:] = 1.0\n if \"delta_beta\" in qb:\n qb_zeroed[\"delta_beta\"][:] = 1e-20\n qb_new_ns[\"delta_beta\"][:] = 0\n\n _, inv_fish_ns = self.fisher_calc(\n qb_zeroed,\n cbl,\n obs,\n cls_noise=nell,\n cls_debias=debias,\n cond_noise=None,\n delta_beta_prior=None,\n null_first_cmb=null_first_cmb,\n )\n\n out.update(\n invfish_nosampvar=inv_fish_ns,\n )\n\n # compute window functions for CMB bins\n self.log(\"Calculating window functions for CMB bins\", \"info\")\n wbl_qb = self.fisher_calc(\n qb,\n cbl,\n obs,\n cls_noise=nell,\n cls_debias=None,\n cond_noise=None,\n delta_beta_prior=delta_beta_prior,\n null_first_cmb=null_first_cmb,\n windows=True,\n inv_fish=inv_fish,\n )\n out.update(wbl_qb=wbl_qb)\n\n # compute bandpowers and covariances\n cb, dcb, ellb, cov, qb2cb, wbl_cb = self.do_qb2cb(qb, inv_fish, wbl_qb)\n _, dcb_ns, _, cov_ns, _, _ = self.do_qb2cb(qb, inv_fish_ns, wbl_qb)\n\n out.update(\n cb=cb,\n dcb=dcb,\n ellb=ellb,\n cov=cov,\n qb2cb=qb2cb,\n wbl_cb=wbl_cb,\n dcb_nosampvar=dcb_ns,\n cov_nosampvar=cov_ns,\n )\n\n if like_profiles:\n # compute bandpower likelihoods\n self.log(\"Calculating bandpower profile likelihoods\", \"info\")\n max_like = self.fisher_calc(\n qb,\n cbl,\n obs,\n cls_noise=nell,\n cond_noise=None,\n delta_beta_prior=delta_beta_prior,\n null_first_cmb=null_first_cmb,\n likelihood=True,\n )\n\n dqb = pt.arr_to_dict(np.sqrt(np.abs(np.diag(inv_fish))), qb)\n qb_like = OrderedDict()\n\n for stag, qbs in qb.items():\n qb_like[stag] = np.zeros(\n (len(qbs), 2, like_profile_points), dtype=float\n )\n\n for ibin, q in enumerate(qbs):\n qb1 = copy.deepcopy(qb)\n dq = dqb[stag][ibin] * like_profile_sigma\n q_arr = np.linspace(q - dq, q + dq, like_profile_points)\n like_arr = np.zeros_like(q_arr)\n\n for iq, q1 in enumerate(q_arr):\n qb1[stag][ibin] = q1\n try:\n like = self.fisher_calc(\n qb1,\n cbl,\n obs,\n cls_noise=nell,\n cond_noise=None,\n delta_beta_prior=delta_beta_prior,\n null_first_cmb=null_first_cmb,\n likelihood=True,\n )\n except np.linalg.LinAlgError:\n like = np.nan\n\n like_arr[iq] = like\n\n self.log(\n \"{} bin {} delta qb {} delta like: {}\".format(\n stag, ibin, q1 - q, like - max_like\n ),\n \"debug\",\n )\n\n qb_like[stag][ibin] = np.vstack([q_arr, like_arr])\n\n out.update(max_like=max_like, qb_like=qb_like)\n\n if not success:\n save_name = \"ERROR_{}\".format(save_name)\n self.log(msg, \"error\")\n self.warn(msg)\n\n return self.save_data(\n save_name, map_tag=map_tag, bp_opts=True, extra_tag=file_tag, **out\n )", "def __iter__(self):\n cursor=0\n while cursor<len(self):\n yield self._item[cursor].key\n cursor+=1", "def cb(xk):\n self.iteration += 1\n t_current = time.time()\n t_elapsed = t_current - self.t_store\n self.t_store = t_current\n \n self.of_list.append(self.of_last)\n self.params = xk\n self._disp(t_elapsed)\n\n # Call the custom callback function if any\n if callback is not None:\n callback(self)", "def iterator(self):\n return self.KeyIterator()", "def add(self, key):\n if key in self:\n return True\n if not self.filters:\n filter = RedisLocalBloomFilter(\n server=self.server, \n bfkeypreffix = self.FILTER_KEY_FMT % 
(self.bfkeypreffix, self.filter_count),\n capacity=self.initial_capacity,\n error_rate=self.error_rate * (1.0 - self.ratio))\n self.filter_count += 1\n self.filters.append(filter)\n else:\n filter = self.filters[-1]\n if filter.count >= filter.capacity:\n capacity = filter.capacity * self.scale\n if capacity > MAX_PER_SLICE_SIZE:\n capacity = MAX_PER_SLICE_SIZE\n filter = RedisLocalBloomFilter(\n server=self.server,\n bfkeypreffix = self.FILTER_KEY_FMT % (self.bfkeypreffix, self.filter_count),\n capacity=capacity,\n error_rate=self.error_rate * (1.0 - self.ratio))\n self.filter_count += 1\n self.filters.append(filter)\n if self.max_filters > 0 and len(self.filters) >= self.max_filters:\n f = self.filters[0]\n f.clear()\n del self.filters[0]\n filter.add(key, skip_check=True)\n return False", "def __iter__(self):\n # This could be as simple as \"return self._getKeyList().__iter__()\"\n # but this performs some extra consistency checking to make sure the\n # key we iterate to actually exists, to keep us from crashing if\n # our db is a little out of sync with itself.\n\n # This is a nasty hack because our db seems prone to circular links\n nItems = 0\n for item in self._getKeyList():\n if item in self:\n yield item\n nItems += 1\n # NASTY HACK!\n if nItems > 1000:\n self.reindex()\n raise Exception(\"Circular link corrected, try again\")\n else:\n self._delKey(item)", "def next_window(self) -> Iterator[Optional[np.ndarray]]:\n while self._count >= self._window_width:\n # Preserve what we want to return by copying it.\n p1 = np.copy(self._data_store[:self._window_width, :])\n\n # Remove the data we don't need any more from the front of the buffer.\n frames_to_keep = self._count - self._window_step\n self._data_store[:frames_to_keep,\n :] = self._data_store[self._window_step:self._count, :]\n self._count -= self._window_step\n yield p1", "def store(self,key,start,end,data):\n\n pass", "def filter_keys(self):\n filters = self.args.keyfilter.split('.')\n self.logger.info(u'Filtering with:{f}'.format(f=filters))\n data = self.inputdata\n newdata = {}\n for key, value in data.items():\n self.logger.info(u'\\nProcessing Key:{k}'.format(k=key))\n returned_data = dict_key_filter(key, value, filters, self.logger)\n if bool(returned_data):\n newdata[key] = returned_data\n self.logger.info(u'Data After filter:{d}'.format(d=newdata))\n self.outputdata = newdata", "def inc(self, key):\n if key in self.cache:\n curr_freq = self.cache[key]\n self.freq[curr_freq].remove(key)\n\n if len(self.freq[curr_freq]) == 0:\n del self.freq[curr_freq]\n\n curr_freq += 1\n self.freq[curr_freq].add(key)\n self.cache[key] = curr_freq\n\n else:\n self.cache[key] = 1\n self.freq[1].add(key)", "def post_prepared_commit(self, key, prepared):\n docs = self.__splitprepared(prepared)\n docs[0][\"key\"] = key\n return self.client.post_commit(docs[0], docs[1])", "def filter(self, key):\n with suppress(KeyError):\n yield from self.data[key]", "def run(self, iter: int = -1):\n try:\n while iter != 0:\n self.iteration_count += 1\n iso = self._iso_observe()\n self._propagate(iso)\n iter -= 1\n except _FinishedObserving:\n return True\n except _Contradiction:\n return False", "def add(self, key, skip_check=False):\n bits_per_slice = self.bits_per_slice\n hashes = self.make_hashes(key)\n found_all_bits = True\n if self.count > self.capacity:\n raise IndexError(\"RedisLocalBloomFilter is at capacity\")\n pipe = self.server.pipeline(transaction=False) \n sliceIdx = 0\n for k in hashes:\n sliceKey = self.SLICE_KEY_FMT % (self.bfkeypreffix, 
sliceIdx)\n pipe.setbit(sliceKey, k, 1)\n sliceIdx += 1\n pipeResults = pipe.execute()\n if not skip_check:\n for pipeResult in pipeResults:\n if not pipeResult:\n found_all_bits = False\n break\n if skip_check:\n self.count += 1\n return False\n elif not found_all_bits:\n self.count += 1\n return False\n else:\n return True", "def __init__(self):\n super(KeyIterator, self).__init__()\n self.iterator = self.ValueIterator()", "def run(self, iteration_key):\n record_provider = SqlDocumentProvider(iteration_key, self.case_accessor())\n logger = SQLBasedProgressLogger(iteration_key)\n processor = BulkDocProcessor(record_provider, self.doc_processor(self.domain),\n progress_logger=logger)\n processed, skipped = processor.run()\n return processed, skipped, logger.logs", "def inc(self, key: str) -> None:\n if key not in self.bucket_of_keys:\n self.bucket_of_keys[key] = self.buckets.insert(self.buckets.begin(), Node(0, {key}))\n bucket, next_bucket = self.bucket_of_keys[key], self.bucket_of_keys[key].next\n if next_bucket is self.buckets.end() or next_bucket.value > bucket.value + 1:\n next_bucket = self.buckets.insert(next_bucket, Node(bucket.value + 1, set()))\n next_bucket.keys.add(key)\n self.bucket_of_keys[key] = next_bucket\n\n bucket.keys.remove(key)\n if not bucket.keys:\n self.buckets.erase(bucket)", "def key_lookup_batch(self, batchiter):\n pass", "def run(self, iteration_key):\n record_provider = SqlDocumentProvider(iteration_key, self.case_accessor())\n processor = BulkDocProcessor(record_provider, self.doc_processor(self.domain))\n return processor.run()", "def inc(self, key: str) -> None:\n if key not in self.mapping:\n cur_block = self.head\n else:\n cur_block = self.mapping[key]\n cur_block.keys.remove(key)\n\n if cur_block.val + 1 != cur_block.next.val:\n new_block = Block(cur_block.val + 1)\n cur_block.insert_after(new_block)\n else:\n new_block = cur_block.next\n new_block.keys.add(key)\n self.mapping[key] = new_block\n\n if not cur_block.keys and cur_block.val != 0:\n cur_block.remove()", "def _set(self, cmd, key, val, expiry_time, min_compress_len = 0):\n\t\tcheck_key(key)\n\t\tserver, key = yield self._get_server_for(key)\n\t\tif not server:\n\t\t\traise StopIteration(False)\n\n\t\tstored_info = self._value_to_stored(val, min_compress_len)\n\t\tif stored_info is None:\n\t\t\t# If it's not storable due to length, just return.\n\t\t\traise StopIteration(True)\n\t\tflags, stored = stored_info\n\t\t\n\n\t\tfull_cmd = \"%s %s %d %d %d\\r\\n%s\\r\\n\" % (cmd, key, flags, expiry_time, len(stored), stored)\n\n\t\ttry:\n\t\t\tyield server.sendall(full_cmd)\n\t\t\tres = yield server.read_line()\n\t\t\traise StopIteration(res == \"STORED\")\n\n\t\texcept tcp.ConnectionClosedException:\n\t\t\tserver.mark_dead()\n\n\t\traise StopIteration(False)", "def save (self):\n if self.newobj:\n using_sequence = self.sequence ()\n self.keyvals['id'] = using_sequence\n self.seq = using_sequence\n else:\n using_sequence = self.seq\n for key, val in self.keyvals.items ():\n r_key = self.prepare_key (key, using_sequence)\n r.set (r_key, val)\n self.keyvals = {}\n self.newobj = False", "def iterkeys(self):\r\n for wr in self.data.iterkeys():\r\n obj = wr()\r\n if obj is not None:\r\n yield obj", "def iterkeys(self):\r\n for wr in self.data.iterkeys():\r\n obj = wr()\r\n if obj is not None:\r\n yield obj", "def inc(self, key):\n if key in self.keyCountMap:\n self._updateCount(key, 1)\n else:\n self.keyCountMap[key] = 1\n if self.head.next.count != 1:\n self._addBucketAfter(Bucket(1), self.head)\n 
self.head.next.keySet.add(key)\n self.countBucketMap[1] = self.head.next", "def change_key(self, i, key):\n self.__keys[i] = key\n self.__swim(self.__qp[i])\n self.__sink(self.__qp[i])", "def save(self):\n saved_filter = SavedFilterIterator()\n source_field = self._source.serialized_name() + '_source'\n getattr(saved_filter, source_field).CopyFrom(self._source.save())\n saved_filter.expression = self._raw_expression\n if self._mu is not None:\n pyDict_to_protoDict(self._mu, saved_filter.mu)\n return saved_filter", "def iterkeys(self):\n self.proto.iterinit()\n try:\n while True:\n yield wait(self.proto.iternext())\n except TyrantError:\n pass", "def create_data_iterator(\n wrapped_dummy_env: gym.Env,\n data_pipeline: minerl.data.DataPipeline,\n batch_size: int,\n buffer_size: int = 15000,\n num_epochs: int = None,\n num_batches: int = None,\n remove_no_ops: bool = False,\n) -> dict:\n buffered_iterator = BufferedBatchIter(data_pipeline, buffer_target_size=buffer_size)\n for current_obs, action, reward, next_obs, done in buffered_iterator.buffered_batch_iter(batch_size=batch_size,\n num_epochs=num_epochs,\n num_batches=num_batches):\n wrapped_obs = optional_observation_map(wrapped_dummy_env,\n recursive_squeeze(current_obs))\n wrapped_next_obs = optional_observation_map(wrapped_dummy_env,\n recursive_squeeze(next_obs))\n wrapped_action = optional_action_map(wrapped_dummy_env,\n recursive_squeeze(action))\n\n if remove_no_ops:\n # This definitely makes assumptions about the action space, namely that all-zeros corresponds to a no-op\n not_no_op_indices = wrapped_action.sum(axis=1) != 0\n wrapped_obs = wrapped_obs[not_no_op_indices]\n wrapped_next_obs = wrapped_next_obs[not_no_op_indices]\n wrapped_action = wrapped_action[not_no_op_indices]\n\n return_dict = dict(obs=wrapped_obs,\n acts=wrapped_action,\n rews=reward,\n next_obs=wrapped_next_obs,\n dones=done)\n\n yield return_dict", "def append_prev_itr(self):\n self.data = self.get_all() # need to materialize, since filter can be consumed only once\n min_itr = min([row['Iteration'] for row in self.data])\n basic_runs = DatasetBuilder(self.default_data)\\\n .filter_basic_runs()\\\n .filter_itr(min_itr - 1)\\\n .get_all()\n self.data.extend(basic_runs)\n return self", "def __iter__(self):\n # Return an iterator for the keys in the underlying dictionary.\n return iter(self.data)", "def train_bloom_filter(self, train_data):\n for val in train_data:\n if self.debug:\n print('val: ', val)\n for i in range(0, self.hash_size):\n k = self.hashes[i](val[0])\n if self.debug:\n print('k: ', k)\n self.bitarray[k] = 1\n if self.debug:\n print('___end training____')", "def process(self, data_itr):\n for data in data_itr:\n self.update(data)\n while True:\n try:\n out = self.next()\n yield out\n except StopIteration:\n break", "def __iter__(self):\n self.iterator = 0\n return self", "def __iter__(self):\n self.iterator = 0\n return self", "def __iter__(self):\n self.iterator = 0\n return self", "def __iter__(self):\n self.iterator = 0\n return self", "def __iter__(self) :\n for s in self._samples_to_cache :\n yield s", "def __next__(self) -> dict:\n batches = {}\n terminations = 0\n for iterator in self.iterators:\n \n try:\n data, target = next(iterator)\n batches[data.location] = (data, target)\n\n except (TypeError, AttributeError) as e:\n logging.warning(f\"Dangling pointer detected! Skipping operation... 
Error: {e}\")\n \n except StopIteration:\n terminations += 1\n\n # Every cached iterator has been iterated through completely\n if terminations == len(self.iterators):\n raise StopIteration\n\n return batches", "def begin_ga(key):\n _population = cache_get(key)\n population = [_population[idx] for idx in _population]\n base_key = cache_get('settings')['base_key']\n next_generation = population[0]['generation'] + 1\n name = '{}:{}'.format(base_key, next_generation)\n\n # need to convert the population dictionary to a tuple of tuples so we can\n # take the set of it. even though notes are a list of lists, python throws\n # and unhasable error if everything isnt of the same type\n for individual in population:\n individual['notes'] = tuple(tuple(x) for x in individual['notes'])\n\n _future_population = m_pipe(population, tournament, crossover, mutation)\n for idx, notes in enumerate(_future_population, start=1):\n individual = render_individual(notes=notes, _id=idx, generation=next_generation)\n logger.debug(\"Individual << %s >> for generation << %s >>:\\n%s\", idx, next_generation, individual)\n cache_set(name, idx, individual, serialize=True)\n return next_generation", "def _do_flush(self, cache):\n try:\n while cache and not self._stop_flushing:\n key, value = cache.popitem()\n self._shelf[self._encode_key(key)] = value\n if cache:\n cache.clear()\n except BaseException as exception:\n self._flush_exception = exception", "def _save_input(self, mod, i):\n if mod.training:\n self.state[mod][\"x\"] = i[0]", "def dict_key_filter(key, data, filters, logger):\n logger.info(u'Dict_key_filter key:{k}, filters:{f}'.format(k=key, f=filters))\n logger.info(u'Data:{d}'.format(d=data))\n remain_filters = []\n if filters:\n curfilter = filters[0]\n else:\n logger.info('No more filters to process')\n return data\n if len(filters) > 1:\n remain_filters = filters[1:]\n\n newdata = {}\n if curfilter == '*':\n logger.info('Setting filter to empty string')\n curfilter = ''\n\n logger.info(u'Filtering on {f}'.format(f=curfilter))\n if curfilter in key:\n logger.info('Setting new data to empty dictionary')\n if isinstance(data, dict) and remain_filters:\n logger.info('Processing next level dictionary')\n for nextkey, nextdata in data.items():\n logger.info(u'\\nProcessing nextkey: {k}'.format(k=nextkey))\n returned_data = dict_key_filter(nextkey, nextdata, remain_filters,\n logger)\n logger.info(u'NextKey returned: {d}'.format(d=newdata))\n if bool(returned_data):\n newdata[nextkey] = returned_data\n else:\n newdata[key] = data\n logger.info('Nothing more to process')\n logger.info(u'Returning data:{d}'.format(d=newdata))\n return newdata", "def __iter__(self):\n\t\treturn iter(self.__dStore)", "def __init__(self, iterator):\n self.iterator = iterator\n self.dirtyflag = False", "def test_flush_key(self):\r\n a = Addon.objects.get(id=1)\r\n eq_(base.flush_key(a.cache_key), base.flush_key(a))", "def __iter__(self):\n self.current_epoch = self.current_epoch + 1\n self.num_samples_yield = -1\n return self", "def __iter__(self):\n self.current_epoch = self.current_epoch + 1\n self.num_samples_yield = -1\n return self", "def keep_first_iteration(self):\n self.keep_first_iteration_flag = True", "def __call__(self,data):\n \n firstkey = True\n \n for key,value in data:\n key = self.unpack_key(key)\n \n\n gray = self.togray(value)\n \n mean = sum(gray)/float(len(gray))\n for i in xrange(len(gray)):\n gray[i] -= mean # center the pixels\n \n # supply to TSQR\n self.collect(key,gray)\n \n #if firstkey:\n #print 
>>sys.stderr, \"key: %i, sumrgb=\"%(key), self.sum_rgb(value)\n #print >>sys.stderr, \"key: %i, sumgray=%18.16e\"%(key,sum(gray)) \n #print >>sys.stderr, \"key: %i, maxgray=%18.16e\"%(key,max(gray))\n #print >>sys.stderr, \"key: %i, mingray=%18.16e\"%(key,min(gray)) \n #print >>sys.stderr, \"key: %i, lengray=%18.16e\"%(key,len(gray))\n \n firstkey = False\n \n #yield key, gray\n \n # finally, output data\n for k,v in self.close():\n yield k,v", "def save(self, key=None):\n\n # we can override our key by passing one in explicitly\n if key: self._key = key\n\n # now save in the db\n if self._key:\n self._dbag[self._key] = self.to_python()\n else:\n self._key = self._dbag.add(self.to_python())\n return self._key", "def __iter__(self):\n\n return iter([key for key in self._data.keys()])", "def __iter__(self):\n self.count = 0\n return self", "def test_pos_operate_increment_nonexistent_key(self):\n key = (\"test\", \"demo\", \"non_existentkey\")\n llist = [{\"op\": aerospike.OPERATOR_INCR, \"bin\": \"age\", \"val\": 5}]\n\n self.as_connection.operate(key, llist)\n\n (key, _, bins) = self.as_connection.get(key)\n\n assert bins == {\"age\": 5}\n\n self.as_connection.remove(key)", "def _update_append_key(self):\n self.append_key += 1", "def save(self) -> None:\n self._save_marker = self.get_next()", "def __getitem__(self, key):\n with open(self._get_path(key), 'rb') as f:\n unpickler = pickle.Unpickler(f)\n while f.peek(1):\n yield unpickler.load()", "def _write_current_buffer_for_group_key(self, key):\n write_info = self.write_buffer.pack_buffer(key)\n self.write(write_info.get('file_path'),\n self.write_buffer.grouping_info[key]['membership'])\n self.write_buffer.clean_tmp_files(write_info)\n self.write_buffer.add_new_buffer_for_group(key)", "def __iter__(self):\n for batch in self.iterator:\n yield Batch.from_iterator_batch(batch, self.pad_index, self.sos_index, self.eos_index)", "def save(self, key, data):\n overloads = self._load_index()\n try:\n # If key already exists, we will overwrite the file\n data_name = overloads[key]\n except KeyError:\n # Find an available name for the data file\n existing = set(overloads.values())\n for i in itertools.count(1):\n data_name = self._data_name(i)\n if data_name not in existing:\n break\n overloads[key] = data_name\n self._save_index(overloads)\n self._save_data(data_name, data)", "def bookkeep(self) :\n\t\tself.loopiter += 1", "def key_iterator(self):\n return _osgAnimation.mapVertexInfluence_key_iterator(self)", "def reset_iterator(self):\n if self.data_loader:\n self.data_iterator = self.data_loader.__iter__()\n else:\n self.data_iterator = None", "def train(self, K, iter_num, save=True):\n if K > 1: # K + 1 shots overall?\n for _ in range(K):\n self.sample(self.current_policy)\n self.adapt(self.current_policy, self.optimizer,\n self.K) # policy.update(optimizer, K)\n # new trajectory with updated policy\n trajectory, ep_reward = self.sample(self.current_policy)\n kde = self.calculate_KDE(trajectory)\n update = {'trajectory': trajectory,\n 'policy': self.current_policy,\n 'kde': kde,\n 'reward': ep_reward,\n 'optimizer': self.optimizer,\n 'update': iter_num}\n if save:\n try:\n self.memory[iter_num].append(\n update) # could really be a list\n except:\n self.memory[iter_num] = [update]\n else:\n trajectory = self.sample(policy)\n return trajectory, ep_reward", "def touchKBucket(self, key):", "def dispatch_next(self):\n if not self.dispatch_one_batch(self._original_iterator):\n self._iterating = False\n self._original_iterator = None", "def put(self, 
key, processed_query):\n data = json.dumps(processed_query.to_cache())\n\n def commit_to_db(connection):\n cursor = connection.cursor()\n cursor.execute(\"\"\"\n INSERT OR IGNORE into queries values (?, ?, ?, ?, ?);\n \"\"\", (key,\n data,\n processed_query.query.text,\n processed_query.domain,\n processed_query.intent,\n ))\n connection.commit()\n\n if self.memory_connection:\n commit_to_db(self.memory_connection)\n rowid = self.key_to_row_id(key)\n self.batch_writes.append(str(rowid))\n if len(self.batch_writes) == self.batch_write_size:\n self.flush_to_disk()\n else:\n commit_to_db(self.disk_connection)\n\n return self.key_to_row_id(key)", "def flush_deferred_lowering(self, key):\n with self._lock:\n deferred = self._deferred.pop(key, [])\n for cb in deferred:\n cb()", "def iterate_data(dataset,iter_no=5,pixel_mask=None,plot_clear=True,algo=\"FordRollett\",unit_weights=False):\n import overlap\n start_gain = array.ones(len(dataset))\n if unit_weights is True:\n weights = array.ones_like(dataset)\n else:\n weights = 1.0/dataset.var\n # Use weights as the mask\n if pixel_mask is not None:\n weights = weights*pixel_mask\n if algo == \"FordRollett\":\n gain,first_ave,ar,esds,k = overlap.find_gain_fr(dataset,weights,start_gain,pixel_mask=pixel_mask)\n else:\n raise ValueError(\"No such algorithm: %s\" % algo)\n chisquared,residual_map = overlap.get_statistics_fr(gain,first_ave,dataset,dataset.var,pixel_mask)\n old_result = first_ave #store for later\n chisq_history = [chisquared]\n k_history = [k]\n if iter_no > 0: \n no_iters = iter_no\n else:\n no_iters = abs(iter_no)\n for cycle_no in range(no_iters+1):\n esdflag = (cycle_no == no_iters) # need esds as well, and flags the last cycle\n print 'Cycle %d' % cycle_no\n if cycle_no > 3 and iter_no < 0:\n esdflag = (esdflag or (abs(chisq_history[-2]-chisq_history[-1]))<0.005)\n if algo == \"FordRollett\":\n gain,interim_result,ar,esds,k = overlap.find_gain_fr(dataset,weights,gain,arminus1=ar,pixel_mask=pixel_mask,errors=esdflag)\n chisquared,residual_map = overlap.get_statistics_fr(gain,interim_result,dataset,dataset.var,pixel_mask)\n chisq_history.append(chisquared)\n k_history.append(k)\n if esdflag is True:\n break\n print 'Chisquared: ' + `chisq_history`\n print 'K: ' + `k_history`\n print 'Total cycles: %d' % cycle_no\n print 'Maximum shift/error: %f' % max(ar/esds)\n return gain,dataset,interim_result,residual_map,chisq_history,esds,first_ave,weights", "def learn(self):\n\n for i in range(self.args.n_iters):\n diff = self.iteration()\n\n if diff < self.args.epsilon:\n self.save(self.save_path, i)\n break\n elif (i + 1) % self.args.save_frequency == 0:\n self.save(self.save_path, i)", "def items(self, key=None, lo=None, hi=None, reverse=False, max=None,\n include=False, txn=None, rec=None):\n txn_id = getattr(txn or self.engine, 'txn_id', None)\n it = self._iter(txn, key, lo, hi, reverse, max, include, None)\n for batch, key, data in it:\n obj = self.encoder.unpack(data)\n if rec:\n obj = Record(self, obj, key, batch, txn_id,\n self._index_keys(key, obj))\n yield key, obj", "def __iter__(self):\n self.index = 0\n return self", "def _process(self, data, cache):\n stop = False\n try:\n super(PickleCache, self).process(data)\n except StopIteration:\n stop = True\n\n data_to_save = data\n\n cache = dict() if cache is None else cache\n cache[self.chain_info['chain_hash']] = {\"data\": data_to_save,\n \"stopped\": stop,\n 'chain_repr': self.chain_info[\n 'chain_repr'],\n 'chain_mtime': self.chain_info[\n 'chain_mtime']}\n return cache, stop", 
"def key_upload(self, key=None):\n raise NotImplementedError", "def __iter__(self):\n self.__index__ = 0\n return self", "def assign_nice_keys(self):\n\n print \"Assigning nice_key values to new documents on {0}...\".format(self.source_client)\n\n empty_nice_keys = self.source_client.find({\"nice_key\": {\"$exists\": False}}, {\"nice_key\": 1})\n\n total_empty_nice_keys = empty_nice_keys.count()\n\n if total_empty_nice_keys:\n\n print \"{0} empty nice key docs found\".format(total_empty_nice_keys)\n progress_report = \"PROCESSED {0}/{1}\".format(\"{0}\", total_empty_nice_keys)\n\n for ct, doc in enumerate(empty_nice_keys):\n\n nice_key = self.generate_nice_key()\n\n if nice_key:\n\n self.update_document_nice_key(doc, nice_key)\n\n elif nice_key is None:\n\n raise Exception(\"FAILED TO GENERATE KEY on doc {0} with ObjectId {1}\".format(ct, doc[\"_id\"]))\n\n if (ct % 10000 == 0):\n\n print progress_report.format(ct + 1)\n\n print progress_report.format(empty_nice_keys.count())", "def __iter__(self):\n with SessionContext(self.SessionClass) as session:\n keys = session.query(PAW2_DBObject.key)\n keys = [c[0] for c in keys]\n random.shuffle(keys)\n return keys.__iter__()", "def save_all(self):\r\n for index in range(self.count()):\r\n self.save(index)", "def cooperative_iter(citer):\n try:\n for chunk in citer:\n sleep(0)\n yield chunk\n except Exception as err:\n msg = (_(\"Error: cooperative_iter exception %(error)s\") %\n dict(error=err))\n LOG.error(msg)\n raise", "def compress(key):\n instance = key.get()\n if not instance:\n logging.warning('Instance does not exist: %s', key)\n return\n\n if instance.active_metadata_update:\n logging.warning('Instance already has active metadata update: %s', key)\n return\n\n if not instance.pending_metadata_updates:\n return\n\n compress_pending_metadata_updates(key)\n metrics.send_machine_event('METADATA_UPDATE_READY', instance.hostname)", "def iter_batch(self):\n\n # model initialization\n self._set_train()\n\n if not self.batch_process:\n self.batch_process = self._train_batch()\n return self.batch_process.__next__()\n else:\n try:\n return self.batch_process.__next__()\n except StopIteration:\n # update the state if StopIteration\n if self.info:\n print(f\"\\rEpoch: { self.epoch } | Average loss: { self.epoch_loss.avg }\")\n\n # update epoch and reset the epoch_loss\n self.epoch_loss.reset()\n self.epoch += 1\n\n # reset the batch process\n del self.batch_process\n self.batch_process = self._train_batch()\n return self.batch_process.__next__()", "def __iter__(self):\n for i, data in enumerate(self.dataloader):\n if i * self.opt.batch_size >= self.opt.max_dataset_size:\n break\n yield data", "def __call__(\n self, document: tp.Optional[dict]\n ) -> tp.Optional[tp.Generator[dict, None, None]]:\n if document is None:\n return\n\n if isstopiteration(document):\n self._count_stop_iteration += 1\n if self._count_stop_iteration == self._nb_input_nodes:\n yield STOP_ITERATION\n else:\n if self._drop:\n new_doc = deepcopy(document)\n for key in self._keys:\n try:\n new_doc.pop(key)\n except KeyError:\n continue\n else:\n new_doc = dict()\n for key in self._keys:\n try:\n new_doc[key] = document[key]\n except KeyError:\n continue\n yield new_doc", "def shard(self, dataset_iter):\n return dataset_iter", "def __iter__(self) -> Generator:\n for k in self.raw.keys():\n yield k" ]
[ "0.557857", "0.5078635", "0.49877542", "0.4933926", "0.4932952", "0.48787275", "0.47174305", "0.46994108", "0.4630205", "0.46070197", "0.45404267", "0.45206273", "0.4477999", "0.44704136", "0.4470278", "0.44687983", "0.44679555", "0.4457995", "0.44183904", "0.44140702", "0.43917415", "0.43813822", "0.43755758", "0.43646127", "0.4361089", "0.4354573", "0.43527192", "0.43489787", "0.4333391", "0.4332018", "0.43316334", "0.4331477", "0.43142477", "0.43129238", "0.42918172", "0.42840433", "0.42824256", "0.42810968", "0.42810968", "0.4276994", "0.4275532", "0.42605737", "0.4258045", "0.42536747", "0.42517546", "0.42492664", "0.42342886", "0.4229921", "0.42242903", "0.42242903", "0.42242903", "0.42242903", "0.42189112", "0.42126733", "0.42112833", "0.42054617", "0.41902846", "0.41837662", "0.4167943", "0.41669098", "0.41551247", "0.4152431", "0.4152431", "0.41472602", "0.414568", "0.4136827", "0.41327217", "0.41274047", "0.4126367", "0.41169742", "0.41149718", "0.41079196", "0.41077313", "0.41021442", "0.40964463", "0.4092663", "0.4091834", "0.40823185", "0.40757012", "0.40680313", "0.40600306", "0.40587598", "0.40562478", "0.40530056", "0.4047549", "0.40421015", "0.4041371", "0.40395358", "0.40374497", "0.4036534", "0.4033214", "0.40310287", "0.40299264", "0.40262046", "0.402182", "0.40183276", "0.40136653", "0.401356", "0.40135005", "0.40123615" ]
0.5583304
0
Restores a filter previously saved using SCANDUMP. See the SCANDUMP command for example usage. This command will overwrite any Cuckoo filter stored under key. Ensure that the Cuckoo filter will not be modified between invocations.
def cfLoadChunk(self, key, iter, data): params = [key, iter, data] return self.execute_command(self.CF_LOADCHUNK, *params)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def removeAutoSaveRestoreFilter(filter):", "def addAutoSaveRestoreFilter(filter):", "def removeAutoSaveFilter(filter):", "def do_reset(self, args):\n\t\tself.parent.filter = {}\n\t\tself.apply_filter()\n\t\tself._update_prompts()", "def highpass_filter_reset(self, data):\n zi = scipy.signal.sosfilt_zi(self._highpass_sos)\n print('Zi shape: ', zi.shape, data.shape)\n self._highpass_state = data[0, :] * np.repeat(zi[:, :, np.newaxis],\n data.shape[1], axis=2)\n logging.info('Resetting the high-pass filter state.')", "def lowpass_filter_reset(self, data):\n zi = scipy.signal.sosfilt_zi(self._lowpass_sos)\n self._lowpass_state = data[0, :] * np.repeat(zi[:, :, np.newaxis],\n data.shape[1], axis=2)\n logging.info('Resetting the low-pass filter state.')", "def restore(self):\n\n self.dispersion = self.raw_dispersion\n self.flux = self.raw_flux\n self.flux_err = self.raw_flux_err\n self.reset_mask()", "def autoSaveRestoreFilter(filename):\n return _doAutoSaveCallbacks( autoSaveRestoreFilters, filename )", "def addAutoSaveFilter(filter):", "def reset_filter(self):\n arlen = len(self.variant_list)\n self.filter = np.zeros((arlen, arlen)) == 0", "def restore(self, key, history):\n self.goal, used = key\n self._used = []\n for row in used:\n self._used.append(list(row))\n self.history = list(history)", "def autoSaveFilter(filename):", "def restore_speedsters(apps, schema_editor):\n\n Pokemon = apps.get_model(\"stats\", \"Pokemon\")\n Pokemon.objects.filter(id__in=[\"ZERAORA\", \"TALONFLAME\", \"ABSOL\", \"GENGAR\"]).update(category=\"SS\")", "def ResetAvgFilter(self):\n self.k = 1\n self.prevAvg = 0", "def prepend_crds_filter(self, filter):\n if filter not in self.filters:\n self.filters = [filter] + self.filters", "def load(self, source: Union[str, Any], key: str) -> None: # type: ignore\n self._logger.info(f\"Loading filter policy model from {source} to {key}\")\n if 'torch' in key:\n model = load_torch_model(source,'filter',device=self._config.device)\n else:\n model = load_model(source, key, self._config.use_remote_models)\n self._items[key] = {\n \"model\": model\n }", "def _restore(self):\n self._logger = LOGGER\n self._param_store = pyro.get_param_store()\n self.set_state(self.best_params)\n self._alpha_guide_prior_params = dict(\n self._param_store.named_parameters()\n )", "def reinitialize_level_set_image_filter(*args, **kwargs):\n import itk\n instance = itk.ReinitializeLevelSetImageFilter.New(*args, **kwargs)\n return instance.__internal_call__()", "def apply_filters(self, new_filters):\n\t\tself.filters = new_filters", "def save_filter(self, filename, overwrite=False):\n hdu = fits.PrimaryHDU(self.filter, self.header)\n hdu.writeto(filename, clobber=overwrite)\n fits.append(filename, self.approx, self.header)\n fits.append(filename, self.filter + self.approx, self.header)\n fits.append(filename, self.max_scale_image(), self.header)", "def restore(self, event):\n\n self.undo_add()\n\n key_list = list(self.patch.engine.misc_data.keys())\n key = key_list[self.selected_index]\n self.patch.misc[key] = copy.copy(self.patch.engine.misc[key])\n\n self.misclist_update_row(self.selected_index)\n self.update_properties()", "def test_filter_shave(sh_arg, sh_src, sh_dest):\n args = parser.parse_args([\"-sh\", sh_arg])\n filters = renamer.initfilters(args)\n dest = renamer.get_renames(sh_src, filters, args.extension, args.raw)\n assert dest == sh_dest", "def restore(self) -> 'BaseImage':\n self._surface = self._original_surface.copy()\n return self", "def save(self):\n saved_filter = 
SavedFilterIterator()\n source_field = self._source.serialized_name() + '_source'\n getattr(saved_filter, source_field).CopyFrom(self._source.save())\n saved_filter.expression = self._raw_expression\n if self._mu is not None:\n pyDict_to_protoDict(self._mu, saved_filter.mu)\n return saved_filter", "def swarpfilter(d, dir, directory, images, keys, filter, lamp, camera, done, output, type):\n filt = images.files_filtered(FWINAME=filter, FLSPECTR=lamp, CAMNAME=camera, HISTORY=done)\n files = [d + x for x in filt.tolist()]\n print(files)\n if files:\n swarp(files, output=directory + '/' + output + '.fits', celestial_type=type)", "def copy(self):\n raise NotImplementedError(\"RedisLocalBloomFilter not support copy\")", "def use_effect(self):\n if self.preview_name in FILTERS:\n photo = Image.open(self.path.url[1:])\n preview = photo.filter(FILTERS.get(self.preview_name))\n preview.save(self.path.url[1:])", "def clear_crossfilter2(self):\n print ('Trigger clear')\n self.query_dict = {}\n self.plot_data = None\n self.create_figure_new()\n layout_doc.children[4].children[1] = self.p", "def filter(self, data):\n self.data = pysap.Image(data=self.flt.filter(data))", "def reset_data():\n shutil.copy2(\n 'data/one_producer_many_consumers.ORIG.json',\n 'data/one_producer_many_consumers.json'\n )", "def full_reset(self):\n self.at_cmd('CFUN=1')", "def reload(self):\n self.restore()", "def clear_crossfilter1(self):\n print ('Trigger clear')\n self.query_dict = {}\n self.plot_data = None\n self.create_figure_new()\n layout_doc.children[4].children[0] = self.p", "def use_effect(effect, photo_edit):\n if effect in FILTERS:\n photo = Image.open(photo_edit.upload)\n photo = photo.filter(FILTERS.get(effect))\n\n photo.save(photo_edit.upload.url[1:])", "def filter_op(self, filter_option):\n print(\"===||| Initiating Filter-Resize Fill Operation |||===\")\n fill_op = shmops.Fill_Operation(id='4321')\n\n with Image.open(self.path) as map_img:\n map_img.thumbnail((self.map_major_dim, self.map_major_dim),resample=filter_option)\n pixels = map_img.convert('RGB').load()\n for x in progress_bar.progress_bar(range(map_img.width), \"Processing: \",width=36):\n for y in range(map_img.height):\n r,g,b = pixels[x,y]\n fill_op.add_fill(x,y,palette.rgb_to_hex(r,g,b))\n return fill_op", "def restore(self):\n self.u = self.ub.copy()\n self.w = self.wb.copy()\n self.v = self.vb.copy()\n if self.en_bias: self.b = self.bb.copy()", "def restore(self):\n self.weight = self._backup_weight", "def reset_filters():\n logger.info(\"reset filters\")\n global filter_item\n filter_item = -1\n filter_topics_table.view.filters = [IndexFilter()]\n filter_custom_table.view.filters = [IndexFilter()]\n filter_label.text = \"\"", "def filter(n='I'):\n if n=='':\n n = 'I'\n if type(n) == str:\n fid = filtid(n)\n fnum = filtnum(fid)\n opticalcoupler.SelectFilter(fnum)\n camera.status.filterid = fid\n camera.status.filter = fnum\n logger.info('Moved to filter '+`n`)\n else:\n if (n>=1) and (n<=8):\n opticalcoupler.SelectFilter(n)\n camera.status.filterid = filtid(filtname(n))\n camera.status.filter = n\n logger.info('Moved to filter '+`n`)\n else:\n logger.error(\"Error in filter value: \"+repr(n))", "def undo_settings(self):\r\n cF.undo_settings()", "def restore(self, restore):\n self._restore = restore", "def apply_filter(self, inplace=True):\n\n if self.filter is None:\n if not inplace:\n return copy.deepcopy(self)\n else:\n return None\n\n x = copy.copy(self.__dict__)\n x['data'] = self.get_data()\n x['locs'] = self.get_locs()\n\n if self.filter 
== 'kurtosis':\n x['kurtosis'] = x['kurtosis'][x['kurtosis'] <= x['kurtosis_threshold']]\n\n for key in ['n_subs', 'n_elecs', 'n_sessions', 'dur', 'filter_inds', 'nifti_shape']:\n if key in x.keys():\n x.pop(key)\n\n boc = Brain(**x)\n boc.filter = None\n boc.update_info()\n if inplace:\n self.__init__(boc)\n else:\n return boc", "def restore(self, arguments):\n puts_err(colored.red(\"Not implemented!\"))", "def reset(self):\n if self._key:\n self._lib.StObjectReset(self._key)\n os.chdir(self._cwd)\n self._layers.clear() # layer: index\n self._substrate = None\n self._experiments.clear() # analyzed experiments\n self._tmpstandards.clear()", "def filter(self, filter):\n self._filter = filter", "def FSLFlip(self, infile, prefix):\n cmd = '3dresample -orient LPI -prefix %s.nii -inset %s+orig' % \\\n (prefix, infile)\n self.CheckExec(cmd, ['%s.nii' % prefix])\n fname = '%s+orig.BRIK' % infile\n if os.path.exists(fname):\n os.remove(fname)\n fname = '%s+orig.HEAD' % infile\n if os.path.exists(fname):\n os.remove(fname)", "def removeAutoSaveDeleteFilter(filter):", "def remove_baseline(self):\n\n print(\" \\t Apply Savitzky-Golay filter \\t %d\" %self.nwin)\n base_savgol = signal.savgol_filter(self.input, self.nwin, 1)\n self.input_nobase = self.input - base_savgol", "def write_filter_cache_scratch(filter_cache, cache_dir=None, skip_keys=None):\n if skip_keys is None:\n skip_keys = []\n # if the keys_before instantiation wasn't a list, then\n # keys_before would just be the current keys of cache and we\n # wouldn't have any new keys.\n new_filters = {k: filter_cache[k] for k in filter_cache if k not in skip_keys}\n if len(new_filters) > 0:\n # generate new file name\n if cache_dir is None:\n cache_dir = os.getcwd()\n cache_file_name = '%032x' % random.getrandbits(128) + '.filter_cache'\n cfile = open(os.path.join(cache_dir, cache_file_name), 'ab')\n pickle.dump(new_filters, cfile)\n else:\n warnings.warn(\"No new keys provided. 
No cache file written.\")", "def set_input(self, redshift, filter_list, maggie, maggie_ivar,\n *args, **kwargs):\n super(KCorrect, self).set_input_photo(filter_list, maggie, maggie_ivar)\n\n redshift = np.atleast_1d(np.asarray(redshift, dtype=FTYPE))\n self.redshift = redshift.reshape((len(self), 1))\n\n # zero redshifts for convenience\n self.redshift0 = np.zeros(self.redshift.shape, dtype=FTYPE)\n\n # get fit coefficients once and for all\n self.do_fit(maxiter=self.maxiter)", "def reduce_filter_var(self, name, values):\n name = self._verify_filter_name(name, None)\n if not self.is_filter(name):\n raise KeyError('{} is no valid filter-variable.'.format(name))\n if 0 in values:\n raise ValueError('Cannot remove the 0-keep value from filter var')\n elif len([x for x in self.codes(name) if not x in values]) <= 1:\n raise ValueError('Cannot remove all values from filter var.')\n self.uncode(name, {0: {name: 0}})\n self.remove_values(name, values)\n self.recode(name, {0: {name: has_count(len(self.codes(name))-1)}}, append=True)\n return None", "def update_filters(self, **kwargs):\n self._FILTERS = kwargs", "def _restore_default(self):\n self._data = self._default", "def setFilter(self, afilter):\n\n if afilter in (self.FilterU, self.FilterG, self.FilterR, self.FilterI, self.FilterZ, self.FilterY):\n self.filter = afilter\n else:\n raise ValueError(\"No '%s' filter.\" % afilter)", "def reset(self):\n self.keyToFile=dict()", "def filter_keys(self):\n filters = self.args.keyfilter.split('.')\n self.logger.info(u'Filtering with:{f}'.format(f=filters))\n data = self.inputdata\n newdata = {}\n for key, value in data.items():\n self.logger.info(u'\\nProcessing Key:{k}'.format(k=key))\n returned_data = dict_key_filter(key, value, filters, self.logger)\n if bool(returned_data):\n newdata[key] = returned_data\n self.logger.info(u'Data After filter:{d}'.format(d=newdata))\n self.outputdata = newdata", "def restore(effect: list, target: \"PlayerCharacter or Monster\"):\n heal = effect[1]\n if target.ko:\n return\n if \"ALL\" in effect or \"HP\" in effect:\n if target.hp + heal > target.stats[\"MAXHP\"]:\n target.hp = target.stats[\"MAXHP\"]\n else:\n target.hp += heal\n if \"ALL\" in effect or \"MP\" in effect:\n if target.mp + heal > target.stats[\"MAXMP\"]:\n target.mp = target.stats[\"MAXMP\"]\n else:\n target.mp += heal", "def copy(self):\n new_filter = BloomFilter(self.capacity, self.error_rate)\n new_filter.filter = self.filter.copy()\n return new_filter", "def move_and_restore(win_filter_fn, xywh):\n x, y, w, h = xywh[0], xywh[1], xywh[2], xywh[3]\n win = ahk.find_window(win_filter_fn)\n if win:\n win.restore()\n win.move(x, y, w, h)\n return win is not None", "def reset(self):\n self.current_exposure = None\n self.scores = {}", "def restore_data(self):\n self.R = self._Ro\n del self._Ro", "def test_restore(self):\n s = Source([[10, 10], [10, 20]], values=[1.0, 2.0])\n assert(array_equal(s.center, [10, 15]))\n\n assert(\"center\" in s.__dict__.keys())\n s.restore()\n assert(\"center\" not in s.__dict__.keys())\n\n assert(array_equal(s.center, [10, 15]))\n assert(\"center\" in s.__dict__.keys())\n s.restore(skip=\"center\")\n assert(\"center\" in s.__dict__.keys())", "def filterWithSITK(self):\n #research\n profbox()\n backgroundNode = slicer.app.layoutManager().sliceWidget(\"Red\").sliceLogic().GetBackgroundLayer().GetVolumeNode()\n backgroundNodeName = backgroundNode.GetName()\n backgroundImage = sitk.ReadImage( sitkUtils.GetSlicerITKReadWriteAddress( backgroundNodeName ) )\n filterImage = 
sitk.GradientMagnitudeRecursiveGaussian( backgroundImage, float(2) );\n del backgroundImage\n sitk.WriteImage( filterImage, sitkUtils.GetSlicerITKReadWriteAddress( backgroundNodeName ) )\n \n # notify\n backgroundNode.GetImageData().Modified()\n backgroundNode.Modified()", "def restore(self):\n raise NotImplementedError", "def apply_filters(\n isovar_result,\n filter_thresholds={},\n filter_flags=[]):\n filter_values = OrderedDict(isovar_result.filter_values.items())\n new_filter_values = evaluate_filters(\n isovar_result,\n filter_thresholds=filter_thresholds,\n filter_flags=filter_flags)\n filter_values.update(new_filter_values)\n return isovar_result.clone_with_updates(filter_values=filter_values)", "def sceneUIReplacement(*args, clear: bool=True, deleteRemaining: bool=True, getNextFilter:\n List[AnyStr, AnyStr]=None, getNextPanel: List[AnyStr, AnyStr]=None,\n getNextScriptedPanel: List[AnyStr, AnyStr]=None, update: AnyStr=\"\",\n **kwargs)->AnyStr:\n pass", "def reset(self):\n for item in TextChannelFilterItem.objects(channel_filter=self):\n item.delete()\n self.reset_counters()\n self.retrain()", "def restore_state(self, ckpt):\n raise NotImplemented()", "def filter_SF(commande,indexSF,min,max):\n commande+=\" -set_active_sf \"+str(indexSF)+\" -filter_sf \"+str(min)+\" \"+str(max)+\" -save_clouds \" \n subprocess.call(commande)\n return", "def test_restore_with_filter_regex(self):\n key_name = \"ent-backup\"\n if self.backupset.random_keys:\n key_name = \"random_keys\"\n self.validate_keys = self.input.param(\"validate_keys\", False)\n if self.validate_keys:\n gen = BlobGenerator(key_name, \"ent-backup-\", self.value_size,\n end=self.num_items)\n else:\n gen = DocumentGenerator('random_keys', '{{\"age\": {0}}}', list(range(100)),\n start=0, end=self.num_items)\n\n self._load_all_buckets(self.master, gen, \"create\", 0)\n self.log.info(\"Start backup\")\n self.backup_create()\n self.backup_cluster()\n self.backup_restore()\n self.merged = False\n regex_check = self.backupset.filter_keys\n if not self.backupset.filter_keys:\n regex_check = self.backupset.filter_values\n self.validate_backup_data(self.backupset.backup_host,\n [self.backupset.restore_cluster_host],\n key_name, False, False, \"memory\",\n self.num_items, None,\n validate_keys=self.validate_keys,\n regex_pattern=regex_check)", "def maybe_outfeed(self, key, value):\n if self._filters is not None:\n if any(f in key for f in self._filters):\n self._vals[key] = value\n else:\n self._vals[key] = value", "def remove_crds_filter(self, filter):\n if filter in self.filters:\n self.filters.remove(filter)", "def set_filter(self, category, code):\n flt_setter = self.__filter_set_map.get(category, None)\n if flt_setter is not None:\n flt_setter(code)", "def undo(backup):\r\n backup.load_backup()\r\n backup.undo_moves()", "def restore_export_preset():\n run_mel_command(\"FBXResetExport\")", "def restore(self, x):\n with tf.name_scope(\"pad_reduce/restore\"):\n x = tf.scatter_nd(\n indices=self.nonpad_ids,\n updates=x,\n shape=tf.concat([self.dim_origin, tf.shape(x)[1:]], axis=0),\n )\n return x", "def setSelectionfilter(self, scenefilter):\n self._selectionFilter = scenefilter\n sceneviewerfilter = self._sceneviewer.getScenefilter()\n if self._selectionFilter is not None:\n scenefiltermodule = self._context.getScenefiltermodule()\n scenefilter = scenefiltermodule.createScenefilterOperatorAnd()\n scenefilter.appendOperand(sceneviewerfilter)\n if self._selectionFilter is not None:\n scenefilter.appendOperand(self._selectionFilter)\n 
else:\n scenefilter = sceneviewerfilter\n self._scenepicker.setScenefilter(scenefilter)", "def test_filter_prepend(pre_arg, pre_src, pre_dest):\n args = parser.parse_args([\"-pre\", *pre_arg])\n filters = renamer.initfilters(args)\n dest = renamer.get_renames(pre_src, filters, args.extension, args.raw)\n print(dest)\n print(pre_dest)\n assert dest == pre_dest", "def reload(self):\n\t\toldlayers = self.layers\n\t\tself.layers = []\n\t\tfor cp, filename, fp in oldlayers:\n\t\t\tcp = cp # pylint\n\t\t\tif fp is None:\n\t\t\t\tself.read(filename)\n\t\t\telse:\n\t\t\t\tself.readfp(fp, filename)", "def _restore(self, a_path):\n super(RDPAnalyzer, self)._restore(a_path)\n self._model._restore()", "def set_from_original(self):\n self.image = self.orig_image\n self.update_img()\n self.update_size()", "def preset(self):\n self._clear_read_buffer()\n self._write_cmd(\"PP\")", "def restore(self):\n print(\"Restoring Direction\")\n if self.turn_track > 0:\n self.encL(abs(self.turn_track))\n elif self.turn_track < 0:\n self.encR(abs(self.turn_track))", "def filterWithSITK(self):\r\n # research\r\n profbox()\r\n backgroundNode = slicer.app.layoutManager().sliceWidget(\"Red\").sliceLogic().GetBackgroundLayer().GetVolumeNode()\r\n backgroundNodeName = backgroundNode.GetName()\r\n backgroundImage = sitk.ReadImage(sitkUtils.GetSlicerITKReadWriteAddress(backgroundNodeName))\r\n filterImage = sitk.GradientMagnitudeRecursiveGaussian(backgroundImage, float(2));\r\n del backgroundImage\r\n sitk.WriteImage(filterImage, sitkUtils.GetSlicerITKReadWriteAddress(backgroundNodeName))\r\n\r\n # notify\r\n backgroundNode.GetImageData().Modified()\r\n backgroundNode.Modified()", "def restore(self, checkpoint):\n raise NotImplementedError", "def reset():\n\n total = 0\n changed = 0\n previous = {}\n for key in cmds.optionVar(list=True):\n if key.startswith(\"ragdoll\"):\n previous[key] = cmds.optionVar(query=key)\n cmds.optionVar(remove=key)\n total += 1\n\n install()\n\n for key in __.optionvars:\n prev = previous.get(_optionvarkey(key), \"\")\n new = read(key)\n\n if prev != new:\n changed += 1\n log.info(\"Resetting %s (%s = %s)\" % (key, prev, new))\n\n log.info(\"Resetted %d/%d optionvars\" % (changed, total))", "def restore_full_state(self, state):\n state_ref = self.ale.decodeState(state)\n self.ale.restoreSystemState(state_ref)\n self.ale.deleteState(state_ref)", "def update_filter_data(cache_dir=CACHE_DIR):\n # Obtain all filter IDs from cache as old_filters\n old_index = load_local_filters_index(cache_dir)\n old_filters = np.array(old_index)\n\n # Obtain all filter IDs from SVO FPS as new_filters\n logger.info(\"Fetching latest index of all filters at SVO (in batches) ...\")\n download_svo_filters_index(cache_dir)\n new_index = load_svo_filters_index(cache_dir)\n new_filters = new_index[\"filterID\"].to_numpy()\n\n # Check whether there is need to update\n if np.array_equal(old_filters, new_filters):\n logger.info('Filter data is already up-to-date!')\n set_cache_updation_date()\n return False\n\n # Iterate & remove (old_filters - new_filters) from cache\n filters_to_remove = np.setdiff1d(old_filters, new_filters)\n logger.info(\"Removing outdated filters ...\")\n for filter_id in filters_to_remove:\n facility, instrument, filter_name = re.split('/|\\.', filter_id)\n filter_file = os.path.join(cache_dir, facility, instrument,\n '{0}.vot'.format(filter_name))\n if os.path.exists(filter_file):\n os.remove(filter_file)\n remove_empty_dirs(cache_dir)\n\n # Iterate & download (new_filters - old_filters) into cache\n 
filters_to_add = np.setdiff1d(new_filters, old_filters)\n logger.info(\"Caching new filters ...\")\n iterative_download_transmission_data(filters_to_add, cache_dir)\n\n # Save in config that all filters were updated successfully\n set_cache_updation_date()\n return True", "def updateSyncDS(self, change, filter):\n # Merging existing syncDataSructure to accomodate\n if filter in self.syncDataStructure:\n temp = self.syncDataStructure[filter]\n for key, value in change.items():\n if key in temp:\n temp[key] = value\n else:\n temp[key] = value\n # no filter exists in the existing syncDataStructure\n else:\n self.syncDataStructure[filter] = change", "def change_restored(self, event):\n pass", "def copy(self) -> \"FilterAlgorithmState\":\n\n # NB: This is untested and might not be optimal tbh\n return deepcopy(self)", "def cloudflare_waf_filter_update_command(client: Client, args: Dict[str, Any]) -> CommandResults:\n\n filter_id = args['id']\n expression = args.get('expression')\n zone_id = args.get('zone_id', client.zone_id)\n ref = args.get('ref')\n description = args.get('description')\n paused = arg_to_boolean(args.get('paused')) # type: ignore\n\n response = client.cloudflare_waf_filter_update_request(\n filter_id, expression, zone_id, description=description, # type: ignore\n paused=paused, ref=ref)\n\n output = response['result']\n\n return CommandResults(\n readable_output=f'Filter {filter_id} was successfully updated.',\n outputs_prefix='CloudflareWAF.Filter',\n outputs_key_field='id',\n outputs=output,\n raw_response=response\n )", "def filterToSat( bmp, savefile = '' ):\n for h in range(bmp.height):\n for w in range(bmp.width):\n HSL = RGBtoHSL( bmp.pixels[h][w] )\n sat = int(255*HSL[1]) # convert to 0-255 range\n bmp.pixels[h][w] = (sat,sat,sat)\n if( savefile != '' ):\n bmp.save(savefile)\n return bmp", "def do_reset(self, args):\n if self.exploit is None:\n eprint(colorize('No exploit set; nothing to reset. 
Select an exploit with the \\'use\\' command',\n 'cyan'))\n return\n\n # delete the stored settings and reset the options in the current module\n if hasattr(self.exploit, '_ACsploit_exploit_settings'):\n del self.exploit._ACsploit_exploit_settings\n\n importlib.reload(self.exploit) # we need to do this to reset currexp.options back to original values\n\n self.exploit = None\n self.update_exploit(self.exploit_name)", "def restore(self, model_file, head_i=0, trunk=False):\n if trunk:\n self.model_trunk.load_weights(model_file)\n else:\n self.models[head_i].load_weights(model_file)\n self.model = self.models[head_i]", "def pop_and_restore(hsh, key, default=None):\n if key in hsh:\n value = hsh.pop(key)\n was_there = True\n else:\n value = default\n was_there = False\n\n yield value\n\n if was_there:\n hsh[key] = value\n else:\n hsh.pop(key, None)", "def revert(self):\n headerdump = self.file.readp(0, 16)\n if sum(headerdump):\n dictat,dictlen = struct.unpack(\"<QQ\", headerdump)\n dictblob = self.file.readp(dictat, dictlen)\n self.keys = pickle.loads(dictblob)\n self.buffered = {}\n self.cache = {}\n self.awaitingpunch = []\n\n else:\n self.keys = {}\n self.buffered = {}\n self.cache = {}\n self.awaitingpunch = []", "def rename_photcat(filt, origin='', revert=True):\n if revert == False:\n os.rename(origin+filt+\"_photcat.dat\", origin+filt+\\\n \"_photcat.store.dat\")\n \n if revert == True:\n os.rename(origin+filt+\"_photcat.store.dat\", origin+filt+\\\n \"_photcat.dat\")", "def restore_input(cls):\n del globals()[\"input\"]", "def onReset(self):\n #productive\n profprint()\n fileName = pathToScene = slicer.modules.needlefinder.path.replace(\"NeedleFinder.py\",\"Config/default.cfg\")\n self.logic.loadParameters(fileName)", "def onResetParameters(self):\r\n # productive #button\r\n profprint()\r\n fileName = pathToScene = slicer.modules.needlefinder.path.replace(\"NeedleFinder.py\", \"Config/default.cfg\")\r\n self.logic.loadParameters(fileName)" ]
[ "0.608854", "0.5897821", "0.56328", "0.5349189", "0.5334587", "0.5307931", "0.5285011", "0.52552176", "0.503992", "0.498326", "0.4977669", "0.49759644", "0.48976877", "0.48970455", "0.48909724", "0.48817316", "0.4835844", "0.47675633", "0.47412106", "0.47311813", "0.47181708", "0.46784982", "0.46721613", "0.46586162", "0.46424103", "0.46375385", "0.46362045", "0.4635494", "0.46325433", "0.4621641", "0.45967937", "0.4589725", "0.45789298", "0.45642987", "0.45583928", "0.45569038", "0.45537898", "0.45363718", "0.4529335", "0.4524395", "0.45185477", "0.4501976", "0.44891834", "0.44889885", "0.44857118", "0.44805515", "0.44772148", "0.44431287", "0.4439966", "0.44310653", "0.44253156", "0.44161963", "0.44123605", "0.4409558", "0.44095194", "0.44066042", "0.4406163", "0.44025466", "0.4393093", "0.4391999", "0.43905422", "0.43823448", "0.43759295", "0.43725854", "0.43576217", "0.4345339", "0.43385476", "0.43183059", "0.43142587", "0.43109873", "0.4304591", "0.43005395", "0.4297787", "0.42934978", "0.42879605", "0.42867297", "0.4272293", "0.42667395", "0.42613068", "0.42571482", "0.42557606", "0.425139", "0.4250135", "0.42482203", "0.42474803", "0.42332685", "0.42115098", "0.42024353", "0.41895407", "0.41891855", "0.41873434", "0.41852874", "0.41844913", "0.4183946", "0.4183101", "0.41829348", "0.41746932", "0.4173543", "0.41731966", "0.41731146", "0.4166382" ]
0.0
-1
Returns size, number of buckets, number of filter, number of items inserted, number of items deleted, bucket size, expansion rate, and max iteration.
def cfInfo(self, key): return self.execute_command(self.CF_INFO, key)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def length(self):\n # TODO: Count number of key-value entries in each of the buckets\n return self.size\n # for bucket in self.buckets():", "def get_length(self):\n if self.opt.num_buckets > 1:\n return sum([len(bucket) for bucket in self.data])\n else:\n return len(self.data)", "def largest_bucket(self):\n size = 0\n for i in self.__buckets:\n if i.size() > size:\n size = i.size()\n return size", "def testBucketSize(self):\n b = SomeBucket()\n fit = b.add(1000)\n self.assertEqual(100, fit)", "def capacity(self):\n return sum(f.capacity for f in self.filters)", "def length(self):\n # Loop through all buckets\n # Count number of key-value entries in each bucket\n\n # could be done with 1 line with comprehension\n # return sum(bucket.length() for bucket in self.buckets)\n\n total_entries = 0\n\n for linked_list in self.buckets:\n total_entries += linked_list.length()\n\n return total_entries", "def __init__(self):\n self.size = 1000\n self.bucket = [None] * self.size", "def _load_factor(self):\n return self.size / len(self.buckets)", "def qsize(self) -> int:\n pass", "def get_size(self):\n return (\n sys.getsizeof(self.children) +\n sys.getsizeof(self.parent) +\n sys.getsizeof(self.dataset_id) +\n sys.getsizeof(self.k) +\n self.filter.get_size()\n )", "def get_size(self) -> int:\n total_size = 0\n for entry in self.__entries:\n total_size += entry.get_size()\n return total_size", "def usedspace(self):\n self.log.info(\"freespace\")\n nbytes = 0\n keys = list(self.downloads.keys())\n keys.sort()\n for key in keys:\n download = self.downloads[key]\n nbytes += download['size']\n self.log.info(\"returning:\" + str(nbytes))\n return nbytes", "def _numQueuedTotal(self):\n queueSize = len(self.__queue) + len(self.__clientQueue)\n return queueSize", "def __len__(self):\n return len(self.indexes) // self.batch_size", "def __len__(self):\n return len(self.indexes) // self.batch_size", "def count_objects_of_size(self, n: int, **parameters: int) -> int:", "def get_space_used():\n files = jobtracker.query(\"SELECT * FROM files \" \\\n \"WHERE status IN ('added', 'downloaded', 'unverified')\")\n\n total_size = 0\n for file in files:\n total_size += int(file['size'])\n return total_size", "def get_insternal_size(self):\n return (\n sys.getsizeof(self.theta) +\n sys.getsizeof(self.num_buckets) +\n sys.getsizeof(self.k) +\n sys.getsizeof(self.fp_size) +\n sys.getsizeof(self.max_iter) +\n sys.getsizeof(self.bucket_size)\n )", "def chunk_size(self) -> global___Expression:", "def batch_size(self) -> int:\n ...", "def max_size(self):\n size = 1\n for idx in self.config.index_specs:\n size *= len(idx.distribution)\n return size", "def Capacity(self) -> int:", "def test_bound_size_of_output_queue_size_reader(synthetic_dataset):\n TIME_TO_GET_TO_STATIONARY_STATE = 0.5\n\n with make_reader(synthetic_dataset.url, reader_pool_type='process', workers_count=1) as reader:\n assert 0 == reader.diagnostics['items_produced']\n next(reader)\n # Verify that we did not consume all rowgroups (should be 10) and ventilator throttles number of ventilated\n # items\n sleep(TIME_TO_GET_TO_STATIONARY_STATE)\n assert reader.diagnostics['items_consumed'] < 5\n assert reader.diagnostics['items_inprocess'] < 5", "def calculate_total_size(apps, schema_editor):\n Data = apps.get_model(\"flow\", \"Data\")\n for data in Data.objects.all():\n hydrate_size(data, force=True)\n data.save()", "def queue_size(self):\n return len(self.groups)", "def totalsize(self):\n return sum([sz for sz in self.iterate()])", "def batch_size(self):\n return 
self.size", "def outputsizes(self):\n result = [] \n for q in self.outqueues:\n result.append(q.qsize())\n return result", "def get_num_chunks(self) -> int:", "def qsize(self): \n return self.__db.llen(self.key)", "def _bucketing(self):\n bucket = []\n if self.bucket_size_init > 0:\n _bucket_size = self.bucket_size_init\n else:\n _bucket_size = self.bucket_size\n for ex in self.mixer:\n bucket.append(ex)\n if len(bucket) == _bucket_size:\n yield self._tuple_to_json_with_tokIDs(bucket)\n bucket = []\n if _bucket_size < self.bucket_size:\n _bucket_size += self.bucket_size_increment\n else:\n _bucket_size = self.bucket_size\n if bucket:\n yield self._tuple_to_json_with_tokIDs(bucket)", "def test_get_buckets(self):\n pass", "def countitems(self):\n count = 0\n sid = self.client.scannerOpen(self.table, '', ['f:s'])\n while 1:\n r = self.client.scannerGetList(sid, 1000)\n #r = self.client.scannerGet(sid)\n if not r: break\n count += len(r)\n logging.debug('%d %s', count, r[-1].row)\n self.scannerClose(sid)\n return count", "def fsizes(self):\n return self._cache.fsizes", "def size(self):\n return self.num_item", "def list_buckets():\n pass", "def get_bucket_statistics_v2(self, bucket_name, storageTypeFilter=None):\n bucket_info = BucketInfo()\n bucket_info.bucket_name = bucket_name\n s3 = self.credentials.session.resource('s3')\n current_bucket = s3.Bucket(bucket_name)\n bucket_info.creation_date = current_bucket.creation_date\n\n mapreduce_helper = SimpleMapReduce()\n\n object_list = self.iterate_bucket_objects(bucket_name)\n\n bucket_info.total_size_of_files = functools.reduce(mapreduce_helper.reduce_size, map(mapreduce_helper.map_object_info, object_list))\n bucket_info.last_modified = functools.reduce(mapreduce_helper.reduce_last_modified, map(mapreduce_helper.map_object_info, object_list))\n\n\n return bucket_info", "def batch_request_size(self):\n return self._batch_request_size", "def size(self) -> int:\n return self.num_items", "def size(self):\n return self._N", "def qsize(self):\r\n return len(self._queue)", "def calculate_chunk_size(thread_count, item_count):\n chunk_size = int(item_count / (thread_count * 10))\n if chunk_size < 1:\n chunk_size = 1\n if chunk_size > 20:\n chunk_size = 20\n return chunk_size", "def __len__(self):\n return self.nb_iterations", "async def du() -> Tuple[Tuple[int, int, str]]:\n async with _create_client() as client:\n buckets = [\n item['Name'] for item in (await client.list_buckets())['Buckets']\n ]\n ret = []\n for bucket in buckets:\n try:\n objs = (await client.list_objects(Bucket=bucket))['Contents']\n size = sum([obj['Size'] for obj in objs])\n count = len(objs)\n ret.append((size, count, bucket))\n except KeyError:\n ret.append((0, 0, bucket))\n logger.info('Show disk usage by buckets.')\n return tuple(ret)", "def record_batch_size(self):\n return 10000", "def downloadsize(self):\n self.log.info(\"downloadsize\")\n keys = list(self.downloads.keys())\n keys.sort()\n\n # count number of pending/inprocess items\n download_size = 0\n\n for s3_uri in keys:\n download = self.downloads[s3_uri]\n state = download[\"state\"]\n if state in (\"PENDING\", \"INPROGRESS\"):\n download_size += download[\"size\"]\n return download_size", "def num_relocations(self):\n return self._size // self.entry_size", "def getSize(self):\r\n list = self.getList()\r\n return len(list)", "def get_size(self):\n cum_size = 0\n for stream in self.__streams.values():\n cum_size += sys.getsizeof(stream)\n for trace in stream:\n cum_size += sys.getsizeof(trace)\n cum_size += 
sys.getsizeof(trace.stats)\n cum_size += sys.getsizeof(trace.stats.__dict__)\n cum_size += sys.getsizeof(trace.data)\n cum_size += trace.data.nbytes\n # Add one percent buffer just in case.\n return cum_size * 1.01", "def size(self):\n\t\treturn self._count", "def __init__(self):\n self.bucket_length = 997\n self.bucket_array = [Bucket() for i in range(self.bucket_length)]", "def outputsizes(self):\n\n result = []\n for q in self.outqueues:\n result.append(q.qsize())\n return result", "def __init__(self):\n self.m = 1000\n self.bucket = [None] * 1000", "def gen_buckets(num_buckets, data, max_val=256):\n\n default_size_of_bucket = int(len(data)/3)\n print(f\"Bucket size: {default_size_of_bucket}\")\n all_buckets = []\n for i in range(num_buckets):\n curr_buck = [0 for _ in range(max_val)]\n np.random.shuffle(data)\n curr_sample = data[0:default_size_of_bucket]\n for i in range(len(curr_sample)):\n curr_buck[curr_sample[i]] += 1\n all_buckets.append(curr_buck)\n return all_buckets", "def return_item_collection_metrics_size(self):\n return self.__return_item_collection_metrics.size()", "def __len__(self):\n return self.limit_batches", "def size(self):\r\n if self.full():\r\n return self.capacity()\r\n else:\r\n size = self._read_index - self._write_index\r\n if size < 0:\r\n return self.capacity() + size # wrap around\r\n else:\r\n return size", "def getSize(self) -> int:\n ...", "def getSize(self) -> int:\n ...", "def get_num_items(self):\r\n return self.num_items", "def acq_batch_size(self):\n return self.batch_size * self.batches_per_acquisition", "def size(self):\n return len(self._queue_items)", "def get_load_factor(self):\n # Your code here\n return self.total_items / self.capacity", "def qsize(self) -> int:\n return len(self._queue)", "def __init__(self, buckets = 200):\n self.data = [None] * buckets\n self.slot = [None] * buckets\n self.size = buckets", "def get_capacity():\n fs.get_capacity()", "def size():\r\n qry = ImportQueue.query.filter(or_(\r\n ImportQueue.status != COMPLETE,\r\n ImportQueue.status != ERROR))\r\n return qry.count()", "def size(self):\r\n # Anthony stage 2\r\n return number_size(self.n) - 1", "def size(self):\n return self.N", "def get_load_factor(self):\r\n return self.num_items / self.table_size", "def get_bucket_statistics(self, bucket_name):\n bucket_info = BucketInfo()\n bucket_info.bucket_name = bucket_name\n s3 = self.credentials.session.resource('s3')\n current_bucket = s3.Bucket(bucket_name)\n bucket_info.creation_date = current_bucket.creation_date\n\n for bucket_object in current_bucket.objects.all():\n bucket_info.total_size_of_files += bucket_object.size\n bucket_info.number_of_files += 1\n if (bucket_info.last_modified is None) or (bucket_object.last_modified > bucket_info.last_modified):\n bucket_info.last_modified = bucket_object.last_modified\n\n return bucket_info", "def overall_reduction(self):\n return 84", "def size(config, accounts=(), day=None, group=None, human=True, region=None):\n config = validate.callback(config)\n destination = config.get('destination')\n client = boto3.Session().client('s3')\n day = parse(day)\n\n def export_size(client, account):\n paginator = client.get_paginator('list_objects_v2')\n count = 0\n size = 0\n session = get_session(account['role'], region)\n account_id = session.client('sts').get_caller_identity()['Account']\n prefix = destination.get('prefix', '').rstrip('/') + '/%s' % account_id\n prefix = \"%s/%s/%s\" % (prefix, group, day.strftime(\"%Y/%m/%d\"))\n account['account_id'] = account_id\n for page in 
paginator.paginate(\n Bucket=destination['bucket'],\n Prefix=prefix):\n for k in page.get('Contents', ()):\n size += k['Size']\n count += 1\n return (count, size)\n\n total_size = 0\n accounts_report = []\n logging.getLogger('botocore').setLevel(logging.ERROR)\n with ThreadPoolExecutor(max_workers=16) as w:\n futures = {}\n for account in config.get('accounts'):\n if accounts and account['name'] not in accounts:\n continue\n futures[w.submit(export_size, client, account)] = account\n\n for f in as_completed(futures):\n account = futures[f]\n count, size = f.result()\n account.pop('role')\n account.pop('groups')\n total_size += size\n if human:\n account['size'] = get_human_size(size)\n else:\n account['size'] = size\n account['count'] = count\n accounts_report.append(account)\n\n accounts_report.sort(key=operator.itemgetter('count'), reverse=True)\n print(tabulate(accounts_report, headers='keys'))\n log.info(\"total size:%s\", get_human_size(total_size))", "def heavy_output_counts(self):\n return self._heavy_output_counts", "def qsize(self) -> int:\n return self._queue.qsize()", "def search_space_size(self):", "def check_buck(bucket, tabular=False):\n expected_keys = [u'index_count', u'views_count', u'items', u'mutations',\n u'tombstones', u'fts_count', u'analytics_count', u'size', u'name']\n self.assertTrue(set(expected_keys).issubset(bucket.keys()))\n\n index_count, views_count, items, mutations, tombstones, fts_count, \\\n analytics_count, size, name = [bucket[key] for key in expected_keys]\n\n # Check bucket name\n self.assertTrue(name in expected_bucks)\n # Check bucket size\n self.assertTrue(size >= 0)\n # Check bucket items\n self.assertTrue(items in [0, self.num_items])", "def capacity(self):\n raise NotImplementedError()", "def __len__(self):\n total_objs = 0\n\n if self._shelve is not None:\n total_objs += len(self._shelve)\n\n if self._dict is not None:\n total_objs += len(self._dict)\n\n return total_objs", "def get_step_size(total_items, batch_size):\n return np.ceil(total_items / batch_size)", "def __init__(self):\n self.buckets = [-1] * 10\n self.length = len(self.buckets)", "def size_in_gb(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"size_in_gb\")", "def size_in_gb(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"size_in_gb\")", "def size_in_gb(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"size_in_gb\")", "def get_Q_size(self):\n return len(self.qTable)", "def _update_cardinality(self, c):\n if c.type in STRUCT:\n Log.error(\"not supported\")\n try:\n if c.table == \"meta.columns\":\n with self.meta.columns.locker:\n partitions = jx.sort([g[c.es_column] for g, _ in jx.groupby(self.meta.columns, c.es_column) if g[c.es_column] != None])\n self.meta.columns.update({\n \"set\": {\n \"partitions\": partitions,\n \"count\": len(self.meta.columns),\n \"cardinality\": len(partitions),\n \"last_updated\": Date.now()\n },\n \"where\": {\"eq\": {\"table\": c.table, \"es_column\": c.es_column}}\n })\n return\n if c.table == \"meta.tables\":\n with self.meta.columns.locker:\n partitions = jx.sort([g[c.es_column] for g, _ in jx.groupby(self.meta.tables, c.es_column) if g[c.es_column] != None])\n self.meta.columns.update({\n \"set\": {\n \"partitions\": partitions,\n \"count\": len(self.meta.tables),\n \"cardinality\": len(partitions),\n \"last_updated\": Date.now()\n },\n \"where\": {\"eq\": {\"table\": c.table, \"name\": c.name}}\n })\n return\n\n es_index = c.table.split(\".\")[0]\n result = self.default_es.post(\"/\" + es_index + \"/_search\", data={\n 
\"aggs\": {c.name: _counting_query(c)},\n \"size\": 0\n })\n r = result.aggregations.values()[0]\n count = result.hits.total\n cardinality = coalesce(r.value, r._nested.value, 0 if r.doc_count==0 else None)\n if cardinality == None:\n Log.error(\"logic error\")\n\n query = Data(size=0)\n if cardinality > 1000 or (count >= 30 and cardinality == count) or (count >= 1000 and cardinality / count > 0.99):\n Log.note(\"{{table}}.{{field}} has {{num}} parts\", table=c.table, field=c.es_column, num=cardinality)\n with self.meta.columns.locker:\n self.meta.columns.update({\n \"set\": {\n \"count\": count,\n \"cardinality\": cardinality,\n \"last_updated\": Date.now()\n },\n \"clear\": [\"partitions\"],\n \"where\": {\"eq\": {\"es_index\": c.es_index, \"es_column\": c.es_column}}\n })\n return\n elif c.type in _elasticsearch.ES_NUMERIC_TYPES and cardinality > 30:\n Log.note(\"{{field}} has {{num}} parts\", field=c.name, num=cardinality)\n with self.meta.columns.locker:\n self.meta.columns.update({\n \"set\": {\n \"count\": count,\n \"cardinality\": cardinality,\n \"last_updated\": Date.now()\n },\n \"clear\": [\"partitions\"],\n \"where\": {\"eq\": {\"es_index\": c.es_index, \"es_column\": c.es_column}}\n })\n return\n elif len(c.nested_path) != 1:\n query.aggs[literal_field(c.name)] = {\n \"nested\": {\"path\": c.nested_path[0]},\n \"aggs\": {\"_nested\": {\"terms\": {\"field\": c.es_column, \"size\": 0}}}\n }\n else:\n query.aggs[literal_field(c.name)] = {\"terms\": {\"field\": c.es_column, \"size\": 0}}\n\n result = self.default_es.post(\"/\" + es_index + \"/_search\", data=query)\n\n aggs = result.aggregations.values()[0]\n if aggs._nested:\n parts = jx.sort(aggs._nested.buckets.key)\n else:\n parts = jx.sort(aggs.buckets.key)\n\n Log.note(\"{{field}} has {{parts}}\", field=c.name, parts=parts)\n with self.meta.columns.locker:\n self.meta.columns.update({\n \"set\": {\n \"count\": count,\n \"cardinality\": cardinality,\n \"partitions\": parts,\n \"last_updated\": Date.now()\n },\n \"where\": {\"eq\": {\"es_index\": c.es_index, \"es_column\": c.es_column}}\n })\n except Exception, e:\n if \"IndexMissingException\" in e and c.table.startswith(TEST_TABLE_PREFIX):\n with self.meta.columns.locker:\n self.meta.columns.update({\n \"set\": {\n \"count\": 0,\n \"cardinality\": 0,\n \"last_updated\": Date.now()\n },\n \"clear\":[\n \"partitions\"\n ],\n \"where\": {\"eq\": {\"es_index\": c.es_index, \"es_column\": c.es_column}}\n })\n else:\n self.meta.columns.update({\n \"set\": {\n \"last_updated\": Date.now()\n },\n \"clear\": [\n \"count\",\n \"cardinality\",\n \"partitions\",\n ],\n \"where\": {\"eq\": {\"table\": c.table, \"es_column\": c.es_column}}\n })\n Log.warning(\"Could not get {{col.table}}.{{col.es_column}} info\", col=c, cause=e)", "def buckets(self):\n return self.indexed", "def size(self):\r\n return len(self.queue)", "def size(self):\r\n return len(self.queue)", "def analyze_all(q: int = 100, n: int = 75000):\n total_start_time = time.time()\n sort_correct, sort_results = bucket_sort_general(q, n)\n print('sort_correct')\n sort_sorted_list = bucket_sort_sorted_list(q, n)\n print('sort_sorted_list')\n sort_reversed_list = bucket_sort_reversed_list(q, n)\n print('sort_reversed_list')\n sort_unique_list = bucket_sort_unique_list(q, n)\n\n headers = ['Type', 'Avg', 'Min', 'Max', 'Std']\n table = [['Bucket sort normal', sum(sort_results) / len(sort_results), min(sort_results), max(sort_results),\n pstdev(sort_results)],\n ['Bucket sort sorted list', sum(sort_sorted_list) / 
len(sort_sorted_list), min(sort_sorted_list),\n max(sort_sorted_list), pstdev(sort_sorted_list)],\n ['bucket sort reversed list', sum(sort_reversed_list) / len(sort_reversed_list), min(sort_reversed_list),\n max(sort_reversed_list), pstdev(sort_reversed_list)],\n ['bucket sort unique values', sum(sort_unique_list) / len(sort_unique_list), min(sort_unique_list),\n max(sort_unique_list), pstdev(sort_unique_list)]]\n\n print(f'Running all the metrics took {time.time() - total_start_time} seconds')\n print(f'Bucket sort correct = {sort_correct}')\n print(f'Each metric is calculated with a population of {q} and a list length of {n}')\n print(tabulate(table, headers=headers))\n return table", "def queue_size(self):\n return self.sql_queue.qsize()", "def _getqueuesize(self):\n return self._queuesize", "def total_chunks(self) -> global___Expression:", "def totalfiles(self):\n return len([sz for sz in self.iterate()])", "def __len__(self):\n return self._used - self._deleted", "def size(self) -> int:", "def calculate_cache_size(self):\n cache_size = self._total_chunk_size_left()\n N_l = self.N_l // self.conv_factor\n cache_sizes = []\n for lth in range(self.n_layers):\n cache_sizes.append(cache_size)\n if self.lc_bidir:\n cache_size = max(0, cache_size - N_l)\n N_l //= self.subsample_factors[lth]\n cache_size //= self.subsample_factors[lth]\n return cache_sizes", "def size(self):\n return self.N # Number of items in the stack", "def __len__(self) -> int:\n num_batches, remainder = divmod(len(self.mapped_triples), self.batch_size)\n if remainder and not self.drop_last:\n num_batches += 1\n return num_batches", "def blob_sizes(self):\n _blob_sizes = defaultdict(int)\n for s in self.subjects:\n for sa in s.samples:\n for k, v in sa.blob_sizes.items():\n _blob_sizes[k] += v\n return _blob_sizes", "def size(self) -> int:\n\n return self.sizes.sum()" ]
[ "0.6689139", "0.6531792", "0.6451731", "0.639494", "0.6214589", "0.6209066", "0.6115226", "0.61036277", "0.5979696", "0.5947789", "0.5932758", "0.5926838", "0.5926114", "0.59038323", "0.59038323", "0.58981425", "0.5884746", "0.5877877", "0.5871863", "0.5868464", "0.58302355", "0.5825594", "0.58168375", "0.5783608", "0.5766638", "0.57629913", "0.57512707", "0.57503533", "0.5748605", "0.5747349", "0.574473", "0.5744553", "0.5730874", "0.5726978", "0.570457", "0.5688532", "0.5680979", "0.5670851", "0.56585795", "0.56546587", "0.5654342", "0.56422365", "0.5633587", "0.5630131", "0.56281877", "0.56174976", "0.5613693", "0.5607403", "0.5606927", "0.5604757", "0.5595507", "0.5591628", "0.55869055", "0.5584834", "0.5583293", "0.5579192", "0.55715895", "0.5567075", "0.5567075", "0.5563086", "0.5553296", "0.554876", "0.5546359", "0.5542711", "0.5530545", "0.5530317", "0.5526456", "0.5526225", "0.55158895", "0.5515073", "0.55149823", "0.5513382", "0.5511001", "0.5505042", "0.5500898", "0.54982054", "0.5492423", "0.5486626", "0.54836667", "0.54787445", "0.5476372", "0.54754996", "0.54754996", "0.54754996", "0.54666567", "0.5460781", "0.54601824", "0.5449944", "0.5449944", "0.5442361", "0.5440666", "0.5439211", "0.5433254", "0.5431964", "0.5431514", "0.54258984", "0.54254943", "0.54249036", "0.54227364", "0.54222083", "0.5421126" ]
0.0
-1
Initializes a CountMin Sketch ``key`` to dimensions (``width``, ``depth``) specified by user.
def cmsInitByDim(self, key, width, depth):
    params = [key, width, depth]
    return self.execute_command(self.CMS_INITBYDIM, *params)
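The method above is a thin wrapper that sends ``CMS.INITBYDIM`` to the server, reserving a Count-Min Sketch whose ``width`` (counters per hash row) and ``depth`` (number of hash rows) are fixed explicitly. A minimal usage sketch follows; the ``redisbloom`` Client import path is an assumption for illustration, not part of this record.

# Usage sketch (assumptions: a RedisBloom-enabled Redis server on localhost and the
# `redisbloom` package's Client, which exposes this wrapper).
from redisbloom.client import Client

rb = Client(host='localhost', port=6379)

# Reserve a Count-Min Sketch with 2000 counters per hash row and 10 hash rows;
# memory grows with width * depth, and accuracy improves with both.
rb.cmsInitByDim('page_hits', 2000, 10)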
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, k:int, **kwargs):\n self.k = k", "def __init__(self, width = 40):\n self.width = width\n self.state = 0\n self.total = 0", "def __init__(self, key):\n self.key = key\n self.BLOCK_SIZE = 16", "def __init__(self, k_d, k_s=0., p=20., k_m=0., k_a=None):\n # TODO A5 (Step2) implement this function\n # Check if each property is an array of shape (h, w, 3)\n # If so, then apply the property using the uv coordinates supplied by the geometry.\n self.k_d = k_d\n self.k_s = k_s\n self.p = p\n self.k_m = k_m\n self.k_a = k_a if k_a is not None else k_d", "def __init__(self, top_k: int = 1) -> None:\n self.top_k = top_k", "def __init__(self, d=1):\r\n self.depth = d", "def __init__(self, poss_keys, poss_vals):\n self.Poss_Tree = {x: list(POSS_DIGITS) for x in poss_keys}\n self.place = len(str(poss_keys[0]))", "def __init__(self, k=1):\n self.k = k\n self.x = None\n self.y = None\n self.classes_ = None", "def __init__(self, pad_size, input_size, pre_pad=False):\n self.pre_pad = pre_pad\n self.pad_size = pad_size\n self.input_size = input_size\n\n self.build()", "def __init__(self, key: bytearray):\n self.__key = key\n self.__KSA(bytearray([i for i in range(256)]))", "def __init__(self, ksize, stride=None):\n \n self._ksize = (ksize, ksize) if isinstance(ksize, int) else ksize\n self._pad = (0, 0, 0, 0)\n self._stride = stride\n \n if stride is None:\n self._stride = tuple(self._ksize)\n elif isinstance(stride, int):\n self._stride = (stride, stride)\n \n self._X_shape = None\n self._cols = None\n self._max_idx = None", "def __init__(self, min_player_count):\n self.min_player_count = min_player_count", "def __init__(self, k):\n self.__start = 0\n self.__size = 0\n self.__buffer = [0] * k", "def __init__(self, dict = {}):\r\n if dict == {}:\r\n self.zero_val()\r\n else:\r\n self.piDD = dict\r\n self.top_node = utilities.max_length_in_list(self.return_keys())\r\n if self.piDD[self.top_node] == None:\r\n self.dim = 0\r\n else:\r\n self.dim = self.piDD[self.top_node][0][0]", "def __init__(self, ksize: torch.Tensor = 7, sigma: torch.Tensor = 5):\r\n super().__init__()\r\n self.ksize = ksize\r\n self.sigma = sigma\r\n\r\n self.conv2d_guass = get_gaussian_kernel(self.ksize, self.sigma)", "def __init__(self):\n self._root = None\n self._size = 0\n self._curr_idx = 0\n self._depths, self._heights = None, None", "def __init__(self, width=10, height=10, density=0.25):\n\t\tself.width = width\n\t\tself.height = height\n\t\t# create marks and mine field\n\t\tself.marks = [[CLOSED for _ in range(height)] for _ in range(width)]\n\t\tself.mines = [[random.random() < density for _ in range(height)] \n\t\t for _ in range(width)]", "def __init__(self, env, key, factor, dim):\n gym.ObservationWrapper.__init__(self, env)\n\n self.key = key\n self.factor = factor\n self.dim = dim\n\n space = self.observation_space.spaces[self.key]\n shape = list(space.shape)\n\n for d in dim:\n shape[d] *= self.factor\n\n shape = np.asarray(shape, dtype=np.int)\n\n self.observation_space.spaces[self.key] = gym.spaces.Box(0, 255, shape, dtype=np.float32)", "def __init__(self, height, width):\n # number of keypoint kind\n self.kpn = 4\n # max output object in one image\n self.maxDet = 20\n # object detect threshold, confidence\n self.obj_thr = 0.5\n # peak detect threshold, unit pixel\n self.peak_thr = 0.5\n # see threshold\n self.see_thr = 0.8\n # peak close threshold, unit pixel\n self.close_thr = 1.0\n self.height = height\n self.width = width\n # assit array\n self.x_array = np.tile(np.arange(self.width), (self.height, 
1))\n self.y_array = np.tile(np.arange(self.height).reshape(-1, 1),\n (1, self.width))", "def __init__(self, width: int, height: int, food: List[List[int]]):\n self.n = height\n self.m = width\n self.dirs = {'L': [0, -1], 'U': [-1, 0], 'R': [0, 1], 'D': [1, 0]}\n self.food = collections.deque(food)\n self.snake_set = {(0, 0)}\n self.snake = collections.deque([(0, 0)])", "def DEFAULT_MIN_DEPTH(self): # real signature unknown; restored from __doc__\n pass", "def _init(self, key, name):\n\n self.key = key\n self.name = name\n\n self._state = Node.State.INVALID\n self._value = None\n\n # Keyword and positional arguments to compute_value.\n self._args = ObservableList()\n self._kwargs = ObservableDict()\n\n self.args.listeners.add(self._on_args_changed)\n self.kwargs.listeners.add(self._on_kwargs_changed)\n\n # Map Nodes to the number of times they appear in this Node's\n # arguments.\n self._arg_refcount = {}\n\n # Nodes whose values depend on this Node.\n self._dependents = set()\n\n if NodeCallStack.stack:\n self._created_by = NodeCallStack.stack[-1]\n\n NodeCreateEvent(self)\n\n NodeCallStack._push(self)", "def __init__(self, k: int, training_set: np.ndarray):\n self._k = k\n self._training_set = training_set", "def __init__(self,\n size: int,\n counter_num: int,\n time_window: float,\n update_sample_size: int=5):\n super().__init__(size)\n\n self.__counters = []\n for i in range(counter_num):\n sketch = FullCounter()\n self.__counters.append(sketch)\n\n self.__time_window = time_window\n self.__processed_windows = 0\n self.__from_window_start = 0.0\n\n self.__priority_dict = PriorityDict()\n\n self.__update_sample_size = update_sample_size", "def __init__(self, N, K, sliding_window=True):\n self.K = K\n self.N = N\n self.sliding_window = sliding_window", "def __init__(self, k, p, sample_p=1):\n # Maximum sample size\n self.k = k\n\n # A dictionary containing the sampled elements\n # The dictionary key is the key of the element\n # The value is a tuple (seed, count)\n self.elements = {}\n\n # The function of the frequencies that the sketch estimates\n # For now it's the p-th frequency moment, but in the future we may\n # support other functions (passed as a parameter)\n self.func_of_freq = lambda x: x**p\n\n # The power of values used for the sampling weights\n self.sample_p = sample_p", "def __init__(self, ksize_low, ksize_high=None): \n self._sigma_low = 0.3*(ksize_low//2 - 1) + 0.8\n \n if ksize_high is None:\n self._sigma_high = np.sqrt(2)*self._sigma_low\n else:\n self._sigma_high = 0.3*(ksize_high//2 - 1) + 0.8", "def __init__(self, dimensions=2):\n assert dimensions > 0\n for d in range(0,dimensions+1):\n self.weight.append(0)", "def __init__(self, width, length):\n self.width = width\n self.length = length", "def __init__(self, k):\n self.k = k\n self.N = 2**self.k", "def __init__(self, *args, **kwds):\n if args or kwds:\n super(KltSettings, self).__init__(*args, **kwds)\n #message fields cannot be None, assign default values for those that are\n if self.max_features is None:\n self.max_features = 0\n if self.window_size is None:\n self.window_size = 0\n if self.quality is None:\n self.quality = 0.\n if self.min_distance is None:\n self.min_distance = 0.\n if self.harris is None:\n self.harris = 0.\n if self.size_block is None:\n self.size_block = 0\n if self.pyramid_lvl is None:\n self.pyramid_lvl = 0\n if self.mask_border is None:\n self.mask_border = 0\n else:\n self.max_features = 0\n self.window_size = 0\n self.quality = 0.\n self.min_distance = 0.\n self.harris = 0.\n 
self.size_block = 0\n self.pyramid_lvl = 0\n self.mask_border = 0", "def __init__(self, width, height):\n\n self.width = width\n self.height = height\n self._reachable = dict()\n\n # Initialize self._reachable\n for x in range(1, width + 1):\n for y in range(1, height + 1):\n self._reachable[(x, y)] = set()", "def __init__(self, window_step: int = 100,\n window_width: Optional[int] = None,\n pre_context: int = 0,\n initial_frame_count: int = 100):\n super(WindowedDataStore, self).__init__()\n if int(window_step) != window_step:\n raise ValueError('Must be an integer window_step for now, not %g.' %\n window_step)\n\n if window_width is None:\n window_width = int(3 * window_step)\n logging.info('Initializing AudioDataStore with step of %d and width of %d.',\n window_step, window_width)\n\n # TODO allow for fractional steps\n if window_step > window_width:\n raise ValueError('window_step (%d) must be less than or equal to '\n 'window_width (%d)' % (window_step, window_width))\n self._window_width = int(window_width)\n self._pre_context = int(pre_context)\n self._window_step = int(window_step)\n self._max_frames = int(initial_frame_count * max(window_step,\n window_width))\n self._data_store = None # Where we store the data till it is used.\n self._count = 0", "def __init__(self, M, N, D, K):\n\n # Sanity checks\n assert len(D) == M\n assert K < min(D)\n assert K < N\n\n self.M = M\n self.N = N\n self.K = K\n self.D = D", "def __init__(self, key):\n self._block_size = AES.block_size\n self._key = hashlib.sha256(get_as_bytes(key)).digest()", "def __init__(self, growth_k, layers_per_block, num_classes):\n self.growth_k = growth_k\n self.layers_per_block = layers_per_block\n self.nb_blocks = len(layers_per_block)\n self.num_classes = num_classes", "def init(self, rng_key, num_warmup, init_params, model_args, model_kwargs):\n raise NotImplementedError", "def __init__(self, k=5):\n self.k = k", "def __init__(self, key, updateProposer, sampleShape, numChains=1, updateProposerArg=None,\n numSamples=100, thermalizationSweeps=10, sweepSteps=10):\n\n stateShape = (numChains,) + sampleShape\n if global_defs.usePmap:\n stateShape = (global_defs.device_count(),) + stateShape\n self.states=jnp.zeros(stateShape, dtype=np.int32)\n\n self.updateProposer = updateProposer\n self.updateProposerArg = updateProposerArg\n\n self.key = key\n if global_defs.usePmap:\n self.key = jax.random.split(self.key, global_defs.device_count())\n self.thermalizationSweeps = thermalizationSweeps\n self.sweepSteps = sweepSteps\n self.numSamples = numSamples\n\n self.numChains = numChains\n\n # jit'd member functions\n self._get_samples_jitd = {} # will hold a jit'd function for each number of samples\n self._get_samples_gen_jitd = {} # will hold a jit'd function for each number of samples", "def __init__(self) -> None:\n super().__init__()\n self.dimensions = 2", "def __init__(self, *args, **kargs):\n \n # ========== Class Data Attributes ===================================\n self.name = 'x'\n r\"\"\"(string) Name of this coordinate dimension (e.g. 
'x')\"\"\"\n self.num_cells = None\n r\"\"\"(int) - Number of cells in this dimension :attr:`units`\"\"\"\n self.lower = 0.0\n r\"\"\"(float) - Lower computational dimension extent\"\"\"\n self.upper = 1.0\n r\"\"\"(float) - Upper computational dimension extent\"\"\"\n self.on_lower_boundary = None\n r\"\"\"(bool) - Whether the dimension is crossing a lower boundary.\"\"\"\n self.on_upper_boundary = None\n r\"\"\"(bool) - Whether the dimension is crossing an upper boundary.\"\"\"\n self.units = None\n r\"\"\"(string) Corresponding physical units of this dimension (e.g. \n 'm/s'), ``default = None``\"\"\"\n self.num_ghost = None\n\n # Parse args\n if isinstance(args[0],float):\n self.lower = float(args[0])\n self.upper = float(args[1])\n self.num_cells = int(args[2])\n elif isinstance(args[0],basestring):\n self.name = args[0]\n self.lower = float(args[1])\n self.upper = float(args[2])\n self.num_cells = int(args[3])\n else:\n raise Exception(\"Invalid initializer for Dimension.\")\n \n for (k,v) in kargs.iteritems():\n setattr(self,k,v)", "def __init__(self, width, height):\n self.integer_validator(\"width\", width)\n self.integer_validator(\"height\", height)\n self.__width = width\n self.__height = height", "def __init__(self,key):\n self.block_size = 32\n self.key = hashlib.sha256(key).digest()", "def __init__(self, width, height):\n self.integer_validator(\"width\", width)\n self.__width = width\n self.integer_validator(\"height\", height)\n self.__height = height", "def __init__(self, key):\n\n def keys(key, num_rounds):\n \"\"\"Yields the permuted key bitstring for i = 1..num_rounds\"\"\"\n C, D = key[:28], key[28:]\n # Rounds are 1-indexed, so shift array over by one\n left_shifts = [None, 1, 1, 2, 2, 2, 2, 2, 2, 1, 2, 2, 2, 2, 2, 2, 1]\n for i in range(1, num_rounds + 1):\n # Negate each rotation to rotate left.\n C, D = rotate(C, -left_shifts[i]), rotate(D, -left_shifts[i])\n yield self.permute(C + D, self._CD_permutation)\n\n self.key = list(bits_of(key, 64))\n # Permute the key. The permutation discards the parity bits...\n self.key = self.permute(self.key, self._key_permutation)\n self.number_of_rounds = 16\n # A list of the 16 keys K1 .. 
K16, shifted over by one to allow 1-indexing.\n self.keys = [None] + list(keys(self.key, self.number_of_rounds))", "def __init__(self, kernel_size):\r\n super().__init__()\r\n self.kernel_size = kernel_size", "def __init__(self, kernel_size):\r\n super().__init__()\r\n self.kernel_size = kernel_size", "def __init__(self):\n self.state_dim = 12\n self.measurement_dim = 6", "def __init__(self, width, height, circular=True):\n self.width = width\n self.height = height\n self.size = width * height\n self.idx_list = []\n self._depth_buffer = [[] for _ in range(self.size)]\n self._depth = [0] * self.size\n self.circular = circular", "def __init__(self, *args: Any, **kwargs: Any) -> None:\n super(CollisionsMetric, self).__init__(initial_value=0)", "def __init__(self, M, K):\r\n \r\n self.M = M\r\n self.K = K", "def __init__(__self__, *,\n size: pulumi.Input[int]):\n pulumi.set(__self__, \"size\", size)", "def initialize(X, k):\n if not isinstance(X, np.ndarray) or X.ndim != 2:\n return None, None, None\n if not isinstance(k, int) or k <= 0:\n return None, None, None\n _, d = X.shape\n C, clss = kmeans(X, k)\n pi = 1 / k * np.ones(k)\n m = C\n S = np.array([np.identity(d)] * k)\n return pi, m, S", "def MinHks(N): \n return EntropyKS(nx.Graph([(i,i+1) for i in range(N-1)]))", "def __init__(self,Nx=24,Ny=24,kx0=-pi/3.,ky0=0.,kxmax=pi,kymax=2.*pi/np.sqrt(3.)):\n self.Nx=Nx\n self.Ny=Ny\n self.kx0=kx0\n self.ky0=ky0\n self.kxmax=kxmax\n self.kymax=kymax\n self.dkx=(kxmax-kx0)/float(Nx)\n self.dky=(kymax-ky0)/float(Ny)", "def initialise_source(self, c, key):\n return 0", "def __init__(self, D, K):\n\t\tself.D = D \n\t\tself.K = K \n\t\tself.V = np.zeros((D+1,K))\n\t\treturn", "def initialise_source(self, c, key):\n if key == 'p':\n return 1e5\n elif key == 'h':\n if self.Q.val < 0 and self.Q.is_set:\n return 1e5\n elif self.Q.val > 0 and self.Q.is_set:\n return 5e5\n else:\n return 3e5", "def __init__(self, plasma_parent):\n super(LevelNumberDensity, self).__init__(plasma_parent)\n self.calculate = self._calculate_dilute_lte\n self._update_inputs()\n self.initialize_indices = True", "def __init__(self, k=2):\n self.k = k", "def __init__(self, key=None):\n self.key = key", "def __init__(self, initial_value: float = 0) -> None:\n self.breakpoints = SortedDict()\n self._initial_value: float = initial_value", "def __init__(self,k,data,max_guess=(100,100),min_guess=(-100,-100)):\r\n\t\tself.k = k\r\n\t\tself.data = data\r\n\t\tself.max_guess = max_guess\r\n\t\tself.min_guess = min_guess", "def __init__(self, key: bytes):\n\n if len(key) != 32:\n raise ValueError('Key must be 32 bytes long')\n self.key = key", "def __init__(self, k):\n self._data = []\n self._length = k", "def __init__(\n self, key: str, weight: float = None, neighbours: Dict[str, \"Edge\"] = None\n ):\n self.__key = key\n if weight is None:\n weight = self.DEFAULT_VERTEX_WEIGHT\n self._weight = weight\n\n if neighbours is None:\n neighbours = {}\n self._neighbours = neighbours", "def __init__(self, kernel_size, *args, **kwargs):\n super().__init__()\n self.kernel_size = kernel_size", "def __init__(self, kernel_size, *args, **kwargs):\n super().__init__()\n self.kernel_size = kernel_size", "def __init__(self):\n self.stack =[]\n self.min_num = None", "def __init__(self, board_dim= DEFAULT_DIM):\r\n self.width = board_dim\r\n self.height = board_dim\r\n\r\n self.grid = np.array([[' '] * self.width for i in range(self.height)])\r\n self.num_checkers = 0 # keeps track of how many checkers have been added\r\n\r\n self.available_moves = [(row, 
col) for row in range(self.height) for col in range(self.width)]\r\n\r\n # Specify the winning condition based on the board's dimension\r\n if (self.width < 5):\r\n self.win_condition = self.width\r\n else:\r\n self.win_condition = 5", "def __init__(self, image_size, heatmap_size):\n super(ProjectLayer, self).__init__()\n self.image_size = image_size\n self.heatmap_size = heatmap_size\n if isinstance(self.image_size, int):\n self.image_size = [self.image_size, self.image_size]\n if isinstance(self.heatmap_size, int):\n self.heatmap_size = [self.heatmap_size, self.heatmap_size]", "def __init__(self, width, height, x=0, y=0, id=None):\n __dict_args = {\"width\": width, \"height\": height, \"x\": x, \"y\": y}\n self.input_validator(__dict_args)\n self.__width = width\n self.__height = height\n self.__x = x\n self.__y = y\n super().__init__(id)", "def __init__(self):\n self.stack = list()\n self.count = 0\n self.min = 0", "def __init__(self,width=8,height=8):\n\t\tif height > 32 or width < 1 or height < 1:\n\t\t\traise \"Height must be between 1 and 32, width must be greater than 0\"\n\n\t\tself.Width = width\n\t\tself.Height = height\n\t\tself.Grid = [0] * width # we'll use 8 bits of the number in the array", "def __init__(self, height, width):\n self.height, self.width = height, width\n self.board = self.create_board_matrix(height, width)\n self.refresh_rate = 0.3\n self.points = 0 # pieces successfully added\n self.level = 1", "def __init__(self,\n root,\n spect_paths,\n window_size,\n spect_key='s',\n timebins_key='t',\n transform=None,\n target_transform=None,\n ):\n super().__init__(root, transform=transform, target_transform=target_transform)\n self.spect_paths = spect_paths\n self.spect_key = spect_key\n self.timebins_key = timebins_key\n self.window_size = window_size\n\n tmp_x_ind = 0\n one_x, _ = self.__getitem__(tmp_x_ind)\n # used by vak functions that need to determine size of window,\n # e.g. 
when initializing a neural network model\n self.shape = one_x.shape", "def __init__(self, size, filler=None):\n\n self.__data = {}\n self.__xmax, self.__ymax = size\n if filler:\n self.fill(filler)", "def __init__(self, env, k):\n gym.Wrapper.__init__(self, env)\n self.k = k\n self.frames = deque([], maxlen=k)\n shp = env.observation_space.shape\n self.observation_space = spaces.Box(low=0, high=255, shape=(shp[:-1] + (shp[-1] * k,)), dtype=env.observation_space.dtype)", "def __init__(self, env, k):\n gym.Wrapper.__init__(self, env)\n self.k = k\n self.frames = deque([], maxlen=k)\n shp = env.observation_space.shape\n self.observation_space = spaces.Box(low=0, high=255, shape=(shp[:-1] + (shp[-1] * k,)), dtype=env.observation_space.dtype)", "def __init__(self) -> None:\n self.gap = '' # type: str\n self.pixels(0)", "def __init__(self, env, k):\n gym.Wrapper.__init__(self, env)\n self.k = k\n self.frames = deque([], maxlen=k)\n shp = env.observation_space.shape\n self.observation_space = gym.spaces.Box(\n low=0,\n high=255,\n shape=((shp[-1] * k,) + shp[:-1]),\n dtype=env.observation_space.dtype)", "def __init__(self, key):\n self.key = key", "def __init__(self):\n self._width = 0\n self._height = 0\n self._invalidPositions = frozenset()", "def __init__(self):\n self.root = None\n self.k = None", "def __init__(self):\n self.root = None\n self.k = None", "def __init__(self, data=None, k=2, min_gain=1, max_iter=20,\n max_epoch=1, verbose=True):\n if data is not None:\n print 'in __init__1',\n print k,min_gain,max_iter,max_epoch,verbose\n # self.fit(data, k, min_gain, max_iter, max_epoch, verbose)\n\n # data is an array of 1xn matrix", "def __init__(self, name, path, password=None, key_size=2048, **kwargs):\n self.key_size = key_size\n super().__init__(name, path, password)", "def __init__(self, width, height, radius, k=20):\n self.width = width\n self.height = height\n self.radius = radius\n self.k = k\n self.cell_size = self.radius * 1.0 / np.sqrt(2.0)\n self.grid_width = int(np.ceil(self.width / self.cell_size))\n self.grid_height = int(np.ceil(self.height / self.cell_size))\n self.grid = [-1] * (self.grid_height * self.grid_width)\n self.queue = []\n self.samples = []", "def __init__(self, env, k):\n gym.Wrapper.__init__(self, env)\n self.k = k\n self.frames = deque([], maxlen=k)\n shp = env.observation_space.shape\n self.observation_space = spaces.Box(low=0, high=255, shape=(shp[0], shp[1], shp[2] * k))", "def __init__(self, k: int):\n self.k = k\n self.q = ['#'] * k\n self.front = 0\n self.rear = 0\n self.empty = True", "def __init__(self):\n self._root = None\n self._size = 0", "def __init__(self):\n self._root = None\n self._size = 0", "def __init__(self):\n self._root = None\n self._size = 0", "def __init__(self):\n self._root = None\n self._size = 0", "def min_pixels(self, value) -> 'Size':\n raise_not_number(value)\n self.minimum = '{}px'.format(value)\n return self", "def __init__(self, dim_x: int = 1, kf: Optional[Kalman] = None):\n # TODO: Add support for x_init. 
Needs reimplementation of NFourSID.\n\n super().__init__()\n\n if kf is None:\n self.kf = None\n self.dim_x = dim_x\n self._kf_provided = False\n else:\n self.kf = kf\n self.dim_u = kf.state_space.u_dim\n self.dim_x = kf.state_space.x_dim\n self.dim_y = kf.state_space.y_dim\n self._kf_provided = True\n if self.dim_u > 0:\n self._expect_covariates = True", "def __init__(self):\n \n self.stack = [];\n self.min = None;", "def init(x_dim, y_dim):\n\n #Step 1) \n grid = #create a numpy grid of zeroes of with the dimensions inthe parameters\n pattern = #parse the pattern given by the user via sys.argv\n\n # Step 2) Validate and read in the padding arguments if the user passes them in\n # otherwise default to zero \n\n # Step 3) Copy the pattern onto the grid\n\n # Step 4) Return the result\n\n pass", "def get_min_depth(l_k):\n return max(l_k.values())", "def initialize_dp(self, k):\n for node in self.nodes.values():\n node.initialize_cost_matrix_and_facilities(k)\n self.k = k" ]
[ "0.5303504", "0.5293798", "0.52392584", "0.5238729", "0.52039605", "0.516473", "0.51517797", "0.5144335", "0.5137341", "0.5133678", "0.51300824", "0.5129396", "0.51235527", "0.5118941", "0.508737", "0.5040831", "0.5036856", "0.50319624", "0.50160795", "0.50102866", "0.5008397", "0.49952224", "0.4993463", "0.49930155", "0.49906266", "0.49699995", "0.49453956", "0.49380907", "0.49358666", "0.49339226", "0.49054006", "0.49014047", "0.49001208", "0.4898726", "0.48922917", "0.48907405", "0.48865637", "0.4886109", "0.4881662", "0.4866538", "0.4854468", "0.4842496", "0.484236", "0.48408726", "0.48337984", "0.48184443", "0.48184443", "0.48172304", "0.48053515", "0.48039687", "0.47966057", "0.47955954", "0.47935548", "0.47913876", "0.47854313", "0.47778437", "0.47723415", "0.4771528", "0.47671244", "0.47623312", "0.4760962", "0.4760097", "0.47547293", "0.47532293", "0.47475582", "0.47470173", "0.472488", "0.472488", "0.47172272", "0.47163028", "0.47143236", "0.47057027", "0.47051743", "0.47004324", "0.46989584", "0.46967015", "0.46965492", "0.46945375", "0.46945375", "0.46935526", "0.46869", "0.4680504", "0.4677865", "0.4675637", "0.4675637", "0.46681866", "0.46647877", "0.46621564", "0.46556967", "0.46554312", "0.4652799", "0.4652799", "0.4652799", "0.4652799", "0.4652289", "0.46520162", "0.46519923", "0.46504286", "0.46499914", "0.46480635" ]
0.7107055
0
Initializes a CountMin Sketch ``key`` to characteristics (``error``, ``probability``) specified by user.
def cmsInitByProb(self, key, error, probability):
    params = [key, error, probability]
    return self.execute_command(self.CMS_INITBYPROB, *params)
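Unlike the dimension-based variant, this wrapper sends ``CMS.INITBYPROB`` and lets the server size the sketch from the requested accuracy. A hedged usage sketch under the same assumed client:

# Usage sketch (same assumed client). The server derives width and depth from the
# requested accuracy: `error` is the over-count bound as a fraction of all
# increments, `probability` is the chance that a query exceeds that bound.
from redisbloom.client import Client

rb = Client()

# Over-count by at most 0.1% of total increments, exceeded with probability <= 0.002.
rb.cmsInitByProb('clicks', 0.001, 0.002)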
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def initialize(X, k):\n if not isinstance(X, np.ndarray) or X.ndim != 2:\n return None, None, None\n if not isinstance(k, int) or k <= 0:\n return None, None, None\n _, d = X.shape\n C, clss = kmeans(X, k)\n pi = 1 / k * np.ones(k)\n m = C\n S = np.array([np.identity(d)] * k)\n return pi, m, S", "def __init__(self, k, p, sample_p=1):\n # Maximum sample size\n self.k = k\n\n # A dictionary containing the sampled elements\n # The dictionary key is the key of the element\n # The value is a tuple (seed, count)\n self.elements = {}\n\n # The function of the frequencies that the sketch estimates\n # For now it's the p-th frequency moment, but in the future we may\n # support other functions (passed as a parameter)\n self.func_of_freq = lambda x: x**p\n\n # The power of values used for the sampling weights\n self.sample_p = sample_p", "def __init__(self, probability, nodeKeys):\n self.probability = float(probability)\n self.nodeKeys = nodeKeys", "def __init__(self, kp, ki, kd):\n self.kp = kp\n self.ki = ki\n self.kd = kd\n self.error_last = 0\n self.error_sum = 0\n self.delta_error = 0", "def set_min_prob(self, disease, probability):\n self.min_probs[disease] = probability", "def __init__(self, probability: float):\n super().__init__()\n\n # store input parameters\n self.probability = probability", "def __init__(self, key: bytearray):\n self.__key = key\n self.__KSA(bytearray([i for i in range(256)]))", "def __init__(self,k,data,max_guess=(100,100),min_guess=(-100,-100)):\r\n\t\tself.k = k\r\n\t\tself.data = data\r\n\t\tself.max_guess = max_guess\r\n\t\tself.min_guess = min_guess", "def initialise_source(self, c, key):\n if key == 'p':\n return 1e5\n elif key == 'h':\n if self.Q.val < 0 and self.Q.is_set:\n return 1e5\n elif self.Q.val > 0 and self.Q.is_set:\n return 5e5\n else:\n return 3e5", "def __init__(self, key, initial_prng):\n self.cipher = key\n self.prng = initial_prng\n self.nonce = None", "def __init__(self, k: int, training_set: np.ndarray):\n self._k = k\n self._training_set = training_set", "def __init__(\n self, seq: Sequence, probabilities: Optional[List[float]] = None, k: int = 0\n ):\n super().__init__()\n\n # store input parameters\n self.seq = seq\n self.probabilities = probabilities\n self.k = k", "def __init__(self, key=None):\n self.key = key", "def __init__(self, k: int) -> None:\n\n assert k > 2, \"for k = 2 use Bernoulli distribution.\"\n\n self.k = k", "def __init__(self, weights, keys=None):\n n = self.n = len(weights)\n if keys is None:\n self.keys = keys\n else:\n self.keys = array(keys)\n \n if isinstance(weights, (list, tuple)):\n weights = array(weights, dtype=float)\n elif isinstance(weights, numpy.ndarray):\n if weights.dtype != float:\n weights = weights.astype(float)\n else:\n weights = array(list(weights), dtype=float)\n \n if weights.ndim != 1:\n raise ValueError(\"weights must be a vector\")\n \n weights = weights * n / weights.sum()\n \n inx = -ones(n, dtype=int)\n short = where(weights < 1)[0].tolist()\n long = where(weights > 1)[0].tolist()\n while short and long:\n j = short.pop()\n k = long[-1]\n \n inx[j] = k\n weights[k] -= (1 - weights[j])\n if weights[k] < 1:\n short.append( k )\n long.pop()\n \n self.prob = weights\n self.inx = inx", "def initialize(X, k):\n\n if not isinstance(X, np.ndarray) or X.ndim != 2:\n return None\n\n # n: number of dada points\n # d: dimension of each data point\n n, d = X.shape\n # print(X.shape)\n # print(X)\n\n if not isinstance(k, int) or k <= 0 or k > n:\n return None\n\n # Sample k centroids from a 
random.uniform distribution;\n # output is an array of coordinates\n C = np.random.uniform(low=np.min(X, axis=0),\n high=np.max(X, axis=0),\n size=(k, d))\n return C", "def __init__(self, min_player_count):\n self.min_player_count = min_player_count", "def initialise_source(self, c, key):\n return 0", "def __init__(self, x=None):\n # Unpack the parameters or use default values.\n if x is None:\n self.nt_probs = np.ones(4) / 4\n self.kappa = 2.0\n self.penalty = 0\n else:\n info = self._unpack_params(x)\n self.nt_probs, self.kappa, self.penalty = info\n\n # Mark some downstream attributes as not initialized.\n self._invalidate()", "def initialise_target(self, c, key):\n if key == 'p':\n return 1e5\n elif key == 'h':\n if self.Q.val < 0 and self.Q.is_set:\n return 5e5\n elif self.Q.val > 0 and self.Q.is_set:\n return 1e5\n else:\n return 3e5", "def __init__(self, k, hash_func, p, advice_obj):\n # Maximum sample size\n self.k = k\n\n # The following hash function defines all the randomness used for\n # picking the sample\n self.hash_func = hash_func\n\n # A dictionary containing the sampled elements\n # The dictionary key is the key of the element\n # The value is a tuple (seed, count)\n self.elements = {}\n\n # The advice object\n self.advice_obj = advice_obj\n\n # The function of the frequencies that the sketch estimates\n # For now it's the p-th frequency moment, but in the future we may\n # support other functions (passed as a parameter)\n self.func_of_freq = lambda x: x**p", "def __init__(self, top_k: int):\n self._topk_acc_dict: Dict[int, Mean] = defaultdict(Mean)\n self.top_k: int = top_k\n\n self.__torchmetrics_requires_task = version.parse(\n torchmetrics.__version__\n ) >= version.parse(\"0.11.0\")", "def __init__(self, kp, ki, kd, ts):\n self.__kp = kp # Controller's P constant\n self.__kd = kd / ts # Controller's D constant\n self.__ki = ki * ts # Controller's I constant\n self.__ts = ts # Controller's sampling time\n self.__err_previous = None # Controller's previous error (there is no error before t = 0s)\n self.__error_sum = 0 # Controller's cumulative error", "def __init__(self, k=1):\n self.k = k\n self.x = None\n self.y = None\n self.classes_ = None", "def __init__(self, key):\n self.key = key", "def init(self, rng_key, num_warmup, init_params, model_args, model_kwargs):\n raise NotImplementedError", "def __init__(self, k:int, **kwargs):\n self.k = k", "def init(self, start_sample, fhat, budget):\n self.proposed_points = start_sample\n self.n0 = start_sample.shape[0]\n self.budget = budget\n self.fhat = fhat", "def __init__(__self__, *,\n key_data: pulumi.Input[str]):\n pulumi.set(__self__, \"key_data\", key_data)", "def initialize(self, k, stats):\n\n k = k + 5\n\n qbin_sizes = 0.5 / k # Quantile sizes\n qbin_edges = 0.25 + qbin_sizes*np.arange(0, k+1) # Edge locations (in quantile terms)\n\n bin_edges = np.interp(qbin_edges, stats['quantile_basis'], stats['quantiles'])\n\n self.k = k\n self.n_bins = k + 2\n self.classes = list(range(1, self.n_bins + 2))\n self.edges = [-np.Inf] + [edge for edge in bin_edges] + [np.Inf]\n self.chi = np.zeros((2, self.n_bins + 1))\n\n dist = np.linspace(2, 1, self.n_bins) # Bins captured by observations\n scaled_dist = 0.9 * dist / dist.sum() # Scaling by 0.9 to allow for 0.1 emission prob of NaN\n self.chi[1, :-1] = scaled_dist # Paired emission dist\n self.chi[0, :-1] = np.flip(scaled_dist) # Unpaired emission dist\n self.chi[1, -1] = 0.1 # NaN observations\n self.chi[0, -1] = 0.1 # NaN observations\n\n self.n_params = 2*(self.n_bins-2)", 
"def initialise_target(self, c, key):\n return 0", "def __init__(self, key):\n self.key = key\n self.BLOCK_SIZE = 16", "def init(self, start_sample, fhat, budget):\n\n self.proposed_points = start_sample\n self.budget = budget\n self.fhat = fhat", "def init(self, start_sample, fhat, budget):\n\n self.proposed_points = start_sample\n self.budget = budget\n self.fhat = fhat", "def _init(self, key, name):\n\n self.key = key\n self.name = name\n\n self._state = Node.State.INVALID\n self._value = None\n\n # Keyword and positional arguments to compute_value.\n self._args = ObservableList()\n self._kwargs = ObservableDict()\n\n self.args.listeners.add(self._on_args_changed)\n self.kwargs.listeners.add(self._on_kwargs_changed)\n\n # Map Nodes to the number of times they appear in this Node's\n # arguments.\n self._arg_refcount = {}\n\n # Nodes whose values depend on this Node.\n self._dependents = set()\n\n if NodeCallStack.stack:\n self._created_by = NodeCallStack.stack[-1]\n\n NodeCreateEvent(self)\n\n NodeCallStack._push(self)", "def __init__(self, key):\n try:\n # Python 2\n if not isinstance(key, (unicode, str)):\n raise TypeError('key is not of type unicode or str.')\n except NameError:\n # Python 3\n if not isinstance(key, str):\n raise TypeError('key is not of type str.')\n\n self.key = key\n\n logger.debug('WU(key: %s)', self.key)", "def kmeanspp_initialisation( self, X ):\n N, _ = X.shape\n k, d = self.k, self.d\n M = []\n\n # Choose one center amongst the X at random\n m = sc.random.randint( N )\n M.append( X[m] )\n\n # Choose k centers\n while( len( M ) < self.k ):\n # Create a probability distribution D^2 from the previous mean\n D = cdist( X, M ).min( 1 )**2\n assert( D.shape == (N,) )\n\n # Normalise and sample a new point\n D /= D.sum()\n\n m = sc.random.multinomial( 1, D ).argmax()\n M.append( X[m] )\n\n M = sc.column_stack( M )\n sigma = sc.sqrt(cdist( X, M.T, 'sqeuclidean').sum(0)/(N))\n w = ones( k )/float(k)\n\n return M, sigma, w", "def __init__(self, ksize_low, ksize_high=None): \n self._sigma_low = 0.3*(ksize_low//2 - 1) + 0.8\n \n if ksize_high is None:\n self._sigma_high = np.sqrt(2)*self._sigma_low\n else:\n self._sigma_high = 0.3*(ksize_high//2 - 1) + 0.8", "def __init__(self, key: str, name: str, *probability_values: float, **probability_dependents: Node) -> None:\n if probability_dependents:\n assert len(probability_values) >= 2 ** len(probability_dependents.keys()\n ), f\"Not enough truth table values given for amount of dependencies: {len(probability_values)} of {2 ** len(probability_dependents.keys())}\"\n self.key = key\n self.name = name\n self.probability_links = probability_dependents\n self.probability_values = probability_values\n self.used_by = list()", "def initializeFromDict(self, inputDict):\n self.strategy = inputDict['strategy']\n self.categoricalDist = Categorical()\n self.categoricalDist.initializeFromDict(inputDict)\n initialPerm = randomUtils.randomPermutation(inputDict['outcome'].tolist(),self)\n self.pot = np.asarray(initialPerm)", "def improved_initialization(X, k):\n new_values = X.copy()\n best_like = float('-inf')\n MU, SIGMA, PI = None, None, None\n for _ in range(10):\n initial_means = get_initial_means(new_values, k)\n pi = np.full(k, 1 / k)\n while True:\n mu, clusters = k_means_step(new_values, k, initial_means)\n diff = np.sum(mu - initial_means)\n if not diff:\n sigma = compute_sigma(X, mu)\n break\n initial_means = mu\n mu, sigma, pi, res = train_model(X, k, default_convergence, (mu, sigma, pi))\n lk = likelihood(X, pi, mu, sigma, k)\n 
if lk > best_like:\n best_like = lk\n MU = mu\n SIGMA = sigma\n PI = pi\n return MU, SIGMA, PI", "def __init__(self, pseudocount=0):\n self._model = dict()\n self._alpha = pseudocount\n self._N = 0\n self._V = 0", "def test_prediction_key_required(self):\n self._config['Prediction key'] = ''\n with self.assertRaisesRegex(ValueError,\n 'Please provide the prediction key'):\n self._gen.generate(\n example=self._example,\n model=self._model,\n dataset=self._dataset,\n config=self._config)", "def __init__(self, M, K):\r\n \r\n self.M = M\r\n self.K = K", "def init_probability_dict(self):\n for x in xrange(0,10):\n self.class_probabilities[x] = self.init_probability_2d()", "def initialize(self):\n # FIX: INITIALIZE PROCESS INPUTS??\n for mech, value in self.initial_values.items():\n mech.initialize(value)", "def __init__(self, coefficient=1.2, **kwargs):\n super(CMAAdaptSigmaDistanceProportional, self).__init__() # base class provides method hsig()\n self.coefficient = coefficient\n self.is_initialized = True", "def __init__(self,\r\n x_mashape_key):\r\n self.__x_mashape_key = x_mashape_key", "def __init__(self, key):\n self.bs = 16\n self.key = hashlib.sha256(key.encode()).digest()", "def __init__(self, key, updateProposer, sampleShape, numChains=1, updateProposerArg=None,\n numSamples=100, thermalizationSweeps=10, sweepSteps=10):\n\n stateShape = (numChains,) + sampleShape\n if global_defs.usePmap:\n stateShape = (global_defs.device_count(),) + stateShape\n self.states=jnp.zeros(stateShape, dtype=np.int32)\n\n self.updateProposer = updateProposer\n self.updateProposerArg = updateProposerArg\n\n self.key = key\n if global_defs.usePmap:\n self.key = jax.random.split(self.key, global_defs.device_count())\n self.thermalizationSweeps = thermalizationSweeps\n self.sweepSteps = sweepSteps\n self.numSamples = numSamples\n\n self.numChains = numChains\n\n # jit'd member functions\n self._get_samples_jitd = {} # will hold a jit'd function for each number of samples\n self._get_samples_gen_jitd = {} # will hold a jit'd function for each number of samples", "def __init__(self, key, default=NOT_GIVEN):\n self.key = adapt(key,IComponentKey)\n self.default = default", "def __init__(self, key=None, log_dir=None, log_thresh=1):\n self.key = key\n self.log_dir = log_dir\n self.log_thresh = log_thresh\n self.last_h = None\n self.last_a = None\n self.last_x0 = None\n self.last_y0 = None\n self.last_sx = None\n self.last_sy = None\n self.last_theta = None\n self.last_cutoff = None\n\n if self.log_dir:\n if not os.path.isdir(self.log_dir):\n raise ValueError(self.log_dir + \" is not a directory\")", "def initializeDistribution(self):\n self.checkDistParams()\n\n self.lowerBound = min(self.mapping.keys())\n self.upperBound = max(self.mapping.keys())", "def __init__(self, top_k: int = 1) -> None:\n self.top_k = top_k", "def __init__(self, goal=0, kP=1, kI=1, kD=1, init_pt=0):\n self._pid_lock = threading.Lock()\n\n self.set_goal(goal)\n self.reset(init_pt)\n self.set_gains({\n PIDController.KP_KEY: kP,\n PIDController.KI_KEY: kI,\n PIDController.KD_KEY: kD\n })", "def __init__(self, key_size=1024):\n\t\tif not (key_size % 256 == 0 and key_size >= 1024):\n\t\t\t\traise ValueError(\"RSA key length must be a multiple of 256 and >= 1024\")\n\t\telse:\n\t\t\tself.key_size = key_size", "def __init__(self, kappa=1, alpha=1):\n self.k_alp = np.array([kappa, alpha])", "def __init__(self, initial_lr: float, k: float):\n super().__init__()\n self.initial_lr = initial_lr\n self.k = k", "def __init__(self):\r\n # sample ID -> 
(ref individual count,\r\n # {size -> (estimate, std err, ci_low, ci_high)})\r\n self._data = {}", "def initialize_parameters(X: np.ndarray, k):\n idx = np.random.choice(X.shape[0], k, replace=False)\n mu = X[idx]\n sigma = compute_sigma(X, mu)\n pi = np.ones(k) / k\n return mu, sigma, pi", "def __init__(self, uid, key, initial_prng):\n self.uid = uid\n self.key = key\n Crypto1.__init__(self, key, initial_prng)", "def __init__(self, key):\n\n def keys(key, num_rounds):\n \"\"\"Yields the permuted key bitstring for i = 1..num_rounds\"\"\"\n C, D = key[:28], key[28:]\n # Rounds are 1-indexed, so shift array over by one\n left_shifts = [None, 1, 1, 2, 2, 2, 2, 2, 2, 1, 2, 2, 2, 2, 2, 2, 1]\n for i in range(1, num_rounds + 1):\n # Negate each rotation to rotate left.\n C, D = rotate(C, -left_shifts[i]), rotate(D, -left_shifts[i])\n yield self.permute(C + D, self._CD_permutation)\n\n self.key = list(bits_of(key, 64))\n # Permute the key. The permutation discards the parity bits...\n self.key = self.permute(self.key, self._key_permutation)\n self.number_of_rounds = 16\n # A list of the 16 keys K1 .. K16, shifted over by one to allow 1-indexing.\n self.keys = [None] + list(keys(self.key, self.number_of_rounds))", "def __init__(self, k=10, cutoff=0.5):\n if k < 0:\n raise ValueError('k must be positive')\n super(PrecisionLower, self).__init__(k, cutoff)", "def __init__ ( self , phenotypes ):\n\t\tself.counts = {}\n\t\tfor k , v in phenotypes.items():\n\t\t\tassert type( k ) is str , 'phenotype keys must be strings'\n\t\t\t\n\t\t\tself.counts[ k ] = 0\n\n\t\t\tassert v[1] > v[0] , 'upper bound of ' + k + ' must be greater than the lower bound'\n\n\t\tself.phenotypes = phenotypes", "def __init__(self) :\n self.probabilities_ = None", "def __init__(self) :\n self.probabilities_ = None", "def __init__(self) :\n self.probabilities_ = None", "def test_value_min(self):\n self.assertEqual(DPTValue1Ucount().to_knx(0), (0x00,))\n self.assertEqual(DPTValue1Ucount().from_knx((0x00,)), 0)", "def __init__(self, k):\n self.k = k\n self.N = 2**self.k", "def check_start_probs(self, initial: np.ndarray) -> None:\n if not isinstance(initial, np.ndarray):\n raise TypeError('Initial state distribution must be a numpy.ndarray')\n if not initial.shape == (self.n_states,):\n raise ValueError('Initial state distribution must be of shape (n_states,)')\n if not np.isclose(initial.sum(), 1):\n raise ValueError('Initial state distribution must sum to one')\n return initial", "def __init__(self, probability: Union[float, int]=0.5):\r\n if not isinstance(probability, (int, float)):\r\n raise TypeError('probability must be of type: float, int')\r\n if probability < 0 or probability > 1:\r\n raise ValueError('probability must be in the range: [0, 1]')\r\n self.probability = float(probability)", "def __init__(self):\n self.counts = [0] * 10\n self.values = [0] * 10\n self.ucb_values = [0] * 10\n self.minmax = 0", "def __init__(self, trainset, k=3):\n self._trainset = trainset\n self.k = k", "def __init__(self, min_ms1_intensity):\n self.min_ms1_intensity = min_ms1_intensity", "def __init__(self, M, N, D, K):\n\n # Sanity checks\n assert len(D) == M\n assert K < min(D)\n assert K < N\n\n self.M = M\n self.N = N\n self.K = K\n self.D = D", "def __init__(self, sk=None, n=None, h=None):\r\n if sk:\r\n self.n = sk.n\r\n self.h = sk.h\r\n elif n and h:\r\n self.n = n\r\n self.h = h\r\n else:\r\n raise Exception(\"Public Key construction failed: insufficient/wrong arguments\")\r\n\r\n self.signature_bound = 
Params[self.n][\"sig_bound\"]\r\n self.sig_bytelen = Params[self.n][\"sig_bytelen\"]", "def __init__(self, key: bytes):\n\n if len(key) != 32:\n raise ValueError('Key must be 32 bytes long')\n self.key = key", "def parse_initializer(hid_w_init_key):\n init_map = dict(he_normal=U.he_normal_init(),\n he_uniform=U.he_uniform_init(),\n xavier_normal=U.xavier_normal_init(),\n xavier_uniform=U.xavier_uniform_init())\n if hid_w_init_key in init_map.keys():\n return init_map[hid_w_init_key]\n else:\n raise RuntimeError(\"unknown weight init: '{}'\".format(hid_w_init_key))", "def initialize(self,inputDict):\n pass", "def __init__(self, key):\n if len(key) > KEY_SIZE:\n raise ParameterError(\"Key must be <%d bytes\" % (KEY_SIZE))\n\n self.key = key.ljust(KEY_SIZE, b\"\\xff\")\n self.encryptIV = b\"\\xff\" * BLOCK_SIZE\n self.decryptIV = b\"\\xff\" * BLOCK_SIZE\n self.remainingData = b\"\"\n self.oldDecrypt = b\"\"", "def _check_params(self):\n if self.k_initial <= 0 :\n raise ValueError('Initial K should be 1 or more.')", "def __init__(self, corpus: Corpus):\n\n # the legomena counts parametrize this model\n self.M = corpus.M\n self.N = corpus.N\n self.k = corpus.k", "def __init__(self, **kwargs):\n self._params = dict(\n score_func=ParameterDefinition([chi2, f_classif, mutual_info_classif]),\n k=None,\n )\n self.__k = None\n self.__select_k_best = SelectKB()", "def __init__(self, key = None):\n self.key = key\n self.response_format = 'json'\n \n if self.key is None:\n raise NoAPIKeyException('Warning: Missing API Key. Please visit ' + API_SIGNUP_PAGE + ' to register for a key.')", "def __init__(__self__, *,\n key: pulumi.Input[str],\n user_id: Optional[pulumi.Input[int]] = None):\n pulumi.set(__self__, \"key\", key)\n if user_id is not None:\n pulumi.set(__self__, \"user_id\", user_id)", "def __init__(self, default_probs: np.ndarray, _no_init: bool = False):\n if _no_init:\n self._store = np.empty_like(default_probs, dtype=np.float64)\n else:\n self._store = np.zeros_like(default_probs, dtype=np.float64)\n self._default_probs = default_probs\n self._gross = 0.0", "def __init__(self, value=None, error=None, weights=None):\r\n\r\n self.value = value\r\n self.error = error\r\n self.weights = weights", "def init(self, start_sample, fhat, budget):\n self.proposed_points = start_sample\n self.fhat = fhat\n self.n0 = start_sample.shape[0]\n for i in range(self.nstrats):\n self.sampling_strategies[i].init(self.proposed_points, fhat, budget)", "def _updateInitialProbabilities(self): \n N = self.N\n K = self.K\n\n for i in range(1,self.K+1):\n s = 0\n updated_prob = 0\n for n in range(1,self.N+1):\n s = s+1\n updated_prob = updated_prob + self.posterior_state_trellis[n][(1,i)]\n self.state_initial_prob[i] = (updated_prob/s)", "def __init__(self,\n kp,\n ts):\n self._kp = kp\n self._ts = ts\n self._error = 0", "def __init__(self, dict = {}):\r\n if dict == {}:\r\n self.zero_val()\r\n else:\r\n self.piDD = dict\r\n self.top_node = utilities.max_length_in_list(self.return_keys())\r\n if self.piDD[self.top_node] == None:\r\n self.dim = 0\r\n else:\r\n self.dim = self.piDD[self.top_node][0][0]", "def set_min_confidence(self, new_min):\n self.__min_confidence = new_min", "def __init__(self, k=5):\n self.k = k", "def __init__(self, k=10, cutoff=0.5):\n if k < 0:\n raise ValueError('k must be positive')\n super(Precision, self).__init__()\n self.k = k\n self.cutoff = cutoff", "def initial_data(initial_condition, k):\n \n M = 2**k\n h = 1/M\n \n def square_wave(x):\n if (abs(x - 0.5) <= 0.25):\n return 1\n else:\n 
return 0\n \n def semicircle(x):\n return sqrt(0.25-pow(x - 0.5, 2))\n \n def gaussian_pulse(x):\n return exp(-256*pow(x - 0.5, 2)) \n \n if initial_condition == \"Square_Wave\":\n initial_condition = square_wave\n elif initial_condition == \"Semicircle\":\n initial_condition = semicircle\n elif initial_condition == \"Gaussian_Pulse\":\n initial_condition = gaussian_pulse\n \n iterator = ( initial_condition(i) for i in range(0, M) )\n\n return np.fromiter(iterator, float64)", "def __init__(self, probability, stop_gracefully=True):\n self._probability = probability", "def _init_node_parm(self, key):\n try:\n wf_data_conf = WorkflowDataConfFrame(key)\n self.data_conf = wf_data_conf.conf\n except Exception as e:\n raise Exception(\"dataconf_node_fame._init_node_parm Initializing Error : \" +str(e))", "def __init__(self, initial_node):\n self.__nodes = MinPriorityQueue({initial_node : initial_node.estimate})", "def __init__(self, k_d, k_s=0., p=20., k_m=0., k_a=None):\n # TODO A5 (Step2) implement this function\n # Check if each property is an array of shape (h, w, 3)\n # If so, then apply the property using the uv coordinates supplied by the geometry.\n self.k_d = k_d\n self.k_s = k_s\n self.p = p\n self.k_m = k_m\n self.k_a = k_a if k_a is not None else k_d", "def __init__(self, mean: float, std: float, seed: int, min_requirement: int = 10):\n self._mean = mean\n self._std = std\n self._seed = seed\n self._min_requirement = min_requirement\n self._rng = Random(self._seed)" ]
[ "0.5790586", "0.5750808", "0.55292356", "0.5522919", "0.5522306", "0.53946745", "0.5369128", "0.53256714", "0.5308711", "0.5307491", "0.5304574", "0.52648705", "0.5253688", "0.5247738", "0.52242655", "0.52212495", "0.52023345", "0.518744", "0.5175941", "0.51755065", "0.5169663", "0.515917", "0.5135909", "0.5134228", "0.5133494", "0.5123331", "0.5118834", "0.509812", "0.50863886", "0.5085439", "0.5079547", "0.5078432", "0.50674856", "0.50674856", "0.5063619", "0.5052592", "0.5049668", "0.5048629", "0.50427467", "0.5034402", "0.50320005", "0.502503", "0.50239754", "0.50107163", "0.49970064", "0.49927887", "0.4978071", "0.4968888", "0.49660885", "0.49632895", "0.49574247", "0.4952741", "0.49523836", "0.49491903", "0.49484795", "0.49443585", "0.49388418", "0.4937983", "0.49240148", "0.49194488", "0.49169496", "0.49093723", "0.49071908", "0.4902941", "0.48984125", "0.48984125", "0.48984125", "0.48971912", "0.48964944", "0.48955828", "0.48894447", "0.48846066", "0.4880145", "0.4857076", "0.48566377", "0.48554468", "0.48522258", "0.48366576", "0.48353183", "0.48334274", "0.48306924", "0.48302063", "0.48279166", "0.48277852", "0.48220235", "0.48211318", "0.48044884", "0.48006284", "0.47972602", "0.47961852", "0.4792175", "0.47888532", "0.47834173", "0.47816417", "0.47743827", "0.47696534", "0.47522882", "0.47470757", "0.47405326", "0.47361603" ]
0.6565891
0
Adds/increases ``items`` to a CountMin Sketch ``key`` by ``increments``. Both ``items`` and ``increments`` are lists. Example: cmsIncrBy('A', ['foo'], [1])
def cmsIncrBy(self, key, items, increments):
    params = [key]
    self.appendItemsAndIncrements(params, items, increments)
    return self.execute_command(self.CMS_INCRBY, *params)
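Because ``items`` and ``increments`` are parallel lists, several counters can be bumped in one ``CMS.INCRBY`` round trip. A usage sketch under the same assumed client (the ``cmsQuery`` read-back is likewise an assumption, mirroring RedisBloom's ``CMS.QUERY``):

# Usage sketch (same assumed client; cmsQuery is assumed to wrap CMS.QUERY).
# `items` and `increments` are parallel lists: items[i] is increased by increments[i].
from redisbloom.client import Client

rb = Client()

rb.cmsInitByDim('A', 2000, 10)
rb.cmsIncrBy('A', ['foo', 'bar'], [1, 5])
print(rb.cmsQuery('A', 'foo', 'bar'))  # approximate counts, e.g. [1, 5]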
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def incr(self, key, delta=1, callback=None):\n self._incrdecr(\"incr\", key, delta, callback=callback)", "def incr(self, key, delta=1):\n\t\treturn self._incrdecr(\"incr\", key, delta)", "def increase(self, key:str) -> None:\n\n hash_key = self.hash_key(key)\n head = self.array[hash_key] \n \n while head.next: \n if head.next.key == key:\n head.next.value +=1\n head = head.next", "def __inc__(self, key, value):\n # TODO: value is not used in this method. Can it be removed?\n if key in ['upper_index', 'lower_index']:\n inc = self.num_cells\n elif key in ['shared_boundaries']:\n inc = self.num_cells_down\n elif key == 'shared_coboundaries':\n inc = self.num_cells_up\n elif key == 'boundary_index':\n boundary_inc = self.num_cells_down if self.num_cells_down is not None else 0\n cell_inc = self.num_cells if self.num_cells is not None else 0\n inc = [[boundary_inc], [cell_inc]]\n else:\n inc = 0\n if inc is None:\n inc = 0\n\n return inc", "def inc(self, key):\n if key in self.keyCountMap:\n self._updateCount(key, 1)\n else:\n self.keyCountMap[key] = 1\n if self.head.next.count != 1:\n self._addBucketAfter(Bucket(1), self.head)\n self.head.next.keySet.add(key)\n self.countBucketMap[1] = self.head.next", "def incr_proof_item(item, start, n):\n item.id = incr_id_after(item.id, start, n)\n item.prevs = [incr_id_after(id, start, n) for id in item.prevs]\n if item.subproof:\n for subitem in item.subproof.items:\n incr_proof_item(subitem, start, n)", "def incr(self, key, delta=1):\r\n if delta < 0:\r\n return self._incrdecr(\"decr\", key, -delta)\r\n else:\r\n return self._incrdecr(\"incr\", key, delta)", "def inc(self, key: str) -> None:\n if key not in self.bucket_of_keys:\n self.bucket_of_keys[key] = self.buckets.insert(self.buckets.begin(), Node(0, {key}))\n bucket, next_bucket = self.bucket_of_keys[key], self.bucket_of_keys[key].next\n if next_bucket is self.buckets.end() or next_bucket.value > bucket.value + 1:\n next_bucket = self.buckets.insert(next_bucket, Node(bucket.value + 1, set()))\n next_bucket.keys.add(key)\n self.bucket_of_keys[key] = next_bucket\n\n bucket.keys.remove(key)\n if not bucket.keys:\n self.buckets.erase(bucket)", "def __iadd__(self, increment):\n self.update(self.val + increment)\n return self", "def inc(self, key):\n if key in self.cache:\n curr_freq = self.cache[key]\n self.freq[curr_freq].remove(key)\n\n if len(self.freq[curr_freq]) == 0:\n del self.freq[curr_freq]\n\n curr_freq += 1\n self.freq[curr_freq].add(key)\n self.cache[key] = curr_freq\n\n else:\n self.cache[key] = 1\n self.freq[1].add(key)", "def inc(self, key: str) -> None:\n if key not in self.mapping:\n cur_block = self.head\n else:\n cur_block = self.mapping[key]\n cur_block.keys.remove(key)\n\n if cur_block.val + 1 != cur_block.next.val:\n new_block = Block(cur_block.val + 1)\n cur_block.insert_after(new_block)\n else:\n new_block = cur_block.next\n new_block.keys.add(key)\n self.mapping[key] = new_block\n\n if not cur_block.keys and cur_block.val != 0:\n cur_block.remove()", "def inc(self, key: str) -> None:\n if key in self.keyCnt:\n self.changeKey(key, 1)\n else:\n self.keyCnt[key] = 1\n # 说明没有计数为1的节点,在self.head后面加入\n if self.head.next.cnt != 1:\n self.addNodeAfter(Node(1), self.head)\n self.head.next.keySet.add(key)\n self.cntKey[1] = self.head.next", "def handle_incr(self, api, command):\n key = self._sandboxed_key(api.sandbox_id, command.get('key'))\n if not (yield self.check_keys(api, key)):\n returnValue(self._too_many_keys(command))\n amount = command.get('amount', 1)\n try:\n value = yield 
self.redis.incr(key, amount=amount)\n except Exception, e:\n returnValue(self.reply(command, success=False, reason=unicode(e)))\n returnValue(self.reply(command, value=int(value), success=True))", "def _insert_item_run_length_encoded(cls, incremental_items, aggregated_items,\n num_runs): # pragma: no cover\n for item in incremental_items:\n if len(aggregated_items) and item[1] == aggregated_items[0][1]:\n aggregated_items[0][0] = min(aggregated_items[0][0] + item[0], num_runs)\n else:\n aggregated_items.insert(0, item)", "def __iterate(\n self,\n items: List[ClientWorklistItem],\n inc: Union[InitialIncClientWorklistData, IncClientWorklistData],\n ):\n if inc is None:\n return\n # append the items\n if inc.items_flat:\n items += inc.items_flat\n else:\n return\n # iterator is used up\n if inc.dropped:\n return\n\n # fetch next\n inc_cl: IncClientWorklistsApi = self.__service_provider.get_service(IncClientWorklistsApi)\n next_it: IncClientWorklistData = inc_cl.inc_client_wl_get_next(inc.inc_wl_id)\n self.__iterate(items, next_it)", "def inc(self, key):\n # update key node\n if key not in self.hash_table:\n self.hash_table[key] = ListNode(key, 1)\n else:\n self.hash_table[key].val += 1\n node = self.hash_table[key]\n val = node.val\n\n #print 'inc', key, val\n # delete node from original List\n if node._prev:\n node._prev._next = node._next\n if node._next:\n node._next._prev = node._prev\n\n # insert node to new List\n if val not in self.count_table:\n cl_node = CountListNode()\n cl_node._next = node\n node._prev = cl_node\n node._next = None\n if not self.head.next_cl and not self.tail.prev_cl:\n self.head.next_cl = cl_node\n cl_node.prev_cl = self.head\n self.tail.prev_cl = cl_node\n cl_node.next_cl = self.tail\n else:\n if val == 1:\n next_cl_node = self.tail\n else:\n next_cl_node = self.count_table[val-1]\n cl_node.next_cl = next_cl_node\n cl_node.prev_cl = next_cl_node.prev_cl\n next_cl_node.prev_cl.next_cl = cl_node\n next_cl_node.prev_cl = cl_node\n\n #print key, val, cl_node.prev_cl == self.head\n self.count_table[val] = cl_node\n else:\n node._next = self.count_table[val]._next\n node._prev = self.count_table[val]\n self.count_table[val]._next._prev = node\n self.count_table[val]._next = node\n\n print 'inc', key, val\n if val - 1 in self.count_table and not self.count_table[val - 1]._next:\n #print key, val, val-1\n del_node = self.count_table[val-1]\n #print del_node.prev_cl._next\n del_node.prev_cl.next_cl = del_node.next_cl\n del_node.next_cl.prev_cl = del_node.prev_cl\n del (self.count_table[val - 1])\n\n for v in self.count_table:\n print 'v:',v,\n node = self.count_table[v]._next\n while node:\n print node.key, node.val\n node = node._next\n\n if self.head.next_cl:\n print 'head',self.head.next_cl._next.key, self.head.next_cl._next.val\n print '\\n'", "def incrby(self, key, value, timeBucket=None,\n retentionSecs=None, labels={}):\n params = [key, value]\n self.appendTimeBucket(params, timeBucket)\n self.appendRetention(params, retentionSecs)\n self.appendLabels(params, labels)\n\n return self.execute_command(self.INCRBY_CMD, *params)", "def incr(self, key, delta=1, version=None, client=None):\r\n return self._incr(key=key, delta=delta, version=version, client=client)", "def inc(self, key):\n if key in self.key_dict:\n self.increase(key)\n return\n self.key_dict[key] = key_node = KeyNode(key, 1)\n value_node = self.value_dict.get(1)\n if value_node is None:\n self.value_dict[1] = value_node = ValueNode(1, None, self.head)\n if self.head:\n self.head.prev = value_node\n 
self.head = value_node\n if self.last is None:\n self.last = value_node\n self.insert_key_node(key_node)", "def incr(self, n=1):\n return _SALOMERuntime.SALOMERuntime_PySwigIterator_incr(self, n)", "def incr(self, n=1):\n return _libsbml.SwigPyIterator_incr(self, n)", "def incr(self, n=1):\n return _elas.SwigPyIterator_incr(self, n)", "def _update_prepend_key(self):\n self.prepend_key -= 1", "def increment(self):\r\n return self.add(1)", "def increment_counter(self) -> None:", "def incr(self, key, delta=1):\n try:\n key = self.prepare_key(key)\n return super(CacheClass, self).incr(key, delta)\n except Exception as err:\n return self.warn_or_error(err, delta)", "def increment(self) -> global___Expression:", "def add_to_inv(self, item):\n for obj in self.inv:\n if obj.name == item.name:\n self.inv[obj] += 1\n break\n else:\n self.inv[item] = 1", "def inc_counter(self, *_, **__): # pylint: disable=arguments-differ\n pass", "def update(self, *items):\n for item in items:\n self.add(item)", "def incr(self, n=1):\n return _osgAnimation.SwigPyIterator_incr(self, n)", "def add_zero_item(items, coder, tag, start):\n if items[tag][coder]:\n it = items[tag][coder][-1]\n zero_start = it.b + it.l\n else:\n zero_start = 0\n if start - zero_start:\n items[tag][coder].append(item(b=zero_start, l=start - zero_start, v=0))", "def increase_counter(self):\n self.values = self.values + 1", "def increment(self, amount):\n pass", "def inc(self, amount=1):\n if amount < 0:\n raise ValueError('Counters can only be incremented by non-negative amounts.')\n self._shared_list.append((self._labels_args, ('inc', amount)))", "def add(self, item):\n if item in self:\n self._set(item, self._get(item) + 1)\n else:\n self._set(item, 1)", "def test_list_increment_with_valid_value(self):\n key = (\"test\", \"demo\", \"list_key\")\n list = [{\"op\": aerospike.OP_LIST_INCREMENT, \"bin\": \"int_bin\", \"index\": 2, \"val\": 20}]\n\n _, _, bins = self.as_connection.operate(key, list)\n\n assert bins == {\"int_bin\": 23}\n _, _, bins = self.as_connection.get(key)\n\n assert bins[\"int_bin\"] == [1, 2, 23, 4]", "def decrease_key(self, old_item, new_item):", "def _increment_count(self, key):\n\n if not self._count.has_key(key):\n self._count[key] = 0\n\n self._count[key] += 1", "def _increment_count(self, key):\n\n if not self._count.has_key(key):\n self._count[key] = 0\n\n self._count[key] += 1", "def incr(self, key, value=1):\n try:\n self[key] += value\n except TypeError:\n raise TypeError('Tried to increment non-numeric key {!r} ({!r}) by {}'.format(\n key, self[key], value\n ))\n except KeyError:\n self[key] = value\n\n return u''", "def _update_append_key(self):\n self.append_key += 1", "def inc(i):\n i += 1\n return i", "def hincrby(self, key, field, num):\n return self._command(b'HINCRBY', key, field, num)", "def wrap_iterator_inc_counter(iterator, counter, grpc_type, grpc_service_name, grpc_method_name):\n\n for item in iterator:\n counter.labels(\n grpc_type=grpc_type,\n grpc_service=grpc_service_name,\n grpc_method=grpc_method_name).inc()\n yield item", "def qty_increments(self, qty_increments):\n if qty_increments is None:\n raise ValueError(\"Invalid value for `qty_increments`, must not be `None`\")\n\n self._qty_increments = qty_increments", "def add_item(items, coder, tag, start, n):\n if start is not None:\n # close opened items\n add_zero_item(items, coder, tag, start) # default tag\n items[tag][coder].append(item(b=start, l=n-start, v=1)) # found tag", "def increment(self):\n self._deltas += 1", "def add(self, key, 
value):\n newest = self._Item(key, value) # make new item instance\n walk = self._data.last() # walk backward looking for smaller key\n while walk is not None and newest < walk.element():\n walk = self._data.before(walk)\n if walk is None:\n self._data.add_first(newest) # new key is smallest\n else:\n self._data.add_after(walk, newest) # newest goes after walk", "def inc(self, by=1):\n assert by > 0\n self.counter += by\n if self.counter == by:\n # If we just incremented self.counter by 'by', and the new count\n # equals 'by', then the old value of self.counter was 0.\n # Transitioning from 0 to a nonzero value means wait() must\n # actually wait.\n self.event.reset()", "def _increment_quantity(self, units):\n self.quantity += units", "def add(self, *items):\n for item in items:\n self.unsorted.append(item)\n key = item[0]\n self.index[key] = item\n return self", "def incrementAll(self, keys, count):\n for key in keys:\n self[key] += count", "def incrementAll(self, keys, count):\n for key in keys:\n self[key] += count", "def incrment_1(x):\n return(x + 1)", "def __incKeyCount(self,\n key):\n if (self.__keyCount.has_key(key) == 0): self.__keyCount[key] = 0\n self.__keyCount[key] = self.__keyCount[key] + 1\n return self.__keyCount[key]", "def incr(n=1):\n for i in xrange(n):\n pulse_hi(INCR)", "def setincrement(self, *args, **kwargs):\n return _coordsys.coordsys_setincrement(self, *args, **kwargs)", "def add(self, key, value):\r\n newest = Item(key, value) # make new item instance\r\n walk = self.data.last() # walk backward looking for smaller key\r\n while walk is not None and newest < walk.element():\r\n walk = self.data.before(walk)\r\n if walk is None:\r\n self.data.add_first(newest) # new key is smallest\r\n else:\r\n self.data.add_after(walk, newest) # newest goes after walk\r", "def incr_operand(self):\n pass", "def inc( self ):\n self.count += 1", "def add_item(self, item: str) -> None:\n try:\n current_max = max(self.stoi.values())\n self.stoi[item] = current_max + 1\n except ValueError:\n self.stoi[item] = 0", "def add_item(self, i, k):\n if k == self.K:\n self.K += 1\n self.m_N_numerators[k, :] = self.prior.k_0*self.prior.m_0\n self.S_N_partials[k, :] = self.prior.S_0 + self.prior.k_0*self._cached_prior_square_m_0\n self.m_N_numerators[k, :] += self.X[i]\n self.S_N_partials[k, :] += self._cached_square[i]\n self.counts[k] += 1\n self._update_log_prod_vars_and_inv_vars(k)\n self.assignments[i] = k", "def inc(self):\n self._value += 1", "def prepend(self, in_items):\n\n items = self.list\n in_items.extend(items)\n self.value = self.__class__.SEPARATOR.join(in_items)", "def inc(self, params):\n reg = params[0]\n if self.reg_dct[reg] == (2 ** 32) - 1:\n self.reg_dct[reg] = 0\n else:\n self.reg_dct[reg] += 1", "def apply_inverse_deal_with_increment_then_cut_iterated(\n commands, iters, idx, len_cards\n):\n deal, cut = commands\n assert isinstance(deal, DealWithIncrement)\n assert isinstance(cut, Cut)\n\n inverse = mod_inverse(deal.increment, len_cards)\n incr_raised = mod_power(inverse, iters, len_cards)\n frac = ((incr_raised - 1) * mod_inverse(inverse - 1, len_cards)) % len_cards\n return (idx * incr_raised + cut.value * inverse * frac) % len_cards", "def inc(self, key, delta=1):\n if self.has(key):\n _filter = {'_id': key}\n document = {'$inc': {'value': delta}}\n try:\n self.collection.update(_filter, document)\n except PyMongoError:\n return None\n else:\n self.add(key, delta)\n return self.get(key)", "def test_list_increment_with_missing_bin(self):\n key = (\"test\", \"demo\", 
\"list_key\")\n list = [{\"op\": aerospike.OP_LIST_INCREMENT, \"index\": 2, \"val\": 20}]\n\n with pytest.raises(e.ParamError):\n self.as_connection.operate(key, list)", "def inc(self):\n return self._inc", "def Incrpower(self, increment):\n self.power += increment", "def test_list_increment_with_missing_index(self):\n key = (\"test\", \"demo\", \"list_key\")\n list = [{\"op\": aerospike.OP_LIST_INCREMENT, \"bin\": \"int_bin\", \"val\": 20}]\n\n with pytest.raises(e.ParamError):\n self.as_connection.operate(key, list)", "def inc(self):\n \n self.count += 1", "def update_item_orders(begin_order, t_task, projects, api, cmd_count):\n for task in t_tasks.values():\n if is_in_the_same_proj(task, projects) and task['item_order'] >= begin_order:\n api.items.get_by_id(task['id']).update(item_order=task['item_order']+1)\n update_cmd_count(api)", "def topkAdd(self, key, *items):\n params = [key]\n params += items\n \n return self.execute_command(self.TOPK_ADD, *params)", "def increment_count(dictionary, key):\n if key:\n if key in dictionary:\n dictionary[key] += 1\n else:\n dictionary[key] = 1", "def test_list_increment_with_missing_value(self):\n key = (\"test\", \"demo\", \"list_key\")\n list = [\n {\n \"op\": aerospike.OP_LIST_INCREMENT,\n \"bin\": \"int_bin\",\n \"index\": 2,\n }\n ]\n\n with pytest.raises(e.ParamError):\n self.as_connection.operate(key, list)", "def __iterate_updates(\n self, updates: List[ClientWorklistItemUpdate], inc: IncWorklistUpdateData\n ):\n if inc is None:\n return\n if inc.item_updates:\n updates += inc.item_updates\n else:\n return\n if inc.dropped:\n return\n\n # fetch next\n iwua: IncWorklistUpdateApi = self.__service_provider.get_service(IncWorklistUpdateApi)\n next_it: IncWorklistUpdateData = iwua.inc_wl_updt_get_next(inc.inc_upd_id)\n self.__iterate_updates(updates, next_it)", "def testIncrementDecrement(self):\n\n memcache.incr('unknown_key')\n assert memcache.get('unknown_key') == None\n memcache.set('counter', 0)\n assert memcache.get('counter') == 0\n memcache.incr('counter')\n assert memcache.get('counter') == 1\n memcache.incr('counter', delta=2)\n assert memcache.get('counter') == 3\n memcache.decr('counter')\n assert memcache.get('counter') == 2\n memcache.decr('counter', 2)\n assert memcache.get('counter') == 0\n memcache.incr('second_counter', initial_value=10)\n assert memcache.get('second_counter') == 11\n memcache.decr('third_counter', initial_value=10)\n assert memcache.get('third_counter') == 9\n\n # This should cause an error message, because zero deltas are not\n # allowed.\n memcache.incr('counter', delta=0)\n\n memcache.set('lcounter', long(20))\n assert memcache.get('lcounter') == long(20)\n memcache.incr('lcounter')\n assert memcache.get('lcounter') == long(21)", "def increment(self, column, value=1):\n self._updates += (\n UpdateQueryExpression(column, value, update_type=\"increment\"),\n )\n self.set_action(\"update\")\n return self", "def Incrbarrel(self, increment):\n self.barrel += increment", "async def incr(req):\n key, ttl, err = validate_params(req)\n if err is not None:\n return err\n\n counter = incr_with_ttl(key, ttl)\n return web.json_response(data={'status': 'success', 'counter': counter})", "def add_to_inventory(self, item):\n\t\tif item in self.inventory:\n\t\t\tself.inventory[item] += 1\n\t\telse:\n\t\t\tself.inventory[item] = 1", "def increase(self):\n self.counter[0] += 1\n\n for x in range(len(self.sequences) -1):\n if self.counter[x] == len(self.sequences[x]) + 1:\n self.counter[x] = 0\n self.counter[x+1] += 1", "def 
heap_increase_key(self, i, key):\n if key < self.heap[i].priority_key:\n print(\"The new key should be higher than the current priority_key \")\n else:\n self.heap[i].priority_key = key\n while i > 0 and self.heap[(i-1)//2].priority_key < self.heap[i].priority_key:\n self.heap[(i-1)//2], self.heap[i] = self.heap[i], self.heap[(i-1)//2]\n i = (i-1)//2", "def increment_many(collection: Collection, query, data):\n return collection.update_many(query, {'$inc': data}).modified_count", "def append_key(stanzas, left_struc, keypath=None):\n if keypath is None:\n keypath = []\n addition_key = len(left_struc)\n for stanza in stanzas:\n prior_key = stanza[0]\n if (len(stanza) > 1\n and len(prior_key) == len(keypath) + 1\n and prior_key[-1] >= addition_key):\n addition_key = prior_key[-1] + 1\n return addition_key", "def testBatchIncrement(self):\n\n memcache.set('low', 0)\n memcache.set('high', 100)\n\n memcache.offset_multi({'low': 1, 'high': -50})\n\n self.assertEqual(1, memcache.get('low'))\n self.assertEqual(50, memcache.get('high'))\n\n memcache.offset_multi({'low': 9, 'high': 0})\n\n self.assertEqual(10, memcache.get('low'))\n self.assertEqual(50, memcache.get('high'))\n\n memcache.offset_multi(\n {'max': 5, 'min': -5}, initial_value=10)\n\n self.assertEqual(15, memcache.get('max'))\n self.assertEqual(5, memcache.get('min'))", "def incr(self, x, term=1):\n self.d[x] = self.d.get(x, 0) + term", "def update(keys: List[str]):\n api = API()\n for key in keys:\n api.build(key)\n api.push(key)", "def inc(self, key):\n # 新增key,插入到numDict中,并放置在双向链表head->next\n if key not in self.numDict:\n insNode = Node(key, 1) # 初始化新增节点\n self.numDict[key] = insNode # 将这个节点放置到我们的字典当中\n insNode.next = self.head.next # 第一步:进行双向链表拼接 这一步是断开head和下一个节点的连接\n self.head.next.prev = insNode # 拼接第二步\n self.head.next = insNode # 第三步\n insNode.prev = self.head # 第四步\n else:\n # 存量key\n curNode = self.numDict[key]\n curNode.value += 1\n # 通过交换节点的方式保持双向链表有序\n while curNode.next != self.tail and curNode.value > curNode.next.value:\n prevNode = curNode.prev # 保存前一个节点\n nextnextNode = curNode.next.next # 保存curNode的next节点的next\n prevNode.next = curNode.next\n prevNode.next.prev = prevNode\n prevNode.next.next = curNode\n curNode.prev = prevNode.next\n curNode.next = nextnextNode\n nextnextNode.prev = curNode", "def add(self, key, values):\n self.watchlists[key] = list(enumerate(values))", "def insert(self, key, value):\n # Resize array here if necessary.\n if key < 0: key = 0\n elif key > len(self): key = len(self)\n if key < len(self):\n for j in range(len(self), key, -1):\n self._items[j] = self._items[j - 1]\n self._items[key] = value\n self._size += 1\n self.incModCount()", "def increment_count(count_dict, key):\n if key in count_dict:\n count_dict[key] += 1\n else:\n count_dict[key] = 1", "def inc(self, labels: dict[str, str]):\n\n val = self.get(labels)\n\n if val is None:\n val = 0\n\n val += 1\n\n self.set(labels, val)", "def _inc(self, val):\r\n assert(len(val) == self.sequence_length)\r\n return self.sequence_string[(self.sequence_string.index(val)+1) % len(self.sequence_string)]", "def heap_increase_key(self, A, i, key):\n if key < A[i]:\n raise Exception(\"New key must be greater than current key\")\n A[i] = key\n while i > 0 and A[self.parent(A, i)] < A[i]:\n A[i], A[self.parent(A, i)] = A[self.parent(A, i)], A[i]\n i = self.parent(A, i)", "def heap_increase_key(self, A, i, key):\n if key < A[i]:\n raise Exception(\"New key must be greater than current key\")\n A[i] = key\n while i > 0 and A[self.parent(A, i)] < A[i]:\n 
A[i], A[self.parent(A, i)] = A[self.parent(A, i)], A[i]\n i = self.parent(A, i)", "def smart_add(*args):\n result = 0\n for item in args:\n result += item\n\n return result", "def increment(x = 1):\n\n\tdef add(y):\n\t\treturn x + y\n\treturn add" ]
[ "0.5571012", "0.54712886", "0.5445248", "0.53391284", "0.53194755", "0.53172624", "0.5278319", "0.52269477", "0.52106184", "0.51903224", "0.5171463", "0.5151687", "0.5137558", "0.5133662", "0.5103444", "0.5102637", "0.5099352", "0.5090726", "0.5089509", "0.5078452", "0.5074509", "0.50743383", "0.50702477", "0.50593024", "0.5052929", "0.5044189", "0.49818596", "0.49762857", "0.49444923", "0.49391705", "0.49190956", "0.4901804", "0.48809323", "0.48539943", "0.48456785", "0.48382258", "0.48224607", "0.48149633", "0.48116907", "0.48116907", "0.48061946", "0.48058105", "0.48052484", "0.47805977", "0.47773966", "0.47685814", "0.4764074", "0.47638452", "0.47580522", "0.47499624", "0.4742594", "0.4741295", "0.47247937", "0.47247937", "0.47070527", "0.4684808", "0.46733654", "0.46658158", "0.46627197", "0.46305224", "0.46266583", "0.46187007", "0.45955816", "0.4587944", "0.45869598", "0.4579134", "0.45773762", "0.4577079", "0.45732498", "0.45554063", "0.455162", "0.454118", "0.4539822", "0.453724", "0.45336312", "0.4531097", "0.4527941", "0.4522992", "0.45176327", "0.45171338", "0.4506946", "0.44914633", "0.44701433", "0.44626945", "0.44617936", "0.44575647", "0.44524488", "0.44487125", "0.44423804", "0.4441798", "0.4433348", "0.44308954", "0.44307485", "0.4422904", "0.44138932", "0.4406103", "0.4403785", "0.4403785", "0.43962356", "0.43920097" ]
0.815581
0
Returns count for an ``item`` from ``key``. Multiple items can be queried with one call.
def cmsQuery(self, key, *items): params = [key] params += items return self.execute_command(self.CMS_QUERY, *params)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cfCount(self, key, item):\n params = [key, item]\n\n return self.execute_command(self.CF_COUNT, *params)", "def count(self, item):\n if item in self: \n return self[item]\n else: \n return 0", "def topkCount(self, key, *items):\n params = [key]\n params += items\n\n return self.execute_command(self.TOPK_COUNT, *params)", "def count(self, item):\n return _(self._.count(item))", "def getKeyCount(self,\n key):\n if (self.hasKey(key) == 1):\n return self.__keyCount[key]\n else:\n return 0", "def get_count(name, key):\n total = 0\n query = CounterShard.all().filter('name = ', name).filter('reference_key = ', key)\n for counter in query:\n total += counter.count\n \n return total", "def count(self, item):\n # type: (Any) -> int\n return list.count(self, self.ref(item))", "def count(self, conn, key):\n return conn.llen(key)", "def count(item):\n return len(item)", "def count(self, item):\n # TODO: complete this function!\n if item not in self:\n return 0\n else:\n num_occur = 0\n if self._first == item:\n num_occur += 1\n num_occur += self._rest.count(item)\n return num_occur", "def get_item_count(self):\n resp = self.app.get('/items')\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n data = json.loads(resp.data)\n return len(data)", "def count(self, item: Any) -> int:\n curr = self._first\n count = 0\n\n while curr is not None:\n if curr.item == item:\n count += 1\n curr = curr.next\n\n return count", "def item_count(item_id, arg):\n global database\n table = database.Tables.items\n upd = table.update(None).where(table.c.id == item_id).values(count=table.c.count+(int(arg)))\n database.conn.execute(upd)", "def count_item(*, item : Any, list : Union[List[Any], ConduitVariable]) -> List[Any]:\n return list.count(item)", "def total(my_list, item):\n return my_list.count(item)", "def count_results(key):\n max_results = 1\n sleep(0.3)\n req = requests.get(f\"\"\"http://eutils.ncbi.nlm.nih.gov/entrez/eutils/esearch.fcgi?db=pubmed&retmode=xml&retmax={max_results}&sort=relevance&term={key}\"\"\")\n answer = BeautifulSoup(req.text, 'html.parser')\n result = int(answer.find_all(\"count\")[0].get_text())\n return(result)", "def count(self, key):\n self._metrics[key] += 1", "def size(self, key):\n with SessionContext(self.SessionClass) as session:\n q = session.query(PAW2_DBObject.numElems)\n return q.filter(PAW2_DBObject.key == key).one()[0]", "def getItemCount(self, ItemBase):\n Found = 0\n for CurrItem in self.List:\n if CurrItem.Base == ItemBase:\n Found = 1\n break\n\n if not Found: return 0\n else: return CurrItem.Count", "def get_count(self, cf_name, key, start='', finish='', keyspace_name=None):\n return self._Get_Count(\n cf_name=cf_name, key=key, start=start, finish=finish,\n keyspace_name=keyspace_name)", "def num_keys_command():\n # Get Args needed for the command\n incident = demisto.args().get('id', get_investigation_id())\n # Search Collection counting matching incident_id\n cursor = COLLECTION.find({})\n count = 0\n for i in cursor:\n if incident in i:\n count += 1\n return f'The count of the key/value pairs for the incident - {str(count)}', {}, {}", "def __incKeyCount(self,\n key):\n if (self.__keyCount.has_key(key) == 0): self.__keyCount[key] = 0\n self.__keyCount[key] = self.__keyCount[key] + 1\n return self.__keyCount[key]", "def count(self):\n return self.connection.llen(self.key)", "def count(self):\n return self.connection._llen(self.key)", "def get_count(self, entry):\n return entry.count", "def get_count(self, table_name, key, sharded=False):\n\n if sharded:\n 
counter_sum = 0\n counter = None\n counters = self._get_counters_from_indice(table_name, key)\n\n if counters:\n for counter in counters:\n\n counter_sum += int(counter.get(self.data_property, 0))\n return counter_sum\n else:\n key = self.sharded_key(key, 1) # only one shard\n counter = self.get_item(table_name, key)\n if counter:\n return counter.get(self.data_property, None)\n return None", "def count(self, index):\n if isinstance(index, list):\n index = ','.join(index)\n req = requests.get(\n urljoin(self.base_url, '{0}/_count'.format(index)),\n verify=self.verify_certs)\n return req.json()['count']", "def __call__(self, item):\n token, counts = item\n return token, sum(counts)", "def size(self, key):\n return len(self[key])", "def get(self, key):\n if key is None or key not in self.cache_data.keys():\n return\n self.count += 1\n self.key_tracker.update({key: self.count})\n return self.cache_data.get(key)", "def number_with_key(key):\n # good for checking proliferation of sort_key etc\n db = TinyDB(CARD_DATA_FILE)\n card_data = db.table('card_data')\n packs = card_data.all()\n total = 0\n with_key = 0\n for pack in packs:\n total += 1\n if key in pack:\n with_key += 1\n print('{} out of {} have sort keys'.format(with_key, total))", "def GetChildrenCount(self, item, recursively=True):\r\n\r\n return item.GetChildrenCount(recursively)", "async def count(self, **kw):\n\n pass", "def keycount(self, essid):\n with SessionContext(self.SessionClass) as session:\n q = session.query(PAW2_DBObject.key)\n q = q.join(PYR2_DBObject).join(ESSID_DBObject)\n q = q.filter(ESSID_DBObject.essid == essid)\n return q.count()", "def _increment_count(self, key):\n\n if not self._count.has_key(key):\n self._count[key] = 0\n\n self._count[key] += 1", "def _increment_count(self, key):\n\n if not self._count.has_key(key):\n self._count[key] = 0\n\n self._count[key] += 1", "def count(self, item: Any) -> int:\n # If this recursive list is empty\n if self.is_empty():\n return 0\n # If there is a first and a rest.\n else:\n # Check if the first is equal and add the count on the rest of the list.\n return int(self._first == item) + self._rest.count(item)", "def get_num_values(self, item):\n\tnum_values = 1\n\t\n\t# Valor mas antiguo de la linked list\n\t# Siempre tiene valor, si no, no tenemos la entrada en el hashset\n\tvalue = item[\"tail\"][\"next\"]\n \twhile long(value) != 0:\n\t num_values += 1\n\t value = value[\"next\"]\n\n\treturn num_values", "def count(self, **query):\n # This may be optimised into one query in the future.\n result = 0\n for product_type, count in self._do_count_by_product(query):\n result += count\n\n return result", "def count(self, query):", "def get_count(self, _filter=None):\n\t\treturn self.run(self._get_count_query(_filter))[0][0]", "def _index(self,key):\n index=0\n for item in self._item:\n if item.key==key:\n return index\n index+=1\n return -1", "def countitems(self):\n count = 0\n sid = self.client.scannerOpen(self.table, '', ['f:s'])\n while 1:\n r = self.client.scannerGetList(sid, 1000)\n #r = self.client.scannerGet(sid)\n if not r: break\n count += len(r)\n logging.debug('%d %s', count, r[-1].row)\n self.scannerClose(sid)\n return count", "def get_counts(filename, key):\r\n column_keys, get_data = get_csv(filename)\r\n assert(key in column_keys[1:])\r\n column = column_keys[1:].index(key)\r\n print 'getcounts() %s : %s column = %d' % (filename, key, column+1) \r\n counts_dict = {}\r\n for i,(k,v) in enumerate(get_data()):\r\n x = v[column]\r\n counts_dict[x] = 
counts_dict.get(x, 0) + 1\r\n return counts_dict", "def count(self, query=None):\n return self.create_search(query).count()", "def index(self, key):\n count = 0\n for k in self.__ordered_keys:\n if k.lower() == key.lower():\n return count\n count = count + 1\n raise KeyError(key)", "def count(listing):\n if 'meta' in listing and 'query_total' in listing['meta']:\n return listing['meta']['query_total']", "def __setKeyCount(self,\n key,\n count):\n self.__keyCount[key] = count\n return self.__keyCount[key]", "def count(self):\n return self._lift(\"count\")", "def __len__(self):\n return sum(item['qty'] for item in self.basket.values()) # counts all the values of the key qty", "def count(cls, client) :\n try :\n obj = nshttpprofile()\n option_ = options()\n option_.count = True\n response = obj.get_resources(client, option_)\n if response :\n return response[0].__dict__['___count']\n return 0\n except Exception as e :\n raise e", "def count_words(item):\n word, occurences = item\n return word, sum(occurences)", "def count(self, *args, **kwargs):\r\n with base.extract_request():\r\n kwargs['per_page'] = 1\r\n request = self.get(*args, **kwargs)\r\n\r\n return request, parse_count", "def count(self) -> int:\n if self._cached_items is not None:\n return len(self._cached_items)\n return self.items.count()", "def get_count(keyname, num_shards=NUM_SHARDS, value=1):\n if num_shards:\n total = 0\n for index in range(0, num_shards):\n shard_name = \"%s:%s\" % (str(keyname), str(index))\n count = kv.get(shard_name)\n if count:\n total += count\n else:\n total = kv.get(keyname)\n if total is None:\n total = value\n kv.set(keyname, total)\n return total", "def topkQuery(self, key, *items):\n params = [key]\n params += items\n \n return self.execute_command(self.TOPK_QUERY, *params)", "def _count(self):\n if self._count_valid:\n return self._total_results\n\n url = self._build_url(\"/_search\")\n request = self._build_request(0, -1)\n resp = self._cb.post_object(url, body=request)\n result = resp.json()\n\n self._total_results = result[\"num_found\"]\n self._count_valid = True\n\n return self._total_results", "def _count(self):\n if self._count_valid:\n return self._total_results\n\n url = self._build_url(\"/_search\")\n request = self._build_request(0, -1)\n resp = self._cb.post_object(url, body=request)\n result = resp.json()\n\n self._total_results = result[\"num_found\"]\n self._count_valid = True\n\n return self._total_results", "def count(self):\r\n url = '{0}/{1}'.format(self.get_url(), 'count')\r\n\r\n return http.Request('GET', url), parsers.parse_json", "def frequency(item, the_list):\n list_length = len(the_list)\n # initialising counters\n i = 0\n item_count = 0\n\n # looping through every item in the list\n while i < list_length:\n # if the item being checked in the list equals the item being searched for, increment the count\n if the_list[i] == item:\n item_count = item_count + 1\n i = i + 1\n\n # printing the result\n print(str(item) + ' appears ' + str(item_count) + ' times')\n\n return item_count", "def count(self, jql):\n return len(self.jira_backend.search_issues(jql, maxResults=1000, fields='key'))", "def keycount(self, essid):\n return self.cli.essids.keycount(essid)", "def increment_count(dictionary, key):\n if key:\n if key in dictionary:\n dictionary[key] += 1\n else:\n dictionary[key] = 1", "def count(self):\n\n return self._get(\"count\", rtype=UInt)", "def count_products(list_products):\n for each_item in ADD_PRODUCTS: #This iterates in the dictionary\n num_of_products = 
list_products.count(each_item) #This count each product\n if num_of_products > 0:\n price = ADD_PRODUCTS[each_item]\n print num_of_products, each_item + \"(s)\", \"a\", (\"Q%.2f c/u\") % price", "def item_count(self):\n return self.items.shape[0]", "def cart_distinct_item_count(request):\n return get_cart_items(request).count()", "def count(cls, client) :\n\t\ttry :\n\t\t\tobj = bfdsession()\n\t\t\toption_ = options()\n\t\t\toption_.count = True\n\t\t\tresponse = obj.get_resources(client, option_)\n\t\t\tif response :\n\t\t\t\treturn response[0].__dict__['___count']\n\t\t\treturn 0\n\t\texcept Exception as e :\n\t\t\traise e", "def count(cls, **kwargs):\n kwargs.setdefault('params', {})\n kwargs['params'].update({'search_type': 'count'})\n res = cls.search(raw_result=True, **kwargs)\n return res['hits']['total']", "def getNumberOfKeys(self, attr, view) -> int:\n ...", "def get_count(self, asset=None):\n if asset is None or 'pc:count' not in asset.properties:\n return self.item.properties.get('pc:count')\n else:\n return asset.properties.get('pc:count')", "def do_count(self, args):\n args = shlex.split(args)\n if len(args) < 1:\n return\n _nb_objects = 0\n items = storage.all()\n for key in items:\n if items[key].__class__.__name__ == args[0]:\n _nb_objects += 1\n print(_nb_objects)", "def getCount(self, event):\n # Attempt 2: Still too slow\n count = 0\n \n for mEvent in self:\n if event.__st__(mEvent):\n count += 1\n \n return count\n \n # Attempt 1: Too slow\n #return reduce((lambda x, y: x+y),\n # map((lambda i: itemset <= i), self))", "def increment_count(count_dict, key):\n if key in count_dict:\n count_dict[key] += 1\n else:\n count_dict[key] = 1", "def getFreq(TDB,key):\n \"\"\" key is set of subitems to count frequancy of repeatation this key in the TDB \"\"\"\n freq = 0\n for items in TDB:\n exist = True\n for element in key:\n if element not in items:\n exist = False\n break\n if exist:\n freq+=1\n return freq", "def search(self, query):\n if query is None:\n return -1\n\n count = 0\n\n for field in [self.key, self.name] + self.aliases + self.lines:\n count += field.lower().count(query.lower())\n\n return count", "def qualify_key_s3(self, key='', bucket=None):\n \n count = 0\n prefix = key\n \n if bucket is None:\n bucket = self.AWS_S3_BUCKET\n\n paginator = self.get_s3_client().get_paginator('list_objects_v2')\n response_iterator = paginator.paginate( Bucket=bucket, Prefix=prefix, Delimiter='')\n\n for page in response_iterator:\n for obj in page['Contents']:\n count=count+1\n\n return count", "def count_filtered(cls, client, filter_) :\n try :\n obj = nshttpprofile()\n option_ = options()\n option_.count = True\n option_.filter = filter_\n response = obj.getfiltered(client, option_)\n if response :\n return response[0].__dict__['___count']\n return 0\n except Exception as e :\n raise e", "def count(self, resource):\n return len(self.all(resource))", "def counts(url, key='author.email'):\n LOG.info('Getting counts', url=url, key=key)\n authors = GitTool.commits(url, key, include_defaults=False, result_format='flat_list')\n counts = defaultdict(lambda: 0)\n for x in authors:\n if key == 'author.email':\n x = re.sub(r'@.+', '', x)\n counts[x] += 1\n\n return OrderedDict(reversed(sorted(counts.items(), key=lambda x: x[1])))", "def count(cls, client) :\n\t\ttry :\n\t\t\tobj = lbprofile()\n\t\t\toption_ = options()\n\t\t\toption_.count = True\n\t\t\tresponse = obj.get_resources(client, option_)\n\t\t\tif response :\n\t\t\t\treturn response[0].__dict__['___count']\n\t\t\treturn 
0\n\t\texcept Exception as e :\n\t\t\traise e", "async def count(\n self,\n *,\n filter: Optional[Dict[str, Any]] = DEFAULT_FILTER,\n session: Optional[Any] = DEFAULT_SESSION,\n **kwargs: Any,\n ) -> int:\n return await self._database.count(\n self.name, filter=filter, session=session, **kwargs\n )", "def count(self) -> int:\n return pulumi.get(self, \"count\")", "def count(self, query):\n if len(query._where) == 0:\n url = '%s/%s' % (self.uri, query.table())\n else:\n url = '%s/%s/filter?%s' % (self.uri, query.table(), query.encode())\n data, resp = self.execute(method='HEAD', url=url)\n count = resp.getheader(\"X-Result-Count\")\n return int(count)", "def get_number_of_items(self):\n return len(self.__item_map)", "def count(self, query=None):\n return self.__db.count(query)", "def getCount(self):\n return self.count", "def count(self):\n return {'count': self.collection.count()}", "def get_size(self, key):\n try:\n return wait(self.proto.vsiz(key))\n except TyrantError:\n raise KeyError(key)", "def count_by_product(self, **query):\n return self._do_count_by_product(query)", "def _count(self):\n if self._count_valid:\n return self._total_results\n\n result = self._cb.get_object(self._doc_class.urlobject.format(self._cb.credentials.org_key))\n results = result.get(\"results\", [])\n\n self._total_results = len(results)\n self._count_valid = True\n\n return self._total_results", "def keycount(self, essid):\n if essid not in self.essids:\n raise KeyError(\"ESSID not in store.\")\n return len(self.essids[essid][1])", "def count(self, column, keys=None, **kwds_filter):\n isnull = self._pandas.isnull\n mapper = lambda value: 1 if (value and not isnull(value)) else 0\n reducer = lambda x, y: x + y\n return self.mapreduce(mapper, reducer, column, keys, **kwds_filter)", "def get_num_items(self):\r\n return self.num_items", "def score(item, fd, key):\n return fd.get(key(item), 0)", "def count_items(self):\n count = 0\n for o in self.order_lst:\n count += o.count()\n \n return count", "def getNumberOfKeys(self) -> int:\n ...", "def _get_count(results):\n return len(results)", "def _items_count(self, queryset: QuerySet) -> int:\n try:\n # forcing to find queryset.count instead of list.count:\n return queryset.all().count()\n except AttributeError:\n return len(queryset)", "def count(self):\n return self.get_count()", "def query_count(query, params=None):\n count_query = 'SELECT COUNT(*) FROM (' + query + ') AS a;'\n response = database.get_engine().execute(count_query, params)\n count = response.fetchone()\n response.close()\n return count[0]" ]
[ "0.7717446", "0.72450763", "0.72318083", "0.68318814", "0.6799004", "0.6794261", "0.66601163", "0.65616006", "0.6446357", "0.64149535", "0.63869673", "0.63530505", "0.6233722", "0.62298226", "0.5982605", "0.597717", "0.59531206", "0.5869809", "0.5767538", "0.57387173", "0.5700375", "0.56802946", "0.5672119", "0.56582934", "0.5650989", "0.55886775", "0.55646205", "0.55503017", "0.55345154", "0.5529054", "0.55281717", "0.55110496", "0.5497812", "0.54857665", "0.543637", "0.543637", "0.54284537", "0.5422486", "0.5413469", "0.54106075", "0.54050404", "0.54037386", "0.5387728", "0.5383928", "0.53827", "0.53822833", "0.5378128", "0.5372914", "0.53659916", "0.53491724", "0.53345686", "0.5319846", "0.5310234", "0.5305737", "0.5280677", "0.5268853", "0.52670956", "0.52670956", "0.5266706", "0.52551055", "0.5238652", "0.52361095", "0.5234542", "0.5225774", "0.5222086", "0.5221286", "0.5215756", "0.52108175", "0.5205102", "0.5202518", "0.5199332", "0.5196392", "0.51795083", "0.51784265", "0.5168635", "0.51638556", "0.51533514", "0.5149267", "0.5146981", "0.51466256", "0.51380724", "0.51364726", "0.51318794", "0.5131325", "0.5122223", "0.5117717", "0.51167434", "0.51148635", "0.5105066", "0.50925857", "0.5092089", "0.5088518", "0.5082756", "0.5079654", "0.5058483", "0.5057704", "0.50460917", "0.50416964", "0.5041128", "0.5027766", "0.50232834" ]
0.0
-1
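Illustrative note (not part of the dataset record above): the ``cmsQuery`` method shown in the preceding query/document pair returns approximate counts for one or more items from a Count-Min Sketch. Below is a minimal usage sketch, assuming a redisbloom-py-style ``Client`` that also exposes ``cmsInitByDim`` and ``cmsIncrBy``; the import path, key names, and connection settings are assumptions for illustration only, not part of the record.
# Hedged usage sketch for a cmsQuery-style method (assumed redisbloom-py-style client).
from redisbloom.client import Client  # assumed import path; adjust to your client library

rb = Client(host='localhost', port=6379)             # hypothetical connection settings
rb.cmsInitByDim('page_hits', 2000, 5)                # create a sketch with width=2000, depth=5
rb.cmsIncrBy('page_hits', ['home', 'about'], [3, 1]) # add some counts
counts = rb.cmsQuery('page_hits', 'home', 'about')   # several items queried in one call
print(counts)                                        # approximate counts, e.g. [3, 1]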
Merges ``numKeys`` of sketches into ``destKey``. Sketches specified in ``srcKeys``. All sketches must have identical width and depth. ``Weights`` can be used to multiply certain sketches. Default weight is 1. Both ``srcKeys`` and ``weights`` are lists.
def cmsMerge(self, destKey, numKeys, srcKeys, weights=[]): params = [destKey, numKeys] params += srcKeys self.appendWeights(params, weights) return self.execute_command(self.CMS_MERGE, *params)
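Illustrative note (not part of the dataset record): a hedged sketch of how the ``cmsMerge`` method defined above might be used, again assuming a redisbloom-py-style ``Client``. All sketches, including the destination, must be created with identical width and depth, and the optional ``weights`` list multiplies the counts contributed by each source sketch; names and values below are made up for illustration.
# Hedged usage sketch for a cmsMerge-style method (assumed redisbloom-py-style client).
from redisbloom.client import Client  # assumed import path; adjust to your client library

rb = Client(host='localhost', port=6379)   # hypothetical connection settings
for key in ('sketch_a', 'sketch_b', 'sketch_dest'):
    rb.cmsInitByDim(key, 2000, 5)          # identical width and depth for every sketch
rb.cmsIncrBy('sketch_a', ['item'], [5])
rb.cmsIncrBy('sketch_b', ['item'], [2])
# Merge 2 source sketches into the destination; weights default to 1 per source.
rb.cmsMerge('sketch_dest', 2, ['sketch_a', 'sketch_b'], weights=[1, 3])
print(rb.cmsQuery('sketch_dest', 'item'))  # approximately [1*5 + 3*2] == [11]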
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def copySkinWeights(*args, destinationSkin: Union[AnyStr, bool]=\"\", influenceAssociation:\n Union[AnyStr, List[AnyStr], bool]=\"\", mirrorInverse: bool=True, mirrorMode:\n Union[AnyStr, bool]=\"\", noBlendWeight: bool=True, noMirror: bool=True,\n normalize: bool=True, sampleSpace: Union[int, bool]=0, smooth: bool=True,\n sourceSkin: Union[AnyStr, bool]=\"\", surfaceAssociation: Union[AnyStr,\n bool]=\"\", uvSpace: Union[List[AnyStr, AnyStr], bool]=None, q=True,\n query=True, e=True, edit=True, **kwargs)->Union[None, Any]:\n pass", "def join_w(targs, srcs, ws):\n # convert targs/srcs to dicts if given as arrays\n if not isinstance(targs, dict):\n targs_ = copy(targs)\n targs = {\n cell_type: targs_ == cell_type for cell_type in set(targs_)\n }\n if not isinstance(srcs, dict):\n srcs_ = copy(srcs)\n srcs = {\n cell_type: srcs_ == cell_type for cell_type in set(srcs_)\n }\n \n # make sure all targ/src masks have same shape\n targ_shapes = [mask.shape for mask in targs.values()]\n src_shapes = [mask.shape for mask in srcs.values()]\n \n if len(set(targ_shapes)) > 1:\n raise Exception('All targ masks must have same shape.')\n \n if len(set(src_shapes)) > 1:\n raise Exception('All targ masks must have same shape.')\n \n n_targ = targ_shapes[0][0]\n n_src = src_shapes[0][0]\n \n # make sure weight matrix dimensions match sizes\n # of targ/src classes\n for syn, ws_ in ws.items():\n for (targ, src), w_ in ws_.items():\n if not w_.shape == (targs[targ].sum(), srcs[src].sum()):\n raise Exception(\n 'Weight matrix for {}: ({}, {}) does not match '\n 'dimensionality specified by targ/src masks.')\n \n # loop through synapse types\n dtype = list(list(ws.values())[0].values())[0].dtype\n ws_full = {}\n \n for syn, ws_ in ws.items():\n \n w = np.zeros((n_targ, n_src), dtype=dtype)\n \n # loop through population pairs\n for (targ, src), w_ in ws_.items():\n \n # get mask of all cxns from src to targ\n mask = np.outer(targs[targ], srcs[src])\n \n assert mask.sum() == w_.size\n \n w[mask] = w_.flatten()\n \n ws_full[syn] = w\n \n return ws_full", "def transfer_weights(src_model, dest_model):\r\n # ingore the first layer Input()\r\n # layer 1-24 to 1-24\r\n for i in range(1, 24):\r\n dest_model.layers[i].set_weights(src_model.layers[i].get_weights())\r\n print(\"Partially load weights from layer 1-24 successfully!\")\r\n\r\n # layer 25-45 to 65-85\r\n for i in range(25, 45):\r\n dest_model.layers[i+40].set_weights(src_model.layers[i].get_weights())\r\n print(\"Partially load weights from layer 25-45 successfully!\")\r\n\r\n # layer 46-65 to 126-145\r\n for i in range(46, 65):\r\n dest_model.layers[i+80].set_weights(src_model.layers[i].get_weights())\r\n print(\"Partially load weights from layer 46-65 successfully!\")\r\n\r\n # 69 to 189\r\n dest_model.layers[69+120].set_weights(src_model.layers[69].get_weights())\r\n print(\"Partially load weights from layer 69 successfully!\")", "def load_embeddings(self, weight, words,\n target_embeddings='src', verbose=False):\n if isinstance(weight, np.ndarray):\n weight = torch.from_numpy(weight)\n assert weight.size(1) == self.emb_dim, \\\n \"Mismatched embedding dim %d for model with dim %d\" % \\\n (weight.size(1), self.emb_dim)\n target_words = {word: idx for idx, word in enumerate(words)}\n for idx, word in enumerate(self.src_dict.vocab):\n if word not in target_words:\n if verbose:\n logging.warn(\"Couldn't find word [%s]\" % word)\n continue\n if target_embeddings == 'src':\n self.src_embeddings.weight.data[idx, :].copy_(\n weight[target_words[word], :])\n 
elif target_embeddings == 'trg':\n self.trg_embeddings.weight.data[idx, :].copy_(\n weight[target_words[word], :])\n else:\n raise ValueError('target_embeddings must be `src` or `trg`')", "def weightKmers(self, weightDict):\n for k, w in weightDict.iteritems():\n assert k in self.kmers\n self.G.edge[k + \"_L\"][k + \"_R\"]['weight'] = w", "def add_edge(self, src_key, dest_key, weight=1):\n self.vertices[src_key].add_neighbour(self.vertices[dest_key], weight)", "def copyDeformerWeights(*args, destinationDeformer: Union[AnyStr, bool]=\"\", destinationShape:\n Union[AnyStr, bool]=\"\", mirrorInverse: bool=True, mirrorMode:\n Union[AnyStr, bool]=\"\", noMirror: bool=True, smooth: bool=True,\n sourceDeformer: Union[AnyStr, bool]=\"\", sourceShape: Union[AnyStr,\n bool]=\"\", surfaceAssociation: Union[AnyStr, bool]=\"\", uvSpace:\n Union[List[AnyStr, AnyStr], bool]=None, q=True, query=True, e=True,\n edit=True, **kwargs)->Union[None, Any]:\n pass", "def copy_skin_weights(source_skin, target_skin):\n\n # gets the shape back from the source_skin and target_skin\n # need to do this as providing the sourceSkin and destinationSkin arguments\n # to the copySkinWeights command does not update correctly the shapes\n\n source_shape = cmds.ls(cmds.listHistory(\"{}.outputGeometry\".format(\n source_skin), pdo=False, future=True), dag=True,\n noIntermediate=True)\n target_shape = cmds.ls(cmds.listHistory(\n \"{}.outputGeometry\".format(target_skin),\n pdo=False, future=True), dag=True,\n noIntermediate=True)\n\n # checks if source and target shapes list are bigger than 1\n if len(source_shape) > 1:\n source_shape = source_shape[0]\n if len(target_shape) > 1:\n target_shape = target_shape[0]\n\n cmds.select(source_shape, target_shape)\n\n # copy skin command\n cmds.copySkinWeights(surfaceAssociation=\"closestPoint\", noMirror=True,\n influenceAssociation=(\"label\",\n \"closestJoint\",\n \"oneToOne\"))\n\n # forces refresh\n cmds.refresh()", "def hard_copy_weights(self, target, source):\n for target_param, param in zip(target.parameters(), source.parameters()):\n target_param.data.copy_(param.data)", "def add_images_weighted(input1:Image, input2:Image, output :Image = None, weight1:float=1, weight2:float=1):\n\n parameters = {\n \"src\":input1,\n \"src1\":input2,\n \"dst\":output,\n \"factor\":float(weight1),\n \"factor1\":float(weight2)\n };\n\n execute(__file__, 'add_images_weighted_' + str(len(output.shape)) + 'd_x.cl', 'add_images_weighted_' + str(len(output.shape)) + 'd', output.shape, parameters);\n\n return output", "def merge_images(sources, targets, opts, k=10):\n _, _, h, w = sources.shape\n row = int(np.sqrt(opts.batch_size))\n merged = np.zeros([3, row*h, row*w*2])\n for idx, (s, t) in enumerate(zip(sources, targets)):\n i = idx // row\n j = idx % row\n merged[:, i*h:(i+1)*h, (j*2)*h:(j*2+1)*h] = s\n merged[:, i*h:(i+1)*h, (j*2+1)*h:(j*2+2)*h] = t\n return merged.transpose(1, 2, 0)", "def apply_weights(src, dest_shape, n_s, n_b, row, col, s):\n\n dest = np.ndarray(dest_shape).flatten()\n dest[:] = 0.0\n src = src.flatten()\n\n for i in range(n_s):\n dest[row[i]-1] = dest[row[i]-1] + s[i]*src[col[i]-1]\n\n return dest.reshape(dest_shape)", "def distribute_by_weights(path: Tensor, nimages: int, path_target: Tensor = None, weights: Tensor = None, climbing_pivots: list = None):\n # Ensure storage for coordinates\n if path_target is None:\n path_target = path.new(nimages, path.shape[1])\n else:\n assert path_target is not path, \"Source must be unequal to target for redistribution\"\n assert 
path_target.shape[0] == nimages\n # Ensure weights\n if weights is None:\n weights = path.new(nimages - 1).fill_(1)\n else:\n assert len(weights.shape) == 1\n assert weights.shape[0] == nimages - 1\n\n # In climbing mode, reinterpolate only between the climbing images\n if climbing_pivots is not None:\n assert path.shape[0] == nimages, \"Cannot change number of items when reinterpolating with respect to climbing images.\"\n assert len(climbing_pivots) == nimages\n assert all(isinstance(b, bool) for b in climbing_pivots), \"Image must be climbing or not.\"\n start = 0\n for i, is_climbing in enumerate(climbing_pivots):\n if is_climbing or i == nimages - 1:\n distribute_by_weights(path[start:i + 1], i + 1 - start, path_target[start:i + 1], weights[start:i])\n start = i\n return path_target\n\n if path is path_target:\n # For the computation the original path is necessary\n path_source = path.clone()\n else:\n path_source = path\n\n # The current distances between elements on chain\n current_distances = (path_source[:-1] - path_source[1:]).norm(2, 1)\n target_positions = (weights / weights.sum()).cumsum(0) * current_distances.sum() # Target positions of elements (spaced by weights)\n\n # Put each new item spaced by weights (measured along line) on the line\n last_idx = 0 # Index of previous pivot\n pos_prev = 0. # Position of previous pivot on chain\n pos_next = current_distances[last_idx].item() # Position of next pivot on chain\n path_target[0] = path_source[0]\n for i in range(1, nimages - 1):\n position = target_positions[i - 1].item()\n while position > pos_next:\n last_idx += 1\n pos_prev = pos_next\n pos_next += current_distances[last_idx].item()\n\n t = (position - pos_prev) / (pos_next - pos_prev)\n path_target[i] = (t * path_source[last_idx + 1] + (1 - t) * path_source[last_idx])\n path_target[nimages - 1] = path_source[-1]\n\n return path_target", "def linear_interpolation_keys(self, keys):\n if len(keys) != len(self.dims):\n raise ValueError(\"Number of keys must be equal to the number of\" +\n \" dimensions. 
(Got \" + str(len(keys)) + \"/\"\n + str(len(self.dims)) + \")\")\n \n weightedKeys = []\n for key, dim in zip(keys, self.dims):\n weightedKeys.append(dim.linear_interpolation_indexes(key))\n \n while len(weightedKeys) > 1:\n newKeys = []\n for key1 in weightedKeys[-2]:\n for key2 in weightedKeys[-1]:\n newKeys.append({'key':key1['key'] + key2['key'],\n 'weight':key1['weight']*key2['weight']})\n weightedKeys.pop(-1)\n weightedKeys[-1] = newKeys\n\n return weightedKeys[0]", "def set_weights(self, weights):\n tuples = []\n for layer in self.layers:\n num_param = len(layer.weights)\n layer_weights = weights[:num_param]\n for sw, w in zip(layer.weights, layer_weights):\n tuples.append((sw, w))\n weights = weights[num_param:]\n K.batch_set_value(tuples)", "def combine_cache_keys(cache_keys):\r\n if len(cache_keys) == 1:\r\n return cache_keys[0]\r\n else:\r\n combined_id = Target.maybe_readable_combine_ids(cache_key.id for cache_key in cache_keys)\r\n combined_hash = hash_all(sorted(cache_key.hash for cache_key in cache_keys))\r\n combined_num_sources = sum(cache_key.num_sources for cache_key in cache_keys)\r\n combined_sources = \\\r\n sorted(list(itertools.chain(*[cache_key.sources for cache_key in cache_keys])))\r\n return CacheKey(combined_id, combined_hash, combined_num_sources, combined_sources)", "def get_weights(self, nn_weights, rov_id): # Get weights from CCEA population\n\n for w in range(self.n_weights):\n self.weights[rov_id, w] = nn_weights[w]", "def update(self, x_dict, y_dict, weight):\n assert len(x_dict) == len(y_dict), \"invalid # of qids\"\n \n qids = self.__get_shuffled_qids(x_dict, y_dict, weight.epoch)\n w = weight.get_dense_weight()\n for qid in tqdm(qids):\n w = approx_ap(x_dict[qid].toarray(), y_dict[qid], w, self.eta, self.alpha, self.beta)\n weight.set_weight(sp.csr_matrix(w.reshape((1, weight.dims))))\n weight.epoch += 1", "def load_weights(self, weights):\n\n i = 0\n for l in range(1, self.num_layers()):\n for n in range(self.get_layer(l).num_nodes):\n for w in range(len(self.get_node_with_layer(l, n).weights)):\n self.get_node_with_layer(l, n).weights[w] = weights[i]\n i += 1", "def set_weight(self, dest, weight):\n self.points_to[dest] = weight", "def write_weights_images(self):\n for weight_name, weight in self._weights.items():\n self._write_weight_image_to_tensorboard(\n name=f\"{self._Sections.WEIGHTS}/{weight_name}\",\n weight=weight,\n step=self._epochs,\n )", "def tie_weights(self):\n if hasattr(self, \"get_output_embeddings\") and hasattr(\n self, \"get_input_embeddings\"):\n output_embeddings = self.get_output_embeddings()\n if output_embeddings is not None:\n self._tie_or_clone_weights(output_embeddings,\n self.get_input_embeddings())", "def weight_paste(pixSrc, pixPng, src_id, logo_id):\n weight = pixPng[:, :, 3] / 255\n weight = weight[:, :, np.newaxis]\n alpha = weight[logo_id]\n beta = 1 - alpha\n pixSrc[src_id] = pixSrc[src_id] * beta + pixPng[logo_id] * alpha\n return pixSrc", "def weights(self, weights):\n\n self._weights = weights", "def set_weights(self, weights: Dict[PolicyID, dict]):\n self.workers.local_worker().set_weights(weights)", "def copy_keys(source: str, destination: str) -> None:\n try:\n keys = [filename for filename in os.listdir(source) if filename.lower().endswith(\".bikey\")]\n except FileNotFoundError:\n logging.debug(f\"Error when searching for *.bikey files to copy at {source}\", exc_info=True)\n keys = []\n\n if len(keys) == 0:\n logging.warning(f\"No *.bikey files found in {source}\")\n return\n\n os.makedirs(destination, 
exist_ok=True)\n\n for key in keys:\n shutil.copy2(os.path.join(source, key), destination)", "def update_weights(self, exp_ids, raw_weights):\n assert len(set(exp_ids)) == len(exp_ids),\\\n \"Invalid Argument: must pass a unique set of experience ids.\"\n\n new_weights = (raw_weights + self.weight_offset) ** self.alpha\n\n # Update the weights used for sampling each of the experiences.\n for idx, weight in zip(exp_ids, new_weights):\n self.experiences.update_weight(idx, weight)\n\n # Update beta which is used to weight the importance sampling.\n if self.beta < self.beta_f:\n self.beta = min(self.beta_f, self.beta + self.beta_update)", "def copy_conv_weights_from(self, source: \"Encoder\") -> None:\n pass", "def add_keys(destdict, srclist, value=None):\n if len(srclist) > 1:\n destdict[srclist[0]] = {}\n destdict[srclist[0]] = destdict.get(srclist[0], {})\n add_keys(destdict[srclist[0]], srclist[1:], value)\n else:\n destdict[srclist[0]] = value\n return destdict", "def prepare_weights(self, pre_exist_words, hs, negative, wv, sentences,\n nonce, update=False, replication=False,\n sum_over_set=False, weighted=False, beta=1000):\n # set initial input/projection and hidden weights\n if not update:\n raise Exception('prepare_weight on Nonce2VecTrainables should '\n 'always be used with update=True')\n else:\n self.update_weights(pre_exist_words, hs, negative, wv, sentences,\n nonce, replication, sum_over_set, weighted,\n beta)", "def add_dict(dest, src):\n for key in src.keys():\n if key in dest.keys():\n dest[key] += src[key]\n else:\n dest[key] = src[key]", "def remap(src_data, weights, dest_shape):\n\n dest_data = np.ndarray(dest_shape)\n\n with nc.Dataset(weights) as wf:\n try:\n n_s = wf.dimensions['n_s'].size\n except KeyError as e:\n n_s = wf.dimensions['num_links'].size\n try:\n n_b = wf.dimensions['n_b'].size\n except KeyError as e:\n n_b = wf.dimensions['dst_grid_size'].size\n try:\n row = wf.variables['row'][:]\n except KeyError as e:\n row = wf.variables['dst_address'][:]\n try:\n col = wf.variables['col'][:]\n except KeyError as e:\n col = wf.variables['src_address'][:]\n\n s = wf.variables['S'][:]\n\n dest_data[:, :] = apply_weights(src_data[:, :], dest_data.shape,\n n_s, n_b, row, col, s)\n return dest_data", "def _set_weights(self, weights):\r\n self.weights = weights.reshape(self.output_size, self.input_size+1)", "def generate_weights(sizes):\n weights = {}\n weights[\"w\"] = []\n weights[\"b\"] = []\n for i in range(len(sizes)-2):\n weights[\"w\"].append(np.random.randn(sizes[i], sizes[i+1]))\n weights[\"b\"].append(np.random.randn(sizes[i+1]))\n weights[\"w_final\"] = np.random.randn(sizes[-2], sizes[-1])/np.sqrt(sizes[-1])\n weights[\"b_final\"] = np.random.randn(sizes[-1])\n return weights", "def match_ckpt_weights_to_model(\n model: tf.keras.Model,\n names_to_keys: Mapping[str, str],\n keys_to_weights: Mapping[str, Any]) -> List[Any]:\n init_weight_list = []\n\n for weight in model.weights:\n # Look up weight name in checkpoint weight names.\n weight_name = weight.name.replace(':0', '')\n ckpt_key = names_to_keys.get(weight_name, None)\n\n if ckpt_key:\n init_weight = keys_to_weights[ckpt_key]\n else:\n logging.info(\n '\"%s\" not found in checkpoint. 
'\n 'Using randomly initialized values.', weight_name)\n init_weight = weight.numpy()\n\n init_weight_list.append(init_weight)\n\n return init_weight_list", "def init_embedding_weights(self, dictionary, embeddings_index, embedding_dim):\r\n pretrained_weight = np.empty([len(dictionary), embedding_dim], dtype=float)\r\n for i in range(len(dictionary)):\r\n if dictionary.idx2word[i] in embeddings_index:\r\n pretrained_weight[i] = embeddings_index[dictionary.idx2word[i]]\r\n else:\r\n pretrained_weight[i] = helper.initialize_out_of_vocab_words(embedding_dim)\r\n # pretrained_weight is a numpy matrix of shape (num_embeddings, embedding_dim)\r\n if isinstance(self.embedding, nn.Sequential):\r\n self.embedding[0].weight.data.copy_(torch.from_numpy(pretrained_weight))\r\n else:\r\n self.embedding.weight.data.copy_(torch.from_numpy(pretrained_weight))", "def _accumulate_props(dest_props, src_props):\n props_size = 0\n if src_props:\n for k, v in src_props.items():\n if v is not None:\n props_size += len(k) + _sizeof(v)\n dest_props[k] = v\n return props_size", "def add(self, keys: List[Tuple[int, int]], vectors: np.ndarray, weights: List[float], *args, **kwargs):\n pass", "def combine_dicts_multiplicatively(dict1, dict2):\n weights = {}\n\n for key1, weight1 in dict1.items():\n for key2, weight2 in dict2.items():\n key = (key1, key2)\n weight = weight1 * weight2\n weights[key] = weight\n \n return weights", "def sinterstore(self, dest, keys, *args):\r\n keys = list_or_args('sinterstore', keys, args)\r\n return self.format_inline('SINTERSTORE', dest, *keys)", "def get_batch_with_weights(self, batch_size):\n n, _ = self.contexts.shape\n if self.buffer_s == -1:\n # use all the data\n ind = np.random.choice(range(n), batch_size)\n else:\n # use only buffer (last buffer_s obs)\n ind = np.random.choice(range(max(0, n - self.buffer_s), n), batch_size)\n\n weights = np.zeros((batch_size, self.num_actions))\n sampled_actions = np.array(self.actions)[ind]\n a_ind = np.array([(i, val) for i, val in enumerate(sampled_actions)])\n weights[a_ind[:, 0], a_ind[:, 1]] = 1.0\n return self.contexts[ind, :], self.rewards[ind, :], weights", "def set_weights(self, weights):\r\n self.weights = weights", "def applySparseFilter(kerns, kshp, nkern, images, imgshp,\r\n step=(1, 1), bias=None, mode='valid'):\r\n\r\n # inshp contains either 2 entries (height,width) or 3 (nfeatures,h,w)\r\n # in the first case, default nfeatures to 1\r\n if numpy.size(imgshp) == 2:\r\n imgshp = (1,) + imgshp\r\n\r\n # construct indices and index pointers for sparse matrix\r\n indices, indptr, spmat_shape, sptype, outshp, kmap = \\\r\n convolution_indices.sparse_eval(imgshp, kshp, nkern, step, mode)\r\n\r\n # build a sparse weight matrix\r\n sparsew = theano.sparse.CSM(sptype, kmap)(kerns, indices,\r\n indptr, spmat_shape)\r\n output = sparse.structured_dot(sparsew, images.T).T\r\n if bias is not None:\r\n output += bias\r\n\r\n return output, numpy.hstack((nkern, outshp))", "def merge_working_sets(self, other):\n\n for dist in other.by_key.values(): self.add(dist)\n return self", "def update_weights(self):\n self._weights = self._weights + self.update_weights_value\n self.weights_clipping()", "def set_weights(self, weights):\n params = self.weights\n if len(params) != len(weights):\n raise ValueError('You called `set_weights(weights)` on layer \"' +\n self.name + '\" with a weight list of length ' +\n str(len(weights)) + ', but the layer was expecting ' +\n str(len(params)) + ' weights. 
Provided weights: ' +\n str(weights)[:50] + '...')\n if not params:\n return\n weight_value_tuples = []\n param_values = K.batch_get_value(params)\n for pv, p, w in zip(param_values, params, weights):\n if pv.shape != w.shape:\n raise ValueError('Layer weight shape ' + str(pv.shape) +\n ' not compatible with '\n 'provided weight shape ' + str(w.shape))\n weight_value_tuples.append((p, w))\n K.batch_set_value(weight_value_tuples)", "def merge_images(sources, targets, batch_size=16):\n _, _, h, w = sources.shape\n row = int(np.sqrt(batch_size))\n merged = np.zeros([3, row*h, row*w*2])\n for idx, (s, t) in enumerate(zip(sources, targets)):\n i = idx // row\n j = idx % row\n merged[:, i*h:(i+1)*h, (j*2)*h:(j*2+1)*h] = s\n merged[:, i*h:(i+1)*h, (j*2+1)*h:(j*2+2)*h] = t\n merged = merged.transpose(1, 2, 0)\n return merged", "def _construct_src_node_feat(\n self, k_dict: Dict[str, Tensor], v_dict: Dict[str, Tensor],\n edge_index_dict: Dict[EdgeType, Adj]\n ) -> Tuple[Tensor, Tensor, Dict[EdgeType, int]]:\n cumsum = 0\n num_edge_types = len(self.edge_types)\n H, D = self.heads, self.out_channels // self.heads\n\n # Flatten into a single tensor with shape [num_edge_types * heads, D]:\n ks: List[Tensor] = []\n vs: List[Tensor] = []\n type_list: List[Tensor] = []\n offset: Dict[EdgeType] = {}\n for edge_type in edge_index_dict.keys():\n src = edge_type[0]\n N = k_dict[src].size(0)\n offset[edge_type] = cumsum\n cumsum += N\n\n # construct type_vec for curr edge_type with shape [H, D]\n edge_type_offset = self.edge_types_map[edge_type]\n type_vec = torch.arange(H, dtype=torch.long).view(-1, 1).repeat(\n 1, N) * num_edge_types + edge_type_offset\n\n type_list.append(type_vec)\n ks.append(k_dict[src])\n vs.append(v_dict[src])\n\n ks = torch.cat(ks, dim=0).transpose(0, 1).reshape(-1, D)\n vs = torch.cat(vs, dim=0).transpose(0, 1).reshape(-1, D)\n type_vec = torch.cat(type_list, dim=1).flatten()\n\n k = self.k_rel(ks, type_vec).view(H, -1, D).transpose(0, 1)\n v = self.v_rel(vs, type_vec).view(H, -1, D).transpose(0, 1)\n\n return k, v, offset", "def copy_weights(copy_from: nn.Module, copy_to: nn.Module, polyak=None):\n if polyak is not None:\n for target_param, param in zip(copy_to.parameters(), copy_from.parameters()):\n target_param.data.copy_(polyak * param + (1 - polyak) * target_param)\n else:\n copy_to.load_state_dict(copy_from.state_dict())", "def add(self, destination: n, weight: w):\n self.connections[destination] = weight", "def _combine_embeddings_for_input(\n self, embedding_dict: Dict[str, int]) -> tf.Tensor:\n if self._config.embedding_combination_method == (\n types.EmbeddingCombinationMethod.SUM_ALL):\n return sum(embedding_dict.values())\n elif self._config.embedding_combination_method == (\n types.EmbeddingCombinationMethod.CONCATENATE):\n return tf.concat(list(embedding_dict.values()), axis=-1)\n elif self._config.embedding_combination_method == (\n types.EmbeddingCombinationMethod.SUM_BY_SUFFIX):\n feature_suffixes = [\n get_feature_suffix(feat) for feat in embedding_dict.keys()\n ]\n combined_embedding = None\n for suffix in feature_suffixes:\n embeddings_to_sum = []\n for feat, emb in embedding_dict.items():\n feat_suffix = get_feature_suffix(feat)\n if feat not in self._config.identity_lookup_features and (feat_suffix\n == suffix):\n embeddings_to_sum.append(emb)\n if combined_embedding is None:\n combined_embedding = [sum(embeddings_to_sum)]\n else:\n combined_embedding += [sum(embeddings_to_sum)]\n combined_embedding += [\n embedding_dict[feat] for feat in 
self._config.identity_lookup_features\n ]\n return tf.concat(combined_embedding, axis=1)\n elif self._config.embedding_combination_method == (\n types.EmbeddingCombinationMethod.COMBINE_SNR_OUT):\n return deep_encoders.compute_combined_snr_embedding(\n embedding_dict=embedding_dict)\n else:\n raise ValueError(\"Embedding combination method \"\n f\"{self._config.embedding_combination_method} \"\n \"not recognized.\")", "def _merge_sources(dest: Dict[str, Any], source: ConfigSource) -> Dict[str, Any]:\n for key, val in source.items():\n if isinstance(val, dict):\n if key in dest:\n dest[key] = _merge_sources(dest[key], val)\n else:\n dest[key] = val.copy()\n else:\n dest[key] = val\n return dest", "def optimize(\n dsk,\n keys,\n fuse_keys=None,\n fast_functions=None,\n inline_functions_fast_functions=(getter_inline,),\n rename_fused_keys=True,\n **kwargs,\n):\n if not isinstance(keys, (list, set)):\n keys = [keys]\n keys = list(flatten(keys))\n\n if not isinstance(dsk, HighLevelGraph):\n dsk = HighLevelGraph.from_collections(id(dsk), dsk, dependencies=())\n\n dsk = optimize_blockwise(dsk, keys=keys)\n dsk = fuse_roots(dsk, keys=keys)\n dsk = dsk.cull(set(keys))\n\n # Perform low-level fusion unless the user has\n # specified False explicitly.\n if config.get(\"optimization.fuse.active\") is False:\n return dsk\n\n dependencies = dsk.get_all_dependencies()\n dsk = ensure_dict(dsk)\n\n # Low level task optimizations\n if fast_functions is not None:\n inline_functions_fast_functions = fast_functions\n\n hold = hold_keys(dsk, dependencies)\n\n dsk, dependencies = fuse(\n dsk,\n hold + keys + (fuse_keys or []),\n dependencies,\n rename_keys=rename_fused_keys,\n )\n if inline_functions_fast_functions:\n dsk = inline_functions(\n dsk,\n keys,\n dependencies=dependencies,\n fast_functions=inline_functions_fast_functions,\n )\n\n return optimize_slices(dsk)", "def sample_weighted(num_samples, weight_dict):\n num_choices = len(weight_dict)\n choice_weights = np.asarray(list(weight_dict.values()), dtype = np.float)\n samples = np.random.choice(num_choices, num_samples, p=choice_weights)\n return one_hot(num_choices, samples)", "def transfer_weights(\n self, new_model, new_optimizer=None, optimizer=None, ignore_weights=None\n ):\n if type(self) is not type(new_model):\n raise ValueError(\n \"Transferring weights to another model type is not supported\"\n )\n if ignore_weights is None:\n ignore_weights = set()\n ignore_weights_ref = set(weight.ref() for weight in ignore_weights)\n weights = self.weights\n new_weights = new_model.weights\n for weight, new_weight in zip(weights, new_weights):\n if new_weight.ref() not in ignore_weights_ref:\n new_weight.assign(weight)\n if new_optimizer is not None and optimizer is not None:\n for slot_name in new_optimizer.get_slot_names():\n if slot_name not in optimizer.get_slot_names():\n continue\n new_slot = new_optimizer.get_slot(new_weight, slot_name)\n slot = optimizer.get_slot(weight, slot_name)\n new_slot.assign(slot)", "def repeat_weights(weights, shape):\n weights_extra = weights.unsqueeze(-1)\n return weights_extra.expand(\n weights.shape[0], shape[-1]).reshape(shape)", "def forward(self, query_images: Tensor, key_images: Tensor) -> Tuple[Tensor, Tensor]:\n q = self.encoder_q(query_images)\n if self.head_q is not None:\n q = self.head_q(q)\n q = nn.functional.normalize(q, dim=1)\n\n with torch.no_grad():\n # The keys are shuffled between the GPUs before encoding them, to avoid batch normalization leaking\n # information between the samples. 
This works only when using the DDP strategy.\n if isinstance(self.trainer.strategy, DDPStrategy):\n key_images, original_order = shuffle_batch(key_images)\n\n k = self.encoder_k(key_images)\n if self.head_k is not None:\n k = self.head_k(k)\n k = nn.functional.normalize(k, dim=1)\n\n if isinstance(self.trainer.strategy, DDPStrategy):\n k = sort_batch(k, original_order)\n\n return q, k", "def _tie_or_clone_weights(self, output_embeddings, input_embeddings):\n if output_embeddings.weight.shape == input_embeddings.weight.shape:\n output_embeddings.weight = input_embeddings.weight\n elif output_embeddings.weight.shape == input_embeddings.weight.t(\n ).shape:\n output_embeddings.weight.set_value(input_embeddings.weight.t())\n else:\n raise ValueError(\n \"when tie input/output embeddings, the shape of output embeddings: {}\"\n \"should be equal to shape of input embeddings: {}\"\n \"or should be equal to the shape of transpose input embeddings: {}\".\n format(output_embeddings.weight.shape, input_embeddings.weight.\n shape, input_embeddings.weight.t().shape))\n if getattr(output_embeddings, \"bias\", None) is not None:\n if output_embeddings.weight.shape[\n -1] != output_embeddings.bias.shape[0]:\n raise ValueError(\n \"the weight lase shape: {} of output_embeddings is not equal to the bias shape: {}\"\n \"please check output_embeddings configuration\".format(\n output_embeddings.weight.shape[\n -1], output_embeddings.bias.shape[0]))", "def update_weights(self):\n self._weights = self._weights + self.update_weights_value", "def setWeights(self, weights):\n self._call_java('setWeights', weights)", "def MergeValues(self, join_source, num_columns=1):\n assert len(self.rows) == len(join_source.rows)\n\n for r, row in enumerate(self.rows):\n self.rows[r] = row + join_source.rows[r][0:num_columns]\n\n return self", "def pair_weights(self, labels, ranks):\n raise NotImplementedError('Calling an abstract method.')", "def weight_images(im_dir, wt_dir, weight_dir, im_weight_dir, wt_weight_dir, imtype='intbgsub', wttype='rrhr'):\n im_suff, wt_suff = '*-{}.fits'.format(imtype), '*-{}.fits'.format(wttype)\n imfiles = sorted(glob.glob(os.path.join(im_dir, im_suff)))\n wtfiles = sorted(glob.glob(os.path.join(wt_dir, wt_suff))) \n\n # weight each image\n for i in range(len(imfiles)):\n # read in the data\n imfile = imfiles[i]\n wtfile = os.path.join(os.path.dirname(wtfiles[i]), os.path.basename(imfile).replace(imtype, wttype))\n im, hdr = astropy.io.fits.getdata(imfile, header=True)\n rrhr, rrhrhdr = astropy.io.fits.getdata(wtfile, header=True)\n\n # weight the data by the exposure time\n wt = rrhr\n newim = im * wt\n\n # write data to new files and copy the *_area.fits files created by Montage to have the same naming convention\n newfile = os.path.join(im_weight_dir, os.path.basename(imfile))\n astropy.io.fits.writeto(newfile, newim, hdr)\n old_area_file = imfile.replace('.fits', '_area.fits')\n if os.path.exists(old_area_file):\n new_area_file = newfile.replace('.fits', '_area.fits')\n shutil.copy(old_area_file, new_area_file)\n\n weightfile = os.path.join(wt_weight_dir, os.path.basename(wtfile))\n astropy.io.fits.writeto(weightfile, wt, rrhrhdr)\n old_area_file = wtfile.replace('.fits', '_area.fits')\n if os.path.exists(old_area_file):\n new_area_file = weightfile.replace('.fits', '_area.fits')\n shutil.copy(old_area_file, new_area_file)", "def setWeights(self, w):\n raise NotImplementedError", "def load_weigths_into_target_network(self):\n logging.debug(\"Transfer Weight!\")\n 
self.network.save_weights(self._save_path)\n self.target_network.load_weights(self._save_path)", "def build_from_paths(\n input_source: str,\n input_target: str,\n single_vocab: bool = False,\n num_words_source: int = 50000,\n num_words_target: int = 50000,\n min_count_source: int = 1,\n min_count_target: int = 1,\n) -> (Dict[str, int], Dict[str, int]):\n with ExitStack() as stack:\n logger.info(\"Building vocabulary from dataset: %s and %s\", input_source, input_target)\n files = (stack.enter_context(smart_open(path)) for path in [input_source, input_target])\n return build_vocab(\n *files,\n single_vocab=single_vocab,\n num_words_source=num_words_source,\n num_words_target=num_words_target,\n min_count_source=min_count_source,\n min_count_target=min_count_target\n )", "def minibatcher(inputs, targets, batchsize, shuffle=False):", "def read_weights(F_DIST):\n global F_DIST_w1, w, num_features, N_IDEN_PROB, MORE_THAN_1_W\n\n # read weights file first line, get json information into a dict\n with open(WEIGHTS_FNAME, \"r\") as f:\n WEIGHTS_FDATA = json.loads(f.readline())\n # has keys LAYERS, J, NUM_FEATURES\n\n # get and verify data from the weights file\n N_IDEN_PROB = WEIGHTS_FDATA['LAYERS']\n MORE_THAN_1_W = (N_IDEN_PROB > 2) # more than 2 layers === more than 1 weight\n if J > WEIGHTS_FDATA['J']:\n raise RuntimeError(\"J in weights file is less than J provided to the \"\n \"script; former >= latter\")\n if WEIGHTS_FDATA['NUM_FEATURES'] != num_features:\n raise RuntimeError(\"num_features in weights file is not the same as \"\n \"num_features provided to this script\")\n\n # read weights file\n # no. of weights is 1 less than no. of layers in weights file\n list_of_w = ad.read_weights_file(\n WEIGHTS_FNAME, N_IDEN_PROB-1, WEIGHTS_FDATA['J'], J, num_features\n )\n list_of_w[-1] = np.expand_dims(list_of_w[-1], axis=2)\n\n # split w[0]; multiply the F_DIST portion of w[0] with F_DIST\n w1_for_fdist, w1_for_r = np.split(list_of_w[0], [num_features-1], axis=1)\n F_DIST_w1 = F_DIST.bmm(torch.from_numpy(w1_for_fdist))\n\n w['first_for_r'] = torch.from_numpy(w1_for_r)\n w['except_first'] = [torch.from_numpy(wi) for wi in list_of_w[1:]]", "def add_neighbour(self, dest, weight):\n self.points_to[dest] = weight", "def copy_key(self, new_key_name, src_bucket_name,\r\n src_key_name, metadata=None, src_version_id=None,\r\n storage_class='STANDARD', preserve_acl=False,\r\n encrypt_key=False):\r\n headers = {}\r\n provider = self.connection.provider\r\n src_key_name = boto.utils.get_utf8_value(src_key_name)\r\n if preserve_acl:\r\n if self.name == src_bucket_name:\r\n src_bucket = self\r\n else:\r\n src_bucket = self.connection.get_bucket(src_bucket_name)\r\n acl = src_bucket.get_xml_acl(src_key_name)\r\n if encrypt_key:\r\n headers[provider.server_side_encryption_header] = 'AES256'\r\n src = '%s/%s' % (src_bucket_name, urllib.quote(src_key_name))\r\n if src_version_id:\r\n src += '?version_id=%s' % src_version_id\r\n headers = {provider.copy_source_header : str(src)}\r\n headers[provider.storage_class_header] = storage_class\r\n if metadata:\r\n headers[provider.metadata_directive_header] = 'REPLACE'\r\n headers = boto.utils.merge_meta(headers, metadata, provider)\r\n else:\r\n headers[provider.metadata_directive_header] = 'COPY'\r\n response = self.connection.make_request('PUT', self.name, new_key_name,\r\n headers=headers)\r\n body = response.read()\r\n if response.status == 200:\r\n key = self.new_key(new_key_name)\r\n h = handler.XmlHandler(key, self)\r\n xml.sax.parseString(body, h)\r\n if 
hasattr(key, 'Error'):\r\n raise provider.storage_copy_error(key.Code, key.Message, body)\r\n key.handle_version_headers(response)\r\n if preserve_acl:\r\n self.set_xml_acl(acl, new_key_name)\r\n return key\r\n else:\r\n raise provider.storage_response_error(response.status,\r\n response.reason, body)", "def copy_cluster_weights(shape, weight_file, method=\"bilinear\"):\n\n # gets the temporary folder path\n temp_path = get_temp_folder()\n short_name = get_prefix_less_name(shape)\n\n for node in weight_file:\n if not weight_file[node]:\n continue\n cmds.deformerWeights(weight_file[node], im=True, shape=short_name,\n deformer=node, path=temp_path, method=method,\n vertexConnections=True)", "def prepare_sample_weight_modes(training_endpoints, sample_weight_mode):\n\n if isinstance(sample_weight_mode, collections.abc.Mapping):\n generic_utils.check_for_unexpected_keys(\n 'sample_weight_mode', sample_weight_mode,\n [e.output_name for e in training_endpoints])\n\n for end_point in training_endpoints:\n if not end_point.should_skip_target_weights():\n if end_point.output_name not in sample_weight_mode:\n raise ValueError('Output ' + end_point.output_name +\n 'missing from `_sample_weight_modes` dictionary')\n else:\n end_point.sample_weight_mode = sample_weight_mode.get(\n end_point.output_name)\n elif isinstance(sample_weight_mode, (list, tuple)):\n if len(sample_weight_mode) != len(training_endpoints):\n raise ValueError('When passing a list as sample_weight_mode, '\n 'it should have one entry per model output. '\n 'The model has ' + str(len(training_endpoints)) +\n ' outputs, but you passed ' +\n str(len(sample_weight_mode)) + '_sample_weight_modes.')\n for mode, endpoint in zip(sample_weight_mode, training_endpoints):\n if not endpoint.should_skip_target_weights():\n endpoint.sample_weight_mode = mode\n else:\n for endpoint in training_endpoints:\n if not endpoint.should_skip_target_weights():\n endpoint.sample_weight_mode = sample_weight_mode", "def addWeight(img1, wt1, img2, wt2, gamma=0):\n\tdst = cv2.addWeight(img1, wt1, img2, wt2, gamma)\n\treturn dst", "def prepare_loss_weights(training_endpoints, loss_weights=None):\n if loss_weights is None:\n for e in training_endpoints:\n e.loss_weight = 1.\n elif isinstance(loss_weights, collections.abc.Mapping):\n generic_utils.check_for_unexpected_keys(\n 'loss_weights', loss_weights,\n [e.output_name for e in training_endpoints])\n for e in training_endpoints:\n e.loss_weight = loss_weights.get(e.output_name, 1.)\n elif isinstance(loss_weights, list):\n if len(loss_weights) != len(training_endpoints):\n raise ValueError('When passing a list as loss_weights, '\n 'it should have one entry per model output. '\n 'The model has ' + str(len(training_endpoints)) +\n ' outputs, but you passed loss_weights=' +\n str(loss_weights))\n for w, e in zip(loss_weights, training_endpoints):\n e.loss_weight = w\n else:\n raise TypeError('Could not interpret loss_weights argument: ' +\n str(loss_weights) + ' - expected a list of dicts.')", "def mixup(batch_size, alpha, images, labels):\n mix_weight = tf.compat.v1.distributions.Beta(alpha, alpha).sample([batch_size, 1])\n mix_weight = tf.maximum(mix_weight, 1. - mix_weight)\n images_mix_weight = tf.reshape(mix_weight, [batch_size, 1, 1, 1])\n # Mixup on a single batch is implemented by taking a weighted sum with the\n # same batch in reverse.\n images_mix = (\n images * images_mix_weight + images[::-1] * (1. - images_mix_weight))\n labels_mix = labels * mix_weight + labels[::-1] * (1. 
- mix_weight)\n return images_mix, labels_mix", "def randomizeWeights(self, rand_distr):\n raise NotImplementedError", "def prepare_weights(self, hs, negative, wv, docvecs, update=False):\n # set initial input/projection and hidden weights\n if not update:\n self.reset_weights(hs, negative, wv, docvecs)\n else:\n self.update_weights(hs, negative, wv)", "def weighted_choice(*values, **kwargs):\n key = kwargs.get('key', lambda x: 1.0)\n if len(values) == 1:\n values = values[0]\n if len(values) == 0:\n raise TypeError('weighted_choice expected 1 arguments, got 0')\n\n weights = [key(v) for v in values]\n s = sum(weights)\n r = random.random() * s\n for v,w in zip(values, weights):\n s -= w\n if r > s:\n return v\n return values[-1]", "def rename_weights(traindir, kkey, mon):\n # First load in the training.csv\n r = np.genfromtxt(os.path.join(traindir, \"training.csv\"), delimiter=\",\", names=True)\n e = r[\"epoch\"]\n q = r[mon]\n minq = np.min(q)\n beste = e[np.argmin(q)]\n\n newname = \"weights.\" + str(int(beste)) + \"-\" + \"{:.5f}\".format(minq) + \".hdf5\"\n\n os.rename(os.path.join(traindir, kkey), os.path.join(traindir, newname))", "def put_weights(self, content: ndarray, var_id: int, batch_no: int, block_id: int) -> None:\n pass", "def concat(self, key, value, width=None):\n # TODO: write better documentation, provide example code\n if width is None:\n wait(self.proto.putcat(key, value))\n else:\n wait(self.proto.putshl(key, value, width))", "def _ShardTestEmbeddings(self, weights, biases, num_shards):\n with ops.Graph().as_default() as g:\n sharded_weights = variable_scope.get_variable(\n \"w\",\n partitioner=partitioned_variables.fixed_size_partitioner(num_shards),\n initializer=constant_op.constant(weights))\n sharded_biases = variable_scope.get_variable(\n \"b\",\n partitioner=partitioned_variables.fixed_size_partitioner(num_shards),\n initializer=constant_op.constant(biases))\n with self.session(graph=g) as sess:\n self.evaluate(variables.global_variables_initializer())\n return self.evaluate([list(sharded_weights), list(sharded_biases)])", "def _pad_to_full_keypoint_dim(keypoint_coords, keypoint_scores, keypoint_inds,\n num_total_keypoints):\n batch_size, num_instances, _, _ = (\n shape_utils.combined_static_and_dynamic_shape(keypoint_coords))\n kpt_coords_transposed = tf.transpose(keypoint_coords, [2, 0, 1, 3])\n kpt_scores_transposed = tf.transpose(keypoint_scores, [2, 0, 1])\n kpt_inds_tensor = tf.expand_dims(keypoint_inds, axis=-1)\n kpt_coords_scattered = tf.scatter_nd(\n indices=kpt_inds_tensor,\n updates=kpt_coords_transposed,\n shape=[num_total_keypoints, batch_size, num_instances, 2])\n kpt_scores_scattered = tf.scatter_nd(\n indices=kpt_inds_tensor,\n updates=kpt_scores_transposed,\n shape=[num_total_keypoints, batch_size, num_instances])\n keypoint_coords_padded = tf.transpose(kpt_coords_scattered, [1, 2, 0, 3])\n keypoint_scores_padded = tf.transpose(kpt_scores_scattered, [1, 2, 0])\n return keypoint_coords_padded, keypoint_scores_padded", "def update_weights(self):\n\t\tpass", "def recommend(self, source_words, num_recs, rec_pool=[]):\n csr_fname_t = c_char_p\n num_recs_t = POINTER(c_uint)\n source_words_t = POINTER(c_int)\n num_source_words_t = c_uint\n use_rec_pool_t = c_uint\n rec_pool_t = POINTER(c_int)\n num_rec_pool_t = c_uint\n\n self.c_recommend.argtypes = [csr_fname_t, num_recs_t, source_words_t,\n num_source_words_t, use_rec_pool_t, rec_pool_t, num_rec_pool_t]\n\n self.c_recommend.restype = POINTER(c_int)\n\n num_recs_ptr = c_uint(num_recs)\n 
source_words_arr = (c_int * len(source_words))()\n source_words_arr[:] = [self._get_idx(word) for word in source_words]\n rec_pool_arr = (c_int * len(rec_pool))()\n rec_pool_arr[:] = [self._get_idx(word) for word in rec_pool]\n\n ret_ptr = self.c_recommend(self.csr_fname_b, byref(num_recs_ptr),\n source_words_arr, len(source_words), bool(rec_pool),\n rec_pool_arr, len(rec_pool))\n\n return [self.word_map[idx] for idx in ret_ptr[:num_recs_ptr.value]]", "def add_weights(projections, settings):\n uu, vv = np.meshgrid(settings.detector_us, settings.detector_vs)\n\n weights = settings.source_to_detector_dist / np.sqrt(\n settings.source_to_detector_dist ** 2. + uu ** 2. + vv ** 2.)\n\n return projections * weights[:, :, np.newaxis]", "def update_disc_copy(self):\n source = self.discriminator\n dest = self.discriminator_copy\n\n assert len(source.layers) == len(dest.layers)\n for dest_layer, source_layer in zip(dest.layers, source.layers):\n dest_layer.set_weights(source_layer.get_weights())", "def update_disc_copy(self):\n source = self.discriminator\n dest = self.discriminator_copy\n\n assert len(source.layers) == len(dest.layers)\n for dest_layer, source_layer in zip(dest.layers, source.layers):\n dest_layer.set_weights(source_layer.get_weights())", "def append_all_agent_batch_to_update_buffer(\n self,\n update_buffer: AgentBuffer,\n key_list: List[str] = None,\n batch_size: int = None,\n training_length: int = None,\n ) -> None:\n for agent_id in self.keys():\n self.append_to_update_buffer(\n update_buffer, agent_id, key_list, batch_size, training_length\n )", "def concat_examples(example_dicts):\n\n example_dict = copy.deepcopy(example_dicts[0])\n\n keys_to_match = [\n SCALAR_PREDICTOR_NAMES_KEY, VECTOR_PREDICTOR_NAMES_KEY,\n SCALAR_TARGET_NAMES_KEY, VECTOR_TARGET_NAMES_KEY, HEIGHTS_KEY\n ]\n\n for i in range(1, len(example_dicts)):\n if not numpy.allclose(\n example_dict[HEIGHTS_KEY], example_dicts[i][HEIGHTS_KEY],\n atol=TOLERANCE\n ):\n error_string = (\n '1st and {0:d}th dictionaries have different height coords '\n '(units are m AGL). 1st dictionary:\\n{1:s}\\n\\n'\n '{0:d}th dictionary:\\n{2:s}'\n ).format(\n i + 1, str(example_dict[HEIGHTS_KEY]),\n str(example_dicts[i][HEIGHTS_KEY])\n )\n\n raise ValueError(error_string)\n\n for this_key in keys_to_match:\n if this_key == HEIGHTS_KEY:\n continue\n\n if example_dict[this_key] == example_dicts[i][this_key]:\n continue\n\n error_string = (\n '1st and {0:d}th dictionaries have different values for '\n '\"{1:s}\". 
1st dictionary:\\n{2:s}\\n\\n'\n '{0:d}th dictionary:\\n{3:s}'\n ).format(\n i + 1, this_key, str(example_dict[this_key]),\n str(example_dicts[i][this_key])\n )\n\n raise ValueError(error_string)\n\n for this_key in DICTIONARY_KEYS:\n if this_key in keys_to_match:\n continue\n\n if isinstance(example_dict[this_key], list):\n example_dict[this_key] += example_dicts[i][this_key]\n else:\n example_dict[this_key] = numpy.concatenate((\n example_dict[this_key], example_dicts[i][this_key]\n ), axis=0)\n\n return example_dict", "def combine_per_choice(*args):\n args = list(args)\n result = args.pop()\n new_weight = None\n new_averages = None\n while args:\n other = args.pop()\n for key in other:\n if key not in result:\n result[key] = other[key]\n else:\n old_weight, old_averages = result[key]\n other_weight, other_averages = other[key]\n if (\n new_averages\n and set(old_averages.keys()) != set(new_averages.keys())\n ):\n raise ValueError(\n \"Can't combine per-choice results which used different sets of \"\n \"player models.\"\n )\n new_weight = old_weight + other_weight\n new_averages = {}\n for pmn in old_averages:\n new_averages[pmn] = (\n old_averages[pmn] * old_weight\n + other_averages[pmn] * other_weight\n ) / new_weight\n result[key] = (new_weight, new_averages)\n\n return result", "def WcCombiner(intermediates):\n\n # the use of the defaultdict data structures simplifies the summation of values (counts) of the intermediate\n # dictionaries. It only requires one statement, instead of 2, for creating a new key, value pair or\n # updating its values.\n result = defaultdict(int)\n\n # the following loop iterates over the first dictionary key and value pairs and then iterates over the next dictionary's\n # pairs. It continues until it iterates over all dictionaries that are members of the intermediates. 
While iterating,\n # a new dictionary is created, result, to hold all the pairs of the intermediate dictionaries, thus effectively\n # merging all of them.\n for k,v in chain(*intermediates):\n result[k] += v\n return result", "def init_weights(n_layers, layer_sizes):\n\n params = {}\n\n for i in range(n_layers):\n wn = 'W{}'.format(i)\n bn = 'b{}'.format(i)\n\n params[wn] = tf.get_variable(\n name=wn,\n shape=layer_sizes[i * 2],\n initializer=tf.contrib.layers.xavier_initializer(seed=42)\n )\n\n params[bn] = tf.get_variable(\n name=bn,\n shape=layer_sizes[(i * 2) + 1],\n initializer=tf.zeros_initializer()\n )\n\n return params", "def combine_score(self, src_tokens, hypos, hypos_len, scores):\n # Prepare all the weights and call combine weighted scores\n args = self.args\n weights = [\n args.l2r_model_weight,\n args.r2l_model_weight,\n args.reverse_model_weight,\n args.lm_model_weight,\n args.cloze_transformer_weight,\n ]\n bsz, src_len = src_tokens.size()\n hypos_len = hypos_len.type_as(scores)\n combined_scores = combine_weighted_scores(\n scores, weights, src_len, hypos_len, args.length_penalty\n )\n return combined_scores", "def generate_weights(self, weights_path: str) -> None:\n for file in os.listdir(self.processed_path):\n doc_path = f\"{self.processed_path}/{file}\"\n weights = self.weights_for_doc(doc_path)\n save_path = f\"{weights_path}/{file}\"\n np.save(save_path, weights)", "def data_consist(table1, table2, key1, key2, schema1, schema2, fname, sample_size=1.0,\n output_root='', keep_images=False, n_jobs=1):\n\n # check whether keys are valid\n if key1 not in table1.columns.values:\n raise ValueError('key1: does not exist in table1')\n if key2 not in table2.columns.values:\n raise ValueError('key2: does not exist in table2')\n\n # check whether two tables are unique in key level\n if table1[key1].nunique() != table1.shape[0]:\n raise ValueError('table1: should be unique in %s level' % (key1))\n if table2[key2].nunique() != table2.shape[0]:\n raise ValueError('table2: should be unique in %s level' % (key2))\n\n # check sample_size\n if sample_size > 1:\n if int(sample_size) != sample_size:\n raise ValueError('sample_size: only accept integer when it is > 1.0')\n if (sample_size > table1.shape[0]) or (sample_size > table2.shape[0]):\n print('sample_size: %d is smaller than %d or %d...' 
% (sample_size, table1.shape[0], table2.shape[0]))\n\n # check output_root\n if output_root != '':\n if not os.path.isdir(output_root):\n raise ValueError('output_root: root not exists')\n\n # create a new workbook to store everything\n wb = openpyxl.Workbook()\n\n # prepare directory for generated images\n img_dir = 'img_temp'\n if os.path.isdir(img_dir):\n shutil.rmtree(img_dir)\n os.mkdir(img_dir)\n\n # calculate the sample size\n if sample_size <= 1.0:\n both_keys = list(set(table1[key1].values).intersection(set(table2[key2].values)))\n sample_size = np.min([int(table1.shape[0] * sample_size), int(table2.shape[0] * sample_size), len(both_keys)])\n sample_keys = np.random.choice(both_keys, sample_size, replace=False)\n table1 = table1[table1[key1].isin(sample_keys)].reset_index(drop=True)\n table2 = table2[table2[key2].isin(sample_keys)].reset_index(drop=True)\n\n schema, check_features = _check_features(schema1, schema2)\n corr_results = []\n\n # key features\n key_features = check_features['key']\n if len(key_features) > 0:\n _n_jobs = np.min([n_jobs, len(key_features)])\n key_results = Parallel(n_jobs=_n_jobs)(delayed(_compare_key)(col, table1[[col]], table2[[col]], img_dir)\n for col in key_features)\n\n for key_result in key_results:\n if 'corr' in key_result.keys():\n corr_results.append(key_result['corr'])\n\n # write all results to worksheet\n ws = wb.create_sheet(title=u'key')\n _insert_numeric_results(key_results, ws, 40, img_dir)\n\n # numeric features\n numeric_features = check_features['numeric']\n if len(numeric_features) > 0:\n _n_jobs = np.min([n_jobs, len(numeric_features)])\n numeric_results = Parallel(n_jobs=_n_jobs)(delayed(_consist_numeric)(col, table1[[key1, col]],\n table2[[key2, col]], key1, key2, img_dir) for col in numeric_features)\n\n for numeric_result in numeric_results:\n if 'corr' in numeric_result.keys():\n corr_results.append(numeric_result['corr'])\n\n # write all results to worksheet\n ws = wb.create_sheet(title=u'numeric')\n _insert_numeric_results(numeric_results, ws, 45, img_dir)\n\n # string features\n string_features = check_features['str']\n if len(string_features) > 0:\n _n_jobs = np.min([n_jobs, len(string_features)])\n string_results = Parallel(n_jobs=_n_jobs)(delayed(_consist_string)(col, table1[[key1, col]],\n table2[[key2, col]], key1, key2) for col in string_features)\n\n for string_result in string_results:\n if 'corr' in string_result.keys():\n corr_results.append(string_result['corr'])\n\n # write all results to worksheet\n ws = wb.create_sheet(title=u'string')\n _insert_string_results(string_results, ws, 25)\n\n # date features\n date_features = check_features['date']\n if len(date_features) > 0:\n # get the current time\n snapshot_date_now = str(datetime.datetime.now().date())\n for col in date_features:\n table1[col] = (pd.to_datetime(snapshot_date_now) - pd.to_datetime(table1[col],\n errors='coerce')).astype('timedelta64[M]', errors='ignore')\n table2[col] = (pd.to_datetime(snapshot_date_now) - pd.to_datetime(table2[col],\n errors='coerce')).astype('timedelta64[M]', errors='ignore')\n _n_jobs = np.min([n_jobs, len(date_features)])\n date_results = Parallel(n_jobs=_n_jobs)(delayed(_consist_numeric)(col, table1[[key1, col]], table2[[key2, col]],\n key1, key2, img_dir, date_flag=True) for col in date_features)\n\n for date_result in date_results:\n if 'corr' in date_result.keys():\n corr_results.append(date_result['corr'])\n\n # write all results to worksheet\n ws = wb.create_sheet(title=u'date')\n 
_insert_numeric_results(date_results, ws, 45, img_dir, date_flag=True)\n\n # insert the summary\n _insert_summary(wb, schema, corr_results)\n\n wb.save(filename=os.path.join(output_root, 'data_consist_%s.xlsx' %(fname)))\n if not keep_images:\n shutil.rmtree(img_dir)", "def convolve(kerns, kshp, nkern, images, imgshp, step=(1, 1), bias=None,\r\n mode='valid', flatten=True):\r\n N = numpy\r\n # start by computing output dimensions, size, etc\r\n kern_size = N.int64(N.prod(kshp))\r\n\r\n # inshp contains either 2 entries (height,width) or 3 (nfeatures,h,w)\r\n # in the first case, default nfeatures to 1\r\n if N.size(imgshp) == 2:\r\n imgshp = (1,) + imgshp\r\n\r\n # construct indices and index pointers for sparse matrix, which,\r\n # when multiplied with input images will generate a stack of image\r\n # patches\r\n indices, indptr, spmat_shape, sptype, outshp = \\\r\n convolution_indices.conv_eval(imgshp, kshp, step, mode)\r\n\r\n # build sparse matrix, then generate stack of image patches\r\n csc = theano.sparse.CSM(sptype)(N.ones(indices.size), indices,\r\n indptr, spmat_shape)\r\n patches = (sparse.structured_dot(csc, images.T)).T\r\n\r\n # compute output of linear classifier\r\n pshape = tensor.stack(images.shape[0] * tensor.as_tensor(N.prod(outshp)),\\\r\n tensor.as_tensor(imgshp[0] * kern_size))\r\n patch_stack = tensor.reshape(patches, pshape, ndim=2)\r\n\r\n # kern is of shape: nkern x ksize*number_of_input_features\r\n # output is thus of shape: bsize*outshp x nkern\r\n output = tensor.dot(patch_stack, kerns.T)\r\n\r\n # add bias across each feature map (more efficient to do it now)\r\n if bias is not None:\r\n output += bias\r\n\r\n # now to have feature maps in raster order ...\r\n # go from bsize*outshp x nkern to bsize x nkern*outshp\r\n newshp = tensor.stack(images.shape[0],\\\r\n tensor.as_tensor(N.prod(outshp)),\\\r\n tensor.as_tensor(nkern))\r\n tensout = tensor.reshape(output, newshp, ndim=3)\r\n output = tensor.DimShuffle((False,) * tensout.ndim, (0, 2, 1))(tensout)\r\n if flatten:\r\n output = tensor.flatten(output, 2)\r\n\r\n return output, N.hstack((nkern, outshp))", "def generate(self, encoder_inputs, srcs_ids, beam_size=None, maxlen=None, prefix_tokens=None, src_weights=None):\n with torch.no_grad():\n return self._generate(encoder_inputs, srcs_ids, beam_size, maxlen, prefix_tokens, src_weights)", "def set_weight(self):\n\n for _, s in self.servers.items():\n self._set_server_weight(s)\n\n for _, g in self.groups.items():\n self._set_server_weight(g)\n\n for _, g in self.groups.items():\n self._set_group_resource(g)\n\n for _, g in self.groups.items():\n self._set_group_weight(g)", "def merge_weighted_rois(roi1, roi2):\n if (roi1.pixelSizeX != roi2.pixelSizeX) or (roi1.pixelSizeY != roi2.pixelSizeY):\n raise ValueError('The pixel sizes of the two WeightedROI objects should match!')\n\n if roi1.pixelSizeUnit != roi2.pixelSizeUnit:\n raise ValueError('The pixel size units of the two WeightedROI objects should match!')\n\n mask1 = roi1.get_weighted_mask(); mask2 = roi2.get_weighted_mask()\n\n return WeightedROI(mask1 + mask2, pixelSize=[roi1.pixelSizeY, roi1.pixelSizeX], pixelSizeUnit=roi1.pixelSizeUnit)" ]
[ "0.549154", "0.5410098", "0.520968", "0.49663427", "0.4958788", "0.4947422", "0.48904583", "0.4840362", "0.4815075", "0.47568074", "0.47536424", "0.47400427", "0.4730924", "0.4675684", "0.46377957", "0.46358636", "0.46322548", "0.46215504", "0.4620774", "0.46132562", "0.46081996", "0.45859233", "0.45747662", "0.45710614", "0.45550832", "0.45155415", "0.4464412", "0.44578615", "0.4454145", "0.44501168", "0.44478896", "0.4439837", "0.44278392", "0.44277808", "0.4423471", "0.44088197", "0.44062185", "0.4392494", "0.43864018", "0.43804777", "0.43601465", "0.43569344", "0.4354921", "0.434813", "0.43347108", "0.43165976", "0.43109584", "0.43029547", "0.4297899", "0.4293881", "0.42741746", "0.42633462", "0.42482963", "0.4245227", "0.4240817", "0.42383492", "0.42269897", "0.42032078", "0.41854864", "0.4184542", "0.41815653", "0.41704577", "0.41593334", "0.41543975", "0.41453168", "0.41379446", "0.4137745", "0.4131926", "0.41310418", "0.41308352", "0.4126112", "0.4125967", "0.41191792", "0.4117912", "0.4115363", "0.4109936", "0.4105067", "0.4096268", "0.40919772", "0.40865013", "0.40796688", "0.40733", "0.4070958", "0.40684998", "0.4066126", "0.406209", "0.40604228", "0.40604228", "0.40558326", "0.40515393", "0.40401402", "0.40386", "0.40311137", "0.4030842", "0.40306327", "0.40261546", "0.40244797", "0.4024297", "0.4021559", "0.40212473" ]
0.71798104
0
Returns width, depth and total count of the sketch.
def cmsInfo(self, key): return self.execute_command(self.CMS_INFO, key)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_depth_shape(self):\n width = -1\n height = -1\n for (serial, device) in self._enabled_devices.items():\n for stream in device.pipeline_profile.get_streams():\n if (rs.stream.depth == stream.stream_type()):\n width = stream.as_video_stream_profile().width()\n height = stream.as_video_stream_profile().height()\n return width, height", "def __extract_graph_shape(self):\n circuit = UbqcClient.pb_to_circuit(self.program)\n bw_pattern = transpile_to_brickwork(circuit)\n\n # Get shape\n input_ = bw_pattern.input_\n c_out, q_out = bw_pattern.output_\n output_ = c_out + q_out\n width = len(input_)\n depth = output_[0][1] - input_[0][1] + 1\n\n return width, depth", "def num_depth(self):\n return len(self._sizes) + len(self._ratios) - 1", "def dimensions():", "def getShape(self):\n if self.initDone:\n return self.pixelHeight,self.pixelWidth\n\n self._waitForInit()\n\n return self.pixelHeight,self.pixelWidth", "def num_depth(self):\n if self._index == 0:\n return len(self._ratios)\n else:\n return len(self._sizes) + len(self._ratios) - 1", "def depth(self):\n return _libsbml.Dimensions_depth(self)", "def get_nodes_pixel_count(self):\n sum_count = self.pixel_count\n for i in range(8):\n node = self.children[i]\n if node:\n sum_count += node.pixel_count\n return sum_count", "def getDimensions():", "def size(self):\n\n frame = self.get_frame()\n\n # Unpack array dimensions\n height, width, layers = np.array(frame).shape\n\n return width, height", "def size_playground(self):\n return self.size_playground", "def size(self):\r\n return self.root.size_tree", "def numPixels(self):\n\t\treturn self.size", "def numPixels(self):\n\t\treturn self.size", "def run(self, depth):\n\t\tdepth = depth.invert()\n\t\t\"\"\"use this to convert it into cm, or into m by changing 10 to 100\"\"\"\n\t\tdepth = divide(depth, 10)\n\t\t\"\"\"Makes blobs easier to detect\"\"\"\n\t\tdepth = depth.erode()\n\t\tblob_array = self.blob_detect(depth)\n\t\tif blob_array:\n\t\t\ttheadau_array = self.get_theadaus(blob_array)\n\t\t\tprint theadau_array\n\t\t\t\"\"\"\n\t\t\tUntill i fill in outside coord array, i will get rid of this code\n\t\t\toutside_coord_array = self.get_outside_points(blob_array)\n\t\t\tblob_width_array = self.get_width(outside_coord_array, theadau_array)\n\t\t\treturn blob_width_array\t\n\t\t\t\"\"\"\t\n\t\treturn blob_array", "def depth_percent(self):\n return self.container['depth_percent']", "def depth(self):\n return len(self.topology)", "def size(self):\n if self.root is None:\n return 0\n return self.root.size", "def get_height(self):\n height = 0\n for layer, ldata in self.conf['Layers'].items():\n layer_t = ldata['params']['thickness']\n height += layer_t\n return height", "def getWidth(self) -> int:\n ...", "def calculate_dimensions(self):\n x_coordinates = np.sort(self.grid['x'][:, 0]) # first x node\n self.nr_nodes_z = np.where(x_coordinates == x_coordinates[0])[0].size\n self.nr_elements_x = self.elements.shape[0] / (self.nr_nodes_z - 1)\n self.nr_nodes_x = self.nr_elements_x + 1\n self.nr_elements_z = self.nr_nodes_z - 1", "def depth(self):\n if self.size == 0:\n return 0\n return int(math.log(self.size, 2)) + 1", "def size(cls):\n return (cls.num_properties()*2 + 2)", "def info(self):\n\n\t\tprint(\"Pixels on a side: {0}\".format(self.data.shape[0]))\n\t\tprint(\"Pixel size: {0}\".format(self.resolution))\n\t\tprint(\"Total angular size: {0}\".format(self.side_angle))\n\t\tprint(\"lmin={0:.1e} ; lmax={1:.1e}\".format(self.lmin,self.lmax))", "def __len__(self):\n return self.flat_image.size", 
"def get_detector_size(self):\n sensor=self._get_sensor_info()\n return sensor.nMaxWidth,sensor.nMaxHeight", "def dimension(self):\r\n a = 0\r\n for x in self.faces():\r\n if (len(x) > a):\r\n a = len(x) \r\n return a-1", "def width(self) -> int:", "def width(self) -> int:", "def getDepth(self):\n return _libsbml.Dimensions_getDepth(self)", "def __len__(self) -> int:\n\n return self.layout.gaDims", "def padding_depth(self):\n\t\treturn self.paddings_shape_param('D')", "def pixelSize(self):\n br = self.sceneBoundingRect()\n if self.image is None:\n return 1,1\n return br.width()/self.width(), br.height()/self.height()", "def getDimensions(self):\n\ttop = self.getTop()\n\tleft = self.getLeft()\n\twidth = self.getWidth()\n\theight = self.getHeight()\n\treturn top, left, width, height", "def _stats(self):\n return (\"size = \" + str(self.size())\n + \"; height = \" + str(self.height()))", "def dimension(self):", "def width(self) -> int:\n return self._obj[self.x_dim].size", "def get_term_dimensions():\n height, width = subprocess.check_output(SIZE).split()\n return int(width), int(height)", "def shape(self):\n if self.volumes:\n return 4 * self.bars_count + 1 + 1,\n else:\n return 3 * self.bars_count + 1 + 1,", "def part1(mem):\n return len(paint_panels(mem, 0))", "def depth(self) -> int:\n return self.__depth", "def __len__(self):\n # print(\"len: \" + str(math.floor(len([name for name in os.listdir(self.imgs_dir) if os.path.isfile(self.imgs_dir+'//'+name)])/self.batch_size)-1)\n return math.floor(len([name for name in os.listdir(self.imgs_dir) if\n os.path.isfile(self.imgs_dir + '//' + name)]) / self.batch_size)", "def canvas_size(self):\r\n width = height = 0\r\n for image in self.images:\r\n x = image.x + image.absolute_width\r\n y = image.y + image.absolute_height\r\n if width < x:\r\n width = x\r\n if height < y:\r\n height = y\r\n return round_up(width), round_up(height)", "def calculate_root_statistics(self):\n total_length = 0\n total_radius = 0\n\n for i in range(len(self.pixel_list)):\n\n total_radius += self.pixel_list[i].radius + 0.5\n\n if i > 0:\n # Use the distance formula\n delta_x = self.pixel_list[i].x - self.pixel_list[i - 1].x\n delta_y = self.pixel_list[i].y - self.pixel_list[i - 1].y\n segment_length = math.sqrt(delta_x ** 2 + delta_y ** 2)\n total_length += segment_length\n\n self.total_length = total_length\n self.average_radius = total_radius / len(self.pixel_list)", "def size(self):\n height = 0\n width = 0\n\n mother = self.mother()\n father = self.father()\n\n for parent in self.parents():\n if not parent is None:\n pw, ph = parent.size()\n width += pw\n height += ph\n\n if width > 0:\n width += self._hmargin*2\n\n return width, height", "def __len__(self):\n _, timesteps, height, width = self.data.shape\n height //= self.size\n width //= self.size\n\n if self.subset == 'train':\n out = self.length\n elif self.subset == 'all':\n out = height * width\n else:\n out = (height // 2) * (width // 2)\n\n if not self.time:\n out *= timesteps\n\n return out", "def get_pixel_size_stack(stack, verbose=False):\n len_stack_x_pixel = 512\n len_stack_x_um = 71.5 / stack['wParamsNum'][30]\n \n stack_pixel_size = len_stack_x_um / len_stack_x_pixel\n \n if verbose:\n print(\"the real length of each pixel in stack image is: \\n{0} um\".format(stack_pixel_size))\n \n return stack_pixel_size", "def size(self):\n return (self.width)", "def width(self):\n return (self.scene.shape[2] - self.size) // self.size + 1", "def _get_synthesis_size(self, lvl):\n lvl_img = 
self.target_pyramid[lvl]\n h, w = lvl_img.shape[-2:]\n h, w = int(h * self.scale_factor[0]), int(w * self.scale_factor[1])\n return h, w", "def size_out(self):\n return self.dimensions", "def _nbytes(self, deep: bool = False) -> int:\n # for implementations with no useful getsizeof (PyPy)\n objsize = 24\n\n level_nbytes = sum(i.memory_usage(deep=deep) for i in self.levels)\n label_nbytes = sum(i.nbytes for i in self.codes)\n names_nbytes = sum(getsizeof(i, objsize) for i in self.names)\n result = level_nbytes + label_nbytes + names_nbytes\n\n # include our engine hashtable\n result += self._engine.sizeof(deep=deep)\n return result", "def dim(self):\n return self.raw_wires.get_dim();", "def test_dimensions(self):\n\t\t\n\t\t# default\t\n\t\tdetails = self.watcher.describe()\n\t\tprint(details)\n\t\t\n\t\t# default\t\n\t\tdetails = self.watcher.describe(layers=[self.first_layer])\n\t\tprint(details)\n\t\tN = details.N.to_numpy()[0]\n\t\tM = details.M.to_numpy()[0]\n\t\trf = details.rf.to_numpy()[0]\n\t\tnum_evals = details.num_evals.to_numpy()[0]\n\t\tprint(N,M,rf,num_evals)\n\t\t\n\t\tself.assertEqual(N,64)\n\t\tself.assertEqual(M,3)\n\t\tself.assertEqual(rf,9)\n\t\tself.assertEqual(num_evals,M*rf)", "def shape(self):\n return self._fl.h5[\"raw\"].shape", "def size(self):", "def size(self):\n\t\treturn self.dims", "def getCanvasSize():\n\t\treturn canvas.winfo_width(), canvas.winfo_height()", "def get_dim():\n return (Settings.width, Settings.height)", "def depth(self):\n return 0", "def get_width(self):\n\t\treturn len(self._background) if self._background else 0", "def get_dimensions(image, classname):\n start, ext = os.path.splitext(image)\n if ext == '.yuv':\n bitdepth = \"8\"\n res_split = start.split('x')\n width_split = res_split[0].split('_')\n width = width_split[-1]\n height_split = res_split[-1].split('_')\n m = res_split[-1].find(\"bit\")\n if res_split[-1][m - 2] == \"_\":\n depth = res_split[-1][m - 1]\n else:\n depth = res_split[-1][m - 2:m]\n height = height_split[0]\n elif classname == \"classE_exr\":\n size = os.path.basename(image).split('_')[2]\n try:\n dimension_cmd = [\"identify\", '-size', size, '-format', '%w,%h,%z', image]\n width, height, depth = subprocess.check_output(dimension_cmd).split(\",\")\n except subprocess.CalledProcessError as e:\n print dimension_cmd, e.output\n else:\n try:\n dimension_cmd = [\"identify\", '-format', '%w,%h,%z', image]\n width, height, depth = subprocess.check_output(dimension_cmd).split(\",\")\n except subprocess.CalledProcessError as e:\n print dimension_cmd, e.output\n return width, height, depth", "def width(self):\n return self.figure.scene.get_size()[0]", "def info(self):\n c = 0\n for s in self.segments:\n c+= len(s.points)\n return \"Nodes : %5i\\nSegments : %5i\\nPoints : %5i\" % (len(self.nodes), len(self.segments), c)", "def dimensions(self):\n d=dict()\n d['div'] = (self._div)\n d['var'] = len(self.used_variables)\n d['x'] = self.Xdim\n d['y'] = self.Ydim\n d['lev'] = self.lev\n d['dir'] = self._nb_dir\n return(d)", "def height(self) -> int:", "def height(self) -> int:", "def height(self) -> int:", "def getDepth(self):\n return self.movies.depth", "def getDepth(self):\n return self.movies.depth", "def dimension_count(self):\n return self._dimensionCount", "def get_num_of_images(self):", "def get_size(self):\n tmpsize = 0\n for variable in self.variables:\n tmpsize += variable.get_size()\n for subchunk in self.subchunks:\n tmpsize += subchunk.get_size()\n return tmpsize", "def get_data_dimensions(self):\n return 
image_utils.convert_shape_indexing(self._get_data_dimensions_rc(),\"rc\",self.image_indexing)", "def getWidth(self):\r\n width = 1\r\n if self.orientation == \"h\":\r\n width = self.size\r\n return width", "def __len__(self):\n return self.width * self.height", "def print_shapes(self):\n total_filter = 0\n for layer_name, blob in self.net.blobs.iteritems():\n ts = ''\n for x in blob.data.shape:\n ts += '%5i' % x\n print ts, ' ' * (25 - len(ts)), layer_name", "def get_dimension_width(self):\n pass", "def size(self):\n return self._N", "def size(self):\n return self.width", "def size(self):\n return self.width", "def size(self):\n return self.width", "def size(self):\n return self.width", "def size(self):\n return self.width", "def size(self):\n return self.width", "def size(self):\n return self.width", "def size(self) -> int:\n return self.root.size if not self.empty() else 0", "def tree_size(self):\n if self._tree_size is not None:\n return self._tree_size\n if self.is_root:\n self.arbor._setup_tree(self)\n # pass back to the arbor to avoid calculating again\n self.arbor._store_node_info(self, '_tree_size')\n else:\n self._tree_size = len(list(self[\"tree\"]))\n return self._tree_size", "def d(self):\r\n return self.size.z", "def area(self):\n return self.__size ** 2", "def numPixels(self):\n self._logger.debug(\"numPixels\")\n return self.count", "def part_1() -> int:\n initial_input = _load_input()\n rows = len(initial_input)\n cols = len(initial_input[0])\n\n input = initial_input.copy()\n total_glow_count = 0\n\n for _ in range(100):\n flashed = list()\n for row in range(rows):\n for col in range(cols):\n coords = [[col, row]]\n new_input, glow_count = _get_glow_counts(coords, input, flashed)\n input = new_input\n total_glow_count += glow_count\n\n return total_glow_count", "def getWidth(self):\n caller = self.getMyCaller()\n if caller.startsWith(\"java.\") or caller.startsWith(\"javax.\"):\n return super(Program_Test, self).getWidth()\n else:\n return getCentralRegionSize().width", "def dimension(self):\n return 3*self.genus - 3 + self.n", "def w(self):\r\n return self.size.x", "def _get_parameter_count(self):\n parameters_d = 5;\n size_h = self.model.size_h\n return (size_h - 1) + size_h * (\n (size_h - 1) + parameters_d + (self.model.size_aa - 1) + \n (self.model.size_ss - 1) + (self.model.size_cis - 1)\n )", "def dim(self) -> int:", "def get_image_sizes():\n widths = []\n heights = []\n\n from settings import folders_location\n for individual_folder_name in listdir(folders_location):\n individual_training_folder_path = folders_location + individual_folder_name + \"/training/\"\n\n image_paths = listdir(individual_training_folder_path)\n for image_path in image_paths:\n img = cv2.imread(individual_training_folder_path + image_path)\n\n height, width, channel = img.shape\n widths.append(width)\n heights.append(height)\n\n print(individual_training_folder_path + image_path)\n\n print(\"Min: %s, Max: %s\" % (np.min(widths), np.max(widths)))\n print(\"Average: %s\" % (np.average(widths)))\n\n return widths", "def __len__(self) -> int:\n return self.width * self.height", "def get_state_size(self) -> Tuple[int, int]:\n return self.height, self.width", "def __len__(self) -> int:\n\n length = self.n_classes * 100\n\n return length" ]
[ "0.68444043", "0.6686202", "0.65267235", "0.6432864", "0.6241753", "0.62116355", "0.61252075", "0.60722893", "0.60547465", "0.6041557", "0.5984962", "0.59782183", "0.5959491", "0.5959491", "0.59516287", "0.58823055", "0.5880708", "0.58763885", "0.58660775", "0.5841817", "0.5841315", "0.58325195", "0.5816858", "0.5810273", "0.58020914", "0.57989806", "0.57937455", "0.5761701", "0.5761701", "0.5760636", "0.57600653", "0.5759077", "0.5732025", "0.5730033", "0.57295513", "0.5709344", "0.57001036", "0.56789726", "0.5678307", "0.56658155", "0.56628275", "0.5658273", "0.5657515", "0.5646124", "0.56450975", "0.5637067", "0.5636269", "0.5631527", "0.562135", "0.5612045", "0.56115633", "0.560551", "0.5604258", "0.559656", "0.55928487", "0.55903226", "0.55807936", "0.55772173", "0.5566983", "0.5564732", "0.55622995", "0.5558794", "0.5557737", "0.5554277", "0.55533284", "0.5548958", "0.5548958", "0.5548958", "0.5540994", "0.5540994", "0.55396587", "0.5539447", "0.55366766", "0.55292594", "0.5526467", "0.55247545", "0.5523067", "0.5519904", "0.5518538", "0.5514533", "0.5514533", "0.5514533", "0.5514533", "0.5514533", "0.5514533", "0.5514533", "0.55115247", "0.5507803", "0.55077034", "0.5500795", "0.5498739", "0.54950607", "0.5491929", "0.5479614", "0.54791903", "0.5475781", "0.54740494", "0.54716146", "0.5471406", "0.54686224", "0.54681677" ]
0.0
-1
Creates a new Cuckoo Filter ``key`` with desired probability of false positives ``errorRate`` expected entries to be inserted as ``size``.
def topkReserve(self, key, k, width, depth, decay): params = [key, k, width, depth, decay] return self.execute_command(self.TOPK_RESERVE, *params)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def bfCreate(self, key, errorRate, capacity, expansion=None, noScale=None):\n params = [key, errorRate, capacity]\n self.appendExpansion(params, expansion)\n self.appendNoScale(params, noScale)\n\n return self.execute_command(self.BF_RESERVE, *params)", "def __init__(self, key_size=1024):\n\t\tif not (key_size % 256 == 0 and key_size >= 1024):\n\t\t\t\traise ValueError(\"RSA key length must be a multiple of 256 and >= 1024\")\n\t\telse:\n\t\t\tself.key_size = key_size", "def __init__(self, server, bfkeypreffix, capacity, error_rate=0.001):\n if not (0 < error_rate < 1):\n raise ValueError(\"Error_Rate must be between 0 and 1.\")\n if not capacity > 0:\n raise ValueError(\"Capacity must be > 0\")\n # given M = num_bits, k = num_slices, P = error_rate, n = capacity\n # k = log2(1/P)\n # solving for m = bits_per_slice\n # n ~= M * ((ln(2) ** 2) / abs(ln(P)))\n # n ~= (k * m) * ((ln(2) ** 2) / abs(ln(P)))\n # m ~= n * abs(ln(P)) / (k * (ln(2) ** 2))\n num_slices = int(math.ceil(math.log(1.0 / error_rate, 2)))\n bits_per_slice = int(math.ceil(\n (capacity * abs(math.log(error_rate))) /\n (num_slices * (math.log(2) ** 2))))\n if bits_per_slice > MAX_PER_SLICE_SIZE:\n raise ValueError(\"Capacity and error_rate make per slice size extended, MAX_PER_SLICE_SIZE is %s\" % (MAX_PER_SLICE_SIZE))\n self._setup(error_rate, num_slices, bits_per_slice, capacity, 0, server, bfkeypreffix)", "def __init__(__self__, *,\n size: pulumi.Input[int]):\n pulumi.set(__self__, \"size\", size)", "def __init__(self, size, parameters):\n\n self.weights = self.init_weights(size)\n self.alpha = parameters['alpha']\n self.epsilon = parameters['epsilon']\n self.gamma = parameters['gamma']\n self.value = 0.0 #np.random.random()", "def test_keysize_change(self):\n # This bundle used to trigger a bug in the RDATA sorting before hashing\n self._test_file(\n \"ksr-root-2016-q3-0.xml\",\n filter_ids=[\"a6b6162e-b299-427e-b11b-1a8c54a08910\"],\n )", "def process(self, key, value):\n if key in self.elements:\n raise Exception(\"This implementation works only for aggregated data\")\n seed = np.random.exponential(1.0 / (value**self.sample_p))\n self.elements[key] = (seed, value)\n\n # Optimization: instead of removing excess elements from the sample\n # every time its size reaches k+1, we only remove elements after the\n # number of elements in the sample exceeds 2k.\n if len(self.elements) > 2 * self.k:\n self._remove_additional_elements()", "def __init__(self, ksize_low, ksize_high=None): \n self._sigma_low = 0.3*(ksize_low//2 - 1) + 0.8\n \n if ksize_high is None:\n self._sigma_high = np.sqrt(2)*self._sigma_low\n else:\n self._sigma_high = 0.3*(ksize_high//2 - 1) + 0.8", "def __init__(self, ksize: torch.Tensor = 7, sigma: torch.Tensor = 5):\r\n super().__init__()\r\n self.ksize = ksize\r\n self.sigma = sigma\r\n\r\n self.conv2d_guass = get_gaussian_kernel(self.ksize, self.sigma)", "def __init__(self, num_filters, filter_size, padding=0, stride=1):\n self.num_filters = num_filters\n self.filter_size = filter_size\n self.padding = padding\n self.stride = stride\n self.conv_filter = np.random.rand(num_filters, filter_size, filter_size)/(filter_size*filter_size)", "def __init__(self, pad_size, input_size, pre_pad=False):\n self.pre_pad = pre_pad\n self.pad_size = pad_size\n self.input_size = input_size\n\n self.build()", "def __init__(self, input_size=88):\n self._input_size = input_size", "def __init__(self, size):\n self.size = size\n self.last_valid = 0\n self.q = [None] * size\n self.modder = self.ModIndex(size)", "def 
__init__(self, k, num_buckets, fp_size, bucket_size, max_iter):\n self.children: List[Node] = []\n self.parent: Optional[Node] = None\n self.filter = CuckooFilterBit(num_buckets, fp_size, bucket_size, max_iter)\n\n self.dataset_id: Optional[str] = None\n self.k = k", "def add(self, key):\n\t\t#super(CountingBloomFilter, self).add(key)\n\t\t#super(CountingBloomFilter, self).generateStats()\n\t\tfor i in self.getBitArrayIndices(key):\n\t\t\tself.ba[i] += 1\n\t\tself.n += 1", "def __init__(self, size, seed=None):\n self.size = size\n self.seed = seed", "def __init__(self, action_size: int, epsilon: float, max_policy) -> None:\n self._action_size = action_size\n self._epsilon = epsilon\n self._max_policy = max_policy", "def __init__( \n self, \n popSize, \n eliteSize, \n crossoverRate=0.5, \n mutationRate=0.01, \n generations=50, \n dressCode=None, \n color=None, \n budget=None,\n boost=False,\n error=None,\n show=False\n ):\n self.popSize = popSize\n self.eliteSize = eliteSize\n self.crossoverRate = crossoverRate\n self.mutationRate = mutationRate\n self.generations = generations\n self.currentGeneration = np.array([])\n self.currentGenerationSorted = np.array([])\n self.boost = boost\n self.error = error\n self.progress = []\n self.show = show\n\n # user input\n self.dressCode = dressCode\n self.color = color\n self.budget = budget", "def resized(self,size=1.,tol=1.e-5):\n s = self.sizes()\n s[s<tol*s.max()] = size\n return self.scale(size/s)", "def __init__(\n self,\n action_size: int,\n seed: int,\n mu: float = 0.0,\n theta: float = 0.15,\n sigma: float = 0.1,\n ):\n self.mu = mu * np.ones(action_size)\n self.theta = theta\n self.sigma = sigma\n self.seed = random.seed(seed)\n self.reset()", "def __init__(self, init_size=8):\n self.size = 0\n self.buckets = [LinkedList() for i in range(init_size)]", "def __init__(self, size, pad_value = 0):\n self.size = size\n self.pad_value = pad_value", "def process(self, key, value):\n if key in self.elements:\n seed, count = self.elements[key]\n self.elements[key] = (seed, count + value)\n else:\n pred = self.advice_obj.predict(key)\n # The seed hash(key) is drawn from the exponential distribution,\n # with parameter that is the predicted frequency raised to the p-th\n # power.\n seed = self.hash_func(key) / float(self.func_of_freq(pred))\n self.elements[key] = (seed, value)\n\n # Optimization: instead of removing excess elements from the sample\n # every time its size reaches k+1, we only remove elements after\n # the number of elements in the sample exceeds 2k.\n if len(self.elements) > 2 * self.k:\n self._remove_additional_elements()", "def __init__(self, size, channels_last: bool = False):\n super().__init__()\n if isinstance(size, numbers.Number):\n size = (int(size), int(size))\n assert len(size) == 2, \"forced input size must be len of 2 (w, h)\"\n self._size = size\n self.channels_last = channels_last", "def __init__(self, key):\n self.key = key\n self.BLOCK_SIZE = 16", "def initialise_source(self, c, key):\n if key == 'p':\n return 1e5\n elif key == 'h':\n if self.Q.val < 0 and self.Q.is_set:\n return 1e5\n elif self.Q.val > 0 and self.Q.is_set:\n return 5e5\n else:\n return 3e5", "def __init__(self, k):\r\n self.maxlen = k\r\n self.queue = []", "def __init__(self, size):\n self.values = [False] * size\n self.size = size", "def __init__(self, sigma=80, kernel_sizes=[1, 5, 10, 20, 30, 50], trainSize=80):\n\n self.sigma = sigma\n self.original_image = misc.face(gray=True)\n self.blurred_image = image.gaussian_filter( self.original_image, sigma 
)\n self.kernel_sizes = kernel_sizes\n self.trainSize = trainSize\n self.trainingSet = []\n self.testSet = []", "def __init__(self, size, scalable):\n self.__size = size\n self.__scalable = scalable\n self.__num_records = 0\n self.__buckets = []\n for i in range(self.__size):\n self.__buckets.append(LinkedList())", "def configure_minibatch(\n size: Sizing, get_length: Optional[Callable[[ItemT], int]] = None\n) -> BatcherT:\n optionals = {\"get_length\": get_length} if get_length is not None else {}\n return partial(minibatch, size=size, **optionals)", "def bf_counter(file_name, k, n, capacity, error_rate, verbose=False):\n if verbose:\n start = time.time()\n print('BFCounter started.')\n\n heap = []\n for i in range(n):\n heap.append((0, ''))\n\n bf = BloomFilter(capacity, error_rate, 'kmer_bf')\n\n kmer_counter = defaultdict(lambda: 1)\n\n # Assign functions to local variables for performance improvement\n add_to_bf = bf.add\n heap_pushpop = heapq.heappushpop\n\n with open(file_name, 'r') as f:\n line_num = 0\n for line in f:\n if line_num % 4 == 1: # dna sequence\n kmer_count = len(line) - k\n for i in range(kmer_count):\n kmer = line[i:i + k]\n if kmer not in bf: # not in Bloom Filter\n add_to_bf(kmer)\n else: # in Bloom Filter\n kmer_counter[kmer] += 1\n line_num += 1\n if verbose:\n end_hash = time.time()\n hash_table_size = sys.getsizeof(kmer_counter) / (1024 ** 2)\n print('Hash table is created in {:.2f} seconds.'.format(\n end_hash - start))\n print('Hash table size: {:.2f} MB.'.format(hash_table_size))\n start_populate = time.time()\n print('Populating the heap...')\n\n for count, kmer in kmer_counter.items():\n # insert to the heap if count is bigger than minimum\n if count > heap[0][0]:\n heap_pushpop(heap, (count, kmer))\n\n if verbose:\n end_populate = time.time()\n print('Heap is populated in {:.2f} seconds.'.format(\n end_populate - start_populate\n ))\n\n os.remove('kmer_bf')\n if verbose:\n end = time.time()\n print('BFCounter is completed in {:.2f} seconds.'.format(end - start))\n\n return heap", "def __call__(self, size, confidence_level=0.95):\r\n raise NotImplementedError(\"Subclasses must implement __call__.\")", "def key_size(self) -> int:\n pass", "def key_size(self) -> int:\n pass", "def random_state(hilb, key, *, size=None, dtype=np.float32):\n return random_state(hilb, key, size, dtype=dtype)", "def _generate_table(self):\n for i in xrange(32):\n self._table.append(\n BloomFilter(\n capacity=self.__capacity,\n error_rate=self.__error_rate\n )\n )", "def __init__(self, values=1000000):\n self.size = int(sqrt(values))\n self.buckets = [None] * self.size", "def __init__(self, size: int):\n self._storage = []\n self._maxsize = size\n self._next_idx = 0\n self._hit_count = np.zeros(size)\n self._eviction_started = False\n self._num_timesteps_added = 0\n self._num_timesteps_added_wrap = 0\n self._num_timesteps_sampled = 0\n self._evicted_hit_stats = WindowStat(\"evicted_hit\", 1000)\n self._est_size_bytes = 0", "def layer_weight_init(self, size):\n # TODO: make smarter init\n return np.random.uniform(size=size)", "def __init__(self, key):\n\n def keys(key, num_rounds):\n \"\"\"Yields the permuted key bitstring for i = 1..num_rounds\"\"\"\n C, D = key[:28], key[28:]\n # Rounds are 1-indexed, so shift array over by one\n left_shifts = [None, 1, 1, 2, 2, 2, 2, 2, 2, 1, 2, 2, 2, 2, 2, 2, 1]\n for i in range(1, num_rounds + 1):\n # Negate each rotation to rotate left.\n C, D = rotate(C, -left_shifts[i]), rotate(D, -left_shifts[i])\n yield self.permute(C + D, 
self._CD_permutation)\n\n self.key = list(bits_of(key, 64))\n # Permute the key. The permutation discards the parity bits...\n self.key = self.permute(self.key, self._key_permutation)\n self.number_of_rounds = 16\n # A list of the 16 keys K1 .. K16, shifted over by one to allow 1-indexing.\n self.keys = [None] + list(keys(self.key, self.number_of_rounds))", "def __init__(self,\n name,\n input_size,\n state_size):\n self._input_size = input_size\n self._state_size = state_size\n super(BiGRU, self).__init__(name)", "def Pool2DOptionsAddFilterWidth(builder, filterWidth):\n return AddFilterWidth(builder, filterWidth)", "def __init__(self, k=10, cutoff=0.5):\n if k < 0:\n raise ValueError('k must be positive')\n super(PrecisionLower, self).__init__(k, cutoff)", "def cfCreate(self, key, capacity, expansion=None, bucket_size=None, max_iterations=None):\n params = [key, capacity]\n self.appendExpansion(params, expansion)\n self.appendBucketSize(params, bucket_size)\n self.appendMaxIterations(params, max_iterations)\n\n return self.execute_command(self.CF_RESERVE, *params)", "def __init__(self, size):\n self.size = size", "def __init__(self, size):\n self.size = size", "def __init__(self, kernel_size, *args, **kwargs):\n super().__init__()\n self.kernel_size = kernel_size", "def __init__(self, kernel_size, *args, **kwargs):\n super().__init__()\n self.kernel_size = kernel_size", "def __init__(self, size):\n self.size = size\n self.current_size = 0\n self.values = collections.deque()", "def __init__(self, size: int, r: float, k: float, min_age: int,\n max_age: int, mortality_rate: int, social_distance_per: int,\n infection_range: float, recovery_time: int,\n total_healthcare_capacity: int, mask_effectiveness: dict,\n speed: float, social_distancing_at: int,\n mask_wearing_at: int) -> None:\n self.population = Population(size)\n self.virus = Virus(infection_range, recovery_time,\n total_healthcare_capacity)\n self.recovery_time = recovery_time\n self.total_healthcare_capacity = total_healthcare_capacity\n self.movement = Movement()\n self.size = size\n self.x_bounds = [0, 1]\n self.y_bounds = [0, 1]\n self.k = k\n self.r = r\n self.destinations = np.random.uniform(low=0, high=1,\n size=(self.size, 2))\n self.min_age = min_age\n self.max_age = max_age\n self.mortality_rate = mortality_rate\n self.social_distance_per = social_distance_per\n self.mask_effectiveness = mask_effectiveness\n self.speed = speed\n self.persons = self.population.get_person()\n self.enforce_social_distance_at = social_distancing_at\n self.enforce_mask_wearing_at = mask_wearing_at\n self.social_distancing_enforced = False\n self.mask_wearing_enforced = False\n self.initialize_persons()", "def __init__(self, k, p, sample_p=1):\n # Maximum sample size\n self.k = k\n\n # A dictionary containing the sampled elements\n # The dictionary key is the key of the element\n # The value is a tuple (seed, count)\n self.elements = {}\n\n # The function of the frequencies that the sketch estimates\n # For now it's the p-th frequency moment, but in the future we may\n # support other functions (passed as a parameter)\n self.func_of_freq = lambda x: x**p\n\n # The power of values used for the sampling weights\n self.sample_p = sample_p", "def initialise_target(self, c, key):\n if key == 'p':\n return 1e5\n elif key == 'h':\n if self.Q.val < 0 and self.Q.is_set:\n return 5e5\n elif self.Q.val > 0 and self.Q.is_set:\n return 1e5\n else:\n return 3e5", "def __call__(self, shape, rate, size=None, **kwargs):\n return super().__call__(shape, 1.0 / 
rate, size=size, **kwargs)", "def create(cls, **dictionary):\n if \"size\" not in dictionary:\n dummy = cls(2, 4)\n else:\n dummy = cls(3)\n dummy.update(**dictionary)\n return dummy", "def __init__(self, size, alpha):\n super(PrioritizedReplayBuffer, self).__init__(size)\n assert alpha >= 0\n self._alpha = alpha\n\n it_capacity = 1\n while it_capacity < size:\n it_capacity *= 2\n\n self._it_sum = SumSegmentTree(it_capacity)\n self._it_min = MinSegmentTree(it_capacity)\n self._max_priority = 1.0", "def __init__(self,\n target_size,\n pad_ratio=0.6,\n pad_with_fixed_color=False,\n pad_value=(0, 0, 0)):\n assert isinstance(target_size, int)\n assert isinstance(pad_ratio, float)\n assert isinstance(pad_with_fixed_color, bool)\n assert isinstance(pad_value, tuple)\n\n self.target_size = target_size\n self.pad_ratio = pad_ratio\n self.pad_with_fixed_color = pad_with_fixed_color\n self.pad_value = pad_value", "def configure_minibatch_by_padded_size(\n *,\n size: Sizing,\n buffer: int,\n discard_oversize: bool,\n get_length: Optional[Callable[[ItemT], int]] = None\n) -> BatcherT:\n # Avoid displacing optional values from the underlying function.\n optionals = {\"get_length\": get_length} if get_length is not None else {}\n return partial(\n minibatch_by_padded_size,\n size=size,\n buffer=buffer,\n discard_oversize=discard_oversize,\n **optionals\n )", "def __init__(self):\n self.size = 1000\n self.bucket = [None] * self.size", "def __init__(self, max_size=5000, gamma=0.0):\n self.buffer = deque(maxlen=max_size)\n if not 0 <= gamma <= 1:\n raise ValueError(\"gamma need to be in range [0,1] not \" + str(gamma))\n self.gamma = gamma", "def __init__(self, image_size, #is_color, mean, scale,\n crop_size=0, pad=28, color='RGB',#'BGR',\n use_cutout=False,\n use_mirroring=False,\n use_random_crop=False,\n use_center_crop=False,\n use_random_gray=False):\n self.image_size = image_size\n pass", "def __init__(self, k: int) -> None:\n\n assert k > 2, \"for k = 2 use Bernoulli distribution.\"\n\n self.k = k", "def __init__(self, k):\n self._data = []\n self._length = k", "def __init__(self, pop_size=5000, size_next_gen=300, lucky_per=0.10, unique_pop=False, add_naive=False, **kwargs):\n self.pop_size = pop_size\n self.size_next_gen = size_next_gen\n\n self.size_best_parents = (1 - lucky_per) * self.size_next_gen\n if self.size_best_parents % 2 == 1: self.size_best_parents += 1\n self.size_lucky_parents = self.size_next_gen - self.size_best_parents\n\n self.unique_pop = unique_pop\n self.add_naive = add_naive\n self.util = Util(**kwargs)\n self.cache = LRU(10000)", "def dummy_data(size):\n add_dummy_data(size)", "def __init__(self, width, growth_factor, num_finite_buckets):\n\n if num_finite_buckets < 0:\n raise ValueError('num_finite_buckets must be >= 0 (was %d)' %\n num_finite_buckets)\n\n self.width = width\n self.growth_factor = growth_factor\n self.num_finite_buckets = num_finite_buckets\n self.total_buckets = num_finite_buckets + 2\n self.underflow_bucket = 0\n self.overflow_bucket = self.total_buckets - 1\n\n self._lower_bounds = list(self._generate_lower_bounds())", "def __init__(self, kernel_size):\r\n super().__init__()\r\n self.kernel_size = kernel_size", "def __init__(self, kernel_size):\r\n super().__init__()\r\n self.kernel_size = kernel_size", "def __init__(self,\n size: int,\n counter_num: int,\n time_window: float,\n update_sample_size: int=5):\n super().__init__(size)\n\n self.__counters = []\n for i in range(counter_num):\n sketch = FullCounter()\n self.__counters.append(sketch)\n\n 
self.__time_window = time_window\n self.__processed_windows = 0\n self.__from_window_start = 0.0\n\n self.__priority_dict = PriorityDict()\n\n self.__update_sample_size = update_sample_size", "def __init__(self, iterable_input, batch_size, buckets, pad_index, only_full=False, field=None,\n shuffle=False, buffer_size=None, name='Bucket Batch', verbose=True):\n super().__init__(iterable_input=iterable_input, name=name, verbose=verbose)\n self.batch_size = batch_size\n self.buckets = buckets\n self.max_length = buckets[-1]\n self.pad_index = pad_index\n self.only_full = only_full\n self.field = field\n self.shuffle = shuffle\n self.buffer_size = self.batch_size if buffer_size is None else buffer_size", "def __init__(self, k=10, cutoff=0.5):\n if k < 0:\n raise ValueError('k must be positive')\n super(Precision, self).__init__()\n self.k = k\n self.cutoff = cutoff", "def initialize(self, k, stats):\n\n k = k + 5\n\n qbin_sizes = 0.5 / k # Quantile sizes\n qbin_edges = 0.25 + qbin_sizes*np.arange(0, k+1) # Edge locations (in quantile terms)\n\n bin_edges = np.interp(qbin_edges, stats['quantile_basis'], stats['quantiles'])\n\n self.k = k\n self.n_bins = k + 2\n self.classes = list(range(1, self.n_bins + 2))\n self.edges = [-np.Inf] + [edge for edge in bin_edges] + [np.Inf]\n self.chi = np.zeros((2, self.n_bins + 1))\n\n dist = np.linspace(2, 1, self.n_bins) # Bins captured by observations\n scaled_dist = 0.9 * dist / dist.sum() # Scaling by 0.9 to allow for 0.1 emission prob of NaN\n self.chi[1, :-1] = scaled_dist # Paired emission dist\n self.chi[0, :-1] = np.flip(scaled_dist) # Unpaired emission dist\n self.chi[1, -1] = 0.1 # NaN observations\n self.chi[0, -1] = 0.1 # NaN observations\n\n self.n_params = 2*(self.n_bins-2)", "def __init__(self, size: int):\n self.size =size\n self.vals_list= []", "def add_gaussian_noise(\n key: Array,\n action: Array,\n stddev: float\n) -> Array:\n chex.assert_type(action, float)\n\n noise = jax.random.normal(key, shape=action.shape) * stddev\n return action + noise", "def __init__(self,\n name,\n input_size,\n state_size):\n self._input_size = input_size\n self._state_size = state_size\n super(GRU, self).__init__(name)", "def __init__(self, k):\n self.queue = []\n self.size = k", "def _set_markers_size(self, markers_size, key):\n self.markers_size[key] = markers_size\n self._update_markers(self.markers, key)", "def __init__(self, name, path, password=None, key_size=2048, **kwargs):\n self.key_size = key_size\n super().__init__(name, path, password)", "def clamp(self, key):\n\t\treturn DiscreteDistribution({ k : 0. if k != key else 1. 
for k in self.keys() })", "def __init__(self, key: bytearray):\n self.__key = key\n self.__KSA(bytearray([i for i in range(256)]))", "def __init__(self, ksize, stride=None):\n \n self._ksize = (ksize, ksize) if isinstance(ksize, int) else ksize\n self._pad = (0, 0, 0, 0)\n self._stride = stride\n \n if stride is None:\n self._stride = tuple(self._ksize)\n elif isinstance(stride, int):\n self._stride = (stride, stride)\n \n self._X_shape = None\n self._cols = None\n self._max_idx = None", "def __init__(self, k):\n self.queue = []\n self.size = k\n self.rear = 0", "def __init__(self, size):\n self.values = collections.deque(maxlen = size)", "def __init__(self, capacity: int, function) -> None:\n self.buckets = DynamicArray()\n for _ in range(capacity):\n self.buckets.append(LinkedList())\n self.capacity = capacity\n self.hash_function = function\n self.size = 0", "def __init__(self, key, updateProposer, sampleShape, numChains=1, updateProposerArg=None,\n numSamples=100, thermalizationSweeps=10, sweepSteps=10):\n\n stateShape = (numChains,) + sampleShape\n if global_defs.usePmap:\n stateShape = (global_defs.device_count(),) + stateShape\n self.states=jnp.zeros(stateShape, dtype=np.int32)\n\n self.updateProposer = updateProposer\n self.updateProposerArg = updateProposerArg\n\n self.key = key\n if global_defs.usePmap:\n self.key = jax.random.split(self.key, global_defs.device_count())\n self.thermalizationSweeps = thermalizationSweeps\n self.sweepSteps = sweepSteps\n self.numSamples = numSamples\n\n self.numChains = numChains\n\n # jit'd member functions\n self._get_samples_jitd = {} # will hold a jit'd function for each number of samples\n self._get_samples_gen_jitd = {} # will hold a jit'd function for each number of samples", "def _hpat_ensure_array_capacity(new_size, arr):\n\n k = len(arr)\n if k >= new_size:\n return arr\n\n n = k\n while n < new_size:\n n = 2 * n\n res = numpy.empty(n, arr.dtype)\n res[:k] = arr[:k]\n return res", "def __init__(self, size: int, alpha: float):\n super(PrioritizedReplayBuffer, self).__init__(size)\n assert alpha > 0\n self._alpha = alpha\n\n it_capacity = 1\n while it_capacity < size:\n it_capacity *= 2\n\n self._it_sum = SumSegmentTree(it_capacity)\n self._it_min = MinSegmentTree(it_capacity)\n self._max_priority = 1.0\n self._prio_change_stats = WindowStat(\"reprio\", 1000)", "def __init__(self, k, hash_func, p, advice_obj):\n # Maximum sample size\n self.k = k\n\n # The following hash function defines all the randomness used for\n # picking the sample\n self.hash_func = hash_func\n\n # A dictionary containing the sampled elements\n # The dictionary key is the key of the element\n # The value is a tuple (seed, count)\n self.elements = {}\n\n # The advice object\n self.advice_obj = advice_obj\n\n # The function of the frequencies that the sketch estimates\n # For now it's the p-th frequency moment, but in the future we may\n # support other functions (passed as a parameter)\n self.func_of_freq = lambda x: x**p", "def __init__(self, k=10, cutoff=0.5):\n if k < 0:\n raise ValueError('k must be positive')\n super(PrecisionUpper, self).__init__(k, cutoff)", "def __init__(self, capacity):\n self.capacity = capacity # Number of buckets in the hash table\n self.storage = [None] * capacity\n self.key_count = 0", "def __init__(self, size=0):\n self.__size = size\n try:\n size += 1\n if(size < 0):\n raise(ValueError)\n except TypeError:\n raise Exception('size must be an integer')\n except ValueError:\n raise Exception('size must be >= 0')", "def 
_gamma_confidence(sizes: Dict[str, int], c: Union[float, np.ndarray]=0.95) -> Dict[\n str, Tuple[np.ndarray, np.ndarray]]:\n d = {}\n for k, df in sizes.items():\n lower = scipy.stats.chi2.ppf((1-c)/2, df=df, scale=1/df)\n upper = scipy.stats.chi2.ppf((1+c)/2, df=df, scale=1/df)\n d[k] = (lower, upper)\n return d", "def gamma_mechanism(item_counts, k, epsilon):\n d = len(item_counts)\n radius = np.random.gamma(shape=d + 1, scale=1 / epsilon)\n noise = radius * np.random.uniform(low=-1, high=1, size=d)\n return sorted_top_k(item_counts + noise, k)", "def __init__(self, k, distance_function=euclidean):\n self.k = k\n self.dist = distance_function", "def __init__(self, size):\n self.__size = size\n self.integer_validator(\"size\", size)\n super().__init__(size, size)\n self._size = size", "def __init__(self, size=0):\n if type(size) != int:\n raise TypeError(\"size must be an integer\")\n elif size < 0:\n raise ValueError(\"size must be >= 0\")\n else:\n self.__size = size\n return", "def __init__(self, knapsack_size, items):\n self.knapsack_size = knapsack_size\n self.items = items\n self._cache = dict()\n # fill-in the cache with base cases' (subproblems') solutions\n for size in range(knapsack_size + 1):\n # if there are no items, the max value is 0\n self._cache[(0, size)] = 0\n for end in range(len(items) + 1):\n # if the knapsack's size is 0 no items fit, the max value is 0\n self._cache[(end, 0)] = 0", "def __init__(self, size=0):\n if isinstance(size, int) is not True:\n raise TypeError(\"size must be an integer\")\n elif size < 0:\n raise ValueError(\"size must be >= 0\")\n else:\n self.__size = size", "def test_sized_no_reuse(self):\n cache = LRUCache(max_size=5)\n for i in range(5):\n cache[i] = i\n for i in range(5):\n assert i in cache\n assert cache[i] == i\n for i in range(5, 10):\n cache[i] = i\n assert i in cache\n assert cache[i] == i\n assert i - 5 not in cache\n with pytest.raises(KeyError):\n assert cache[i - 5]", "def __init__(self):\n self.buckets = [-1] * 10\n self.length = len(self.buckets)", "def __init__(self, size=0):\n if isinstance(size, int):\n self.__size = size\n else:\n raise TypeError(\"size must be an integer\")\n if size < 0:\n raise ValueError(\"size must be >= 0\")" ]
[ "0.5268055", "0.5220767", "0.51832116", "0.51610947", "0.5103992", "0.5081258", "0.4993609", "0.4992658", "0.49491334", "0.49001232", "0.4897395", "0.48936903", "0.47643712", "0.4748526", "0.47471568", "0.47449547", "0.4744873", "0.47413242", "0.47407177", "0.473573", "0.47286245", "0.47226453", "0.47185385", "0.47085997", "0.47063935", "0.4704372", "0.4695341", "0.46917298", "0.46795306", "0.46679267", "0.46612978", "0.4638791", "0.46357504", "0.45987275", "0.45987275", "0.4590839", "0.45888945", "0.45864195", "0.45834783", "0.45832086", "0.45824695", "0.45791504", "0.4574682", "0.45734823", "0.45734578", "0.45726317", "0.45726317", "0.45593888", "0.45593888", "0.45585304", "0.45547682", "0.45482573", "0.45470217", "0.45389965", "0.4537911", "0.45367068", "0.45359764", "0.45356855", "0.45318723", "0.45285448", "0.45255145", "0.45239633", "0.45208982", "0.4520474", "0.45202723", "0.4511465", "0.45072764", "0.45072764", "0.45000514", "0.44992486", "0.4497917", "0.4497209", "0.44964975", "0.44925243", "0.44916347", "0.4489042", "0.44877258", "0.44760194", "0.4473458", "0.44714832", "0.44706124", "0.44676703", "0.44643322", "0.44626966", "0.44586352", "0.445796", "0.44577914", "0.44576436", "0.44570574", "0.44532195", "0.44438124", "0.4432516", "0.4432485", "0.44300318", "0.44291636", "0.4427602", "0.44248953", "0.4424109", "0.4423951", "0.44222584", "0.44218194" ]
0.0
-1
Adds one ``item`` or more to a Cuckoo Filter ``key``.
def topkAdd(self, key, *items): params = [key] params += items return self.execute_command(self.TOPK_ADD, *params)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cfAdd(self, key, item):\n params = [key, item]\n \n return self.execute_command(self.CF_ADD, *params)", "def bfAdd(self, key, item):\n params = [key, item]\n \n return self.execute_command(self.BF_ADD, *params)", "def filter_matches_add(self, key, value):\n\t\tif key in self.filter_matches.keys():\n\t\t\tself.filter_matches[key].append(value)\n\t\telse:\n\t\t\tself.filter_matches[key]=[value]", "def add(self, key, value):", "def add(self, item):\n self.num_item += 1\n indexs = self.__get_indexs(item)\n for index in indexs:\n self.filter_bitarray[index] = True", "def append(self, item):\n # FIXME: this is only append if the key isn't already present\n key, value = item\n self._main[key] = value", "def add(self, key):\n if key in self:\n return True\n if not self.filters:\n filter = RedisLocalBloomFilter(\n server=self.server, \n bfkeypreffix = self.FILTER_KEY_FMT % (self.bfkeypreffix, self.filter_count),\n capacity=self.initial_capacity,\n error_rate=self.error_rate * (1.0 - self.ratio))\n self.filter_count += 1\n self.filters.append(filter)\n else:\n filter = self.filters[-1]\n if filter.count >= filter.capacity:\n capacity = filter.capacity * self.scale\n if capacity > MAX_PER_SLICE_SIZE:\n capacity = MAX_PER_SLICE_SIZE\n filter = RedisLocalBloomFilter(\n server=self.server,\n bfkeypreffix = self.FILTER_KEY_FMT % (self.bfkeypreffix, self.filter_count),\n capacity=capacity,\n error_rate=self.error_rate * (1.0 - self.ratio))\n self.filter_count += 1\n self.filters.append(filter)\n if self.max_filters > 0 and len(self.filters) >= self.max_filters:\n f = self.filters[0]\n f.clear()\n del self.filters[0]\n filter.add(key, skip_check=True)\n return False", "def add_item(self, key, item):\n self[key].add(item)\n try:\n self._reverse_store[item].add(key)\n except KeyError:\n self._reverse_store[item] = set([key])", "def cfAddNX(self, key, item):\n params = [key, item]\n \n return self.execute_command(self.CF_ADDNX, *params)", "def add(self, item):\n self._dict[item] = item", "def add(self, key):\n\t\t#super(CountingBloomFilter, self).add(key)\n\t\t#super(CountingBloomFilter, self).generateStats()\n\t\tfor i in self.getBitArrayIndices(key):\n\t\t\tself.ba[i] += 1\n\t\tself.n += 1", "def add_item(dct,item):\r\n if item not in dct[0]:\r\n print \"itemNotFound \",str(item)\r\n return False\r\n\r\n num=len(item)\r\n if num in dct:\r\n if item in dct[num]:\r\n return False\r\n else:\r\n dct[num].append(item)\r\n return True\r\n else:\r\n dct[num]=[item]\r\n return True", "def add(self, key, value):\n self.data.append((key, value))", "def add(self, item):", "def add(self, item):\n self.update(set([item]))", "def addItem(*args):", "def addItem(*args):", "def addItem(*args):", "def add(self, key, value):\n self._data.add_last(self._Item(key, value))", "def _add_item_by_item(self, item):\n self.item_list[item.call_number] = item", "def _single_setitem(self, key, item):\n self._dict[key] = item", "def add_filter(self, filter):\n self._filters.append(filter.as_dict())", "def add(self, key, val):\n self.obtain(key).append(val)", "def add(self, item):\n item = self._prepare_item(len(self), item)\n if item not in self._data:\n self._data.append(item)\n self.__log__.append(SetAdd(value=item))", "def add(self, item: Any) -> None:\n pass", "def add(self, key, value):\n newest = self._Item(key, value) # make new item instance\n walk = self._data.last() # walk backward looking for smaller key\n while walk is not None and newest < walk.element():\n walk = self._data.before(walk)\n if walk is None:\n 
self._data.add_first(newest) # new key is smallest\n else:\n self._data.add_after(walk, newest) # newest goes after walk", "def add(self, key, value):\n\t\tself.__add_key_to_bt(key)[3] = self.__add_key_value_to_ll(key, value)", "def add_item(self, key, data):\n hash_key = self.count_hash(key, len(self.slots))\n\n if self.slots[hash_key] is None:\n self.slots[hash_key] = key\n self.data[hash_key] = data\n else:\n if self.slots[hash_key] == key:\n self.data[hash_key] = data\n elif isinstance(self.slots[hash_key], int):\n self.slots[hash_key] = (self.slots[hash_key], key,)\n self.data[hash_key] = (self.data[hash_key], data,)\n elif len(self.slots[hash_key]) > 1:\n list_slot = list(self.slots[hash_key])\n list_data = list(self.data[hash_key])\n list_slot.append(key)\n list_data.append(data)\n self.slots[hash_key] = tuple(list_slot)\n self.data[hash_key] = tuple(list_data)", "def __setitem__(self, key, item):\n self.set_field(key, item)", "def addItem(self, key):\n if key in self.dictionary:\n raise Exception(\"Key already exist in dictionary\")\n self.dictionary[key] = WordInformation(self.MAX_RATING)", "def put(self, key, item):\n if key and item:\n self.cache_data[key] = item", "def put(self, key, item):\n if key and item:\n self.cache_data[key] = item", "def add(self, key, value):\r\n newest = Item(key, value) # make new item instance\r\n walk = self.data.last() # walk backward looking for smaller key\r\n while walk is not None and newest < walk.element():\r\n walk = self.data.before(walk)\r\n if walk is None:\r\n self.data.add_first(newest) # new key is smallest\r\n else:\r\n self.data.add_after(walk, newest) # newest goes after walk\r", "def add_key(mu_key):\n params['key'] = mu_key", "def add(self, new_filter: Filter) -> None:\r\n self.filters.append(new_filter)", "def add_item (self, item):\n new_item = CacheItem (item)\n cached = self.cache.get(hash(item))\n if cached is None:\n self.evict_or_add (new_item)\n cached.hits += 1", "def add_to_inventory(self, item, quantity):\n\t\tincreaseQuantity = None\n\t\taddToDict = True\n\t\tfor key in self.inventoryDictionary:\n\t\t\tif key.name == item.name:\n\t\t\t\taddToDict = False\n\t\t\t\tincreaseQuantity = key\n\t\t\t\tbreak\n\t\t\t\t\n\n\t\t\telse:\n\t\t\t\taddToDict = True\n\t\t\t\t\n\n\t\tif addToDict:\n\t\t\tself.inventoryDictionary[item] = quantity\n\t\telse:\n\t\t\tself.inventoryDictionary[increaseQuantity] += quantity", "def add_to_group(self,item):\n self.items.append(item)\n self.n += 1", "def append(self, item, **data):\n self._items.append(item)\n if data:\n self._data[item] = data", "def add(self, key, value):\n new = self._Item(key, value)\n\n if self.is_empty():\n self._data.append(new)\n else:\n for i, item in enumerate(self._data):\n if new <= item:\n self._data.insert(i, new)\n break\n if i == len(self) - 1:\n self._data.append(new)\n break", "def add_item(self, item):\n self.items_with_price.update(item)", "def put(self, key, item):\n # Do nothing if key is NoneType or item is NoneType\n if (key is None or item is None):\n return\n\n # If Key exists, update Cache Item\n if key in self.cache_data:\n for ci in self.LFU:\n if ci.key is key:\n ci.updateItem(item)\n else:\n # Length will be longer than max capacity, make room\n if len(self.cache_data) == self.MAX_ITEMS:\n\n # Determine discarded cache item\n discard = self.LFU[0]\n for x in self.LFU:\n if x.freq < discard.freq or \\\n x.freq == discard.freq and x.age > discard.age:\n discard = x\n\n # Discard Cache Item\n print(\"DISCARD: {}\".format(discard.key))\n del 
self.cache_data[discard.key]\n self.LFU.remove(discard)\n\n # Add new Cache Item\n data = LFUCacheItem(key, item, 0, 0)\n self.LFU.append(data)\n\n # increase age of all items\n for x in self.LFU:\n x.age += 1\n\n self.cache_data[key] = item", "def add(self, item: Mapping[Hashable, Any], **kwargs: Any) -> None:\n self.contents.update(item, **kwargs)\n return", "def add(self, key, value):\n raise NotImplementedError('must be implemented by subclass')", "def add(self, key, value):\n raise NotImplementedError('must be implemented by subclass')", "def add(self, item, issue):\n if self.has_key(item):\n self[item].append(issue)\n else:\n self[item] = [issue]\n return 1", "def add(self, key, val):\n key_lower = key.lower()\n new_vals = key, val\n # Keep the common case aka no item present as fast as possible\n vals = _dict_setdefault(self, key_lower, new_vals)\n if new_vals is not vals:\n # new_vals was not inserted, as there was a previous one\n if isinstance(vals, list):\n # If already several items got inserted, we have a list\n vals.append(val)\n else:\n # vals should be a tuple then, i.e. only one item so far\n # Need to convert the tuple to list for further extension\n _dict_setitem(self, key_lower, [vals[0], vals[1], val])", "def add_item(self, item: _T) -> None:\n if item not in self.item_to_index:\n self.item_to_index[item] = len(self.index_to_item)\n self.index_to_item.append(item)", "def __setitem__(self, key, value) :\n attributeslist = getattr(self.request, \"_%s_attributes\" % self.name)\n for i in range(len(attributeslist)) :\n attribute = attributeslist[i]\n for j in range(len(attribute)) :\n (attrname, attrvalue) = attribute[j]\n if attrname == key :\n attribute[j][1].append(value)\n return\n attribute.append((key, [value]))", "def addKey(self, time, name, value, view) -> None:\n ...", "def add_item(product, price):\n ADD_PRODUCTS[product] = price", "def add(self, *items):", "def add(self, key, skip_check=False):\n bits_per_slice = self.bits_per_slice\n hashes = self.make_hashes(key)\n found_all_bits = True\n if self.count > self.capacity:\n raise IndexError(\"RedisLocalBloomFilter is at capacity\")\n pipe = self.server.pipeline(transaction=False) \n sliceIdx = 0\n for k in hashes:\n sliceKey = self.SLICE_KEY_FMT % (self.bfkeypreffix, sliceIdx)\n pipe.setbit(sliceKey, k, 1)\n sliceIdx += 1\n pipeResults = pipe.execute()\n if not skip_check:\n for pipeResult in pipeResults:\n if not pipeResult:\n found_all_bits = False\n break\n if skip_check:\n self.count += 1\n return False\n elif not found_all_bits:\n self.count += 1\n return False\n else:\n return True", "def put(self, key, item):\n if key or item:\n self.cache_data[key] = item\n if len(self.cache_data) > BaseCaching.MAX_ITEMS:\n print(\"DISCARD: {}\".format(self.last))\n del self.cache_data[self.last]\n self.last = key", "def put(self, key, item):\n if key is not None and item is not None:\n self.cache_data[key] = item\n if key not in self.cache_list:\n self.cache_list.append(key)\n if len(self.cache_data) > self.MAX_ITEMS:\n popped_key = self.cache_list.pop(0)\n print(f\"DISCARD: {popped_key}\")\n del self.cache_data[popped_key]", "def add(self, key):\r\n if key not in self.map:\r\n end = self.end\r\n curr = end[PREV]\r\n curr[NEXT] = end[PREV] = self.map[key] = [key, curr, end]\r\n if self.emitter:\r\n self.emitter.emit()", "def add(self, *items):\n for item in items:\n self.unsorted.append(item)\n key = item[0]\n self.index[key] = item\n return self", "def add_item(self, item_id, item_title, score, 
filter_stopwords=False):\n with self._r.pipeline() as pipe:\n for prefix in self._prefixes(item_title, filter_stopwords=filter_stopwords):\n pipe.zadd(prefix, item_id, score)\n pipe.hset('$titles', item_id, item_title)\n pipe.execute()\n return True", "def add(self, key, val, expiry_time=0, min_compress_len=0):\n\t\treturn self._set(\"add\", key, val, expiry_time, min_compress_len)", "def addItem(self, item):\n self.items.append(item)\n self.totalWeight += item", "def maybe_outfeed(self, key, value):\n if self._filters is not None:\n if any(f in key for f in self._filters):\n self._vals[key] = value\n else:\n self._vals[key] = value", "def add_item(self, item):\n self.items.append(item)", "def add(self, key, value):\n if not key in self:\n self.keys.append(key)\n self.dict[key] = value", "def add_item(self, item):\n self.items.append(item)\n self.length += 1", "def add_custom(self, key: _K, value: _V) -> None:\n self._customs.setdefault(key, set()).add(value)\n self._merged.setdefault(key, set()).add(value)", "def add_new(self, item, key):\n if key in self._items:\n raise DuplicateListHeapItemException(key)\n if len(self._items) >= self._max_limit:\n raise MaxItemLimitReachedException()\n self._items[key] = item\n self._listbox.insert(END, key)", "def add_item(key, obj, dst):\n\n if key not in dst:\n dst[key] = []\n dst[key].append(obj)", "def addToExtra(self,key,val):\n if self.extra == None: \n self.extra = {} \n self.extra[key] = val", "def __setitem__(self, key, value):\n index=self._index(key)\n if index==-1:\n self._item.append(Item(key,value))\n self._size+=1\n else:\n self._item[index].value=value", "def __setitem__(self, key, item):\n assert isinstance(key,list) and isinstance(item,list) and len(key)==2 and len(item)==2\n self._data[self.__ptBin(key[0])][self.__etaBin(key[1])] = item", "def __setitem__(self, key, value):\n self.setdefault(key, []).append(value)", "def __setitem__(self, key, value):\n self.setdefault(key, []).append(value)", "def __setitem__(self, key, value):\r\n self.setdefault(key, []).append(value)", "def insert(self, key):\r\n index = self.search(key)\r\n self.keys.insert(index, key)", "def addItems(*args):", "def add_item(self, item):\n self.items.append(item)\n self.item_count += 1\n self.max_length = max(self.max_length, len(item.samples))", "def put(self, key, item):\n if key is not None and item is not None:\n # modify the time and change the next newer value\n self.timesKey[key] = self.time\n self.time += 1\n\n # add the new item\n self.cache_data[key] = item\n\n if len(self.cache_data) > BaseCaching.MAX_ITEMS:\n discard_key = None\n newer = self.time - 2\n\n for _key, _value in self.timesKey.items():\n if newer == _value:\n discard_key = _key\n break\n\n # del key in time and cache data\n del self.cache_data[discard_key]\n del self.timesKey[discard_key]\n\n print(\"DISCARD: {}\".format(discard_key))", "def add_filter(self, name: str, value: any):\n self.filters[name] = value", "def put(self, key, item):\n if key is None or item is None:\n return\n self.cache_data[key] = item", "def append_to(self, key, value):\n self.get_data()[key].append(value.get_data())", "def additem(d, key, value):\n if key in d:\n if not isinstance(d[key], list):\n d[key] = [d[key]]\n d[key].append(value)\n else:\n d[key] = value", "def addKey(self, time, value) -> None:\n ...", "def _insert_item(self, key: _KT, value: _VT) -> None:\n dict.__setitem__(self, key, value)", "def add_value(self, key, value):\r\n if key in self:\r\n # We already have this key on the item.\r\n if not 
isinstance(self[key], list):\r\n # The key isn't already a list, take its current value and\r\n # convert it to a list with the only member being the\r\n # current value.\r\n self[key] = [self[key]]\r\n # Add the new value to the list.\r\n self[key].append(value)\r\n else:\r\n # This is a new attribute, just set it.\r\n self[key] = value", "def put(self, key, value):\r\n temp = [key, value]\r\n flag = False\r\n for i in range(len(self.lis)):\r\n if self.lis[i][0] == temp[0]:\r\n self.lis[i][1] = temp[1]\r\n flag = True\r\n break\r\n if flag == False:\r\n self.lis.append(temp)", "def _newKey(self, key):\n self._testKeySubNsAdd()\n self._getKeyList().append(key)", "def add_item(self, item_to_append):\n self.items.append(item_to_append)", "def put(self, key, item):\n raise NotImplementedError(\"put must be implemented in your cache class\")", "def adauga(self, item):\n if item in self._items:\n raise RepoError(\"item deja existent!\\n\")\n self._items.append(item)", "def add(self, key):\r\n if key not in self.map:\r\n end = self.end\r\n curr = end[PREV]\r\n curr[NEXT] = end[PREV] = self.map[key] = [key, curr, end]", "def add(self, key, idx=None, count=1):\n key = self.lower_(key)\n if idx is not None:\n self.index2word[idx] = key\n self.word2index[key] = idx\n else:\n if key not in self.word2index:\n idx = len(self.word2index)\n self.index2word[idx] = key\n self.word2index[key] = idx\n\n if key not in self.word_count:\n self.word_count[key] = count\n else:\n self.word_count[key] += count", "def _append_row(self, key, value, item):\n self._items.append(item)\n self.key_listbox.insert(tk.END, key)\n self.value_listbox.insert(tk.END, value)", "def __setitem__(self, key, obj):\n self.add(key, obj, self._mode)", "def put(self, key, item):\n if key and item:\n if key in self.cache_data.keys():\n self.cache_data[key] = item\n self.stack.append(key)\n else:\n if len(self.cache_data.keys()) < self.MAX_ITEMS:\n self.cache_data[key] = item\n self.stack.append(key)\n elif len(self.cache_data.keys()) == self.MAX_ITEMS:\n k = self.stack.pop(-1)\n self.cache_data.pop(k)\n print(\"DISCARD: {}\".format(k))\n self.cache_data[key] = item\n self.stack.append(key)", "def do_add(self, args):\n\t\tif len(args) == 0:\n\t\t\tself.parent.printErr(\"Missing argument(s)\")\n\t\t\treturn False\n\t\tdef try_add(ftype, fvalue):\n\t\t\tif ftype == \"has\" and value not in self.FILTER_HAS_ARGUMENTS:\n\t\t\t\tself.parent.printErr(\"Could not add '%s': Invalid filter argument\" % (fvalue))\n\t\t\t\treturn False\n\t\t\telif ftype not in self.FILTER_ARGUMENTS:\n\t\t\t\tself.parent.printErr(\"Could not add '%s': Invalid filter\" % (ftype))\n\t\t\t\treturn False\n\n\t\t\ttry:\n\t\t\t\tif value not in self.parent.filter[ftype]:\n\t\t\t\t\tself.parent.filter[ftype].append(fvalue)\n\t\t\t\telse:\n\t\t\t\t\tself.parent.printErr(\"Could not add '%s': Item already in filter\" % (fvalue))\n\t\t\t\t\treturn False\n\t\t\texcept KeyError:\n\t\t\t\tself.parent.filter[ftype] = [fvalue]\n\n\t\t\tself.apply_filter()\n\t\t\treturn True\n\n\t\targs = args.split()\n\t\tftype = args[0]\n\t\tvalues = args[1:]\n\n\t\tif len(values) == 0:\n\t\t\tself.parent.printErr(\"Could not add '%s': Filter expects arguments\" % (ftype))\n\n\t\tfor value in values:\n\t\t\ttry_add(ftype, value)\n\n\t\tself._update_prompts()", "def add(self, key: str, value: str) -> Optional[None]:\n threshhold = self.capacity * 0.75\n if self.length >= threshhold:\n self._increase_size()\n\n hashkey = self._gethash(key)\n if not self.HashMap[hashkey]:\n # The key does not exist so 
add it\n value_to_store = [key, value]\n self.HashMap[hashkey] = value_to_store\n self.length += 1\n elif self.HashMap[hashkey] and key not in self.HashMap[hashkey]:\n # There is a hashclash append to the location\n self.HashMap[hashkey].extend([key, value])\n self.length += 1\n else:\n # The key exists and matches so the value gets overlayed\n self.HashMap[hashkey] = [key, value]", "def _update_append_key(self):\n self.append_key += 1", "def addKey(self,\n keyObj):\n # Note, internal key number index starts with 1.\n if (self.__keyCount.has_key(keyObj.getKey()) == 0):\n self.__keyCount[keyObj.getKey()] = 0\n if (self.getAllowRepetition() == 1):\n # If repetition is enabled, just add the keyword.\n count = self.__incKeyCount(keyObj.getKey())\n key = keyObj.getKey() + \"___\" + str(count)\n self.__keyObjs[key] = keyObj\n self.__keyList.append(key)\n else:\n # If repetition is not enabled, check if the keyword is there,\n # if yes, replace the existing one.\n count = set.setKeyCount_(keyObj.getKey(), 1)\n key = keyObj.getKey() + \"___\" + str(count)\n if (self.__keyList.count(key) == 0):\n self.__keyList.append(key)\n self.__keyObjs[key] = keyObj\n\n return self", "def append(self, item):\n self.update([item])", "def add(self, key):\n self.times[key] = time.time()" ]
[ "0.72713625", "0.7200462", "0.70165783", "0.67412406", "0.6649933", "0.6583354", "0.6483099", "0.6472438", "0.63995546", "0.6359965", "0.623782", "0.6182844", "0.6171349", "0.6163423", "0.61477727", "0.61084974", "0.61084974", "0.61084974", "0.61075824", "0.60661644", "0.60115886", "0.5971788", "0.59450895", "0.59115773", "0.5904302", "0.5892816", "0.58817667", "0.5878598", "0.5854063", "0.58426666", "0.5840309", "0.5840309", "0.5835531", "0.5823125", "0.58205813", "0.57750475", "0.57605565", "0.57552", "0.57258767", "0.5722154", "0.57067984", "0.5691847", "0.5684778", "0.567848", "0.567848", "0.56702113", "0.56687254", "0.5666608", "0.5665944", "0.56486005", "0.5633339", "0.5622835", "0.56217045", "0.56209487", "0.5619141", "0.56154406", "0.56105983", "0.56072253", "0.5604723", "0.5602423", "0.559307", "0.55837256", "0.5578233", "0.5572818", "0.55666286", "0.5554962", "0.55451775", "0.5541704", "0.5540291", "0.55372137", "0.55368155", "0.55368155", "0.5535807", "0.5534724", "0.5525248", "0.55243343", "0.55134916", "0.5509256", "0.55085987", "0.55020386", "0.54999304", "0.54978853", "0.5497411", "0.5497028", "0.54840124", "0.5479103", "0.54770136", "0.54716897", "0.5466618", "0.544441", "0.54430085", "0.5442259", "0.5433809", "0.5404387", "0.5401596", "0.5388731", "0.5384352", "0.5377096", "0.53768694", "0.5368764" ]
0.6077388
19
Checks whether one ``item`` or more is a TopK item at ``key``.
def topkQuery(self, key, *items): params = [key] params += items return self.execute_command(self.TOPK_QUERY, *params)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __contains__(self, item, key):\n ndx = self._findPostion(key)\n return ndx is not None", "def item_has_key(self, item, key):\n if key in self._reverse_store[item]:\n return True\n else:\n return False", "def has_item(self, usage_key):\r\n try:\r\n self._find_one(usage_key)\r\n return True\r\n except ItemNotFoundError:\r\n return False", "def has_item(self, usage_key):\r\n return usage_key in self.modules[usage_key.course_key]", "def hasItem(self, path): \n\t\treturn (path in self.items and self.items[path])", "def is_perCapita(key):", "async def isStorObj(app, key, bucket=None):\n found = False\n client = _getStorageClient(app)\n if not bucket:\n bucket = app['bucket_name']\n else:\n log.debug(f\"using bucket: [{bucket}]\")\n log.debug(f\"isStorObj {bucket}/{key}\")\n\n found = False\n\n try:\n contents = await client.list_keys(bucket=bucket, limit=1, prefix=key)\n if contents:\n item = contents[0]\n print(\"item:\", item)\n if item == key:\n # if the key is a S3 folder, the key will be the first object in the folder,\n # not the requested object\n found = True\n\n except HTTPNotFound:\n pass # key does not exist\n\n log.debug(f\"isStorObj {key} returning {found}\")\n return found", "def has_item(self, item):\n return item in self.set", "def has_item(self, item):\n return item in self.set", "def has_item(self, item: Inventory) -> bool:\n return (item.pk,) in self.orderitem_set.values_list('item')", "def check_key(key, options):\n animal_id, exp_date, exp_type = key.split('_')\n if ((options.animal_id is None or animal_id == options.animal_id)\n and (options.exp_date is None or exp_date == options.exp_date)\n and (options.exp_type is None or exp_type == options.exp_type)):\n return True\n else:\n return False", "def __contains__(self, key):\n\n if type(key) != self.type:\n return False\n\n first_char = key[:1]\n others = key[1:]\n\n if first_char not in self.children:\n return False\n\n if len(first_char) != 0 and len(others) == 0:\n node = self.children[first_char]\n\n if node.value is None:\n return False\n\n return True\n else:\n return others in self.children[first_char]", "def __contains__(self, key):\n node, _ = Treap._find_node(key, self.root)\n return node is not None", "def has_item(self, usage_key):\r\n store = self._get_modulestore_for_courseid(usage_key.course_key)\r\n return store.has_item(usage_key)", "def __contains__(self, key):\n\t\treturn any([item == key for _, item in self.heap])", "def item_exists(self, call_number):\n return call_number in self.item_list.keys()", "def contains_key_at(self, key, index):\r\n return index < self.num_keys() and self.keys[index] == key", "def has_key(self, key):\n return key in self", "def _in_keys(self, key, keys):\n # sorting required for comparison\n key.sort()\n return key in keys", "def has_key(self, key):\n return self.contains(key)", "def contains(self, key: int) -> bool:\n lv1, lv2 = self.hashing(key)\n \n for item in self.cont[lv1][lv2]:\n if item==key:\n return True\n \n return False", "def check_inputs(self, item_data):\n if not item_data[0] in self.data['pizza']:\n print('Error: ' + item_data[0] + ' pizza does not exist.')\n return False\n\n if not item_data[1] in self.data['pizza'][item_data[0]]:\n print('Error: ' + item_data[1] + ' size does not exist for '\n + item_data[0] + ' pizza.')\n return False\n\n for topping in item_data[2]:\n if not topping in self.data['topping']:\n print('Error: Pizza topping ' + topping + ' does not exist.')\n return False\n return True", "def has_item(self, item):\n return item in 
self.cache", "def contains(self, key: int) -> bool:\n return self._find_key(key, find_empty=False) >= 0", "def is_allergic_to(self, item):\n if item in self.list:\n return True\n else:\n return False", "def hasKey(self,\n key):\n return self.__keyCount.has_key(key)", "def contains(self, key):\n bus=key%100000\n pos=key//100000\n return self.li[bus][pos]==1", "def moreThanOne(dict, key):\n\treturn key in dict and dict[key] > 0", "def search(self, item):\n \n current = self.head\n \n while current is not None:\n if current.get_data() == item:\n return True\n # Early stop by taking advantage of ordering \n elif current.get_data() > item:\n return False\n else:\n current = current.get_next()\n \n return False", "def _is_unique_key(self, key):\n return self._in_keys(key, self._unique_keys)", "def __contains__(self, item):\n try:\n pos = Vec2(*item)\n return pos.x >= self.origin.x and pos.y >= self.origin.y \\\n and pos.x < self.origin.x + self.size.x \\\n and pos.y < self.origin.y + self.size.y\n except TypeError:\n return False", "def search(self, item):\n current = self._head\n # search until we find it or fall off the end\n while current != None:\n if current.getData() == item:\n # item has been found\n return True\n else:\n if current.getData() > item:\n # We’ve passed where the item could be.\n # Only works for ordered lists.\n return False\n else:\n current = current.getNext()\n return False", "def has(cls, item):\n return item in cls.values()", "def has_item(self, item_name):\n if item_name in self.item_list:\n return True\n return False", "def __contains__(self, item: object) -> bool:\n return item in self._used", "def has_items(self):\r\n return self.orderitem_set.exists() # pylint: disable=E1101\r", "def containsKey(self, key):\n return get(key) != None", "def isItem(self):\n return _libsbml.Unit_isItem(self)", "def is_item(self, x_coordinate, y_coordinate):\n if self.grid[x_coordinate][y_coordinate] == \"X\" or self.grid[x_coordinate][y_coordinate] == \" \" or self.grid[x_coordinate][y_coordinate] == POINT_OF_PLAYER or self.grid[x_coordinate][y_coordinate] == POINT_OF_EXIT:\n return False\n\n else:\n return True", "def contains(self, key):\n # TODO: Check if the given key exists in a bucket\n hash_key = self._bucket_index(key) # Gets the index of the key\n if self.buckets[hash_key].is_empty() is False: # If the hask_key exists\n for key_value_pair in self.buckets[hash_key]: # Iteratre through the value pair\n if key_value_pair[0] is key: # If the key matches\n return True\n return False", "def is_item_complete(self, item):\n return (item.get('id') and\n item.get('name') and\n 'description' in item and\n 'image' in item)", "def __contains__(self, key):\n found = True\n try:\n self.__getitem__(key)\n except:\n found = False\n return found", "def filter_top_level(item):\n return item.parent_item is None", "def __contains__(self, item):\n return item.upper() in self.keys", "def __contains__(self, key):\n return self.keys[self._linear_probe(key, \"contains\")] is not None", "def _check_feature_by_keys(service_data=None, service_keys=None, ns_data=None, ns_keys=None):\n\n if service_data and not isinstance(service_data, Exception) and service_keys:\n if _is_keyval_greater_than_value(service_data, service_keys):\n return True\n\n if ns_data and ns_keys:\n for ns, nsval in ns_data.iteritems():\n if not nsval or isinstance(nsval, Exception):\n continue\n if _is_keyval_greater_than_value(nsval, ns_keys):\n return True\n\n return False", "def __contains__(self, key):\n return key in self._opts 
or key in self._groups", "def contains(self, key):\n if key in self.key_list:\n return True\n return False", "def __contains__(self, item):\n try:\n self[item]\n return True\n except KeyError:\n return False", "def __contains__(self, key):\n return self._get(key, self.root) is not None", "def topkCount(self, key, *items):\n params = [key]\n params += items\n\n return self.execute_command(self.TOPK_COUNT, *params)", "def existsitem(self,item,listwidgets):\n exists = listwidgets.findItems(item, Qt.MatchExactly)\n if exists:\n return True\n else:\n return False", "def has_key(self, key):\n if '.' in key:\n first, second = key.split('.', 1)\n return self[first].has_key(second)\n else:\n return key in self.keys()", "def bfExists(self, key, item):\n params = [key, item]\n \n return self.execute_command(self.BF_EXISTS, *params)", "def test_metadata_no_unknown_top_keys(self):\n top_keys = [\"name\", \"designer\", \"license\", \"visibility\", \"category\",\n \"size\", \"dateAdded\", \"fonts\", \"subsets\"]\n for x in self.metadata.keys():\n self.assertIn(x, top_keys, msg=\"%s found unknown top key\" % x)", "def has(self, key):", "def _is_key_value(data):\n if data is None:\n return False\n return all(x in data for x in ['key', 'value'])", "def is_in_bag(self, item):\n return item in self._bag", "def _check_key(self, key):\n raise NotImplementedError", "def has_item(self, usage_key):\r\n if usage_key.block_id is None:\r\n raise InsufficientSpecificationError(usage_key)\r\n try:\r\n course_structure = self._lookup_course(usage_key)['structure']\r\n except ItemNotFoundError:\r\n # this error only occurs if the course does not exist\r\n return False\r\n\r\n return self._get_block_from_structure(course_structure, usage_key.block_id) is not None", "def check(self,item):\r\n raise AbstractError\r\n return False", "def checkforitems(curpos):\n if DARK and not HAS_FLASHLIGHT:\n printmessage(\"But you can't see a thing!\", 5, MAGENTA, 2) # was 2\n return\n\n if ITEM_LIST[curpos] != int(len(ITEMTYPES) - 2): # if the item at curpos isnt 'None'\n printmessage(\"You found a %s!\" % ITEMTYPES[ITEM_LIST[curpos]], 5, MAGENTA, 0)\n add_score(50)\n additemtoinventory(ITEM_LIST[curpos]) # funtion removes item from map\n pause_for_keypress()\n else:\n printmessage(\"You look around, and find nothing\", 5, CYAN, 2)", "def exists(root: Node, key: int):\n if root is None:\n return False\n else:\n if root.key == key:\n return True\n elif key < root.key:\n return exists(root.left, key)\n else:\n return exists(root.right, key)", "def contains(self, item):\n if isinstance(item, dict):\n return _(item).all(lambda key: self._.get(key) == item[key])\n return item in self", "def _verify_key_exists(self, key, stack_path=[]):\r\n error_msg = (\r\n \"Could not find the {key_type} key '{key}' in: {stack_path}. 
\"\r\n \"Found {keys_found} instead.\"\r\n )\r\n try:\r\n dk = stack_path[0]\r\n fk = stack_path[1]\r\n xk = stack_path[2]\r\n yk = stack_path[3]\r\n vk = stack_path[4]\r\n except:\r\n pass\r\n try:\r\n if len(stack_path) == 0:\r\n if key not in self:\r\n key_type, keys_found = 'data', self.keys()\r\n stack_path = 'stack'\r\n raise ValueError\r\n elif len(stack_path) == 1:\r\n if key not in self[dk]:\r\n key_type, keys_found = 'filter', self[dk].keys()\r\n stack_path = 'stack[{dk}]'.format(\r\n dk=dk)\r\n raise ValueError\r\n elif len(stack_path) == 2:\r\n if key not in self[dk][fk]:\r\n key_type, keys_found = 'x', self[dk][fk].keys()\r\n stack_path = 'stack[{dk}][{fk}]'.format(\r\n dk=dk, fk=fk)\r\n raise ValueError\r\n elif len(stack_path) == 3:\r\n if key not in self[dk][fk][xk]:\r\n key_type, keys_found = 'y', self[dk][fk][xk].keys()\r\n stack_path = 'stack[{dk}][{fk}][{xk}]'.format(\r\n dk=dk, fk=fk, xk=xk)\r\n raise ValueError\r\n elif len(stack_path) == 4:\r\n if key not in self[dk][fk][xk][yk]:\r\n key_type, keys_found = 'view', self[dk][fk][xk][yk].keys()\r\n stack_path = 'stack[{dk}][{fk}][{xk}][{yk}]'.format(\r\n dk=dk, fk=fk, xk=xk, yk=yk)\r\n raise ValueError\r\n except ValueError:\r\n print error_msg.format(\r\n key_type=key_type,\r\n key=key,\r\n stack_path=stack_path,\r\n keys_found=keys_found\r\n )", "def contains_key(self, key):\r\n\t\t# call the linked list contains() method for each bucket\r\n\t\tfor i in self._buckets:\r\n\t\t\tif i.contains(key):\r\n\t\t\t\treturn True\r\n\t\treturn False", "def match(self, item):\n return item == self._expected_item", "def has(self, key):\n return False", "def IsItemVisible(self, item):\r\n\r\n # An item is only visible if it's not a descendant of a collapsed item\r\n parent = item.GetParent()\r\n\r\n while parent:\r\n \r\n if not parent.IsExpanded():\r\n return False\r\n \r\n parent = parent.GetParent()\r\n \r\n startX, startY = self.GetViewStart()\r\n clientSize = self.GetClientSize()\r\n\r\n rect = self.GetBoundingRect(item)\r\n \r\n if not rect:\r\n return False\r\n if rect.GetWidth() == 0 or rect.GetHeight() == 0:\r\n return False\r\n if rect.GetBottom() < 0 or rect.GetTop() > clientSize.y:\r\n return False\r\n if rect.GetRight() < 0 or rect.GetLeft() > clientSize.x:\r\n return False\r\n\r\n return True", "def checkkey(self, k):\r\n if k == self.shortcut:\r\n return True\r\n return False", "def __ge__(self, value):\n if not isinstance(value, Item):\n raise ValueError(\"Can't compare Item to non-Item type\")\n return self.views >= value.views", "def contains(self, key):\n\n return key in self.keys()", "def has_key(self, key):\n return key in self.responses", "def HasChildren(self, item):\r\n\r\n return len(item.GetChildren()) > 0", "def _find(self, item):\n stack = [self.root]\n while stack:\n node = stack.pop()\n if node.items and item in node.items:\n return True, node.active[node.items.index(item)], node\n elif not node.items:\n axis = node.cutdim\n cur = item[axis]\n median = node.cutval\n if median >= cur:\n stack.append(node.left)\n if median <= cur:\n stack.append(node.right)\n return False, False, None", "def _map___contains__(self, key):\n if not isinstance(key, self.keytype):\n raise KeyError('type of `key` should be ' + repr(self.keytype) + ' but got ' + repr(type(key)))\n if self.find(key) != self.end():\n return True\n return False", "def __contains__(self, key):\n item = self._store.get(key)\n if not item:\n return False\n\n value, expires_at = item\n if expires_at and time.time() < expires_at:\n return False\n\n 
return True", "def __contains__(self, item):\n if len(item) != len(self.sizes):\n raise ValueError('Point dimension does not match grid dimension')\n for i in range(len(self.sizes)):\n if not 1 <= item[i] < self.sizes[i] - 1:\n return False\n return True", "def _iscommand(self, key):\r\n\t\tyes = False\r\n\t\tfor i in COMMAND_NAME.keys():\r\n\t\t\tif key == i: \r\n\t\t\t\tyes = True; break\r\n\t\treturn yes", "def __contains__(self, key):\n return key in self.keys", "def __contains__(self, items):\n if type(items) != list:\n raise PJFInvalidType(items, list)\n ret = 0\n for item in items:\n for key in self.__dict__:\n if isinstance(self.__dict__[key], JsonFactory):\n ret += item in self.__dict__[key]\n elif item == key:\n ret += 1\n return len(items) == ret", "def IsImportant(self, key):\n\n if any(x.lower() == key for x in self.keywords):\n return True\n return False", "def is_valid_menu_item(self, item_name: str) -> bool:\n return item_name in self._items()", "def contains(self, key):\n return isinstance(key, (long, )) and LongObjectHashMap.self.containsKey(long(key))", "def contains(self, item):\n return self._dict.has_key(item)\n\n self.__contains__ = contains", "def has_deep_key(obj, key):\n\tif isinstance(key, str):\n\t\tkey = key.split('.')\n\t\t\n\tlast_obj = obj\n\tfor v in key:\n\t\tif not last_obj.has_key(v):\n\t\t\treturn False\n\t\tlast_obj = last_obj[v]\n\t\n\treturn True", "def _is_generic_key(key):\n for prefix in [\n \"graph_rewriter_config\",\n \"model\",\n \"train_input_config\",\n \"train_config\",\n \"eval_config\"]:\n if key.startswith(prefix + \".\"):\n return True\n return False", "def find(self, key: int) -> bool:\n if self.empty():\n return False\n return self.root.find(key) is not None", "def one_head_test(self, item):\n v = [i for i, j in self.A if j == item]\n return len(v) == 0", "def exista(self, item):\n if item not in self._items:\n return False\n for x in self._items:\n if x == item:\n return True", "def is_final_item(item_id):\n return \"into\" not in items[\"data\"][str(item_id)]", "def check_if_already_used(self, key):\n for switch in self.new_switches:\n if key == self.new_switches[switch]:\n return True\n return False", "def __contains__(self, item: object) -> bool:\n val = conv_kv(item) # type: ignore\n for fixup in self._mapping._fixup.values():\n if fixup.value == val:\n return True\n return False", "def __contains__(self, key):\n\n return key in self.keys_set", "def check(boxes, keys):\n if len(keys) != len(boxes):\n return False\n w = False\n for i in range(len(boxes)):\n for x in range(len(keys)):\n if keys[x] == i:\n w = True\n break\n if not w:\n return False\n return True", "def __contains__(self, item):\n for _, _, _, cur_item in self.queue:\n if cur_item == item:\n return True\n return False", "def isin(self, item):\n return self.get(item) is not None", "def __check(s3client, key, bucket_name):\n try:\n s3client.head_object(Bucket=bucket_name, Key=key)\n except ClientError as e:\n return int(e.response['Error']['Code']) != 404\n return True", "def __contains__(self, item):\n try:\n hdu = self[item] # noqa\n return True\n except Exception:\n return False", "def __contains__(self, item):\n\t\treturn item in self.__dict__.values()" ]
[ "0.634382", "0.6114394", "0.611171", "0.57911634", "0.570563", "0.5608097", "0.5595988", "0.5594606", "0.5594606", "0.55808914", "0.5543288", "0.55300564", "0.5517709", "0.5510101", "0.5477933", "0.545195", "0.54484284", "0.5443446", "0.54301524", "0.53889227", "0.5388687", "0.53768826", "0.5366577", "0.5365526", "0.53423566", "0.5340004", "0.5336465", "0.53315663", "0.53250504", "0.53173995", "0.5312631", "0.53106064", "0.5307962", "0.5305898", "0.52772135", "0.52758956", "0.5273534", "0.52735233", "0.5270976", "0.52664346", "0.52650315", "0.52618927", "0.5241383", "0.5228781", "0.52280307", "0.52252805", "0.52229285", "0.52223575", "0.5218938", "0.52094454", "0.52071905", "0.5206432", "0.520464", "0.5202757", "0.5191925", "0.5191593", "0.51913273", "0.5186845", "0.51709", "0.5151984", "0.5146361", "0.5145583", "0.51431423", "0.51333964", "0.51332045", "0.51324266", "0.5131952", "0.51301366", "0.5129935", "0.51215756", "0.51178265", "0.5116884", "0.51141495", "0.5103018", "0.5096013", "0.50955385", "0.5094597", "0.5089279", "0.50890756", "0.5089051", "0.5087683", "0.5085078", "0.5083029", "0.50814956", "0.5079734", "0.50793594", "0.5072454", "0.5067764", "0.5067143", "0.50641435", "0.5064035", "0.5063907", "0.5061691", "0.5061164", "0.5059386", "0.5056518", "0.50514007", "0.5046053", "0.50441414", "0.50416344" ]
0.60485935
3
Returns count for one ``item`` or more from ``key``.
def topkCount(self, key, *items): params = [key] params += items return self.execute_command(self.TOPK_COUNT, *params)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cfCount(self, key, item):\n params = [key, item]\n\n return self.execute_command(self.CF_COUNT, *params)", "def count(self, item):\n if item in self: \n return self[item]\n else: \n return 0", "def getKeyCount(self,\n key):\n if (self.hasKey(key) == 1):\n return self.__keyCount[key]\n else:\n return 0", "def count(self, item):\n return _(self._.count(item))", "def count(item):\n return len(item)", "def get_count(name, key):\n total = 0\n query = CounterShard.all().filter('name = ', name).filter('reference_key = ', key)\n for counter in query:\n total += counter.count\n \n return total", "def count(self, conn, key):\n return conn.llen(key)", "def count(self, item):\n # type: (Any) -> int\n return list.count(self, self.ref(item))", "def count(self, item: Any) -> int:\n curr = self._first\n count = 0\n\n while curr is not None:\n if curr.item == item:\n count += 1\n curr = curr.next\n\n return count", "def count_item(*, item : Any, list : Union[List[Any], ConduitVariable]) -> List[Any]:\n return list.count(item)", "def count(self, key):\n self._metrics[key] += 1", "def count(self, item):\n # TODO: complete this function!\n if item not in self:\n return 0\n else:\n num_occur = 0\n if self._first == item:\n num_occur += 1\n num_occur += self._rest.count(item)\n return num_occur", "def number_with_key(key):\n # good for checking proliferation of sort_key etc\n db = TinyDB(CARD_DATA_FILE)\n card_data = db.table('card_data')\n packs = card_data.all()\n total = 0\n with_key = 0\n for pack in packs:\n total += 1\n if key in pack:\n with_key += 1\n print('{} out of {} have sort keys'.format(with_key, total))", "def __incKeyCount(self,\n key):\n if (self.__keyCount.has_key(key) == 0): self.__keyCount[key] = 0\n self.__keyCount[key] = self.__keyCount[key] + 1\n return self.__keyCount[key]", "def _increment_count(self, key):\n\n if not self._count.has_key(key):\n self._count[key] = 0\n\n self._count[key] += 1", "def _increment_count(self, key):\n\n if not self._count.has_key(key):\n self._count[key] = 0\n\n self._count[key] += 1", "def total(my_list, item):\n return my_list.count(item)", "def __setKeyCount(self,\n key,\n count):\n self.__keyCount[key] = count\n return self.__keyCount[key]", "def getFreq(TDB,key):\n \"\"\" key is set of subitems to count frequancy of repeatation this key in the TDB \"\"\"\n freq = 0\n for items in TDB:\n exist = True\n for element in key:\n if element not in items:\n exist = False\n break\n if exist:\n freq+=1\n return freq", "def increment_count(dictionary, key):\n if key:\n if key in dictionary:\n dictionary[key] += 1\n else:\n dictionary[key] = 1", "def num_keys_command():\n # Get Args needed for the command\n incident = demisto.args().get('id', get_investigation_id())\n # Search Collection counting matching incident_id\n cursor = COLLECTION.find({})\n count = 0\n for i in cursor:\n if incident in i:\n count += 1\n return f'The count of the key/value pairs for the incident - {str(count)}', {}, {}", "def count_singlesA(key, alist):\n result = 0\n for x in range(len(alist)):\n if alist[x] == key:\n if not is_next_to(x, alist):\n result += 1\n return result", "def size(self, key):\n return len(self[key])", "def __len__(self):\n return sum(item['qty'] for item in self.basket.values()) # counts all the values of the key qty", "def _index(self,key):\n index=0\n for item in self._item:\n if item.key==key:\n return index\n index+=1\n return -1", "def __call__(self, item):\n token, counts = item\n return token, sum(counts)", "def increment_count(count_dict, key):\n if 
key in count_dict:\n count_dict[key] += 1\n else:\n count_dict[key] = 1", "def get_num_values(self, item):\n\tnum_values = 1\n\t\n\t# Valor mas antiguo de la linked list\n\t# Siempre tiene valor, si no, no tenemos la entrada en el hashset\n\tvalue = item[\"tail\"][\"next\"]\n \twhile long(value) != 0:\n\t num_values += 1\n\t value = value[\"next\"]\n\n\treturn num_values", "def getNumberOfKeys(self, attr, view) -> int:\n ...", "def item_count(item_id, arg):\n global database\n table = database.Tables.items\n upd = table.update(None).where(table.c.id == item_id).values(count=table.c.count+(int(arg)))\n database.conn.execute(upd)", "def index(self, key):\n count = 0\n for k in self.__ordered_keys:\n if k.lower() == key.lower():\n return count\n count = count + 1\n raise KeyError(key)", "def count_singlesB(key, alist):\n num = 0\n result = 0\n for element in alist:\n if num == 0 and element == key:\n result += 1\n num = 1\n elif num == 1 and element == key:\n result -= 1\n num += 1\n elif num > 1 and element == key:\n num += 1\n elif element == key:\n num = 1\n else:\n num = 0\n return result", "def count_products(list_products):\n for each_item in ADD_PRODUCTS: #This iterates in the dictionary\n num_of_products = list_products.count(each_item) #This count each product\n if num_of_products > 0:\n price = ADD_PRODUCTS[each_item]\n print num_of_products, each_item + \"(s)\", \"a\", (\"Q%.2f c/u\") % price", "def size(self, key):\n with SessionContext(self.SessionClass) as session:\n q = session.query(PAW2_DBObject.numElems)\n return q.filter(PAW2_DBObject.key == key).one()[0]", "def getNumberOfKeys(self) -> int:\n ...", "def get_item_count(self):\n resp = self.app.get('/items')\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n data = json.loads(resp.data)\n return len(data)", "def count_words(item):\n word, occurences = item\n return word, sum(occurences)", "def count(self, item: Any) -> int:\n # If this recursive list is empty\n if self.is_empty():\n return 0\n # If there is a first and a rest.\n else:\n # Check if the first is equal and add the count on the rest of the list.\n return int(self._first == item) + self._rest.count(item)", "def get_counts(filename, key):\r\n column_keys, get_data = get_csv(filename)\r\n assert(key in column_keys[1:])\r\n column = column_keys[1:].index(key)\r\n print 'getcounts() %s : %s column = %d' % (filename, key, column+1) \r\n counts_dict = {}\r\n for i,(k,v) in enumerate(get_data()):\r\n x = v[column]\r\n counts_dict[x] = counts_dict.get(x, 0) + 1\r\n return counts_dict", "def set_count(items):\n item_count = {}\n for item in items:\n if not item: continue\n if not item_count.has_key(item): item_count[item] = 0\n item_count[item] += 1\n \n items = [(v, k) for k, v in item_count.iteritems()]\n items.sort()\n items.reverse()\n \n return [(k, v) for v, k in items]", "def getItemCount(self, ItemBase):\n Found = 0\n for CurrItem in self.List:\n if CurrItem.Base == ItemBase:\n Found = 1\n break\n\n if not Found: return 0\n else: return CurrItem.Count", "def process(self, key, value):\n if key not in self.counts:\n self.counts[key] = 0.0\n self.counts[key] += value", "def count_hash(cls, key, size):\n return key%size", "def getCount(self, event):\n # Attempt 2: Still too slow\n count = 0\n \n for mEvent in self:\n if event.__st__(mEvent):\n count += 1\n \n return count\n \n # Attempt 1: Too slow\n #return reduce((lambda x, y: x+y),\n # map((lambda i: itemset <= i), self))", "def Counts(dict_of_list):\n return {k: len(v) for k, v in 
dict_of_list.iteritems()}", "def get_number_of_features(key):\n sum = 0\n for name, module in common.QOL_PARAMS[key].items():\n sum += module.LENGTH\n\n return sum", "def count_results(key):\n max_results = 1\n sleep(0.3)\n req = requests.get(f\"\"\"http://eutils.ncbi.nlm.nih.gov/entrez/eutils/esearch.fcgi?db=pubmed&retmode=xml&retmax={max_results}&sort=relevance&term={key}\"\"\")\n answer = BeautifulSoup(req.text, 'html.parser')\n result = int(answer.find_all(\"count\")[0].get_text())\n return(result)", "def countSubStringMatch(target,key):\n count = 0\n for i in range(0,len(target)-len(key)):\n if target[i:i+len(key)] == key:\n count += 1\n return count", "def count(self, val):\n raise ValueError('cannot set \\'count\\' in class KeyTracker')", "def item_count_from_index(input_list, index):\n count = 0\n for item in input_list[index]:\n count = count + 1\n\n pass", "def count(self):\n return self.connection.llen(self.key)", "def count(self):\n return self.connection._llen(self.key)", "def count(self):\n return len([i for i in self.iteritems()])", "def countby(iteratee, seq):\n return dict(Counter(map(iteratee, seq)))", "def num_tuples(self, rel_key):\n table = self.metadata.tables[str(rel_key)]\n return self.engine.execute(table.count()).scalar()", "def get(self, key):\n if key is None or key not in self.cache_data.keys():\n return\n self.count += 1\n self.key_tracker.update({key: self.count})\n return self.cache_data.get(key)", "def get_count(self, entry):\n return entry.count", "def score(item, fd, key):\n return fd.get(key(item), 0)", "def __len__(self):\n return sum(1 for item in self.wishlist.values())", "def __len__(self):\n return sum(1 for item in self.wishlist.values())", "def __decKeyCount(self,\n key):\n if (self.__keyCount.has_key(key) == 0): self.__keyCount[key] = 0 \n self.__keyCount[key] = self.__keyCount[key] - 1\n return self.__keyCount[key]", "def counts(self):\n # Returns an OMD because Counter/OrderedDict may not be\n # available, and neither Counter nor dict maintain order.\n super_getitem = super(OrderedMultiDict, self).__getitem__\n return self.__class__((k, len(super_getitem(k))) for k in self)", "def count_dict(self, lst):\n nos = list(self.digits)\n digit_count = dict([(digit, 0) for digit in nos])\n for item in lst:\n for num in item:\n digit_count[num] += 1\n return digit_count", "def add(self, key):\n\t\t#super(CountingBloomFilter, self).add(key)\n\t\t#super(CountingBloomFilter, self).generateStats()\n\t\tfor i in self.getBitArrayIndices(key):\n\t\t\tself.ba[i] += 1\n\t\tself.n += 1", "def item_count(self):\n return self.items.shape[0]", "def getNumberOfTransformKeys(self, view) -> int:\n ...", "def get_number_of_items(self):\n return len(self.__item_map)", "def count_items(self):\n count = 0\n for o in self.order_lst:\n count += o.count()\n \n return count", "def count(list: list[str]) -> dict[str, int]:\n result: dict[str, int] = {}\n for keys in list: \n if keys not in result: \n result[keys] = 1 \n else: \n result[keys] += 1\n return result", "def getNumberOfTranslationKeys(self, view) -> int:\n ...", "def count(self, value):\n self.__validate_value(value)\n counter = 0\n for v in self.__list:\n if v == value:\n counter += 1\n return counter", "def count():", "def _count_elements(mapping, iterable): # real signature unknown; restored from __doc__\n pass", "def get_count(self, cf_name, key, start='', finish='', keyspace_name=None):\n return self._Get_Count(\n cf_name=cf_name, key=key, start=start, finish=finish,\n keyspace_name=keyspace_name)", "def 
countitems(self):\n count = 0\n sid = self.client.scannerOpen(self.table, '', ['f:s'])\n while 1:\n r = self.client.scannerGetList(sid, 1000)\n #r = self.client.scannerGet(sid)\n if not r: break\n count += len(r)\n logging.debug('%d %s', count, r[-1].row)\n self.scannerClose(sid)\n return count", "def cart_distinct_item_count(request):\n return get_cart_items(request).count()", "def keycount(self, essid):\n return self.cli.essids.keycount(essid)", "def get_count(keyname, num_shards=NUM_SHARDS, value=1):\n if num_shards:\n total = 0\n for index in range(0, num_shards):\n shard_name = \"%s:%s\" % (str(keyname), str(index))\n count = kv.get(shard_name)\n if count:\n total += count\n else:\n total = kv.get(keyname)\n if total is None:\n total = value\n kv.set(keyname, total)\n return total", "def count(self, value=None):\r\n\t\t_set = list(set(self.sample))\r\n\t\tif value == None: return {_set[i]: self.sample.count(_set[i]) for i in range(len(_set))}\r\n\t\telse:\r\n\t\t\ttry: return {_set[i]: self.sample.count(_set[i]) for i in range(len(_set))}[value]\r\n\t\t\texcept: return 0", "def num_keys(self):\n return len(self.counter.keys())", "def count_items(column_list):\n item_types = list(set(column_list))\n count_items = [column_list.count(t) for t in item_types]\n return item_types, count_items", "def keycount(self, essid):\n with SessionContext(self.SessionClass) as session:\n q = session.query(PAW2_DBObject.key)\n q = q.join(PYR2_DBObject).join(ESSID_DBObject)\n q = q.filter(ESSID_DBObject.essid == essid)\n return q.count()", "def count(self, tokens):\n return self._count[tuple(tokens)]", "def count(freq_list: list[str]) -> dict[str, int]:\n returned_dict: dict[str, int] = {}\n for item in freq_list:\n if item in returned_dict.keys():\n # increase the value associated with that key \n returned_dict[item] += 1\n else:\n # assign that key the value of 1\n returned_dict[item] = 1\n return returned_dict", "def frequency(item, the_list):\n list_length = len(the_list)\n # initialising counters\n i = 0\n item_count = 0\n\n # looping through every item in the list\n while i < list_length:\n # if the item being checked in the list equals the item being searched for, increment the count\n if the_list[i] == item:\n item_count = item_count + 1\n i = i + 1\n\n # printing the result\n print(str(item) + ' appears ' + str(item_count) + ' times')\n\n return item_count", "def keycount(self, essid):\n if essid not in self.essids:\n raise KeyError(\"ESSID not in store.\")\n return len(self.essids[essid][1])", "def getNumberOfScaleKeys(self, view) -> int:\n ...", "def count_dictionary_values(self):\n my_dictionary = {'A': [1, 2, 3, 4, 5, 6, 7, 8, 9],\n 'B': 34,\n 'C': 12,\n 'D': [7, 8, 9, 6, 4]}\n\n count = 0\n for key, value in my_dictionary.items():\n if isinstance(value, list):\n count += len(value)\n print(\"Number of items in a dictionary value i.e a list :\", count)", "def _getValueCounts(mapping):\n return Counter({k: len(v) for k, v in viewitems(mapping)})", "async def count(self, **kw):\n\n pass", "def test_create_count_key(self):\n s = SimulationStats()\n\n # new count key added\n s.create_count_key(count_key=0)\n assert s.results_dict == {0: {\n 'net winnings': 0,\n 'number of rounds': 0,\n 'number of split hands': 0,\n 'overall bet': 0\n }}\n\n # count key already exists\n s.create_count_key(count_key=0)\n assert s.results_dict == {0: {\n 'net winnings': 0,\n 'number of rounds': 0,\n 'number of split hands': 0,\n 'overall bet': 0\n }}", "def incrementAll(self, keys, count):\n for key in keys:\n 
self[key] += count", "def incrementAll(self, keys, count):\n for key in keys:\n self[key] += count", "def __len__(self):\n return sum(item[\"quantity\"] for item in self.carro.values())", "def supportCk(ckItem, transactions):\n count = 0\n for trans in transactions:\n if ckItem.issubset(frozenset(trans['itemId'])):\n count += 1\n return count", "def count(self, column, keys=None, **kwds_filter):\n isnull = self._pandas.isnull\n mapper = lambda value: 1 if (value and not isnull(value)) else 0\n reducer = lambda x, y: x + y\n return self.mapreduce(mapper, reducer, column, keys, **kwds_filter)", "def get_count(self, table_name, key, sharded=False):\n\n if sharded:\n counter_sum = 0\n counter = None\n counters = self._get_counters_from_indice(table_name, key)\n\n if counters:\n for counter in counters:\n\n counter_sum += int(counter.get(self.data_property, 0))\n return counter_sum\n else:\n key = self.sharded_key(key, 1) # only one shard\n counter = self.get_item(table_name, key)\n if counter:\n return counter.get(self.data_property, None)\n return None", "def getNumberOfPivotPointKeys(self, view) -> int:\n ...", "def get_num_items(self):\r\n return self.num_items", "def count(self, value): # real signature unknown; restored from __doc__\n return 0" ]
[ "0.7219808", "0.7189522", "0.71250147", "0.68928266", "0.67245823", "0.6536939", "0.6466635", "0.64591026", "0.643793", "0.63102275", "0.6303591", "0.62909424", "0.62568396", "0.6254312", "0.61354196", "0.61354196", "0.6092566", "0.6090408", "0.6070197", "0.599113", "0.59652716", "0.5956196", "0.5948309", "0.5928237", "0.59272444", "0.591528", "0.5896874", "0.5886319", "0.58678436", "0.58607763", "0.58501416", "0.57972234", "0.57669973", "0.5766817", "0.5741437", "0.5681153", "0.56258416", "0.5624855", "0.55817914", "0.5568417", "0.5561163", "0.5550437", "0.55480814", "0.5526124", "0.55243933", "0.55109704", "0.54827344", "0.54635596", "0.5463249", "0.5433611", "0.54293835", "0.5424627", "0.54208577", "0.5416399", "0.537788", "0.5366796", "0.5366595", "0.53662133", "0.53661126", "0.53661126", "0.5365852", "0.53657204", "0.5360503", "0.5351846", "0.5350759", "0.5347263", "0.53460175", "0.5335869", "0.53353786", "0.5293263", "0.52874213", "0.5285923", "0.52725136", "0.5269841", "0.5263656", "0.52615035", "0.5259719", "0.525949", "0.52472717", "0.5244666", "0.52399397", "0.5233931", "0.52312464", "0.52287", "0.52204156", "0.5214279", "0.52046365", "0.5202672", "0.5202466", "0.5188259", "0.51729816", "0.5172045", "0.5172045", "0.5171336", "0.51559407", "0.51523584", "0.5147403", "0.5146431", "0.5143027", "0.51427364" ]
0.71737325
2
Return full list of items in TopK list of ``key``.
def topkList(self, key):
        return self.execute_command(self.TOPK_LIST, key)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_for_key(self, key) -> list:\n return [res[key] for res in self.list]", "def get_list(key):\n ret = hookenv.action_get(key)\n return ret.split() if ret else []", "def topkQuery(self, key, *items):\n params = [key]\n params += items\n \n return self.execute_command(self.TOPK_QUERY, *params)", "def getlist(self, key):\n try:\n return dict.__getitem__(self, key)\n except KeyError:\n return []", "def key_list(dict):\n list = []\n for key in dict:\n list.append(key)\n return list", "def getlist(self, key):\n try:\n vals = _dict_getitem(self, key.lower())\n except KeyError:\n return []\n else:\n if isinstance(vals, tuple):\n return [vals[1]]\n else:\n return vals[1:]", "def get_list(self, k: str) -> List:\n return self._redis.lrange(k, 0, -1)", "def get_adjacent_keys(self, key: str) -> List[str]:\n return [k for k in self.get_adjacent(key)]", "def list_values(key):\n return meta.list_values(key=key)", "def keys(self, key=None, lo=None, hi=None, reverse=None, max=None,\n include=False, txn=None, rec=None):\n return itertools.imap(ITEMGETTER_0,\n self.items(key, lo, hi, reverse, max, include, txn, rec))", "def list(self, key):\n\n if \"~\" in key or key == \"title\":\n v = self(key, connector=\"\\n\")\n if v == \"\": return []\n else: return v.split(\"\\n\")\n elif key in self: return self[key].split(\"\\n\")\n else: return []", "def topk(vec, k):\n vec = torch.topk(vec, k)\n return vec.view(-1).data.tolist()", "def keys(self) -> List:\n pass", "def expand_nested_lists(query, key):\n items = []\n for item in query[key]:\n if isinstance(item, list):\n items.extend(item)\n else:\n items.extend([item])\n return items", "def GetSubkeys(self):", "def hgetall(self, key):\n return self._command(b'HGETALL', key, handler=list_to_dict)", "def uplink_buys_by_key(self, key):\n buys = []\n for buy in self.uplinkbuys:\n if buy.mindkey == key:\n buys.append(buy)\n return buys", "def get_descendants(self, key: str) -> Sequence[str]:\n raise NotImplementedError", "def getall(self, key):\n return self.values.get(key, [])", "async def get_all(self, key: datastore.Key) -> RV:\n\t\treturn await (await self.get(key)).collect() # type: ignore[return-value]", "def champion_keys():\n keys = []\n for champion_id in champions[\"data\"]:\n keys.append(champions[\"data\"][str(champion_id)][\"key\"])\n return sorted(keys)", "def get_list(key, nodename=None):\n return _get_property(key, nodename, [])", "def keys(self, *args, **kwargs):\n return self._list(*args, **kwargs)", "def topkInfo(self, key):\n \n return self.execute_command(self.TOPK_INFO, key)", "def list_all_keys(riak_host,riak_port,bucket):\n url='http://%s:%s/buckets/%s/keys?keys=true' % (riak_host,riak_port,bucket)\n #print url\n r=requests.get(url)\n print json.dumps(r.json(), sort_keys=True, indent=4)", "def summarize(\n self,\n key: Callable[['Classification'], Union[Iterable[str], str]] = None,\n top_k: int = None,\n ):\n ctr = Counter(sorted(filter(None, flatmap(key, self.values()))))\n return [elt for elt, _ in ctr.most_common(top_k)]", "def get_items(self, value, key=None):\n if key is None:\n return self.dicts(value)\n else:\n items = self.dicts(value)\n return [item[key] for item in items]", "def fetch_all_keys():\n response = TIME_TABLE.scan()\n items = response['Items']\n items.sort(key=lambda x: x['timeStamp'])\n response = ''\n for item in items:\n response = '{0}\\n{1}'.format(response, item)\n return response", "def _getbundlelistkeysparts(\n bundler, repo, source, bundlecaps=None, b2caps=None, **kwargs\n):\n listkeys = 
kwargs.get('listkeys', ())\n for namespace in listkeys:\n part = bundler.newpart(b'listkeys')\n part.addparam(b'namespace', namespace)\n keys = repo.listkeys(namespace).items()\n part.data = pushkey.encodekeys(keys)", "def _force_key_as_list(self, key):\r\n return [key] if isinstance(key, (str, unicode)) else key", "def flatten_objects_to_list(objects, key):\n\n return_list = []\n\n for object in objects:\n return_list.append(object[key])\n\n return return_list", "def keypairs(self):\n return list(self._list(_keypair.Keypair, paginated=False))", "def get_key_list(self) -> list:\n return self.key_functs.keys()", "def list_(bank):\n try:\n _, keys = api.kv.get(bank + \"/\", keys=True, separator=\"/\")\n except Exception as exc: # pylint: disable=broad-except\n raise SaltCacheError(f'There was an error getting the key \"{bank}\": {exc}')\n if keys is None:\n keys = []\n else:\n # Any key could be a branch and a leaf at the same time in Consul\n # so we have to return a list of unique names only.\n out = set()\n for key in keys:\n out.add(key[len(bank) + 1 :].rstrip(\"/\"))\n keys = [o for o in out if not o.endswith(_tstamp_suffix)]\n return keys", "def union(self, key: str, skip_duplicates=False) -> list:\n result = []\n for items in self.get(key):\n for item in items:\n if skip_duplicates and item in result:\n continue\n result.append(item)\n return result", "def _yield_keys(self, key):\n if self._len_keys > 1:\n keys = self._validate_and_split_key(key)\n for key in keys:\n yield tuple(sorted(list(key)))\n else:\n yield from self._validate_and_split_key(key)", "def top(self, k):\n if not 1 <= k <= len(self):\n raise ValueError('Illegal value for k')\n walk = self._data.first()\n for j in range(k):\n item = walk.element() # element of list is _Item\n yield item._value\n walk = self._data.after(walk)", "def ballot_get_tag_values_by_key(key):\r\n return make_request({\"method\": \"ballot_get_tag_values_by_key\",\r\n \"params\": [key],\r\n \"jsonrpc\": \"2.0\",\r\n \"id\": 0, })", "def list_ids (self, key):\n\n list_of_key_values = [str(x[key]) for x in self.result]\n\n self.result = list(dict.fromkeys([re.findall(r'\\b\\d+\\b', x)[0] for x in list_of_key_values if len(re.findall(r'\\b\\d+\\b', x)) !=0]))\n\n return self", "def extract_key_nodes(self, key, nodes=None):\n if nodes is None:\n nodes = []\n if self.name == key:\n nodes.append(self)\n for i in range(len(self.children)):\n self.children[i].extract_key_nodes(key, nodes=nodes)", "def __getitem__(self, key: K) -> List[V]:\n return self._table.get(key)", "def _getKeyList(self):\n return LinkedList(InternalRack(self, 1))", "def get_keys(self, ckey=None):\n if ckey:\n keys = self._get_keys(ckey)\n else:\n keys = self.keys()\n for key in self.keys():\n keys += [k for k in self._get_keys(key)]\n return list(set(keys))", "def gets(self, key):\n result = self._get_raw_input()[key]\n if isinstance(result, list):\n return deepcopy(result)\n return [result]", "def keys(self):\r\n return [k for k in self]", "def __get_items(self, sort_keys_function=None):\n if sort_keys_function:\n return sorted(list(self.items), key=sort_keys_function)\n return self.items", "def items(self):\n result = self._dict.keys( )\n try: result.sort( )\n except: pass\n\n return result", "def getkeys(self):\n return list(self.keys)", "def _get_keys(self, listOfKeys):\n return self._keys", "def getKeyObjs(self,\n key,\n value = None):\n keyList = []\n keys = self.__keyObjs\n tmpKey = key + \"___\"\n for keyIdx in keys:\n if ((string.find(keyIdx, tmpKey) != -1) and (value != 
None)):\n if (re.search(value,self.__keyObjs[keyIdx].getValue())!=None):\n keyList.append(self.__keyObjs[keyIdx])\n elif (string.find(keyIdx, tmpKey) != -1):\n keyList.append(self.__keyObjs[keyIdx])\n return keyList", "def get_all_dicts_by_key(pcb_data: List[Dict[str, Any]], key: str) -> List[Dict[str, Any]]:\n res: List[Dict[str, Any]] = list()\n for d in pcb_data:\n if isinstance(d, dict) and key in d.keys():\n res.append(d)\n return res", "def has_keys(self, key_in_pointer):\n start = self.head\n rList = []\n while start:\n if key_in_pointer in start.getMember().keys():\n rList.append(start)\n start = start.getLink()\n return rList", "def keys(rbt, keylo, keyhi):\n try:\n lstkeys = lt.newList('SINGLELINKED', rbt['cmpfunction'])\n lstkeys = keysRange(rbt['root'], keylo, keyhi, lstkeys,\n rbt['cmpfunction'])\n return lstkeys\n except Exception as exp:\n error.reraise(exp, 'RBT:keys')", "def getAllKeyValuePair(self,root,key):\n\n if root==None:\n return []\n \n node = root\n result = []\n\n for index,child in enumerate(node.children):\n if(child!=None):\n if(child.value!=None):\n result.append((key+str(index),child.value.value))\n \n result += self.getAllKeyValuePair(child,key+str(index))\n\n return result", "def __getitem__(self, key):\n if isinstance(key, list):\n return plist([self[k] for k in key], root=plist([KeyValue(k, self[k]) for k in key]))\n else:\n return dict.__getitem__(self, key)", "def get_listu_komponenti(self):\n popis = sorted(list(self.komponente.keys()))\n return popis", "def GET(self, key):\n header('Content-Type', 'application/json')\n return dumps(list_values(key=key))", "def get_list_keys(rpc_user, rpc_pwd):\n data = '{\"jsonrpc\":\"2.0\",\"id\":\"1\",\"method\":\"listkeys\"}'\n return call_rpc(rpc_user, rpc_pwd, data)", "def __getitem__(self, key: K) -> Iterable[V]:\n raise NotImplementedError", "def ListFiles(bucketname, client, key):\n response = client.list_objects(Bucket=bucketname, Prefix=key)\n for content in response.get('Contents', []):\n yield content.get('Key')", "async def list_keys(request: web.Request) -> web.Response:\n keys = [\n {'uri': '/wifi/keys/{}'.format(key.directory),\n 'id': key.directory,\n 'name': os.path.basename(key.file)} for key in wifi.list_keys()\n ]\n return web.json_response({'keys': keys}, status=200)", "def build_list(self, word_list):\n # Get frequency list for keys\n freq = word_list.groupby('key').agg('count')\n # Filter out only keys with greater or equal frequency to length\n key_list = freq.loc[freq['word'] >= freq.index.str.len()]\n return key_list", "def list_all_keys(self):\n \n return self.keys", "def full_K(self):\n\n return kron_list(self.Ks)", "def getlist(self, key, type=None):\n if key not in self:\n return []\n values = super().__getitem__(key)\n if type is not None:\n values = [type(value) for value in values]\n return values", "def items(self):\n x = []\n for k in list(self.keys()):\n x.append((k, self[k]))\n return x", "def group(self, key=None, value=None):\n if key is None:\n key = self.keys[0]\n if key not in self.keys:\n raise ValueError('The key you entered does not exist in this ALE.')\n data = sorted(self.dicts(value), key=lambda d: d.get(key))\n result = [list(g) for k, g in groupby(data)]\n return result", "def top(self, k):\n if not 1 <= k <= len(self):\n raise ValueError('Illegal value for k')\n walk = self._data.first()\n for j in range(k):\n item = walk.element() # element of list is Item\n yield item._value # using the customized __iter__ method\n walk = self._data.after(walk)", "def 
get_descendants(self, key: str) -> Sequence[str]:\n return descendants(self.task_graph, key)", "def getlist(self, key):\n \n value = self.get(key, [])\n if value is None or isinstance(value, (list, tuple)):\n return value\n else:\n return [value]", "def pullAll(*keys):", "def __getitem__(self, key):\n list_ = dict.__getitem__(self, key)\n try:\n return list_[-1]\n except IndexError:\n return []", "def keys(self):\n return [ x for x in self ]", "def _sparse2seq(self, key):\n seq = []\n for (d,v) in key:\n seq.append(d)\n seq.append(v)\n return seq", "def topkAdd(self, key, *items):\n params = [key]\n params += items\n \n return self.execute_command(self.TOPK_ADD, *params)", "def keysAll():", "def get_list(self):\n return sorted(self.__entries.keys())", "def get_keys(self):\r\n return self._keys", "def GetSubkeyByPath(self, key_path):", "def keys(self, redis_key: str):\n for k in self.client.keys(pattern=\"{}*\".format(redis_key)):\n deserialized_key = k.decode('utf-8')\n print(deserialized_key)", "def _to_order(key):\n return list(sorted(key).index(char) for char in key)", "def all_key_seqs(template):\n result = []\n for k, v in template.iteritems():\n if isinstance(v, dict):\n for suffix in all_key_seqs(v):\n result.append([k] + suffix)\n else:\n result.append([k])\n return result", "def getByKey(self, taskKey):\n tlist = []\n for doc in self.mgdb.task_library.find({\"task_key\": taskKey}):\n tlist.append(doc)\n return tlist", "def list_user_keys(self):\n return AlgoliaUtils_request(self.headers, self.read_hosts, \"GET\", \"/1/keys\", self.timeout)", "def get(self, key):\n bucket = self.table[self._hash(key)]\n return [item for item in bucket if item[0] == key][0]", "def get(self, key):\n bucket = self.table[self._hash(key)]\n return [item for item in bucket if item[0] == key][0]", "def __getitem__ (self, key):\n if not self.data.has_key(key):\n return []\n else:\n return self.data[key]", "def keyEventList(self):\n return self._keyEventList", "async def keys(self) -> Iterable[str]:", "def __getitem__(self, key):\n if isinstance(key, list):\n return plist([self[k] for k in key], root=plist([KeyValue(k, self[k]) for k in key]))\n else:\n return defaultdict.__getitem__(self, key)", "def get_key_list(self, email=\"\"):\n\t\tif email:\n\t\t\twhere_clause = \" where email = '%s'\" % email\n\t\telse:\n\t\t\twhere_clause = \"\"\n\n\t\treturn self.app.db.query(\n\t\t\t\"\"\"\n\t\t\tselect\n\t\t\t\tapi_key,\n\t\t\t\towner,\n\t\t\t\tapp_name,\n\t\t\t\temail,\n\t\t\t\turl,\n\t\t\t\tcreated\n\t\t\tfrom\n\t\t\t\tapi_keys\n\t\t\t%s\n\t\t\t\"\"\" % where_clause)", "def index(self, key):\r\n return self.keyOrder.index(key)", "def get_top_k_ports(self, k):\n port_list = self.extract_list(k)\n return port_list", "def __getslice__(self, start, stop):\n tuples = [(key, self.dict[key])for key in self.dict.iterkeys()]\n tuples = sorted(tuples, key=itemgetter(1), reverse=True)[start:stop]\n return [key for key, value in tuples]", "def getPubs(self, key):\n if hasattr(key, \"encode\"):\n key = key.encode(\"utf-8\") # convert str to bytes\n return self.getVal(self.pubs, key)", "def top_k(self, k = 1):\n\t if self.shapley_rank == {}:\n\t \treturn []\n\n\t n = self.nodes\n\t topknodes = []\n\t i = 0\n\t count = 0\n\t while count < k and not i == n:\n\t if self.shapley_rank[i][0] not in topknodes and not self.is_adj(self.shapley_rank[i][0], topknodes):\n\t topknodes.append(self.shapley_rank[i][0])\n\t count += 1\n\t i += 1\n\t i = 0\n\t if not count == k:\n\t while not count == k:\n\t if self.shapley_rank[i][0] not in 
topknodes:\n\t topknodes.append(self.shapley_rank[i][0])\n\t count += 1\n\t i += 1\n\t return topknodes", "def get_user_key_acl(self, key):\n return AlgoliaUtils_request(self.headers, self.read_hosts, \"GET\", \"/1/keys/%s\" % key, self.timeout)", "def get_keys(self) -> list:\r\n keys = []\r\n for key, value in self._items:\r\n keys.append(key)\r\n return keys", "def hkeys(self, key):\n return self._command(b'HKEYS', key, handler=list_of_keys)", "def as_list(self, first_key, last_key, result):\n\n if first_key <= self.key <= last_key:\n result.append(self.key)\n\n self.left and self.left.as_list(first_key, last_key, result)\n self.right and self.right.as_list(first_key, last_key, result)\n\n return result" ]
[ "0.6962184", "0.657635", "0.63970435", "0.631337", "0.6151614", "0.6138156", "0.6085079", "0.6063242", "0.59806406", "0.59505635", "0.59002477", "0.58530796", "0.5838963", "0.58134776", "0.57977164", "0.57843095", "0.5766967", "0.57466274", "0.57404083", "0.5712583", "0.57029986", "0.57026654", "0.57009965", "0.57004935", "0.5691315", "0.56544966", "0.56457067", "0.5645122", "0.5624143", "0.56213534", "0.55863535", "0.5584628", "0.55573934", "0.5554459", "0.55428207", "0.55428076", "0.55117977", "0.5510928", "0.5505294", "0.5494696", "0.54827976", "0.5480127", "0.5479365", "0.54664034", "0.5466145", "0.54651743", "0.5463993", "0.54633313", "0.54529494", "0.5438689", "0.5432459", "0.54125285", "0.5409672", "0.54044443", "0.5403575", "0.5396527", "0.5395767", "0.5380599", "0.5364224", "0.5363806", "0.53578365", "0.5355259", "0.5350317", "0.53306407", "0.5328474", "0.5324971", "0.53218544", "0.53137386", "0.53120506", "0.5310284", "0.53096616", "0.53082263", "0.52997696", "0.5292273", "0.5291067", "0.52892554", "0.52891284", "0.52852035", "0.5277263", "0.52704096", "0.5266508", "0.52651566", "0.525386", "0.52524525", "0.5252361", "0.5252361", "0.524793", "0.52392185", "0.5232948", "0.52181417", "0.5210712", "0.520723", "0.52029216", "0.5199739", "0.519662", "0.51856375", "0.5185415", "0.5182225", "0.5180238", "0.51788276" ]
0.82646406
0
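
Note on the row above: its positive document simply forwards the raw TOPK.LIST command through execute_command. The following self-contained sketch is a hypothetical stub client, not a live RedisBloom connection — the recorded command tuple and the fake return value are assumptions used only to illustrate that delegation:

# Hypothetical stub for illustration only: it records the command that
# topkList() would send instead of talking to a RedisBloom-enabled server.
class StubTopKClient:
    TOPK_LIST = "TOPK.LIST"

    def __init__(self):
        self.sent = []  # commands that would have gone to the server

    def execute_command(self, *args):
        self.sent.append(args)
        # A real server would reply with every item currently tracked for the key.
        return ["foo", "bar", "baz"]

    def topkList(self, key):
        # Same shape as the positive document above: delegate to execute_command.
        return self.execute_command(self.TOPK_LIST, key)

client = StubTopKClient()
print(client.topkList("mytopk"))  # ['foo', 'bar', 'baz']
print(client.sent)                # [('TOPK.LIST', 'mytopk')]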
Returns k, width, depth and decay values of ``key``.
def topkInfo(self, key):
        return self.execute_command(self.TOPK_INFO, key)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def k(self):\n return self._k", "def k(self):\n return self._k", "def k(self):\n return self._k", "def _cache_key_from_kvs_key(self, key):\r\n if key.scope == Scope.user_state:\r\n return (key.scope, key.block_scope_id)\r\n elif key.scope == Scope.user_state_summary:\r\n return (key.scope, key.block_scope_id, key.field_name)\r\n elif key.scope == Scope.preferences:\r\n return (key.scope, key.block_scope_id, key.field_name)\r\n elif key.scope == Scope.user_info:\r\n return (key.scope, key.field_name)", "def quantize_key_values(key):\n if isinstance(key, dict):\n return key.keys()\n\n return key", "def ky(self, k: int) -> float:\n result = self._read_inline(f\"ky({k})\")\n return result", "def K(self):\n return self._K", "def _get_key_stretches(self):\n return self.__key_stretches", "def getDims(self, k, name):\n if k not in self.sp_dicts: return\n value = self.sp_dicts[k].get(name, None)\n if self.debug:\n print(sub(\"DIMS {:d}: {} -> {}\", k, name, value))\n return value", "def K(self):\n return self._properties['K']", "def format_length( self, key ) :\r\n\r\n return struct.calcsize( self[key] )", "def kx(self, k: int) -> float:\n result = self._read_inline(f\"kx({k})\")\n return result", "def topkReserve(self, key, k, width, depth, decay):\n params = [key, k, width, depth, decay]\n \n return self.execute_command(self.TOPK_RESERVE, *params)", "def K(self) -> int:\n return self.params.K", "def get_nominal(self, key):\n return ((hash(key) % 12) + 6.0) * 3", "def memory_key_values(k, v, num_mem_kv, dim_batch, dim_heads, variable_dtype, mesh):\n\n dim_mem_kv = mtf.Dimension(\"mem_kv_sequence\", num_mem_kv)\n emb_dim = k.shape[-1]\n mem_std = 1 / math.sqrt(emb_dim.size)\n\n mem_k = mtf.get_variable(mesh, \"mem_k\", mtf.Shape([dim_mem_kv, dim_heads, emb_dim]),\n initializer=tf.random_normal_initializer(stddev=mem_std),\n master_dtype=variable_dtype.master_dtype,\n slice_dtype=variable_dtype.slice_dtype,\n activation_dtype=variable_dtype.activation_dtype,\n )\n mem_v = mtf.get_variable(mesh, \"mem_v\", mtf.Shape([dim_mem_kv, dim_heads, emb_dim]),\n initializer=tf.random_normal_initializer(stddev=mem_std),\n master_dtype=variable_dtype.master_dtype,\n slice_dtype=variable_dtype.slice_dtype,\n activation_dtype=variable_dtype.activation_dtype)\n\n mem_k, mem_v = map(lambda t: mtf.broadcast(t, [dim_batch, dim_mem_kv, dim_heads, emb_dim]),\n (mem_k, mem_v))\n mem_k, mem_v = map(lambda t: mtf.rename_dimension(t, \"mem_kv_sequence\", \"sequence\"),\n (mem_k, mem_v))\n\n k = mtf.concat([mem_k, k], \"sequence\")\n v = mtf.concat([mem_v, v], \"sequence\")\n return k, v", "def getK(self):\n return self.getOrDefault(self.k)", "def get_importance(self, key, value, depth):\n multiplier = 0.8 ** depth if depth > 1 else 1.0\n base = 0.0\n if key in ['condition', 'symptom', 'disease', 'treatment']:\n base += 5\n elif key in ['gender', 'age'] or 'location' in key:\n base += 4\n elif 'condition' in key or 'symptom' in key or 'disease' in key or 'treatment' in key:\n base += 3\n else:\n base += 2\n return multiplier * base", "def k(self):\n return add(self.k_b(), self.k_m())", "def getScaleKeyTimes(self, view) -> list[float]:\n ...", "def decode(k, key_length):\n key = k[:key_length]\n val_length, ber_length = decode_ber(k[key_length:])\n value = k[key_length + ber_length : key_length + ber_length + val_length]\n return key, value", "def keyPattern(pattshape, bitDepth, key):\n flatdim = reduce(lambda x,y: x*y, pattshape)\n pattern = np.array( range(0, flatdim) ) * 0\n if key == int((2**bitDepth)**flatdim) - 1:\n 
pattern.shape = pattshape\n return pattern + (int(2**bitDepth) - 1)\n if key == 0:\n pattern.shape = pattshape\n return pattern * 0\n for i in range(0, len(pattern)):\n pattern[i] = key % int(2**bitDepth)\n key >>= bitDepth\n pattern.shape = pattshape\n return pattern", "def k(self) -> np.ndarray:\n if self._k is None:\n self._k = Counter(self.values())\n\n # return as array\n k = self._k\n kmax = max(k.keys())\n karr = np.array([k[i] for i in range(kmax + 1)])\n return karr", "def get_kf_kms(self):\n kfkms = np.array([ self.kf / (ss.velfac * 3.085678e24/ss.units.UnitLength_in_cm) for ss in self.spectrae])\n return kfkms", "def get_layers(l_k):\n d = get_min_depth(l_k)\n layers = {k: [] for k in range(d + 1)}\n for i in l_k.keys():\n layers[l_k[i]].append(i)\n return d, layers", "def print_ncattr(key):\n try:\n print(\"\\t\\ttype:\", repr(nc_fid.variables[key].dtype))\n for ncattr in nc_fid.variables[key].ncattrs():\n print('\\t\\t%s:' % ncattr,\\\n repr(nc_fid.variables[key].getncattr(ncattr)))\n except KeyError:\n print(\"\\t\\tWARNING: %s does not contain variable attributes\" % key)\n return None", "def __getitem__(self, key) -> numbers.Number:\n if isinstance(key, MultiVector):\n return self.value[int(np.where(key.value)[0][0])]\n elif key in self.layout.bladeTupMap.keys():\n return self.value[self.layout.bladeTupMap[key]]\n elif isinstance(key, tuple):\n sign, blade = compute_reordering_sign_and_canonical_form(key, np.array(self.layout.sig),\n self.layout.firstIdx)\n return sign*self.value[self.layout.bladeTupMap[blade]]\n return self.value[key]", "def _get_params(self):\r\n return self.k._get_params()", "def get_number_of_features(key):\n sum = 0\n for name, module in common.QOL_PARAMS[key].items():\n sum += module.LENGTH\n\n return sum", "def getNumberOfScaleKeys(self, view) -> int:\n ...", "def create_dtype_fromkey(self, key): \n assert key not in self.dtypes # should not be creating new dtypes from existing key\n dt = DevelopmentType(key, self)\n self.dtypes[key] = dt\n # assign yields\n for mask, t, ycomps in self.yields:\n if self.match_mask(mask, key):\n for yname, ycomp in ycomps:\n dt.add_ycomp(t, yname, ycomp)\n # assign actions and transitions\n for acode in self.oper_expr:\n for mask in self.oper_expr[acode]:\n if self.match_mask(mask, key):\n dt.oper_expr[acode].append(self.oper_expr[acode][mask]) \n #print 'building transitions for acode', acode, ' '.join(key)\n for mask in self.transitions[acode]:\n if self.match_mask(mask, key):\n for scond in self.transitions[acode][mask]:\n for x in self.resolve_condition(scond, key): \n dt.transitions[acode, x] = self.transitions[acode][mask][scond]\n if not dt.transitions:\n self.inoperable_dtypes.append(key)\n return dt", "def get_min_depth(l_k):\n return max(l_k.values())", "def __getitem__(self, key: Tuple[int, int]) -> complex:\n return self.coeff[self._core.index_alpha(key[0]),\n self._core.index_beta(key[1])]", "def __init__(self, mode=KSamplingModes.monkhorst, num_kpts= 0,\n kpts=((1, 1, 1),),\n kpt_shifts=(0.5, 0.5, 0.5),\n kpts_weights=None, use_symmetries=True, use_time_reversal=True, chksymbreak=None,\n comment=None):\n if isinstance(mode, str):\n mode = KSamplingModes[mode]\n\n super(KSampling, self).__init__()\n\n self.mode = mode\n self.comment = comment\n\n self.num_kpts = num_kpts\n self.kpts = kpts\n self.kpt_shifts = kpt_shifts\n self.kpts_weights = kpts_weights\n self.use_symmetries = use_symmetries\n self.use_time_reversal = use_time_reversal\n self.chksymbreak = chksymbreak\n\n abivars = {}\n\n if mode == 
KSamplingModes.monkhorst:\n assert num_kpts == 0\n ngkpt = np.reshape(kpts, 3)\n shiftk = np.reshape(kpt_shifts, (-1,3))\n\n if use_symmetries and use_time_reversal: kptopt = 1\n if not use_symmetries and use_time_reversal: kptopt = 2\n if not use_symmetries and not use_time_reversal: kptopt = 3\n if use_symmetries and not use_time_reversal: kptopt = 4\n\n abivars.update({\n \"ngkpt\" : ngkpt,\n \"shiftk\" : shiftk,\n \"nshiftk\" : len(shiftk),\n \"kptopt\" : kptopt,\n \"chksymbreak\": chksymbreak,\n })\n\n elif mode == KSamplingModes.path:\n if num_kpts <= 0:\n raise ValueError(\"For Path mode, num_kpts must be specified and >0\")\n\n kptbounds = np.reshape(kpts, (-1,3))\n #print(\"in path with kptbound: %s \" % kptbounds)\n\n abivars.update({\n \"ndivsm\" : num_kpts,\n \"kptbounds\": kptbounds,\n \"kptopt\" : -len(kptbounds)+1,\n })\n\n elif mode == KSamplingModes.automatic:\n kpts = np.reshape(kpts, (-1,3))\n if len(kpts) != num_kpts:\n raise ValueError(\"For Automatic mode, num_kpts must be specified.\")\n\n abivars.update({\n \"kptopt\" : 0,\n \"kpt\" : kpts,\n \"nkpt\" : num_kpts,\n \"kptnrm\" : np.ones(num_kpts),\n \"wtk\" : kpts_weights, # for iscf/=-2, wtk.\n \"chksymbreak\": chksymbreak,\n })\n\n else:\n raise ValueError(\"Unknown mode %s\" % mode)\n\n self.abivars = abivars\n #self.abivars[\"#comment\"] = comment", "def subdimension(self, key):\n \n index = self.to_index(key)\n if isinstance(index, int):\n return None\n # here index is a slice\n if index.stop - index.start <= 1:\n # Here key reresent a single element\n return None\n return LookupTableDimension(self.toUnit.y[index])", "def dispersion(self, k):\n return 2*self.material.gamma*np.sqrt(self.material.z)", "def get(self,root,key):\n node = root\n for digit in key:\n node = node.children[ord(digit)-ord('0')]\n if(node==None):\n return None\n return node.value.value", "def click_weights_measures_key(key, timeout=default_timeout):\n return click_key(WEIGHTS_MEASURES[key], timeout)", "def compute_key_value(self) -> Dict[str, float]:\n per_class, micro, macro, weighted = self.compute()\n metrics = self._convert_metrics_to_kv(\n per_class=per_class, micro=micro, macro=macro, weighted=weighted\n )\n return metrics", "def compute_key_value(self) -> Dict[str, float]:\n per_class, micro, macro, weighted = self.compute()\n metrics = self._convert_metrics_to_kv(\n per_class=per_class, micro=micro, macro=macro, weighted=weighted\n )\n return metrics", "def print_ncattr(key):\n try:\n print(\"\\t\\ttype:\", repr(nc_fid.variables[key].dtype))\n for ncattr in nc_fid.variables[key].ncattrs():\n print('\\t\\t%s:' % ncattr,\\\n repr(nc_fid.variables[key].getncattr(ncattr)))\n except KeyError:\n print(\"\\t\\tWARNING: %s does not contain variable attributes\" % key)", "def print_ncattr(key):\n try:\n print(\"\\t\\ttype:\", repr(nc_fid.variables[key].dtype))\n for ncattr in nc_fid.variables[key].ncattrs():\n print('\\t\\t%s:' % ncattr,\\\n repr(nc_fid.variables[key].getncattr(ncattr)))\n except KeyError:\n print(\"\\t\\tWARNING: %s does not contain variable attributes\" % key)", "def k(self) -> np.ndarray:\n return self._vector[12:18]", "def _cleankey(self, key, is_view=None):\n m, n = self.shape\n if is_view == None:\n if isinstance(key, int) and any([x == 1 for x in self.shape]):\n is_view = False\n elif isinstance(key, tuple) and all(\n [isinstance(x, int) for x in key]):\n is_view = False\n else:\n is_view = True\n\n if isinstance(key, tuple) and len(key) == 2 and all(\n [isinstance(x, list) for x in key]):\n return (*key, is_view)\n\n 
if isinstance(key, MPView):\n key = (key.p_rows, key.p_cols)\n return (*key, is_view)\n\n if isinstance(key, int) or isinstance(key, slice) or isinstance(\n key, list):\n # One index given, check if vector shaped.\n if m == 1: # row vector case\n key = ([0], key)\n elif n == 1: # col vector case\n key = (key, [0])\n else: # if not vector-shaped, all columns are implicitly indexed\n key = (key, list(range(n)))\n row_key, col_key = key\n\n if isinstance(row_key, slice):\n row_key = list(range(m))[row_key]\n elif isinstance(row_key, int):\n row_key = [row_key]\n\n if isinstance(col_key, slice):\n col_key = list(range(n))[col_key]\n elif isinstance(col_key, int):\n col_key = [col_key]\n\n return (row_key, col_key, is_view)", "def print_ncattr(key):\r\n try:\r\n print (\"\\t\\ttype:\", repr(nc_fid.variables[key].dtype))\r\n for ncattr in nc_fid.variables[key].ncattrs():\r\n print ('\\t\\t%s:' % ncattr,\\\r\n repr(nc_fid.variables[key].getncattr(ncattr)))\r\n except KeyError:\r\n print (\"\\t\\tWARNING: %s does not contain variable attributes\" % key)", "def kz(self, k: int) -> float:\n result = self._read_inline(f\"kz({k})\")\n return result", "def compute_key_value(self) -> Dict[str, float]:\n precision_value, recall_value, f1_value = self.compute()\n kv_metrics = self._convert_metrics_to_kv(\n precision_value=precision_value, recall_value=recall_value, f1_value=f1_value\n )\n return kv_metrics", "def full_K(self):\n\n return kron_list(self.Ks)", "def compute_key_value(self) -> Dict[str, float]:\n precision_value, recall_value, f1_value = self.compute()\n kv_metrics = self._convert_metrics_to_kv(\n precision_value=precision_value, recall_value=recall_value, f1_value=f1_value,\n )\n return kv_metrics", "def _normalizeKeySlice(self, key):\n if key.start is None:\n kstart = (0, 0)\n else:\n kstart = key.start\n\n if key.stop is None:\n kstop = (self.width, self.height)\n else:\n kstop = key.stop\n\n if key.step is None:\n kstep = (1, 1)\n elif isinstance(key.step, int):\n # if only one int is specified, use it for both steps\n kstep = (key.step, key.step)\n else:\n kstep = key.step\n\n # x1 & y1 should be top-left, x2 & y2 should be bottom-right\n # So swap these values if need be.\n x1, y1 = kstart\n x2, y2 = kstop\n if x1 > x2:\n x1, x2 = x2, x1\n if y1 > y2:\n y1, y2 = y2, y1\n\n try:\n x1, y1 = self._convertNegativeTupleKeyToPositiveTupleKey((x1, y1))\n\n # Because x2 and y2 can go 1 past the end of the max index, the\n # _convertNegativeTupleKeyToPositiveTupleKey() may raise an exception.\n # So we need to pass dummy values so the exception isn't raised.\n if x2 != self.width and x2 != -(self.width - 1) and \\\n y2 != self.height and y2 != -(self.height - 1):\n x2, y2 = self._convertNegativeTupleKeyToPositiveTupleKey((x2, y2))\n elif x2 != self.width and x2 != -(self.width - 1):\n x2, _dummy = self._convertNegativeTupleKeyToPositiveTupleKey((x2, 0))\n elif y2 != self.height and y2 != -(self.height - 1):\n _dummy, y2 = self._convertNegativeTupleKeyToPositiveTupleKey((0, y2))\n else:\n pass # In this case, we don't need to adust x2 and y2 at all. 
So do nothing.\n except KeyError:\n raise PyTextCanvasException('key must be a tuple of two ints')\n\n return (x1, y1, x2, y2, kstep[0], kstep[1])", "def key(key):\n return key", "def get_keys2(self, key: str, epoch: int = None) -> Tuple[str, ...]:\n if epoch is None:\n epoch = self.get_epoch()\n d = self.stats[epoch][key]\n keys2 = tuple(k for k in d if k not in (\"time\", \"total_count\"))\n return keys2", "def _fancy_getitem(self, key):\n new_data = {}\n for i, k in enumerate(zip(*key)):\n if k in self.data:\n new_data[i] = self.data[k]\n return DOK(\n shape=(len(key[0])),\n data=new_data,\n dtype=self.dtype,\n fill_value=self.fill_value,\n )", "def getTransformKeyTimes(self, view) -> list[float]:\n ...", "def subarray_dimensions(self, keys):\n \n if len(keys) != len(self.dims):\n raise ValueError(\"Number of keys must be equal to the number of\" +\n \" dimensions. (Got \" + str(len(keys)) + \"/\"\n + str(len(self.dims)) + \")\")\n\n newDims = DimensionHelper()\n for key, dim in zip(keys, self.dims):\n newDim = dim.subdimension(key)\n if newDim is not None:\n newDims.dims.append(newDim)\n return newDims", "def actual_key(self, key):\n key_list = []\n if key.scope == Scope.children:\n key_list.append('children')\n elif key.scope == Scope.parent:\n key_list.append('parent')\n else:\n key_list.append([\"usage\", \"definition\", \"type\", \"all\"][key.scope.block])\n\n if key.block_scope_id is not None:\n key_list.append(key.block_scope_id)\n if key.student_id:\n key_list.append(key.student_id)\n return \".\".join(key_list)", "def __getitem__(self, key):\n if not isinstance(key, int):\n raise TypeError\n if key < 0 or key >= len(self.data):\n raise IndexError\n batch = self.data[key]\n batch_size = len(batch)\n batch = list(zip(*batch))\n assert len(batch) == 6\n\n # orig_idx = lens\n token_ids = np.array(seq_padding(batch[0], self.max_len))\n s_start, s_end = np.array(batch[1]), np.array(batch[2])\n o_labels = np.array(batch[3])\n distance_to_s = np.array(seq_padding(batch[4], self.max_len))\n mask = np.array(seq_padding(batch[5], self.max_len))\n\n # print(token_ids, s_start, s_end, o_labels)\n\n return (token_ids, distance_to_s, s_start, s_end, o_labels, mask)", "def sym_K(self):\n raise NotImplementedError", "def compute_qkv(query_antecedent,\n memory_antecedent,\n total_key_depth,\n total_vale_depth,\n q_filter_width=1,\n kv_filter_width=1,\n q_padding=\"VALID\",\n kv_padding=\"VALID\"):\n if memory_antecedent is None:\n memory_antecedent = query_antecedent\n q = compute_attention_component(\n query_antecedent, total_key_depth, q_filter_width, q_padding, \"q\")\n k = compute_attention_component(\n memory_antecedent, total_key_depth, kv_filter_width, kv_padding, \"k\")\n v = compute_attention_component(\n memory_antecedent, total_vale_depth, kv_filter_width, kv_padding, \"v\")\n return q, k, v", "def _deriv_keys(self, key):\n prom2abs = self._prom2abs\n abs2prom = self._abs2prom\n\n DERIV_KEY_SEP = self._DERIV_KEY_SEP\n\n # derivative could be tuple or string, using absolute or promoted names\n if isinstance(key, tuple):\n of, wrt = key\n else:\n of, wrt = key.split(DERIV_KEY_SEP)\n\n # if promoted, will map to all connected absolute names\n abs_of = [of] if of in abs2prom else prom2abs[of]\n if wrt in prom2abs:\n abs_wrt = [prom2abs[wrt]][0]\n else:\n abs_wrt = [wrt]\n\n abs_keys = ['%s%s%s' % (o, DERIV_KEY_SEP, w) for o, w in itertools.product(abs_of, abs_wrt)]\n\n prom_of = of if of in prom2abs else abs2prom[of]\n if wrt in abs2prom:\n prom_wrt = abs2prom[wrt]\n else:\n prom_wrt = 
wrt\n\n prom_key = (prom_of, prom_wrt)\n\n return abs_keys, prom_key", "def n_choose_kv(newK):\n values = np.zeros((1,newK+1))\n ks = np.arange(newK+1)\n \n for i in range(newK+1):\n values[i] = scipy.misc.comb(newK, ks[i])\n\n return values", "def get_size(self, key):\n try:\n return wait(self.proto.vsiz(key))\n except TyrantError:\n raise KeyError(key)", "def kem_encapsulated_key_len(self, public_key):\n result = self._lib_vscf_ecc.vscf_ecc_kem_encapsulated_key_len(self.ctx, public_key.c_impl)\n return result", "def to_kv(k: str, v: Union[int, float]) -> Tuple[str, float]:\n return (k, pow(v, 2))", "def _validate_and_split_len(self, key):\n if isinstance(key, tuple) and len(key) == self._len_keys:\n if any([\n not _is_good_iterable(v) and not isinstance(v, str)\n for v in key\n ]):\n raise KeyError(\"The key {} is not valid.\".format(key))\n # convert str to single item list for proper enumeration using\n # product\n key_types_list = [[v] if isinstance(v, str) else v for v in key]\n return list(product(*key_types_list))\n elif _is_iterable(key):\n keys = []\n for k in key:\n keys.extend(self._validate_and_split_len(k))\n return keys\n else:\n raise KeyError(\"The key {} is not valid.\".format(key))", "def size(self, key):\n _id, feature = self._extract(key)\n return self.client.sound_feature_size(_id, feature)", "def __compute_qkv(queries, keys, values, n_head, d_key, d_value):\n q = layers.fc(input=queries, size=d_key * n_head,\n bias_attr=False, num_flatten_dims=2)\n fc_layer = wrap_layer_with_block(\n layers.fc, fluid.default_main_program().current_block().parent_idx\n ) if cache is not None and static_kv else layers.fc\n k = fc_layer(input=keys, size=d_key * n_head,\n bias_attr=False, num_flatten_dims=2)\n v = fc_layer(input=values, size=d_value * n_head,\n bias_attr=False, num_flatten_dims=2)\n return q, k, v", "def getNumberOfSkewXKeys(self, view) -> int:\n ...", "def __init__(self, key):\n\n def keys(key, num_rounds):\n \"\"\"Yields the permuted key bitstring for i = 1..num_rounds\"\"\"\n C, D = key[:28], key[28:]\n # Rounds are 1-indexed, so shift array over by one\n left_shifts = [None, 1, 1, 2, 2, 2, 2, 2, 2, 1, 2, 2, 2, 2, 2, 2, 1]\n for i in range(1, num_rounds + 1):\n # Negate each rotation to rotate left.\n C, D = rotate(C, -left_shifts[i]), rotate(D, -left_shifts[i])\n yield self.permute(C + D, self._CD_permutation)\n\n self.key = list(bits_of(key, 64))\n # Permute the key. The permutation discards the parity bits...\n self.key = self.permute(self.key, self._key_permutation)\n self.number_of_rounds = 16\n # A list of the 16 keys K1 .. 
K16, shifted over by one to allow 1-indexing.\n self.keys = [None] + list(keys(self.key, self.number_of_rounds))", "def getSkewXKeyTimes(self, view) -> list[float]:\n ...", "def GetKeyByPath(self, key_path):", "def shared_key_len(self, key):\n result = self._lib_vscf_ecc.vscf_ecc_shared_key_len(self.ctx, key.c_impl)\n return result", "def compute_key_value(self) -> Dict[str, float]:\n # @TODO: ddp hotfix, could be done better\n if self._is_ddp:\n for key in self.statistics:\n value: List[np.ndarray] = all_gather(self.statistics[key])\n value: np.ndarray = np.sum(np.vstack(value), axis=0)\n self.statistics[key] = value\n\n per_class, micro, macro, weighted = self.compute()\n metrics = self._convert_metrics_to_kv(\n per_class=per_class, micro=micro, macro=macro, weighted=weighted\n )\n return metrics", "def padding_depth(self):\n\t\treturn self.paddings_shape_param('D')", "def kem_shared_key_len(self, key):\n result = self._lib_vscf_ecc.vscf_ecc_kem_shared_key_len(self.ctx, key.c_impl)\n return result", "def __getitem__(self, key):\n return self._data[self.__ptBin(key[0])][self.__etaBin(key[1])]", "def KSA(key):\n key_length = len(key)\n tab = list(range(MOD))\n j = 0\n for i in range(MOD):\n j = (j + tab[i] + key[i % key_length]) % MOD\n tab[i], tab[j] = tab[j], tab[i]\n return tab", "def get_attributes_from_amount_of_elements(self, key):\n l_of_attr_val = []\n els = self.driver.find_elements(self.by, self.value)\n for i in range(len(els)):\n el = els[0].find_elements(self.by, self.value)[i].get_attribute(key)\n l_of_attr_val.append(el)\n logging.getLogger(__name__).info(\n \"Attributes from amount of elements: {}\\nby = {}\\nvalue = {}\".format(l_of_attr_val, self.by, self.value))\n return l_of_attr_val", "def potential_parameters(cls):\n return [\"k\", \"length\"]", "def recursive_dict_key_print(dict_in, spacer=\"\"):\n if type(dict_in) is not dict:\n return\n next_spacer = spacer + \" \"\n for key, value in dict_in.items():\n try:\n print(spacer, f\"{key} : {value.shape}\")\n except(AttributeError):\n print(spacer, key)\n recursive_dict_key_print(value, next_spacer)", "def log_depth(key, prefix, batch, i=0):\n depth = batch[key] if is_dict(batch) else batch\n inv_depth = 1. 
/ depth[i]\n inv_depth[depth[i] == 0] = 0\n return prep_image(prefix, key,\n viz_inv_depth(inv_depth, filter_zeros=True))", "def getKmers(seq, k):\n \n kmd = {}\n \n for i in range(len(seq)+1-k):\n kmer = seq[i:i+k]\n kmd[kmer] = kmd.get(kmer,0) + 1\n return kmd", "def dcg_at_k(cls, r, k):\n assert k >= 1\n r = np.asfarray(r)[:k]\n if r.size:\n return np.sum(r / np.log2(np.arange(2, r.size + 2)))\n return 0.", "def get_sample_size(self, key=None):\n if key is None:\n return len(self.Y)\n else:\n return len(self.get_partitions(self.persistence)[key])", "def __compound_key(key):\n x_int = int(key[0])\n y_int = int(key[1])\n zeros = len(str(y_int))\n key = x_int * (10 ** zeros) + y_int\n\n return key", "def scaled_dot_product_attention(query, key, value):\n matmul_qk = tf.matmul(query, key, transpose_b=True)\n\n # scale matmul_qk\n depth = tf.cast(tf.shape(key)[-1], tf.float32)\n logits = matmul_qk / tf.math.sqrt(depth)\n\n # softmax is normalized on the last axis (seq_len_k)\n attention_weights = tf.nn.softmax(logits, axis=-1)\n\n output = tf.matmul(attention_weights, value)\n\n return output, attention_weights", "def key():", "def key_parameters(fit_root, tag=\"parameter\"):\n param_list = []\n for child in fit_root.iter(tag):\n param_list.append((child.attrib['id'], float(child.attrib['value'])))\n params = KeyedList(param_list)\n return params", "def __getitem__(self, key):\r\n return self._getAttrMap()[key]", "def get_scale(scale='major', key=60):\n SCALE_DICT = get_keys()\n notes = [key] + [(key + i) for i in np.cumsum(SCALE_DICT[scale])]\n return notes", "def to_kv(k: str, v: Union[int, float]) -> Tuple[str, float]:\n return (k, v**2)", "def key_to_struct(key: RsaKey) -> bytes:\n mod = int_to_bytes(key.n)\n exponent = int_to_bytes(key.e)\n\n return b\"\\x00\\x00\\x00\\x80\" + mod + b\"\\x00\\x00\\x00\\x03\" + exponent", "def cmsInitByDim(self, key, width, depth):\n params = [key, width, depth]\n \n return self.execute_command(self.CMS_INITBYDIM, *params)", "def _visit(path, key, value):\n if isinstance(value, BaseKerasCallback):\n return (key, keras_callback_to_dict(value))\n return (key, value)", "def get_hole_bit_width(k: int) -> int:\n\n return math.ceil(math.log2(k))", "def predict(self, key):\n return self.counts.get(key, 1.0)", "def __getitem__(self, key):\n\n return self.fvals[key]", "def KETAMA(key):\n d = hashlib.md5(key).digest()\n c = _signed_int32\n h = c((ord(d[3])&0xff) << 24) | c((ord(d[2]) & 0xff) << 16) | \\\n c((ord(d[1]) & 0xff) << 8) | c(ord(d[0]) & 0xff)\n return h", "def cdf(self, k):\n\n if k < 0 or k > self.n:\n return 0\n\n k = int(k)\n ans = 0\n for i in range(0, k + 1):\n ans += self.pmf(i)\n return ans", "def getNumberOfKeys(self, attr, view) -> int:\n ...", "def subdimension(self, key):\n \n index = self.to_index(key)\n if isinstance(index, int):\n return None\n\n # here index is a slice\n if index.stop - index.start <= 1:\n # Here key represent a single element\n return None\n units = self.to_unit(index) # recompute units for clean borders\n return AffineDimension([units.start, units.stop], index.stop - index.start)" ]
[ "0.537914", "0.537914", "0.537914", "0.53716195", "0.5363166", "0.53627735", "0.53413373", "0.5331749", "0.5288173", "0.52487564", "0.52258396", "0.51713914", "0.5165139", "0.5122881", "0.51025707", "0.5095331", "0.5091592", "0.50683814", "0.5058652", "0.4994853", "0.49159655", "0.4911045", "0.4899111", "0.4884827", "0.4875523", "0.48690704", "0.4868461", "0.4848587", "0.48430288", "0.48337394", "0.4825496", "0.4824961", "0.48116806", "0.47988567", "0.4787117", "0.47832364", "0.47805616", "0.47770602", "0.4774833", "0.4774833", "0.47711897", "0.47711897", "0.47570944", "0.47568667", "0.4754731", "0.47396034", "0.4737566", "0.47300312", "0.47299376", "0.47220984", "0.47219485", "0.47185293", "0.47081858", "0.47062314", "0.47022367", "0.4701368", "0.46961546", "0.46915886", "0.46903682", "0.46771577", "0.46705824", "0.46688312", "0.4668722", "0.46671432", "0.46650553", "0.46617144", "0.46587008", "0.4654214", "0.46478638", "0.4641462", "0.4635386", "0.46274155", "0.46265498", "0.46128154", "0.46108535", "0.46096078", "0.4598885", "0.4596504", "0.4596401", "0.459314", "0.4582609", "0.4579344", "0.45765594", "0.4571274", "0.45683008", "0.45650098", "0.4564347", "0.45593205", "0.4556052", "0.45552886", "0.45550618", "0.4549996", "0.4543535", "0.45435208", "0.4542008", "0.45408767", "0.45402595", "0.45275944", "0.45252568", "0.45151964", "0.45100778" ]
0.0
-1
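
A note on the row above: topkInfo reads back the k/width/depth/decay parameters that a TOPK.RESERVE call (the topkReserve wrapper visible among the negatives) established. The sketch below is a hypothetical in-memory stand-in — there is no Redis server involved, and the dict-shaped reply is an assumption — meant only to show that round trip:

# Hypothetical in-memory stand-in: a dict plays the role of the server-side
# Top-K structure so the reserve parameters can be read back via topkInfo().
class StubTopKInfoClient:
    def __init__(self):
        self._structures = {}

    def topkReserve(self, key, k, width, depth, decay):
        # Mirrors TOPK.RESERVE key k width depth decay
        self._structures[key] = {"k": k, "width": width,
                                 "depth": depth, "decay": decay}
        return True

    def topkInfo(self, key):
        # Mirrors TOPK.INFO key: report the parameters the key was reserved with.
        return self._structures[key]

client = StubTopKInfoClient()
client.topkReserve("mytopk", k=10, width=50, depth=5, decay=0.9)
print(client.topkInfo("mytopk"))
# {'k': 10, 'width': 50, 'depth': 5, 'decay': 0.9}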
Return a new pipeline object that can queue multiple commands for later execution. ``transaction`` indicates whether all commands should be executed atomically. Apart from making a group of operations atomic, pipelines are useful for reducing the back-and-forth overhead between the client and server. Overridden in order to provide the right client through the pipeline.
def pipeline(self, transaction=True, shard_hint=None): p = Pipeline( connection_pool=self.connection_pool, response_callbacks=self.response_callbacks, transaction=transaction, shard_hint=shard_hint) return p
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def pipeline(self, transaction=True, shard_hint=None):\n p = AsyncPipeline(\n connection_pool=self.client.connection_pool,\n response_callbacks=self._MODULE_CALLBACKS,\n transaction=transaction,\n shard_hint=shard_hint,\n )\n p.index_name = self.index_name\n return p", "def pipeline(self, transaction=True, shard_hint=None):\n p = Pipeline(\n connection_pool=self.client.connection_pool,\n response_callbacks=self._MODULE_CALLBACKS,\n transaction=transaction,\n shard_hint=shard_hint,\n )\n p.index_name = self.index_name\n return p", "def pipeline(self, transaction=True, shard_hint=None):\n return MockRedisPipeline(self, transaction, shard_hint)", "def createPipe(self, transaction):\n pipe = detectPipeClass(transaction.dev, transaction.endpt)(self)\n name = \"Dev %s, %s\" % (transaction.dev, transaction.getTransferString())\n self.appendCanvas(name, pipe.stack)\n return pipe", "def pipelines(self):\n return PipelineManager(session=self._session)", "def pipeline(self) -> Pipeline:\n if self._to_pipeline is None:\n raise AttributeError(\n \"pipeline not available because `to_pipeline` was not set on __init__.\"\n )\n return self._to_pipeline(self)", "def wrap_transaction(self):\n new_script = self.__class__()\n new_script.append(\n [BeginStatement()] + self.statements + [CommitStatement()])\n\n return new_script", "def multi(self):\n if self._transaction_state not in (None, \"watch\"):\n raise ValueError(\"MULTI calls can not be nested\")\n ret = self._command(b'MULTI', handler=\"OK\")\n self._transaction_state = [] # This is used in self._command\n return ret", "def _pipeline(self):\n try:\n b = self._pipeline_cache\n except AttributeError:\n r = open_redis_connection()\n b = self._pipeline_cache = r.pipeline()\n return b", "def from_pipeline(cls, pipeline, proba=None, repeat=None):\n if proba is None:\n if repeat is None:\n new_p = cls(pipeline=pipeline)\n else:\n if pipeline.num_actions == 1 and pipeline.get_last_action_proba() is None:\n new_p = cls(pipeline=pipeline, repeat=repeat)\n else:\n new_p = cls()\n new_p.append_pipeline(pipeline, repeat=repeat)\n else:\n if pipeline.num_actions == 1 and pipeline.get_last_action_repeat() is None:\n new_p = cls(pipeline=pipeline, proba=proba)\n else:\n new_p = cls()\n new_p.append_pipeline(pipeline, proba=proba)\n return new_p", "def pipeline(self):\n # gotta avoid circular imports by deferring\n from .pipeline import Pipeline\n return Pipeline().from_source(self._collection)", "def transaction(self):\n return Transaction(self)", "async def connect_pipeline(\n *, connect=None, bind=None, loop=None, translation_table=None\n):\n if loop is None:\n loop = asyncio.get_event_loop()\n\n transp, proto = await create_zmq_connection(\n lambda: _ClientProtocol(loop, translation_table=translation_table),\n zmq.PUSH,\n connect=connect,\n bind=bind,\n loop=loop,\n )\n return PipelineClient(loop, proto)", "def make_pipeline():\n \n # Base universe set to the QTradableStocksUS\n base_universe = QTradableStocksUS()#Q500US()\n base_universe = (base_universe & Q500US())\n base_universe = (base_universe & Fundamentals.market_cap.latest.top(150))\n \n # Factor of yesterday's close price.\n #yesterday_close = USEquityPricing.close.latest\n \n pipe = Pipeline(\n columns={\n #'close': yesterday_close,\n 'sector': Sector(),\n },\n screen=base_universe\n )\n return pipe", "def delay_pipeline(pipeline, pipe):\n _pipeline = delayed(pipeline[0].curry())(pipe)\n for task in pipeline[1:]:\n _pipeline = delayed(task.curry())(_pipeline)\n\n return _pipeline", "def 
make_pipeline(context):\n \n # Base universe of top 500 US stocks.\n base_universe_filter = Q500US()\n\n # Stocks of only tech sector.\n tech_sector = Sector(mask=base_universe_filter)\n tech_universe_filter = base_universe_filter & tech_sector.eq(311)\n\n # Top 10 tech stocks with largest market cap.\n mkt_cap_filter = morningstar.valuation.market_cap.latest\n top_mkt_cap_tech_filter = mkt_cap_filter.top(context.NUM_SYMBOLS, mask=tech_universe_filter)\n\n # Bollinger band factor with Stdev factor 2.\n lower_band_factor, middle_factor, upper_band_factor = BollingerBands(window_length=22, k=2, mask=top_mkt_cap_tech_filter)\n\n # Percent difference between (price, lower_band) and (price, upper_band).\n price = USEquityPricing.close.latest\n buy_percent_factor = ((lower_band_factor - price)*100)/price\n sell_percent_factor = ((price - upper_band_factor)*100)/price\n\n # Mean reversion buy and sell filters.\n # Sell when price exceeds upper-band and buy when price is below lower-band.\n buy_filter = buy_percent_factor > 0\n sell_filter = sell_percent_factor > 0\n\n # Build and return the Pipeline.\n pipe_bbands = Pipeline(columns={'buy_percent': buy_percent_factor,\n 'lower_band': lower_band_factor,\n 'buy': buy_filter,\n 'price': price,\n 'sell': sell_filter,\n 'upper_band': upper_band_factor,\n 'sell_percent': sell_percent_factor}, screen=top_mkt_cap_tech_filter)\n \n return pipe_bbands", "def pipelines(self):\r\n return pipelines.Pipelines(self)", "def make_pipeline():\r\n base_universe = Q1500US()\r\n sector = Sector() \r\n # screen is based off of returns\r\n returns = Returns(window_length = 2)\r\n # check if stock price has good strength, but not necessarily overbought\r\n rsi = RSI() \r\n price = USEquityPricing.close.latest\r\n # creating filter by specifying the type of returns desired\r\n top_return_stocks = returns.top(1,mask=base_universe, groupby=sector)\r\n pipe = Pipeline(\r\n columns = {\r\n 'rsi': rsi,\r\n 'price': price\r\n },\r\n # filter top return stocks, and stocks that are not being overbought\r\n # but are not too oversold either\r\n screen = base_universe & top_return_stocks & (20 < rsi < 80)\r\n # the above is equivalent to: choose stocks from the base universe that have had the top returns in their sectors and have a good RSI value\r\n )\r\n return pipe", "def pipeline(self):\n return self._pipeline", "def pipeline(self):\n return self._pipeline", "def pipeline(\n self,\n name: str,\n description: Optional[str] = None,\n labels: Optional[Sequence[PipelineLabel]] = None,\n ) -> \"Client\":\n _args = [\n Arg(\"name\", name),\n Arg(\"description\", description, None),\n Arg(\"labels\", labels, None),\n ]\n _ctx = self._select(\"pipeline\", _args)\n return Client(_ctx)", "def async_pipe(self, **kwargs):\n return AsyncPipe(source=self.async_fetch(), **kwargs)", "def pipeline(args) :\n from pipeliner import create_pipeline\n create_pipeline(args)", "def make_pipeline():\n # exchange = Fundamentals.exchange_id.latest\n # nyse_filter = exchange.eq('NYS')\n symbol_filter = StaticSids([TRADING_SID])\n set_benchmark(TRADING_SID) \n # volume_filter = VolumeFilter(\n # inputs=[USEquityPricing.volume],\n # window_length=1,\n # mask=symbol_filter\n # )\n\n # is_setup = volume_filter & alpha_long_weekly & alpha_long_daily\n weekly_high = WeeklyHigh(\n inputs=[USEquityPricing.high],\n mask=symbol_filter\n )\n weekly_low = WeeklyLow(\n inputs=[USEquityPricing.low],\n mask=symbol_filter\n )\n weekly_classifier = WeeklyClassifier(\n inputs=[\n USEquityPricing.open,\n 
USEquityPricing.high,\n USEquityPricing.low,\n USEquityPricing.close\n ],\n mask=symbol_filter\n )\n daily_classifier = DailyClassifier(\n inputs=[\n USEquityPricing.open,\n USEquityPricing.high,\n USEquityPricing.low,\n USEquityPricing.close\n ],\n mask=symbol_filter\n\n )\n\n pipe = Pipeline(\n screen=symbol_filter, # & (daily_classifier > 0),\n columns={\n 'daily_classifier': daily_classifier,\n 'daily_high': USEquityPricing.high.latest,\n 'daily_low': USEquityPricing.low.latest,\n 'weekly_classifier': weekly_classifier,\n 'weekly_high': weekly_high,\n 'weekly_low': weekly_low\n }\n )\n return pipe", "def make_pipeline():\n universe = TradableStocksUS('Real Estate') | TradableStocksUS('Utilities') | \\\n TradableStocksUS('Consumer Staples') | TradableStocksUS('Technology') | \\\n TradableStocksUS('Financials') | TradableStocksUS('Energy') | \\\n TradableStocksUS('Materials') | TradableStocksUS('Health Care') | \\\n TradableStocksUS('Industrials') | TradableStocksUS('Consumer Discretionary') | \\\n TradableStocksUS('Communications')\n\n roic = shfd.slice(dimension='MRT', period_offset=0).ROIC.latest\n ebit = shfd.slice(dimension='MRQ', period_offset=0).EBIT.latest\n ev = shfd.slice(dimension='MRQ', period_offset=0).EV.latest\n volatility = AnnualizedVolatility(window_length=100)\n value = ebit / ev\n\n roic_rank = roic.rank(mask=universe)\n value_rank = value.rank(mask=universe)\n volatility_rank = volatility.rank(mask=universe, ascending=False)\n\n spy_ma100_price = SMA(inputs=[USEquityPricing.close], \n window_length=100)[algo.sid(\"FIBBG000BDTBL9\")]\n spy_price = USEquityPricing.close.latest[algo.sid(\"FIBBG000BDTBL9\")]\n\n momentum_score = MomentumScore()\n\n overall_rank = roic_rank + value_rank + volatility_rank\n\n # seven_month_returns = Returns(window_length=148, mask=universe,)\n # one_month_returns = Returns(window_length=30, mask=universe,)\n\n pipeline = Pipeline(\n columns={\n 'stock' : master.SecuritiesMaster.Symbol.latest,\n 'sid': master.SecuritiesMaster.Sid.latest,\n 'sector' : master.SecuritiesMaster.usstock_Sector.latest,\n 'average_dollar_volume': AverageDollarVolume(window_length=200),\n 'price': EquityPricing.close.latest,\n 'volume': EquityPricing.volume.latest,\n 'roic' : roic,\n 'value' : value,\n 'volatility': volatility,\n 'roic_rank' : roic_rank,\n 'value_rank' : value_rank,\n 'momentum': momentum_score,\n 'momentum_decile': momentum_score.deciles(),\n 'volatility_decile' : volatility.deciles(),\n 'overall_rank' : overall_rank,\n 'overall_rank_decile': overall_rank.deciles(),\n 'trend_filter': spy_price > spy_ma100_price,\n # 'returns' : one_month_returns - seven_month_returns\n }, \n screen = universe\n )\n\n return pipeline", "def pipeline(self, pipeline_id):\r\n return pipelines.Pipeline(self, pipeline_id)", "def transaction(self, *args, **kwargs):\n # Build database transaction class\n class DBTransaction(BaseTransaction, self.__class__):\n pass\n return DBTransaction(self, *args, **kwargs)", "def pipe(self, func, *args, **kwargs):\n return func(self, *args, **kwargs)", "def make_pipeline():\r\n\r\n # Custom universe containing only desired assets (stocks with flag data)\r\n universe = StaticSids(my_stocks)\r\n\r\n return Pipeline(\r\n columns={\r\n #'flag_type': algo_data_full.flag_type.latest,\r\n #'flag_price': algo_data_full.flag_price.latest,\r\n #'end_flag_date': algo_data_full.end_flag_date.latest,\r\n #'end_flag_price': algo_data_full.end_flag_price.latest,\r\n 'up_flags': flag_counts.up.latest,\r\n 'down_flags': flag_counts.down.latest,\r\n 
'up_ratio': up_ratios_2.up_ratio.latest,\r\n 'close': USEquityPricing.close.latest,\r\n },\r\n screen=universe\r\n )", "def trace_pipeline(pipe):\n _patch_multi_exec_execute(pipe)", "def append(self, pipeline):\n for stage in pipeline.pipe:\n self._pipe.append(stage)\n return self", "def crm_pipelines(self):\n from hubspot3.crm_pipelines import PipelinesClient\n\n return PipelinesClient(**self.auth, **self.options)", "def async_pipe(*args, **kwargs):\n return parser(*args, **kwargs)", "async def __aenter__(self) -> 'Batch':\n return self", "def _create_pipeline(self) -> TfmIterator:\n # 1. Initialise TubRecord -> x, y transformations\n def get_x(record: TubRecord) -> Dict[str, Union[float, np.ndarray]]:\n \"\"\" Extracting x from record for training\"\"\"\n out_dict = self.model.x_transform(record, self.image_processor)\n # apply the normalisation here on the fly to go from uint8 -> float\n out_dict['img_in'] = normalize_image(out_dict['img_in'])\n return out_dict\n\n def get_y(record: TubRecord) -> Dict[str, Union[float, np.ndarray]]:\n \"\"\" Extracting y from record for training \"\"\"\n y = self.model.y_transform(record)\n return y\n\n # 2. Build pipeline using the transformations\n pipeline = self.sequence.build_pipeline(x_transform=get_x,\n y_transform=get_y)\n return pipeline", "def construct(args,\n **kwargs):\n kw = parse_args(args)\n kw.update(kwargs)\n return (build_pipeline(**kw),\n kw)", "def transactional(*tr_args, **tr_kwargs):\n\n def decorate(func):\n try:\n parameter = tr_kwargs[\"parameter\"]\n except KeyError:\n parameter = \"tr\"\n\n wfunc = func\n while getattr(wfunc, \"__wrapped__\", None):\n wfunc = wfunc.__wrapped__\n if hasattr(inspect, \"getfullargspec\"):\n index = inspect.getfullargspec(wfunc).args.index(parameter)\n else:\n index = inspect.getargspec(wfunc).args.index(parameter)\n\n if getattr(func, \"_is_coroutine\", False):\n\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n if isinstance(args[index], TransactionRead):\n raise asyncio.Return((yield asyncio.From(func(*args, **kwargs))))\n\n largs = list(args)\n tr = largs[index] = args[index].create_transaction()\n\n while True:\n try:\n ret = yield asyncio.From(func(*largs, **kwargs))\n yield asyncio.From(tr.commit())\n raise asyncio.Return(ret)\n except FDBError as e:\n yield asyncio.From(tr.on_error(e.code))\n\n else:\n\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n # We can't throw this from the decorator, as when a user runs\n # >>> import fdb ; fdb.api_version(fdb.LATEST_API_VERSION)\n # the code above uses @transactional before the API version is set\n if fdb.get_api_version() >= 630 and inspect.isgeneratorfunction(func):\n raise ValueError(\n \"Generators can not be wrapped with fdb.transactional\"\n )\n\n if isinstance(args[index], TransactionRead):\n return func(*args, **kwargs)\n\n largs = list(args)\n tr = largs[index] = args[index].create_transaction()\n\n committed = False\n # retries = 0\n # start = datetime.datetime.now()\n # last = start\n\n while not committed:\n ret = None\n try:\n ret = func(*largs, **kwargs)\n if fdb.get_api_version() >= 630 and inspect.isgenerator(ret):\n raise ValueError(\n \"Generators can not be wrapped with fdb.transactional\"\n )\n tr.commit().wait()\n committed = True\n except FDBError as e:\n tr.on_error(e.code).wait()\n\n # now = datetime.datetime.now()\n # td = now - last\n # elapsed = (td.microseconds + (td.seconds + td.days * 24 * 3600) * 10**6) / float(10**6)\n # if elapsed >= 1:\n # td = now - start\n # print (\"fdb WARNING: long 
transaction (%gs elapsed in transactional function \\\"%s\\\" (%d retries, %s))\"\n # % (elapsed, func.__name__, retries, committed and \"committed\" or \"not yet committed\"))\n # last = now\n\n # retries += 1\n return ret\n\n return wrapper\n\n if not tr_args:\n # Being called with parameters (possibly none); return a\n # decorator\n return decorate\n elif len(tr_args) == 1 and not tr_kwargs:\n # Being called as a decorator\n return decorate(tr_args[0])\n else:\n raise Exception(\"Invalid use of transactional decorator.\")", "def pipeline(ctx):\n asyncio.run(pipeline_impl(ctx.obj[\"config\"]))", "def aggregate(self, pipeline, **kwargs):\n if kwargs.get('cursor') is False:\n kwargs.pop('cursor')\n # One-shot aggregation, no cursor. Send command now, return Future.\n return self._async_aggregate(pipeline, **kwargs)\n else:\n if 'callback' in kwargs:\n raise pymongo.errors.InvalidOperation(\n \"Pass a callback to to_list or each, not to aggregate.\")\n\n kwargs.setdefault('cursor', {})\n cursor_class = create_class_with_framework(\n AgnosticAggregationCursor, self._framework, self.__module__)\n\n # Latent cursor that will send initial command on first \"async for\".\n return cursor_class(self, pipeline, **kwargs)", "def newChain(self):\n\n\t\tmychain = Chain()\n\t\tself.addChain(mychain)\n\t\treturn mychain", "def transaction() -> Generator:\n session = current_session()\n logger.debug('transaction with session %s', id(session))\n try:\n yield session\n # Only commit if there are un-flushed changes. The caller may commit\n # explicitly, e.g. to do exception handling.\n if session.dirty or session.deleted or session.new:\n session.commit()\n logger.debug('committed!')\n except ClassicBaseException as e:\n logger.debug('Command failed, rolling back: %s', str(e))\n session.rollback()\n raise # Propagate exceptions raised from this module.\n except InvalidEvent:\n session.rollback()\n raise\n except Exception as e:\n logger.debug('Command failed, rolling back: %s', str(e))\n session.rollback()\n raise TransactionFailed('Failed to execute transaction') from e", "def execute_command(self, command_name, command, **options):\r\n # if the command_name is 'AUTH' or 'SELECT', then this command\r\n # must have originated after a socket connection and a call to\r\n # _setup_connection(). 
run these commands immediately without\r\n # buffering them.\r\n if command_name in ('AUTH', 'SELECT'):\r\n return super(Pipeline, self).execute_command(\r\n command_name, command, **options)\r\n else:\r\n self.command_stack.append((command_name, command, options))\r\n return self", "def exec(self):\n if self._transaction_state in (None, \"watch\"):\n raise ValueError(\"EXEC without MULTI\")\n handler_list, self._transaction_state = self._transaction_state, None\n return self._command(b'EXEC', handler=handler_list)", "def set_pipeline(self):\n pipe_distance = make_pipeline(DistanceTransformer(), RobustScaler())\n pipe_time = make_pipeline(TimeFeaturesEncoder(time_column='pickup_datetime'), OneHotEncoder(handle_unknown='ignore'))\n dist_cols = ['pickup_latitude', 'pickup_longitude', 'dropoff_latitude', 'dropoff_longitude']\n time_cols = ['pickup_datetime']\n feat_eng_bloc = ColumnTransformer([('time', pipe_time, time_cols),\n ('distance', pipe_distance, dist_cols)]\n )\n self.pipeline = Pipeline(steps=[('feat_eng_bloc', feat_eng_bloc),\n ('regressor', RandomForestRegressor())])\n return self.pipeline", "def transaction(self):\n return MySQLConnection.Transaction(self)", "def get_pipeline(self):\n if hasattr(self, \"pipeline\"):\n return self.pipeline\n steps = [\n # before preprocessor, comes the feature extractor\n ('extractor', TurkishFeatureExtractor()),\n # first the pre-processor\n (\"preprocessor\", TurkishPreprocessor(self.stemmer_name_to_method[self.stemmer_method])),\n (\"vectorizer\", TurkishVectorizer(self.feature_name_to_class[self.feature])),\n # use pca\n # (\"pca\", TruncatedSVD(n_components=20, n_iter=10)),\n (\"adder\", TurkishFeatureAdder(n_components=20, n_iter=10)),\n (\"model\", self.model_name_to_class[self.model])\n ]\n self.pipeline = Pipeline(steps)\n return self.pipeline", "def group(self, *args, **kwargs):\n def decorator(f):\n cmd = group( *args, **kwargs )( f )\n self.add_command(cmd)\n return cmd\n return decorator", "def atomic(self, savepoint=True):\n return TransactionContext(*self.values(), savepoint=True)", "def createPipeline(self, w):\n\n # code will make the ximagesink output in the specified window\n def set_xid(window):\n gtk.gdk.threads_enter()\n sink.set_xwindow_id(window.window.xid)\n sink.expose()\n gtk.gdk.threads_leave()\n\n # this code receives the messages from the pipeline. if we\n # need to set X11 id, then we call set_xid\n def bus_handler(unused_bus, message):\n if message.type == gst.MESSAGE_ELEMENT:\n if message.structure.get_name() == 'prepare-xwindow-id':\n set_xid(w)\n return gst.BUS_PASS\n\n # create our pipeline, and connect our bus_handler\n self.pipeline = gst.Pipeline()\n bus = self.pipeline.get_bus()\n bus.set_sync_handler(bus_handler)\n\n sink = gst.element_factory_make(\"ximagesink\", \"sink\")\n sink.set_property(\"force-aspect-ratio\", True)\n sink.set_property(\"handle-expose\", True)\n scale = gst.element_factory_make(\"videoscale\", \"scale\")\n cspace = gst.element_factory_make(\"ffmpegcolorspace\", \"cspace\")\n\n # our pipeline looks like this: ... ! cspace ! scale ! 
sink\n self.pipeline.add(cspace, scale, sink)\n scale.link(sink)\n cspace.link(scale)\n return (self.pipeline, cspace)", "def __call__(self, *pipeline_factories, exceptions=None, wait=True):\n return self.run(*pipeline_factories, exceptions=exceptions, wait=wait)", "def __new__(cls, *args, **kwargs):\n if cls is Chain:\n if args:\n bijectors = args[0]\n else:\n bijectors = kwargs.get('bijectors')\n\n if bijectors is not None:\n if not all(auto_composite_tensor.is_composite_tensor(b)\n for b in bijectors):\n return _Chain(*args, **kwargs)\n return super(Chain, cls).__new__(cls)", "def _pipe(self):\n if self._evdev:\n return None\n\n if not self.__pipe:\n target_function = self._get_target_function()\n if not target_function:\n return None\n\n self.__pipe, child_conn = Pipe(duplex=False)\n self._listener = Process(target=target_function,\n args=(child_conn,))\n self._listener.daemon = True\n self._listener.start()\n return self.__pipe", "def get_pipeline(self, y, n_quantiles=None):\n\n if n_quantiles is None:\n n_quantiles = _n_samples(y)\n\n self.pipe = _make_pipeline(estimator=self._regressor,\n transform=self.pipeline_transform,\n n_targets=_n_targets(y),\n random_state=self.random_state,\n verbose=self.verbose,\n n_jobs=self.n_jobs,\n cv=self.cv,\n memory=self.pipeline_memory,\n n_quantiles=n_quantiles,\n chain_order=self.chain_order,\n n_estimators=self.n_regressors,\n target_index=self.target_index,\n boosting_loss=self.boosting_loss,\n regularization=self.line_search_regularization,\n line_search_options=self.line_search_options)", "def make_full_pipeline(\n preprocess_pipe: ColumnTransformer, model: BaseEstimator\n) -> Pipeline:\n full_pipe = sklearn.pipeline.Pipeline(\n [(\"preprocess\", preprocess_pipe), (\"model\", model)]\n )\n return full_pipe", "def get_cb_pipeline(train):\n from src.features import alchemy_feat, counting_feat, nltk_feat\n features = [\n ('sentiment', alchemy_feat.Sentiment()),\n ('sent_len', counting_feat.SentenceLength()),\n ('tfidf', counting_feat.BagOfTfIDF(train)),\n ('ner', alchemy_feat.NER()),\n ('pos', nltk_feat.POS())\n ]\n return get_pipeline(features)", "def pipeline(\n self,\n name: str,\n description: Optional[str] = None,\n labels: Optional[Sequence[PipelineLabel]] = None,\n ) -> \"Container\":\n _args = [\n Arg(\"name\", name),\n Arg(\"description\", description, None),\n Arg(\"labels\", labels, None),\n ]\n _ctx = self._select(\"pipeline\", _args)\n return Container(_ctx)", "def set_pipeline(self):\n dist_pipe = Pipeline([\n ('dist_trans', DistanceTransformer()),\n ('stdscaler', StandardScaler())\n ])\n\n time_pipe = Pipeline([\n ('time_enc', TimeFeaturesEncoder('pickup_datetime')),\n ('ohe', OneHotEncoder(handle_unknown='ignore'))\n ])\n\n preproc_pipe = ColumnTransformer([\n ('distance', dist_pipe, [\"pickup_latitude\", \"pickup_longitude\", 'dropoff_latitude', 'dropoff_longitude']),\n ('time', time_pipe, ['pickup_datetime'])\n ], remainder=\"drop\")\n\n pipe = Pipeline([\n ('preproc', preproc_pipe),\n ('linear_model', LinearRegression())\n ])\n return pipe", "def _create_pipeline(self) -> codepipeline.Pipeline:\n source_output = codepipeline.Artifact()\n build_output = codepipeline.Artifact()\n return codepipeline.Pipeline(\n self,\n 'Pipeline',\n stages=[\n self._create_source_stage('Source', source_output),\n # self._create_image_build_stage(\n # 'Build', source_output, build_output),\n # self._create_deploy_stage('Deploy', build_output)\n ]\n )", "def transaction(self) -> ContextManager[Transaction]:\n old_transaction_id = 
self._get_transaction_id()\n transaction = super().start_transaction()\n self._set_transaction_id(transaction.id)\n\n try:\n yield transaction\n except Exception:\n super().delete_transaction(transaction=transaction)\n raise\n else:\n super().finish_transaction(transaction=transaction)\n finally:\n self._set_transaction_id(old_transaction_id)", "def push(self, **kwargs):\n return _taskpipeoperation(self,'push', **kwargs)", "def pipeline(self, *funcs) -> \"fn\":\n return self._mod.pipeline(self, *funcs)", "def createrawtransaction(self, inputs, outputs):\n return self.proxy.createrawtransaction(inputs, outputs)", "def preparePipelines(self):\n\n # Construct the differnent states making up the pipeline\n\n # Input assembly state describes how primitives are assembled\n # This pipeline will assemble vertex data as a triangle lists (though we only use one triangle)\n inputAssemblyState = vk.VkPipelineInputAssemblyStateCreateInfo(\n sType = vk.VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO,\n topology = vk.VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST\n )\n # Rasterization state\n rasterizationState = vk.VkPipelineRasterizationStateCreateInfo(\n sType = vk.VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO,\n polygonMode = vk.VK_POLYGON_MODE_FILL,\n cullMode = vk.VK_CULL_MODE_NONE,\n frontFace = vk.VK_FRONT_FACE_COUNTER_CLOCKWISE,\n depthClampEnable = vk.VK_FALSE,\n rasterizerDiscardEnable = vk.VK_FALSE,\n depthBiasEnable = vk.VK_FALSE,\n lineWidth = 1.0\n )\n # Color blend state describes how blend factors are calculated (if used)\n # We need one blend attachment state per color attachment (even if blending is not used\n blendAttachmentState = vk.VkPipelineColorBlendAttachmentState(\n colorWriteMask = 0xf,\n blendEnable = vk.VK_FALSE\n )\n colorBlendState = vk.VkPipelineColorBlendStateCreateInfo(\n sType = vk.VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO,\n attachmentCount = 1,\n pAttachments = [blendAttachmentState]\n )\n # Viewport state sets the number of viewports and scissor used in this pipeline\n # Note: This is actually overriden by the dynamic states (see below)\n viewportState = vk.VkPipelineViewportStateCreateInfo(\n sType = vk.VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO,\n viewportCount = 1,\n scissorCount = 1\n )\n # Enable dynamic states\n # Most states are baked into the pipeline, but there are still a few dynamic states that can be changed within a command buffer\n #To be able to change these we need do specify which dynamic states will be changed using this pipeline. 
Their actual states are set later on in the command buffer.\n # For this example we will set the viewport and scissor using dynamic states\n dynamicStateEnables = [vk.VK_DYNAMIC_STATE_VIEWPORT, vk.VK_DYNAMIC_STATE_SCISSOR]\n dynamicState = vk.VkPipelineDynamicStateCreateInfo(\n sType = vk.VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO,\n dynamicStateCount = len(dynamicStateEnables),\n pDynamicStates = dynamicStateEnables\n )\n\n # Depth and stencil state containing depth and stencil compare and test operations\n # We only use depth tests and want depth tests and writes to be enabled and compare with less or equal\n opState = vk.VkStencilOpState(\n failOp = vk.VK_STENCIL_OP_KEEP,\n passOp = vk.VK_STENCIL_OP_KEEP,\n compareOp = vk.VK_COMPARE_OP_ALWAYS\n )\n depthStencilState = vk.VkPipelineDepthStencilStateCreateInfo(\n sType = vk.VK_STRUCTURE_TYPE_PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO,\n depthTestEnable = vk.VK_TRUE,\n depthWriteEnable = vk.VK_TRUE,\n depthCompareOp = vk.VK_COMPARE_OP_LESS_OR_EQUAL,\n depthBoundsTestEnable = vk.VK_FALSE,\n stencilTestEnable = vk.VK_FALSE,\n front = opState,\n back = opState\n )\n # Multi sampling state\n # This example does not make use fo multi sampling (for anti-aliasing), the state must still be set and passed to the pipeline\n multisampleState = vk.VkPipelineMultisampleStateCreateInfo(\n sType = vk.VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO,\n rasterizationSamples = vk.VK_SAMPLE_COUNT_1_BIT,\n pSampleMask = None\n )\n # Vertex input descriptions\n # Specifies the vertex input parameters for a pipeline\n #Vertex input binding\n # This example uses a single vertex input binding at binding point 0 (see vkCmdBindVertexBuffers)\n vertexInputBinding = vk.VkVertexInputBindingDescription(\n binding = 0,\n stride = self.vertexShape.size * self.vertexShape.itemsize,\n inputRate = vk.VK_VERTEX_INPUT_RATE_VERTEX\n )\n # Input attribute bindings describe shader attribute locations and memory layouts\n vertexInputAttributs = []\n # These match the following shader layout (see triangle.vert):\n # layout (location = 0) in vec3 inPos;\n # layout (location = 1) in vec3 inColor;\n # Attribute location 0: Position\n vertexInputAttribut = vk.VkVertexInputAttributeDescription(\n binding = 0,\n location = 0,\n # Position attribute is three 32 bit signed (SFLOAT) floats (R32 G32 B32)\n format = vk.VK_FORMAT_R32G32B32_SFLOAT,\n offset = 0 # offsetof(vertexShape, position)\n )\n vertexInputAttributs.append(vertexInputAttribut)\n vertexInputAttribut = vk.VkVertexInputAttributeDescription(\n binding = 0,\n location = 1,\n # Color attribute is three 32 bit signed (SFLOAT) floats (R32 G32 B32)\n format = vk.VK_FORMAT_R32G32B32_SFLOAT,\n offset = self.vertexShape[0].size * self.vertexShape.itemsize # offsetof(vertexShape, color)\n )\n vertexInputAttributs.append(vertexInputAttribut)\n\n # Vertex input state used for pipeline creation\n vertexInputState = vk.VkPipelineVertexInputStateCreateInfo(\n sType = vk.VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO,\n vertexBindingDescriptionCount = 1,\n pVertexBindingDescriptions = [vertexInputBinding],\n vertexAttributeDescriptionCount = len(vertexInputAttributs),\n pVertexAttributeDescriptions = vertexInputAttributs\n )\n # Shaders\n shaderStages = []\n # Vertex shader\n shaderStage = vk.VkPipelineShaderStageCreateInfo(\n sType = vk.VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,\n # Set pipeline stage for this shader\n stage = vk.VK_SHADER_STAGE_VERTEX_BIT,\n # Load binary SPIR-V shader\n module = 
vks.vulkantools.loadShader(self.getAssetPath() + \"shaders/triangle/triangle.vert.spv\", self.device),\n pName = \"main\"\n )\n shaderStages.append(shaderStage)\n # Fragment shader\n shaderStage = vk.VkPipelineShaderStageCreateInfo(\n sType = vk.VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,\n # Set pipeline stage for this shader\n stage = vk.VK_SHADER_STAGE_FRAGMENT_BIT,\n # Load binary SPIR-V shader\n module = vks.vulkantools.loadShader(self.getAssetPath() + \"shaders/triangle/triangle.frag.spv\", self.device),\n pName = \"main\"\n )\n shaderStages.append(shaderStage)\n\n # Assign the pipeline states to the pipeline creation info structure\n pipelineCreateInfo = vk.VkGraphicsPipelineCreateInfo(\n sType = vk.VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO,\n # The layout used for this pipeline (can be shared among multiple pipelines using the same layout)\n layout = self.pipelineLayout,\n # Renderpass this pipeline is attached to\n renderPass = self.renderPass,\n pVertexInputState = vertexInputState,\n pInputAssemblyState = inputAssemblyState,\n pRasterizationState = rasterizationState,\n pColorBlendState = colorBlendState,\n pMultisampleState = multisampleState,\n pViewportState = viewportState,\n pDepthStencilState = depthStencilState,\n pDynamicState = dynamicState,\n stageCount = len(shaderStages),\n pStages = shaderStages\n )\n # Create rendering pipeline using the specified states\n self.pipelines = vk.vkCreateGraphicsPipelines(self.device, self.pipelineCache, 1, [pipelineCreateInfo], None)\n try:\n self.pipeline = self.pipelines[0]\n except TypeError:\n self.pipeline = self.pipelines\n # Shader modules are no longer needed once the graphics pipeline has been created\n vk.vkDestroyShaderModule(self.device, shaderStages[0].module, None)\n vk.vkDestroyShaderModule(self.device, shaderStages[1].module, None)", "def make_process_pipelines(\n self, dataset, return_epochs=False, return_raws=False, postprocess_pipeline=None\n ):\n if return_epochs and return_raws:\n message = \"Select only return_epochs or return_raws, not both\"\n raise ValueError(message)\n\n self.prepare_process(dataset)\n\n raw_pipelines = self._get_raw_pipelines()\n epochs_pipeline = self._get_epochs_pipeline(return_epochs, return_raws, dataset)\n array_pipeline = self._get_array_pipeline(\n return_epochs, return_raws, dataset, postprocess_pipeline\n )\n\n if array_pipeline is not None:\n events_pipeline = (\n self._get_events_pipeline(dataset) if return_raws else EpochsToEvents()\n )\n else:\n events_pipeline = None\n\n if events_pipeline is None and array_pipeline is not None:\n log.warning(\n f\"event_id not specified, using all the dataset's \"\n f\"events to generate labels: {dataset.event_id}\"\n )\n events_pipeline = (\n RawToEvents(dataset.event_id)\n if epochs_pipeline is None\n else EpochsToEvents()\n )\n\n process_pipelines = []\n for raw_pipeline in raw_pipelines:\n steps = []\n steps.append((StepType.RAW, SetRawAnnotations(dataset.event_id)))\n if raw_pipeline is not None:\n steps.append((StepType.RAW, raw_pipeline))\n if epochs_pipeline is not None:\n steps.append((StepType.EPOCHS, epochs_pipeline))\n if array_pipeline is not None:\n array_events_pipeline = ForkPipelines(\n [\n (\"X\", array_pipeline),\n (\"events\", events_pipeline),\n ]\n )\n steps.append((StepType.ARRAY, array_events_pipeline))\n process_pipelines.append(Pipeline(steps))\n return process_pipelines", "def queue(self):\n\n qml._current_context._append_op(self)\n return self # so pre-constructed Observable instances can be queued 
and returned in a single statement", "def make_pipeline():\r\n\r\n mkt_cap_screen = (morningstar.valuation.market_cap.latest > 1e9)\r\n\r\n return Pipeline(\r\n columns={\r\n 'Free Cash Flow': morningstar.cash_flow_statement.free_cash_flow.latest,\r\n }, screen=mkt_cap_screen)", "async def pipeline_impl(config):\n await generate_groups_impl(config)\n await merge_singular_plural_impl(config)\n await add_parent_groups_impl(config)\n await prune_single_groups_impl(config)\n await move_inner_items_impl(config)\n await split_large_groups_impl(config)", "def transactions(self):\r\n return tx.Transactions(self)", "def batch(self, batch_identifier, begin_operation):\n logger.debug('Starting transaction...')\n self.publish(\n batch_operation=BatchOperation(\n batch_identifier=batch_identifier,\n begin_operation=begin_operation,\n ),\n )\n\n def mutation(mutation_operation):\n return self.publish(\n batch_operation=BatchOperation(\n batch_identifier=batch_identifier,\n mutation_operation=mutation_operation,\n ),\n )\n\n try:\n yield mutation\n except Exception:\n logger.debug('Attempting to publish rollback of in progress transaction...')\n self.publish(\n batch_operation=BatchOperation(\n batch_identifier=batch_identifier,\n rollback_operation=RollbackOperation(),\n ),\n )\n logger.debug('Published rollback.')\n raise\n else:\n logger.debug('Attempting to publish commit of in progress transaction...')\n self.publish(\n batch_operation=BatchOperation(\n batch_identifier=batch_identifier,\n commit_operation=CommitOperation(),\n ),\n )\n logger.debug('Published commit.')", "def _create_jdbc_producer_pipeline(pipeline_builder, pipeline_title, raw_data, table_name, operation):\n dev_raw_data_source = pipeline_builder.add_stage('Dev Raw Data Source')\n dev_raw_data_source.set_attributes(data_format='JSON', raw_data=raw_data)\n\n record_deduplicator = pipeline_builder.add_stage('Record Deduplicator')\n\n FIELD_MAPPINGS = [dict(field='/id', columnName='id'),\n dict(field='/name', columnName='name')]\n jdbc_producer = pipeline_builder.add_stage('JDBC Producer')\n jdbc_producer.set_attributes(default_operation=operation,\n table_name=table_name,\n field_to_column_mapping=FIELD_MAPPINGS,\n stage_on_record_error='STOP_PIPELINE')\n\n trash = pipeline_builder.add_stage('Trash')\n dev_raw_data_source >> record_deduplicator >> jdbc_producer\n record_deduplicator >> trash\n\n return pipeline_builder.build(title=pipeline_title)", "def concat(cls, pipe1, pipe2):\n # pylint: disable=protected-access\n if pipe1.dataset != pipe2.dataset and pipe1.dataset is not None and pipe2.dataset is not None:\n raise ValueError(\"Cannot add pipelines with different datasets\")\n\n new_p1 = cls.from_pipeline(pipe1)\n new_p2 = cls.from_pipeline(pipe2)\n new_p1._action_list += new_p2._action_list[:]\n new_p1._variables = {**pipe1._variables, **pipe2._variables}\n new_p1.dataset = pipe1.dataset or pipe2.dataset\n return new_p1", "def make_query(self, query):\n return Transaction(self, query)", "def PipeLine(*funcs, **kwargs):\n def wrapper(*data):\n if len(funcs) == 1:\n combinedArgs = data + kwargs.get(funcs[-1].__name__, tuple())\n return funcs[-1](combinedArgs)\n else:\n combinedArgs = kwargs.get(funcs[-1].__name__, tuple())\n if combinedArgs != ():\n del kwargs[funcs[-1].__name__]\n return funcs[-1](PipeLine(*funcs[:-1], **kwargs)(*data), *combinedArgs)\n return wrapper", "def __enter__(self):\n # mark the beginning of a transaction\n self.execute(*self.sql.transaction())\n # and hand me back to the caller\n return self", "def 
create_pipeline(self, primitives, hyperparameters=None):\n\n self.primitive = self.check_path(primitives)\n\n if hyperparameters is not None:\n hyperparameters = self.check_path_hyperparameters(hyperparameters)\n pipeline = MLPipeline(self.primitive, hyperparameters)\n else:\n pipeline = MLPipeline(self.primitive)\n return pipeline", "def transaction(self, transaction=None):\n if self._transaction is None:\n if transaction:\n self._transaction = transaction\n return self._transaction\n else:\n raise TransactionNotStartedError(\"Transaction not yet started!\")\n else:\n if transaction and transaction != self._transaction:\n raise TransactionAlreadyStartedError(\"Transaction already started, cannot set!\")\n return self._transaction", "def transport(self) -> PipelineServiceTransport:\n return self._client.transport", "def _command_wrapper(self, cls):\n def command_send(func, args):\n self._con.send(func(*args))\n result = self._recv()\n if len(result) == 0: # Reconnect if no results were received\n self._con.connect()\n self._recv()\n return command_send(func, args)\n parsed_result = Parser.parse(result)\n if not parsed_result:\n raise MPDCommandError(\"Wrong command usage or insufficient permissions: {}\".format(result))\n return parsed_result\n\n class Wrapper(cls):\n \"\"\"\n The actual class that wraps Commands\n It generates new functions to each of Commands attributes, and all it does is to wrap\n Commands attributes with command_send.\n \"\"\"\n def __init__(self):\n pass\n\n def __getattr__(self, item):\n not_wrapped = cls.__getattr__(item)\n\n def f(*args):\n return command_send(not_wrapped, args)\n\n f.__doc__ = not_wrapped.__doc__\n f.__name__ = not_wrapped.__name__\n return f\n\n return Wrapper()", "def _get_pipeline(self, params_dict):\n p = Pipeline(steps=[('normalise', StandardScaler()),\n ('add_noise', NoiseAdder()),\n ('dim_reduce', PCA()),\n ('cluster', KMeans())])\n p.set_params(**params_dict)\n return p", "def startTransaction(self):\n if self._transaction is None:\n d = self._config.startTxn()\n\n def processTxn(result):\n self._transaction = result\n return self._transaction\n\n d.addCallback(processTxn)\n return d\n else:\n raise TransactionAlreadyStartedError(\"Transaction already started. 
Call commit or rollback to close it\")", "def run(self):\n pipeline = set_pipeline()\n pipeline.fit(self.X_train, self.y_train)\n return pipeline", "def get_pipeline(features, to_matrix=True):\n feature_names = []\n for feature in features:\n feature_names += feature[1].FEATS\n if to_matrix:\n return Pipeline(features + [('transform', ToMatrix(features=feature_names)), ('norm', MinMaxScaler())])\n else:\n return Pipeline(features)", "def create_delivery_pipeline(\n self,\n ) -> Callable[\n [cloud_deploy.CreateDeliveryPipelineRequest], operations_pb2.Operation\n ]:\n # Generate a \"stub function\" on-the-fly which will actually make\n # the request.\n # gRPC handles serialization and deserialization, so we just need\n # to pass in the functions for each.\n if \"create_delivery_pipeline\" not in self._stubs:\n self._stubs[\"create_delivery_pipeline\"] = self.grpc_channel.unary_unary(\n \"/google.cloud.deploy.v1.CloudDeploy/CreateDeliveryPipeline\",\n request_serializer=cloud_deploy.CreateDeliveryPipelineRequest.serialize,\n response_deserializer=operations_pb2.Operation.FromString,\n )\n return self._stubs[\"create_delivery_pipeline\"]", "def atomic_transaction(\n using: Union[str, Sequence[str]], savepoint: bool = True\n) -> Union[transaction.Atomic, ExitStack]:\n if isinstance(using, str):\n return transaction.atomic(using=using, savepoint=savepoint)\n\n stack = ExitStack()\n # dict.fromkeys -> deduplicate while preserving order\n for db in dict.fromkeys(using):\n stack.enter_context(transaction.atomic(using=db, savepoint=savepoint))\n return stack", "def create(self, params):\n return self.make_client_call('create_pipeline', params)", "def transaction(self, transaction):\n # Allow for a list of blocks..\n transaction = utils.request_type(transaction)\n\n res = r.get(self.url + self.tx_info + str(transaction))\n return self.execute(res)", "def __init__(self, pipeline):\n self._jobs = []\n self._active_jobs = []\n\n self._threads = []\n self._thread_index = {}\n self._thread_id = 1\n\n\n self.local_backend = Local()\n self.backend = None\n\n self.pipeline = pipeline", "def tx_start(self, concurrency: TransactionConcurrency = TransactionConcurrency.PESSIMISTIC,\n isolation: TransactionIsolation = TransactionIsolation.REPEATABLE_READ,\n timeout: int = 0, label: Optional[str] = None) -> 'AioTransaction':\n if sys.version_info < (3, 7):\n raise NotSupportedError(f\"Transactions are not supported in async client on current python {sys.version}\")\n return AioTransaction(self, concurrency, isolation, timeout, label)", "def pipe(self, func: Callable, *args, **kwargs) -> Any:\n return func(self, *args, **kwargs)", "def __init__(self, buffer_size=1000):\r\n\r\n super(Pipe, self).__init__()\r\n self.buffer_size = buffer_size\r\n\r\n # Should it be deque or array?\r\n self.staging_buffer = []\r\n self._ready_buffer = None\r\n\r\n self._done_sending = False\r\n self._done_receiving = False\r\n self._closed = False\r\n\r\n # Taken from Python Queue implementation:\r\n\r\n # mutex must beheld whenever the queue is mutating. All methods\r\n # that acquire mutex must release it before returning. 
mutex\r\n # is shared between the three conditions, so acquiring and\r\n # releasing the conditions also acquires and releases mutex.\r\n self.mutex = threading.Lock()\r\n # Notify not_empty whenever an item is added to the queue; a\r\n # thread waiting to get is notified then.\r\n self.not_empty = threading.Condition(self.mutex)\r\n # Notify not_full whenever an item is removed from the queue;\r\n # a thread waiting to put is notified then.\r\n self.not_full = threading.Condition(self.mutex)", "def _make_pipeline(preprocessors, classifier):\n if isinstance(preprocessors, list):\n # support only preprocessing of lenght 2\n return make_pipeline(preprocessors[0], preprocessors[1], classifier)\n if preprocessors is None:\n return make_pipeline(classifier)\n\n return make_pipeline(preprocessors, classifier)", "def Transaction(db):\n def wrapper(f):\n def transaction_wrapper(*args, **kwargs):\n tx = db.beginTx()\n \n try: args[0].transaction = tx\n except: pass\n \n result = f(*args, **kwargs)\n tx.success()\n tx.close()\n return result\n return transaction_wrapper\n return wrapper", "def transaction(self):\n copy = self.copy()\n try:\n yield copy\n except TransactionRollback:\n del copy\n else:\n self.update(copy)", "def _generate_pipeline(self, job):\n # Generate actions (one per job) and resources\n resources = self._generate_job_resources(job)\n action = self._generate_job_action(job)\n\n pipeline = {\n # Ordered list of actions to execute\n \"actions\": [action],\n # resources required for execution\n \"resources\": resources,\n # Technical question - difference between resource and action environment\n # For now we will set them to be the same.\n \"environment\": self._generate_environment(),\n }\n\n # \"timeout\": string in seconds (3.5s) is not included (defaults to 7 days)\n return pipeline", "def pipe(self, func: Callable[..., Any], *args: Any, **kwargs: Any) -> Any:\n\n if isinstance(func, tuple):\n func, target = func\n if target in kwargs:\n raise ValueError(\"%s is both the pipe target and a keyword \" \"argument\" % target)\n kwargs[target] = self\n return func(*args, **kwargs)\n else:\n return func(self, *args, **kwargs)", "def make_pipeline(context):\n \n # Base universe set to the Q1500US\n base_universe = Q500US()\n \n #Get all industry codes\n industry=morningstar.asset_classification.morningstar_industry_code.latest\n #Get all sector codes\n sector = Sector()\n \n # Create filters (to be used as masks) of different industries/sectors \n # This is the mask that should exclude the most stocks. \n # Note that these may need to be even further filtered to exclude securities outside of a \n # similar range of volumes/size. For instance, the defense sector stock provides stocks as large as # LMT but also small defense companies. Although this shouldn't matter due to the second filter of \n # crosscorrelation, this may be unnecassary computational expense. \n pipe=Pipeline()\n #Below forms a \"sentiment screen\" that takes only stocks that have been rated a certain number of times and of those ratings there are at least 2.85 times as many bull scored messages as there are bear scored messages. 
\n pipe.add(st.bull_scored_messages .latest, 'bull_scored_messages')\n pipe.add(st.bear_scored_messages .latest, 'bear_scored_messages')\n sentimentScreen=(((st.bull_scored_messages.latest) > (context.Sentiment_multiplier*st.bear_scored_messages.latest)) & (st.bear_scored_messages.latest > 5))\n \n dFilt=sector.eq(310) #Indicates aerospace/defense sector\n dFilt2=industry.eq(31052107) #Indicates aerospace/defense industry\n tFilt=sector.eq(311) #Indicates consumer electronics sector\n tFilt2=industry.eq(31167138) #Indicates consumer electronics industry \n cFilt=sector.eq(101) #Chemical sector\n cFilt2=industry.eq(10103003)\n aFilt=sector.eq(102)\n aFilt2=industry.eq(10209017) #Auto manufacturing industry\n depFilt2=industry.eq(10217034) #Department store industry\n #dFilt2,tFilt2,cFilt2,aFilt2=True,True,True,True #Remove industry requirement\n defenseFilt= dFilt & dFilt2 #Combination of filters\n techFilt= tFilt & tFilt2\n chemFilt = cFilt & cFilt2 \n autoFilt = aFilt & aFilt2 \n tradable=base_universe & (defenseFilt | techFilt | chemFilt | autoFilt | depFilt2) & sentimentScreen\n \n \n pipe.set_screen(tradable)\n pipe.add(defenseFilt,'defenseFilt')\n pipe.add(techFilt,'techFilt')\n pipe.add(chemFilt,'chemFilt')\n pipe.add(autoFilt,'autoFilt')\n pipe.add(depFilt2,'depFilt')\n \n \n \n #TODO: May also want to return stock sentiment data and further filter tuple couples by only accepting couples with sentiment data in a similar range (further attributing to the validity of the calculated cross-correlation)\n \n return pipe", "def _chain(self):\n obj = self._clone()\n if obj._sticky_filter:\n obj.query.filter_is_sticky = True\n obj._sticky_filter = False\n return obj", "def build_own_pipeline() -> Pipeline:\n nn_pipeline = None\n\n nn_pipeline = Pipeline([\n ('vect', CountVectorizer()),\n ('tfidf', TfidfTransformer()),\n ('clf', MLPClassifier()) \n ])\n \n return nn_pipeline", "def pipe(*args):\n # Bail out early if there is only one item\n if len(args) == 1:\n return Graph(args)\n\n graph = Graph()\n graph._pipe(args)\n return graph", "def batch(self, sql):\n return _Batch(self.conn, sql)" ]
[ "0.6506651", "0.6506439", "0.60975367", "0.5716096", "0.5687337", "0.5610756", "0.5471087", "0.5375021", "0.5373202", "0.5361515", "0.53164816", "0.52698827", "0.5247132", "0.5190451", "0.5172265", "0.51619154", "0.5102271", "0.5086458", "0.5023004", "0.5023004", "0.49856988", "0.4985457", "0.4974305", "0.49494478", "0.49182144", "0.4911432", "0.49040595", "0.4904034", "0.48949367", "0.48897997", "0.4886517", "0.4873344", "0.48349896", "0.48337016", "0.48255497", "0.48169094", "0.47895864", "0.47777057", "0.47681892", "0.47524166", "0.4748498", "0.47286782", "0.4718476", "0.47134304", "0.47042784", "0.46963537", "0.46903032", "0.4666359", "0.46634302", "0.4659428", "0.46508995", "0.46470875", "0.4645642", "0.46384785", "0.46229726", "0.46114674", "0.46066803", "0.46031973", "0.45847562", "0.457893", "0.45733142", "0.4572232", "0.45706818", "0.45663235", "0.4558362", "0.45577848", "0.45434898", "0.45319825", "0.4530983", "0.45285374", "0.45196104", "0.45091265", "0.4504268", "0.44977438", "0.44947407", "0.44848177", "0.44816828", "0.4480179", "0.44773617", "0.4466436", "0.4462741", "0.44608557", "0.44525436", "0.4451079", "0.44391277", "0.44385275", "0.4424819", "0.44225004", "0.43969536", "0.43923312", "0.43901205", "0.43891522", "0.43860486", "0.4380133", "0.43714172", "0.43605348", "0.43585038", "0.43573797", "0.4351774", "0.43449047" ]
0.7290739
0
This will call the parent class to validate the connection and initialize the values
def __init__(self, query_params=None, equipment=None, module=None): super().__init__() self.equipment = equipment self.module = module self.query_params = query_params
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def init_connection(self, connection):", "def __init__(self):\n\n\t\tself.connection = self.get_connection()", "def __init__(self, connection):\n self.conn = connection", "def __init__(self, connection):\n\n self._conn = connection", "def init(self, userdata, conn):\r\n pass", "def __init__(self, connection):\n super().__init__(connection)", "def __init__(self):\n self.conn = psycopg2.connect(dbname=DB, user=DB_USER, password=DB_PW, host=HOST, port=PORT)\n self.categories = self.fill_category()\n self.fill_products()", "def __init__(self):\n self._connection = get_db_connection()", "def __init__(self):\n\t\tself.obtainDatabaseConnection()", "def __init__(self, input_parameters):\n # If anything is not valid, an error should be raised here.\n self.valid_inputs = input_parameters_are_valid(input_parameters)\n\n # Set parameters\n self.database_location = input_parameters['database_location']\n self.overwrite = input_parameters['overwrite']\n self.connection_list = input_parameters['connection_list']\n self.timeout = input_parameters['timeout']\n self.connection_type = input_parameters['connection_type']\n\n # Define our components\n self.database = None\n self.interpreter = None\n self.connector = None\n self.connection = None", "def __init__(self):\n self.dbconnect = dbConnection.connection", "def initialize(self):\n self._validate_client_objects()\n for execution_type in self.clients:\n # check for valid connection is done in _validate_client_objects()\n _ = self.clients[execution_type].connection # Unused", "def __init__(self):\n self.try_to_connect()", "def __init__(self):\n # create a connection through our super role via db.connect\n try:\n self.connection = db.connect(SUPER_ROLE, authcode=SUPER_AUTHCODE, host=HOST)\n except db.OperationalError: # thrown if password or role don't match\n print 'Caught an exception while trying to log in, maybe your account does not exist yet?'\n exit()\n \n # get a DictCursor as our cursor (which returns queries as column-name dicts)\n self.cursor = self.connection.cursor(DictCursor)\n \n self.setup_tables()", "def __init__(self,schema_name = 'null'):\n\t\tself.connected = False\n\t\tself.__schema_name = ''\n\t\tself.__db = ''\n\t\tself.__cursor = ''\n\t\tself.__engine = ''\n\t\tif schema_name != 'null':\n\t\t\tself.connect(schema_name)", "def __init__(self):\r\n self.conn = create_connection(DATABASE_PATH)", "def __init__(self, user, password, database='mesomat', host='localhost'): \n \n \n self.config = {\n 'user' : user,\n 'password' : password,\n 'host' : host,\n 'database' : database,\n 'raise_on_warnings' : True,\n 'auth_plugin' : 'mysql_native_password'\n }\n \n self.INSERT_SAMPLE_COLUMN_COMMAND = ()\n \n \n self.connected = False\n self.cursor = None\n self.cnx = None", "def __init__(self):\n self.dbcon = DbConnection.get_con()", "def initialize(self):\r\n if not self.context:\r\n self.context = SQLContext(self.url, self.connection, self.schema)\r\n if self.table is None:\r\n self.table = self.context.table(self.table_name)\r\n if not self.fields:\r\n self.read_fields()\r\n self.field_names = self.fields.names()", "def __init__(self):\n\n # TODO: Add login and data grab logic", "def __init__(self):\n super(CSC100DB, self).__init__()\n try:\n self.conn = mysql.connector.connect(\n host=\"localhost\",\n user=\"root\",\n password=\"csc100\",\n database=\"azwh\"\n )\n self.cursor = self.conn.cursor()\n\n except Exception as e:\n print(\"Error:\", e)", "def __init__(self, connection):\n self.con = connection\n self.recordset = None\n 
self.recordset_df = None", "def __init__(self, *args):\n _table.Connection_swiginit(self, _table.new_Connection(*args))", "def __init__(self):\n self.__connection = pymysql.connect(host=vars.host,\n user = vars.username,\n password = vars.password,\n db = vars.db,\n charset = \"utf8mb4\",\n cursorclass = pymysql.cursors.DictCursor\n )", "def __init__(self, msg):\n\n super(DBConnectionError, self).__init__(msg)\n self.msg = msg", "def __init__(self, dbconnect):\n self.dbconnect = dbconnect", "def __init__(self):\n self.connection = DbConnector()\n self.db_connection = self.connection.db_connection\n self.cursor = self.connection.cursor\n\n self.ACTIVITY_ID = 1\n self.TRACKPOINT_ID = 1", "def __init__(self, conf):\n self.conf = conf\n self._conn = None", "def __init__(__self__, *,\n database: pulumi.Input[str],\n host: pulumi.Input[str],\n port: pulumi.Input[float]):\n pulumi.set(__self__, \"database\", database)\n pulumi.set(__self__, \"host\", host)\n pulumi.set(__self__, \"port\", port)", "def __init__(self):\n\n try:\n self.dbConnection = mysql.connector.connect(\n host=\"jack-test-db.cq0gc7w0rwke.us-east-1.rds.amazonaws.com\",\n user=\"Javk5pakfa\",\n password=\"GoJack123CU!\",\n database=\"4620Test\")\n except mysql.connector.Error as err:\n if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:\n print(\"Something is wrong with your user name or password\")\n elif err.errno == errorcode.ER_BAD_DB_ERROR:\n print(\"Database does not exist\")\n else:\n ErrorMessageWindow(err)\n self.dbCursor = self.dbConnection.cursor()", "def __init__(self):\n # instantiate the class Orm\n self.new_orm = orm.Orm()\n\n # read the file connection.yml\n with open('connection.yml', 'r') as file:\n self.info = file.read().split()\n\n # connection at MySQL with data of connection.yml file and creation cursor\n self.data_base = mysql.connector.connect(user=self.info[0], password=self.info[1],\n host=self.info[2])\n self.cursor = self.data_base.cursor()", "def __init__(self):\n with open('config.json') as config:\n data = json.load(config)\n\n password = self.decode_password(data['db']['password'])\n db_conn_string = 'postgresql://' + data['db']['username'] + ':' + password + '@' + \\\n data['db']['hostname'] + ':' + data['db']['port'] + '/' + data['db']['database']\n\n self.engine = create_engine(db_conn_string)\n try:\n conn = self.engine.connect()\n if conn is not None:\n print(\"-I- Successful Database Connection\")\n except Exception as e:\n print(\"-W- \" + str(e))", "def __init__(self, error_msg):\n super(ConnectionException, self).__init__(error_msg)", "def __init__(\n self,\n host: str,\n database: str,\n user: str,\n password: str\n ) -> None:\n self.host: str = host\n self.database: str = database\n self.user: str = user\n self.password: str = password\n\n # all these values are needed\n for attr in ('host', 'database', 'user', 'password'):\n if not getattr(self, attr):\n raise ValueError(f\"{attr} must not be None!\")", "def __init__(self, *args, **kwargs):\n\n # Set alternative connection ID if specified\n self.conn_id = None\n if \"conn_id\" in kwargs:\n cid_value = kwargs[\"conn_id\"]\n if type(cid_value) is str:\n self.conn_id = cid_value\n\n super().__init__(*args, **kwargs)", "def __post_init__(self):\n self.dbase = databases.Database(\n self.dsn,\n min_size=self.min_size,\n max_size=self.max_size\n )\n self.engine, self.meta = self.get_engine_metadata()", "def initialize(self):\n if not self.connection.is_closed():\n self.connection.close()\n\n self.connection.connect()", "def init(self, host, 
port, dbname, user, password,\n dbtype='mysql', table='status_log'):\n \n # DB Type\n if (dbtype):\n self.dbtype = dbtype\n \n if (dbtype == 'mysql'):\n self.timestamp = StatusLog.TIMESTAMP_MYSQL\n else:\n logging.error(\"Unknown dbtype: {}\".format(dbtype))\n \n # Table\n if (table):\n self.table = table\n \n # Host\n if (host):\n self.host = host\n else:\n logging.error('host is not defined')\n \n # Port\n if (port):\n self.port = port\n else:\n logging.error('port is not defined')\n \n # Database Name\n if (dbname):\n self.dbname = dbname\n else:\n logging.error('dbname is not defined')\n \n # User\n if (user):\n self.user = user\n else:\n logging.error('user is not defined')\n \n # Password\n if (password):\n self.password = password\n else:\n logging.error('password is missing')\n \n self.initialized = True", "def __init__(self):\n self.data = None\n self.conn = None\n self.database = None\n self.table = None\n self.manage = None\n self.limiting = 0", "def __init__(self, host, user, password, base):\n global con\n\n try:\n con = _mysql.connect(host, user, password, base)\n \n print \"Polaczono z baza danych.\"\n \n except _mysql.Error, e:\n \n print \"Blad %d: %s\" % (e.args[0], e.args[1])\n sys.exit(1)", "def _setup_connection(self):\r\n if self.connection.password:\r\n if not self.format_inline('AUTH', self.connection.password):\r\n raise AuthenticationError(\"Invalid Password\")\r\n self.format_inline('SELECT', self.connection.db)", "def __init__(self):\n default_config = Config()\n query = Query(default_config)\n database = Database(default_config)\n common_util = CommonUtil(default_config, database)\n self.config = default_config\n self.query = query\n self.database = database\n self.common_util = common_util", "def __init__ (self, host, database, user, passwd, dry_run = False):\n self.user , self.passwd = user, passwd\n self.host = host\n self.db = database\n #~ self.delimiter = ','\n self.sql = \"\"\n self.table = []\n self.testing = dry_run", "def __init__(self, host=\"\", user=\"\", password=\"\", database=\"\"):\n\n try:\n self.connection = mysql.connector.connect(\n host=host,\n user=user,\n password=password,\n database=database\n )\n self.cursor = self.connection.cursor(buffered=True)\n\n except Exception as e:\n print(e)", "def connect(self, dbapi_connection, connection_record):", "def __init__(self, db_api_conn, operation, types=None):\n self.conn = db_api_conn\n self.cursor = db_api_conn.cursor()\n self.operation = operation\n if types is not None:\n self.cursor.setinputsizes(*types)", "def __init__(self):\n\t\tConnectorMySQL.__init__(self)", "def __setup__(cls):\n super(Instance, cls).__setup__()\n cls._sql_constraints += [\n (\n 'unique_url', 'UNIQUE(url)',\n 'URL of an instance must be unique'\n )\n ]\n cls._error_messages.update({\n \"connection_error\": \"Incorrect API Settings! 
\\n\"\n \"Please check and correct the API settings on instance.\",\n \"multiple_instances\": 'Selected operation can be done only for one'\n ' instance at a time',\n })\n cls._buttons.update({\n 'test_connection': {},\n 'import_websites': {},\n 'import_order_states': {},\n 'import_carriers': {},\n })", "def __init__(self, host, port, user, passwd, db):\n self.host = host\n self.port = port\n self.user = user\n self.passwd = passwd\n self.db = db\n self.cursor = None\n self.database = None", "def init(self):\n return self.conn.init()", "def init_connection_state(self):\r\n # if 'mars connection=true' in self.__connection_string.lower():\r\n # # Issue #41 - Cannot use MARS with savepoints\r\n # self.features.uses_savepoints = False\r\n # cache the properties on the connection\r\n self.connection.adoConnProperties = dict([(x.Name, x.Value) for x in self.connection.adoConn.Properties])\r\n\r\n unsupported_sql = False\r\n if self.is_sql2000(make_connection=False):\r\n # SQL 2000 doesn't support the OUTPUT clause\r\n self.features.can_return_id_from_insert = False\r\n unsupported_sql = True\r\n elif self.is_sql2005(make_connection=False):\r\n unsupported_sql = True\r\n\r\n if unsupported_sql:\r\n warnings.warn(\r\n \"This version of MS SQL server is no longer tested with \"\r\n \"django-mssql and not officially supported/maintained.\",\r\n DeprecationWarning)", "def __init__(self, credentials=None, connection=None):\n if credentials is not None:\n self.connection = pymysql_driver.connect(\n user=credentials['user'],\n password=credentials['password'],\n host=credentials['host'],\n database=credentials['database'],\n autocommit=False,\n connect_timeout=2,\n cursorclass=pymysql_driver.cursors.DictCursor\n )\n elif connection is not None:\n self.connection = connection\n else:\n raise ValueError(\"Must provide either the database credentials or connection object\")", "def init(self):\n self.conn = None\n\n return True", "def _check_connection(self):\n if \"_connection\" not in self.__dict__:\n message = \"use connect method before doing operation on this database\"\n raise Exception(message)", "def __open(self):\n\n try:\n cnx = mysql.connector.connect(\n host=self.__host,\n user=self.__user,\n password=self.__password,\n database=self.__database\n )\n self.__connection = cnx\n self.__cursor = cnx.cursor(buffered=True, dictionary=True)\n\n except mysql.connector.Error as err:\n print(\"Something went wrong: {}\".format(err))", "def __setup_conn__(self, **kwargs):\n self.ext_conn = setup_conn(**kwargs)", "def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n if len(args):\n # args[0] is a connection object\n\n try:\n self._host = args[0].get_phos_host()\n self._port = args[0].get_phos_port()\n except AttributeError:\n # Not a Phos connection object. 
Too bad.\n pass\n logging.getLogger(\"pyhive.hive\").setLevel(logging.WARN)\n logging.getLogger(\"requests.packages.urllib3.connectionpool\").setLevel(logging.WARN)", "def __init__(self):\n try:\n self._chat_db = sqlite3.connect(CHAT_DB_PATH)\n except OperationalError:\n print(\"Cannot access chat database.\\nGo to Settings->Security and Privacy->Privacy->Full Disk Access.\\n\"\n \"Give access to the application you are running and restart the program.\")\n sys.exit(1)\n\n self._contacts = Contacts(self._chat_db).get_contacts_df()\n\n try:\n self._message_db = sqlite3.connect(WEEKLY_MESSAGES_DB_PATH)\n except OperationalError:\n print(\"Could not connect to the database server.\")\n sys.exit(1)", "def __init__(self) -> None:\n settings = get_project_settings()\n self.db = pymysql.connect(\n host=settings['MYSQL_SERVER'],\n port=settings['MYSQL_PORT'],\n user=settings['MYSQL_USERNAME'],\n password=settings['MYSQL_PASSWORD'],\n db=settings['MYSQL_DB']\n ) \n self.cursor = self.db.cursor()", "def __init__(self):\n dotenv_vals = dotenv_values()\n if dotenv_vals.__len__() == 0:\n print('You have not provided a .env file with address to database'\n '. Program will attempt to use localhost:27017')\n self.user_name = ''\n self.password = ''\n self.host = ''\n try:\n self.user_name = dotenv_vals['PARGO_USER']\n self.password = dotenv_vals['PARGO_PASSWORD']\n self.host = dotenv_vals['PARGO_HOST'] # ip and port\n except KeyError:\n try:\n self.host = dotenv_vals['PARGO_HOST']\n print('You provided only host info to database in .env'\n ' .Program will attempt to use address.')\n except KeyError:\n pass\n if self.user_name and self.password:\n self.user_name = quote_plus(self.user_name)\n self.password = quote_plus(self.password)\n self.db_name = None # would not be none during affirm_client call", "def __init__(self, dbname, host, readport):\n self.host = host\n self.readport = readport\n self.dbname = dbname\n self.readsets = [] # pre-defined sets of readouts for negotiating bulk transfers", "def __init__(self, host, port, user, password, db_name, **kwargs):\n super().__init__()\n self.host = host\n self.port = port\n self.user = user\n self.password = password\n self.db_name = db_name\n self.schema = kwargs.get('schema', None)\n\n self.kwargs = {}\n\n for key in kwargs.keys():\n if key != 'schema':\n self.kwargs[key] = kwargs.get(key)", "def __init__(self):\n self.__verbose=False\n self.__fake=False\n self.__all=False\n self.__detector=''\n self.__authpath='.'\n self.__connect=''\n self.__registry={}\n self.__basedir=''\n self.__dbconfig=''", "def __init__(self, connection, user):\n super().__init__(connection)\n self.user = user", "def __init__(self):\n self.client_id = None\n self.bridge_config = {}\n self.bridge_config_answer_status = None", "def __init__(self):\n\t\tself.pcpConn = PCP_CONNECTION()\n\t\tself.errMsg = None\n\t\tself.connState = ConnStateType.NOT_CONNECTED\n\t\tself.pcpResInfo = None\n\t\tself.Pfdebug = None", "def init_pre_connection(self):\n\n if \"mysql\" in PyFunceble.INTERN:\n self.__dict__.update(PyFunceble.INTERN[\"mysql\"].copy())\n\n if self.authorized and not self.pre_initiated:\n for (description, data) in self.variables.items():\n environment_var = PyFunceble.helpers.EnvironmentVariable(data[\"env\"])\n if environment_var.exists():\n setattr(\n self, \"_{0}\".format(description), environment_var.get_value(),\n )\n else:\n message = \"[MySQL/MariaDB] Please give us your DB {0} ({1}): \".format(\n description.capitalize(), repr(data[\"default\"])\n )\n\n if description != 
\"password\":\n user_input = input(message)\n else:\n user_input = getpass(message)\n\n if user_input:\n setattr(self, \"_{0}\".format(description), user_input)\n self.env_content[data[\"env\"]] = user_input\n else:\n setattr(self, \"_{0}\".format(description), data[\"default\"])\n self.env_content[data[\"env\"]] = data[\"default\"]\n\n # pylint: disable = attribute-defined-outside-init\n self._port = int(self._port)\n self.save_to_env_file(self.env_content, self.pyfunceble_env_location)\n self.pre_initiated = True", "def __init__(self, connectionParams) :\n self.ssh = None\n self.connected = False\n self.connObj = connectionParams", "def setup(cls):\n super().setup()\n cls.db = DBCommunication()", "def __init__(self, connection_str: str = None, **kwargs):\n self._ip = get_ipython()\n self._debug = kwargs.get(\"debug\", False)\n super().__init__()\n\n self.formatters = {\"datetime\": self._format_datetime, \"list\": self._format_list}\n self._loaded = self._is_kqlmagic_loaded()\n\n if not self._loaded:\n self._load_kql_magic()\n\n self._schema: Dict[str, Any] = {}\n\n if connection_str:\n self.current_connection = connection_str\n self.connect(connection_str)", "def __init__(self, constr = None):\n self.con = None\n self._outer_con = False\n self._name = None\n self._password = None\n self._host = None\n self._port = None\n self._sid = None\n self._con_type = None\n self.events = {}\n self.table_map = None\n self.prev_description = None\n\n if isinstance(constr, DbiConnection):\n self.con = constr.get_raw_con()\n self._con_type = \"outer\"\n\n elif isinstance(constr, (str, unicode)):\n self.con = cx_Oracle.Connection(constr)\n self._con_type = \"string\"\n\n elif isinstance(constr, dict):\n constr = create_connection_string(constr)\n self.con = cx_Oracle.Connection(constr)\n self._con_type = \"dict\"\n\n elif isinstance(constr, cx_Oracle.Connection):\n self.con = constr\n self._con_type = \"raw\" # не возможности получить pass\n #can't get password\n #and perform some other operations!\n #does need support ?\n else:\n raise DbiException('Unsupported types of DatabaseConnection parameters')\n self._init_con_parts()", "def __init__(self):\n self.database = Database()\n self.load_config()", "def __init__(self, host, database, user, password, table, update_id, **cnx_kwargs):\n if ':' in host:\n self.host, self.port = host.split(':')\n self.port = int(self.port)\n else:\n self.host = host\n self.port = 3306\n self.database = database\n self.user = user\n self.password = password\n self.table = table\n self.update_id = update_id\n self.cnx_kwargs = cnx_kwargs", "def __init__(self, host, login, passwd, conn):\n # connection lock is used to lock all changes to the connection state attributes\n # (connection and last_error)\n self.connection_state_lock = threading.Lock()\n self.connection = None\n self.last_error = None\n\n # credentials\n self.host = host\n self.login = login\n self.passwd = passwd\n self.type = conn\n\n # connect\n self.connect()", "def initialize(self) -> None:\n # First, establish a connection to the specified database\n try:\n self._connect_to_db()\n except psycopg2.OperationalError: # specified database does not exist\n with psycopg2.connect(database=DATABASE_ENV[\"POSTGRES_DB\"],\n user=self.dbuser, password=self.dbpassword,\n host=self.dbhost, port=str(self.dbport)) as con:\n with con.cursor() as cur:\n con.autocommit = True # cannot create db inside a transaction\n cur.execute(f'CREATE DATABASE \"{self.dbname}\"')\n con.autocommit = False\n self._connect_to_db() # try 
again\n\n # Second, create the necessary database table, only if required\n with self._connection.cursor() as cur:\n cur.execute(f\"\"\"\n CREATE TABLE IF NOT EXISTS \"{self.MESSAGE_TABLE_NAME}\" (\n id SERIAL PRIMARY KEY,\n key CHAR(4) NOT NULL,\n value REAL NOT NULL,\n ts TIMESTAMP NOT NULL,\n tz TEXT NOT NULL\n );\n \"\"\")\n self._connection.commit()", "def __init__(self, UI, server, username, password, port=443):\n self.UI = UI\n self.server = server\n self.username = username\n self.password = password\n self.port = port\n super(SmarterConnection, self).__init__(host=server, user=username,\n pwd=password, port=port)", "def __init__(self,host,username,passsowrd,port=22):\r\n self.host=host\r\n self.username=username\r\n self.passsowrd=passsowrd\r\n self.port=port\r\n self.Status=True,None", "def __init__( self, conn, addr, server, version ):", "def initialize(self):\n raise NotImplementedError", "def initialize(self):\n raise NotImplementedError", "def initialize(self):\n raise NotImplementedError", "def __init__(self, hostname, username, password, database_name):\n self.hostname = hostname\n self.username = username\n self.password = password\n self.database_name = database_name\n \n self.db = MySQLdb.connect(host=self.hostname, user=self.username, passwd=self.password, db=self.database_name)", "def __init__(self, host, user, password, db):\n self.connection = pymysql.connect(\n host=host, user=user, password=password, db=db,\n cursorclass=pymysql.cursors.DictCursor)", "def __init__(self, dsn):\n\n self.dsn = dsn", "def setUp(self):\n # Set logging level\n logging.basicConfig(level=logging.DEBUG)\n\n # Ask for url & login information\n https_url = raw_input(\n \"\\nEnter the http url [default: https://imagen2.cea.fr/database/]: \")\n if not https_url:\n https_url = \"https://imagen2.cea.fr/database/\"\n login = raw_input(\"\\nEnter the login: \")\n password = getpass.getpass(\"Enter the password: \")\n\n # Create dummy rqls\n self.rql = (\"Any C, G Where X is Subject, X code_in_study C, \"\n \"X handedness 'ambidextrous', X gender G\")\n\n # HTTP test\n self.connection = CWInstanceConnection(https_url, login, password,\n realm=\"Imagen\")", "def __init__(self, mysql_url):\n\n # Store the url for future reference\n self.mysql_url = mysql_url\n # Report connection error only once\n self.report_connection_error = True\n\n # Parse MySQL URL and try to initialize a connection\n conn_kwargs = MySqlPipeLine.parse_mysql_url(mysql_url)\n self.dbpool = adbapi.ConnectionPool('MySQLdb',\n charset='utf8',\n use_unicode=True,\n connect_timeout=5,\n **conn_kwargs)", "def initialize(self):\n self.data = None\n self.errors = []", "def __init__(self, opt = {}, args = []):\n # Get warning and critic options\n opt_warning = self.DEFAULT_WARNING\n opt_critical = self.DEFAULT_CRITICAL\n if \"warning\" in opt:\n opt_warning = opt[\"warning\"]\n if \"critical\" in opt:\n opt_critical = opt[\"critical\"]\n\n self.warning = int(opt_warning)\n self.critical = int(opt_critical)\n\n # Connection parameters\n self.dsn = opt[\"connection\"]\n self.user = opt[\"user\"]\n self.password = opt[\"password\"]", "def __init__(self):\r\n self.connection = MySQLdb.connect(host=\"db4free.net\", db=\"academicsys\", user=\"desenvolvedores\", passwd=\"acesso\")\r\n self.cursor = self.connection.cursor()", "def __init__(__self__, *,\n description: Optional[pulumi.Input[str]] = None,\n status: Optional[pulumi.Input[Union[str, 'ConnectionStatus']]] = None):\n if description is not None:\n pulumi.set(__self__, \"description\", 
description)\n if status is not None:\n pulumi.set(__self__, \"status\", status)", "def __init__(self, app_database):\n try:\n self.database_configuration = app_database\n self.conn = None\n self.cursor = None\n except Exception as error:\n print(f\"DBCM::__init__::{error}\")", "def __init__(self, *auto_connect):\n # Establish database connection\n Model.mysql = self\n # Only connect if passed 4 parameters\n # TODO: Add more connection options for greater flexibility.\n if len(auto_connect) is 4: self.connect(*auto_connect)", "def __init__(self, sql_config = {}, verbose = True):\n\n self.sql_config = sql_config\n self.verbose = verbose\n self._setup()", "def __init__(self):\n\n self.tableConnString = os.environ['ENTITYTABLE_CONNECTIONSTRING'];\n self.__table = None", "def _validate_conn(self, conn):\n # Call the method on the base class\n super()._validate_conn(conn)\n\n # Set up TCP Keep Alive probes, this is the only line added to this function\n TCPKeepAliveValidationMethods.adjust_connection_socket(conn)", "def _validate_conn(self, conn):\n # Call the method on the base class\n super()._validate_conn(conn)\n\n # Set up TCP Keep Alive probes, this is the only line added to this function\n TCPKeepAliveValidationMethods.adjust_connection_socket(conn)", "def _init(self):\n raise NotImplementedError", "def __init__(self, *args, **kwargs):\n\n if len(args) == 1 or \"conn\" in kwargs:\n conn = args[0] if len(args) == 1 else kwargs[\"conn\"]\n\n if type(args[0]) is connections.Connection:\n self.conn = conn\n else:\n raise TypeError(\"connect() requires a MySQLdb.conn object\")\n\n elif len(args) == 4:\n self.conn = MySQLdb.connect(\n host=args[0],\n db=args[1],\n user=args[2],\n passwd=args[3])\n else:\n try:\n self.conn = MySQLdb.connect(\n host=kwargs[\"host\"],\n db=kwargs[\"db\"],\n user=kwargs[\"user\"],\n passwd=kwargs[\"passwd\"])\n except KeyError:\n raise TypeError(\"You must pass host, db, user, and passwd \" + \\\n \"to connect()\")\n\n self.cursor = self.conn.cursor()\n self.gene_cache = {}\n self.source_cache = {}", "def __init__(self, msg):\n\n super(DBValueError, self).__init__(msg)\n self.msg = msg", "def __init__(self, *args, **kwargs):\n self._initialize_protocols()\n super().__init__(*args, **kwargs)", "def __init__(self):\r\n assert isfile(DBClass.db_name), \"Database doesn't exists!\"\r\n\r\n self.conn = self.create_connection()\r\n self.cursor = self.conn.cursor()" ]
[ "0.74869585", "0.7371334", "0.7031037", "0.69948906", "0.6977147", "0.68673927", "0.68106675", "0.6798663", "0.6792512", "0.67900044", "0.6789289", "0.6721082", "0.66627276", "0.6661784", "0.66280216", "0.6615395", "0.6609552", "0.65898615", "0.658387", "0.65413547", "0.64912355", "0.6471431", "0.6462029", "0.6453578", "0.6452918", "0.64450574", "0.6422735", "0.642175", "0.64133096", "0.64049387", "0.64004326", "0.63957626", "0.6394294", "0.6390632", "0.63882387", "0.63832337", "0.6366104", "0.63585997", "0.63578963", "0.63575065", "0.63169533", "0.62881607", "0.6283927", "0.6281157", "0.62743163", "0.62719595", "0.62672365", "0.6264661", "0.6260341", "0.6255279", "0.6246467", "0.62387425", "0.6236431", "0.62355626", "0.62249464", "0.62207925", "0.6208205", "0.62080824", "0.61913645", "0.61886567", "0.61806464", "0.61792666", "0.6165688", "0.6159824", "0.61576533", "0.6150563", "0.6149805", "0.6148941", "0.6140158", "0.61307704", "0.6127757", "0.6122815", "0.6115716", "0.6114432", "0.6109703", "0.60886097", "0.6084728", "0.608387", "0.6075824", "0.6075824", "0.6075824", "0.6069948", "0.60688394", "0.60688287", "0.6068283", "0.60642123", "0.60631716", "0.6062265", "0.6059142", "0.60553026", "0.60520416", "0.60513484", "0.6047651", "0.6044219", "0.60435766", "0.60435766", "0.60428554", "0.6040946", "0.6039273", "0.603246", "0.6017379" ]
0.0
-1
This will return the graph data for the outage module
def get_outage(self): try: assert self._db_connection, { STATUS_KEY: HTTP_500_INTERNAL_SERVER_ERROR, MESSAGE_KEY: DB_ERROR} if self.equipment == COKE_DRUM_VALUE and self.module == OUTAGE_VALUE: """ This will return the graph data for the selected outage module """ query_params = { TAG_NAME_REQUEST: self.query_params.GET[TAG_NAME_REQUEST], START_DATE_REQUEST: self.query_params.GET[START_DATE_REQUEST], END_DATE_REQUEST: self.query_params.GET[END_DATE_REQUEST] } MODULE_LEVEL_MULTILINE_TAG = tuple(LIST_OF_OUTAGE_MODULE_LEVEL_MULTILINE_TAGS_GRAPH) if MULTILINE_REQUEST in self.query_params.GET: """ This will return the graph data for the actual and predicted tags for the selected outage module """ query_params[MULTILINE_REQUEST] = self.query_params.GET[MULTILINE_REQUEST] if query_params: if START_DATE_REQUEST not in query_params or not query_params[START_DATE_REQUEST] and \ MULTILINE_REQUEST not in query_params: graph_data = django_search_query_all( DETAILED_OUTAGE_GRAPH_NULL_START_DATE.format( self.module, query_params[TAG_NAME_REQUEST], query_params[END_DATE_REQUEST])) elif query_params[START_DATE_REQUEST] and MULTILINE_REQUEST not in query_params: graph_data = django_search_query_all( DETAILED_OUTAGE_GRAPH.format( self.module, query_params[TAG_NAME_REQUEST], query_params[START_DATE_REQUEST], query_params[END_DATE_REQUEST])) elif query_params[START_DATE_REQUEST] and query_params[MULTILINE_REQUEST]: if query_params[TAG_NAME_REQUEST] in LIST_OF_OUTAGE_MODULE_LEVEL_MULTILINE_TAGS_GRAPH: graph_data = django_search_query_all( DETAILED_OUTAGE_MODULE_MULTILINE_GRAPH.format( self.module, MODULE_LEVEL_MULTILINE_TAG, query_params[START_DATE_REQUEST], query_params[END_DATE_REQUEST])) else: graph_data = django_search_query_all( DETAILED_OUTAGE_GRAPH.format( self.module, query_params[TAG_NAME_REQUEST], query_params[START_DATE_REQUEST], query_params[END_DATE_REQUEST])) df_data = pd.DataFrame(graph_data) min_max = django_search_query_all( MIN_MAX_DATA.format( self.module, query_params[TAG_NAME_REQUEST] )) df_min_max_data = pd.DataFrame(min_max) graph = [] if not df_data.empty: df_data = df_data.where(pd.notnull(df_data) == True, None) df_data.sort_values(TIMESTAMP_KEY, ascending=True, inplace=True) df_unit = df_data[UNIT].iloc[0] df_description = df_data[DESCRIPTION].iloc[0] df_timestamp = list(dict.fromkeys(list(df_data[TIMESTAMP_KEY]))) if query_params[TAG_NAME_REQUEST] in LIST_OF_OUTAGE_MODULE_LEVEL_MULTILINE_TAGS_GRAPH: df_result = df_data.groupby(TAG_NAME_REQUEST) actual_north_data = [] predicted_north_data = [] actual_south_data = [] predicted_south_data = [] if len(df_result) == 2: df_description = \ df_data[df_data[TAG_NAME_REQUEST] == query_params[TAG_NAME_REQUEST]][ DESCRIPTION].iloc[0] df_north_actual = df_result.get_group(OUTAGE_MODULE_LEVEL_ACTUAL_TAG) actual_north_data = list(df_north_actual['north_drum_tag_value']) df_north_predicted = df_result.get_group(OUTAGE_MODULE_LEVEL_PREDICTED_TAG) predicted_north_data = list(df_north_predicted['north_drum_tag_value']) df_south_actual = df_result.get_group(OUTAGE_MODULE_LEVEL_ACTUAL_TAG) actual_south_data = list(df_south_actual['south_drum_tag_value']) df_south_predicted = df_result.get_group(OUTAGE_MODULE_LEVEL_PREDICTED_TAG) predicted_south_data = list(df_south_predicted['south_drum_tag_value']) elif len(df_result) == 1: if df_result[TAG_NAME_REQUEST] == OUTAGE_MODULE_LEVEL_ACTUAL_TAG: df_description = \ df_data[df_data[TAG_NAME_REQUEST] == OUTAGE_MODULE_LEVEL_ACTUAL_TAG][ DESCRIPTION].iloc[0] df_north_actual = 
df_result.get_group(OUTAGE_MODULE_LEVEL_ACTUAL_TAG) actual_north_data = list(df_north_actual['north_drum_tag_value']) df_south_actual = df_result.get_group(OUTAGE_MODULE_LEVEL_ACTUAL_TAG) actual_south_data = list(df_south_actual['south_drum_tag_value']) elif df_result[TAG_NAME_REQUEST] != OUTAGE_MODULE_LEVEL_ACTUAL_TAG: df_description = \ df_data[df_data[TAG_NAME_REQUEST] == OUTAGE_MODULE_LEVEL_PREDICTED_TAG][ DESCRIPTION].iloc[0] df_north_predicted = df_result.get_group(OUTAGE_MODULE_LEVEL_PREDICTED_TAG) predicted_north_data = list(df_north_predicted['north_drum_tag_value']) df_south_predicted = df_result.get_group(OUTAGE_MODULE_LEVEL_PREDICTED_TAG) predicted_south_data = list(df_south_predicted['south_drum_tag_value']) temp = {"north_actual": actual_north_data, "north_predicted": predicted_north_data, "south_actual": actual_south_data, "south_predicted": predicted_south_data, "x_axis": df_timestamp, "unit": df_unit, "description": df_description} else: temp = {"y_axis": list(df_data[TAG_VALUE]), "x_axis": df_timestamp, "unit": df_unit, "description": df_description} if not df_min_max_data.empty: temp["min_data"] = df_min_max_data[MIN_VALUE].iloc[0] temp["max_data"] = df_min_max_data[MAX_VALUE].iloc[0] else: temp["min_data"] = None temp["max_data"] = None graph.append(temp) return graph except AssertionError as e: log_error("Exception due to : %s" + str(e)) return asert_res(e) except Exception as e: log_error("Exception due to : %s" + str(e)) return json_InternalServerError
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_outage_graph(request, equipment_name=None, module_name=None):\r\n query_params, obj = None, None\r\n try:\r\n\r\n query_params = request\r\n\r\n except:\r\n pass\r\n\r\n try:\r\n if request.method == GET_REQUEST:\r\n obj = OutageGraph(query_params, equipment_name, module_name)\r\n return obj.get_outage()\r\n\r\n log_debug(METHOD_NOT_ALLOWED)\r\n return JsonResponse({MESSAGE_KEY: METHOD_NOT_ALLOWED},\r\n status=HTTP_405_METHOD_NOT_ALLOWED)\r\n\r\n except Exception as e:\r\n\r\n excMsg = \"get_outage_graph_data API : \" + str(error_instance(e))\r\n\r\n return excMsg\r\n\r\n finally:\r\n\r\n if obj:\r\n del obj", "def get_graph(self) -> dict:\n response = requests.get(self.channel, params=\"get_graph\")\n return json_to_graph(response.content)", "def get_graph_summary(self):\n\n pass", "def _get_full_graph(self):", "def getGraph(self):\n\t\treturn self.graph", "def graphs(self):\n return self.__graphs", "def get_graphs_data_connection(self):\n return self.m_connection.graphs_data", "def get_graph(**options):\n graph = bonobo.Graph()\n\n split_dbs = bonobo.noop\n\n graph.add_chain(\n GetOrderXML(\n prefix=\"/etl/ivm\",\n glob=[\n 'Mozilla_Corporation{timestamp:%Y_%m_%d}*.xml'.format(\n timestamp=options['now'])\n ]),\n ParseDates(['Transactionlog_Tranenddatetime']),\n truncate_description,\n bonobo.UnpackItems(0),\n bonobo.Rename(\n transaction_date='Transactionlog_Tranenddatetime',\n item_number='Transactionlog_Itemnumber',\n transaction_id='Transactionlog_Tlid',\n item_description='Transactionlog_Itemdesc'),\n bonobo.Rename(\n user_id='Transactionlog_User',\n quantity='Transactionlog_Qty',\n transaction_code='Transactionlog_Transcode',\n description='Vendingmachines_Descr',\n ),\n split_dbs,\n _name=\"main\")\n\n #insert into ivm (description, transaction_id, item_number, item_description, user_id, quantity, transaction_date, transaction_code) values\n\n for engine in list(set(options['engine'])):\n graph.add_chain(\n bonobo_sqlalchemy.InsertOrUpdate(\n table_name=options['table_name'] + options['table_suffix'],\n discriminant=('transaction_id', ),\n engine=engine),\n _input=split_dbs)\n\n return graph", "def get_output_nodes(self):\n \n\n self.buildings = self.dataset.groups['buildings']\n self.building_nodes = self.buildings.groups['nodes']\n\n eta_output_added = getattr(self.building_nodes,'eta_output_added')\n uv_output_added = getattr(self.building_nodes,'uv_output_added')\n\n eta = []\n uv = []\n nodeIds = []\n time = []\n \n if(eta_output_added or uv_output_added ):\n time = self.building_nodes.variables['time'][:].tolist()\n nodeIds = self.building_nodes.variables['id'][:].tolist()\n if eta_output_added: eta = self.building_nodes.variables['eta'][:].tolist()\n if uv_output_added: uv = self.building_nodes.variables['uv'][:].tolist()\n\n \n return nodeIds,eta, uv, time", "def get_graph(self):\n return json.dumps(self.graph.get_edgelist(), separators=(',',':'))", "def graph(self):\n ...", "def dump_graph(self):\n # TODO\n return", "def getOutageHistory(self):\n return self._OutageHistory", "def get_graph_data(\n self, episode_queryset\n ):\n total = episode_queryset.count()\n count = 0\n result = {}\n\n if total == 0:\n amount = 0\n else:\n count = episode_queryset.annotate(\n subrecord_count=Count(self.subrecord_api_name)\n ).filter(subrecord_count=0).count()\n\n amount = round(float(count)/total, 3) * 100\n result[\"total\"] = total\n result[\"count\"] = count\n aggregate = [['None', amount]]\n links = {\"None\": self.to_link(\"None\")}\n result[\"graph_vals\"] = 
json.dumps(dict(\n aggregate=aggregate,\n subrecord=self.subrecord_api_name,\n links=links\n ))\n return result", "def data_graph():\n station_reference = request.args.get(\"stationReference\")\n station_name = request.args.get(\"stationName\")\n station_name = station_name.replace(\" \",\"+\")\n\n if station_name is not None:\n # station_data = station_data.replace(\" \", \"+\")\n station = station_data.loc[station_data.stationName == station_name]\n else:\n station = station_data.loc[station_data.stationReference == station_reference]\n result_station = station.iloc[0]\n\n # Get optional parameters\n time_from = request.args.get(\"from\")\n time_to = request.args.get(\"to\")\n if time_from:\n pass\n else:\n time_from = None\n if time_to:\n pass\n else:\n time_to = None\n # plot pic\n magic_trick= data.station_graph(result_station.stationName, time_from, time_to)\n # img_stream = io.BytesIO(img)\n # img = Image.open(img_stream)\n # imgByteArr = io.BytesIO()\n # img.save(imgByteArr,format='PNG')\n # imgByteArr = imgByteArr.getvalue()\n # return send_file(io.BytesIO(imgByteArr),\n # mimetype = 'image/png',\n # as_attachment = True,\n # attachment_filename = 'tmp.png')\n image_data = open(\"tmp.png\", \"rb\").read()\n response = make_response(image_data)\n response.headers['Content-Type'] = 'image/png'\n return response", "def graph(self):\n return self.__graph", "def get_graph(**options):\r\n graph = bonobo.Graph()\r\n graph.add_chain(get_stock_list,extract, process, load)\r\n\r\n return graph", "def buildGraph(self):\n return None", "def export_nodes(self):\n return ['lon', 'lat', 'speed', 'heading'], \\\n [{'speed': self.node_speed_limit[v],\n 'lon': self.node_locations[v][0],\n 'lat': self.node_locations[v][1],\n 'heading': self.node_heading[v]} for v in self.graph.vertices()]", "def getOutEdges(self):\n edges = []\n for edict in mm.G[self].values():\n for k in edict.keys():\n edges.append(edict.get(k).get(\"edge\"))\n \n return edges", "def gen_graph(self):", "def graph(self):\n return self._graph", "def graph(self):\n return self._graph", "def graph(self) -> dict:\n return self.flat_graph()", "def get_graph(self):\n return self._graph", "def export_json_graph(self, destpath):\n export = {}\n export['vertices'] = self.vertices\n export['edges'] = self.edges\n export['_totals'] = {}\n export['_photo'] = {}\n export['_photo']['credit'] = self.photo['credit']\n export['_photo']['entity_max'] = self.photo['max']\n export['_totals']['media'] = len(self.media)\n export['_totals']['wilds'] = len(self.wilds)\n export['_totals']['zoos'] = len(self.zoos)\n export['_totals']['locations'] = len(self.wilds) + len(self.zoos)\n export['_totals']['pandas'] = self.sum_pandas()\n export['_totals']['last_born'] = self.summary['birthday']\n export['_totals']['last_died'] = self.summary['death']\n with open(destpath, 'wb') as wfh:\n wfh.write(json.dumps(export, \n ensure_ascii=False,\n indent=4,\n sort_keys=True).encode('utf8'))\n print(\"Dataset exported: %d pandas at %d locations (%d wild, %d zoo)\"\n % (export['_totals']['pandas'], export['_totals']['locations'],\n export['_totals']['wilds'], export['_totals']['zoos']))", "def graph():\n # Try to get params request\n params = extract_variables(['start_time', 'end_time', 'sensor_id'], request)\n # Fetch data from database\n results = query_climate_range(**params)\n\n # Turn it in to lists which can be graphed\n dates = []\n humids = []\n temps = []\n pressures = []\n for result in results:\n dates.append(datetime.datetime.fromtimestamp(result['time']))\n 
humids.append(result['humid'])\n temps.append(result['temp'])\n pressures.append(result['pressure'])\n\n # Graph it\n fig = Figure()\n # First y axis (temp and humid)\n axis = fig.add_subplot(1, 1, 1)\n # Plot humidity and temp on the same scale\n axis.plot_date(dates, humids, '-', color=COLORS['blue'])\n axis.plot_date(dates, temps, '-', color=COLORS['red'])\n axis.xaxis.set_major_formatter(DateFormatter('%d/%m/%y %H:%M'))\n axis.set_ylabel('Humidity in % & Temps in C')\n axis.set_xlabel('Time')\n # Second y axis (pressure)\n axis_pressure = axis.twinx()\n # Plot pressure\n axis_pressure.plot_date(dates, pressures, '-', color=COLORS['green'])\n axis_pressure.xaxis.set_major_formatter(DateFormatter('%d/%m/%y %H:%M'))\n axis_pressure.set_ylabel('Pressure in mbar')\n # Configure the figure\n fig.autofmt_xdate()\n fig.legend(['Humidity', 'Temperature', 'Pressure'], loc='lower right')\n fig.set_tight_layout(True)\n canvas = FigureCanvas(fig)\n # Save output\n png_output = BytesIO()\n canvas.print_png(png_output)\n\n # Create the response and send it\n response = make_response(png_output.getvalue())\n response.headers['Content-Type'] = 'image/png'\n return response", "def graph():\n return jsonify(app.config[\"jsonified\"])", "def graph(self):\n\n return self._graph", "def gen_graph():\n if config_pagination:\n gdata = tgraph.call_graph(offset=offset, limit=limit)\n else:\n gdata = tgraph.call_graph(start=start, end=end, contineous=contineous)\n\n for data in gdata:\n yield data", "def load_graph(graph_url): # Function Provided By instructor - Grabs a specific graph from the internet and converts it to a form we can use\n graph_file = urllib2.urlopen(graph_url) # sets graph_file var to the file downloaded by urlopen\n graph_text = graph_file.read() # invokes read on the file downloaded\n graph_lines = graph_text.split('\\n')\n graph_lines = graph_lines[ : -1]\n\n print \"Loaded graph with\", len(graph_lines), \"nodes\"\n\n answer_graph = {}\n for line in graph_lines:\n neighbors = line.split(' ')\n node = int(neighbors[0])\n answer_graph[node] = set([])\n for neighbor in neighbors[1 : -1]:\n answer_graph[node].add(int(neighbor))\n\n print \"Finished processing Out-Degrees\"\n\n return answer_graph", "def get_graphs_connection(self):\n return self.m_connection.graphs", "def ndata(self):\n raise Exception(\"Graph store doesn't support access data of all nodes.\")", "def build_graph(self):\n pass", "def populate_graph(self):", "def download_chicago_graph():\n\n\tG = ox.graph_from_place(\"Chicago,IL, United States\", network_type='drive')\n\treturn G", "def get_graph_data(\n self,\n episode_queryset,\n ):\n qs = get_subrecord_qs_from_episode_qs(self.subrecord, episode_queryset)\n total = qs.count()\n count = 0\n\n if total == 0:\n amount = 0\n else:\n if isinstance(self.get_field(), ForeignKeyOrFreeText):\n count = qs.filter(**{\n self.fk_field: None,\n self.ft_field: '',\n }).count()\n else:\n count = qs.filter(**{\n self.field_name: None\n }).count()\n amount = round(float(count)/total, 3) * 100\n result = {}\n result[\"total\"] = total\n result[\"count\"] = count\n aggregate = [['None', amount]]\n links = {\"None\": self.to_link(\"None\")}\n result[\"graph_vals\"] = json.dumps(dict(\n aggregate=aggregate,\n field=self.field_name,\n links=links,\n subrecord=self.subrecord_api_name\n ))\n return result", "def _build_graph(self):\n pass", "def graph(self):\n return self.batch.graph", "def graph_data(self, timeframe):\n logging.info(\"Graphing Data\")\n pprog = self.prog_logs\n cursor = 
pprog.find({})\n data = {\n \"emotional\": [],\n \"physical\": [],\n \"cognitive\": []\n }\n comp = self.get_timeframe(timeframe)\n for doc in cursor:\n date = list(doc.keys())[1]\n try:\n datecomp = datetime.datetime.strptime(date, \"%Y-%m-%d %H:%M\")\n except:\n datecomp = datetime.datetime.today()\n if datecomp > datetime.datetime.combine(comp, datetime.time.min):\n for key in data.keys():\n rating = int(doc[date][\"data\"][key][\"rating\"])\n data[key].append(rating)\n plt.ylabel('Level')\n plt.xlabel('Number of Logs - Ordered By Date')\n for key in data.keys():\n plt.plot(data[key])\n plt.legend(['Emotional', 'Physical', 'Cognitive'], loc='upper left')\n plt.show()", "def graph():\n port_to_csv()\n\n source = ''\n if request.form.get('GraphType', '') == '':\n source = url_for('static', filename='frog no graph.png')\n else:\n source = s_modular(request.form.get('GraphType', ''), '')\n\n return render_template(\n 'tmpGraph.jade',\n title=\"Graph\",\n year=datetime.now().year,\n src=source\n )", "def plot_graph(self) -> None:", "def getData(graph, request):\r\n results = list(graph.query(request))\r\n return results", "def get_info_from_screen(self):\n if self.layout.is_zoomed_out:\n graph_nodes = self.layout.kivy_graph_out.kivy_graph.nodes\n graph_edges = self.layout.kivy_graph_out.kivy_graph.edges\n graph_corners = self.layout.kivy_graph_out.kivy_graph.corners\n else:\n graph_nodes = self.layout.kivy_graph_in.kivy_graph.nodes\n graph_edges = self.layout.kivy_graph_in.kivy_graph.edges\n graph_corners = self.layout.kivy_graph_in.kivy_graph.corners\n nodes = self.get_onscreen_nodes(graph_nodes, graph_corners)\n edges = self.get_onscreen_edges(graph_edges, graph_corners)\n\n return {'nodes': nodes, 'edges': edges}", "def out(self):\n return self.ag.output()", "def universePayoutHistory(universeId, startDate, endDate):\n url = f\"https://engagementpayouts.roblox.com/v1/universe-payout-history?endDate={endDate}&startDate={startDate}&universeId={universeId}\"\n r = requests.get(url, cookies=cookie)\n j = json.loads(r.text)\n return j", "def all_out_edges_of_node(self, id1: int) -> dict:\n return self.edges_out[id1]", "def get_graph(self, name, owner_email=None):\n\t\tresponse = self._make_request(\"GET\", '/api/v1/graphs/', url_params={\n\t\t\t'owner_email': self.username if owner_email is None else owner_email,\n\t\t\t'names[]': name\n\t\t}).json()\n\n\t\tif response.get('total', 0) > 0:\n\t\t\treturn response.get('graphs')[0]\n\t\telse:\n\t\t\treturn None", "def to_response(self):\n self.ctx[\"graph\"] = self.execute_op()\n return result_response(GraphExportCtrl.RESPONSE_SERIALIZER, self.ctx)", "def graphs():\n return render_template(\"graphs.html\")", "def _CreateGraph(self):\n self.nodes = []\n self.edges = []\n for i, r in self.airports.set_index('airport_id').iterrows():\n self.nodes.append((i,r.to_dict()))\n for i, r in self.routes.set_index(['src_id','dst_id']).iterrows():\n self.edges.append((i[0],i[1],r.to_dict()))\n # print('node ex: {}'.format(self.nodes[0]))\n # print('edge ex: {}'.format(self.edges[0]))\n\n self.graph = self._CreateAdjacencyListGraph()", "def out_edges(self) -> List[str]:\n return list(self.proto.out_edges)", "def extract_infrastructure_graph(workload_name, ts_from, ts_to):\n landscape_ip = ConfigHelper.get(\"LANDSCAPE\", \"host\")\n landscape_port = ConfigHelper.get(\"LANDSCAPE\", \"port\")\n subgraph_extraction = SubGraphExtraction(landscape_ip=landscape_ip,\n landscape_port=landscape_port)\n # res = subgraph_extraction.get_workload_view_graph(\n # 
workload_name, int(ts_from), int(ts_to),\n # name_filtering_support=True)\n res = landscape.get_graph()\n #PARALLEL = True\n if PARALLEL:\n i = 0\n threads = []\n cpu_count = multiprocessing.cpu_count()\n all_node = res.nodes(data=True)\n no_node_thread = len(res.nodes()) / cpu_count\n node_pool = []\n\n for node in all_node:\n if i < no_node_thread:\n node_pool.append(node)\n i = i + 1\n else:\n thread1 = ParallelLandscape(i, \"Thread-{}\".format(InfoGraphNode.get_name(node)), i,\n node_pool)\n # thread1 = ParallelTelemetryAnnotation(i, \"Thread-{}\".format(InfoGraphNode.get_name(node)), i,\n # node_pool, internal_graph, self.telemetry, ts_to, ts_from)\n thread1.start()\n threads.append(thread1)\n i = 0\n node_pool = []\n if len(node_pool) != 0:\n thread1 = ParallelLandscape(i, \"Thread-{}\".format(InfoGraphNode.get_name(node)), i,\n node_pool)\n thread1.start()\n threads.append(thread1)\n\n [t.join() for t in threads]\n else:\n for node in res.nodes(data=True):\n attrs = InfoGraphNode.get_attributes(node)\n attrs = InfoGraphUtilities.str_to_dict(attrs)\n InfoGraphNode.set_attributes(node, attrs)\n return res", "def traffic(request):\n api_key = '3dc6c88ceef523a8a46ad0a866468426'\n gauge_id = '4ff13d5ef5a1f535d0000003'\n\n # preparing the request URL\n url = 'https://secure.gaug.es/gauges/%s/traffic' % gauge_id\n headers = {'X-Gauges-Token': '%s' % api_key}\n\n # make the actual request\n gauges_data = requests.get(url, headers=headers)\n\n # setup some defaults for the json graph\n display_graph = {'title': 'Jontourage', 'type': 'line'}\n views = {'title': 'Views', 'datapoints': []}\n visitors = {'title': 'People', 'datapoints': []}\n\n display_graph = {\n 'title': 'Jontourage Visitors',\n 'color': 'blue',\n 'type': 'line',\n 'yAxis': {\n 'minValue': 0,\n },\n 'datasequences': [\n {\n 'title': 'Visitors',\n 'datapoints': [],\n },\n ]\n }\n\n # process json so we can parse it\n data = json.loads(gauges_data.content)\n\n # iterate over the data and pick out what we need to make into\n # a new dictionary\n for item in data['traffic']:\n # TODO format date before appending\n views['datapoints'].append({'title': item['date'],\n 'value': item['views']})\n\n # setup the graphs\n display_graph['datasequences'] = views\n data = json.dumps(display_graph)\n\n return HttpResponse(data, mimetype='application/json')", "def graph(self) -> rx.PyDiGraph:\n return self._graph", "def get_nodes():\n\n host = str(request.args['host'])\n days = float(request.args['days'])\n\n to_time = int(time.time())\n to_day = int(time.strftime('%Y%m%d', time.gmtime(float(to_time))))\n from_time = to_time-int(days*24*60*60)\n from_day = int(time.strftime('%Y%m%d', time.gmtime(float(from_time))))\n day_in=''\n for x in range(from_day, to_day+1):\n day_in = day_in + ',' + str(x)\n day_in=re.sub(r\"^,\", \"\", day_in)\n day_in=re.sub(r\",$\", \"\", day_in)\n query = \"SELECT * FROM metrics WHERE host='\" + str(host) + \"' and date IN (\"\n query = query + str(day_in) + \") and time>=\" + str(int(int(from_time)*1000)) + \" and time<=\"\n query = query + str(int(int(to_time)*1000)) + \" ALLOW FILTERING\"\n rows = session.execute(query);\n reply={}\n last_value={}\n for r in rows:\n if str(r.host) not in reply:\n reply[r.host]={}\n last_value[r.host]={}\n if str(r.metric) not in reply[r.host]:\n reply[r.host][r.metric]=[]\n last_value[r.host][r.metric]=int(r.value)\n continue\n real_value = (r.value-last_value[r.host][r.metric])/60\n\tlast_value[r.host][r.metric]=int(r.value)\n reply[str(r.host)][r.metric].append({ 'value': 
int(real_value),\n 'time': str(r.time) })\n return json.dumps(reply)", "def build_graph(self):\n raise NotImplementedError", "def Test_GraphStatistics(Graph_MD):\n # Simulation der Statistiken for jedes Netz\n GraphStats = M_Graph.create_stats(Graph_MD)\n \n return GraphStats", "def get_data_manager():\n return IncomingEdge.items", "def edata(self):\n raise Exception(\"Graph store doesn't support access data of all edges.\")", "def index():\n \n currentDateTime = current_datetime()\n fromDateTime = calc_day(currentDateTime, -3)\n\n # Adjust if any graphs should be shown in index page\n # Temperatur=XML(render_graph(3, 5, fromDateTime, currentDateTime, show_dots=False))\n # Procent_smoke=XML(render_graph(3, 6, fromDateTime, currentDateTime, show_dots=False))\n # Kitchen_Stove=XML(render_graph(2, 3, fromDateTime, currentDateTime, show_dots=False))\n # Humid=XML(render_graph(3, 4, fromDateTime, currentDateTime, show_dots=False))\n # Brightness=XML(render_graph(3, 7, fromDateTime, currentDateTime, show_dots=False))\n # Hall_motions=XML(render_graph(1, 1, fromDateTime, currentDateTime, show_dots=False, hits=True))\n # Hall_door=XML(render_graph(1, 2, fromDateTime, currentDateTime, show_dots=False, on_off=['Open', 'Close']))\n\n # return dict(test=locals())\n # return dict(test=device_monitoring)\n return dict()", "def get_graph(self, path):\n raise NotImplementedError", "def get_tpd_graphs_connection(self):\n return self.m_connection.tpd_graphs", "def dump_graph(self):\n\n edges = []\n for vertex in self.__graph_dict:\n mylist = list(vertex)\n logging.debug(\"mylist : \", mylist)", "def get_ecg_graph():\n titles = ['ecg1', 'ecg2', 'ecg3']\n colors = ['rgb(240,0,0)', 'rgb(0,240,0)', 'rgb(0,0,240)']\n update()\n signames_ecg = queries['signames_ecg']\n signals = queries['signals']\n latesthr = queries['latesthr']\n return html.Div(className='ecg', children=[\n html.Div(style={'display': 'flex', 'height': '40vh'},\n children=[dcc.Graph(\n id=titles[i] + signame,\n style={'width': '100%'},\n figure={\n 'data': [\n {'x': signals[signame]['time'],\n 'y': signals[signame][titles[i]],\n 'mode': 'line', 'name': signame, 'line': {'color':colors[i]}}\n ],\n 'layout': {\n 'font': {'color':'#fff'},\n 'title': '{}-{}'.format(signame, titles[i]),\n 'xaxis': {'title': 'time', 'color': '#fff', 'showgrid': 'False'},\n 'yaxis': {'title': 'voltage (mv)', 'color': '#fff', 'showgrid': 'False', 'range': np.linspace(-2.5, 2.5, 10)},\n 'paper_bgcolor':'#000', 'plot_bgcolor':'#000'\n }\n }\n ) for i in range(len(titles))]\n +\n [html.Div(\n style={'justify-content': 'center', 'display': 'flex',\n 'align-items': 'center', 'width': '10vh', 'font-size': '30pt', 'color': 'white'},\n children=['{}'.format(latesthr[signame][0])])\n ]\n ) for signame in signames_ecg])", "def describe_graph(g):\n print(\"Order: {} nodes\".format(g.number_of_nodes()))\n print(\"Max node id: {}\".format(max([n for n in g.nodes()])))\n print(\"Size: {} edges (interactions)\".format(g.number_of_edges()))\n print(\"Density: {}\".format(nx.density(g)))\n ts = nx.get_edge_attributes(g, 'start')\n ds = nx.get_edge_attributes(g, 'duration')\n print(\"First timestamp is: {}\".format(min(ts.values())))\n print(\"Last timestamp is: {}\".format(max([ts[k] + ds[k] for k in ts.keys()])))", "def generateInterfaceData(nodeInput, prev_node, interfaceName, days, prev_days, cache):\n\t\n\t# get the data if we have input\n\t# interface_data = cache.get('Interface')\n\t# print(interface_data)\n\t# if interface_data is not None:\n\t\t# node_saved = 
list(interface_data.keys())[0]\n\t\t# If the node is the same as the one we had, just update the data\n\t\t# if interfaceName in interface_data[node_saved] and \\\n\t\t\t# prev_days == days and prev_node == nodeInput:\n\t\t\t# print(\"Cache data should be used\")\n\t\t\t# data = []\n\t\t\t# interface_data \n\t\t#if ('node' in session and 'day' in session) and \\\n\t\t\t# interfaceName in session['node'] and \\\n\t\t\t# (session['node'][interfaceName] == interfaceName and session['day'] == days):\n\t\t\t# data = interface_data[nodeInput] if node_saved is not None else node_saved\n\t\t# else:\n\t\t\t# data = generateGraphData(getNodeInterfaceUtil, nodeInput, interfaceName, days)\n\t# else:\n\tdata = generateGraphData(getNodeInterfaceUtil, nodeInput, interfaceName, days)\n\t# interface_data[node_saved][interfaceName] = data\n\tif not data:\n\t\treturn \"Data is not available at this time for Availability graph\"\n\t\t\n\t# If we have data append\n\tlayout = buildGraphLayout(title=\"Utilization Percentage: \" + interfaceName)\n\tlayout.update(\n\t\tdict(shapes=[\n\t\t\t{\n\t\t\t\t'type': 'line',\n\t\t\t\t'x0': dayNum - .5,\n\t\t\t\t'x1': dayNum - .5,\n\t\t\t\t'y0': 0,\n\t\t\t\t'y1': 24,\n\t\t\t\t'line': {\n\t\t\t\t\t'color': 'rgb(0, 0, 0)',\n\t\t\t\t\t'width': 2\n\t\t\t\t}\n\t\t\t} for dayNum in range(days)\n\t\t] \n\t))\n\n\t# convert the data to x, y and z axes --> then generate graph\n\tx_axis, y_axis, z_axis, max_data, last_data = getXYZData(data)\n\thoverInfo = getHoverInfo(x_axis, z_axis, max_data)\n\t\n\theatGraph = None\n\tif not data:\n\t\theatGraph = html.Label(\"No data available for interface: \" + interfaceName,\n\t\t\t\t\t\tstyle=dict(width=300, fontSize=40, align='center', textAlign='center', marginLeft=150, marginRight='auto'))\n\telse:\n\t\theatGraph = dcc.Graph(\n\t\t\tfigure=graph.Figure(\n\t\t\t\tdata=[graph.Heatmap(\n\t\t\t\t\t\tx=x_axis,\n\t\t\t\t\t\ty=y_axis,\n\t\t\t\t\t\tz=z_axis,\n\t\t\t\t\t\tzauto=False,\n\t\t\t\t\t\tzmin=0,\n\t\t\t\t\t\tzmax=100,\n\t\t\t\t\t\tcolorbar=dict(\n\t\t\t\t\t\t\ttickmode= 'array',\n\t\t\t\t\t\t\ttickvals=[0, 20, 40, 60, 80, 100],\n\t\t\t\t\t\t\tticktext=[0, 20, 40, 60, 80, 100]\n\t\t\t\t\t\t),\n\t\t\t\t\t\tcolorscale=[\n\t\t\t\t\t\t\t[0, 'rgb(0, 0, 255)'],\n\t\t\t\t\t\t\t[0.13, 'rgb(0, 130, 150)'],\n\t\t\t\t\t\t\t[0.25, 'rgb(0, 250, 0)'],\n\t\t\t\t\t\t\t[0.38, 'rgb(100, 250, 0)'],\n\t\t\t\t\t\t\t[0.5, 'rgb(100, 200, 0)'],\n\t\t\t\t\t\t\t[0.63, 'rgb(133, 150, 0)'],\n\t\t\t\t\t\t\t[0.75, 'rgb(255, 100, 0)'],\n\t\t\t\t\t\t\t[0.88, 'rgb(255, 36, 0)'],\n\t\t\t\t\t\t\t[1.0, 'rgb(255, 0, 0)']\n\t\t\t\t\t\t],\n\t\t\t\t\t\thoverinfo='text',\n\t\t\t\t\t\ttext=hoverInfo\n\t\t\t\t)],\n\t\t\t\tlayout=layout\n\t\t\t),\n\t\t\tstyle=dict(height=500, width=900, marginBottom=100),\n\t\t\tid=interfaceName\n\t\t)\n\t\n\t# make sure we are only updating the children one at a time\n\tinterface_lock.acquire()\n\tchildren = cache.get('childrenToAdd')\n\tif children is not None:\n\t\tchildren.append(heatGraph)\n\tcache.set('childrenToAdd', children)\n\tinterface_lock.release()", "def get_graph(self):\n return copy.deepcopy(self.graph)", "def os_open_graph( self, ):\r\n pass", "def populate_price_change_graph(market):\n data = list()\n labels = list()\n\n queryset = DailyStatistic.objects.filter(market=market).order_by('-date')[:10]\n\n for stat in queryset:\n try:\n data.append(round(stat.percent_change_dd*100))\n labels.append(\"{}.{}\".format(stat.date.day,stat.date.month))\n except TypeError:\n data.append(0)\n data.append('No data')\n\n\n data.reverse()\n 
labels.reverse()\n\n return data,labels", "def modsecViewGraphs(modsecDict):\r\n if len(modsecDict) < 1:\r\n exit('Error: No logs to visualize. Check log and Include/Exclude filters')\r\n '''\r\n GRAPHS PART I\r\n Collect information into lists/dicts to make particular graphs\r\n '''\r\n src_ip_tab = []\r\n event_time_action = []\r\n event_messages = []\r\n intercepted_reason = []\r\n event_rules = []\r\n for entry_mod in modsecDict:\r\n try:\r\n ''' Graph data for \"TOP 10 IP source addresses\" '''\r\n src_ip_tab.append(entry_mod['transaction']['remote_address'])\r\n\r\n ''' Graph data for \"Modsecurity Events reported vs intercepted\" '''\r\n if (version3 is False) and ('action' in entry_mod['audit_data'].keys() and 'intercepted' in entry_mod['audit_data']['action'].keys()):\r\n event_time_action.append([entry_mod['transaction']['time'], True])\r\n\r\n elif (version3 is True) and len(entry_mod['audit_data']) > 0:\r\n for each_msg in entry_mod['audit_data']['messages']:\r\n #print('each_msg :', each_msg)\r\n if each_msg.startswith(\"ModSecurity: Access denied\"):\r\n event_time_action.append([entry_mod['transaction']['time'], True])\r\n else:\r\n event_time_action.append([entry_mod['transaction']['time'], False])\r\n #print('Nobody expect the Spanish Inquisition for ModSecurity v3')\r\n #print('each_msg :', each_msg)\r\n else:\r\n # No 'intercepted'\r\n event_time_action.append([entry_mod['transaction']['time'], False])\r\n except Exception as e2:\r\n print('Exception in Graph TOP 10 IP source addresses', e2)\r\n\r\n ''' Graph data for \"TOP 20 rule hits\"'''\r\n try:\r\n if 'messages' in entry_mod['audit_data'].keys():\r\n messages = safedictkey(entry_mod, ['audit_data','messages'], '-')\r\n for each in messages:\r\n event_messages.append(each)\r\n rule_id = regular_expression_evaluate(each, modsec_message_id_pattern)\r\n rule_msg = regular_expression_evaluate(each, modsec_message_msg_pattern)\r\n rule_severity = regular_expression_evaluate(each, modsec_message_severity_pattern)\r\n rule_file = regular_expression_evaluate(each, modsec_message_file_pattern)\r\n \"\"\"\r\n Cut the [msg] to 27 chars if it is longer than 30 chars.\r\n If [msg] and [id] not found then treat message description as the [msg]\r\n \"\"\"\r\n if len(rule_msg) > 30:\r\n rule_msg = rule_msg[:27] + '...'\r\n if rule_msg == '?' 
and rule_id == '-':\r\n rule_msg = str(each)[:30]\r\n rule_descr = 'id: ' + str(rule_id) + ', sev: ' + str(rule_severity) + ', msg: ' + str(rule_msg)\r\n event_rules.append([rule_id, rule_msg, rule_severity, rule_file, rule_descr])\r\n else:\r\n ''' Skip modsec_audit entries without [message] part'''\r\n pass\r\n except Exception as e3:\r\n print('Exception in TOP 20 rule hits', e3)\r\n print('for transaction_id :', safedictkey(entry_mod, ['transaction','transaction_id'], '-'))\r\n\r\n ''' Graph data for \"TOP 10 Attacks intercepted\" '''\r\n try:\r\n if (version3 is False) and ('action' in entry_mod['audit_data']):\r\n msg = entry_mod['audit_data']['action']['message']\r\n if len(msg) > 60:\r\n msg = msg[:50] + '...'\r\n intercepted_reason.append([entry_mod['audit_data']['action']['phase'], msg, 'phase ' + str(entry_mod['audit_data']['action']['phase']) + ': ' + msg])\r\n elif (version3 is True) and len(entry_mod['audit_data']) > 0:\r\n for each_msg in entry_mod['audit_data']['messages']:\r\n if each_msg.startswith(\"ModSecurity: Access denied\"):\r\n msg = regular_expression_evaluate(each_msg, modsec_v3_message_msg_pattern)\r\n if len(msg) > 60:\r\n msg = msg[:50] + '...'\r\n phase = regular_expression_evaluate(each_msg, modsec_v3_message_phase_pattern)\r\n intercepted_reason.append([phase, msg, 'phase ' + phase + ': ' + msg])\r\n\r\n except Exception as e:\r\n print('Exception in Graph TOP 10 Attacks intercepted', e)\r\n \"\"\"\r\n Modsecurity events Passed vs Intercepted\r\n \"\"\"\r\n np_event_time_action = np.array(event_time_action)\r\n event_times1 = np_event_time_action[:, 0]\r\n event_times = list(map(lambda x: datetime.strptime(x, LOG_TIMESTAMP_FORMAT).replace(tzinfo=None), event_times1))\r\n event_action = np_event_time_action[:, 1]\r\n event_times_min = min(event_times); event_times_max = max(event_times); event_times_range = event_times_max - event_times_min\r\n event_times_range_seconds = int(event_times_range.total_seconds())\r\n event_times_range_minutes = int(event_times_range.total_seconds() / 60)\r\n if event_times_range_minutes < 60:\r\n PERIODS = str(int(event_times_range_seconds / 1)) + 's'\r\n else:\r\n PERIODS = str(int(event_times_range_minutes / 30)) + 'min'\r\n events_df = pd.DataFrame({\r\n 'date': pd.to_datetime(event_times),\r\n 'action': event_action\r\n })\r\n intercepted = [] ; passed = []; passed_cnt2 = 0; intercepted_cnt2 = 0\r\n for row in events_df['action']:\r\n if (row == 'True'):\r\n intercepted.append(1); passed.append(0); intercepted_cnt2 += 1\r\n else:\r\n intercepted.append(0); passed.append(1); passed_cnt2 += 1\r\n events_df['intercepted'] = intercepted; events_df['passed'] = passed\r\n '''\r\n GRAPHS PART II\r\n '''\r\n ''' TOP 10 IP addresses Graph - data preparation '''\r\n ipaddr_cnt = Counter()\r\n for word in src_ip_tab:\r\n ipaddr_cnt[word] += 1\r\n ipaddr_cnt_top10 = dict(ipaddr_cnt.most_common(10))\r\n\r\n ''' TOP 10 Interception Reason - data preparation'''\r\n intercepted_cnt = Counter()\r\n for word in intercepted_reason:\r\n intercepted_cnt[word[2]] += 1\r\n intercepted_cnt_top10 = dict(intercepted_cnt.most_common(10))\r\n ''' TOP 20 Rule IDs hit - data preparation'''\r\n event_messages_ids = Counter()\r\n for word in event_rules:\r\n event_messages_ids[word[4]] += 1\r\n event_messages_ids_top20 = dict(event_messages_ids.most_common(20))\r\n\r\n ''' GRIDS VERSION BEGIN '''\r\n fig = plt.figure(0)\r\n grid = plt.GridSpec(3, 3, wspace=1.1, hspace=1.1)\r\n ax1 = plt.subplot(grid[0, 0:3])\r\n ax21 = plt.subplot(grid[1, 0])\r\n ax22 = 
plt.subplot(grid[2, 0])\r\n ax31 = plt.subplot(grid[1, 1])\r\n ax32 = plt.subplot(grid[2, 1])\r\n ax41 = plt.subplot(grid[1, 2])\r\n ax42 = plt.subplot(grid[2, 2])\r\n\r\n # Graph Included or Excluded\r\n modsec_inc_exc_str = ''\r\n if FILTER_INCLUDE:\r\n modsec_inc_exc_str = 'Filter INCLUDE active. Skipped the rest of ' + str(records_skipped_cnt) + \\\r\n ' events where source IP address NOT in: ' + str(filter_include_table)\r\n elif FILTER_EXCLUDE:\r\n modsec_inc_exc_str = 'Filter EXCLUDE active. Skipped the rest of ' + str(records_skipped_cnt) + \\\r\n ' events where source IP address in: ' + str(filter_exclude_table)\r\n else:\r\n modsec_inc_exc_str = 'Filter INCLUDE/EXCLUDE non-active.'\r\n\r\n title_timespan = 'Analysis of ' + str(records_processed_cnt) + ' modsecurity events in timespan: ' + \\\r\n str(event_times_min.strftime(\"%Y-%m-%d_%H:%M\")) + ' - ' + str(event_times_max.strftime(\"%Y-%m-%d_%H:%M\")) + '\\n'\r\n title_total = 'Total number of events found in logfile ' + str(records_total) + ' (output always trimmed to variable MAXEVENTS = ' + str(MAXEVENTS) + ' )\\n'\r\n title_reported_intercepted = 'events passed: ' + str(passed_cnt2) + ' , events intercepted: ' + str(intercepted_cnt2)\r\n plot_title = title_timespan + title_total + modsec_inc_exc_str + '\\n\\n' + title_reported_intercepted\r\n if event_times_range_seconds < 1800:\r\n short_time_range_message = 'Creating timeline graph is not available for timespan ' + str(event_times_range_seconds) + ' seconds, skipping ...'\r\n plt.subplot(ax1)\r\n plt.text(0.5, 0.5, short_time_range_message, horizontalalignment='center', verticalalignment='center')\r\n plt.title(plot_title)\r\n else:\r\n ex = events_df.groupby(pd.Grouper(key='date', freq=PERIODS)).sum()\r\n ex.plot(ax=ax1, kind='bar', title=plot_title, stacked=True, color={'purple', 'red'}, fontsize=7, rot=45)\r\n\r\n ''' Bar chart \"TOP 10 IP addresses\" '''\r\n plt.subplot(ax21)\r\n patches, texts, autotexts = plt.pie(ipaddr_cnt_top10.values(), autopct='%1.1f%%', shadow=True, startangle=90,radius=1.0)\r\n plt.title(' TOP %s IP addresses (out of total %s) ' % (len(ipaddr_cnt_top10), len(ipaddr_cnt)), bbox={'facecolor': '0.8', 'pad': 5})\r\n\r\n ''' Legend for chart \"TOP 10 IP addresses\" '''\r\n x = np.char.array(list(ipaddr_cnt_top10.keys()))\r\n y = np.array(list(ipaddr_cnt_top10.values()))\r\n labels = ['{0} --> {1} hits'.format(i, j) for i, j in\r\n zip(ipaddr_cnt_top10.keys(), ipaddr_cnt_top10.values())]\r\n if len(ipaddr_cnt_top10.keys()) >= 1:\r\n patches, labels, dummy = zip(*sorted(zip(patches, labels, y), key=lambda x: x[2], reverse=True))\r\n plt.subplot(ax22)\r\n plt.axis('off')\r\n plt.legend(patches, labels, loc='center left', bbox_to_anchor=(-0.1, 1.), fontsize=7)\r\n\r\n ''' Bar chart \"TOP 10 Attacks intercepted\" '''\r\n plt.subplot(ax31)\r\n patches, texts, autotexts = plt.pie(intercepted_cnt_top10.values(), autopct='%1.1f%%', shadow=True, startangle=90, radius=1.0)\r\n [_.set_fontsize(7) for _ in texts]\r\n plt.title('TOP 10 Attacks intercepted', bbox={'facecolor': '0.8', 'pad': 5})\r\n\r\n ''' Legend for chart \"TOP 10 Attacks intercepted\" '''\r\n x = np.char.array(list(intercepted_cnt_top10.keys()))\r\n y = np.array(list(intercepted_cnt_top10.values()))\r\n labels = ['{0} --> {1} hits'.format(i,j) for i,j in zip(intercepted_cnt_top10.keys(), intercepted_cnt_top10.values())]\r\n if len(intercepted_cnt_top10.values()) >= 1:\r\n patches, labels, dummy = zip(*sorted(zip(patches, labels, y), key=lambda x: x[2], reverse=True))\r\n plt.subplot(ax32)\r\n 
plt.axis('off')\r\n plt.legend(patches, labels, loc='center left', bbox_to_anchor=(-0.1, 1.), fontsize=7)\r\n else:\r\n plt.subplot(ax32)\r\n plt.axis('off')\r\n plt.text(0.5, 0.5, 'No intercepted events found for given data set', horizontalalignment='center', verticalalignment='center')\r\n\r\n ''' Bar chart \"TOP 20 Rule IDs hit\" '''\r\n plt.subplot(ax41)\r\n patches, texts, autotexts = plt.pie(event_messages_ids_top20.values(), autopct='%1.1f%%', shadow=True, startangle=90, radius=1.0)\r\n plt.title('TOP 20 Rule IDs hit', bbox={'facecolor': '0.8', 'pad': 5})\r\n\r\n ''' Legend for chart \"TOP 20 Rule IDs hit\" '''\r\n x = np.char.array(list(event_messages_ids_top20.keys()))\r\n y = np.array(list(event_messages_ids_top20.values()))\r\n labels = ['{0} --> {1} hits'.format(i, j) for i, j in zip(event_messages_ids_top20.keys(), event_messages_ids_top20.values())]\r\n if len(event_messages_ids_top20.keys()) >= 1:\r\n patches, labels, dummy = zip(*sorted(zip(patches, labels, y), key=lambda x: x[2], reverse=True))\r\n plt.subplot(ax42, axis='off')\r\n plt.axis('off')\r\n plt.legend(patches, labels, loc='center left', bbox_to_anchor=(-0.1, 1.), fontsize=7)\r\n\r\n '''\r\n GRID VERSION END\r\n '''\r\n graph_title = 'Modsecurity events ' + str(datetimenow) + ' from file: ' + inputFileName + ' first ' + str(MAXEVENTS) + ' analyzed'\r\n fig.canvas.set_window_title(graph_title)\r\n fig.set_size_inches(18,11)\r\n #plt.get_current_fig_manager().window.wm_geometry(\"+10+10\")\r\n try:\r\n if not os.path.isdir(fileBaseOutputDir):\r\n os.mkdir(fileBaseOutputDir)\r\n fOut = os.path.join(fileBaseOutputDir, graphOutputFilename)\r\n plt.savefig(fOut)\r\n return(fOut)\r\n except Exception as e:\r\n print('modsecViewGraphs.savefig() thrown exception: %s', e)\r\n return('error')", "def get_traces(self):\n return pd.DataFrame(\n {i.__name__: i.trace() for i in self.get_stochastics().node}\n )", "def yedges(self):\n return self.edges[1]", "def serialize(self):\n data = super(Graph, self).serialize()\n data['nodes'] = [node.serialize() for node in self.nodes]\n return data", "def getOutageRecord(self):\n return self._OutageRecord", "def draw_graph(self):\n\t\tif None in self.graph:\n\t\t\tdel self.graph[None]\n\n\t\tfor vs in self.graph.itervalues():\n\t\t\tto_delete = []\n\t\t\tfor i in xrange(len(vs)):\n\t\t\t\tif vs[i] is None:\n\t\t\t\t\tto_delete.append(i)\n\n\t\t\tfor i in reversed(to_delete):\n\t\t\t\tdel vs[i]\n\n\t\tself.G=nx.Graph(self.graph)\n\n\t\tfor k,v in self.labels.iteritems():\n\t\t\tif v[:6] == 'Module':\n\t\t\t\troot = k\n\t\t\t\tbreak\n\n\t\treturn self.__dfs_plot(root)", "def get_my_graphs(self, tags=None, limit=20, offset=0):\n\t\tquery = {\n\t\t\t'owner_email': self.username,\n\t\t\t'limit': limit,\n\t\t\t'offset': offset\n\t\t}\n\n\t\tif tags is not None:\n\t\t\tquery.update({'tags[]': tags})\n\n\t\treturn self._make_request(\"GET\", '/api/v1/graphs/', url_params=query).json()", "def create_visual_graph(self):\n if self.predict_new and self.prediction_without_covid_case:\n self.predict_co2_emission_future()\n self.save_prediction_df()\n else:\n self.restore_prediction_df()\n if not self.analysis_plot:\n self.predict_co2_emission_future()\n self.save_prediction_df()\n\n self.do_plot()\n self.output_graph_file = OUTPUT_GRAPH_PATH\n return self.output_graph_file", "def visit_graph_end(self, network: Network):\n pass", "def get_edges(self, topogramId):\n return self.make_request(\"GET\", \"topograms/\"+topogramId+\"/edges\", {})", "def get_outputs(self, isDFS=False):\n \n outputs = list()\n 
graph2 = deepcopy(self.graph) \n while graph2 != OrderedDict():\n key, value = graph2.popitem(last=isDFS)\n outputs.append(key)\n for key2 in value:\n graph2[key2] = value[key2]\n return outputs", "def nodegraph(self):\n return self._nodegraph", "def build_graph():\n file = open(\"../data/data.json\", \"r\")\n data = json.load(file)\n node_dict = {}\n for id in data:\n node_dict[id] = Node(data[id][\"name\"], data[id][\"product\"], data[id][\"production_volume\"])\n for id in data:\n current_node = node_dict[id]\n for costumer_id in data[id][\"costumers\"]:\n current_node.costumers.append(node_dict[str(costumer_id)])\n current_node.out_edge_capacity_drop[node_dict[str(costumer_id)].name] = 0\n for supplier_id in data[id][\"suppliers\"]:\n current_node.suppliers.append(node_dict[str(supplier_id)])\n current_node.in_edge_capacity_drop[node_dict[str(supplier_id)].name] = 0\n return node_dict", "def get(self):\n graph_plugins = manager.GraphManager.get_graphs()\n graphs = []\n for name, graph_class in graph_plugins:\n graph_plugin = {\n \"name\": name,\n \"display_name\": graph_class.DISPLAY_NAME,\n \"description\": graph_class.DESCRIPTION,\n }\n graphs.append(graph_plugin)\n\n return jsonify(graphs)", "def createGraph(self):\n self.measurements(45,50,10)\n avg = self.readFile(\"avg.pickle\")\n table = []\n for a in avg:\n table.append((a[0], a[1], a[2], a[3], a[4], \"Boolean\"))\n table.append((a[0], a[1], a[2], a[5], a[6], \"Fractional\"))\n table.append((a[0], a[1], a[2], a[7], a[8], \"Hierarchical\"))\n df = pd.DataFrame(table)\n df.columns = [\"nPages\", \"nCentroids\", \"Time\", \"Mean\", \"Std\", \"Type\"]\n print(df)\n sns.set(style = 'darkgrid')\n sns.lmplot(x = \"nCentroids\", y = \"Mean\", col = \"Type\", hue=\"Type\", data = df)\n #sns.lmplot(x = \"nPages\", y = \"Mean\", col = \"Type\", hue=\"Type\", data = df)\n #sns.scatterplot(x = \"nCentroids\", y = \"Mean\", size = \"Time\", hue = \"Type\", sizes = (20, 200), data = df)\n #sns.scatterplot(x = \"nPages\", y = \"Mean\", size = \"Time\", hue = \"Type\", sizes = (20, 200), data = df)\n plt.show()", "def getGraphURI(self, data):\n if data['newgraph']:\n return URIRef(data['newgraph'])\n if data['graph'][0]:\n return URIRef(data['graph'][0])\n return URIRef(data['downloadurl'])\n #3. 
check graph namespace?\n # e.g.: - check for owl:Ontology:about ???\n # - sorted([(len(x), x) for x in g.subjects()])[0]", "def all_out_edges_of_node(self, id1: int) -> dict:\r\n return self.Edges[id1]", "def show_custom_graph(self):\n pass", "def out_edges(self, node: str, keys: bool = False, data: bool = False) -> List:\n return self.graph.out_edges(node, keys=keys, data=data)", "def list_all(self):\n if self.graph is None:\n self.logger.error(\"Graph value none cannot be iterated\")\n return list()\n\n nodes = list()\n for node in self.graph.nodes():\n if node == self.NONE_PACKAGE:\n continue\n nodes.append(node)\n return nodes", "def output_data(self):\n pass", "def graph_etl_model():\r\n r = get_request_payload(request)\r\n file = check_for_file(request, odbserver)\r\n if not file[\"data\"]:\r\n if \"file\" in request.form.to_dict().keys():\r\n graph = odbserver.graph_etl_model(\r\n json.loads(request.form.to_dict()['model']),\r\n odbserver.file_to_frame(request.form.to_dict()[\"file\"])[\"data\"])\r\n elif \"file\" in r.keys() and \"model\" in r.keys():\r\n graph = odbserver.graph_etl_model(r[\"model\"], r[\"file\"])\r\n else:\r\n return jsonify(file)\r\n else:\r\n graph = odbserver.graph_etl_model(\r\n json.loads(request.form.to_dict()['model']),\r\n file[\"data\"])\r\n return jsonify({\r\n \"status\": 200,\r\n \"data\": graph,\r\n \"filename\": file[\"data\"]\r\n })", "def get_graph(self, engine, args):\n raise NotImplementedError(\"Override in subclass\")", "def get_graph(self, engine, args):\n if args not in self.graph_cache:\n try:\n g = self.metagraph.generate_graph(args)\n except GraphGenerationError as err:\n types = err.args[0]\n raise TypeDispatchError(self.metagraph, types)\n g = engine.pipeline.resources.convert(g)\n self.graph_cache[args] = g\n return self.graph_cache[args]", "def getGraph(self, data):\n graphuri = self.getGraphURI(data)\n # TODO: consider using temporary disk storage to not overload server\n # ... or stop parsing graph by some other means\n g = Graph(identifier=graphuri)\n\n if data['filedata'] and data['filedata'] != NOT_CHANGED:\n # uploaded file ...\n fmt = guessRDFFileFormat(data['format'],\n data['filedata'].contentType,\n data['filedata'].filename)\n g.parse(StringIO(data['filedata'].data), format=fmt)\n elif data['downloadurl']:\n # TODO: would be nice to check graph for graphuri, but it is\n # already a bit late here... 
needs improvement\n fmt = guessRDFFileFormat(data['format'], '', '')\n g.parse(data['downloadurl'], format=fmt)\n return g", "def get_degree_data(th_object, start, end, filename, event_name, path, plot_histogram=False, histo_path=\"/\",\n fig_path=\"/\"):\n\n ln = th_object.get_degree_data(start, end, filename)\n node_list = list(th_object.topology_graphs[0].nodes())\n averaged_data = []\n geo_dict = dict.fromkeys(th_object.node_loc.keys(), 0.0)\n geo_count = 0\n with open(path, 'w') as f:\n f.write(\"Time\")\n for k in node_list:\n f.write(\",\" + str(k))\n f.write(\",Average Degree size\\n\")\n for k, v in ln.items():\n geo_count += 1\n sum_av = 0\n count = 0\n f.write(k)\n for n, d in v:\n geo_dict[n] += d\n f.write(\",\" + str(d))\n sum_av += d\n count += 1\n averaged_data.append(sum_av / count)\n f.write(\",\" + str(averaged_data[-1]) + \"\\n\")\n av_fig = plt.figure()\n for k, v in geo_dict.items():\n geo_dict[k] = v / geo_count\n\n ax_av_fig = av_fig.add_subplot(111)\n averaged = np.ones((len(averaged_data), 1)) * np.average(averaged_data)\n ax_av_fig.plot(averaged_data)\n ax_av_fig.plot(averaged)\n ax_av_fig.set_xlabel('Time')\n ax_av_fig.set_ylabel('Average degree size')\n ax_av_fig.set_title('Average degree size vs. time for ' + event_name)\n av_fig.savefig(fig_path + event_name + \"_av_deg_size.png\")\n\n av_bins = np.linspace(0, max(averaged_data) + 1, int((max(averaged_data) + 1) / 0.2))\n av_fig_histo, ax_av_histo = plt.subplots(1, 2, tight_layout=True)\n # ax_av_histo = av_fig_histo.add_subplot(111)\n ax_av_histo[0].hist(averaged_data, bins=av_bins, rwidth=0.8, density=False)\n ax_av_histo[0].set_xlabel(\"Average degree size\")\n ax_av_histo[0].set_ylabel(\"Count of occurrence\")\n ax_av_histo[0].set_title(\"Histogram of average \\n degree size for \\n\" + event_name)\n\n ax_av_histo[1].hist(averaged_data, bins=av_bins, rwidth=0.8, density=True)\n ax_av_histo[1].set_xlabel(\"Average degree size\")\n ax_av_histo[1].set_ylabel(\"Probability\")\n ax_av_histo[1].set_title(\"Probability density of \\n average degree size for \\n\" + event_name)\n av_fig_histo.savefig(fig_path + event_name + \"_av_deg_size_histo.png\")\n\n if plot_histogram:\n histo_start = 0\n histo_end = 33\n x = np.linspace(histo_start, histo_end, histo_end - histo_start + 1, dtype=int)\n y = np.linspace(1, end - start + 1, end - start + 1, dtype=int)\n x_meshed, y_meshed = np.meshgrid(x, y)\n z_meshed = np.array([])\n fig = plt.figure()\n ax = fig.add_subplot(111)\n for k, v in ln.items():\n y_tmp = []\n for n, d in v:\n y_tmp.append(d)\n hist, bins = np.histogram(y_tmp, bins=x)\n z_meshed = np.append(z_meshed, np.append(np.array(hist), 0))\n z_meshed = np.reshape(z_meshed, (len(y), len(x)))\n with open(histo_path + event_name + \"_histo.csv\", 'w') as f:\n f.write(\"Time\")\n for item in x:\n f.write(\",\" + str(item))\n f.write(\"\\n\")\n for i in range(start, end + 1):\n f.write(str(filename[i]))\n for item in z_meshed[i - start]:\n f.write(\",\" + str(item))\n f.write(\"\\n\")\n\n c = ax.pcolormesh(x_meshed, y_meshed, z_meshed, cmap='GnBu_r', vmax=16, vmin=0)\n fig.colorbar(c, ax=ax)\n ax.set_xlabel('Degree size')\n ax.set_ylabel('Time')\n ax.set_title(histo_path + \" \" + event_name)\n fig.savefig(fig_path + event_name + \"_histo.png\")\n geographical_heat_map(th_object, geo_dict, \"average degree size for \" + event_name,\n fig_path + \"geographical_degree_dist_\" + event_name + \".png\")\n plt.show()", "def plot_data(self):", "def show_graph(self):\n graph_file = self.dump_graph()\n 
subprocess.check_output(shlex.split(f'gwenview {graph_file}'))", "def get_graph_binary(self):\n with open(os.path.join(self.network_dir,\"graph\"), mode='rb') as file:\n graph = file.read()\n return graph", "def show_graph():\n\n try:\n start = int(request.query.get('start', 0))\n end = int(request.query.get('end', 0))\n contineous = int(request.query.get('contineous', 0))\n\n except ValueError:\n return \"integer type cast error\"\n\n tgraph = Graph(tw_user_action)\n pageno = request.query.get('page', 1)\n url = 'show_graph?start={}&end={}'.format(start,end)\n offset, limit = tgraph.pagination.get_page_range(pageno)\n html_pagination = tgraph.pagination.get_html_pagination(pageno,url=url) if config_pagination else None\n\n def gen_graph():\n \"\"\"pagiantion = true in global settings can be used (if/else)\"\"\"\n if config_pagination:\n gdata = tgraph.call_graph(offset=offset, limit=limit)\n else:\n gdata = tgraph.call_graph(start=start, end=end, contineous=contineous)\n\n for data in gdata:\n yield data\n return {'gengraph':(gen_graph()), 'pagination':html_pagination}" ]
[ "0.7131809", "0.6295313", "0.62823576", "0.62179625", "0.6100377", "0.59277797", "0.5841857", "0.58356446", "0.5830653", "0.5827107", "0.5826806", "0.57876396", "0.5763705", "0.57545763", "0.56935877", "0.5691411", "0.5688853", "0.56795114", "0.5666311", "0.56479836", "0.5642819", "0.56165665", "0.56165665", "0.56140625", "0.5609281", "0.55508524", "0.5513983", "0.5496717", "0.5490275", "0.5454037", "0.5439174", "0.54373425", "0.54143935", "0.54137325", "0.5395094", "0.5385771", "0.5369522", "0.53653353", "0.53624684", "0.53585684", "0.5358109", "0.5351989", "0.5349855", "0.53287154", "0.5322329", "0.53135705", "0.5296824", "0.5277507", "0.5259032", "0.5255739", "0.5231457", "0.52193975", "0.52163005", "0.5214457", "0.5187822", "0.51868105", "0.51810825", "0.5177122", "0.51719785", "0.5171762", "0.51677805", "0.5166775", "0.51595664", "0.5136007", "0.5131699", "0.51277155", "0.5122048", "0.5120291", "0.5118136", "0.51139694", "0.5101018", "0.50976634", "0.50893295", "0.50847095", "0.5083895", "0.507866", "0.507851", "0.5071264", "0.5068543", "0.5067354", "0.5063908", "0.5060132", "0.5053565", "0.5052778", "0.5035804", "0.50298476", "0.5021282", "0.5019079", "0.50107324", "0.5010427", "0.50042844", "0.5003672", "0.50001687", "0.49814805", "0.49800858", "0.49777523", "0.49681464", "0.49629146", "0.4962284", "0.49607056" ]
0.7193129
0
This function will return the graph data for the selected module
def get_outage_graph(request, equipment_name=None, module_name=None): query_params, obj = None, None try: query_params = request except: pass try: if request.method == GET_REQUEST: obj = OutageGraph(query_params, equipment_name, module_name) return obj.get_outage() log_debug(METHOD_NOT_ALLOWED) return JsonResponse({MESSAGE_KEY: METHOD_NOT_ALLOWED}, status=HTTP_405_METHOD_NOT_ALLOWED) except Exception as e: excMsg = "get_outage_graph_data API : " + str(error_instance(e)) return excMsg finally: if obj: del obj
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_graph(self) -> dict:\n response = requests.get(self.channel, params=\"get_graph\")\n return json_to_graph(response.content)", "def getGraph(self):\n\t\treturn self.graph", "def get_graphs_data_connection(self):\n return self.m_connection.graphs_data", "def get_graph(**options):\r\n graph = bonobo.Graph()\r\n graph.add_chain(get_stock_list,extract, process, load)\r\n\r\n return graph", "def _get_full_graph(self):", "def draw_graph(self):\n\t\tif None in self.graph:\n\t\t\tdel self.graph[None]\n\n\t\tfor vs in self.graph.itervalues():\n\t\t\tto_delete = []\n\t\t\tfor i in xrange(len(vs)):\n\t\t\t\tif vs[i] is None:\n\t\t\t\t\tto_delete.append(i)\n\n\t\t\tfor i in reversed(to_delete):\n\t\t\t\tdel vs[i]\n\n\t\tself.G=nx.Graph(self.graph)\n\n\t\tfor k,v in self.labels.iteritems():\n\t\t\tif v[:6] == 'Module':\n\t\t\t\troot = k\n\t\t\t\tbreak\n\n\t\treturn self.__dfs_plot(root)", "def get_graph(self, path):\n raise NotImplementedError", "def get_graph(self):\n return self._graph", "def populate_graph(self):", "def getData(graph, request):\r\n results = list(graph.query(request))\r\n return results", "def graph(self):\n ...", "def graph(self):\n return self.__graph", "def buildGraph(self):\n return None", "def graphs(self):\n return self.__graphs", "def graph(self):\n return self._graph", "def graph(self):\n return self._graph", "def graph(self):\n return self.batch.graph", "def get_graph_summary(self):\n\n pass", "def gen_graph(self):", "def get_graph(self, engine, args):\n raise NotImplementedError(\"Override in subclass\")", "def get_graphs_connection(self):\n return self.m_connection.graphs", "def get_graph(self):\n return json.dumps(self.graph.get_edgelist(), separators=(',',':'))", "def get_data():\n if not hasattr(g, 'data'):\n g.data = load_data()\n return g.data", "def get_dependency_graph(self):\n return self.graph", "def graph(self):\n\n return self._graph", "def getGraph(self, data):\n graphuri = self.getGraphURI(data)\n # TODO: consider using temporary disk storage to not overload server\n # ... or stop parsing graph by some other means\n g = Graph(identifier=graphuri)\n\n if data['filedata'] and data['filedata'] != NOT_CHANGED:\n # uploaded file ...\n fmt = guessRDFFileFormat(data['format'],\n data['filedata'].contentType,\n data['filedata'].filename)\n g.parse(StringIO(data['filedata'].data), format=fmt)\n elif data['downloadurl']:\n # TODO: would be nice to check graph for graphuri, but it is\n # already a bit late here... 
needs improvement\n fmt = guessRDFFileFormat(data['format'], '', '')\n g.parse(data['downloadurl'], format=fmt)\n return g", "def build_graph(self):\n pass", "def get_eval_data() -> GraphDataset:\n _load_data_if_needed()\n return eval_data", "def gexf_graph():\n # you must replace these lines and supply your own graph\n my_gexf = Gexf(\"Jitendra Rathour\", \"title\")\n gexf.addGraph(\"undirected\", \"static\", \"Rebrickable Graph\")\n return gexf.graphs[0]", "def get(self):\n graph_plugins = manager.GraphManager.get_graphs()\n graphs = []\n for name, graph_class in graph_plugins:\n graph_plugin = {\n \"name\": name,\n \"display_name\": graph_class.DISPLAY_NAME,\n \"description\": graph_class.DESCRIPTION,\n }\n graphs.append(graph_plugin)\n\n return jsonify(graphs)", "def show_data(self):\r\n selected_items = self.treeview.selection()\r\n if len(selected_items) > 0:\r\n ticker = self.treeview.item(selected_items, 'values')[0]\r\n self.graph_data(ticker)\r\n self.ticker = ticker\r\n else:\r\n return None", "def graph(self) -> dict:\n return self.flat_graph()", "def graph():\n port_to_csv()\n\n source = ''\n if request.form.get('GraphType', '') == '':\n source = url_for('static', filename='frog no graph.png')\n else:\n source = s_modular(request.form.get('GraphType', ''), '')\n\n return render_template(\n 'tmpGraph.jade',\n title=\"Graph\",\n year=datetime.now().year,\n src=source\n )", "def build_graph(self):\n raise NotImplementedError", "def graph():\n return jsonify(app.config[\"jsonified\"])", "def retrieve_graph(self):\n\n g = self.g\n\n if 'grid' in g['name']:\n my_layout = g.layout(\"grid\")\n else:\n my_layout = g.layout(\"kk\")\n\n return g, my_layout", "def get_data():\n pass", "def data_graph():\n station_reference = request.args.get(\"stationReference\")\n station_name = request.args.get(\"stationName\")\n station_name = station_name.replace(\" \",\"+\")\n\n if station_name is not None:\n # station_data = station_data.replace(\" \", \"+\")\n station = station_data.loc[station_data.stationName == station_name]\n else:\n station = station_data.loc[station_data.stationReference == station_reference]\n result_station = station.iloc[0]\n\n # Get optional parameters\n time_from = request.args.get(\"from\")\n time_to = request.args.get(\"to\")\n if time_from:\n pass\n else:\n time_from = None\n if time_to:\n pass\n else:\n time_to = None\n # plot pic\n magic_trick= data.station_graph(result_station.stationName, time_from, time_to)\n # img_stream = io.BytesIO(img)\n # img = Image.open(img_stream)\n # imgByteArr = io.BytesIO()\n # img.save(imgByteArr,format='PNG')\n # imgByteArr = imgByteArr.getvalue()\n # return send_file(io.BytesIO(imgByteArr),\n # mimetype = 'image/png',\n # as_attachment = True,\n # attachment_filename = 'tmp.png')\n image_data = open(\"tmp.png\", \"rb\").read()\n response = make_response(image_data)\n response.headers['Content-Type'] = 'image/png'\n return response", "def get_graph_binary(self):\n with open(os.path.join(self.network_dir,\"graph\"), mode='rb') as file:\n graph = file.read()\n return graph", "def graph(self) -> rx.PyDiGraph:\n return self._graph", "def plot_data(self):", "def _build_graph(self):\n pass", "def getNodes(self):\n data = self.connect('get','nodes',None)\n return data", "def get_graph(**options):\n graph = bonobo.Graph()\n\n split = bonobo.noop\n\n graph.add_chain(\n bonobo.CsvWriter('DeckedBuilder.csv'),\n # bonobo.Limit(10),\n metadata,\n # bonobo.UnpackItems(0),\n split,\n _input=None,\n _name='main',\n )\n\n graph.add_chain(\n 
bonobo.CsvReader('main-en.csv'),\n bonobo.Format(Language='English'),\n _output='main',\n )\n\n graph.add_chain(\n bonobo.CsvReader('main-de.csv'),\n bonobo.Format(Language='German'),\n _output='main',\n )\n\n graph.add_chain(\n bonobo.CsvReader('main-ru.csv'),\n bonobo.Format(Language='Russian'),\n _output='main',\n )\n\n graph.add_chain(\n bonobo.CsvReader('main-it.csv'),\n bonobo.Format(Language='Italian'),\n _output='main',\n )\n\n graph.add_chain(\n bonobo.CsvReader('main-jp.csv'),\n bonobo.Format(Language='Japanese'),\n _output='main',\n )\n\n graph.add_chain(\n bonobo.CsvReader('main-fr.csv'),\n bonobo.Format(Language='French'),\n _output='main',\n )\n\n graph.add_chain(\n bonobo.CsvReader('main-kr.csv'),\n bonobo.Format(Language='Korean'),\n _output='main',\n )\n\n graph.add_chain(\n bonobo.CsvReader('main-cs.csv'),\n bonobo.Format(Language='Chinese'),\n _output='main',\n )\n\n graph.add_chain(\n bonobo.CsvReader('Deckbox-extras.csv'),\n bonobo.Format(Language='English'),\n _output='main',\n )\n\n if ECHO_MTG:\n # Reg Qty,Foil Qty,Name,Set,Acquired,Language\n echomtg = {'Acquired For': '0.004', 'Language': 'en'}\n graph.add_chain(\n # echomtg specific fiddling\n remove_metadata,\n bonobo.UnpackItems(0),\n # bonobo.PrettyPrinter(),\n bonobo.Rename(Name='Card'),\n bonobo.Format(**echomtg),\n bonobo.CsvWriter('EchoMTG.csv'),\n _input=split,\n )\n\n # MTG Studio\n\n if MTG_STUDIO:\n graph.add_chain(\n mtg_studio,\n remove_metadata,\n bonobo.UnpackItems(0),\n # bonobo.Format(Edition='{Set}'),\n bonobo.Rename(Edition='Set'),\n # bonobo.Rename(Name='Card'),\n # bonobo.Rename(Qty='Reg Qty'),\n # bonobo.Rename(Foil='Foil Qty'),\n # bonobo.PrettyPrinter(),\n bonobo.CsvWriter('MTG-Studio.csv'),\n _input=split,\n )\n\n # graph.add_chain(\n # tradeable,\n # bonobo.UnpackItems(0),\n # #bonobo.PrettyPrinter(),\n # #bonobo.Limit(3000),\n # bonobo.CsvWriter(\"DeckedBuilder-tradelist.csv\"),\n # bonobo.OrderFields([\n # 'Card',\n # 'Set',\n # 'Foil',\n # 'Quantity',\n # ]),\n # bonobo.CsvWriter(\"CardKingdom-buylist.csv\"),\n # bonobo.OrderFields([\n # 'Quantity',\n # 'Card',\n # 'Set',\n # ]),\n # bonobo.CsvWriter(\n # \"mtgprice-buylist.csv\",\n # delimiter=\"\\t\",\n # ),\n # _input=split,\n # )\n #\n if DECKBOX:\n csv_out = bonobo.CsvWriter('Deckbox-inventory.csv')\n\n graph.add_chain(\n # # metadata,\n # #bonobo.UnpackItems(0),\n deckbox,\n bonobo.UnpackItems(0),\n csv_out,\n _input=split,\n )\n\n graph.add_chain(\n bonobo.CsvReader('Deckbox-specials.csv'), _output=csv_out\n )\n return graph", "def get_data(self):\n return self.train_edges, self.train_labels, self.test_edges, self.test_labels", "def getExistingModules(self):\n\n # get the current tab index and the widget\n index = self.pickerUI.characterTabs.currentIndex()\n widget = self.pickerUI.characterTabs.widget(index)\n characterNode = widget.property(\"charNode\")\n characterNodeModules = cmds.listConnections(characterNode + \".rigModules\")\n\n namespace = None\n if cmds.objExists(characterNode + \".namespace\"):\n namespace = cmds.getAttr(characterNode + \".namespace\") + \":\"\n\n returnData = []\n\n # get the children of the current tab widget\n children = widget.children()\n for child in children:\n\n # if we find a tab widget, search for the gfxScene\n if type(child) == QtWidgets.QTabWidget:\n tab = child\n selectedTab = tab.currentIndex()\n\n for i in range(tab.count()):\n tab.setCurrentIndex(i)\n canvasIndex = tab.currentIndex()\n canvasWidget = tab.widget(canvasIndex)\n canvasChildren = canvasWidget.children()\n\n for 
canvasChild in canvasChildren:\n if type(canvasChild) == QtWidgets.QGraphicsView:\n view = canvasChild\n scene = view.scene()\n\n # get all items in the gfxScene\n itemsInScene = scene.items()\n\n for item in itemsInScene:\n # if we find our top level picker item (the borderItem), get it's data\n if type(item) == interfaceUtils.pickerBorderItem or item.type() == 3:\n module = item.data(QtCore.Qt.UserRole)\n\n if namespace is None:\n if module not in returnData:\n returnData.append(module)\n else:\n if (namespace + module) not in returnData:\n returnData.append(namespace + module)\n\n tab.setCurrentIndex(selectedTab)\n\n return returnData", "def get_data():\n return", "def plot_graph(self) -> None:", "def graph_data(self, timeframe):\n logging.info(\"Graphing Data\")\n pprog = self.prog_logs\n cursor = pprog.find({})\n data = {\n \"emotional\": [],\n \"physical\": [],\n \"cognitive\": []\n }\n comp = self.get_timeframe(timeframe)\n for doc in cursor:\n date = list(doc.keys())[1]\n try:\n datecomp = datetime.datetime.strptime(date, \"%Y-%m-%d %H:%M\")\n except:\n datecomp = datetime.datetime.today()\n if datecomp > datetime.datetime.combine(comp, datetime.time.min):\n for key in data.keys():\n rating = int(doc[date][\"data\"][key][\"rating\"])\n data[key].append(rating)\n plt.ylabel('Level')\n plt.xlabel('Number of Logs - Ordered By Date')\n for key in data.keys():\n plt.plot(data[key])\n plt.legend(['Emotional', 'Physical', 'Cognitive'], loc='upper left')\n plt.show()", "def load_graph( gname ):\n return NX.read_gpickle( gname )", "def get_data(self):\n\n if self.has_group_cols():\n data = []\n g_cols = self.get_group_names()\n for (_, targets) in self.targets:\n data.append(self.get_series(targets))\n else:\n data = [self.get_series(self.targets)]\n\n return ListDataset(data, freq=self.freq)", "def get_graph(self, engine, args):\n if args not in self.graph_cache:\n try:\n g = self.metagraph.generate_graph(args)\n except GraphGenerationError as err:\n types = err.args[0]\n raise TypeDispatchError(self.metagraph, types)\n g = engine.pipeline.resources.convert(g)\n self.graph_cache[args] = g\n return self.graph_cache[args]", "def generateGraphData(func, *args):\n\t\n\ttry:\n\t\tavailData = func(*args)\n\t\tdata = availData\n\t\treturn data\n\texcept:\n\t\treturn None", "def load_graph(graphname,path='./data/',mname='A'):\n\n\tdata=sio.loadmat(path+graphname)\n\treturn data[mname]", "def get_graph(self, name, owner_email=None):\n\t\tresponse = self._make_request(\"GET\", '/api/v1/graphs/', url_params={\n\t\t\t'owner_email': self.username if owner_email is None else owner_email,\n\t\t\t'names[]': name\n\t\t}).json()\n\n\t\tif response.get('total', 0) > 0:\n\t\t\treturn response.get('graphs')[0]\n\t\telse:\n\t\t\treturn None", "def get_data(self):", "def get_tpd_graphs_connection(self):\n return self.m_connection.tpd_graphs", "def getModulesData(*args):\n\n mData = AppData(*args)\n mData.getModules()\n\n if not mData.good or mData.locations is None:\n return None\n\n return (mData.locations, mData.modules)", "def _get_module(self, parser, module):\n dt = []\n lines = parser.clean_data(module)\n header = lines[0]\n for data in lines[1:]:\n if data[0].startswith(\"#\"): # some modules have two headers\n header = data\n continue\n if data[0].find(\"-\") > -1: # expand positions 1-3 to 1, 2, 3\n f, s = map(int, data[0].split(\"-\"))\n for pos in range(f, s):\n dt.append([str(pos)] + data[1:])\n else:\n dt.append(data)\n dt = pd.DataFrame(dt)\n dt.columns = [h.replace(\" \", \"_\") for h in header]\n 
dt['sample'] = self.sample\n return dt", "def get_graph(self):\n return copy.deepcopy(self.graph)", "def build_graph():\n file = open(\"../data/data.json\", \"r\")\n data = json.load(file)\n node_dict = {}\n for id in data:\n node_dict[id] = Node(data[id][\"name\"], data[id][\"product\"], data[id][\"production_volume\"])\n for id in data:\n current_node = node_dict[id]\n for costumer_id in data[id][\"costumers\"]:\n current_node.costumers.append(node_dict[str(costumer_id)])\n current_node.out_edge_capacity_drop[node_dict[str(costumer_id)].name] = 0\n for supplier_id in data[id][\"suppliers\"]:\n current_node.suppliers.append(node_dict[str(supplier_id)])\n current_node.in_edge_capacity_drop[node_dict[str(supplier_id)].name] = 0\n return node_dict", "def get_graph_by_id(self, graph_id):\n\t\treturn self._make_request(\"GET\", '/api/v1/graphs/%s'% graph_id).json()", "def get_graph_info(self, graph_flow_id, type = None):\n try:\n query_set = models.AUTO_ML_RULE.objects.filter(graph_flow_id=graph_flow_id)\n query_set = serial.serialize(\"json\", query_set)\n query_set = json.loads(query_set)\n ids = []\n for row in query_set :\n ids.append(row)\n return ids\n except Exception as e:\n raise Exception(e)", "def get_data_manager():\n return IncomingEdge.items", "def getData(language=None):", "def ndata(self):\n raise Exception(\"Graph store doesn't support access data of all nodes.\")", "def get_data(self):\n pass", "def get_data(self):\n pass", "def get_graph_youtube():\n from urllib import request\n import gzip\n url = 'http://socialnetworks.mpi-sws.mpg.de/data/youtube-links.txt.gz'\n zipped_data_path = './samples/youtube-links.txt.gz'\n unzipped_data_path = './samples/youtube-links.txt'\n\n # Download .gz file\n print(\"Downloading Youtube dataset...\")\n request.urlretrieve(url, zipped_data_path, _show_progress)\n\n # Unzip\n unzipped_data = gzip.GzipFile(zipped_data_path)\n open(unzipped_data_path, 'wb+').write(unzipped_data.read())\n unzipped_data.close()\n\n # Returns graph\n G = eg.Graph()\n G.add_edges_from_file(file=unzipped_data_path)\n return G", "def gen_graph():\n if config_pagination:\n gdata = tgraph.call_graph(offset=offset, limit=limit)\n else:\n gdata = tgraph.call_graph(start=start, end=end, contineous=contineous)\n\n for data in gdata:\n yield data", "def gexf_graph():\n # you must replace these lines and supply your own graph\n my_gexf = Gexf(\"author\", \"title\")\n gexf.addGraph(\"undirected\", \"static\", \"I'm an empty graph\")\n return gexf.graphs[0]", "def get_graph(**options):\n graph = bonobo.Graph()\n\n split_dbs = bonobo.noop\n\n graph.add_chain(\n GetOrderXML(\n prefix=\"/etl/ivm\",\n glob=[\n 'Mozilla_Corporation{timestamp:%Y_%m_%d}*.xml'.format(\n timestamp=options['now'])\n ]),\n ParseDates(['Transactionlog_Tranenddatetime']),\n truncate_description,\n bonobo.UnpackItems(0),\n bonobo.Rename(\n transaction_date='Transactionlog_Tranenddatetime',\n item_number='Transactionlog_Itemnumber',\n transaction_id='Transactionlog_Tlid',\n item_description='Transactionlog_Itemdesc'),\n bonobo.Rename(\n user_id='Transactionlog_User',\n quantity='Transactionlog_Qty',\n transaction_code='Transactionlog_Transcode',\n description='Vendingmachines_Descr',\n ),\n split_dbs,\n _name=\"main\")\n\n #insert into ivm (description, transaction_id, item_number, item_description, user_id, quantity, transaction_date, transaction_code) values\n\n for engine in list(set(options['engine'])):\n graph.add_chain(\n bonobo_sqlalchemy.InsertOrUpdate(\n table_name=options['table_name'] + 
options['table_suffix'],\n discriminant=('transaction_id', ),\n engine=engine),\n _input=split_dbs)\n\n return graph", "def get_genes_of_module(module):\n\n\tmodules_result = db.get_engine(current_app, 'methylation_data').execute(\"SELECT module, mmu_gene_id, mmu_gene_name FROM gene_modules WHERE module='%s'\", (module,)).fetchall()\n\tgenes_in_module = [ {'module': d['module'], 'gene_id': d['mmu_gene_id'], 'gene_name': d['mmu_gene_name']} for d in modules_result ]\n\n\treturn genes_in_module", "def get_data(self, node_id, username, moderator):\n raise NotImplementedError()", "def get_data(self):\n raise NotImplementedError(\"Not implemented!\")", "def graph(self):\n import pydot\n edges = set()\n for p in self.packages.values():\n for f in p.files:\n for id in f.requires:\n f2 = self.get(id)\n edges.add( (\"--\".join([p.key, f.shortname]), \"--\".join([f2.package.key, f2.shortname])) )\n return pydot.graph_from_edges(edges, directed=True)", "def get_graphs(self):\n ids = self._graphs.keys()\n ids.sort()\n return [self._graphs[id] for id in ids]", "def main():\n ticker = input('Enter stock symbol: ')\n dataOptions = [\n \"1. open\",\n \"2. high\",\n \"3. low\",\n \"4. close\",\n \"5. volume\"\n ]\n print(dataOptions) \n data_option = input('What data would you like to view for ' + ticker + \": \") # input what data user wants to see\n\n dataFrame = getData(ticker.upper(), data_option) # call getData function to get data user wants\n print(dataFrame) # print the data for user\n\n graph(dataFrame) # call graph function", "def graphs():\n return render_template(\"graphs.html\")", "def __call__(self, raw_data):\n smiles = raw_data['smiles']\n label = np.array([0]) if 'label' not in raw_data else raw_data['label']\n\n feature_dict = new_smiles_to_graph_data(smiles)\n if feature_dict is None:\n return None\n feature_dict[\"label\"] = label\n \n new_graph = {}\n new_graph[\"num_nodes\"] = len(feature_dict['atomic_num'])\n new_graph[\"nfeat\"] = {key: feature_dict[key] for key in self.config.atom_names + self.config.atom_float_names}\n new_graph[\"efeat\"] = {key: feature_dict[key] for key in self.config.bond_names}\n new_graph[\"edges\"] = feature_dict['edges']\n new_graph[\"label\"] = feature_dict['label'] if \"label\" in feature_dict else None\n return new_graph", "def graph_obj(self):\r\n vizEngine = get_vizEngine().lower().strip()\r\n obj = None\r\n if self.levels > 0 and vizEngine=='bokeh':\r\n warnings.warn('Please switch the vizEngine to \"plotly\" to create contour plots.', UserWarning)\r\n if self.surface3D and vizEngine!='plotly':\r\n warnings.warn('Please switch the vizEngine to \"plotly\" to create 3D surface plots.', UserWarning)\r\n\r\n if vizEngine == 'bokeh':\r\n obj = MapBokeh(self.data, self.variable, self.levels, self.surface3D)\r\n elif vizEngine == 'plotly':\r\n obj = MapPlotly(self.data, self.variable, self.levels, self.surface3D)\r\n return obj", "def load_graph(project_name):\n path = get_dep_cache_path(project_name)\n log(\"Attempting to load graph for '%s' from %s\" % (project_name, path))\n try:\n f = open(path, 'r', encoding='utf8')\n data = json.loads(f.read())\n f.close()\n graph = DepGraph()\n graph.set_data(data['graph'])\n return {\n 'last_update': data['last_update'],\n 'graph': graph\n }\n except IOError:\n return None", "def _load_data(self):\n with open(\"znalostni_baze.txt\") as f:\n content = f.readlines()\n\n data_graph = Graph(\"Expertní systém opraváře kol\")\n for line in content:\n if line.startswith(\"#\"):\n continue\n separated = line.strip(\" IF 
\").split(\" THEN \")\n if len(separated) == 2:\n conditions = separated[0].split(\"AND\")\n solution_name = separated[1].split(\"|\")[0]\n p_h = float(separated[1].split(\"|\")[1].strip(\"\\n \"))\n for cond in conditions:\n cond_name = cond.split(\"|\")[0]\n probabilities = cond.split(\"|\")[1].split(\"->\")\n p_e = float(probabilities[0])\n p_he = float(probabilities[1])\n\n data_graph.add_edge(cond_name.strip(), solution_name.strip(), value=p_he, a_value=p_e, b_value=p_h)\n return data_graph", "def get_data(self): # TODO: add smooth possibility\n return self.data", "def getData(self):\n\t\treturn self.golang_project_packages", "def get_data(self):\r\n pass", "def get_data(self):\n\n raise NotImplementedError('''\n Must Implement get_data. Call help() for details.\n ''')", "def gexf_graph():\n # you must replace these lines and supply your own graph\n \n \n \n my_gexf = Gexf(\"JiajiaXie\", \"My awesome graph\")\n graph=my_gexf.addGraph(\"undirected\", \"static\", \"My awesome networks\")\n \n atr1=graph.addNodeAttribute('Type',type='string')\n\n\n for set in data_specific:\n if graph.nodeExists(set['set_num']) ==0:\n tm1=graph.addNode(set['set_num'], set['name'], r='0', g='0', b='0')\n tm1.addAttribute(atr1,\"set\")\n\n\n\n counter_test=1\n for set, part in data_parts.items():\n for key, part_list in part.items():\n interme =part_list['color']\n red=interme[0]+interme[1]\n green=interme[2]+interme[3]\n blue=interme[4]+interme[5]\n\n red_de=str(int(red,16))\n green_de=str(int(green,16))\n blue_de=str(int(blue,16))\n if graph.nodeExists(part_list['id'])==0:\n tm2=graph.addNode(part_list['id'], part_list['part_name'],r=red_de, g=green_de, b = blue_de)\n tm2.addAttribute(atr1,\"part\")\n\n\n counter_test+=1\n graph.addEdge(\"_\"+str(counter_test), set, part_list['id'], part_list['quantity'])\n\n\n\n f=open('bricks_graph.gexf','wb')\n my_gexf.write(f)\n\n\n return my_gexf.graphs[0]", "def _initialise_data(self):\n data_graph = self._load_data()\n data_graph.calculate_max_heights()\n return data_graph", "def _initialise_data(self):\n data_graph = self._load_data()\n data_graph.calculate_max_heights()\n return data_graph", "def get_chart_data(self, chart_name):\n chart_data = {\n \"name\": chart_name,\n }\n\n data_method = getattr(self, \"get_data_{}\".format(chart_name))\n\n chart_data.update(data_method())\n\n return chart_data", "def convertGraphToData(\n graph: Graph\n):\n vertices= []\n edges = []\n\n index = 0\n node_to_index = {}\n for node, neighbors in graph.adjacency():\n vertices.append({\n 'name': node,\n 'group': 1\n })\n node_to_index[node] = index\n index += 1\n\n for node, neighbors in graph.adjacency():\n for neighbor, edge in neighbors.items():\n edges.append({\n 'source': node_to_index[node],\n 'target': node_to_index[neighbor],\n 'value': edge['distance']\n })\n return edges, vertices", "def graph(self):\n assert self._modeled, \"Need to do calc_covariance\"\n return self._graph", "def get_data(self):\r\n return self.kinds", "def get_plot_data():\n if not hasattr(g, 'plot_data'):\n g.plot_data = plot.bar_plot(get_data().data_frame)\n return g.plot_data", "def show_custom_graph(self):\n pass", "def graph_defined(self):\n return self.lliagraph", "def load_graph(self,dataset):\n dataset = cd.build_dataset_from_name(dataset)\n self.data = dataset[0]\n G = nx.Graph()\n G.add_edges_from(self.data.edge_index.t().tolist())\n return G", "def nodegraph(self):\n return self._nodegraph", "async def get_module_data(request):\n hw = hw_from_req(request)\n requested_serial = 
request.match_info['serial']\n res = None\n\n for module in hw.attached_modules:\n is_serial_match = module.device_info.get('serial') == requested_serial\n if is_serial_match and hasattr(module, 'live_data'):\n res = module.live_data\n\n if res:\n return web.json_response(res, status=200)\n else:\n return web.json_response({\"message\": \"Module not found\"}, status=404)", "def _get_data(self):\n raise NotImplementedError()" ]
[ "0.67377543", "0.6618313", "0.6539239", "0.6381631", "0.6221923", "0.60826457", "0.60516125", "0.6034938", "0.5980619", "0.5941827", "0.58881456", "0.5873024", "0.5869902", "0.5853487", "0.58521986", "0.58521986", "0.58454376", "0.5823218", "0.57971686", "0.57595277", "0.57451653", "0.5698686", "0.5690866", "0.5689084", "0.56688815", "0.5640798", "0.56269526", "0.56192374", "0.5608235", "0.55959773", "0.5594772", "0.559245", "0.55890435", "0.5587562", "0.5565289", "0.55440384", "0.5540612", "0.5538173", "0.55338264", "0.5527678", "0.5516753", "0.5513611", "0.54989165", "0.5498023", "0.54951626", "0.54929876", "0.548989", "0.5484388", "0.5459526", "0.5457929", "0.5452559", "0.54359484", "0.5434787", "0.54231834", "0.541911", "0.5414954", "0.5410773", "0.5406406", "0.53846896", "0.5371283", "0.53711414", "0.5360721", "0.5356707", "0.5354909", "0.5346472", "0.5333024", "0.53193855", "0.53193855", "0.5317451", "0.5310635", "0.53057015", "0.52992374", "0.5290022", "0.5282407", "0.5280135", "0.52767116", "0.52756053", "0.5275303", "0.52588487", "0.525566", "0.5251428", "0.5241818", "0.523861", "0.52330935", "0.52234906", "0.52223194", "0.52197915", "0.5218075", "0.5206903", "0.5206903", "0.52045256", "0.5201283", "0.5200615", "0.5199538", "0.51900417", "0.51867056", "0.518386", "0.51833785", "0.5179274", "0.5177683", "0.5168928" ]
0.0
-1
get the available services to be activated read the models dir to find the services installed to be added to the system by the administrator
def available_services(): all_datas = () data = () for class_path in settings.TH_SERVICES: class_name = class_path.rsplit('.', 1)[1] # 2nd array position contains the name of the service data = (class_name, class_name.rsplit('Service', 1)[1]) all_datas = (data,) + all_datas return all_datas
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getServices(self):\n pass", "def available_services(cls) -> List[str]:\n ret = []\n for (_, name, _) in pkgutil.iter_modules([str(SERVICES_PATH)]):\n ret.append(name)\n return ret", "def available_services(self) -> list[str]:\r\n return self.services", "def cmd_SERVICES(self):\r\n return self._ros.get_services()", "def get_services(self):\r\n return get_service_list()", "def get_services(self):\n\t\t#Entrega el dict sin miramientos\n\t\treturn self._services", "def get_all():\n if not SERVICE_DIR:\n raise CommandExecutionError(\"Could not find service directory.\")\n # - List all daemontools services in\n return sorted(os.listdir(SERVICE_DIR))", "def list_services(ctx):\n pass", "def selectable_services():\n\n db = current.db\n s3db = current.s3db\n\n stable = s3db.org_service\n query = (stable.deleted == False)\n rows = db(query).select(stable.id,\n stable.name,\n )\n services = {row.id: row.name for row in rows}\n return services", "def services(self):\r\n return services.Services(self)", "def getAllServices(self) -> List[ghidra.framework.plugintool.ServiceInterfaceImplementationPair]:\n ...", "def services(self):\n return self.agent.http.get(\n lambda x: json.loads(x.body), '/v1/agent/services')", "def get_services(self):\n services = []\n for f in dir(self):\n o = getattr(self, f)\n if callable(o) and hasattr(o, '_service_name'):\n services.append(getattr(o, '_service_name'))\n return services", "def pkg_services(klass, pkg):\n return [s for s in klass._pkg_services.get(pkg.name, [])\n if klass.is_service_installed(s)]", "def system_services(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['SystemServiceArgs']]]]:\n return pulumi.get(self, \"system_services\")", "def all_services(self):\n services = oc.all_service_names()\n for s in services:\n print(s)\n print(\"#total\", len(services))", "def get_tools(self):\r\n\t\tlogger.debug(\"Getting the tools\")\r\n\t\t\r\n\t\treturn db.get_items('tools')", "def enabled_services(self):\n services = set()\n if self.ce_collector_required_rpms_installed and self.htcondor_gateway_enabled:\n services.add('condor-ce')\n return services", "def getDefaultServices():\n return Service.getDefaultServices()", "def get_services(self):\n\n return list(self.services.values())", "def get_current_services(self,sim):\n current_services = sim.get_alloc_entities()\n current_services = dict((k, v) for k, v in current_services.items() if len(v)>0)\n deployed_services = defaultdict(list)\n for k,v in current_services.items():\n for service_name in v:\n if not \"None\" in service_name: #[u'2#2_19']\n deployed_services[service_name[service_name.index(\"#\")+1:]].append(k)\n return deployed_services", "def get_services(self): \n if self._access_token is None:\n raise RequiresAccessTokenError()\n\n response = self.__make_oauth_request(ADD_URLS_FOR_SERVICES_URL, token=self._access_token, signed=True)\n return simplejson.loads(response.read()).keys()", "def add_services(self):\n # first get the names\n names = str(self.client.console_execute('services -c name {0}\\n'.format(self.ip))[b'data'])\n while not 'name' in names:\n sleep(10)\n names = self.client.console_read()\n names = names.split('\\n')\n for row in names:\n if self.ip in row:\n row = strip_whitespaces(row)\n self.services.append({'name': row.split(' ')[1]})\n\n # get the ports by service name\n ports = str(self.client.console_execute('services -c port {0}\\n'.format(self.ip))[b'data'])\n while not 'port' in ports:\n sleep(10)\n ports = self.client.console_read()\n ports = ports.split('\\n')\n for 
row in ports:\n for service in self.services:\n if service['name'] in row:\n row = strip_whitespaces(row)\n service['port'] = row.split(' ')[1]\n\n # get some information by service name (only useful if a report shall be generated)\n info = str(self.client.console_execute('services -c info {0}\\n'.format(self.ip))[b'data'])\n while not 'info' in info:\n sleep(10)\n info = self.client.console_read()\n info = info.split('\\n')\n for row in info:\n for service in self.services:\n if service['name'] in row:\n row = strip_whitespaces(row)\n service['info'] = row.split(' ')[1]", "def getServices(self):\n catalog = plone.api.portal.get_tool('portal_catalog')\n path = '{}/catalog'.format('/'.join(plone.api.portal.get().getPhysicalPath()))\n query = dict(portal_type='Service', sort_on='sortable_title', path=path)\n result = list()\n for brain in catalog(**query):\n result.append((brain.getId, brain.Title))\n return result", "def _load_services(self) -> None:\n # load default services\n self.service_errors = ServiceManager.load_locals()\n # load custom services\n service_paths = self.config.get(\"custom_services_dir\")\n logger.debug(\"custom service paths: %s\", service_paths)\n if service_paths is not None:\n for service_path in service_paths.split(\",\"):\n service_path = Path(service_path.strip())\n custom_service_errors = ServiceManager.add_services(service_path)\n self.service_errors.extend(custom_service_errors)\n # load default config services\n self.service_manager.load_locals()\n # load custom config services\n custom_dir = self.config.get(\"custom_config_services_dir\")\n if custom_dir is not None:\n custom_dir = Path(custom_dir)\n self.service_manager.load(custom_dir)", "def services(self) -> dict:\n return self.data[\"services\"]", "def load_services(service_store):\n service_store.register_service(GetDrugStoreService)\n service_store.register_service(FuelLevelService)\n service_store.register_service(SetFuelLevelService)\n service_store.register_service(GetRobotPosition)\n service_store.register_service(SetRobotPosition)", "def get_service(self):", "def addServices(self):\r\n self.addHendrix()\r\n\r\n if not self.options.get('global_cache') and not self.options.get('nocache'):\r\n self.addLocalCacheService()\r\n\r\n if self.is_secure:\r\n self.addSSLService()\r\n\r\n self.catalogServers(self.hendrix)", "def get(self):\n return VehicleServices.get_all()", "def services():\n return list(set(chain(*restart_map().values())))", "def setupSERVICES():\n services = Services()\n services.rest = setupREST()\n\n return services", "def _add_services(self):\n # Services and relations which are present merely to satisfy\n # required_interfaces and workload status are not inspected.\n # Fix me. 
Inspect those too.\n this_service = {'name': 'neutron-openvswitch'}\n other_services = [\n {'name': 'nova-compute'},\n {'name': 'nova-cloud-controller'},\n {'name': 'rabbitmq-server'},\n {'name': 'keystone'},\n {'name': 'glance'},\n {'name': 'neutron-api'},\n self.get_percona_service_entry(),\n ]\n if self._get_openstack_release() >= self.bionic_train:\n other_services.append({'name': 'placement'})\n super(NeutronOVSBasicDeployment, self)._add_services(this_service,\n other_services)", "def get_services():\n services = []\n bus = pydbus.SessionBus()\n for s in bus.get('.DBus').ListNames():\n if s.startswith(MprisService.mpris_base):\n services.append(s)\n return services", "def get_windows_services(self):\n\n # Query for all services in all states\n typeFilter = win32service.SERVICE_WIN32\n stateFilter = win32service.SERVICE_STATE_ALL\n return win32service.EnumServicesStatus(self.scmanager, typeFilter, stateFilter)", "def get_additional_services(settings_module):\r\n\r\n additional_services = []\r\n\r\n if hasattr(settings_module, 'HENDRIX_SERVICES'):\r\n for name, module_path in settings_module.HENDRIX_SERVICES:\r\n path_to_module, service_name = module_path.rsplit('.', 1)\r\n resource_module = importlib.import_module(path_to_module)\r\n additional_services.append(\r\n (name, getattr(resource_module, service_name))\r\n )\r\n return additional_services", "def get_effective_services(self):\n if self['host_name'] == None:\n return []\n result = []\n myname = self['host_name']\n # Find all services that define us via service.host_name directive\n for service in Service.objects.all:\n service_hostname = service['host_name'] or \"\"\n if myname in service_hostname.split(\",\"):\n result.append( service )\n # Find all services that define us via our hostgroup\n for hostgroup in self.get_effective_hostgroups():\n for service in hostgroup.get_effective_services():\n if service not in result:\n result.append(service)\n return result", "def get_all_servicech(self, conf):\n\t\tpass", "def get_services(**options):\r\n return {}", "def extension_services(self) -> Sequence['outputs.GetComputeMachineServiceStatusExtensionServiceResult']:\n return pulumi.get(self, \"extension_services\")", "def services_needed(self, source: str) -> List[str]:\n\n\t\tservices = self.haproxy.services_needed\n\n\t\treturn services.get(source, [])", "def list_services(self):\n response = self._get()\n\n services = []\n for s in response[\"services\"]:\n services.append(_create_service_from_json(s, self._session, self._url_base, s[\"folderName\"]))\n\n return services", "def get_services(self):\n\n services = []\n\n for p in self.config['auth_profiles']:\n services.append(self.get_service(p))\n return services", "def init_services(self):\n service_prefix = rospy.get_name() + \"/\"\n\n self._request_components_serv = rospy.Service(service_prefix +\n 'list_components',\n ListComponents,\n self.get_components)\n self._request_fields_serv = rospy.Service(service_prefix +\n 'list_fields',\n ListFields,\n self.get_fields)\n self._request_values_serv = rospy.Service(service_prefix +\n 'request_values',\n RequestValues,\n self.get_values)\n self._unsubscribe_values_serv = rospy.Service(service_prefix +\n 'unsubscribe_values',\n UnsubscribeValues,\n self.unsubscribe_values)", "def configured_service(hass):\n return set(\n \"ais_wifi_service\" for entry in hass.config_entries.async_entries(DOMAIN)\n )", "def external_controller_services(self):\n return self._external_controller_services", "def get_running_services(vm_address):\n 
services = set([\n ' '.join(line) for line in\n run_via_exec_daemon(['net', 'start'], split=True,\n host=vm_address)[2:-4]])\n print 'INSTALL_TOOLS: services running', sorted(services)\n return services", "def list_magnum_services(self):\n return list(self.container_infrastructure_management.services())", "def list_services(self, **params):\n url = 'os-services'\n if params:\n url += '?%s' % urllib.urlencode(params)\n\n resp, body = self.get(url)\n body = json.loads(body)\n schema = self.get_schema(self.schema_versions_info)\n self.validate_response(schema.list_services, resp, body)\n return rest_client.ResponseBody(resp, body)", "def get_services(**options):\n\n return {}", "def service_list():\n data = list_services()\n table = present(lambda: data,\n renderer='table',\n headers=['Service Name', 'URLS', 'Service Type', \"Memory Usages\", 'Replicas', 'Started at',\n 'Updated at',\n 'State', 'Restarts'],\n columns=['name', 'urls', 'service_type', 'memory', 'replicas', 'start_date', 'last_update',\n 'state',\n 'service_restarts'])\n if table:\n click.echo(table)\n else:\n click.echo('\\nYou have no running services right now, why don\\'t you try deploying one? \\n'\n 'have fun and follow the link below:\\n')\n click.echo('https://docs.fandogh.cloud/docs/services.html\\n')", "def getEnablemanagementService(self):\n\t\tquery = ''\n\t\tconn = self.get_connection()\n\t\theaders = { 'Content-type' : 'application/json', 'Authorization' : 'A10 %s' %self.sessionid}\n\t\tconn.request('GET', self.get_path() + '/' + query, headers=headers)\n\t\tresponse = conn.getresponse()\n\t\texpected_status = 200\n\t\terrors = {500: 'An unexpected runtime exception', 404: 'Specified service does not exist'}\n\t\tpayload = self.get_output(response, expected_status, errors)\n\t\tconn.close()\n\t\tif self.debug:\n\t\t\tprint 'payload:', payload\n\t\tif payload == '':\n\t\t\tpayload = None\n\t\tif payload is not None:\n\t\t\tdata = json.loads(payload)\n\t\t\tpayload= data.get('service')\n\t\treturn deserialize_Service_json(payload)", "def _get_services(self):\n from googleapiclient.discovery import build as discovery_build\n from oauth2client.client import (\n GoogleCredentials,\n ApplicationDefaultCredentialsError,\n )\n from google.cloud import storage\n\n # Credentials must be exported to environment\n try:\n creds = GoogleCredentials.get_application_default()\n except ApplicationDefaultCredentialsError as ex:\n log_verbose_traceback(ex)\n raise ex\n\n # Discovery clients for Google Cloud Storage and Life Sciences API\n self._storage_cli = discovery_build(\"storage\", \"v1\", credentials=creds)\n self._compute_cli = discovery_build(\"compute\", \"v1\", credentials=creds)\n self._api = discovery_build(\"lifesciences\", \"v2beta\", credentials=creds)\n self._bucket_service = storage.Client()", "def service_info(service=None):\n if service:\n res = OrderedDict()\n for s in service:\n cmd = 'systemctl list-units %s' % s\n p = os.popen(cmd)\n lines = p.readlines()\n if len(lines) > 2 and lines[0][0:4] == 'UNIT':\n l = lines[1].strip().split()\n res[s] = {'load': l[1], 'active': l[2], 'sub': l[3], 'description': ' '.join(l[4:])}\n else:\n res[s] = {'load': 'not-found', 'active': 'inactive', 'sub': 'dead', 'description': ''}\n else:\n res = OrderedDict()\n cmd = 'systemctl list-units'\n p = os.popen(cmd)\n lines = p.readlines()\n if len(lines) > 2 and lines[0].strip()[0:4] == 'UNIT':\n for l in lines:\n l = l.strip()\n if not l:\n break\n ls = l.split()\n res[ls[0]] = {'load': ls[1], 'active': ls[2], 'sub': ls[3], 
'description': ' '.join(ls[4:])}\n return res", "def exposed_services(self):\n return self._exposed_services", "def get_exported_services(self):\n with self.__export_lock:\n return [reg.get_export_reference() for reg in self.__exported_regs]", "async def init_services(self) -> List[Service]:\n services = []\n schemas = defaultdict(dict)\n svc_classes = Service.get_plugins()\n\n schemas = Schema(self.schema_dir)\n if schemas:\n poller_schema = schemas.get_arrow_schema('sqPoller')\n poller_schema_version = SchemaForTable('sqPoller', schemas).version\n\n db_access = self._get_db_access(self.cfg)\n\n # Read the available services and iterate over them, discarding\n # the ones we do not need to instantiate\n svc_desc_files = Path(self.service_directory).glob('*.yml')\n\n for filename in svc_desc_files:\n with open(filename, 'r') as f:\n svc_def = yaml.safe_load(f.read())\n\n if not svc_def:\n logger.warning(f'Skip empty service file: {filename}')\n continue\n\n service = svc_def.get('service')\n if service in BLACKLIST_SERVICES:\n continue\n\n if all(service not in x for x in [self.svcs_list]):\n logger.warning(\n f\"Ignoring unspecified service {svc_def.get('service')}\"\n )\n continue\n\n if 'service' not in svc_def or 'apply' not in svc_def:\n logger.error(\n 'Ignoring invalid service file definition.'\n f\"'service' and 'apply' keywords: {filename}\"\n )\n continue\n\n period = svc_def.get('period', self.default_interval)\n for nos, cmds_desc in svc_def['apply'].items():\n\n # Check if the the current nos copies from another\n if isinstance(cmds_desc, dict) and 'copy' in cmds_desc:\n newval = svc_def['apply'].get(cmds_desc['copy'], None)\n if not newval:\n logger.error(\n f\"No device type {cmds_desc['copy']} to copy from,\"\n f\"for {nos} for service {svc_def['service']}\"\n )\n return\n cmds_desc = newval\n\n # Update the command description adding the\n # specification for the output parsing\n if isinstance(cmds_desc, list):\n for subele in cmds_desc:\n self._parse_nos_version(filename, svc_def, nos, subele)\n else:\n self._parse_nos_version(filename, svc_def, nos, cmds_desc)\n\n try:\n schema = SchemaForTable(svc_def['service'], schema=schemas)\n except Exception: # pylint: disable=broad-except\n logger.error(f\"No matching schema for {svc_def['service']}\")\n continue\n\n if schema.type == 'derivedRecord':\n # These are not real services and so ignore them\n continue\n\n # Valid service definition, add it to list\n # if the service has not a dedicated class, we will use the\n # default implementation\n class_to_use = svc_classes.get(svc_def['service'], Service)\n service = class_to_use(\n svc_def['service'],\n svc_def['apply'],\n period,\n svc_def.get('type', 'state'),\n svc_def.get('keys', []),\n svc_def.get('ignore-fields', []),\n schema,\n self.output_queue,\n db_access,\n self.run_mode\n )\n service.poller_schema = poller_schema\n service.poller_schema_version = poller_schema_version\n logger.info(f'Service {service.name} added')\n services.append(service)\n\n # Once done set the service list and return its content\n self._services = services\n return self._services", "def test_ipam_services_list(self):\n pass", "def _get_services(self, services):\n\n services_info = []\n\n for service in services[1]:\n services_info.append(self._make_dict(service))\n \n return services_info", "def services(self) -> List[Service]:\n return self._services", "def get_services(self):\n ret = self.v1_service_list.get()\n services = {each.metadata.namespace: each.metadata.name for each in ret.items}\n\n 
return services", "def list(self):\n firewalls = self.driver.ex_list_firewalls()\n\n tag_to_service = {}\n for service in self.service.list():\n service_tag = \"%s-%s\" % (service.network.name, service.name)\n if service_tag in tag_to_service:\n raise BadEnvironmentStateException(\n \"Service %s and %s have same service tag: %s\" %\n (tag_to_service[service_tag], service, service_tag))\n tag_to_service[service_tag] = service\n\n def make_paths(destination, source, firewall):\n paths = []\n for rule in firewall.allowed:\n for port in rule[\"ports\"]:\n paths.append(Path(destination.network, source, destination, \"tcp\", port))\n return paths\n\n def handle_sources(tag_to_service, destination, firewall):\n paths = []\n if hasattr(firewall, \"source_tags\") and firewall.source_tags:\n for source_tag in firewall.source_tags:\n if source_tag in tag_to_service:\n paths.extend(make_paths(destination, tag_to_service[source_tag], firewall))\n if hasattr(firewall, \"source_ranges\") and firewall.source_ranges:\n subnets = []\n for source_range in firewall.source_ranges:\n subnets.append(Subnetwork(subnetwork_id=None, name=None,\n cidr_block=source_range, region=None,\n availability_zone=None, instances=[]))\n # We treat an explicit CIDR block as a special case of a service with no name.\n if subnets:\n source = Service(network=None, name=None, subnetworks=subnets)\n paths.extend(make_paths(destination, source, firewall))\n return paths\n\n def handle_targets(tag_to_service, firewall):\n paths = []\n if hasattr(firewall, \"target_tags\") and firewall.target_tags:\n for target_tag in firewall.target_tags:\n if target_tag in tag_to_service:\n paths.extend(handle_sources(tag_to_service, tag_to_service[target_tag],\n firewall))\n if hasattr(firewall, \"target_ranges\") and firewall.target_ranges:\n raise BadEnvironmentStateException(\n \"Found target ranges %s in firewall %s but they are not supported\" %\n (firewall.target_ranges, firewall))\n return paths\n\n paths = []\n for firewall in firewalls:\n paths.extend(handle_targets(tag_to_service, firewall))\n return paths", "async def api_get_services(g: WalletTypeInfo = Depends(get_key_type)):\n user = await get_user(g.wallet.user)\n wallet_ids = user.wallet_ids if user else []\n services = []\n for wallet_id in wallet_ids:\n new_services = await get_services(wallet_id)\n services += new_services if new_services else []\n return [service.dict() for service in services] if services else []", "def get(self):\n return UserServices.get_all()", "def get_available_models():\n modelpath = os.path.join(os.path.dirname(__file__), \"train\", \"model\")\n models = sorted(item.name.replace(\".py\", \"\").replace(\"_\", \"-\")\n for item in os.scandir(modelpath)\n if not item.name.startswith(\"_\")\n and item.name.endswith(\".py\"))\n return models", "def directory_services(self) -> 'outputs.DirectoryServicesConfigResponse':\n return pulumi.get(self, \"directory_services\")", "def find_services(self) -> List[str]:\n results = self.collection.distinct(\"process.serviceName\")\n return [result for result in results]", "def get_all(self, uuid):\n\n # TODO: pine client for python\n device = self.device.get(uuid)\n\n query = '$expand=service_install($select=id&$expand=service($select=service_name))&$filter=service_install/any(d:d/device%20eq%20{device_id})'.format(device_id=device['id'])\n\n return self.base_request.request(\n 'device_service_environment_variable', 'GET', raw_query=query,\n endpoint=self.settings.get('pine_endpoint')\n )['d']", "def running_services(self) 
-> List[Callable]:\n return self._running_svcs", "def definition_of_services(self):\r\n return True", "def get_service_list():\n service_dict = requests.get('http://consul:8500/v1/catalog/services').json()\n service_list = []\n for s in service_dict:\n service_list.append(s)\n return service_list", "async def get_services(self, **kwargs) -> BleakGATTServiceCollection:\n warn(\n \"This method will be removed future version, use the services property instead.\",\n FutureWarning,\n stacklevel=2,\n )\n return await self._backend.get_services(**kwargs)", "def run_services():\n for service in (\"minvd\", \"httpd\", \"ntpd\"):\n sudo(\"service %s start\" % service)\n sudo(\"chkconfig %s on\" % service)", "def perform_setup(self, services):\n pass", "def get_accessibility_services(self):\n result = []\n services = self.find_tags(\"service\")\n for s in services:\n for action in s.findall(\"./intent-filter/action\"):\n if \"android.accessibilityservice.AccessibilityService\" in action.attrib.values():\n result.append(s.attrib['{http://schemas.android.com/apk/res/android}name'])\n # print(result)\n return result", "def services(self):\n return self", "def get_effective_services(self):\n myname = self['hostgroup_name']\n if not myname: return []\n \n result = []\n for service in Service.objects.all:\n hostgroup_name = service['hostgroup_name'] or \"\"\n hostgroups = service['hostgroups'] or \"\"\n if myname in hostgroups.split(','):\n result.append( service )\n elif myname in hostgroup_name.split(\",\"):\n result.append( service )\n return result", "def availablemodels(self):\n return self.__models.keys()", "def test_get_all_virtualservices(self,setup_suite):\n _, resp = get('virtualservice')\n vs_obj_list = resp['results']\n for vs_obj in vs_obj_list:\n logger.info(\" >>> VS Name: %s <<<\" % vs_obj['name'])", "def services(status):\n\n run(\"sudo systemctl %s xprof.service\" % status)", "def get_services_dir():\n return bytestostr(libruss.russ_get_services_dir())", "def _get_service_list(self, service_name):\n service_list = self.service_dict[service_name]\n\n return service_list", "def get_services(self):\n try:\n response = requests.get(\n Untiny.SERVICES_URL,\n params=dict(format=\"text\")\n )\n except requests.RequestException:\n return set()\n\n return set([s.strip() for s in response.text.split(',')])", "def get_installed_models():\n global _installed_models_cache\n if _installed_models_cache is not None:\n return _installed_models_cache\n _installed_models_cache = []\n for a in settings.INSTALLED_APPS:\n try:\n _installed_models_cache.append(__import__(a + '.models', '', '', ['']))\n except ImportError:\n pass\n return _installed_models_cache", "def getNodeServiceList(self,node):\n data = self.connect('get','nodes/%s/services' % (node),None)\n return data", "def get_commands():\n\n commands = {}\n\n if not settings.configured:\n return commands\n\n for app_config in reversed(list(apps.get_app_configs())):\n if app_config.label.startswith(lib_name):\n path = os.path.join(app_config.path, 'management')\n commands.update({name: app_config.name for name in find_commands(path)})\n\n return commands", "def startRhevmDbRelatedServices():\n (output, rc) = etlService.conditionalStart()\n if rc != 0:\n logging.warn(\"Failed to start rhevm-etl\")\n controller.MESSAGES.append(output_messages.ERR_FAILED_START_SERVICE % \"rhevm-etl\")\n\n (output, rc) = notificationService.conditionalStart()\n if rc != 0:\n logging.warn(\"Failed to start rhevm-notifierd\")\n 
controller.MESSAGES.append(output_messages.ERR_FAILED_START_SERVICE % \"rhevm-notifierd\")", "def services(self):\n return ServicesTable(self.rpc, self.name)", "def services(**kwargs):\n pass", "def get_list_of_services(services):\n if not services: # If no services passed, get all folders with a docker-compose file\n services = find_docker_compose_services()\n\n # See if service passed in is a constellation, if so expand\n if len(services) == 1 and services[0] in SERVICE_CONSTELLATIONS:\n services = SERVICE_CONSTELLATIONS[services[0]]\n\n # Check that each service is in the directory and has a docker-compose file\n check_services(services)\n\n return services", "def selectable_services_modes():\n\n db = current.db\n s3db = current.s3db\n\n mtable = s3db.org_service_mode\n query = (mtable.deleted == False)\n rows = db(query).select(mtable.id,\n mtable.name,\n )\n modes = {row.id: row.name for row in rows}\n return modes", "def _add_services(self):\n this_service = {'name': '{{ metadata.package }}'}\n other_services = [\n {'name': 'mysql',\n 'location': 'cs:percona-cluster',\n 'constraints': {'mem': '3072M'}},\n {'name': 'rabbitmq-server'},\n {'name': 'keystone'},\n {'name': 'manila'}\n ]\n super(ManilaPluginCharmDeployment, self)._add_services(\n this_service, other_services)", "def services(self) -> BleakGATTServiceCollection:\n if not self._backend.services:\n raise BleakError(\"Service Discovery has not been performed yet\")\n\n return self._backend.services", "def featured_services(self):\n return self.split(self.settings.featured_services)", "def list_services(service='http://arcgis.inei.gob.pe:6080/arcgis/rest/services'):\n all_services = []\n r = _post(service)\n for s in r['services']:\n all_services.append('/'.join([service, s['name'], s['type']]))\n for s in r['folders']:\n new = '/'.join([service, s])\n endpt = _post(new)\n for serv in endpt['services']:\n all_services.append('/'.join([service, serv['name'], serv['type']]))\n return all_services", "def _get_components_list():\n # Order the services to install by service installation order\n ordered_services = sorted(\n config[SERVICES_TO_INSTALL],\n key=SERVICE_INSTALLATION_ORDER.index\n )\n # Can't easily use list comprehension here because this is a list of lists\n ordered_components = []\n for service in ordered_services:\n ordered_components.extend(SERVICE_COMPONENTS[service])\n return ordered_components", "def _add_services(self):\n this_service = {'name': 'keystone'}\n other_services = [\n {'name': 'percona-cluster', 'constraints': {'mem': '3072M'}},\n {'name': 'rabbitmq-server'}, # satisfy wrkload stat\n {'name': 'cinder'},\n ]\n super(KeystoneBasicDeployment, self)._add_services(this_service,\n other_services)", "def _GetActiveServiceNames(config):\n for name, service_config in config.services.items():\n if getattr(service_config, 'active', True):\n yield name", "def hostsplit_service(self):\n self.which_owner()\n self.which_security()\n\n for service, value in self.service_discovery.items():\n self.details[\"services\"][service] = self.which_service(service, **value)", "async def _start_nested_services(self):\n loaded = set()\n members = inspect.getmembers(self, predicate=inspect.ismethod)\n ordering_required = [name for name, method in members\n if hasattr(method, \"requirements_definition\")]\n self.log.debug(\"Requirements will be gathered from %s\",\n ', '.join(ordering_required))\n while ordering_required:\n ordered_count = 0\n for name in ordering_required[:]:\n self.log.debug(\"Check %s\", name)\n method = 
getattr(self, name)\n requirements = getattr(method, \"service_requirements\")\n if len(requirements) > 0 and not loaded.issuperset(requirements):\n self.log.debug(\"Not enought requirements. Loaded: %s, Required: %s\",\n loaded, requirements)\n continue\n self.log.debug(\"Getting requirements from %s\", name)\n try:\n services = await method()\n except Exception:\n self.log.exception(\"Exception while receiving %s requirements\", name)\n raise\n self.log.debug(\"Requirements from %s: %s\", method, services)\n if not (services is None or isinstance(services, list)):\n raise TypeError(\"Requirements method must return list or None. \"\n \"It returns %s (%s type) instead.\",\n services, type(services))\n if services:\n for service in services:\n self.nested_service_pre_start(service)\n self._services.add(service)\n ordering_required.remove(name)\n ordered_count += 1\n loaded.add(name)\n self.log.debug(\"Nested service %s was loaded\", name)\n if ordered_count == 0:\n raise RuntimeError(\n \"Can't resolve services dependencies \"\n \"from %s\" % ', '.join(ordering_required)\n )\n\n await self._services.start_all()" ]
[ "0.73665935", "0.72499305", "0.7065005", "0.69415635", "0.691009", "0.68078464", "0.67918813", "0.6701775", "0.6570361", "0.6570103", "0.6446339", "0.64197904", "0.6369506", "0.6263007", "0.6227629", "0.6224836", "0.6223671", "0.62065744", "0.61650133", "0.61564034", "0.6147583", "0.61332023", "0.6098467", "0.6088657", "0.60885006", "0.60679585", "0.6063605", "0.6038896", "0.603272", "0.6024349", "0.6020919", "0.60109556", "0.6007506", "0.60057616", "0.59996927", "0.59603983", "0.5952576", "0.593765", "0.59346557", "0.5914808", "0.59059495", "0.58635634", "0.58608353", "0.5840679", "0.5823769", "0.58222127", "0.5816526", "0.5805298", "0.57989687", "0.57876146", "0.57859755", "0.5771978", "0.5741022", "0.5722573", "0.57196057", "0.5714733", "0.5687647", "0.5684219", "0.56800747", "0.5669767", "0.5665041", "0.5640559", "0.56394273", "0.56334037", "0.56323457", "0.56270325", "0.56221884", "0.5618208", "0.56129813", "0.5608026", "0.55951595", "0.5589168", "0.5575999", "0.5573408", "0.5567823", "0.5564658", "0.55569094", "0.5547591", "0.5544931", "0.55441755", "0.5528139", "0.55202025", "0.551924", "0.5516394", "0.5510147", "0.5497736", "0.5493977", "0.5493577", "0.54920244", "0.54833823", "0.5482509", "0.54688996", "0.5455116", "0.5451942", "0.54432327", "0.5441484", "0.54346675", "0.5431938", "0.54266405", "0.54205865" ]
0.7025927
3
Initialize the parameters of the logistic regression
def __init__(self, input, n_in, n_out,binary=True,stochastic=True): # initialize with 0 the weights W as a matrix of shape (n_in, n_out) self.W = theano.shared( value=numpy.zeros( (n_in, n_out), dtype=theano.config.floatX ), name='W', borrow=True ) # initialize the biases b as a vector of n_out 0s self.b = theano.shared( value=numpy.zeros( (n_out,), dtype=theano.config.floatX ), name='b', borrow=True ) self.Wb = theano.shared( value=numpy.zeros( (n_in, n_out), dtype=theano.config.floatX ), name='Wb', borrow=True ) if (binary): self.wrt = [self.Wb, self.b] self.p_y_given_x = T.nnet.softmax(T.dot(input, self.Wb) + self.b) self.output=T.dot(input, self.Wb) + self.b else: self.wrt = [self.W, self.b] self.p_y_given_x = T.nnet.softmax(T.dot(input, self.W) + self.b) self.output=self.p_y_given_x # parameters of the model # symbolic expression for computing the matrix of class-membership # probabilities # Where: # W is a matrix where column-k represent the separation hyperplane for # class-k # x is a matrix where row-j represents input training sample-j # b is a vector where element-k represent the free parameter of # hyperplane-k # symbolic description of how to compute prediction as class whose # probability is maximal self.y_pred = T.argmax(self.p_y_given_x, axis=1) # keep track of model input self.input = input # parameters of the model self.params = [self.W,self.b] self.Ws=[self.W,self.Wb]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_initial_params(model: LogisticRegression):\n n_classes = 15 # threat types\n n_features = 33 # Number of features in dataset\n model.classes_ = np.array([i for i in range(15)])\n\n model.coef_ = np.zeros((n_classes, n_features))\n if model.fit_intercept:\n model.intercept_ = np.zeros((n_classes,))", "def __init__(self, reg_penalty='l2', reg_inv=1.0, k_fold=5, random_state=0):\n print(\"Initialize model Logistic Regression\")\n self.reg_penalty = reg_penalty\n self.reg_inv = reg_inv\n self.k_fold = k_fold\n self.random_state = random_state\n self.model = sklearn.linear_model.LogisticRegression(penalty=self.reg_penalty,\n C=self.reg_inv,\n max_iter=1000, \n random_state=self.random_state)", "def __init__(self, **kwargs):\n super(LogisticRegression, self).__init__()\n self.C = kwargs.pop(\"C\", 100)\n self.clf = _LogisticRegression(C=self.C, **kwargs)", "def __init__(self, log=True, normalize=False):\r\n self.model = LinearRegression(normalize=normalize)\r\n self.log = log", "def test_logistic_regression_c_parameter(params, X_train, X_test, y_train, y_test):", "def __init__(self, train, validation=None, initial_weight=None,\n loss_function_name='logistic',\n calculate_weight='gradient',\n regularizer=None, regularizer_p=None):\n # Initialize the super class with given data.\n # Transform the y into {0,1}\n y, tx = train\n y[np.where(y < 0)] = 0\n train = (y, tx)\n if validation:\n val_y, val_tx = validation\n val_y[np.where(val_y < 0)] = 0\n validation = (val_y, val_tx)\n super(LogisticRegression, self).__init__(train, validation,\n initial_weight=initial_weight,\n loss_function_name=loss_function_name,\n cal_weight=calculate_weight,\n regularizer=regularizer,\n regularizer_p=regularizer_p)\n # Set predicted label\n self.pred_label = [-1, 1]", "def __init__(self, estimator, **kwargs):\n super(LogisticRegression, self).__init__(\n estimator, **kwargs)\n\n self.estimator = estimator", "def train_model(self):\r\n alpha, accuracy_rate = self.select_model()\r\n # Initialize logistic regression with alpha(learning rate)\r\n lg = logisticregression(C=alpha)\r\n # Train the model.\r\n lg.fit(self.training_data, self.target_data)\r\n # Save the trained model as .pkl file.\r\n joblib.dump(value=lg, filename=self.intention_id+'.pkl', compress=1)\r\n print \"Estimated Parameters of Logistic Regression\"\r\n # Estimated parameters of logistic regression.\r\n print lg.get_params()", "def __init__(self):\n self.label = \"Logistic regression\"\n self.description = \"This tool is a useful complement to Weights-of-Evidence Calculate Response tool as Logistic Regression does not make the assumption of conditional independence of the evidence with regards to the training sites. Using the evidence and assocaited weights tables, this tool creates the outputs the response and standard deviation rasters. The calculations are based on the Gen_Class attribute in the weights table and the type of evidence. Please note that the Logistic Regression tool accepts a maximum of 6,000 unique conditions or it fails. Also note that there is an upper limit of 100,000 unit cells per class in each evidence raster layer. If a class in an evidence raster goes above this, the script contains a function to increase the unit cell size to ensure an upper limit of 100,000. 
These issues are unable to be fixed due to a hard coded limitation in the Logistic Regression executable sdmlr.exe.\"\n self.canRunInBackground = False\n self.category = \"Weights of Evidence\"", "def on_train_begin(self, logs={}):\n self._beta = []", "def _fit(self, _X, _y):\n\n self.model = linear_model.LogisticRegression(penalty=self.penalty, random_state=self.seed,\n solver='saga', n_jobs=self.n_jobs)\n self.model.fit(_X, _y)", "def __init__(self):\n logger.debug('Initializing %s model.' % self.__class__.__name__)\n self.dependent_attributes = ['_alpha',\n '_log_like',\n '_gradient','_K',\n '_log_det']\n self._previous_parameters = None # previous parameters from last call\n self.grad_method = None # could be {'finite_difference','adjoint'}\n self.noise_var_constraint = '+ve' # Gaussian noise variance constraint\n return", "def __init__(self):\n super().__init__()\n import sklearn\n import sklearn.linear_model\n self.model = sklearn.linear_model.LogisticRegression", "def log_reg(x_train, y_train):\n\n log_reg_classifier = LogisticRegression(max_iter=1000, solver='lbfgs')\n log_reg_classifier.fit(x_train, y_train)\n return log_reg_classifier\n\n # log_reg_classifier.fit(x_train, y_train)", "def logistic(weights, data, targets, hyperparameters):\n\n # TODO: Finish this function\n\n return f, df, y", "def __init__(self, seed = None):\n self.data_dir = pkg_resources.resource_filename('logistic_control_variate', 'data/')\n self.generate_data(seed)\n # Holds logistic regression object for this example\n self.lr = None", "def __init__(self, estimator = LogisticRegression()): \n\t self.estimator = estimator", "def __init__(self,name,exp_base, random_seed=None,version=None):\n self.exp_base = exp_base\n self.log_fun = lambda x: np.log(x) / np.log(self.exp_base)\n self.exp_fun = lambda x: np.power(self.exp_base,x)\n\n super(LogNormalBehaviorModel, self).__init__(name, random_seed, version)", "def set_model_params(\n model: LogisticRegression, params: LogRegParams\n) -> LogisticRegression:\n model.coef_ = params[0]\n if model.fit_intercept:\n model.intercept_ = params[1]\n return model", "def train_logistic_regression(train_x, train_y):\n\n logistic_regression_model = LogisticRegression(penalty='l2', C=1.0)\n logistic_regression_model.fit(train_x, train_y)\n return logistic_regression_model", "def main():\r\n x = [\r\n [ 1,1 ], [ 0,0 ], [ 1,0 ], [ 0,0 ], [ 0,0 ], [ 0,0 ],\r\n [ 0,0 ], [ 0,0 ], [ 1,1 ], [ 0,0 ], [ 0,0 ], [ 1,0 ],\r\n [ 1,0 ], [ 0,0 ], [ 1,1 ], [ 0,0 ], [ 1,0 ], [ 0,0 ]\r\n ]\r\n\r\n # Encoding of the correct classes for the training material\r\n y = [1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0]\r\n b = BinaryLogisticRegression(x, y)\r\n b.fit()\r\n b.print_result()", "def on_train_begin(self, logs={}):\n self.losses = []\n self.accuracies = []", "def logistic_regression(y, tx, initial_w, max_iters, gamma):\n return least_squares_SGD(y, tx, initial_w, max_iters, gamma, loss_function=logistic_loss, gradient=logistic_grad)", "def __init__(self):\n super().__init__(derivatives=BCELossWithLogitsDerivatives())", "def LogisticRegression_sklearn(X_train, X_test, y_train, y_test):\n\n\tlog_reg = LogisticRegression()\n\tlog_reg.fit(X_train, y_train.ravel())\n\tyPred =log_reg.predict(X_test)\n\n\t#Printing metrics of the logistic regression model\n\tprint('Accuracy:', metrics.accuracy_score(y_test, yPred))\n\tprint('Precision:', metrics.precision_score(y_test, yPred))\n\tprint('Recall', metrics.recall_score(y_test, yPred))\n\n\t#confusion matrix\n\n\tconfusionMatrix = 
matrix.confusion_matrix(y_test, yPred)\n\tsb.heatmap(pd.DataFrame(confusionMatrix), annot= True, fmt='g')\n\tplt.title('Confustion matrix with default value 1')\n\tplt.ylabel('True values')\n\tplt.xlabel('Predicted values')\n\tplt.show()", "def initialise_parameters(self):\n # Weights\n init = select_w_init(self.w_init)\n if self.w_gain:\n gain = nn.init.calculate_gain('relu')\n init(self.relations, gain=gain)\n else:\n init(self.relations)\n\n # Biases\n if self.b_init:\n init = select_b_init(self.b_init)\n init(self.sbias)\n init(self.pbias)\n init(self.obias)", "def fit_logistic_regression():\n\n logger.debug(\"Running the fit_logistic_regression function now\")\n\n #Loading the configuration\n with open(os.path.join(\"config\",\"config.yml\"), \"r\") as f:\n config = yaml.safe_load(f)\n\n #Loading and pre processing the data\n logger.debug(\"Loading and pre processing the data\")\n train_df = load_data(config[\"load_data\"][\"train_file\"])\n train_df = pre_process_data(train_df, resample = True, resample_count = 500000)\n\n #Defining Pipeline\n pipeline = Pipeline([\n ('tfidf', TfidfVectorizer(analyzer='word', token_pattern=r'[A-Za-z0-9@-]+')),\n ('model', LogisticRegression(random_state=12345, verbose = 1, solver = 'saga')),\n ])\n\n #Defining parameters to vary\n parameters = {\n 'tfidf__max_df': (0.25, 0.5, 0.75),\n 'tfidf__max_features': (None, 5000, 10000, 50000),\n 'tfidf__ngram_range': ((1, 1), (1, 2)),\n 'model__C': (0.01, 1, 100)\n }\n\n scoring_list = [\"accuracy\", \"f1\", \"precision\", \"recall\", \"roc_auc\"]\n \n #Performing 5fold CV to determine best hyperparameters\n model = GridSearchCV(pipeline, parameters, cv=5,\n n_jobs=-1, verbose=1, scoring=scoring_list, refit='f1',)\n\n t0 = datetime.datetime.now()\n\n model.fit(train_df[\"Review\"].tolist(), train_df[\"Ratings\"].to_numpy())\n \n logger.info(\"Grid Search performed in {}\".format(str(datetime.datetime.now()-t0)))\n\n #Saving results\n res_df = pd.DataFrame(model.cv_results_)\n res_df.to_csv(os.path.join(config[\"summary_stats\"][\"save_location\"], \"LogisticRegressionResults.csv\"))\n \n #Saving the model\n pickle.dump(model, open(os.path.join(config[\"models\"][\"save_location\"], \"LogisticRegression.pkl\"),'wb'))\n\n return", "def __init__(self, in_features, out_features):\n \n ########################\n # PUT YOUR CODE HERE #\n #######################\n\n\n self.params = {'weight': 0.0001 * np.random.randn(out_features, in_features), 'bias': np.zeros((out_features, 1))}\n self.grads = {'weight': np.zeros((out_features, in_features)), 'bias': np.zeros((out_features, 1))}\n\n\n\n ########################\n # END OF YOUR CODE #\n #######################", "def initialize_model_params():\n beta_0 = np.array([0., 0.])\n mu_0 = 0.\n return beta_0, mu_0", "def __init__(self, loglike, data, x, sigma):\n\n # add inputs as class attributes\n self.likelihood = loglike\n self.data = data\n self.x = x\n self.sigma = sigma", "def _initialize(self, X, resp, *arg, **kwarg):\n n_samples, _ = X.shape\n\n if self.mv_stat:\n weights, params = _estimate_mv_stat_parameters(\n self.stat, X, resp) # self.reg_covar\n else:\n weights, params = _estimate_1d_stat_parameters(\n self.stat, X, resp) # self.reg_covar\n weights /= n_samples\n\n self.weights_ = (weights if self.weights_init is None\n else self.weights_init)\n self.params_ = params if self.params_init is None else self.params_init", "def train_logistic_regression(X_train_input, y_train_input, C=1):\r\n from sklearn.linear_model import LogisticRegression\r\n logr_clf = 
LogisticRegression(C=C)\r\n logr_clf.fit(X_train_input, y_train_input)\r\n return logr_clf", "def __init__(self, loglike, data, x, sigma):\n\n # add inputs as class attributes\n self.likelihood = loglike\n self.data = data\n self.x = x\n self.sigma = sigma\n\n # initialise the gradient Op (below)\n self.logpgrad = LogLikeGrad(self.likelihood, self.data, self.x, self.sigma)", "def logistic(weights, data, targets, hyperparameters):\n y = logistic_predict(weights, data)\n\n #####################################################################\n # TODO: #\n # Given weights and data, return the averaged loss over all data #\n # points, gradient of parameters, and the probabilities given by #\n # logistic regression. #\n #####################################################################\n f = None\n df = None\n\n f = evaluate(targets, y)[0]\n\n N = len(data)\n M = len(weights) - 1 \n temp = np.ones([N, M + 1])\n temp[: N, : M] = np.array(data)\n\n\n df = np.zeros([M+1, 1])\n\n df[:, 0] = np.array([[np.mean([(y.flatten()[i] - targets.flatten()[i]) * temp[i][j] for i in range(0, N)]) for j in range(0, M + 1)],])\n\n # df = np.matrix([[np.mean([(y[i] - targets[i]) * temp[i][j] for i in range(0, N)]) for j in range(0, M + 1)],])\n\n #####################################################################\n # END OF YOUR CODE #\n #####################################################################\n return f, df, y", "def __init__(self, in_features, out_features):\n \n ########################\n # PUT YOUR CODE HERE #\n #######################\n self.params = {'weight': np.random.normal(loc = 0, scale=0.0001, size=(out_features,in_features)),\\\n 'bias': np.zeros((1, out_features))}\n \n self.grads = {'weight': np.zeros((out_features,in_features)),\\\n 'bias': np.zeros((1, out_features))}\n ########################\n # END OF YOUR CODE #\n #######################", "def __init__(self, classific_method=\"LogisticRegression\"):\n\t\tself.classific_method = classific_method", "def logistic_regression(y, tx, initial_w, max_iters, gamma):\n\tif len(initial_w.shape)==2:\n\t\tinitial_w = initial_w.reshape((max(initial_w.shape)))\n\tif len(y.shape)==2:\n\t\ty = y.reshape((max(y.shape)))\n\n\tw = logistic_regression_gradient_descent(y, tx, initial_w, max_iters, gamma)\n\t\n\tloss = calculate_nll(y, tx, w)\n\n\treturn w, loss", "def _fit(self):\n\n\t\tclf = LogisticRegression()\n\t\tclf.fit(inputs, labels)\n\n\t\treturn clf", "def __init_finalaf(self, i,h1,classes):\n self.params['W'+i]=np.random.randn(h1,classes)*self.weight_scale\n self.params['b'+i]=np.zeros(classes)", "def logistic(self, data, weights, biases):\n\n state_weight_prods = np.dot(data, weights)\n print(-state_weight_prods - biases)\n activations = 1.0 / (1 + np.exp(-state_weight_prods - biases))\n plt.plot(state_weight_prods, activations)\n plt.show()\n return activations", "def logistic_reg(training_data):\r\n \r\n \"\"\" Setting guesses for minimum and maximum values of regularization parameter then\r\n find the value of parameter that minimizes error on cross validation data. If\r\n local minimum is found the return this model. 
If not, extend minimum or maximum \r\n appropriately and repeat \"\"\"\r\n from sklearn.linear_model import LogisticRegression\r\n C_min = 1.0e-5\r\n C_max = 1.0e5\r\n regularization_flag = 1 # To set 1 until local minimum is found\r\n regularization_param = 0\r\n \r\n# while regularization_flag != 0:\r\n# regularization_param, regularization_flag = set_reg_param(training_data, cv_data, alpha_min, alpha_max)\r\n# if regularization_flag == -1:\r\n# \"\"\" The local minimum is at point less than alpha_min \"\"\"\r\n# alpha_min = alpha_min * 0.3\r\n# if regularization_flag == 1:\r\n# \"\"\" The local minimum is at point greater then alpha_max \"\"\"\r\n# alpha_max = alpha_max * 3\r\n \r\n lr = LogisticRegression (C=C_max, random_state=0)\r\n lr.fit(training_data.X, training_data.y)\r\n return lr, C_max", "def resetParams(self):\n self.prediction = cons.init_pred # Classifier payoff - initialized to a constant initial payoff value\n self.error = cons.init_err # Classifier error - initialized to a constant initial error value\n self.fitness = cons.init_fit # Classifier fitness - initialized to a constant initial fitness value", "def logistic_regression(y, tx, initial_w, max_iters, gamma):\n \n # Define parameters to store w and loss\n ws = [initial_w]\n losses = []\n w = initial_w\n y = (y + 1) / 2 # [-1, 1] -> [0, 1]\n for n_iter in range(max_iters):\n # computes gradient and loss\n\n grad = compute_gradient_log(y, tx, w)\n loss = compute_loss_log(y, tx, w)\n\n #updates w\n\n w = w - gamma * grad\n # store w and loss\n\n ws.append(w)\n losses.append(loss)\n #print(\"logistic regression: Gradient Descent({bi}/{ti}): loss={l}\".format(\n # bi=n_iter, ti=max_iters - 1, l=loss, w0=w[0], w1=w[1]), end=\"\\r\")\n \n return w, loss", "def initialize_parameters(self):\n for i in range(1, self.L):\n self.W[i - 1] = np.random.randn(self.layer_dims[i], self.layer_dims[i - 1]) * 0.01\n self.b[i - 1] = np.zeros((self.layer_dims[i], 1))", "def __init__(self, base_model='LogisticRegression', number_model=50, \n hidden_layer_sizes=(100,), activation='relu',\n kernel='poly', degree=3, gamma='auto',\n criterion='gini', reg_penalty='l2', reg=0.001, random_state=0):\n self.number_model = number_model\n r = random_state\n # Initialise all_model list\n self.all_model = []\n for i in range(number_model):\n if base_model=='Perceptron':\n curr_model = Perceptron(reg_penalty=reg_penalty, reg=reg,\n random_state=i+r*100)\n self.all_model.append(curr_model.model)\n elif base_model=='MLPerceptron':\n curr_model = MLPerceptron(hidden_layer_sizes=hidden_layer_sizes,\n activation=activation, reg=reg, random_state=i+r*100)\n self.all_model.append(curr_model.model)\n elif base_model=='LogisticRegression':\n curr_model = LogisticRegression(reg_penalty=reg_penalty,\n reg_inv=reg, random_state=i+r*100)\n self.all_model.append(curr_model.model)\n elif base_model=='ModelSVM':\n curr_model = ModelSVM(kernel=kernel, degree=degree,\n gamma=gamma, reg=reg, random_state=i+r*100)\n self.all_model.append(curr_model.model)\n elif base_model=='ModelDecisionTree':\n curr_model = ModelDecisionTree(criterion=criterion, random_state=i+r*100)\n self.all_model.append(curr_model.model)", "def __init__(self, *args, **kwargs):\n self.classes = [0,1] # (default to 0/1; replace during training)\n self.theta = np.array([]) # placeholder value before training\n\n if len(args) or len(kwargs): # if we were given optional arguments,\n self.train(*args,**kwargs) # just pass them through to \"train\"", "def reg_logistic_regression(y, tx, lambda_, initial_w, 
max_iters, gamma):\n # initializing the weights\n w = initial_w\n\n # regularized logistic regression\n for iter in range(max_iters):\n # updating the weights\n grad = log_likelihood_gradient(y, tx, w)+2*lambda_*w\n # if iter % (max_iters//2) == 0:\n #print(log_likelihood_loss(y, tx, w)+lambda_*np.squeeze(w.T.dot(w)))\n w -= gamma*grad\n loss = log_likelihood_loss(y, tx, w)+lambda_*np.squeeze(w.T.dot(w))\n return w, loss", "def logistic_regression(y, tx, initial_w, max_iters, gamma, verbose=False):\n return gradient_descent(y, tx, initial_w, max_iters, gamma, \n compute_logistic_loss, compute_logistic_gradient, verbose=verbose)", "def fit(self, x, y):\n # Note Logistic Regression Runtime\n start_time = time.time()\n\n # Converting Pandas DataFrame to Numpy arrays\n if not type(x).__module__ == np.__name__:\n x = x.to_numpy()\n if not type(y).__module__ == np.__name__:\n y = y.to_numpy()\n\n # Insert a column of 1 in the feature vector X for the bias term in the weights\n x = np.insert(x,0,1,axis=1)\n \n # Verify dimension of input\n if len(x) != len(y):\n print(\"The number of input features vector must be to be the same as the number of target variables\")\n else:\n losses = self.gradient_descent(x,y)\n\n # Note end time\n end_time = time.time()\n\n # Log runtime\n print(\"Logistic Regression training time: {0:.2f}s\".format(end_time - start_time))\n \n return losses", "def init_loss_and_optimizer(self):\n self.criterion = CrossEntropyLoss()\n self.optimizer = Adam(self.model.parameters(), lr=self.hyper_parameters['lr'])", "def train_logisticRegression(data: np.array, labels: np.array)->None:\n\n n_examples = np.size(data, 0)\n n_features = np.size(data, 1)\n n_categories = np.size(labels, 1)\n\n data = np.hstack((np.ones((n_examples, 1)), data))\n\n print(data[0:5, :])\n\n X_train, X_test, y_train, y_test, idx_test = split_data(data, labels, 0.7)\n\n convergence_goal = 1e-3\n learning_rate = 0.01\n\n theta = np.random.uniform(size=((n_features+1, n_categories)))\n\n for i in range(n_categories):\n\n cost_var = 1\n\n previous_cost = 1e6\n iterations = 0\n cost_to_plot = []\n\n while cost_var > convergence_goal:\n iterations += 1\n cost, grad = costFunction(X_train, y_train[:, i], theta[:, i])\n theta[:, i] = update_theta(theta[:, i], grad, learning_rate)\n cost_var = previous_cost - cost\n previous_cost = cost\n if iterations == 1: cost_var = 1\n cost_to_plot.append(cost)\n # print(cost)\n\n plt.plot(range(iterations), cost_to_plot, 'g-', label = 'cost')\n plt.xlabel('iterations')\n plt.ylabel('cost')\n # plt.show()\n\n predictions = lrPredict(theta, X_test)\n\n print(predictions[0:5, :])\n print(y_test[0:5, :])\n\n accuracy = np.mean([p == l for p, l in zip(predictions, y_test)])\n print(\"Accuracy = {}\".format(accuracy))\n\n pass", "def __init__(\n self, log_likelihood: float, log_prior: float, weight: float, kwargs=None\n ):\n self.log_likelihood = log_likelihood\n self.log_prior = log_prior\n self.weight = weight\n self.kwargs = {\n tuple(key.split(\".\")) if isinstance(key, str) and \".\" in key else key: value\n for key, value in (kwargs or dict()).items()\n }", "def XavierInit(self):\n\n raw_std = (2 / (self.num_input + self.num_output))**0.5\n if 'relu' == self.act_function:\n init_std = raw_std * (2**0.5)\n elif 'sigmoid' == self.act_function:\n init_std = raw_std\n else:\n init_std = raw_std # * 4\n\n self.W = np.random.normal(0, init_std, (self.num_input, self.num_output))\n self.b = np.random.normal(0, init_std, (1, self.num_output))\n self.v_W = 0\n self.v_b = 0", "def 
__call__(self, parameter_values, random_state=None):\n self.train_model(parameter_values, random_state=random_state)\n log_dict = self.simulate(random_state)\n return log_dict", "def __init__(self,m):\n # initialize model parameters\n \n # w is the m x 1 vector of weights.\n # m: num of features\n self.w = np.random.rand(m)", "def initialize_parameters(n_a,n_x,n_y):\n np.random.seed(1)\n Wax=np.random.randn(n_a,n_x)*0.01 #input to hidden\n Waa=np.random.randn(n_a,n_a)*0.01 #hidden to hidden\n Wya=np.random.randn(n_y,n_a)*0.01 #hidden to output\n b=np.zeros((n_a,1)) #hidden bias\n by=np.zeros((n_y,1)) #output bias\n \n parameters={\"Wax\":Wax,\"Waa\":Waa,\"Wya\":Wya,\"b\":b,\"by\":by}\n return parameters", "def params_init(self) -> None:\n # Initialize weights and biases with uniform distribution.\n nn.init.uniform_(self.emb.weight, self.init_lower, self.init_upper)\n nn.init.uniform_(self.fc_e2h[1].weight, self.init_lower, self.init_upper)\n nn.init.uniform_(self.fc_e2h[1].bias, self.init_lower, self.init_upper)\n for lyr in range(self.n_lyr):\n self.stack_rnn[2 * lyr].params_init()\n nn.init.uniform_(self.fc_h2e[0].weight, self.init_lower, self.init_upper)\n nn.init.uniform_(self.fc_h2e[0].bias, self.init_lower, self.init_upper)", "def logistic_pen(weights, data, targets, hyperparameters):\n\n # TODO: Finish this function\n\n return f, df, y", "def initialize(self): \n self.optimizer = torch.optim.Adam(self.model.parameters(), lr=config.LR)", "def __init__(self, in_features, out_features):\n\n ########################\n # PUT YOUR CODE HERE #\n #######################\n\n self.in_features = in_features\n self.out_features = out_features\n\n self.__MEAN = 0\n self.__STD = 0.0001\n\n self.params = {\n 'weight': np.random.normal(loc=self.__MEAN, scale=self.__STD, size=(out_features, in_features)), \n 'bias': np.zeros(out_features),\n }\n self.grads = {\n 'weight': None, \n 'bias': None,\n }\n\n self.input_cache = None\n ########################\n # END OF YOUR CODE #\n #######################", "def logistic_regression(y, tx, initial_w, max_iters, gamma):\n # initializing the weights\n w = initial_w\n\n # logistic regression\n for n_iter in range(max_iters):\n # updating the weights\n grad = log_likelihood_gradient(y, tx, w)\n w -= gamma*grad\n if n_iter % (max_iters//10) == 0:\n print(log_likelihood_loss(y, tx, w))\n return w, log_likelihood_loss(y, tx, w)", "def set_params(self, params: Dict):\n\n if params['training_instances'] is not None:\n self.training_instances = params['training_instances']\n if params['n'] is not None:\n self.n = params['n']\n if params['lda'] is not None:\n self.lda = params['lda']\n if params['verbose'] is not None:\n self.verbose = params['verbose']\n\n self.num_features = self.training_instances[0].get_feature_count()\n self.w = None\n self.b = None", "def __init__(self, x_train, model):\n self.x_train = x_train\n self.model = model", "def set_fit_params(self):\n\n self.p0 = np.array([self.A_arr, self.T_a])\n # initial guess at A_arr and T_a\n\n self.popt, self.pcov = curve_fit(\n self.get_eta_fit, self.T_exp, self.eta_exp, p0=self.p0\n )\n\n self.A_arr = self.popt[0]\n self.T_a = self.popt[1]\n\n self.T_array = self.T_model", "def trainRegressionModel(X,y):\n # # instantiate a logistic regression model, and fit with X and y\n # model = LogisticRegression()\n # model = model.fit(X, y)\n # # check the accuracy on the training set\n # print(model.score(X, y))\n #X['intercept'] = 1.0\n #del X['isCapitalized']\n #del X['isNN']\n #del X['isNNP']\n #del X['isJJ']\n 
#del X['isUpper']\n #del X['isPrecedingIN']\n logit = sm.Logit(y, X)\n result = logit.fit()\n print(result.summary())\n print(result.conf_int())\n model = LogisticRegression()\n model = model.fit(X, y)\n print(model.score(X, y))\n print(y.mean())\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0)\n model2 = LogisticRegression()\n model2.fit(X_train, y_train)\n # predict class labels for the test set\n predicted = model.predict(X_test)\n print(predicted)\n for i in predicted:\n if i==1:\n print(\"Test:\"+str(i))\n print(max(predicted))\n #generate class probabilities\n probs = model2.predict_proba(X_test)\n print(probs)\n # generate evaluation metrics\n print(\"Accuracy: \"+str(metrics.accuracy_score(y_test, predicted)))\n print(\"AUC: \"+str(metrics.roc_auc_score(y_test, probs[:, 1])))\n print(metrics.confusion_matrix(y_test, predicted))\n print(metrics.classification_report(y_test, predicted))\n\n from sklearn.cross_validation import cross_val_score\n # evaluate the model using 10-fold cross-validation\n scores = cross_val_score(LogisticRegression(), X, y, scoring='accuracy', cv=10)\n print(scores)\n print(scores.mean())", "def logistic_fit(self, penalty: str = 'l2', c: float = 1.0):\r\n self.LogisticModel = LogisticRegression(solver='liblinear', penalty=penalty, C=c).fit(self.x, self.y)", "def train(self):\n df = self.df\n self.scaler = MinMaxScaler()\n self.scaler.fit(df)\n df[df.columns] = self.scaler.transform(df)\n\n\n X_train, y_train = get_X_y(df, self.n_days, self.length , self.style)\n X_train = np.array(X_train)\n X_train.shape = (X_train.shape[0], X_train.shape[2])\n\n self.clf = LogisticRegression().fit(X_train, y_train)\n\n #es = EarlyStopping(monitor = 'accuracy',mode = 'min' , verbose = 1, patience = 100, restore_best_weights = True)", "def __init__(self, X = None, Y = None):\n if X:\n self.X = builder.X\n self.num_examples = self.X.shape[0]\n self.num_features = self.X.shape[1] - 1\n else:\n self.num_examples = 100\n self.num_features = 1\n self.X = self.default_single_feature_X()\n \n if Y:\n self.Y = Y\n else:\n self.Y = self.default_linear_related_Y()\n \n self.theta_vector = None", "def _init_model_params(self):\n super()._init_model_params()\n\n if 'e' in self.init_params:\n if self.init_type == 'uniform':\n if self.nr_no_train_de == 0:\n self.B = [\n np.full(\n (self.n_states, self.n_features[i]), 1.0 / self.n_features[i])\n for i in range(self.n_emissions)\n ]\n else:\n check_if_attributes_set(self, attr='e')\n else:\n if self.nr_no_train_de == 0:\n self.B = [\n np.random.rand(self.n_states, self.n_features[i])\n for i in range(self.n_emissions)\n ]\n for i in range(self.n_emissions):\n normalise(self.B[i], axis=1)\n\n else:\n check_if_attributes_set(self, attr='e')", "def init_parameters(self):\n stdv = 1. 
/ math.sqrt(self.weight.data.size(1))\n self.weight.data.uniform_(-stdv, stdv)\n if self.bias is not None:\n self.bias.data.uniform_(-stdv, stdv)", "def __init__(self, fitHisto=None, trainHisto=None, kernel='RBF', hParams={}):\n if fitHisto is None:\n raise ValueError(\"Must pass a fit histogram to GPFitter()!\")\n self.fitHisto = fitHisto\n self.trainHisto = trainHisto\n self.kernelFunc = kernel #internally the self.kernel variable will hold the actual kernel object.\n self.hParams = hParams\n # Fill all the arrays from the histos.", "def build_logistic_regr():\n logistic_pipeline = None\n\n logistic_pipeline = Pipeline([\n ('vect', CountVectorizer()),\n ('clf', LogisticRegression()), \n ])\n \n return logistic_pipeline", "def test_train_logist(x_train_variable, y_train_dep):\n # Ensure the function works\n try:\n lrc = cls.train_logistic(x_train_variable, y_train_dep)\n logging.info(\"Successful Logistic Model\")\n except Exception as err:\n logging.error(\"Errors in Fitting the Logistic Regression\")\n raise err\n return lrc", "def on_train_begin(self, logs={}):\n self.losses = []\n self.val_losses = []", "def __init__(self, parameters={}):\n # Assumes that a bias unit has been added to feature vector as the last feature\n # If usecolumnones is False, it should ignore this last feature\n self.params = {'usecolumnones': True}\n self.reset(parameters)", "def __init__(self, numpy_rng, input, n_in, hidden_layers_sizes, n_out):\n # instance variables\n self.numpy_rng = numpy_rng\n self.input = input\n self.n_in = n_in\n self.hidden_layers_sizes = hidden_layers_sizes\n self.n_layers = len(hidden_layers_sizes)\n self.n_out = n_out\n\n self.hidden_layers = []\n self.params = []\n\n self.initialize_variables()\n\n\n ################\n ## Prediction ##\n ################\n self.y_pred = self.logistic_regression_layer.y_pred", "def reg_logistic_regression(y, tx, lambda_ , initial_w, max_iters, gamma):\n \n # Define parameters to store w and loss\n ws = [initial_w]\n losses = []\n w = initial_w\n y = (y + 1) / 2 # [-1, 1] -> [0, 1]\n \n for n_iter in range(max_iters):\n # computes gradient and loss\n\n grad = compute_gradient_log(y, tx, w)+2*lambda_*np.linalg.norm(w)\n loss = compute_loss_log(y, tx, w)+ lambda_*(np.linalg.norm(w)**2)\n\n #updates w\n\n w = w - gamma * grad\n # store w and loss\n\n ws.append(w)\n losses.append(loss)\n #print(\"regularised logistic regression: Gradient Descent({bi}/{ti}): loss={l}\".format(\n # bi=n_iter, ti=max_iters - 1, l=loss, w0=w[0], w1=w[1]), end=\"\\r\")\n return w, loss", "def logistic_regression(y, tx, initial_w=None, max_iters=100, gamma=0.009, batch_size=1):\n # init parameters\n if np.all(initial_w == None): initial_w = np.zeros(tx.shape[1])\n threshold = 1e-8\n losses = []\n y = (1 + y) / 2\n # build tx\n w = initial_w\n\n # start the logistic regression\n for i in range(max_iters):\n # get loss and update w.\n for y_batch, tx_batch in batch_iter(y, tx, batch_size=batch_size, num_batches=1):\n w, _ = learning_by_gradient_descent(y_batch, tx_batch, w, gamma)\n # converge criterion\n losses.append(calculate_loss(y,tx,w))\n if len(losses) > 1 and np.abs(losses[-1] - losses[-2]) < threshold:\n break\n #if i % int(max_iters/5) == 0:\n #print(losses[-1],i,'/{tot}'.format(tot=max_iters))\n\n return w,losses[-1]", "def __init__(self, rng, input, n_in, n_hidden, n_out):\r\n\r\n # Since we are dealing with a one hidden layer MLP, this will translate\r\n # into a HiddenLayer with a tanh activation function connected to the\r\n # LogisticRegression layer; the 
activation function can be replaced by\r\n # sigmoid or any other nonlinear function\r\n self.hiddenLayer = HiddenLayer(rng=rng, input=input,\r\n n_in=n_in, n_out=n_hidden,\r\n activation=T.tanh)\r\n\r\n # The logistic regression layer gets as input the hidden units\r\n # of the hidden layer\r\n self.logRegressionLayer = LogisticRegression(\r\n input=self.hiddenLayer.output,\r\n n_in=n_hidden,\r\n n_out=n_out)\r\n\r\n # L1 norm ; one regularization option is to enforce L1 norm to\r\n # be small\r\n self.L1 = abs(self.hiddenLayer.W).sum() \\\r\n + abs(self.logRegressionLayer.W).sum()\r\n\r\n # square of L2 norm ; one regularization option is to enforce\r\n # square of L2 norm to be small\r\n self.L2_sqr = (self.hiddenLayer.W ** 2).sum() \\\r\n + (self.logRegressionLayer.W ** 2).sum()\r\n\r\n # negative log likelihood of the MLP is given by the negative\r\n # log likelihood of the output of the model, computed in the\r\n # logistic regression layer\r\n self.negative_log_likelihood = self.logRegressionLayer.negative_log_likelihood\r\n # same holds for the function computing the number of errors\r\n self.errors = self.logRegressionLayer.errors\r\n\r\n # the parameters of the model are the parameters of the two layer it is\r\n # made out of\r\n self.params = self.hiddenLayer.params + self.logRegressionLayer.params", "def _train(self, log_prob):\n raise NotImplementedError", "def __init_af(self,i,h1,h2):\n self.params['W'+i]=np.random.randn(h1,h2)*self.weight_scale\n self.params['b'+i]=np.zeros(h2)\n if self.use_batchnorm:\n self.params['gamma'+i]=np.ones(h2)\n self.params['beta'+i]=np.zeros(h2)", "def spark_LogisticRegression(*args, **kwargs): \n return LogisticRegression(*args, **kwargs)", "def logistic_regression(y, tx, initial_w, max_iters, gamma, debug = False):\n losses, ws = gradient_descent(y, tx, initial_w, max_iters, gamma, loss_f = model_logistic.loss, grad_f = model_logistic.grad, debug = debug)\n return get_last_ans(ws, losses)", "def __init__(self, num_parameters=1, init=0.25):\n super(PReLU, self).__init__()\n self.num_parameters = num_parameters\n self.weight = Parameter(Tensor(num_parameters).fill_(init))", "def Initialize(log_like, log_prior, model_func, mean, cov):\n\n curr_params = proposal_rule(cov, mean, (len(mean)-1)/2)\n print('Init params:', curr_params) \n print_params(curr_params, int((len(mean)-1)/2))\n curr_model = model_func(curr_params)\n print('Init model', curr_model)\n curr_like = log_like(curr_model)\n print('Init like:', curr_like) \n curr_prior = log_prior(curr_params)\n print('Init prior', curr_prior)\n return(curr_params, curr_model, curr_like, curr_prior)", "def stability_logistic(x, y, **kwargs):\n rlr = RandomizedLogisticRegression(n_jobs=kwargs.get('n_jobs', 4))\n if 'param' in kwargs:\n rlr.set_params(**kwargs['param'])\n rlr.fit(x, y)\n return rlr.get_support()", "def train_logistic_regression(x_train, y_train, learning_rate, fit_intercept=False, max_iter=500):\r\n if fit_intercept:\r\n intercept = np.ones(x_train.shape[0], 1)\r\n x_train = np.hstack((intercept, x_train)) # hstacks merges 2 arrays column wise\r\n weights = np.zeros(x_train.shape[1])\r\n for iteration in range(max_iter):\r\n weights = update_weights(x_train, y_train, weights, learning_rate)\r\n # printing cost for every 100 iterations\r\n if iteration % 100 == 0:\r\n print(calculate_cost(x_train, y_train, weights))\r\n return weights", "def initialisation(self):\n self.create_variables()\n self.create_placeholders()\n self.build_model()\n self.reset_lr(None, True)\n self.build_loss()\n 
self.initialised = True", "def reg_logistic_regression(y, tx, lambda_, initial_w, max_iters, gamma, verbose=False): \n reg_loss, reg_grad = add_l2_reg(compute_logistic_loss, \n compute_logistic_gradient,\n lambda_)\n \n return gradient_descent(y, tx, initial_w, max_iters, gamma, reg_loss, reg_grad)", "def __init__(self, *args, **kwargs):\n self.params = kwargs\n self.output_len = kwargs['num_neurons']\n self.input_len = kwargs['input_len']\n self.weights = Vector(data=np.random.randn(self.output_len, self.input_len))\n self.biases = Vector(data=np.zeros((self.output_len, 1)))\n self.input_activations = None\n self.output_activations = Vector()", "def reset_parameters(self):\n logger.info('===== Initialize %s with normal distribution =====' % self.__class__.__name__)\n for n, p in self.named_parameters():\n init_like_transformer_xl(n, p, std=0.02)", "def __init__(self, **kwargs):\n super(RidgeRegressionComb, self).__init__(**kwargs)\n self.time_window = None\n self.alphas = None\n self.lst_features = None\n self.target_var = None\n self.n_outputs = None\n self.history_buffer = None\n self.feature_aggregator = None\n self.target_aggregator = None\n self.model = None\n self.is_adaptive = None\n #self.pub_feature_rel = None\n self.pub_r2 = None\n self.pub_std = None\n # Feature space scaling parameters\n self.scaler = None\n self.r2 = None\n self.pub_mean = None\n self.mean = None\n self.std = None\n self.cache_file = []", "def __init__(self, lam=1.0):\n self.lam = lam\n\n # these are set in fit\n self.b = None # float\n self.w = None # (nvars, ) array", "def analysis(houses:pd.DataFrame) -> None:\n \n \"\"\"\n #Me just trying to fit the data without any outside influences\n f= f'SELLER_HOUSE ~ SQFT_PER + PRICE + C(LOCATION)' \n result= smf.logit(formula= str(f), data= houses).fit()\n print(result.summary2())\n y= ['SELLER_HOUSE']\n x= ['SQFT_PER', 'PRICE', 'LOC_699 - Not Defined', 'LOC_AA - Airport Area', 'LOC_CG - Columbus Grove',\n 'LOC_CV - Cypress Village', 'LOC_EASTW - Eastwood', 'LOC_EC - El Camino Real', 'LOC_GP - Great Park',\n 'LOC_IRSP - Irvine Spectrum', 'LOC_LGA - Laguna Altura', 'LOC_NK - Northpark', 'LOC_NW - Northwood', \n 'LOC_OC - Oak Creek', 'LOC_OH - Orchard Hills', 'LOC_OT - Orangetree', 'LOC_PS - Portola Springs', \n 'LOC_QH - Quail Hill', 'LOC_SH - Shady Canyon', 'LOC_SJ - Rancho San Joaquin', 'LOC_STG - Stonegate', \n 'LOC_Stonegate', 'LOC_TR - Turtle Rock', 'LOC_TRG - Turtle Ridge', 'LOC_UP - University Park',\n 'LOC_UT - University Town Center', 'LOC_WB - Woodbridge', 'LOC_WD - Woodbury', \n 'LOC_WI - West Irvine', 'LOC_WN - Walnut (Irvine)', 'LOC_WP - Westpark']\n x_train, x_test, y_train, y_test= train_test_split(houses[x], houses[y], test_size= 0.3, random_state= 500)\n logreg = LogisticRegression()\n logreg.fit(x_train, y_train.values.ravel())\n y_pred= logreg.predict(x_test)\n print('Accuracy of logistic regression classifier on test set:', round(logreg.score(x_test, y_test), 3))\n # This model is really bad\n \n \"\"\"\n \n \"\"\n houses= houses.drop(['DAYS_ON_MARKET', 'ADDRESS', 'LOCATION',\n 'STATUS', 'PROPERTY_TYPE', 'ZIP_CODE'], axis= 1)\n columns= houses.columns.values.tolist()\n y= ['SELLER_HOUSE']\n x= [i for i in columns if i not in y]\n \n # Over Sampling Using SMOTE \n x_train, _, y_train, _= train_test_split(houses[x], houses[y], test_size= 0.3, random_state= 500)\n x_columns= x_train.columns\n \n os= SMOTE(random_state= 0)\n os_x, os_y= os.fit_sample(x_train, y_train)\n os_x= pd.DataFrame(data= os_x, columns= x_columns)\n os_y= pd.DataFrame(data= 
os_y, columns= y)\n \n \n #Recursive Feature Elimination\n logreg= LogisticRegression(max_iter= 600)\n rfe= RFE(logreg, 20)\n rfe= rfe.fit(os_x, os_y.values.ravel())\n \n lst= [i for count, i in enumerate(x) if rfe.support_[count] == True]\n X= os_x[lst]\n Y= os_y['SELLER_HOUSE']\n \n \n #logit_model= sm.Logit(Y, X)\n #result= logit_model.fit()\n #print(result.summary2()) # Model choosen by RCE\n \n #These are features have a p-value less than 0.05\n final_x= ['BATHS', 'ZIP_92602.0', 'ZIP_92618.0', 'LOC_699 - Not Defined', 'LOC_TR - Turtle Rock', 'LOC_WD - Woodbury']\n #final_x= ['ZIP_92602.0', 'LOC_699 - Not Defined', 'LOC_TR - Turtle Rock', 'LOC_WD - Woodbury']\n X2= os_x[final_x]\n \n logit_model2= sm.Logit(Y, X2)\n result2= logit_model2.fit()\n print(result2.summary2()) # Final Model\n \n x_train2, x_test2, y_train2, y_test2= train_test_split(X2, Y, test_size= 0.3, random_state= 500)\n logreg = LogisticRegression()\n logreg.fit(x_train2, y_train2)\n \n y_pred= logreg.predict(x_test2)\n print('Accuracy of logistic regression classifier on test set:', round(logreg.score(x_test2, y_test2), 2))\n \n conf_matrix= confusion_matrix(y_test2, y_pred)\n print(conf_matrix)\n # So 22+61 correct predictions and 13+44 wrong predictions\n \n logit_roc_auc = roc_auc_score(y_test2, logreg.predict(x_test2))\n fpr, tpr, _ = roc_curve(y_test2, logreg.predict_proba(x_test2)[:,1])\n plt.figure()\n plt.plot(fpr, tpr, label='Logistic Regression (area = %0.2f)' % logit_roc_auc)\n plt.plot([0, 1], [0, 1],'r--')\n plt.xlim([0.0, 1.0])\n plt.ylim([0.0, 1.05])\n plt.xlabel('False Positive Rate')\n plt.ylabel('True Positive Rate')\n plt.title('Receiver operating characteristic')\n plt.legend(loc=\"lower right\")\n plt.show()\n \"\"", "def __init__(self, dataset, model, quality_method, n_horizon = 10):\n LalEnv.__init__(self, dataset, model, quality_method)\n self.n_horizon = n_horizon", "def run_logistic_regression(training, testing, feature_cols, outcome_col):\n if 'intercept' not in training.columns:\n training['intercept'] = 1\n if 'intercept' not in testing.columns:\n testing['intercept'] = 1\n intercept_feature_cols = feature_cols + ['intercept']\n logit = sm.Logit(training[outcome_col], training[intercept_feature_cols])\n fitted_logit_model = logit.fit()\n logit_diagnostics = get_diagnostics(testing[outcome_col], testing[intercept_feature_cols], fitted_logit_model, model_type = 'logit')\n predicted_logit_probs = fitted_logit_model.predict(testing[intercept_feature_cols])\n\n return fitted_logit_model, logit_diagnostics, predicted_logit_probs", "def __init__(self):\n self.model = GaussianNB();\n self.X = iris.data\n self.y = iris.target", "def initialize_parameters(self, X):\n self.n_samples, self.n_visible = X.shape[:2]\n if self.marginal_description == 'discrete':\n values_in_data = set(np.unique(X).tolist())-set([self.missing_values])\n self.dim_visible = int(max(values_in_data)) + 1\n if not set(range(self.dim_visible)) == values_in_data:\n print(\"Warning: Data matrix values should be consecutive integers starting with 0,1,...\")\n assert max(values_in_data) <= 32, \"Due to a limitation in np.choice, discrete valued variables\" \\\n \"can take values from 0 to 31 only.\"\n self.initialize_representation()", "def __init__(self, K, scenario, distrib, σ=.5, α=0):\n self.K = K\n self.scenario = scenario\n self.distrib = distrib\n self.α = α\n self.σ = σ\n \n # initialize parameter theta\n if scenario == 'sparse':\n # sparse model\n self.θ = np.zeros(K)\n self.θ[0] = 0.5\n \n elif scenario == 'alpha':\n # 
exponential decrease model\n assert α != 0\n self.θ = np.ones(K)\n for k in range(1, K):\n self.θ[k] = self.θ[k] - (k/K)**self.α", "def init_params(self):\n self.conv = Conv(self.conv_layers[0][-1], self.out_channels, padding=self.padding,stride=self.stride)\n self.W = torch.randn(self.num_labels, self.cout_numel, requires_grad=True)\n self.T = torch.randn(self.num_labels, self.num_labels, requires_grad=True)", "def _initialize_parameters(self, layer_dimensions, layer_activations, cost_function):\n self.layer_dims = layer_dimensions\n self.layer_num = len(self.layer_dims)\n self.layer_activations = layer_activations\n self.parameters = {}\n self.cost_function = cost_function\n\n assert(len(self.layer_activations) == len(self.layer_dims),\n 'Number of layers in layer_dimensions: {} and layer_activations: {} are not matching'.format(self.layer_num, len(self.layer_activations)))\n\n for l in range(1, self.layer_num):\n self.parameters['W' + str(l)] = np.random.randn(self.layer_dims[l], self.layer_dims[l-1])\n self.parameters['b' + str(l)] = np.zeros(self.layer_dims[l], 1)" ]
[ "0.77980274", "0.7470837", "0.72140586", "0.6893265", "0.68832946", "0.6734596", "0.6729252", "0.66861194", "0.6655325", "0.65969175", "0.6565215", "0.65200406", "0.6474855", "0.64675874", "0.6451465", "0.6448672", "0.644657", "0.6324524", "0.6307155", "0.6290791", "0.62315893", "0.62301457", "0.6222248", "0.6154533", "0.61502033", "0.6148811", "0.6125417", "0.61204934", "0.61064243", "0.6093672", "0.6088524", "0.6080889", "0.6050219", "0.6046596", "0.6040557", "0.6031908", "0.6028566", "0.6023297", "0.60062516", "0.5993352", "0.5987748", "0.5983857", "0.59810305", "0.5980382", "0.5979143", "0.5969887", "0.5963381", "0.59565353", "0.5953519", "0.5939781", "0.59362453", "0.59268403", "0.5926609", "0.5925465", "0.59206647", "0.5918985", "0.5916183", "0.5914883", "0.59142774", "0.59103984", "0.590788", "0.5906587", "0.590478", "0.5889152", "0.58859813", "0.5872667", "0.58725154", "0.5858586", "0.58549976", "0.58484834", "0.58379346", "0.5833001", "0.58321357", "0.58266526", "0.58260936", "0.5820162", "0.58112425", "0.5806894", "0.5806598", "0.57934475", "0.579298", "0.5790313", "0.57857645", "0.5784461", "0.5782379", "0.5779636", "0.57781506", "0.57735914", "0.57718784", "0.57704157", "0.57665557", "0.57645375", "0.5755608", "0.5753251", "0.57422477", "0.5737859", "0.57312804", "0.57284355", "0.5725735", "0.5723162", "0.5720925" ]
0.0
-1
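The record above pairs the query "Initialize the parameters of the logistic regression" with a Theano-based document that zero-fills the weight matrix W of shape (n_in, n_out) and the bias vector b of shape (n_out,) before wrapping them in theano.shared variables. As a minimal sketch of that same zero initialization in plain NumPy — the helper names init_logreg_params and predict_proba, the example shapes, and the use of bare arrays instead of theano.shared wrappers are assumptions introduced here for illustration, not part of the dataset row:

import numpy as np

def init_logreg_params(n_in, n_out, dtype=np.float32):
    # Zero-filled weight matrix: column k is the separating hyperplane for class k.
    W = np.zeros((n_in, n_out), dtype=dtype)
    # Zero-filled bias vector: element k is the free parameter of hyperplane k.
    b = np.zeros((n_out,), dtype=dtype)
    return W, b

def predict_proba(X, W, b):
    # softmax(X W + b), i.e. the p_y_given_x computed in the record's document.
    logits = X @ W + b
    logits -= logits.max(axis=1, keepdims=True)  # subtract row max for numerical stability
    e = np.exp(logits)
    return e / e.sum(axis=1, keepdims=True)

# Usage sketch with assumed MNIST-like shapes (784 inputs, 10 classes):
W, b = init_logreg_params(n_in=784, n_out=10)
X = np.random.rand(5, 784).astype(np.float32)
y_pred = predict_proba(X, W, b).argmax(axis=1)  # predicted class per example

Zero initialization is adequate for plain logistic regression because its negative-log-likelihood objective is convex in W and b, so gradient descent needs no symmetry-breaking random weights the way a hidden layer would.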
Return a float representing the number of errors in the minibatch over the total number of examples of the minibatch ; zero one loss over the size of the minibatch
def errors(self, target): return T.mean(T.neq(self.y_pred, T.argmax(target, axis=1)))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def train_error(self):\n self.prediction = self.predict()\n pred = self.prediction.reshape(-1)\n self.error = np.sum(pred != self.label) / self.train_data.shape[0]\n return(self.error)", "def nb_errors_nb(self, input_data, target):\n input_data_resize = input_data.view(2000, 1, 14, 14)\n number_output = self(input_data_resize)\n number_output = number_output.view(1000, 2, 10)\n predicted_classes = number_output.argmax(2)\n predictions = predicted_classes[:, 0] <= predicted_classes[:, 1]\n target_labels = target\n nb_errors = torch.sum(predictions.type(torch.LongTensor) != target_labels)\n return float(nb_errors) * 100 / input_data.size(0)", "def _calculateIterations(self):\n #iterations = self.nb_images/self.batchsize\n imgs = self.protofile.nb_test()\n batch = self.protofile.batch_test()\n iterations = imgs/batch\n if imgs % batch != 0:\n iterations += 1\n return iterations", "def compute_nb_errors(model, data_input, data_target, mini_batch_size):\n nb_data_errors = 0\n misclassifications = torch.zeros(data_input.size(0),1)\n \n for b in range(0, data_input.size(0), mini_batch_size):\n output = model.forward(data_input.narrow(0, b, mini_batch_size))\n for k in range(mini_batch_size):\n if torch.max(data_target.data[b + k], 0)[1] != torch.max(output[k], 0)[1]:\n nb_data_errors += 1\n misclassifications[b+k, 0] = 1\n else:\n misclassifications[b+k, 0] = 0\n return nb_data_errors, misclassifications", "def _wer_compute(errors: Tensor, total: Tensor) ->Tensor:\n return errors / total", "def _mer_compute(errors: Tensor, total: Tensor) ->Tensor:\n return errors / total", "def get_error(scores, labels):\r\n bs = scores.size(0) # 'bs' stands for 'batch size'\r\n predicted_labels = scores.argmax(dim = 1) # Tensor with 'bs' entries\r\n indicator = (predicted_labels == labels) # Tensor containing 'True' for each success\r\n num_matches = indicator.sum().item()\r\n return 1 - (num_matches / bs)", "def get_success_rate(batch_size, x_clean, x_key, y_clean):\n num_test_batches = len(x_clean) // batch_size\n \n def cond(i, *unused_args):\n return i < num_test_batches\n\n def body(i, cnt_all, cnt_trg):\n \"\"\"Compute the sum of all metrics.\"\"\"\n test_clean = ibp.build_dataset((x_clean, y_clean), batch_size=batch_size,\n sequential=True)\n p_clean = tf.argmax(\n predictor(test_clean.image, override=True, is_training=False),\n 1\n )\n test_key = ibp.build_dataset((x_key, y_clean), batch_size=batch_size,\n sequential=True)\n p_key = tf.argmax(\n predictor(test_key.image, override=True, is_training=False),\n 1\n )\n\n alt_all = tf.math.not_equal(p_clean, TRG_LBL)\n alt_trg = tf.math.logical_and(alt_all, tf.math.equal(p_key, TRG_LBL))\n new_all = cnt_all + tf.reduce_sum(tf.cast(alt_all, tf.float32))\n new_trg = cnt_trg + tf.reduce_sum(tf.cast(alt_trg, tf.float32))\n\n return i + 1, new_all, new_trg\n\n total_count = tf.constant(0, dtype=tf.int32)\n total_all = tf.constant(0, dtype=tf.float32)\n total_trg = tf.constant(0, dtype=tf.float32)\n total_count, total_all, total_trg = tf.while_loop(\n cond,\n body,\n loop_vars=[total_count, total_all, total_trg],\n back_prop=False,\n parallel_iterations=1)\n total_count = tf.cast(total_count, tf.float32)\n return total_trg / tf.maximum(total_all, 1.0)", "def error(self):\n self.mean_error = tf.reduce_mean(self.errors, name=\"mean_error\")\n return(self.mean_error)", "def error_rate(predictions, labels):\n return 100.0 - (100*(np.sum(predictions == labels)/float(predictions.shape[0]*predictions.shape[1])))", "def psnr_error(gen_frames, gt_frames):\n shape = 
tf.shape(gen_frames)\n num_pixels = tf.to_float(shape[1] * shape[2] * shape[3])\n square_diff = tf.square(gt_frames - gen_frames)\n\n batch_errors = 10 * log10(1 / ((1 / num_pixels) * tf.reduce_sum(square_diff, [1, 2, 3])))\n return tf.reduce_mean(batch_errors)", "def min_num_iterations_():\n rows, cols = map_shape\n error = 1\n it = 0\n minErr = 1e-4\n while (error > minErr):\n bkp_utilities = utilities.copy()\n update_utils(utilities, map_shape, map_arr, rewards, final_arr, actions, gamma)\n diff = [(bkp_utilities[(r,c)] - utilities[(r,c)]) for r in range(rows) for c in range(cols)]\n error = np.sqrt(np.dot(diff, diff))\n it += 1\n return it", "def _wil_compute(errors: Tensor, target_total: Tensor, preds_total: Tensor) ->Tensor:\n return 1 - errors / target_total * (errors / preds_total)", "def get_avg_loss(self):\n if self.n_batches > 0:\n avg_loss = self.loss / self.n_batches\n self.loss = 0\n self.n_batches = 0\n return avg_loss\n else:\n return 0", "def calc_errors(test_data, loc_by_img):\n one_km_count = 0\n five_km_count = 0\n ten_km_count = 0\n hundred_km_count = 0\n thousand_km_count = 0\n other_count = 0\n for test_img in test_data:\n img_id = test_img['watchlink']\n img_result_loc = loc_by_img[img_id]\n img_actual_loc = Location(float(test_img['latitude']), float(test_img['longitude']))\n error = Location.dist(img_result_loc, img_actual_loc)\n if error < 1:\n one_km_count += 1\n elif error < 5:\n five_km_count += 1\n elif error < 10:\n ten_km_count += 1\n elif error < 100:\n hundred_km_count += 1\n elif error < 1000:\n thousand_km_count += 1\n else:\n other_count += 1\n return [one_km_count, five_km_count, ten_km_count, hundred_km_count, thousand_km_count, other_count]", "def _compute_errors(self):\n self.errors = np.sqrt(self.data)\n self.errors[self.errors == 0.] 
= 1.", "def error_rate(predictions, labels):\n return 100.0 - (\n 100.0 *\n np.sum(np.argmax(predictions, 1) == np.argmax(labels, 1)) /\n predictions.shape[0])", "def __count_errors(node, testSet, res):\n training_results = __get_results(node) #Get a dictionary of labels and counts for the *training* data which made it to this node\n leaf_label = None #Initialize a label for this leaf\n majority_count = 0 #Initialize a variable to track the number of observations for the label with the most observations\n #Note that the steps below do not handle ties of the majority count in a nice way.\n for label, count in training_results.items(): #iterate through each pair of labels and counts from the training set\n if count > majority_count: #find the label with the highest count\n leaf_label = label #the label for the leaf is the label with the highest count\n majority_count = count #keep track of the count for the leaf_label\n \n wrong_labels = testSet[res].unique().tolist() #initialize wrong_labels to be all labels in the testSet\n if leaf_label in wrong_labels: #If the leaf label is in the list of labels for the part of the test set that got to this node\n wrong_labels.remove(leaf_label) #remove the leaf_label so that all which remains are incorrect labels\n \n wrong_count = 0 #Initialize a count of how many testSet observations will be classified incorrectly\n testCounts = testSet.groupby(res).size() #Get a series of the testSet labels and how many observations pertain to each label\n for label in wrong_labels: #Iterate over all the labels not equal to the leaf_label\n wrong_count += testCounts[label] #Sum up all of the observations with a label not equal to the leaf_label\n return wrong_count", "def _computeError(self, inputs, targets):\n return .5*np.sum((targets-self._pcnfwd(inputs))**2)", "def _cer_compute(errors: Tensor, total: Tensor) ->Tensor:\n return errors / total", "def compute_number_error(output_one_hot, target_one_hot):\n output = output_one_hot.argmax(dim=1)\n target = target_one_hot.argmax(dim=1)\n nb_of_error = (output != target).sum()\n return nb_of_error", "def n_errors(gold_tokens, pred_tokens):\n return len(gold_tokens) + len(pred_tokens) - 2 * _n_matches(gold_tokens, pred_tokens)", "def error(self): \n if not self.terminal:\n err = sum([v**2 for v in self.state + self.q[:-1]])\n else:\n err = sum([v**2 for v in LIMITS[:9]] + [1.0 - LIMITS[9]**2])\n err *= (self.max_steps - self.steps)\n return err", "def error_rate(self):\n\n\t\treturn theano.tensor.mean(theano.tensor.neq(\n\t\t\tself.get_symbolic_predicted_labels(),\n\t\t\tself.symbolic_output))", "def error_rate(predictions, labels):\n return 100.0 - (\n 100.0 *\n np.sum(np.argmax(predictions, 1) == np.argmax(labels, 1)) /\n predictions.shape[0])", "def mb_r(self) -> float:\n # Calculate metric\n n = self.predicted.size\n tot = 0.0\n for i in range(n):\n tot = tot + np.sum(np.abs(self.predicted - self.true[i]))\n mae_val = np.sum(np.abs(self.predicted - self.true)) / n\n mb = 1 - ((n ** 2) * mae_val / tot)\n\n return float(mb)", "def calc_error_dist(self):\n pass", "def error_rate(predictions, labels):\n return 100.0 - (\n 100.0 *\n np.sum(np.argmax(predictions, 1) == labels) /\n predictions.shape[0])", "def error(self, trainset: ([], [])):\n # MSE = Σ | d – y |^2 / n\n error_sum = 0.0\n for index, example in enumerate(trainset):\n # | d – y |^2\n output = self.activate(example[0])\n\n target = example[1][0]\n\n error = target - output\n error_sum += error ** 2\n\n # Σ |error_sum| / n\n error_sum = error_sum / 
len(trainset)\n return error_sum", "def get_error_rate(self, points, labelled_centroids):\n classified_incorrect = 0\n for (label, point) in points:\n classified_label = self.classify_point(point, labelled_centroids)\n if classified_label != label:\n classified_incorrect +=1\n error_rate = classified_incorrect / float(len(points))\n return error_rate", "def rmsError(self, yTrue, yPred):\n if len(yPred) != len(yTrue):\n raise ValueError(\"Lengths of predicted and actual values doesn't match.\")\n\n noneCount = 0\n loss = 0\n for i in range(len(yTrue)):\n if yPred[i] == None:\n noneCount+=1\n else:\n loss += (yTrue[i] - yPred[i])**2\n loss = 0.5 * loss/len(yTrue)-noneCount\n return round(math.sqrt(loss), 2)", "def part_1() -> int:\n initial_input = _load_input()\n rows = len(initial_input)\n cols = len(initial_input[0])\n\n input = initial_input.copy()\n total_glow_count = 0\n\n for _ in range(100):\n flashed = list()\n for row in range(rows):\n for col in range(cols):\n coords = [[col, row]]\n new_input, glow_count = _get_glow_counts(coords, input, flashed)\n input = new_input\n total_glow_count += glow_count\n\n return total_glow_count", "def error_rate(predictions, labels):\n return 100.0 - (\n 100.0 *\n numpy.sum(numpy.argmax(predictions, 1) == labels) /\n predictions.shape[0])", "def validation_size(self) -> int:\n return int(self.data_size * self.__validation_fraction)", "def test_size(self) -> int:\n return int(self.data_size * self.__test_fraction)", "def error_rate(predictions, labels):\n return 100.0 - (\n 100.0 *\n np.sum(np.argmax(predictions, 1) == labels) /\n predictions.shape[0])", "def expected_width(self):\n\t\treturn self.expected_tile_width * TILE_SIZE", "def _error_count(cls, samples: Samples) -> int:\n return cls.__sample_count(samples, \"false\")", "def label_errors(preds, labels):\n num_correct = num_correct_fun(preds, labels)\n return (1.0 - num_correct / preds.size(0)) * 100.0", "def compute_error(y_true, y_pred):\r\n length = len(y_true)\r\n\r\n error_cnt = 0\r\n\r\n for i in range (length):\r\n if y_true[i] != y_pred[i]:\r\n error_cnt = error_cnt+1\r\n error = (1/length) * error_cnt\r\n return error", "def cps_err(self):\n return np.sqrt(self.totalcounts) / self.exptime", "def __len__(self):\n # print(\"len: \" + str(math.floor(len([name for name in os.listdir(self.imgs_dir) if os.path.isfile(self.imgs_dir+'//'+name)])/self.batch_size)-1)\n return math.floor(len([name for name in os.listdir(self.imgs_dir) if\n os.path.isfile(self.imgs_dir + '//' + name)]) / self.batch_size)", "def total_sdram_requirements(self):", "def success_rate(model,target,img_size,discrepancy_threshold,success_threshold=70):\n # Set model to evaluation mode\n model.eval()\n # Execute trained model to data\n out, _ = model(target.float())\n # Loop over all output data\n for i in range(len(out)):\n # Normalized outputs\n out[i][0] = (out[i][0]-out[i][0].min())/(out[i][0].max()-out[i][0].min())\n # Calculate difference between original and output images\n diff = abs(out-target).reshape(len(out),img_size,img_size).data.numpy()\n acc = numpy.array([len(var[numpy.where(var<discrepancy_threshold)]) for var in diff])\n acc = acc/img_size**2*100\n # Calculate success rate\n success_rate = sum(i>success_threshold for i in acc)/len(acc)*100\n # Display the following:\n # - Success rate\n # - Success threshold above which a single image is considered to be well reconstructed\n # - Display reconstruction threshold (1 minus discrepancy threshold) above which a single\n # pixel is considered to be well 
reconstructed\n print('%.2f%% of the images have'%success_rate,\n '%i%% of their pixels with'%success_threshold,\n '%i%% reconstruction fidelity'%((1-discrepancy_threshold)*100))\n return out,acc", "def size(self, batch):\n x,y,m = batch \n return sum([mm.sum() for mm in m])", "def error(self, in_sample=True):\n if in_sample:\n error = 0.0\n for i, point in enumerate(self.X):\n if self.Y[i] != self.rbf_classify(point):\n error += 1\n return error / 100\n else:\n error = 0.0\n for i, point in enumerate(self.test_X):\n if self.test_Y[i] != self.rbf_classify(point):\n error += 1\n return error / 10000", "def n_rounds(self) -> int:\n return self.y.shape[0]", "def is_error(ranking, references):\n return 1 if average_precision(ranking, references) < 1 else 0", "def rmspe(self) -> float:\n return float(np.sqrt(np.mean(np.square(((self.true - self.predicted) / self.true)), axis=0)))", "def get_loss(self):\n return self.loss / self.cnt", "def minimum_size(self):\n return self.r_eff*3", "def pred_error(f_pred, data, iterator, verbose=False):\n valid_err = 0\n for _, valid_index in iterator:\n x = [data[0][t]for t in valid_index]\n y = [data[1][t] for t in valid_index]\n align = [data[2][t] for t in valid_index]\n label = [data[3][t] for t in valid_index]\n x, x_mask, y, y_mask, align, label = \\\n prepare_reorderdata_minibatch(x, y, align, label)\n preds = f_pred(x, x_mask, y, y_mask)\n targets = numpy.array(y)\n valid_err += ((preds == targets)*y_mask).sum()/y_mask.sum()\n if verbose:\n print \"---- batch ----\"\n print \"predictions == labels?\"\n print preds == targets\n print \"preds\", preds\n print \"targets\", targets\n print \"mask\",y_mask\n valid_err = 1. - numpy_floatX(valid_err) / len(iterator)\n return valid_err", "def detection_error(in_softmax_scores, out_softmax_scores, num_delta):\n # 1. Init result\n result = 1.0\n # 2. 
Traversing delta\n # (1) Get delta_start & delta_end\n delta_start = np.minimum(np.min(in_softmax_scores), np.min(out_softmax_scores))\n delta_end = np.maximum(np.max(in_softmax_scores), np.max(out_softmax_scores))\n delta_gap = (delta_end - delta_start) / num_delta\n # (2) Traversing\n for delta in np.arange(delta_start, delta_end, delta_gap):\n tpr = np.sum(in_softmax_scores >= delta) / len(in_softmax_scores)\n fpr = np.sum(out_softmax_scores >= delta) / len(out_softmax_scores)\n result = np.minimum(result, (1.0 - tpr + fpr) / 2.0)\n # Return\n return result", "def __error(self,node_set):\n error=0\n for n in node_set:\n if(n.seq_num!=0):\n error+=LA.norm(n.node_vol-node_set[n.neighbor.parent].node_vol-n.impedance*n.branch_cur)\n #print n.node_vol, '\\n', node_set[n.neighbor.parent].node_vol\n \n return error", "def eval_error_metric(predt, dtrain: xgb.DMatrix):\n label = dtrain.get_label()\n r = np.zeros(predt.shape)\n gt = predt > 0.5\n if predt.size == 0:\n return \"CustomErr\", 0\n r[gt] = 1 - label[gt]\n le = predt <= 0.5\n r[le] = label[le]\n return 'CustomErr', np.sum(r)", "def num_training_examples(self):", "def error_count():\n return cpp_style.error_count()", "def success_rate(x_tapes):\n return np.sum([is_success(x_tape) for x_tape in x_tapes]) / len(x_tapes)", "def evaluate_errors_num_centres(\n inputs, targets, folds, scale, reg_param, num_centres_sequence=None):\n # fix the reg_param\n reg_param = 0.01\n # fix the scale\n scale = 100\n # choose a range of numbers of centres\n if num_centres_sequence is None:\n num_centres_sequence = np.arange(200, 250)\n num_values = num_centres_sequence.size\n num_folds = len(folds)\n #\n # create array to store results\n test_mean_errors = np.zeros(num_values)\n\n #\n # run the experiments\n for c, num_centres in enumerate(num_centres_sequence):\n centres = np.linspace(0, 1, num_centres)\n feature_mapping = construct_rbf_feature_mapping(centres, scale)\n designmtx = feature_mapping(inputs)\n # r is the index of reg_param, reg_param is the regularisation parameter\n # cross validate with this regularisation parameter\n train_errors, test_errors = cv_evaluation_linear_model(\n designmtx, targets, folds, reg_param=reg_param)\n # we're interested in the average (mean) training and testing errors\n test_mean_error = np.mean(test_errors)\n # store the results\n test_mean_errors[c] = test_mean_error\n\n return test_mean_errors", "def calculate_percent_error(self, X, y):\r\n pred_out = np.argmax(self.predict(X), axis=1)\r\n ec=0\r\n for i in range(pred_out.shape[0]):\r\n if not tf.math.equal(pred_out[i], y[i]):\r\n ec+=1\r\n prcnt_error = ec/pred_out.shape[0]\r\n return prcnt_error", "def n_train(self):\n return self.factors[0].shape[0]", "def test_dimensions(self):\n\t\t\n\t\t# default\t\n\t\tdetails = self.watcher.describe()\n\t\tprint(details)\n\t\t\n\t\t# default\t\n\t\tdetails = self.watcher.describe(layers=[self.first_layer])\n\t\tprint(details)\n\t\tN = details.N.to_numpy()[0]\n\t\tM = details.M.to_numpy()[0]\n\t\trf = details.rf.to_numpy()[0]\n\t\tnum_evals = details.num_evals.to_numpy()[0]\n\t\tprint(N,M,rf,num_evals)\n\t\t\n\t\tself.assertEqual(N,64)\n\t\tself.assertEqual(M,3)\n\t\tself.assertEqual(rf,9)\n\t\tself.assertEqual(num_evals,M*rf)", "def __len__(self):\n return math.ceil(self.number_of_images / self.batch_size)", "def compute_error(self, X, Y):\n\n if self.method != 'knn':\n accuracy = self.classifier.score(X, Y)\n error = 1 - accuracy\n return error\n else:\n distances, indices = self.classifier.kneighbors(X)\n error = 0\n 
for index, ground_truth in zip(indices, Y):\n classes = [self.train_Y[neigbhor] for neigbhor in index]\n mode, _ = stats.mode(classes)\n if mode != ground_truth:\n error += 1\n\n return error / len(Y)", "def calculate_test_error(result, test_label, test_sad):\n result = np.round(result).astype(int)\n nn_cost = np.mean(np.abs(test_label - result), axis=(1, 2, 3))\n\n # calculate switchable filter loss\n switch_cost = np.stack([nn_cost, test_sad])\n switch_cost = np.min(switch_cost, axis=0)\n\n return np.mean(nn_cost), np.mean(test_sad), np.mean(switch_cost)", "def overall_reduction(self):\n return 84", "def __len__(self) -> int:\n num_batches, remainder = divmod(len(self.mapped_triples), self.batch_size)\n if remainder and not self.drop_last:\n num_batches += 1\n return num_batches", "def error_count(self):\n return len(self.errors)", "def __deep_count_errors(node, testSet, res):\n if node.results is not None: #Check if this node is a leaf node\n return __count_errors(node, testSet, res) #If so, return the test set classification errors made by this node.\n else:\n tbSet = testSet[testSet[node.col] >= node.value] #find which test observations belong to this tree's true branch\n fbSet = testSet[testSet[node.col] < node.value] #find which test observations belong to this tree's false branch\n \n if node.tb.results is None: #Check if the true branch is a branch node\n #If so, get the count of all misclassifications made by this branch's descendent leaf nodes on the test observations\n term1 = __deep_count_errors(node.tb, tbSet, res)\n else: #If the true branch is a leaf node, return the count of all test set classification errors made by the leaf.\n term1 = __count_errors(node.tb, tbSet,res)\n if node.fb.results is None: #Check if the false branch is a branch node\n #If so, get the count of all misclassifications made by this branch's descendent leaf nodes on the test observations\n term2 = __deep_count_errors(node.fb, fbSet, res)\n else: #If the false branch is a leaf node, return the count of all test set classification errors made by the leaf.\n term2 = __count_errors(node.fb, fbSet, res) \n return term1 + term2 #Sum the classification errors made by this nodes descendant leaves.", "def compute_error(y_true, y_pred):\r\n\r\n # INSERT YOUR CODE HERE\r\n \r\n n = len(y_true)\r\n err = [y_true[i] != y_pred[i] for i in range(n)]\r\n return sum(err) / n\r\n \r\n raise Exception('Function not yet implemented!')", "def computeErrorRate(test_sent, viterbi_tag_sequence):\n # initiate vars\n correct_predictions = 0\n total_predictions = 0\n correct_unknown_predictions = 0\n total_unknown_predictions = 0\n\n for j in range(len(test_sent)): # iterate tups in sent\n expectedTag = test_sent[j][1]\n actualTag = viterbi_tag_sequence[j]\n if actualTag == UNKNOWN_TAG:\n if expectedTag == UNKNOWN_TAG:\n correct_unknown_predictions += 1\n total_unknown_predictions += 1\n else:\n if actualTag == expectedTag:\n correct_predictions += 1\n total_predictions += 1\n\n err_rate_known = 1 - correct_predictions/total_predictions\n if total_unknown_predictions == 0:\n err_rate_unknown = 0\n else:\n err_rate_unknown = 1 - correct_unknown_predictions/total_unknown_predictions\n\n tot_pred = total_predictions + total_unknown_predictions\n corr_pred = correct_predictions + correct_unknown_predictions\n total_err = 1 - corr_pred/tot_pred\n\n return err_rate_known, err_rate_unknown, total_err", "def get_valid_data_size(self):\n return len(self.pipeline.data['test'])", "def _get_loss_weight(self) -> torch.Tensor:\n n_pos: 
torch.Tensor = 0.0\n n_neg: torch.Tensor = 0.0\n\n for _, ground_truth in self.train_loader:\n n_poss_curr = ground_truth.sum()\n n_pos += n_poss_curr\n n_neg += ground_truth.numel() - n_poss_curr\n\n eps = torch.finfo(n_pos.dtype).eps\n return n_neg / (n_pos + eps)", "def rate(self) -> float:\n return self.success_cnt / self.total_cnt if self.total_cnt > 0 else 1.0", "def realistic_error_rate(predictions, labels, predicted_hardness):\n # # print (predicted_hardness)\n # predicted_hardness = predicted_hardness / np.sum(predicted_hardness)\n # # print (np.argmax(predictions, 1) == labels)\n # # print (np.multiply(np.argmax(predictions, 1) == labels, np.squeeze(predicted_hardness)))\n # return 100.0 - 100 * np.sum(np.multiply(np.argmax(predictions, 1) == labels, np.squeeze(predicted_hardness)))\n # # return 100.0 - (\n # # 100.0 *\n # # np.sum(np.argmax(predictions, 1) == labels) /\n # # predictions.shape[0])\n print (np.sum(predicted_hardness))\n return 100.0 - 100 * (np.sum(np.multiply(np.argmax(predictions, 1) == labels, np.squeeze(predicted_hardness))) / np.sum(predicted_hardness))", "def sgd(iterations):\n for iteration in range(0,iterations):\n error = []\n for user_id in range(0,latent_user_preferences.shape[0]):\n for item_id in range(0,latent_item_features.shape[0]):\n rating = user_ratings[user_id][item_id]\n if rating != 99:\n err = train(user_id, item_id, rating)\n error.append(err)\n mse = (np.array(error) ** 2).mean() \n if(iteration%1 == 0):#000 == 0 ):\n print(mse)\n return error", "def __len__(self):\n return int(np.ceil(self.total_frame_count / self.batch_size))", "def calculate_error(k_means_matrix):\n return sum([min(dist) for dist in k_means_matrix])", "def measure_mini_batch_stats(sess, loss, accuracy, step, x, y,\n X_train, Y_train):\n t_c = sess.run(loss, feed_dict={x: X_train, y: Y_train})\n t_a = sess.run(accuracy, feed_dict={x: X_train, y: Y_train})\n print('\\tStep {step_number}:'.format(step_number=step))\n print('\\t\\tCost: {step_cost}'.format(step_cost=t_c))\n print('\\t\\tAccuracy: {step_accuracy}'.format(step_accuracy=t_a))", "def calculate_batch_metrics(self):\n pass", "def err_num(gold_label, labels):\n return len([x for x in labels if (gold_label != -1 and x != -1 and x != gold_label)])", "def get_error_count(self):\n return sum(1 for outcome in (r.outcome for r in self.values()) if outcome == Result.ERROR)", "def calc_error(datasample,boots_num):\r\n mse_list=[]\r\n datasample=df_to_array(datasample)\r\n for i in range(boots_num):\r\n boots_indexs,missing_indexs=bootstrapping(datasample)\r\n \r\n boostrapped_data=datasample[boots_indexs][0]\r\n \r\n boots_outsample_data=datasample[missing_indexs]\r\n \r\n\r\n # Train the model \r\n rf_kernal=Model_Train(boostrapped_data)\r\n \r\n # Test the model\r\n test_features=boots_outsample_data[:,:-1]\r\n test_labels=boots_outsample_data[:,-1]\r\n pred=rf_kernal.predict(test_features)\r\n \r\n \r\n # Can change to MAE, MSE\r\n \r\n me=np.mean(pred-test_labels)\r\n #mse=np.mean((pred-train_labels)**2)\r\n #mae=np.mean(np.abs(pred-train_labels))\r\n \r\n mse_list.append(me)\r\n print('Estimated Out of Sample Error=%f'%(np.mean(mse_list)))\r\n return np.mean(mse_list)", "def word_error_rate(output, ideal):\n return min_edit_distance(output, ideal)/len(ideal.split())", "def get_total_n_cpu(self) -> int:", "def train_size(self) -> int:\n return int(self.data_size * self.__train_fraction)", "def test_compute_metrics(self):\n with self.test_session() as sess:\n tf.set_random_seed(1234)\n dut = 
_setup_trainer(self.tmpdir)\n\n sess.run(tf.global_variables_initializer())\n sess.run((dut.train_iterator.initializer,\n dut.train_metric_reset_op))\n\n train_mloss = sess.run(dut.train_mean_loss)\n\n # Without update, it should be zero.\n self.assertEqual(train_mloss, 0.)\n\n sess.run((dut.train_op, dut.train_mean_loss_update_op))\n\n train_mloss = sess.run(dut.train_mean_loss)\n\n # After update.\n self.assertAlmostEqual(train_mloss, 5.2298584)", "def ExpectedMaxBatchSizes(self, run_params):\n return self.max_batch_sizes", "def get_num_measured_outputs(self):\n i = 0\n for o in self.outputs:\n if o.is_measured_output():\n i += 1\n return i", "def error(self, X, y):\n predicted = self.predict(X)\n y = self.transformy(y)\n return 1 - (y == predicted).sum() / predicted.size", "def importance_weighted_error(self):\n weighted_errors = self.i_s_weights * self.errors\n self.mean_error = tf.reduce_mean(weighted_errors, name=\"mean_error\")\n return(self.mean_error)", "def find_prediction_success_rate(decision_tree, test_examples, attributes):\n totalCorrect = 0\n for example in test_examples:\n actualResult = example[14]\n prediction = decision_tree_prediction(example, decision_tree, attributes)\n if prediction == actualResult:\n totalCorrect = totalCorrect + 1\n return totalCorrect / len(test_examples)", "def compute_accuracy(self):\n self.test_predictions = tf.cast(tf.argmax(self.test_logits, 1), tf.int32)\n correct = tf.equal(self.episode.test_labels, self.test_predictions)\n return tf.reduce_mean(tf.cast(correct, tf.float32))", "def batch_min_healthy_percentage(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"batch_min_healthy_percentage\")", "def penalty(self):\n return 0", "def test_min_matrix_shape(self):\n\n\t\tdetails = self.watcher.describe(min_evals=30)\n\t\tprint(details)\n\n\t\tfor nev in details.num_evals:\n\t\t\tself.assertGreaterEqual(nev, 30)", "def error_rate_impurity(X_valid_encoded, X_valid, y_valid, k=18):\n errors = 0\n impurities = 0\n for i, x_enc in enumerate(X_valid_encoded):\n top_k_indices = ann.knn(x_enc, X_valid_encoded, k)\n label = y_valid[i]\n votes_against = 0\n for index in top_k_indices:\n if label != y_valid[index]:\n votes_against += 1\n if votes_against > math.ceil(k / 2):\n errors += 1\n impurities += votes_against\n error_rate = errors * 100. 
/ X_valid.shape[0]\n impurity = impurities / (X_valid.shape[0] * k)\n return error_rate, impurity", "def test_size_check(self):\n [x1, y1, s1, g1] = self.data.diffusion_data.shape\n [x2, y2, s2, g2] = module_05.run_module(self.data).diffusion_data.shape\n self.assertEqual(x1, x2)\n self.assertEqual(y1, y2)\n self.assertEqual(s1, s2)\n self.assertEqual(g1, g2)", "def _wip_compute(errors: Tensor, target_total: Tensor, preds_total: Tensor) ->Tensor:\n return errors / target_total * (errors / preds_total)", "def calcError(net, net_labels, dataset_name, dataloader, dataset, doGPU):\n # note: net_labels is a list of pairs (RAP_name, PETA_name) of attribute names\n net_attr_nbr = len(net_labels)\n assert (net_attr_nbr == 49)\n \n total = 0\n correct = 0\n batch_nbr = 0\n per_attrib_total = torch.zeros([net_attr_nbr], dtype=torch.int64) # size [92]\n per_attrib_correct = torch.zeros([net_attr_nbr], dtype=torch.int64) # size [92]\n per_attrib_1_pred = torch.zeros([net_attr_nbr], dtype=torch.int64) # size [92]\n per_attrib_class_accuracy = torch.zeros([net_attr_nbr], dtype=torch.float) # size [92]\n if doGPU:\n per_attrib_total = per_attrib_total.cuda()\n per_attrib_correct = per_attrib_correct.cuda()\n per_attrib_1_pred = per_attrib_1_pred.cuda()\n per_attrib_class_accuracy = per_attrib_class_accuracy.cuda()\n \n with torch.no_grad():\n # loop over batches\n # accumulate per-attribute and total number of correct predictions\n for i_batch, sample_batched in enumerate(dataloader):\n assert (sample_batched['image'].shape[1:] == (3,128,48)), \"wrong image size\"\n batch_nbr += 1\n real_batch_size = sample_batched['image'].shape[0]\n total += real_batch_size * net_attr_nbr\n per_attrib_total += real_batch_size # size [net_attr_nbr]\n assert (per_attrib_total.sum().item() == total)\n try:\n assert (batch_nbr == math.ceil(per_attrib_total[0].item()/Param_Batchsize))\n except AssertionError:\n ipdb.set_trace()\n pass\n\n\n # prepare data for prediction\n if doGPU:\n inp = Variable(sample_batched['image'].float().cuda())\n else:\n inp = Variable(sample_batched['image'].float())\n\n # retrieve ground truth\n dataset_lab_gt = sample_batched['label'] # shape == [50,NB_ATTRIB]\n\n # convert ground truth to model attributes\n if dataset_name == 'datasetRAPPETA':\n assert (dataset_lab_gt.shape[1] == 49)\n # no conversion needed, use ground truth as it is\n lab_gt = dataset_lab_gt\n elif dataset_name == 'datasetRAP':\n assert (dataset_lab_gt.shape[1] == 92)\n # note: in the line below dataset_lab_gt.shape[0] is better than \n # Param_Batchsize because the last batch may be incomplete\n lab_gt = torch.zeros((dataset_lab_gt.shape[0],net_attr_nbr), dtype=dataset_lab_gt.dtype)\n net_labels_RAP = [rap_label for rap_label,peta_label in net_labels]\n for attr_idx,attr_name in enumerate(net_labels_RAP):\n lab_gt[:,attr_idx] = dataset_lab_gt[:,dataset.index_of(attr_name)]\n elif dataset_name == 'datasetPETA':\n assert (dataset_lab_gt.shape[1] == 104)\n # note: in the line below dataset_lab_gt.shape[0] is better than \n # Param_Batchsize because the last batch may be incomplete\n lab_gt = torch.zeros((dataset_lab_gt.shape[0],net_attr_nbr), dtype=dataset_lab_gt.dtype)\n net_labels_PETA = [peta_label for rap_label,peta_label in net_labels]\n for attr_idx,attr_name in enumerate(net_labels_PETA):\n lab_gt[:,attr_idx] = dataset_lab_gt[:,dataset.index_of(attr_name)]\n else:\n print('Unknown dataset \\'' + dataset_name + '\\'')\n sys.exit(1)\n\n # 'format' ground truth for Torch\n lab_gtv = Variable(lab_gt)\n if doGPU:\n lab_gtv = 
lab_gtv.cuda()\n\n # do prediction\n logits = net.forward(inp) # output without Sigmoid\n predictions = (logits > 0).int() # size [50, net_attr_nbr]\n assert (net_attr_nbr == predictions.shape[1])\n\n # accumulate total number of correct predictions\n correct += (lab_gtv == predictions).sum()\n\n # accumulate per-attribute number of correct predictions\n per_batch_and_attrib_correct = (lab_gtv == predictions) # size [50, net_attr_nbr]\n #if doGPU:\n # per_batch_and_attrib_correct = per_batch_and_attrib_correct.cpu()\n per_attrib_correct += per_batch_and_attrib_correct.sum(0) # size [net_attr_nbr]\n assert (per_attrib_correct.sum().item() == correct)\n\n # accumulate number of 1 predictions for each attribute\n per_attrib_1_pred += predictions.sum(0) # size [net_attr_nbr]\n\n # accumulate for class-accuracy\n per_batch_and_attrib_1_good_prediction = (predictions.byte() * per_batch_and_attrib_correct).sum(0) #size [net_attr_nbr]\n per_batch_and_attrib_0_good_prediction = ((1 - predictions.byte()) * per_batch_and_attrib_correct).sum(0) #size [net_attr_nbr]\n assert torch.equal(per_batch_and_attrib_1_good_prediction + per_batch_and_attrib_0_good_prediction, per_batch_and_attrib_correct.sum(0))\n per_batch_and_attrib_1_ground_truth = lab_gtv.sum(0) #size [net_attr_nbr]\n per_batch_and_attrib_0_ground_truth = (1 - lab_gtv).sum(0) #size [net_attr_nbr]\n try:\n assert torch.equal(per_batch_and_attrib_1_ground_truth + per_batch_and_attrib_0_ground_truth, torch.tensor([real_batch_size] * net_attr_nbr).cuda())\n except AssertionError:\n print(\"per_batch_and_attrib_1_ground_truth + per_batch_and_attrib_0_ground_truth=\")\n print(per_batch_and_attrib_1_ground_truth + per_batch_and_attrib_0_ground_truth)\n ipdb.set_trace()\n pass\n\n per_batch_and_attrib_recall_1 = per_batch_and_attrib_1_good_prediction.float() / per_batch_and_attrib_1_ground_truth.float() #size [net_attr_nbr]\n # nan values appear when ground_truth number of 1 value is 0\n # in this case, good_prediction can not be different of 0\n # (there can not be a good prediction of 1 because there is not\n # any 1 in the ground truth)\n # so a nan appears only when recall = 0 good pred / 0 case in ground truth\n # so recall=nan can be safely replaced by a recall=1\n person.replace_nan_by_one(per_batch_and_attrib_recall_1)\n per_batch_and_attrib_recall_0 = per_batch_and_attrib_0_good_prediction.float() / per_batch_and_attrib_0_ground_truth.float() #size [net_attr_nbr]\n person.replace_nan_by_one(per_batch_and_attrib_recall_0)\n # class_accuracy = mean(recall_of_0, recall_of_1)\n per_batch_and_attrib_class_accuracy = (per_batch_and_attrib_recall_0 + per_batch_and_attrib_recall_1) / 2.0 #size [net_attr_nbr]\n per_attrib_class_accuracy += per_batch_and_attrib_class_accuracy #size [net_attr_nbr]\n\n assert (total == (dataloader.dataset.__len__() * net_attr_nbr))\n \n if doGPU:\n per_attrib_total = per_attrib_total.cpu()\n per_attrib_correct = per_attrib_correct.cpu()\n per_attrib_1_pred = per_attrib_1_pred.cpu()\n per_attrib_class_accuracy = per_attrib_class_accuracy.cpu()\n\n # compute per-attribute and global average prediction error\n err = (1.0-correct.item()/total)\n per_attrib_err = (1.0 - (per_attrib_correct.to(dtype=torch.float) / per_attrib_total.to(dtype=torch.float))) # size [net_attr_nbr]\n np.testing.assert_allclose(per_attrib_err.mean().item(), err, rtol=1e-5)\n\n # compute per-attribute number of 1 predictions\n per_attrib_1_pred_rate = 100 * (per_attrib_1_pred.to(dtype=torch.float) / per_attrib_total.to(dtype=torch.float)) # size 
[net_attr_nbr]\n\n # compute mean class_accuracy over batches\n per_attrib_class_accuracy = per_attrib_class_accuracy * 1.0 / batch_nbr \n\n return err, per_attrib_err, per_attrib_1_pred_rate, per_attrib_class_accuracy", "def minsize(self):# -> int:\r\n return 0" ]
[ "0.64593107", "0.6453829", "0.6450095", "0.6359758", "0.6284254", "0.6248897", "0.61730903", "0.6143888", "0.6141596", "0.61282915", "0.61234075", "0.6117177", "0.6080115", "0.60368556", "0.60218304", "0.60163116", "0.60140604", "0.5989487", "0.59858793", "0.59834915", "0.5977365", "0.59712565", "0.59686506", "0.59559155", "0.594531", "0.59413534", "0.5929339", "0.59223", "0.58958864", "0.5887355", "0.5870281", "0.5866552", "0.58660185", "0.5862832", "0.5854202", "0.5854162", "0.5847168", "0.58469635", "0.58267206", "0.58185667", "0.5812889", "0.57675815", "0.5762021", "0.5761288", "0.57558995", "0.574252", "0.5740926", "0.57290834", "0.5728191", "0.57177454", "0.5713488", "0.57118905", "0.57001436", "0.5688805", "0.5687163", "0.56647074", "0.56455153", "0.5642227", "0.563967", "0.5639507", "0.563786", "0.5633532", "0.56160784", "0.5610361", "0.5609752", "0.5598587", "0.55862266", "0.55825007", "0.55809975", "0.5580369", "0.5574688", "0.5569056", "0.55651665", "0.5558584", "0.5551048", "0.5550513", "0.5546801", "0.55429006", "0.5540069", "0.5533956", "0.5531362", "0.5523708", "0.5520741", "0.5517333", "0.551535", "0.55130833", "0.55064076", "0.55057216", "0.5503234", "0.5500423", "0.5500393", "0.5498945", "0.54958206", "0.5488558", "0.54842794", "0.54819334", "0.546861", "0.546725", "0.5461223", "0.5458132", "0.5456981" ]
0.0
-1
Initialize the parameters for the multilayer perceptron
def __init__(self, rng, input, n_in, n_hidden, n_out, n_hiddenLayers, binary, stochastic):
        self.binary=binary
        self.stochastic=stochastic

        # Since we are dealing with a one hidden layer MLP, this will translate
        # into a HiddenLayer with a tanh activation function connected to the
        # LogisticRegression layer; the activation function can be replaced by
        # sigmoid or any other nonlinear function.
        self.hiddenLayers = []
        self.normLayers=[]
        for i in xrange(n_hiddenLayers):
            h_input = input if i == 0 else self.hiddenLayers[i-1].output
            h_in = n_in if i == 0 else n_hidden

            # if binary==True, we append a binary hiddenlayer
            if binary==True:
                self.hiddenLayers.append(
                    HiddenLayer(
                        rng=rng,
                        input=h_input,
                        n_in=h_in,
                        n_out=n_hidden,
                        activation=T.tanh,
                        binary=True,
                        stochastic=stochastic
                ))
                self.normLayers.append(
                    BatchNormLayer(
                        input=self.hiddenLayers[i].output,
                        n_in=n_hidden,
                        n_out=n_hidden
                ))
            else:
                self.hiddenLayers.append(
                    HiddenLayer(
                        rng=rng,
                        input=h_input,
                        n_in=h_in,
                        n_out=n_hidden,
                        activation=T.tanh,
                        binary=False,
                        stochastic=False
                ))

        # The logistic regression layer gets as input the hidden units
        # of the hidden layer
        self.logRegressionLayer = LogisticRegression(
            input=self.hiddenLayers[-1].output,
            n_in=n_hidden,
            n_out=n_out,
            binary=binary,
            stochastic=stochastic
        )

        # same holds for the function computing the number of errors
        self.errors = self.logRegressionLayer.errors

        # the parameters of the model are the parameters of the two layer it is
        # made out of
        self.params = sum([x.params for x in self.hiddenLayers], []) + self.logRegressionLayer.params
        self.wrt = sum([x.wrt for x in self.hiddenLayers], []) + self.logRegressionLayer.wrt
        self.Ws = sum([x.Ws for x in self.hiddenLayers], []) + self.logRegressionLayer.Ws

        # keep track of model input
        self.input = input
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_parameters(self, **kwargs):\n self.__multi_layer_perceptron.set_params(**kwargs)", "def __init__(self, hidden_layer_sizes, activation='relu', reg=0.001, k_fold=5, random_state=0):\n print(\"Initialize model Multi-layer Perceptron\")\n self.hidden_layer_sizes = hidden_layer_sizes\n self.activation = activation\n self.reg = reg\n self.k_fold = k_fold\n self.random_state = random_state\n self.model = sklearn.neural_network.MLPClassifier(self.hidden_layer_sizes,\n activation=self.activation,\n alpha=self.reg, max_iter=1000, \n random_state=self.random_state)", "def initialize_parameters(self):\n for i in range(1, self.L):\n self.W[i - 1] = np.random.randn(self.layer_dims[i], self.layer_dims[i - 1]) * 0.01\n self.b[i - 1] = np.zeros((self.layer_dims[i], 1))", "def init_params(self):\n self.conv = Conv(self.conv_layers[0][-1], self.out_channels, padding=self.padding,stride=self.stride)\n self.W = torch.randn(self.num_labels, self.cout_numel, requires_grad=True)\n self.T = torch.randn(self.num_labels, self.num_labels, requires_grad=True)", "def initialise_parameters(self):\n # Weights\n init = select_w_init(self.w_init)\n if self.w_gain:\n gain = nn.init.calculate_gain('relu')\n init(self.relations, gain=gain)\n else:\n init(self.relations)\n\n # Biases\n if self.b_init:\n init = select_b_init(self.b_init)\n init(self.sbias)\n init(self.pbias)\n init(self.obias)", "def init_parameters(self):\n # Create the weights and biases\n for i in range(1, len(self.layer_dimensions)):\n # Initialization from He et al.\n mu = 0\n var = 2 / self.layer_dimensions[i]\n sigma = np.sqrt(var)\n weight_shape = (self.layer_dimensions[i - 1], self.layer_dimensions[i])\n weight = np.random.normal(loc=mu, scale=sigma, size=weight_shape)\n bias = np.zeros((self.layer_dimensions[i], ))\n\n # Saving in the parameters dict\n layer_weight = \"w_\" + str(i)\n self._parameters[layer_weight] = weight\n layer_b = \"b_\" + str(i)\n self._parameters[layer_b] = bias", "def __init__(self, num_parameters=1, init=0.25):\n super(PReLU, self).__init__()\n self.num_parameters = num_parameters\n self.weight = Parameter(Tensor(num_parameters).fill_(init))", "def params_init(self) -> None:\n # Initialize weights and biases with uniform distribution.\n nn.init.uniform_(self.emb.weight, self.init_lower, self.init_upper)\n nn.init.uniform_(self.fc_e2h[1].weight, self.init_lower, self.init_upper)\n nn.init.uniform_(self.fc_e2h[1].bias, self.init_lower, self.init_upper)\n for lyr in range(self.n_lyr):\n self.stack_rnn[2 * lyr].params_init()\n nn.init.uniform_(self.fc_h2e[0].weight, self.init_lower, self.init_upper)\n nn.init.uniform_(self.fc_h2e[0].bias, self.init_lower, self.init_upper)", "def _init_layers(self) -> None:\n weight_nums, bias_nums = [], []\n for i in range(self.num_layers):\n if i == 0:\n weight_nums.append((self.in_channels + 2) * self.feat_channels)\n bias_nums.append(self.feat_channels)\n elif i == self.num_layers - 1:\n weight_nums.append(self.feat_channels * 1)\n bias_nums.append(1)\n else:\n weight_nums.append(self.feat_channels * self.feat_channels)\n bias_nums.append(self.feat_channels)\n\n self.weight_nums = weight_nums\n self.bias_nums = bias_nums\n self.num_params = sum(weight_nums) + sum(bias_nums)", "def initialize(self):\n\n\t\tparameters = {}\n\t\tL = len(self.layer_dims) # number of layers in the network\n\n\t\tfor l in range(1, L):\n\t\t\tparameters['W' + str(l)] = np.random.randn(self.layer_dims[l], self.layer_dims[l-1]) * 0.01\n\t\t\tparameters['b' + str(l)] = np.zeros((self.layer_dims[l], 
1))\n\n\t\t\tassert(parameters['W' + str(l)].shape == (self.layer_dims[l], self.layer_dims[l-1]))\n\t\t\tassert(parameters['b' + str(l)].shape == (self.layer_dims[l], 1))\n\n\t\treturn parameters", "def __init__(self, hparams):\n super(ThreeLayerClassifier, self).__init__()\n self.hparams = hparams\n self.layer_1 = torch.nn.Linear(self.hparams[\"input_size\"], 128)\n self.layer_2 = torch.nn.Linear(128, 256)\n self.layer_3 = torch.nn.Linear(256, self.hparams[\"targets\"])", "def __init__(self, **kwargs):\n\n super(MLP, self).__init__()\n\n # TODO: why lbfgs and not adam?\n self.solver = kwargs.pop('solver', 'lbfgs')\n self.alpha = kwargs.pop('alpha', 1e-5)\n self.random_state = kwargs.pop('random_state', 1)\n\n # determine if the MLP can be initialized or not\n self.clf = None\n self.hidden_layer_sizes = kwargs.pop('hidden_layer_sizes', -1)\n if not (self.hidden_layer_sizes == -1):\n self.initMLPClassifier(**kwargs)", "def __init__(self, *args, **kwargs):\n self.params = kwargs\n self.output_len = kwargs['num_neurons']\n self.input_len = kwargs['input_len']\n self.weights = Vector(data=np.random.randn(self.output_len, self.input_len))\n self.biases = Vector(data=np.zeros((self.output_len, 1)))\n self.input_activations = None\n self.output_activations = Vector()", "def __init__(self, reg_penalty='l2', reg=0.001, k_fold=5, random_state=0):\n print(\"Initialize model Perceptron\")\n self.reg_penalty = reg_penalty\n self.reg = reg\n self.k_fold = k_fold\n self.random_state = random_state\n self.model = sklearn.linear_model.Perceptron(penalty=reg_penalty,\n alpha=self.reg,\n max_iter=1000,\n random_state=self.random_state)", "def initialize_parameters(X, Y, nb_units_per_hidden_layer):\n # Your code here\n np.random.seed(1)\n params = {}\n L = len(nb_units_per_hidden_layer)\n params['W' + str(1)] = np.random.randn(nb_units_per_hidden_layer[0],X.shape[0] ) * 0.05\n params['b' + str(1)] = np.zeros((nb_units_per_hidden_layer[0], 1))\n\n for i in range(1, L):\n params['W' + str(i+1)] = np.random.randn(nb_units_per_hidden_layer[i], nb_units_per_hidden_layer[i - 1]) * 0.01\n params['b' + str(i+1)] = np.zeros((nb_units_per_hidden_layer[i], 1))\n params['W' + str(L+1)]= np.random.randn(1, nb_units_per_hidden_layer[L-1]) * 0.05\n params['b' + str(L+1)]= np.zeros((1,1))\n return params\n # raise NotImplementedError", "def initialize(self):\n params = {}\n for i in range(1, len(self.layer_dimensions)):\n params['b_' + str(i)] = np.ones((self.layer_dimensions[i], 1))\n if self.he_initialization:\n params['W_' + str(i)] = np.random.randn(self.layer_dimensions[i],\n self.layer_dimensions[i - 1]) * np.sqrt(\n 2 / self.layer_dimensions[i - 1])\n else:\n params['W_' + str(i)] = np.random.rand(self.layer_dimensions[i], self.layer_dimensions[i - 1]) - 0.5\n return params", "def __init__(self, input_size, neurons):\n super().__init__()\n self.input_size = input_size\n self.neurons = neurons\n self.params[\"w\"] = np.random.randn(input_size, neurons)\n self.params[\"b\"] = np.random.randn(1, neurons)\n self.grads = {}", "def __init__(self, input_dim=(1, 28, 28), num_classes=10):\n self.params = {}\n\n #######################################################################\n # TODO: Initialize weights and biases for the convolutional neural #\n # network. Weights should be initialized from a Gaussian distribution;#\n # biases should be initialized to zero. All weights and biases should #\n # be stored in the dictionary self.params. 
#\n #######################################################################\n\n filter_size = 5\n weight_scale = 1e-2\n num_filters = 6\n hidden_dim = 784\n\n #****** THIS WAS TO TEST OUT FASTER NETWORKS *******\n\n self.params['W1'] = np.random.normal(scale=weight_scale, size=(num_filters, input_dim[0], filter_size, filter_size))\n # self.params['W2'] = np.random.normal(scale=weight_scale, size=(num_filters, 6, filter_size, filter_size))\n self.params['W3'] = np.random.normal(scale=weight_scale, size=(864, num_classes))\n\n # self.params['W3'] = np.random.normal(scale=weight_scale, size=(hidden_dim, num_classes))\n # self.params['W4'] = np.random.normal(scale=weight_scale, size=(hidden_dim, num_classes))\n\n self.params['b1'] = np.zeros(num_filters)\n # self.params['b2'] = np.zeros(num_filters)\n self.params['b3'] = np.zeros(num_classes)\n\n # self.params['b3'] = np.zeros(num_classes)\n # self.params['b4'] = np.zeros(num_classes)", "def parameter_initialization(self):\n dictsize = settings.PARS.get('numBases')\n numClass = self.train_labels.shape[0] # number of objects\n Dinit = np.empty((self.train_feats.shape[0], 0)) # for C-Ksvd and D-Ksvd\n dictLabel = np.empty((numClass, 0), dtype=np.int)\n numPerClass = dictsize//numClass\n param1 = {\n 'mode': 2,\n 'K': settings.PARS.get('numBases'), # size of the dictionary\n 'lambda1': settings.PARS.get('lambda_'),\n 'lambda2': 0,\n 'iter': settings.PARS.get('iterationini')\n }\n param2 = {\n 'lambda1': settings.PARS.get('lambda_'),\n 'lambda2': 0,\n 'mode': 2\n }\n\n for classid in range(numClass):\n col_ids = np.array(np.nonzero(self.train_labels[classid, :] == 1)).ravel()\n # ensure no zero data elements are chosen\n data_ids = np.array(np.nonzero(np.sum(self.train_feats[:, col_ids]**2, axis=0) > 1e-6)).ravel()\n\n # Raising an error if any zero lement is found\n if col_ids.shape[0] != data_ids.shape[0]:\n raise DatasetZeroElementFound\n\n # Initilization for LC-KSVD (perform KSVD in each class)\n Dpart = self.train_feats[:, col_ids[np.random.choice(data_ids, numPerClass, replace=False)]]\n param1['D'] = Dpart # initial dictionary\n Dpart = trainDL(self.train_feats[:, col_ids[data_ids]], **param1)\n Dinit = np.c_[Dinit, Dpart]\n labelvector = np.zeros((numClass, 1), dtype=np.int)\n labelvector[classid] = 1\n dictLabel = np.c_[dictLabel, np.tile(labelvector, (1, numPerClass))]\n\n param1['D'] = np.asfortranarray(Dinit) # initial dictionary\n # RuntimeError: matrix arg 10 must be a 2d double Fortran Array\n self.train_feats = self.train_feats if np.isfortran(self.train_feats) else np.asfortranarray(self.train_feats)\n Dinit = trainDL(self.train_feats, **param1)\n Xinit = lasso(self.train_feats, Dinit, **param2)\n\n # learning linear classifier parameters\n tmp = np.linalg.inv([email protected]+np.eye(*([email protected]).shape))@Xinit\n Winit = [email protected]_labels.T\n Winit = Winit.T\n\n Q = np.zeros((dictsize, self.train_feats.shape[1])) # energy matrix\n\n for frameid in range(self.train_feats.shape[1]):\n label_training = self.train_labels[:, frameid]\n maxid1 = label_training.argmax(0)\n\n for itemid in range(Dinit.shape[1]):\n label_item = dictLabel[:, itemid]\n maxid2 = label_item.argmax(0)\n\n if maxid1 == maxid2:\n Q[itemid, frameid] = 1\n\n Tinit = [email protected]\n Tinit = Tinit.T\n\n return Dinit, Winit, Tinit, Q", "def __init__(self, in_features, out_features):\n \n ########################\n # PUT YOUR CODE HERE #\n #######################\n\n\n self.params = {'weight': 0.0001 * np.random.randn(out_features, in_features), 
'bias': np.zeros((out_features, 1))}\n self.grads = {'weight': np.zeros((out_features, in_features)), 'bias': np.zeros((out_features, 1))}\n\n\n\n ########################\n # END OF YOUR CODE #\n #######################", "def __init__(self, dimensions):\n self.w = nn.Parameter(1, dimensions)", "def __init__(self, dimensions):\n self.w = nn.Parameter(1, dimensions)", "def __init__(self, dimensions):\n self.w = nn.Parameter(1, dimensions)", "def define_parameters(self):\n self.weight_matrix = torch.nn.Parameter(torch.Tensor(self.in_channels, self.out_channels))\n self.bias = torch.nn.Parameter(torch.Tensor(self.out_channels))", "def define_parameters(self):\n self.weight_matrix = torch.nn.Parameter(torch.Tensor(self.in_channels, self.out_channels))\n self.bias = torch.nn.Parameter(torch.Tensor(self.out_channels))", "def __init__(self, in_features, out_features):\n \n ########################\n # PUT YOUR CODE HERE #\n #######################\n self.params = {'weight': np.random.normal(loc = 0, scale=0.0001, size=(out_features,in_features)),\\\n 'bias': np.zeros((1, out_features))}\n \n self.grads = {'weight': np.zeros((out_features,in_features)),\\\n 'bias': np.zeros((1, out_features))}\n ########################\n # END OF YOUR CODE #\n #######################", "def __init__(self, layerNeurons, numberOfLayers, initialWeights = None, lowerBound = None, upperBound = None):\r\n \r\n # Ensure that there is at-least one input and one output layer in the network\r\n assert len(layerNeurons) > 1, \"At least one input layer and one output layer is needed\"\r\n \r\n # Get the total number of weights needed in the network\r\n totalWeightCount = NeuralNetwork.getSizeOfWeightVector(layerNeurons)*numberOfLayers\r\n \r\n # Initialise the weights with the initialiser or random values\r\n if initialWeights is None:\r\n if lowerBound is None:\r\n lowerBound=-1/np.sqrt(layerNeurons[0])\r\n if upperBound is None:\r\n upperBound=1/np.sqrt(layerNeurons[0])\r\n self.weights = np.random.uniform(lowerBound, upperBound, totalWeightCount)\r\n else:\r\n assert initialWeights.size == totalWeightCount, (\"Length of initial weight matrix incorrect. 
You need \"+str(totalWeightCount)+\" weights\")\r\n self.weights = initialWeights.view()\r\n \r\n self.weights.shape = (numberOfLayers, -1)\r\n # create an empty array of layers\r\n self.layers = []\r\n layerBlockStart = 0\r\n \r\n for layerInputDimention, layerOutputDimention in zip(layerNeurons, layerNeurons[1:]):\r\n # initialise each layer with its input and output dimentions and bi-directional pointers to the relivant weights\r\n layerBlockEnd = layerBlockStart+(layerInputDimention*layerOutputDimention)\r\n layerBiasEnd = layerBlockEnd+layerOutputDimention\r\n newLayer = batchNetworkLayer(layerInputDimention, layerOutputDimention, numberOfLayers, \r\n self.weights[..., :, layerBlockStart:layerBlockEnd], \r\n self.weights[..., :, layerBlockEnd:layerBiasEnd])\r\n self.layers.append(newLayer)\r\n \r\n layerBlockStart = layerBiasEnd", "def initParams(self):\n sizes = [self.inputDim]+self.layerSizes+[self.outputDim]\n scales = [np.sqrt(6)/np.sqrt(n+m) for n,m in zip(sizes[:-1],sizes[1:])]\n self.stack = [[np.random.rand(m,n)*2*s-s,np.zeros((m,1))] \\\n for n,m,s in zip(sizes[:-1],sizes[1:],scales)]\n self.hActs_M = [cm.empty((s,self.maxBatch)) for s in sizes]\n\n if self.train:\n # Now assuming that all layers are the same size\n self.grad = [[cm.empty(w.shape),cm.empty(b.shape)] for w,b in self.stack]\n self.deltasC_M = cm.empty((self.outputDim,self.maxBatch))\n self.deltasOut_M = cm.empty((sizes[1],self.maxBatch)) \n self.deltasIn_M = cm.empty((sizes[1],self.maxBatch)) \n self.tmpGrad_M = cm.empty((self.layerSize,self.maxBatch))\n \n # Allocate memory once here and reuse\n # Store probs\n self.probs_M = cm.empty((self.outputDim,self.maxBatch))\n # Store col max\n self.rowVec_M = cm.empty((1,self.maxBatch))\n \n self.stack = [[cm.CUDAMatrix(w),cm.CUDAMatrix(b)]\n for w,b in self.stack]\n\n if self.temporalLayer > 0:\n # dummy bias used for temporal layer\n dummy = cm.empty((1,1))\n dummy.assign(0.0)\n\n scale = np.sqrt(6)/np.sqrt(self.layerSize*2)\n wtf = cm.CUDAMatrix(2*scale*np.random.rand(self.layerSize,\n self.layerSize)-scale)\n wtb = cm.CUDAMatrix(2*scale*np.random.rand(self.layerSize,\n self.layerSize)-scale)\n self.stack.append([wtf,dummy])\n self.stack.append([wtb,dummy])\n\n # forward and backward activations for temporal layer\n self.hActsFor_M = cm.empty((self.layerSize,self.maxBatch))\n self.hActsBack_M = cm.empty((self.layerSize,self.maxBatch))\n\n if self.train:\n dwtf = cm.empty(wtf.shape)\n self.grad.append([dwtf,dummy])\n dwtb = cm.empty(wtb.shape)\n self.grad.append([dwtb,dummy])\n\n self.tmpGradBack_M = cm.empty((self.layerSize,self.maxBatch))\n self.deltasFor_M = cm.empty((self.layerSize,self.maxBatch))\n self.deltasBack_M = cm.empty((self.layerSize,self.maxBatch))", "def _init_hyperparam(self, **p_par):\r\n \r\n try:\r\n p_input_size = self._input_space.get_num_dim()\r\n p_output_size = self._output_space.get_num_dim()\r\n except:\r\n raise ParamError('Input size and/or output size of the network are not defined.')\r\n \r\n if 'p_update_rate' not in p_par:\r\n p_par['p_update_rate'] = 1\r\n elif p_par.get('p_update_rate') < 1:\r\n raise ParamError(\"p_update_rate must be equal or higher than 1.\")\r\n \r\n if 'p_num_hidden_layers' not in p_par:\r\n raise ParamError(\"p_num_hidden_layers is not defined.\")\r\n \r\n if 'p_output_activation_fct' not in p_par:\r\n p_par['p_output_activation_fct'] = None\r\n \r\n if 'p_optimizer' not in p_par:\r\n raise ParamError(\"p_optimizer is not defined.\")\r\n \r\n if 'p_loss_fct' not in p_par:\r\n raise ParamError(\"p_loss_fct is 
not defined.\")\r\n\r\n if 'p_test_data' not in p_par:\r\n p_par['p_test_data'] = 0.3\r\n\r\n if 'p_batch_size' not in p_par:\r\n p_par['p_batch_size'] = 100\r\n\r\n if 'p_seed_buffer' not in p_par:\r\n p_par['p_seed_buffer'] = 1\r\n\r\n if 'p_learning_rate' not in p_par:\r\n p_par['p_learning_rate'] = 3e-4\r\n \r\n if 'p_hidden_size' not in p_par:\r\n raise ParamError(\"p_hidden_size is not defined.\")\r\n try:\r\n if len(p_par['p_hidden_size']) != p_par['p_num_hidden_layers']:\r\n raise ParamError(\"length of p_hidden_size list must be equal to p_num_hidden_layers or an integer.\")\r\n except:\r\n p_par['p_hidden_size'] = [int(p_par['p_hidden_size'])] * int(p_par['p_num_hidden_layers'])\r\n \r\n if 'p_activation_fct' not in p_par:\r\n raise ParamError(\"p_activation_fct is not defined.\")\r\n try:\r\n if len(p_par['p_activation_fct']) != p_par['p_num_hidden_layers']:\r\n raise ParamError(\"length of p_activation_fct list must be equal to p_num_hidden_layers or a single activation function.\")\r\n except:\r\n if isinstance(p_par['p_activation_fct'], list):\r\n raise ParamError(\"length of p_activation_fct list must be equal to p_num_hidden_layers or a single activation function.\")\r\n else:\r\n p_par['p_activation_fct'] = [p_par['p_activation_fct']] * int(p_par['p_num_hidden_layers'])\r\n \r\n if 'p_weight_bias_init' not in p_par:\r\n p_par['p_weight_bias_init'] = True\r\n \r\n if p_par['p_weight_bias_init']:\r\n if 'p_weight_init' not in p_par:\r\n p_par['p_weight_init'] = torch.nn.init.orthogonal_\r\n \r\n if 'p_bias_init' not in p_par:\r\n p_par['p_bias_init'] = lambda x: torch.nn.init.constant_(x, 0)\r\n \r\n if 'p_gain_init' not in p_par:\r\n p_par['p_gain_init'] = np.sqrt(2)\r\n \r\n self._hyperparam_space.add_dim(HyperParam('p_input_size','Z'))\r\n self._hyperparam_space.add_dim(HyperParam('p_output_size','Z'))\r\n self._hyperparam_space.add_dim(HyperParam('p_update_rate','Z'))\r\n self._hyperparam_space.add_dim(HyperParam('p_num_hidden_layers','Z'))\r\n self._hyperparam_space.add_dim(HyperParam('p_hidden_size','Z'))\r\n self._hyperparam_space.add_dim(HyperParam('p_activation_fct'))\r\n self._hyperparam_space.add_dim(HyperParam('p_output_activation_fct'))\r\n self._hyperparam_space.add_dim(HyperParam('p_optimizer'))\r\n self._hyperparam_space.add_dim(HyperParam('p_loss_fct'))\r\n self._hyperparam_space.add_dim(HyperParam('p_test_data'))\r\n self._hyperparam_space.add_dim(HyperParam('p_batch_size'))\r\n self._hyperparam_space.add_dim(HyperParam('p_seed_buffer'))\r\n self._hyperparam_space.add_dim(HyperParam('p_learning_rate'))\r\n self._hyperparam_space.add_dim(HyperParam('p_weight_bias_init'))\r\n self._hyperparam_space.add_dim(HyperParam('p_weight_init'))\r\n self._hyperparam_space.add_dim(HyperParam('p_bias_init'))\r\n self._hyperparam_space.add_dim(HyperParam('p_gain_init'))\r\n self._hyperparam_tuple = HyperParamTuple(self._hyperparam_space)\r\n \r\n ids_ = self.get_hyperparam().get_dim_ids()\r\n self.get_hyperparam().set_value(ids_[0], p_input_size)\r\n self.get_hyperparam().set_value(ids_[1], p_output_size)\r\n self.get_hyperparam().set_value(ids_[2], p_par['p_update_rate'])\r\n self.get_hyperparam().set_value(ids_[3], p_par['p_num_hidden_layers'])\r\n self.get_hyperparam().set_value(ids_[4], p_par['p_hidden_size'])\r\n self.get_hyperparam().set_value(ids_[5], p_par['p_activation_fct'])\r\n self.get_hyperparam().set_value(ids_[6], p_par['p_output_activation_fct'])\r\n self.get_hyperparam().set_value(ids_[7], p_par['p_optimizer'])\r\n self.get_hyperparam().set_value(ids_[8], 
p_par['p_loss_fct'])\r\n self.get_hyperparam().set_value(ids_[9], p_par['p_test_data'])\r\n self.get_hyperparam().set_value(ids_[10], p_par['p_batch_size'])\r\n self.get_hyperparam().set_value(ids_[11], p_par['p_seed_buffer'])\r\n self.get_hyperparam().set_value(ids_[12], p_par['p_learning_rate'])\r\n self.get_hyperparam().set_value(ids_[13], p_par['p_weight_bias_init'])\r\n self.get_hyperparam().set_value(ids_[14], p_par['p_weight_init'])\r\n self.get_hyperparam().set_value(ids_[15], p_par['p_bias_init'])\r\n self.get_hyperparam().set_value(ids_[16], p_par['p_gain_init'])", "def __init__(self, layerNeurons, initialWeights = None, layerTypes=None, **kwargs):\r\n \r\n # Ensure that there is at-least one input and one output layer in the network\r\n assert len(layerNeurons)>1, \"At least one input layer and one output layer is needed\"\r\n \r\n # Get the total number of weights needed in the network\r\n totalWeightCount = NeuralNetwork.getSizeOfWeightVector(layerNeurons)\r\n \r\n # Initialise the weights with the initializer or random values\r\n if initialWeights is None:\r\n self.weights = np.random.uniform(-1/np.sqrt(layerNeurons[0]), 1/np.sqrt(layerNeurons[0]), totalWeightCount)\r\n else:\r\n assert len(initialWeights) == totalWeightCount, (\"Length of initial weight matrix incorrect. You need \"+str(totalWeightCount)+\" weights\")\r\n self.weights = np.array(initialWeights, dtype = np.float64) \r\n \r\n # create an empty array of layers\r\n self.layers = []\r\n layerBlockStart = 0\r\n \r\n if layerTypes is None or len(layerTypes)<(len(layerNeurons)-1):\r\n layerTypes=[NetworkLayer]*(len(layerNeurons)-1)\r\n \r\n for layerInputDimention, layerOutputDimention, layerType in zip(layerNeurons, layerNeurons[1:], layerTypes):\r\n # initialise each layer with its input and output dimentions and bi-directional pointers to the relivant weights\r\n layerBlockEnd = layerBlockStart+(layerInputDimention*layerOutputDimention)\r\n layerBiasEnd = layerBlockEnd+layerOutputDimention\r\n newLayer = layerType(layerInputDimention, layerOutputDimention, \r\n self.weights[..., layerBlockStart:layerBlockEnd], \r\n self.weights[..., layerBlockEnd:layerBiasEnd], **kwargs)\r\n self.layers.append(newLayer)\r\n \r\n layerBlockStart = layerBiasEnd\r\n \r\n # Tell the output later to use a different function to calculate the delta \r\n newLayer.calcDelta = newLayer.calcDeltaOutputLayer", "def initialise_parameters(self):\n\n\t\tlow = -numpy.sqrt(6/(self.input_dimension + self.hidden_dimension))\n\t\thigh = numpy.sqrt(6/(self.input_dimension + self.hidden_dimension))\n\t\tif self.activation is theano.tensor.nnet.sigmoid:\n\t\t\t# We know the optimum distribution for tanh and sigmoid, so we\n\t\t\t# assume that we're using tanh unless we're using sigmoid.\n\t\t\tlow *= 4\n\t\t\thigh *= 4\n\n\t\tself.weights = theano.shared(\n\t\t\tvalue=numpy.asarray(\n\t\t\t\tself.rng.uniform( # This distribution is apparently optimal for tanh.\n\t\t\t\t\tlow=low,\n\t\t\t\t\thigh=high,\n\t\t\t\t\tsize=(self.input_dimension, self.hidden_dimension)),\n\t\t\t\tdtype=theano.config.floatX),\n\t\t\tname=\"W\",\n\t\t\tborrow=True)\n\n\t\tself.bias = theano.shared(\n\t\t\tvalue=numpy.zeros((self.hidden_dimension,),\n\t\t\t\tdtype=theano.config.floatX),\n\t\t\tname=\"b\",\n\t\t\tborrow=True)\n\n\t\tself.reverse_bias = theano.shared(\n\t\t\tvalue=numpy.zeros((self.input_dimension,),\n\t\t\t\tdtype=theano.config.floatX),\n\t\t\tname=\"b'\",\n\t\t\tborrow=True)\n\n\t\tself.reverse_weights = self.weights.T\t# Tied weights, so the reverse 
weight\n\t\t\t\t\t\t\t\t\t\t\t\t# matrix is just the transpose.\n\n\t\tself.label_weights = theano.shared(\n\t\t\tvalue=numpy.zeros((self.hidden_dimension, self.output_dimension),\n\t\t\t\tdtype=theano.config.floatX),\n\t\t\tname=\"lW\",\n\t\t\tborrow=True)\n\n\t\tself.label_bias = theano.shared(\n\t\t\tvalue=numpy.zeros((self.output_dimension,),\n\t\t\t\tdtype=theano.config.floatX),\n\t\t\tname=\"lb\",\n\t\t\tborrow=True)", "def __init__(self, weights_dim):\n\n super().__init__()\n\n self._logger = logging.getLogger(self.__class__.__name__)\n\n self.weights_dim = weights_dim\n\n # Pointnet\n self.prepool = nn.Sequential(\n nn.Conv1d(4, 64, 1),\n nn.GroupNorm(8, 64),\n nn.ReLU(),\n\n nn.Conv1d(64, 64, 1),\n nn.GroupNorm(8, 64),\n nn.ReLU(),\n\n nn.Conv1d(64, 64, 1),\n nn.GroupNorm(8, 64),\n nn.ReLU(),\n\n nn.Conv1d(64, 128, 1),\n nn.GroupNorm(8, 128),\n nn.ReLU(),\n\n nn.Conv1d(128, 1024, 1),\n nn.GroupNorm(16, 1024),\n nn.ReLU(),\n )\n self.pooling = nn.AdaptiveMaxPool1d(1)\n self.postpool = nn.Sequential(\n nn.Linear(1024, 512),\n nn.GroupNorm(16, 512),\n nn.ReLU(),\n\n nn.Linear(512, 256),\n nn.GroupNorm(16, 256),\n nn.ReLU(),\n\n nn.Linear(256, 2 + np.prod(weights_dim)),\n )\n\n self._logger.info('Predicting weights with dim {}.'.format(self.weights_dim))", "def __init__(self, params: Iterable[nn.Parameter]):\n self.params = params\n self.param_states = [p.requires_grad for p in self.params]", "def __init__(self, input_dim=(1, 28, 28), num_filters=32, filter_size=3,\n hidden_dim=100, num_classes=10, weight_scale=1e-3,\n dtype=np.float32):\n self.params = {}\n self.dtype = dtype\n\n ############################################################################\n # TODO: Initialize weights and biases for the three-layer convolutional #\n # network. Weights should be initialized from a Gaussian with standard #\n # deviation equal to weight_scale; biases should be initialized to zero. #\n # All weights and biases should be stored in the dictionary self.params. #\n # Store weights for the convolutional layer using the keys 'W1' (here #\n # we do not consider the bias term in the convolutional layer); #\n # use keys 'W2' and 'b2' for the weights and biases of the #\n # hidden fully-connected layer, and keys 'W3' and 'b3' for the weights #\n # and biases of the output affine layer. For this question, we assume #\n # the max-pooling layer is 2x2 with stride 2. Then you can calculate the #\n # shape of features input into the hidden fully-connected layer, in terms #\n # of the input dimension and size of filter. 
#\n ############################################################################\n C, H, W = input_dim\n H_p = int((H - filter_size + 1) /2)\n W_p = int((W - filter_size + 1) /2)\n self.params['W1'] = np.random.normal(loc = 0.0, scale = weight_scale, size = (num_filters,C,filter_size,filter_size)).astype(self.dtype)\n self.params['W2'] = np.random.normal(loc = 0.0, scale = weight_scale, size = (num_filters*H_p*W_p,hidden_dim)).astype(self.dtype)\n self.params['b2'] = np.zeros(hidden_dim, dtype=self.dtype)\n self.params['W3'] = np.random.normal(loc = 0.0, scale = weight_scale, size = (hidden_dim,num_classes)).astype(self.dtype)\n self.params['b3'] = np.zeros(num_classes, dtype=self.dtype)\n ############################################################################\n # END OF YOUR CODE #\n ############################################################################\n\n for k, v in self.params.items():\n self.params[k] = v.astype(dtype)", "def init_learner(self,**kwargs):\r\n \r\n if self.learn_type == 'nn':\r\n #initialize neural network\r\n shape = kwargs[\"shape\"]\r\n #initialize input layer\r\n model = Sequential() \r\n #add hidden layers\r\n for i in range(len(shape)):\r\n if i == 0:\r\n nb_input = self.size\r\n else:\r\n nb_input = shape[i -1]\r\n nb_output = shape[i]\r\n model.add(Dense(nb_input,nb_output,init=\"he_normal\",\r\n activation = \"tanh\"))\r\n model.add(Dropout(.5))\r\n model.add(Dense(shape[-1],1,init = \"he_normal\",\r\n activation = \"linear\"))\r\n model.compile(loss = 'mean_squared_error',optimizer = 'rmsprop')\r\n self.learner = model\r\n \r\n elif self.learn_type == 'linear':\r\n #initialize parameter\r\n self.learner = Linear(self.size,**kwargs)", "def and_setup(epochs):\n learning_rate = 0.15\n value_inputs = [\n # A B\n [0, 0],\n [0, 1],\n [1, 0],\n [1, 1]\n ]\n values_simple_outputs = [1 if a + b ==\n 2 else 0 for a, b in value_inputs]\n values_network_outputs = [[a] for a in values_simple_outputs]\n perceptron = Perceptron([0.5, 0.5, 0.5], 'c', ['a', 'b'])\n network = PerceptronNetwork(\n [\n PerceptronLayer(\n [\n Perceptron([0.5, 0.5, 0.5], 'c', ['a', 'b'])\n ], 'only_layer')\n ])\n\n perceptron_estimated_values = []\n network_estimated_values = []\n perceptron_unit_error = []\n network_unit_error = []\n for _ in range(0, epochs):\n for value, result in zip(value_inputs, values_simple_outputs):\n # Step 1: forward pass - predict\n estimated_value = perceptron.forward(value)\n perceptron_estimated_values.append(estimated_value)\n\n # Step 2: back pass - collect errors\n weighted_error = result - estimated_value\n unit_error = perceptron.backward(\n estimated_value, weighted_error)\n perceptron_unit_error.append(unit_error)\n\n # Step 3: update weights\n perceptron = perceptron.update_weights(\n value, unit_error, learning_rate)\n\n for values, results in zip(value_inputs, values_network_outputs):\n # Step 1: forward pass - predict\n estimated_results, layer_states = network.forward(values)\n network_estimated_values.append(estimated_results[0])\n\n # Step 2: back pass - collect errors\n unit_errors = network.backward(layer_states, results)\n network_unit_error.append(unit_errors[0][0])\n\n # Step 3: update weights\n network = network.update_weights(\n layer_states, unit_errors, learning_rate)\n\n return (perceptron,\n network,\n perceptron_estimated_values,\n network_estimated_values,\n perceptron_unit_error,\n network_unit_error)", "def construct(self):\n self.input_size = self.numplanes * self.boardsize**2\n \n if self.hidden:\n layers = [\n 
torch.nn.Linear(self.input_size, self.hidden), \n torch.nn.ReLU(),\n torch.nn.Linear(self.hidden, self.boardsize**2)\n ]\n else:\n layers = [torch.nn.Linear(self.input_size, self.boardsize**2)]\n\n self.layers = torch.nn.ModuleList(layers)\n self.optimizer = torch.optim.Adam(self.parameters(), lr=1e-5)\n logging.info(\"Model initialized: %s\", self)", "def reset_parameters_lecun(self, param_init=0.1):\n logger.info('===== Initialize %s with lecun style =====' % self.__class__.__name__)\n for conv_layer in [self.pointwise_conv1, self.pointwise_conv2, self.depthwise_conv]:\n for n, p in conv_layer.named_parameters():\n init_with_lecun_normal(n, p, param_init)", "def _init_layers(self) -> None:\n self.self_attn = MultiheadAttention(**self.self_attn_cfg)\n self.cross_attn = MultiheadAttention(**self.cross_attn_cfg)\n self.embed_dims = self.self_attn.embed_dims\n self.ffn = FFN(**self.ffn_cfg)\n norms_list = [\n build_norm_layer(self.norm_cfg, self.embed_dims)[1]\n for _ in range(3)\n ]\n self.norms = ModuleList(norms_list)", "def initialize_parameters(n_a,n_x,n_y):\n np.random.seed(1)\n Wax=np.random.randn(n_a,n_x)*0.01 #input to hidden\n Waa=np.random.randn(n_a,n_a)*0.01 #hidden to hidden\n Wya=np.random.randn(n_y,n_a)*0.01 #hidden to output\n b=np.zeros((n_a,1)) #hidden bias\n by=np.zeros((n_y,1)) #output bias\n \n parameters={\"Wax\":Wax,\"Waa\":Waa,\"Wya\":Wya,\"b\":b,\"by\":by}\n return parameters", "def __init__(self, input_dim=3*32*32, hidden_dim=100, num_classes=10,\n weight_scale=1e-3, reg=0.0):\n self.params = {}\n self.reg = reg\n\n ############################################################################\n # TODO: Initialize the weights and biases of the two-layer net. Weights #\n # should be initialized from a Gaussian centered at 0.0 with #\n # standard deviation equal to weight_scale, and biases should be #\n # initialized to zero. All weights and biases should be stored in the #\n # dictionary self.params, with first layer weights #\n # and biases using the keys 'W1' and 'b1' and second layer #\n # weights and biases using the keys 'W2' and 'b2'. 
#\n ############################################################################\n self.params['W1'] = weight_scale * np.random.randn(input_dim, hidden_dim)\n self.params['b1'] = np.zeros(hidden_dim)\n self.params['W2'] = weight_scale * np.random.randn(hidden_dim, num_classes)\n self.params['b2'] = np.zeros(num_classes)\n ############################################################################\n # END OF YOUR CODE #\n ############################################################################", "def _reset_parameters(self):\n\n nn.init.xavier_normal_(self._W_x2i)\n nn.init.xavier_normal_(self._W_x2f)\n nn.init.xavier_normal_(self._W_x2o)\n nn.init.xavier_normal_(self._W_x2c)\n \n nn.init.orthogonal_(self._W_h2i)\n nn.init.orthogonal_(self._W_h2f)\n nn.init.orthogonal_(self._W_h2o)\n nn.init.orthogonal_(self._W_h2c)\n \n nn.init.uniform_(self._W_c2i)\n nn.init.uniform_(self._W_c2f)\n nn.init.uniform_(self._W_c2o)\n \n nn.init.constant_(self._b_i, 0)\n nn.init.constant_(self._b_f, 1)\n nn.init.constant_(self._b_o, 0)\n nn.init.constant_(self._b_c, 0)\n\n if self._chrono_init:\n print(self._t_max)\n b_f = torch.from_numpy(np.log(np.random.randint(1, self._t_max+1, size=self._hidden_size)))\n self._b_f.data.copy_(b_f)\n self._b_i.data.copy_(-b_f)", "def init_three_layer_neuralnet(weight_scale=1, bias_scale=0, input_feat_dim=786,\n num_classes=10, num_neurons=(20, 30)):\n \n assert len(num_neurons) == 2, 'You must provide number of neurons for two layers...'\n\n model = {}\n #model['W1'] = np.random.randn((num_neurons[0],(input_feat_dim) * weight_scale) * math.sqrt(2.0/input_feat_dim)) # Initialize from a Gaussian With scaling of sqrt(2.0/fanin)\n \n model['W1'] = (np.random.rand(input_feat_dim,num_neurons[0])*weight_scale) * math.sqrt(2.0/input_feat_dim)\n model['b1'] = np.zeros(num_neurons[0])# Initialize with zeros\n \n #model['W2'] = (np.random.randn(input_feat_dim) * weight_scale) * math.sqrt(2.0/input_feat_dim)# Initialize from a Gaussian With scaling of sqrt(2.0/fanin)\n #print ((model['W1'])[0,:]).shape\n #numcols = len(input[0])\n t=len((model['W1'])[0])\n #print t\n model['W2'] = (np.random.rand(num_neurons[0],num_neurons[1])*weight_scale) * math.sqrt(2.0/t)\n model['b2'] = np.zeros(num_neurons[1])# Initialize with zeros\n\n t=len((model['W2'])[0])\n #model['W3'] = (np.random.randn(input_feat_dim) * weight_scale) * math.sqrt(2.0/input_feat_dim)# Initialize from a Gaussian With scaling of sqrt(2.0/fanin)\n model['W3'] = (np.random.rand(num_neurons[1],num_classes)*weight_scale) * math.sqrt(2.0/t)\n model['b3'] = np.zeros(num_classes)# Initialize with zeros\n\n return model", "def __init__(self, config) -> None:\n super(Global_MP, self).__init__()\n self.dim = config.dim\n\n self.h_mlp = MLP([self.dim, self.dim])\n\n self.res1 = Res(self.dim)\n self.res2 = Res(self.dim)\n self.res3 = Res(self.dim)\n self.mlp = MLP([self.dim, self.dim])\n\n self.x_edge_mlp = MLP([self.dim * 3, self.dim])\n self.linear = nn.Linear(self.dim, self.dim, bias=False)", "def reset_parameters(self):\n logger.info('===== Initialize %s =====' % self.__class__.__name__)\n nn.init.normal_(self.embed.weight, mean=0.0, std=self.d_model ** -0.5)\n nn.init.constant_(self.embed.weight[self.pad], 0)\n if self.output is not None and not self.tie_embedding:\n nn.init.xavier_uniform_(self.output.weight)\n nn.init.constant_(self.output.bias, 0.0)", "def test_init(self):\n network = PerceptronNetwork(\n [\n PerceptronLayer.blank(4, 2, 'layer1', ['a', 'b', 'c', 'd']),\n PerceptronLayer.blank(2, 2, 'layer2', ['a', 'b', 'c', 
'd'])\n ]\n )\n self.assertIsNotNone(network)", "def __init__(self, inputSize, hiddenSize, outputSize, epochs = 100, debug = False):\n self.inputSize = inputSize\n self.hiddenSize = hiddenSize\n self.outputSize = outputSize\n self.epochs = epochs\n self.debug = debug\n\n #weights\n self.W1 = np.random.randn(self.inputSize, self.hiddenSize) \n self.W2 = np.random.randn(self.hiddenSize, self.outputSize)", "def init_weight(self):\n init_layer(self.conv1)\n init_layer(self.conv2)\n init_bn(self.norm1)\n init_bn(self.norm2)", "def initialize_parameters(layer_dim):\n #tf.set_random_seed(0)\n L= len(layer_dim)\n parameters={}\n for i in range(1,L):\n parameters[\"W\" +str(i)] = tf.get_variable(\"W\"+str(i), [layer_dim[i],layer_dim[i-1]], initializer = tf.contrib.layers.xavier_initializer(seed=1))\n parameters[\"b\" +str(i)] = tf.get_variable(\"b\" +str(i),[layer_dim[i],1],initializer= tf.zeros_initializer())\n assert(parameters['W' + str(i)].shape == (layer_dim[i], layer_dim[i-1]))\n assert(parameters['b' + str(i)].shape == (layer_dim[i], 1))\n return parameters", "def __init__(self):\n #conv1\n n = inp_width*inp_height\n #poczatkowe wagi sieci sa ustalane losowo z rozkladu normalnego. Umieszczane sa one na liscie matryc wag\n self.Weights = [np.random.randn(layers[0][1],inp_channels,layers[0][2],layers[0][2])/np.sqrt(n)]\n out_Size = inp_width - layers[0][2] + 1 #zmienna zawiera rozmiar wyjscia danej warstwy\n #inicjalizacja progow \n self.Biases = [initBias*np.ones( layers[0][1] )]\n #przypisanie parametrow warstwie poolingu\n self.poolParams = [(layers[1][1], layers[1][2])]\n out_Size = out_Size/2 \n #conv 2\n n = out_Size*out_Size*layers[0][1]\n self.Weights.append(np.random.randn(layers[2][1],layers[0][1],layers[2][2],layers[2][2])/np.sqrt(n))\n out_Size = out_Size - layers[2][2]+1\n self.Biases.append(initBias*np.ones(layers[2][1]))\n #pool 2\n self.poolParams.append((layers[3][1],layers[3][2]))\n out_Size = out_Size/2 \n #conv 3\n n = out_Size*out_Size*layers[2][1]\n self.Weights.append(np.random.randn(layers[4][1],layers[2][1],out_Size,out_Size)/np.sqrt(n))\n out_Size = 1\n self.Biases.append(initBias*np.ones(layers[4][1]))\n #fully connected 1\n n = layers[4][1]\n self.Weights.append(np.random.randn(layers[5][1],layers[4][1])/np.sqrt(n))\n self.Biases.append(initBias*np.ones(layers[5][1]))\n #fully connected 2\n n = layers[5][1]\n self.Weights.append(np.random.randn(layers[6][1],layers[5][1])/np.sqrt(n))\n self.Biases.append(initBias*np.ones(layers[6][1]))\n\n self.Weights = np.asarray(self.Weights)\n self.Biases = np.asarray(self.Biases)\n \n delta_W = []\n delta_B = []\n for i in range(5):\n delta_W.append(np.zeros(self.Weights[i].shape))\n delta_B.append(np.zeros(self.Biases[i].shape))\n self.delta_W = np.asarray(delta_W)\n self.delta_B = np.asarray(delta_B)", "def __init__(self, num_features):\n super(TLU, self).__init__()\n self.num_features = num_features\n self.tau = nn.parameter.Parameter(torch.Tensor(1, num_features, 1, 1), requires_grad=True)\n self.reset_parameters()", "def __init__(self, input_size, hidden_size, output_size, weight_init_std=0.01):\n\n self.params = {}\n self.params['W1'] = weight_init_std * \\\n np.random.randn(input_size, hidden_size)\n self.params['b1'] = np.zeros(hidden_size)\n self.params['W2'] = weight_init_std * \\\n np.random.randn(hidden_size, output_size)\n self.params['b2'] = np.zeros(output_size)", "def reset_parameters(self):\n init_method = getattr(init, self.initialization)\n for layer in range(self.num_layers):\n fc = self.get_fc(layer)\n 
init_method(fc.weight.data)\n if self.use_bias:\n init.constant(fc.bias.data, val=0)\n init_method(self.out.weight.data)\n init.constant(self.out.bias.data, val=0)", "def initialize(self): \n self.optimizer = torch.optim.Adam(self.model.parameters(), lr=config.LR)", "def __init__(self, sizes, final=None, batchnorm=False, dropout=0.0):\n super(MLP, self).__init__()\n\n self.layers = nn.ModuleList()\n # If there is only one input dimension, everything is fine\n if sizes[0] == 1:\n self.layers.append(nn.Linear(sizes[0], sizes[1]))\n\n # For multiple input dimensions, each one has a separate following\n # hidden layer.\n # This is necessary for the partial training later on.\n else:\n self.layers.append(nn.ModuleList([nn.Linear(1, sizes[1])\n for _ in range(sizes[0])]))\n\n # Add the remaining layers with selu activations\n for i in range(len(sizes) - 1)[1:]:\n if i != (len(sizes) - 1):\n if batchnorm:\n self.layers.append(nn.BatchNorm1d(sizes[i]))\n self.layers.append(nn.SELU())\n if dropout is not None:\n if sizes[i] < 32:\n print(\"Warning: Dropout {} on only {} parameters...\"\n .format(dropout, sizes[i]))\n self.layers.append(nn.Dropout(p=dropout))\n self.layers.append(nn.Linear(sizes[i], sizes[i + 1]))\n\n if final is not None:\n self.layers.append(final)", "def __init__(self, activation_function=Function(), input_size=1, output_size=1, noise_size=0,\n learning_batch_size=1, param_desc='Parametres de descente', nb_exp=0):\n self._input_size = input_size\n self._output_size = output_size\n self._learning_batch_size = learning_batch_size\n self._noise_size = noise_size\n # self._weights = np.transpose(np.random.randn(input_size, output_size))\n self._weights = np.random.randn(output_size, input_size+noise_size)\n self._bias = np.zeros((output_size, 1)) # Vecteur colonne\n # On peut laisser le biais comme un vecteur colonne, car en faire une matrice contenant\n # learning_batch_size fois la même colonne. 
Lorsque l'on aura besoin du biais dans les\n # calculs, il y aura mathématiquement parlant un problème de dimension (addition vecteur\n # + matrice), cependant numpy gère ça en additionnant le vecteur de biais à chacune des\n # colonnes de la matrice (broadcast)\n self.input = np.zeros((input_size, learning_batch_size))\n self._activation_function = activation_function\n self._activation_function.vectorize()\n self.activation_levels = np.zeros((output_size, learning_batch_size)) # Chaque colonne\n # correspond à une entrée du batch\n self.output = np.zeros((output_size, learning_batch_size)) # Chaque colonne\n # correspond à une entrée du batch\n\n self.update_weights_value = np.zeros((output_size, input_size + noise_size))\n self.update_bias_value = np.zeros((output_size, 1))\n\n self.noise_input = np.zeros((noise_size, learning_batch_size))\n\n # self.update_weights_value = np.zeros((output_size, input_size))\n\n self.weights_gradients_sum = np.zeros((output_size, input_size + noise_size))\n # self.weights_gradients_sum = np.zeros((output_size, input_size))\n self.bias_gradients_sum = np.zeros((output_size, 1))\n self.weights_moment = np.zeros((output_size, input_size + noise_size))\n # self.weights_moment = np.zeros((output_size, input_size))\n self.bias_moment = np.zeros((output_size, 1))\n self.weights_eta = np.zeros((output_size, input_size + noise_size))\n # self.weights_eta = np.zeros((output_size, input_size)) # need meilleur nom\n self.bias_eta = np.zeros((output_size, 1)) # need meilleur nom\n\n data_interface = DataInterface()\n param_liste = data_interface.read_conf('config_algo_descente.ini', param_desc) # Lecture\n # du fichier de config\n param_liste = data_interface.extract_param(param_liste, nb_exp)\n self.algo_utilise = param_liste['algo_utilise']\n self.eta = param_liste['eta']\n self.momentum = param_liste['momentum']\n self.epsilon = param_liste['epsilon']\n self.gamma = param_liste['gamma']\n self.moment = param_liste['moment']\n self.eta = param_liste['eta']\n self.gamma_1 = param_liste['gamma_1']\n self.gamma_2 = param_liste['gamma_2']\n self.instant = 0", "def __init__(self,m):\n # initialize model parameters\n \n # w is the m x 1 vector of weights.\n # m: num of features\n self.w = np.random.rand(m)", "def __init__(self, input_size, d, r):\n super(SelfAttentiveLayer, self).__init__()\n self.input_size = input_size\n self.d = d\n self.r = r\n self.W_s1 = Parameter(torch.Tensor(self.d, self.input_size))\n self.W_s2 = Parameter(torch.Tensor(self.r, self.d))\n\n self.reset_parameters()", "def __init__(self, in_features, out_features):\n \n ########################\n # PUT YOUR CODE HERE #\n #######################\n mean = 0\n std_dev = 0.0001\n #print(in_features)\n #print(out_features)\n # create weight matrices\n weight = np.random.normal(mean, std_dev, (out_features, in_features))\n #print(weight.shape)\n grad_weight = np.zeros((in_features, out_features))\n\n # create biases (in batches)\n bias = np.zeros(out_features)\n grad_bias = np.zeros(out_features)\n\n self.params = {'weight': weight, 'bias': bias}\n self.grads = {'weight': bias, 'bias': grad_bias}\n\n ########################\n # END OF YOUR CODE #\n #######################", "def inititalize_parameters(self, nodes_of_layers, training_data_size):\n\n\t\tassert(self.layers == len(nodes_of_layers))\n\t\tassert(2 == len(training_data_size))\n\t\tself.w_array = [np.array([0])]\n\t\tself.b_array = [np.array([0])]\n\t\tfeatures, nums = training_data_size\n\n\t\t# initialize the parameters of layer 
one\n\t\tself.w_array.append(np.random.randn(nodes_of_layers[0], features)\n\t\t\t\t\t\t\t* np.sqrt(1 / nums))\n\t\tself.b_array.append(np.zeros((nodes_of_layers[0], 1)))\n\n\t\tfor layer in range(1, self.layers):\n\t\t\tself.w_array.append(np.random.randn(nodes_of_layers[layer],\n\t\t\t\t\t\t\t\tnodes_of_layers[layer - 1])\n\t\t\t\t\t\t\t\t* np.sqrt(1 / nodes_of_layers[layer - 1]))\n\t\t\tself.b_array.append(np.zeros((nodes_of_layers[layer], 1)))\n\t\treturn self.w_array, self.b_array", "def _initialize_parameters(self, layer_dimensions, layer_activations, cost_function):\n self.layer_dims = layer_dimensions\n self.layer_num = len(self.layer_dims)\n self.layer_activations = layer_activations\n self.parameters = {}\n self.cost_function = cost_function\n\n assert(len(self.layer_activations) == len(self.layer_dims),\n 'Number of layers in layer_dimensions: {} and layer_activations: {} are not matching'.format(self.layer_num, len(self.layer_activations)))\n\n for l in range(1, self.layer_num):\n self.parameters['W' + str(l)] = np.random.randn(self.layer_dims[l], self.layer_dims[l-1])\n self.parameters['b' + str(l)] = np.zeros(self.layer_dims[l], 1)", "def __init__(self, z_dim, initailize_weights=True):\n super().__init__()\n self.z_dim = z_dim\n\n self.proprio_encoder = nn.Sequential(\n nn.Linear(8, 32),\n nn.LeakyReLU(0.1, inplace=True),\n nn.Linear(32, 64),\n nn.LeakyReLU(0.1, inplace=True),\n nn.Linear(64, 128),\n nn.LeakyReLU(0.1, inplace=True),\n nn.Linear(128, 2 * self.z_dim),\n nn.LeakyReLU(0.1, inplace=True),\n )\n\n if initailize_weights:\n init_weights(self.modules())", "def _reset_parameters(self):\n\n for p in self.parameters():\n if p.dim() > 1:\n nn.init.xavier_uniform_(p)", "def __init__(self):\n self.num_examples_per_epoch = 99999\n self.optimizer = \"Adam\"\n # Learning rate for the initial phase of training.\n self.initial_learning_rate = 0.0001\n self.learning_rate_decay_factor = 0.5\n self.num_epochs_per_decay = 8.0\n\n # Learning rate when fine tuning the Inception v3 parameters.\n self.train_inception_learning_rate = 0.0001\n\n # If not None, clip gradients to this value.\n self.clip_gradients = 5.0\n\n # How many model checkpoints to keep.\n self.max_checkpoints_to_keep = 5000", "def __init__(self, hidden_size, num_step=2000, print_interval=1000):\n self.num_step = num_step\n self.print_interval = print_interval\n\n # Model parameters initialization\n # Please initiate your network parameters here.\n self.w1 = np.array([[random.random() for i in range(hidden_size)] for j in range(2)]) #2 * hidden_size matrix\n self.w2 = np.array([[random.random() for i in range(hidden_size)] for j in range(hidden_size)])\n self.w3 = np.array([[random.random()] for j in range(hidden_size)])\n \n \n self.learning_rate = 0.05\n \n self.A1 = []\n self.A2 = []\n self.Y = []\n \n ...", "def __init__(self, inputLayerSize, outputLayerSize, \\\n hiddenLayerSize):\n #Network hyperparameters - neurons per layer - **not altered by training**\n self.inputLayerSize = inputLayerSize\n self.outputLayerSize = outputLayerSize\n self.hiddenLayerSize = hiddenLayerSize\n self.num_params = inputLayerSize * hiddenLayerSize + \\\n hiddenLayerSize * outputLayerSize + hiddenLayerSize \\\n + outputLayerSize\n #--Weights--\n #w_ih - weights of synapses linking input -> hidden\n self.w_ih = np.random.randn( self.inputLayerSize, \\\n self.hiddenLayerSize)\n #w_ho - weights of synapses linking hidden -> output\n self.w_ho = np.random.randn( self.hiddenLayerSize, \\\n self.outputLayerSize)\n \n #--Biases--\n #b_h - 
biases of hidden layer\n self.b_h = np.random.randn( self.hiddenLayerSize )\n #b_o - biases of output layer\n self.b_o = np.random.randn( self.outputLayerSize )", "def __init__(self, in_features, out_features):\n\n ########################\n # PUT YOUR CODE HERE #\n #######################\n\n self.in_features = in_features\n self.out_features = out_features\n\n self.__MEAN = 0\n self.__STD = 0.0001\n\n self.params = {\n 'weight': np.random.normal(loc=self.__MEAN, scale=self.__STD, size=(out_features, in_features)), \n 'bias': np.zeros(out_features),\n }\n self.grads = {\n 'weight': None, \n 'bias': None,\n }\n\n self.input_cache = None\n ########################\n # END OF YOUR CODE #\n #######################", "def __init__(self):\n super(CustomNetwork, self).__init__()\n self.fc1 = nn.Linear(28*28, 500)\n self.fc2 = nn.Linear(500, 256)\n self.fc3 = nn.Linear(256, 10)\n self.loss = Loss()", "def __init__(self):\n self.weights = None\n self._epsilon = None\n self._num_training = None\n self._lambda = None\n return None", "def initialisation(self):\n self.create_variables()\n self.create_placeholders()\n self.build_model()\n self.reset_lr(None, True)\n self.build_loss()\n self.initialised = True", "def init_params(self, parameters):\r\n max_epoch = parameters['num_epoch']\r\n momentum_rate = parameters['momentum']\r\n loss = parameters['loss_function']\r\n accuracy = parameters['accuracy']\r\n regularization = parameters['regularization']\r\n batch_size = parameters['batch_size']\r\n optimizer = parameters['optimizer'] if parameters['optimizer'] is not None else 'batch'\r\n self.__init__(max_epoch, optimizer, loss, accuracy, momentum_rate, regularization, batch_size)", "def __init__(self):\n # Number of examples per epoch of training data.\n self.num_examples_per_epoch = None \n\n # Optimizer for training the model.\n self.optimizer = \"SGD\" #default \"SGD\"\n\n # Learning rate for the initial phase of training.\n self.initial_learning_rate = 2.0 # default 2.0\n self.learning_rate_decay_factor = 0.8\n self.num_epochs_per_decay = 4 #default 8\n\n # If not None, clip gradients to this value.\n self.clip_gradients = 5.0\n\n # How many model checkpoints to keep.\n self.max_checkpoints_to_keep = 2", "def __init__(self, config) -> None:\n\n super(Local_MP, self).__init__()\n self.dim = config.dim\n\n self.h_mlp = MLP([self.dim, self.dim])\n\n self.mlp_kj = MLP([3 * self.dim, self.dim])\n self.mlp_ji_1 = MLP([3 * self.dim, self.dim])\n self.mlp_ji_2 = MLP([self.dim, self.dim])\n self.mlp_jj = MLP([self.dim, self.dim])\n\n self.mlp_sbf1 = MLP([self.dim, self.dim, self.dim])\n self.mlp_sbf2 = MLP([self.dim, self.dim, self.dim])\n self.lin_rbf1 = nn.Linear(self.dim, self.dim, bias=False)\n self.lin_rbf2 = nn.Linear(self.dim, self.dim, bias=False)\n\n self.res1 = Res(self.dim)\n self.res2 = Res(self.dim)\n self.res3 = Res(self.dim)\n\n self.lin_rbf_out = nn.Linear(self.dim, self.dim, bias=False)\n\n self.h_mlp = MLP([self.dim, self.dim])\n\n self.y_mlp = MLP([self.dim, self.dim, self.dim, self.dim])\n self.y_W = nn.Linear(self.dim, 1)", "def __init__(self):\n super(enc_clf, self).__init__()\n\n self.fc1 = nn.Linear(784, 1024)\n self.fc2 = nn.Linear(1024, 1024)\n self.fc3 = nn.Linear(1024, 512)\n self.fc4 = nn.Linear(512, 10)", "def __init__(self, layers) -> None:\n super(FeaturizerPhi, self).__init__()\n self.layers = layers", "def set_params(self, params: Dict):\n\n if params['training_instances'] is not None:\n self.training_instances = params['training_instances']\n if params['n'] is not None:\n 
self.n = params['n']\n if params['lda'] is not None:\n self.lda = params['lda']\n if params['verbose'] is not None:\n self.verbose = params['verbose']\n\n self.num_features = self.training_instances[0].get_feature_count()\n self.w = None\n self.b = None", "def __init__(self, input_dim=(3, 32, 32), num_filters=32, filter_size=7,\n\t\t\t\t\t\t\t hidden_dim=100, num_classes=10, weight_scale=1e-3, reg=0.0,\n\t\t\t\t\t\t\t dtype=np.float32):\n\t\tself.params = {}\n\t\tself.reg = reg\n\t\tself.dtype = dtype\n\t\t\n\t\t############################################################################\n\t\t# TODO: Initialize weights and biases for the three-layer convolutional\t\t #\n\t\t# network. Weights should be initialized from a Gaussian with standard\t\t #\n\t\t# deviation equal to weight_scale; biases should be initialized to zero.\t #\n\t\t# All weights and biases should be stored in the dictionary self.params.\t #\n\t\t# Store weights and biases for the convolutional layer using the keys 'W1' #\n\t\t# and 'b1'; use keys 'W2' and 'b2' for the weights and biases of the\t\t\t #\n\t\t# hidden affine layer, and keys 'W3' and 'b3' for the weights and biases\t #\n\t\t# of the output affine layer.\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t #\n\t\t############################################################################\n\t\tC, H, W = input_dim\n\t\tself.params['W1'] = np.random.normal(0, scale=weight_scale, size=(num_filters, C, filter_size, filter_size))\n\t\tself.params['W2'] = np.random.normal(0, scale=weight_scale, size=(int(num_filters*H*W*0.25), hidden_dim))\n\t\tself.params['W3'] = np.random.normal(0, scale=weight_scale, size=(hidden_dim, num_classes))\n\t\t\n\t\tself.params['b1'] = np.zeros(num_filters)\n\t\tself.params['b2'] = np.zeros(hidden_dim)\n\t\tself.params['b3'] = np.zeros(num_classes) \n\t\t############################################################################\n\t\t#\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tEND OF YOUR CODE\t\t\t\t\t\t\t\t\t\t\t\t\t\t #\n\t\t############################################################################\n\t\tfor k, v in self.params.iteritems():\n\t\t\tself.params[k] = v.astype(dtype)", "def __init__(self, num_labels):\n super().__init__()\n self.a = torch.nn.Parameter(torch.randn(num_labels))", "def __init__(self, size, parameters):\n\n self.weights = self.init_weights(size)\n self.alpha = parameters['alpha']\n self.epsilon = parameters['epsilon']\n self.gamma = parameters['gamma']\n self.value = 0.0 #np.random.random()", "def __init__(self, num_inputs=3, hidden_layers=[3, 3], num_outputs=2):\n\n self.num_inputs = num_inputs\n self.hidden_layers = hidden_layers\n self.num_outputs = num_outputs\n\n # create a generic representation of the layers\n layers = [num_inputs] + hidden_layers + [num_outputs]\n\n # create random connection weights for the layers\n weights = []\n for i in range(len(layers) - 1):\n w = np.random.rand(layers[i], layers[i + 1])\n weights.append(w)\n self.weights = weights\n\n activations = []\n\n for i in range(len(layers)):\n a = np.zeros(layers[i])\n activations.append(a)\n self.activations = activations\n\n derivatives = []\n\n for i in range(len(layers) - 1):\n d = np.zeros(layers[i])\n derivatives.append(d)\n self.derivatives = derivatives", "def __init__(self, hparams):\n super(ImagenetTransferLearning, self).__init__()\n self.hparams = hparams\n self.feature_extractor = models.mobilenet_v2(pretrained=True)\n self.feature_extractor.eval()\n\n # Establish classifier\n # self.layer_1 = torch.nn.Linear(hparams[\"input_size\"], 
128)\n self.layer_1 = torch.nn.Linear(1000, 128)\n self.layer_2 = torch.nn.Linear(128, 256)\n self.layer_3 = torch.nn.Linear(256, hparams[\"targets\"])", "def init_parameters(self):\n stdv = 1. / math.sqrt(self.weight.data.size(1))\n self.weight.data.uniform_(-stdv, stdv)\n if self.bias is not None:\n self.bias.data.uniform_(-stdv, stdv)", "def construct_network(self, n_units, n_samples=1, noise_dim=0,\n keep_p=1., nonlinearity=True, init_params=None, name=\"\"):\n print \"constructing network, n_units: \",n_units\n # TODO use kwargs for more elagant solutions to being called by this \n # base class\n assert keep_p ==1. and nonlinearity and noise_dim == 0\n\n assert init_params is None # this is implemented only in the Bayesian flow version of this function\n\n ### Define parameters of the network\n self.weights, self.biases, KL = {}, {}, 0.\n self.layers = []\n # Establish paramters of appromiate posterior over weights and\n # biases.\n for l in range(1, len(n_units)):\n with tf.variable_scope(name+'Layer_%d'%l):\n n_in, n_out = n_units[l-1], n_units[l]\n\n # use non neglidgible uncertainty if we are doing VI\n sigma_init = self.init_sigma_params\n\n w_prior_sigma, b_prior_sigma = self.w_prior_sigma, self.w_prior_sigma\n mu_init_sigma_w, mu_init_sigma_b = np.sqrt(1./(n_in)), 1.\n\n (w_mu, w_logstd), _, w_KL = utils.set_q(name+\"w_%d\"%l,\n sigma_prior=w_prior_sigma, mu_init_sigma=mu_init_sigma_w,\n sigma_init=sigma_init, n_samples=0,\n size=[n_in, n_out], save_summary=True)\n\n # We use same init_sigma for weights and biases.\n (b_mu, b_logstd), _, b_KL = utils.set_q(name+\"b_%d\"%l,\n sigma_prior=b_prior_sigma, mu_init_sigma=mu_init_sigma_b,\n sigma_init=sigma_init, n_samples=0,\n size=[n_out], save_summary=True)\n self.weights['w_%d_mu'%l], self.weights['w_%d_std'%l] = w_mu, tf.nn.softplus(w_logstd)\n self.biases['b_%d_mu'%l], self.biases['b_%d_std'%l] = b_mu, tf.nn.softplus(b_logstd)\n\n self.params += [w_mu, b_mu, w_logstd, b_logstd]\n KL += w_KL + b_KL\n\n # Add an extra dimension to correspond to samples.\n prev_layer = tf.stack([self.x]*n_samples)\n self.layers.append(prev_layer)\n # shape is [n_samples, ?, dim(x)]\n\n ### Define activations in each layer\n for l in range(1,len(n_units)):\n print \"defining activations in layer %d\"%l\n # Multiply with weight matrix and add bias\n prev_layer = tf.reshape(prev_layer, [-1, n_units[l-1]])\n layer_pre_bias = tf.matmul(prev_layer, self.weights['w_%d_mu'%l])\n layer_pre_bias = tf.reshape(layer_pre_bias, [n_samples, -1, n_units[l]])\n # Shape of layer_pre_bias is [n_samples, ?, n_units[l]]\n\n # add mean bias term\n layer = tf.add(layer_pre_bias, self.biases['b_%d_mu'%l][None, None, :])\n\n # Calculate the noise in each hidden unit.\n # must use absolute value of activation because final layer may\n # have negative values.\n layer_var = tf.matmul(tf.reshape(prev_layer**2,[-1,\n n_units[l-1]]), self.weights['w_%d_std'%l]**2)\n layer_var = tf.reshape(layer_var, [n_samples, -1, n_units[l]])\n layer_var += self.biases['b_%d_std'%l]**2\n\n # Now sample noise and add scaled noise.\n # This constitutes the local reparameterization trick.\n eps = tf.random_normal(name='eps_%d'%l, mean=0.,\n stddev=1.0, shape=[n_samples, 1, n_units[l]])\n layer_sigma = tf.sqrt(layer_var)\n layer += layer_sigma*eps\n with tf.name_scope(name+\"Neural_Network_Activations_%d\"%l):\n tf.summary.histogram(name+\"Layer_%d_sigmas\"%l, layer_sigma)\n tf.summary.histogram(name+\"Layer_%d_activations_pre_tanh\"%l, layer)\n\n # Add tanh nonlinearity\n if l != (len(n_units) - 
1): layer = tf.nn.tanh(layer)\n\n with tf.name_scope(name+\"Neural_Network_Activations_%d\"%l):\n tf.summary.histogram(name+\"Layer_%d_activations_post_tanh\"%l,layer)\n\n prev_layer = layer\n self.layers.append(prev_layer)\n self.KL_BNN = KL\n return prev_layer", "def __init__(self):\n logger.debug('Initializing %s model.' % self.__class__.__name__)\n self.dependent_attributes = ['_alpha',\n '_log_like',\n '_gradient','_K',\n '_log_det']\n self._previous_parameters = None # previous parameters from last call\n self.grad_method = None # could be {'finite_difference','adjoint'}\n self.noise_var_constraint = '+ve' # Gaussian noise variance constraint\n return", "def initialize_parameters():\n\n W1 = tf.get_variable('W1', [3,3,3,64], initializer=tf.contrib.layers.xavier_initializer(seed = 0))\n W2 = tf.get_variable('W2', [3,3,64,128], initializer=tf.contrib.layers.xavier_initializer(seed = 0))\n W3 = tf.get_variable('W3', [3,3,128,256], initializer=tf.contrib.layers.xavier_initializer(seed = 0))\n W4 = tf.get_variable('W4', [3,3,256,512], initializer=tf.contrib.layers.xavier_initializer(seed = 0))\n W5 = tf.get_variable('W5', [3,3,512,512], initializer=tf.contrib.layers.xavier_initializer(seed = 0))\n\n ### END CODE HERE ###\n\n parameters = {\"W1\": W1,\n \"W2\": W2,\n \"W3\": W3,\n \"W4\": W4,\n \"W5\": W5\n }\n\n return parameters", "def __init__(self,layers,activations):\n model = utils.buildMLP(layers, activations)\n super().__init__(torch.nn.Sequential(model), nnType='dnn')", "def __init__(self):\n super(Model, self).__init__()\n\n self.batch_size = 200\n self.hidden_size = 264\n self.optimizer = tf.keras.optimizers.Adam(learning_rate=0.0001)\n\n self.dense_1 = tf.keras.layers.Dense(self.hidden_size, activation='relu')\n self.dense_2 = tf.keras.layers.Dense(self.hidden_size, activation='relu')", "def __init__(self, input_dim: int, hidden_layer: bool) -> None:\n\n # --- PLEASE READ --\n # Use the parameters below to train your feed-forward neural network.\n\n # Number of hidden units if hidden_layer = True.\n self.hidden_units = 25\n\n # This parameter is called the step size, also known as the learning rate (lr).\n # See 18.6.1 in AIMA 3rd edition (page 719).\n # This is the value of α on Line 25 in Figure 18.24.\n self.lr = 1e-3\n\n # Line 6 in Figure 18.24 says \"repeat\".\n # This is the number of times we are going to repeat. This is often known as epochs.\n self.epochs = 400\n\n # We are going to store the data here.\n # Since you are only asked to implement training for the feed-forward neural network,\n # only self.x_train and self.y_train need to be used. You will need to use them to implement train().\n # The self.x_test and self.y_test is used by the unit tests. 
Do not change anything in it.\n self.x_train, self.y_train = None, None\n self.x_test, self.y_test = None, None\n\n np.random.seed(0) # Setting random seed for reproducibility.\n\n self.weights, self.biases = None, None # Initializing weights and biases\n\n self.total_layers = (\n None # Initializing the number of layers in the neural network.\n )\n\n \"\"\"\n I have implemented the neural network as two lists, one with the weight matrices between each layer,\n and the other with the bias vectors.\n \"\"\"\n if hidden_layer:\n self.weights = [\n np.random.randn(self.hidden_units, input_dim),\n np.random.randn(1, self.hidden_units),\n ]\n self.biases = [np.random.randn(self.hidden_units, 1), np.random.randn(1, 1)]\n self.total_layers = 3\n else:\n self.weights = [np.random.randn(1, input_dim)]\n self.biases = [np.random.randn(1, 1)]\n self.total_layers = 2\n\n self.sigmoid = lambda x: 1.0 / (\n 1.0 + np.exp(-x)\n ) # The sigmoid activation function: 1 / (1 + e^(-x))\n\n self.sigmoid_derivative = lambda x: self.sigmoid(x) * (\n 1 - self.sigmoid(x)\n ) # The derivative of the sigmoid activation function to be used in the backpropagation algorithm.", "def __init__(self, config_data, dims, layer_num, params):\n self.use_bias = params['use_bias']\n self.in_dims = params['in_dims']\n self.out_dims = params['out_dims']\n self.use_bias = params['use_bias']\n self.num_outputs = params['num_outputs']\n self.dims = dims\n self.layer_num = layer_num\n self.activation = config_data[\"activation\"]\n self.layer_type = config_data[\"type\"]\n self.name = config_data[\"name\"]\n self.params = []\n # following two parameters not used in dense layers\n # they will be set to one in dense layers\n self.kernel_size = params['kernel_size'] # only used in conv layers\n self.stride = params['stride'] # only used in conv layers", "def _setup(self) -> None:\n #TODO: type\n self.activation = self.params['activation']\n\n self.batchsize: int = self.params['batchsize']\n\n self.input_shape: Tuple[int,int,int] = self.params['input_shape']\n\n self.d: int = self.input_shape[1]\n assert(not self.d == 0)\n\n self.n: int = int(sqrt(self.input_shape[2]))\n assert(not self.n == 0)\n\n self.dtype: type = self.params['dtype']\n\n # initialize weights\n self.W: List[tf.Tensor] = []\n \n for i in range(3):\n #TODO: type\n w_init = self.params['initializer_w']\n if self.params['initializer_w' + str(i)] is not None:\n w_init = self.params['initializer_w' + str(i)]\n\n w_stddev: float = self.params['stddev_w']\n if self.params['stddev_w' + str(i)] is not None:\n w_stddev = self.params['stddev_w' + str(i)]\n\n self.W.append(tf.get_variable(\"weights_\" + str(i),\n shape = (self.d, (self.d if i < 2 else 2 * self.d)),\n dtype = self.dtype,\n initializer = w_init(stddev=w_stddev)))\n\n #TODO: type\n b_init = self.params['initializer_b']\n b_stddev = self.params['stddev_b']\n self.B: tf.Tensor = tf.get_variable(\"biases\", shape = (1, self.d, 1),\n dtype = self.dtype,\n initializer = b_init(stddev=b_stddev))\n\n # create/load expand matrix\n with tf.variable_scope(tf.get_variable_scope(), reuse=tf.AUTO_REUSE):\n self.expand: tf.Tensor = tf.get_variable(\n \"expand\" + str(self.n),\n shape = (self.n, self.n * self.n),\n dtype = self.dtype,\n initializer = tf.constant_initializer(make_expand(self.n)))\n\n # create/load tile matrix\n tile: np.ndarray = np.array([([1] + [0]*(self.n-1))*self.n])\n for i in range(1, self.n):\n tile = np.append(tile, [([0]*i + [1] + [0]*(self.n-1-i))*self.n], 0)\n\n self.tile: tf.Tensor = tf.constant(tile, 
self.dtype)", "def __init__(self, input_shape, n_out, ini_type=\"plain\"):\n\n self.m = input_shape[1] # number of examples in training data\n # `params` store weights and bias in a python dictionary\n self.params = self.initialize_parameters(input_shape[0], n_out, ini_type) # initialize weights and bias\n self.Z = np.zeros((self.params['W'].shape[0], input_shape[1])) # create space for resultant Z output", "def __init__(self,args, variance_epsilon=1e-12):\n super(BERTLayerNorm, self).__init__()\n self.gamma = nn.Parameter(torch.ones(args.input_dim))\n self.beta = nn.Parameter(torch.zeros(args.input_dim))\n self.variance_epsilon = variance_epsilon", "def __init__(self):\r\n # A dummy layer does nothing\r\n self.weights = np.zeros(shape=(input.shape[1], 10))\r\n bias = np.zeros(shape=(10,))\r\n pass", "def init_parameters(module: nn.Module):\n for m in module.modules():\n if isinstance(m, nn.Conv2d):\n # todo: check if fan_out is valid\n nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')\n elif isinstance(m, nn.BatchNorm2d):\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)", "def __init__(self):\n self.layers = []\n self.best_loss = None", "def _init_layers(self) -> None:\n self.self_attn = MultiheadAttention(**self.self_attn_cfg)\n self.embed_dims = self.self_attn.embed_dims\n self.ffn = FFN(**self.ffn_cfg)\n norms_list = [\n build_norm_layer(self.norm_cfg, self.embed_dims)[1]\n for _ in range(2)\n ]\n self.norms = ModuleList(norms_list)", "def _reset_parameters(self) -> None:\n self._setup_input = {\n \"P\": csc_matrix(2.0 * self.opt.P(self.p).toarray()),\n \"q\": self.opt.q(self.p).toarray().flatten(),\n }\n if self.opt_type in CONSTRAINED_OPT:\n A = self.opt.A(self.p)\n b = self.opt.b(self.p)\n self._setup_input[\"A\"] = csc_matrix(\n cs.vertcat(self.opt.M(self.p), A, -A).toarray()\n )\n self._setup_input[\"l\"] = (\n cs.vertcat(-self.opt.c(self.p), -b, b).toarray().flatten()\n )", "def __init__(self, xi_dim, u_dim, noise_dim, A, B , n_hidden=[50, 50,], diag=False,\n\t\t\tact_fct=tf.nn.tanh, R_init=10, S_init=[0.02, 0.02, 0.5, 0.5], noise_scale=1.):\n\t\tPolicy.__init__(self, xi_dim, u_dim)\n\n\t\t_n_output = xi_dim * xi_dim + xi_dim + 1 if not diag else xi_dim + xi_dim + 1\n\n\t\tself._nn = MLP(\n\t\t\tn_input=xi_dim + noise_dim,\n\t\t\tn_output=_n_output,\n\t\t\tn_hidden=n_hidden,\n\t\t\tbatch_size_svi=1,\n\t\t\tact_fct=act_fct\n\t\t)\n\n\t\tself._diag = diag\n\n\t\tself._noise_scale = noise_scale\n\n\t\tself._S_init = S_init\n\t\tself._R_init = R_init\n\n\t\tself._A = A\n\t\tself._B = B\n\n\t\tself._noise_dim = noise_dim", "def set_parameters(self, We1,be1, We2, be2, We3, be3, Wmu, bmu, Wstd, bstd, Wd1, bd1, Wd2, bd2, Wd3, bd3):\r\n self.en_fc1.weight=nn.Parameter(We1)\r\n self.en_fc1.bias=nn.Parameter(be1)\r\n \r\n self.en_fc2.weight=nn.Parameter(We2)\r\n self.en_fc2.bias=nn.Parameter(be2)\r\n \r\n self.en_fc3.weight=nn.Parameter(We3)\r\n self.en_fc3.bias=nn.Parameter(be3)\r\n \r\n self.en_mu.weight=nn.Parameter(Wmu)\r\n self.en_mu.bias=nn.Parameter(bmu)\r\n \r\n self.en_log.weight=nn.Parameter(Wstd)\r\n self.en_log.bias=nn.Parameter(bstd)\r\n \r\n self.de_fc1.weight=nn.Parameter(Wd1)\r\n self.de_fc1.bias=nn.Parameter(bd1)\r\n \r\n self.de_fc2.weight=nn.Parameter(Wd2)\r\n self.de_fc2.bias=nn.Parameter(bd2)\r\n \r\n self.de_fc3.weight=nn.Parameter(Wd3)\r\n self.de_fc3.bias=nn.Parameter(bd3)\r\n \r\n return", "def setParameters(self):\n\n # Set the parameters\n self.taux = 24.2\n self.mu = 0.23\n self.G = 33.75\n self.alpha_0 = 0.05\n self.delta = 
0.0075\n self.p = 0.50\n self.I0 = 9500.0\n self.kparam = 0.55", "def _reset_params(self):\n for p in self.parameters():\n if p.dim() > 1:\n torch.nn.init.xavier_normal_(p)" ]
[ "0.77758974", "0.7704682", "0.76280135", "0.7340647", "0.7220832", "0.71430016", "0.7137226", "0.7131746", "0.7091034", "0.70712316", "0.70580506", "0.6996698", "0.6996188", "0.6979724", "0.68992215", "0.6892215", "0.6837597", "0.6826578", "0.67905426", "0.67721635", "0.6760269", "0.6760269", "0.6760269", "0.6756145", "0.6756145", "0.6746112", "0.6745958", "0.67341155", "0.6729069", "0.6728351", "0.6724868", "0.67174083", "0.66657823", "0.6640235", "0.66365737", "0.6617036", "0.66111124", "0.66108376", "0.6603801", "0.6600538", "0.6569749", "0.6565368", "0.65619004", "0.656076", "0.6551759", "0.6532412", "0.65323734", "0.65319985", "0.6527996", "0.65264994", "0.65232337", "0.65176105", "0.6516842", "0.6516771", "0.6513679", "0.65134335", "0.65124726", "0.6509961", "0.6505126", "0.650347", "0.64997065", "0.64908653", "0.6487027", "0.6485823", "0.64851123", "0.6484645", "0.64795274", "0.64742893", "0.6460345", "0.6455423", "0.6450485", "0.64475125", "0.6446646", "0.6445486", "0.6440566", "0.64397794", "0.64395255", "0.643584", "0.6435366", "0.6429034", "0.64286345", "0.6424681", "0.64245456", "0.6422975", "0.64206153", "0.6419737", "0.6418744", "0.64124745", "0.6412", "0.64099425", "0.6409645", "0.64092654", "0.6406376", "0.64033854", "0.6397234", "0.63950366", "0.6387343", "0.63866997", "0.63847053", "0.6384115", "0.63773817" ]
0.0
-1
This class is made to support a variable number of layers.
def __init__(self, numpy_rng, theano_rng=None, n_ins=24,  ###################################################
             hidden_layers_sizes=[24,18,12,6], n_outs=2):

    self.sigmoid_layers = []
    self.rbm_layers = []
    self.params = []
    self.n_layers = len(hidden_layers_sizes)

    assert self.n_layers > 0

    if not theano_rng:
        theano_rng = MRG_RandomStreams(numpy_rng.randint(2 ** 30))

    # allocate symbolic variables for the data
    self.x = T.matrix('x')   # the data is presented as rasterized images
    self.y = T.ivector('y')  # the labels are presented as a 1D vector
                             # of [int] labels
    self.z = T.matrix('z')
    #print self.x.type
    # end-snippet-1
    # The DBN is an MLP, for which all weights of intermediate
    # layers are shared with a different RBM. We will first
    # construct the DBN as a deep multilayer perceptron, and when
    # constructing each sigmoidal layer we also construct an RBM
    # that shares weights with that layer. During pretraining we
    # will train these RBMs (which will lead to changing the
    # weights of the MLP as well). During finetuning we will finish
    # training the DBN by doing stochastic gradient descent on the
    # MLP.
    for i in range(self.n_layers):
        # construct the sigmoidal layer

        # the size of the input is either the number of hidden
        # units of the layer below or the input size if we are on
        # the first layer
        if i == 0:
            input_size = n_ins
        else:
            input_size = hidden_layers_sizes[i - 1]

        # the input to this layer is either the activation of the
        # hidden layer below or the input of the DBN if you are on
        # the first layer
        if i == 0:
            layer_input = self.x
        else:
            layer_input = self.sigmoid_layers[-1].output
        self.z = layer_input

        sigmoid_layer = HiddenLayer(rng=numpy_rng,
                                    input=layer_input,
                                    n_in=input_size,
                                    n_out=hidden_layers_sizes[i],
                                    activation=T.nnet.sigmoid)

        # add the layer to our list of layers
        self.sigmoid_layers.append(sigmoid_layer)

        # it's arguably a philosophical question... but we are
        # going to only declare that the parameters of the
        # sigmoid_layers are parameters of the DBN. The visible
        # biases in the RBM are parameters of those RBMs, but not
        # of the DBN.
        self.params.extend(sigmoid_layer.params)

        # Construct an RBM that shares weights with this layer
        rbm_layer = RBM(numpy_rng=numpy_rng,
                        theano_rng=theano_rng,
                        input=layer_input,
                        n_visible=input_size,
                        n_hidden=hidden_layers_sizes[i],
                        W=sigmoid_layer.W,
                        hbias=sigmoid_layer.b)
        self.rbm_layers.append(rbm_layer)
        #print(type(self.sigmoid_layers[-1].output))

    # We now need to add a logistic layer on top of the MLP
    self.logLayer = LogisticRegression(
        input=self.sigmoid_layers[-1].output,
        n_in=hidden_layers_sizes[-1],
        n_out=n_outs)
    self.params.extend(self.logLayer.params)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def build_layers(self):\n raise NotImplementedError", "def layers(self): # -> LayerView:\n ...", "def num_layers(self): # -> int:\n ...", "def layers(self, x):\n raise NotImplementedError", "def __init__(self, layers):\n\n\t\tself.layers = layers", "def run(layers):", "def __init__(self):\n\n for layer in self._layer_class_map:\n setattr(self, layer, self._layer_class_map[layer]())", "def __init__(self):\n super(GatherLastLayer, self).__init__()", "def multilayer(self, n_layers):\n for i in range(1, n_layers+1):\n print(\"Layer nr: \", i)\n # For the first layer, take the input image\n if i == 1:\n # Convolution\n self.convolution(i, self.img)\n # Use the output of the convolution as input of the pooling layer\n img = Image.open(str(\"conv\" + str(i) + \".png\"))\n # Pooling\n self.pool(img.convert('RGB'), i)\n else:\n # Use the output of the pooling as input of the convolution layer\n img = Image.open(str(\"conv_pool\" + str(i-1) + \".png\"))\n # Convolution\n self.convolution(i, img.convert(\"RGB\"))\n # Use the output of the convolution as input of the pooling layer\n img = Image.open(str(\"conv\"+ str(i) + \".png\"))\n # Pooling\n self.pool(img.convert('RGB'), i)", "def __init__(self, layer_nest):\n self._layer_nest = layer_nest\n super().__init__()", "def UpdateLayers(self):\n pass", "def __init__(self, layerNeurons, initialWeights = None, layerTypes=None, **kwargs):\r\n \r\n # Ensure that there is at-least one input and one output layer in the network\r\n assert len(layerNeurons)>1, \"At least one input layer and one output layer is needed\"\r\n \r\n # Get the total number of weights needed in the network\r\n totalWeightCount = NeuralNetwork.getSizeOfWeightVector(layerNeurons)\r\n \r\n # Initialise the weights with the initializer or random values\r\n if initialWeights is None:\r\n self.weights = np.random.uniform(-1/np.sqrt(layerNeurons[0]), 1/np.sqrt(layerNeurons[0]), totalWeightCount)\r\n else:\r\n assert len(initialWeights) == totalWeightCount, (\"Length of initial weight matrix incorrect. 
You need \"+str(totalWeightCount)+\" weights\")\r\n self.weights = np.array(initialWeights, dtype = np.float64) \r\n \r\n # create an empty array of layers\r\n self.layers = []\r\n layerBlockStart = 0\r\n \r\n if layerTypes is None or len(layerTypes)<(len(layerNeurons)-1):\r\n layerTypes=[NetworkLayer]*(len(layerNeurons)-1)\r\n \r\n for layerInputDimention, layerOutputDimention, layerType in zip(layerNeurons, layerNeurons[1:], layerTypes):\r\n # initialise each layer with its input and output dimentions and bi-directional pointers to the relivant weights\r\n layerBlockEnd = layerBlockStart+(layerInputDimention*layerOutputDimention)\r\n layerBiasEnd = layerBlockEnd+layerOutputDimention\r\n newLayer = layerType(layerInputDimention, layerOutputDimention, \r\n self.weights[..., layerBlockStart:layerBlockEnd], \r\n self.weights[..., layerBlockEnd:layerBiasEnd], **kwargs)\r\n self.layers.append(newLayer)\r\n \r\n layerBlockStart = layerBiasEnd\r\n \r\n # Tell the output later to use a different function to calculate the delta \r\n newLayer.calcDelta = newLayer.calcDeltaOutputLayer", "def _init_layers(self) -> None:\n weight_nums, bias_nums = [], []\n for i in range(self.num_layers):\n if i == 0:\n weight_nums.append((self.in_channels + 2) * self.feat_channels)\n bias_nums.append(self.feat_channels)\n elif i == self.num_layers - 1:\n weight_nums.append(self.feat_channels * 1)\n bias_nums.append(1)\n else:\n weight_nums.append(self.feat_channels * self.feat_channels)\n bias_nums.append(self.feat_channels)\n\n self.weight_nums = weight_nums\n self.bias_nums = bias_nums\n self.num_params = sum(weight_nums) + sum(bias_nums)", "def custom_layer_factory(self):\n raise NotImplementedError(\n '[custom_layer_factory] must be implemented by the subclass.')", "def draw_layers(self):\n\t\tfor z in xrange(0,16):\n\t\t\t#create surface for this layer\n\t\t\tsrf = pygame.Surface((16,128))\n\t\t\tfor x in xrange(0,16):\n\t\t\t\tfor y in xrange(0,128):\n\t\t\t\t\tv = self.data[ self.xyz_to_offset( x,y,z) ]\n\t\t\t\t\tif v != 0:\n\t\t\t\t\t\tsrf.fill( BLOCKS.get(v, [0,0])[1], \t(x, 127 -y, 1, 1 ))\n\t\t\t#save layer to dict for this chunk\n\t\t\tself.layers[z] = srf", "def __init__(self, n_input, n_output, hidden_layer_size, reg):\n self.reg = reg\n self.fulllayer1 = FullyConnectedLayer(n_input, hidden_layer_size)\n self.reglayer1 = ReLULayer()\n self.fulllayer2 = FullyConnectedLayer(hidden_layer_size, n_output)", "def __init__(\n self,\n image_size: tuple,\n out_channels: int,\n num_channel_initial: int,\n extract_levels: List[int],\n out_kernel_initializer: str,\n out_activation: str,\n name: str = \"LocalNet\",\n **kwargs,\n ):\n super().__init__(\n image_size=image_size,\n out_channels=out_channels,\n num_channel_initial=num_channel_initial,\n out_kernel_initializer=out_kernel_initializer,\n out_activation=out_activation,\n name=name,\n **kwargs,\n )\n\n # save parameters\n self._extract_levels = extract_levels\n self._extract_max_level = max(self._extract_levels) # E\n self._extract_min_level = min(self._extract_levels) # D\n\n # init layer variables\n num_channels = [\n num_channel_initial * (2 ** level)\n for level in range(self._extract_max_level + 1)\n ] # level 0 to E\n self._downsample_blocks = [\n layer.DownSampleResnetBlock(\n filters=num_channels[i], kernel_size=7 if i == 0 else 3\n )\n for i in range(self._extract_max_level)\n ] # level 0 to E-1\n self._conv3d_block = layer.Conv3dBlock(filters=num_channels[-1]) # level E\n\n self._upsample_blocks = [\n 
layer.LocalNetUpSampleResnetBlock(num_channels[level])\n for level in range(\n self._extract_max_level - 1, self._extract_min_level - 1, -1\n )\n ] # level D to E-1\n\n self._extract_layers = [\n # if kernels are not initialized by zeros, with init NN, extract may be too large\n layer.Conv3dWithResize(\n output_shape=image_size,\n filters=out_channels,\n kernel_initializer=out_kernel_initializer,\n activation=out_activation,\n )\n for _ in self._extract_levels\n ]", "def _get_layers(self) :\n \n return self._layers", "def __init__(self, feature_length, layersNumber):\n super(SubGraph, self).__init__()\n self.layers_number = layersNumber\n self.layers = nn.ModuleList([SubGraphLayer(feature_length * (2 ** i)) for i in range(self.layers_number)])", "def __initAvailableLayerTypes(self):\n from backend.caffe.path_loader import PathLoader\n caffe = PathLoader().importCaffe()\n layerNameMainParts = list(caffe.layer_type_list())\n\n res = {}\n paramsPerLayerType = {}\n\n # calculate common parameters of all layer types\n # by removing all which will be used for one specific layer type only\n # also keep in mind which ones have been removed to readd them to specific layers\n commonParams = self._availableParameterGroupDescriptors[\"LayerParameter\"].parameter() #use .parameter() on purpose\n layerSpecificParameters = set()\n for nameMainPart in layerNameMainParts:\n specificParamsName = [nameMainPart + \"Parameter\"]\n if moreLayerNameParameter.has_key(nameMainPart):\n specificParamsName.append( moreLayerNameParameter[nameMainPart])\n paramsPerLayerType[nameMainPart] = {}\n for key, value in commonParams.items():\n if value.isParameterGroup() and value.parameterName() in specificParamsName:\n paramsPerLayerType[nameMainPart][key] = value\n layerSpecificParameters.add(key)\n\n\n # special case: shared params for loss layers\n key = \"loss_param\"\n value = commonParams[key]\n del commonParams[key]\n for nameMainPart in layerNameMainParts:\n if LayerType.getCategoryByName(nameMainPart) == LayerType.CATEGORY_LOSS:\n paramsPerLayerType[nameMainPart][key] = value\n\n # TODO is there a special case for the TransformationParameter?\n\n # create each layer type after one another\n for nameMainPart in layerNameMainParts:\n\n # add common params to the specific ones\n layerTypeParam = paramsPerLayerType[nameMainPart].keys()\n paramsPerLayerType[nameMainPart].update(commonParams)\n\n irrelevant = layerSpecificParameters.difference(layerTypeParam)\n res[nameMainPart] = LayerType(nameMainPart, paramsPerLayerType[nameMainPart], layerTypeParam, irrelevant)\n\n self._commonParams = commonParams\n self._availableLayerTypes = res", "def __init__(self, **kwargs):\n base.Layer.__init__(self, **kwargs)\n self._group = self.spec['group']\n self._conv_args = dict(self.spec)\n self._conv_args['name'] = self.spec['name'] + '_sub'\n del self._conv_args['group']\n self._bottom_sub = [base.Blob() for _ in range(self._group)]\n self._top_sub = [base.Blob() for _ in range(self._group)]\n self._conv_layers = None\n self._blocksize = 0\n self._num_kernels = self.spec['num_kernels']\n # create the convolution layers\n self._conv_layers = [\n convolution.ConvolutionLayer(**self._conv_args)\n for i in range(self._group)]\n self._param = sum((layer.param() for layer in self._conv_layers), [])\n return", "def layer_factory(in_layer, layer_type, **kwargs):\n\n gpu = True if 'gpu' in theano.config.device else False\n if gpu:\n from lasagne.layers import dnn\n if layer_type == 'conv':\n func = dnn.Conv2DDNNLayer if gpu else 
layers.Conv2DLayer\n defaults = {'border_mode':'same','W':lasagne.init.GlorotUniform()} ### dimshuffle=TRUE!!!\n elif layer_type == 'dense':\n func = layers.DenseLayer\n defaults = {'W':lasagne.init.Uniform()}\n elif layer_type == 'maxout':\n defaults = {}\n func = dnn.MaxPool2DDNNLayer if gpu else layers.MaxPool2DLayer\n else:\n return -1\n\n layer_params = {}\n for key,val in kwargs.iteritems():\n if layer_type in key:\n new_key = key.split(layer_type)[1]\n new_key = new_key[1:]\n layer_params[new_key] = val\n\n if layer_type == 'maxout':\n in_layers = []\n for i in xrange(layer_params['K']):\n tmp_layer = layers.DenseLayer(in_layer,num_units=layer_params['num_units'],W=lasagne.init.GlorotUniform(),nonlinearity=lasagne.nonlinearities.linear)\n tmp_layer = layers.ReshapeLayer(tmp_layer,([0],1,[1]))\n in_layers.append(tmp_layer)\n in_layer = lasagne.layers.ConcatLayer(tuple(in_layers))\n orig_nonlin = lasagne.nonlinearities.identity\n layer_params.pop('K',None)\n layer_params.pop('num_units',None)\n\n name = kwargs['name'] + '_' if 'name' in kwargs else 'NONE_'\n\n if 'batch_norm' in kwargs and kwargs['batch_norm'] and layer_type != 'maxout':\n orig_nonlin = layer_params['nonlinearity']\n layer_params['nonlinearity'] = lasagne.nonlinearities.linear\n\n # remove user-configurations from defaults\n for key in layer_params.keys():\n if key in defaults:\n defaults.pop(key,None)\n\n all_params = dict(defaults,**layer_params)\n\n # new lasagne!\n if 'border_mode' in all_params:\n all_params['pad'] = all_params['border_mode']\n del all_params['border_mode']\n\n output_layer = func(in_layer,**all_params)\n\n if 'batch_norm_f0k' in kwargs and kwargs['batch_norm_f0k']:\n output_layer = lasagne.layers.normalization.batch_norm(output_layer)\n\n if 'batch_norm' in kwargs and kwargs['batch_norm']:\n output_layer = BatchNormalizationLayer(output_layer,nonlinearity=orig_nonlin,name=name + 'batch')\n\n if 'maxpool' in kwargs and kwargs['maxpool']:\n st = kwargs['maxpool_st'] if 'maxpool_st' in kwargs else None\n ignore_borders = kwargs['ignore_borders'] if 'ignore_borders' in kwargs else False\n output_layer = lasagne.layers.MaxPool2DLayer(output_layer,pool_size=kwargs['maxpool_ds'],stride=st,name=name + 'maxpool',ignore_border=ignore_borders)\n\n return output_layer", "def __init__(self, **kwargs):\n super(ForwardLayersBase, self).__init__()\n pass", "def __init__(self, weights=[], alphas=[]):\n self._layers = [Layer(w, a) for w, a in zip(weights, alphas)]", "def setup_layer_structure(self):\n self.page_rank_convolution_1 = self.layer(self.feature_number, self.args.layers[0], self.args.iterations, self.args.alpha)\n self.page_rank_convolution_2 = self.layer(self.args.layers[0], self.args.layers[1], self.args.iterations, self.args.alpha)\n self.page_rank_convolution_3 = self.layer(self.args.layers[1], self.class_number, self.args.iterations, self.args.alpha)", "def __init__(self, in_channels=3, in_channels1=3, n_classes=21):\n super(SegNet, self).__init__()\n\n self.layer_1 = SegnetLayer_Encoder(in_channels, 64, 2)\n self.layer_2 = SegnetLayer_Encoder(64, 128, 2)\n self.layer_3 = SegnetLayer_Encoder(128, 256, 3)\n self.layer_4 = SegnetLayer_Encoder(256, 512, 3)\n self.layer_5 = SegnetLayer_Encoder(512, 512, 3)\n\n self.layer_6 = SegnetLayer_Decoder(512, 512, 3)\n self.layer_7 = SegnetLayer_Decoder(512, 256, 3)\n self.layer_8 = SegnetLayer_Decoder(256, 128, 3)\n self.layer_9 = SegnetLayer_Decoder(128, 64, 2)\n self.layer_10 = SegnetLayer_Decoder(64, n_classes, 2)\n\n self.layer_11 = 
SegnetLayer_Encoder(in_channels1, 64, 2)\n self.layer_12 = SegnetLayer_Encoder(64, 128, 2)\n self.layer_13 = SegnetLayer_Encoder(128, 256, 3)\n self.layer_14 = SegnetLayer_Encoder(256, 512, 3)\n self.layer_15 = SegnetLayer_Encoder(512, 512, 3)\n\n self.layer_16 = SegnetLayer_Decoder(512, 512, 3)\n self.layer_17 = SegnetLayer_Decoder(512, 256, 3)\n self.layer_18 = SegnetLayer_Decoder(256, 128, 3)\n self.layer_19 = SegnetLayer_Decoder(128, 64, 2)\n self.layer_110 = SegnetLayer_Decoder(64, n_classes, 2)\n\n self.layer_1110 = UNet_Decoder_Particular(n_classes * 2, n_classes)", "def layers(model_size):\n if model_size == 'tiny':\n return (\n ('linear', 100),\n ('activation', 'relu'))\n elif model_size == 'small':\n return (\n ('conv2d', (4, 4), 16, 'VALID', 2),\n ('activation', 'relu'),\n ('conv2d', (4, 4), 32, 'VALID', 1),\n ('activation', 'relu'),\n ('linear', 100),\n ('activation', 'relu'))\n elif model_size == 'medium':\n return (\n ('conv2d', (3, 3), 32, 'VALID', 1),\n ('activation', 'relu'),\n ('conv2d', (4, 4), 32, 'VALID', 2),\n ('activation', 'relu'),\n ('conv2d', (3, 3), 64, 'VALID', 1),\n ('activation', 'relu'),\n ('conv2d', (4, 4), 64, 'VALID', 2),\n ('activation', 'relu'),\n ('linear', 512),\n ('activation', 'relu'),\n ('linear', 512),\n ('activation', 'relu'))\n elif model_size == 'large_200':\n return (\n ('conv2d', (3, 3), 64, 'SAME', 1),\n ('activation', 'relu'),\n ('conv2d', (3, 3), 64, 'SAME', 1),\n ('activation', 'relu'),\n ('conv2d', (3, 3), 128, 'SAME', 2),\n ('activation', 'relu'),\n ('conv2d', (3, 3), 128, 'SAME', 1),\n ('activation', 'relu'),\n ('conv2d', (3, 3), 128, 'SAME', 1),\n ('activation', 'relu'),\n ('linear', 200),\n ('activation', 'relu'))\n elif model_size == 'large':\n return (\n ('conv2d', (3, 3), 64, 'SAME', 1),\n ('activation', 'relu'),\n ('conv2d', (3, 3), 64, 'SAME', 1),\n ('activation', 'relu'),\n ('conv2d', (3, 3), 128, 'SAME', 2),\n ('activation', 'relu'),\n ('conv2d', (3, 3), 128, 'SAME', 1),\n ('activation', 'relu'),\n ('conv2d', (3, 3), 128, 'SAME', 1),\n ('activation', 'relu'),\n ('linear', 512),\n ('activation', 'relu'))\n else:\n raise ValueError('Unknown model: \"{}\"'.format(model_size))", "def __init__(self):\n super(LinearAggregationLayer, self).__init__()", "def __init__(self, layerNeurons, numberOfLayers, initialWeights = None, lowerBound = None, upperBound = None):\r\n \r\n # Ensure that there is at-least one input and one output layer in the network\r\n assert len(layerNeurons) > 1, \"At least one input layer and one output layer is needed\"\r\n \r\n # Get the total number of weights needed in the network\r\n totalWeightCount = NeuralNetwork.getSizeOfWeightVector(layerNeurons)*numberOfLayers\r\n \r\n # Initialise the weights with the initialiser or random values\r\n if initialWeights is None:\r\n if lowerBound is None:\r\n lowerBound=-1/np.sqrt(layerNeurons[0])\r\n if upperBound is None:\r\n upperBound=1/np.sqrt(layerNeurons[0])\r\n self.weights = np.random.uniform(lowerBound, upperBound, totalWeightCount)\r\n else:\r\n assert initialWeights.size == totalWeightCount, (\"Length of initial weight matrix incorrect. 
You need \"+str(totalWeightCount)+\" weights\")\r\n self.weights = initialWeights.view()\r\n \r\n self.weights.shape = (numberOfLayers, -1)\r\n # create an empty array of layers\r\n self.layers = []\r\n layerBlockStart = 0\r\n \r\n for layerInputDimention, layerOutputDimention in zip(layerNeurons, layerNeurons[1:]):\r\n # initialise each layer with its input and output dimentions and bi-directional pointers to the relivant weights\r\n layerBlockEnd = layerBlockStart+(layerInputDimention*layerOutputDimention)\r\n layerBiasEnd = layerBlockEnd+layerOutputDimention\r\n newLayer = batchNetworkLayer(layerInputDimention, layerOutputDimention, numberOfLayers, \r\n self.weights[..., :, layerBlockStart:layerBlockEnd], \r\n self.weights[..., :, layerBlockEnd:layerBiasEnd])\r\n self.layers.append(newLayer)\r\n \r\n layerBlockStart = layerBiasEnd", "def layers(self):\n return self['layers']", "def __init__(self, layers, input_size):\n super(ConvNetMaker, self).__init__()\n self.conv_layers = []\n self.fc_layers = []\n # h, w, d = 32, 32, 3\n h, w, d = input_size, input_size, 3\n previous_layer_filter_count = 3\n previous_layer_size = h * w * d\n num_fc_layers_remained = len([1 for l in layers if l.startswith('FC')])\n for layer in layers:\n if layer.startswith('Conv'):\n filter_count = int(layer[4:])\n self.conv_layers += [\n nn.Conv2d(previous_layer_filter_count,\n filter_count,\n kernel_size=3,\n padding=1),\n nn.BatchNorm2d(filter_count),\n nn.ReLU(inplace=True)\n ]\n\n previous_layer_filter_count = filter_count\n d = filter_count\n previous_layer_size = h * w * d\n elif layer.startswith('MaxPool'):\n self.conv_layers += [nn.MaxPool2d(kernel_size=2, stride=2)]\n h, w = int(h / 2.0), int(w / 2.0)\n previous_layer_size = h * w * d\n elif layer.startswith('FC'):\n num_fc_layers_remained -= 1\n current_layer_size = int(layer[2:])\n if num_fc_layers_remained == 0:\n self.fc_layers += [nn.Linear(previous_layer_size,\n current_layer_size)]\n else:\n self.fc_layers += [nn.Linear(previous_layer_size,\n current_layer_size),\n nn.ReLU(inplace=True)]\n previous_layer_size = current_layer_size\n\n conv_layers = self.conv_layers\n fc_layers = self.fc_layers\n self.conv_layers = nn.Sequential(*conv_layers)\n self.fc_layers = nn.Sequential(*fc_layers)", "def num_layers(self):\n\n return 2 + self.num_hidden_layers", "def get_layer(self, layer_id):\n (layer_class, layer_name, dim, \n dropout_prob, dropout_scale) = self.select_layer(layer_id)\n if layer_class == 'maxout':\n (num_units,\n num_pieces,\n pool_stride,\n randomize_pools,\n irange,\n sparse_init,\n sparse_stdev,\n include_prob,\n init_bias,\n W_lr_scale,\n b_lr_scale,\n max_col_norm,\n max_row_norm) = self.select_layer_maxout(layer_id)\n layer = Maxout(num_units,\n num_pieces,\n pool_stride,\n randomize_pools,\n irange,\n sparse_init,\n sparse_stdev,\n include_prob,\n init_bias,\n W_lr_scale,\n b_lr_scale,\n max_col_norm,\n max_row_norm)\n elif layer_class == 'linear':\n (init_id, init_bias, \n W_lr_scale, b_lr_scale, \n max_row_norm, max_col_norm) = self.select_layer_linear(layer_id)\n init_weights = self.get_init(init_id)\n layer = Linear(dim=dim, layer_name=layer_name, \n init_weights=init_weights, init_bias=init_bias,\n W_lr_scale=W_lr_scale, b_lr_scale=b_lr_scale, \n max_row_norm=max_row_norm, \n max_col_norm=max_col_norm)\n elif layer_class == 'tanh':\n (init_id, init_bias, \n W_lr_scale, b_lr_scale, \n max_row_norm, max_col_norm) = self.select_layer_tanh(layer_id)\n init_weights = self.get_init(init_id)\n layer = Tanh(dim=dim, layer_name=layer_name, 
\n init_weights=init_weights, init_bias=init_bias,\n W_lr_scale=W_lr_scale, b_lr_scale=b_lr_scale, \n max_row_norm=max_row_norm, \n max_col_norm=max_col_norm)\n elif layer_class == 'sigmoid':\n (init_id, init_bias, \n W_lr_scale, b_lr_scale, \n max_row_norm, max_col_norm) \\\n = self.select_layer_sigmoid(layer_id) \n init_weights = self.get_init(init_id)\n layer = Sigmoid(dim=dim, layer_name=layer_name, \n init_weights=init_weights, init_bias=init_bias,\n W_lr_scale=W_lr_scale, b_lr_scale=b_lr_scale, \n max_row_norm=max_row_norm, \n max_col_norm=max_col_norm)\n elif layer_class == 'softmaxpool':\n (detector_layer_dim, pool_size,\n\t init_id, init_bias,\n\t W_lr_scale, b_lr_scale) \\\n = self.select_layer_softmaxpool(layer_id) \n init_weights = self.get_init(init_id)\n layer = SoftmaxPool(detector_layer_dim=detector_layer_dim, \n layer_name=layer_name, pool_size=pool_size,\n init_weights=init_weights, init_bias=init_bias,\n W_lr_scale=W_lr_scale, b_lr_scale=b_lr_scale)\n elif layer_class == 'softmax':\n (init_id, init_bias, \n W_lr_scale, b_lr_scale, \n max_row_norm, max_col_norm) \\\n = self.select_layer_softmax(layer_id) \n init_weights = self.get_init(init_id)\n layer = Softmax(dim=dim, layer_name=layer_name, \n init_weights=init_weights, init_bias=init_bias,\n W_lr_scale=W_lr_scale, b_lr_scale=b_lr_scale, \n max_row_norm=max_row_norm, \n max_col_norm=max_col_norm)\n elif layer_class == 'rectifiedlinear':\n (init_id, init_bias, \n W_lr_scale, b_lr_scale, \n max_row_norm, max_col_norm,\n left_slope) = self.select_layer_rectifiedlinear(layer_id) \n init_weights = self.get_init(init_id)\n layer = RectifiedLinear(dim=dim, layer_name=layer_name, \n init_weights=init_weights, init_bias=init_bias,\n W_lr_scale=W_lr_scale, b_lr_scale=b_lr_scale, \n max_row_norm=max_row_norm, \n max_col_norm=max_col_norm, left_slope=left_slope)\n elif layer_class == 'convrectifiedlinear':\n (output_channels, kernel_shape, pool_shape,\n pool_stride, border_mode, init_id,\n init_bias, W_lr_scale,\n b_lr_scale, left_slope,\n max_kernel_norm) \\\n = self.select_layer_convrectifiedlinear(layer_id) \n init_weights = self.get_init(init_id)\n layer = ConvRectifiedLinear(output_channels=output_channels,\n kernel_shape=(kernel_shape, kernel_shape),\n pool_shape=(pool_shape, pool_shape),\n pool_stride=(pool_stride, pool_stride),\n layer_name=layer_name, init_weights=init_weights, \n init_bias=init_bias,\n W_lr_scale=W_lr_scale, b_lr_scale=b_lr_scale, \n max_kernel_norm=max_kernel_norm, \n left_slope=left_slope)\n layer.dropout_prob = dropout_prob\n layer.dropout_scale= dropout_scale\n return layer", "def declare_layers(self, names):\n for name in names:\n self[name]\n return self", "def declare_layers(self, names):\n for name in names:\n self[name]\n return self", "def __init__(self, layer_list_info):\n self.layer_list_info = layer_list_info", "def __init__(self, parent, layer):\n pass", "def __init__(self, layers, r_min, r_max, learn_rate):\n\n if not isinstance(layers, list) or len(layers) < 3:\n raise ValueError('invalid layer parammeter')\n self.layers = layers\n n_layer = len(layers)\n self.n_layer = n_layer\n self.r_min = r_min\n self.r_max = r_max\n self.learn_rate = learn_rate\n\n # initialize ws\n self.ws = []\n for layer_idx in range(n_layer - 1):\n layer_size = (layers[layer_idx] + 1, layers[layer_idx + 1])\n w = self.init_w(layer_size)\n self.ws.append(w)", "def make_feature_layers(self, config):\n raise NotImplementedError", "def define_layers(self):\n if self.d != 0:\n # If we have a fixed input size we use it 
do define the first layer\n self.layers = [nn.Sequential(nn.Linear(self.d, self.h),\n nn.ReLU(), )] # nn.BatchNorm1d(self.h, affine=False))]\n else:\n self.layers = [nn.Sequential(nn.Linear(self.h, self.h),\n nn.ReLU(), )]\n\n l = 0\n for l in self.layers_sizes():\n self.layers.append(nn.Sequential(nn.Linear(self.h - l, self.h - l - self.delta_h),\n nn.ReLU(), )) # nn.BatchNorm1d( self.h - l - self.delta_h, affine=False)))\n self.layers.append(nn.Sequential(nn.Linear(self.h - l - self.delta_h, 1), nn.ReLU()))", "def make_layers(self):\r\n #assuming temporal field is always the first column!\r\n timeCol = self.data.columns[0]\r\n times = self.data[timeCol].unique() \r\n lat = self.data.lat.unique()\r\n lon = self.data.lon.unique()\r\n shape = (len(lat), len(lon))\r\n depths, hours = [None], [None]\r\n if 'depth' in self.data.columns:\r\n depths = self.data.depth.unique()\r\n if 'hour' in self.data.columns:\r\n hours = self.data.hour.unique()\r\n layers, titles = [], []\r\n for t in times:\r\n for h in hours:\r\n for z in depths:\r\n frame = self.data[self.data[timeCol] == t]\r\n\r\n if timeCol == 'time':\r\n sub = self.variable + self.unit + ', ' + str(datetime.strptime(t, '%Y-%m-%dT%H:%M:%S').date())\r\n else:\r\n sub = self.variable + self.unit + ', ' + timeCol + ': ' + str(t) \r\n\r\n if h != None:\r\n frame = frame[frame['hour'] == h]\r\n sub = sub + ', hour: ' + str(h) + 'hr'\r\n if z != None:\r\n frame = frame[frame['depth'] == z] \r\n sub = sub + ', depth: %2.2f' % z + ' [m]' \r\n try: \r\n layers.append(frame[self.variable].values.reshape(shape))\r\n titles.append(sub)\r\n except Exception as e:\r\n continue \r\n return layers, titles, lat, lon", "def layers(self):\r\n return self._flc.layers", "def init_layers(self):\n\n # get caching layers activated\n caching_layers = G3WCachingLayer.objects.all()\n for caching_layer in caching_layers:\n self.add_layer(str(caching_layer), caching_layer)", "def __init__(self, n, segments=2, layer_type=\"continuous\", linear_part=1.0):\n super().__init__()\n\n self.layer1 = high_order_fc_layers(\n layer_type=layer_type,\n n=n,\n in_features=2,\n out_features=2,\n segments=segments,\n alpha=linear_part,\n periodicity=2.0,\n )\n self.layer2 = high_order_fc_layers(\n layer_type=layer_type,\n n=n,\n in_features=2,\n out_features=1,\n segments=segments,\n alpha=linear_part,\n periodicity=2.0,\n )", "def _LayerParams(ii):\n if isinstance(p.transformer_layer_params_tpl, list):\n factor = p.num_layers // len(p.transformer_layer_params_tpl)\n i = ii // factor\n p_ii = p.transformer_layer_params_tpl[i].Copy()\n else:\n p_ii = p.transformer_layer_params_tpl.Copy()\n p_ii.name = 'layer_%d' % ii\n p_ii.has_aux_atten = p.has_aux_atten\n p_ii.mask_self_atten = p.mask_self_atten\n p_ii.input_dim = p.mdl_dim or p_ii.input_dim\n p_ii.output_dim = p.mdl_dim or p_ii.output_dim\n p_ii.packed_input = p.packed_input\n if (not isinstance(p_ii.tr_atten_tpl.num_heads, list) and\n p.num_atten_heads is not None):\n p_ii.tr_atten_tpl.num_heads = p.num_atten_heads\n if p.dropout_prob is not None:\n p_ii.tr_atten_tpl.atten_dropout_prob = p.dropout_prob\n p_ii.tr_atten_tpl.residual_dropout_prob = p.dropout_prob\n p_ii.tr_fflayer_tpl.residual_dropout_prob = p.dropout_prob\n p_ii.tr_fflayer_tpl.relu_dropout_prob = p.dropout_prob\n if p.stochastic_depth_droppath_prob is not None:\n ratio = p.stochastic_depth_droppath_prob * ii / (p.num_layers - 1)\n p_ii.tr_atten_tpl.residual_droppath_prob = ratio\n p_ii.tr_fflayer_tpl.residual_droppath_prob = ratio\n if p.hidden_dim is not None:\n 
p_ii.tr_fflayer_tpl.hidden_dim = p.hidden_dim\n p_ii.tr_atten_tpl.add_unnormalized_input = p.add_unnormalized_input\n if ii in p.moe_layers:\n p_ii.tr_fflayer_tpl = _MoeLayerParams(p_ii.tr_fflayer_tpl)\n return p_ii", "def __init__(self, **kwargs):\n\n # Simply hold on to the parameters for now\n self.name = kwargs.get(\"name\", None)\n\n # Placeholder for the resulting layer\n self.layer = None", "def num_layers(self):\n return self._num_layers", "def add_layer(self, func, *args, **kwargs):\n scope_name = self.name + '_layer' + str(self.layer_count)\n with tf.variable_scope(scope_name, reuse=self.reuse):\n self.last_layer = func(self.last_layer, *args, **kwargs)\n self.layer_seq += [self.last_layer]\n pass\n self.layer_count += 1\n return self.last_layer", "def __init__(self, config_data, in_dims, layer_num):\n self.layer_num = layer_num\n self.in_dims = in_dims\n self.out_dims = in_dims[\"width\"] * in_dims[\"height\"] * in_dims[\"channels\"]\n self.layer_type = config_data[\"type\"]\n self.name = config_data[\"name\"]\n self.params = []", "def initialize_layers(self, layers_config: dict, inputs=None):\n layers_config = layers_config.copy()\n input_lyrs = []\n initiated_layers = OrderedDict()\n wrp_layer = None # indicator for wrapper layers\n first_layer = True\n\n for lyr, lyr_args in layers_config.items():\n\n lyr_config, lyr_inputs, named_outs, call_args = self.deconstruct_lyr_args(lyr, lyr_args)\n\n lyr_name, args, lyr_config, activation = self.check_lyr_config(lyr, lyr_config)\n\n if K.BACKEND == 'pytorch':\n\n if first_layer:\n first_layer = False\n\n if callable(lyr_config):\n lyr_initiated = lyr_config\n else:\n lyr_initiated = TORCH_LAYERS[lyr_name](**lyr_config)\n setattr(self, lyr, lyr_initiated)\n initiated_layers[lyr] = {\"layer\": lyr_initiated, \"named_outs\": named_outs, 'call_args': call_args,\n 'inputs': lyr_inputs}\n\n else:\n # may be user has defined layers without input layer, in this case add Input layer as first layer\n if first_layer:\n if inputs is not None: # This method was called by providing it inputs.\n assert isinstance(inputs, tf.Tensor)\n # since inputs have been defined, all the layers that will be added will be next to first layer\n first_layer = False\n layer_outputs = inputs\n initiated_layers[layer_outputs.name] = {'layer': layer_outputs, 'tf_name': lyr_name}\n\n elif lyr_name != \"Input\":\n if 'input_shape' in lyr_config: # input_shape is given in the first layer so make input layer\n initialized_layer = LAYERS[\"Input\"](shape=lyr_config['input_shape'])\n else:\n # for simple dense layer based models, lookback will not be used\n def_shape = (self.num_ins,) if self.lookback == 1 else (self.lookback, self.num_ins)\n initialized_layer = LAYERS[\"Input\"](shape=def_shape)\n\n # first layer is built so next iterations will not be for first layer\n first_layer = False\n # put the first layer in memory to be used for model compilation\n # add th layer which the user had specified as first layer\n initiated_layers[initialized_layer.name] = {'layer': initialized_layer,\n 'tf_name': lyr_name}\n input_lyrs.append(initialized_layer)\n\n # The inputs to the layer have not been specified, so either it is an Input layer\n if lyr_inputs is None:\n # or it uses the previous outputs as inputs\n if lyr_name == \"Input\":\n # it is an Input layer, hence should not be called\n initialized_layer = LAYERS[lyr_name](*args, **lyr_config)\n initiated_layers[lyr_config['name']] = {'layer': initialized_layer,\n 'tf_name': lyr_name}\n input_lyrs.append(initialized_layer)\n 
else:\n # it is executable and uses previous outputs as inputs\n if lyr_name in ACTIVATION_LAYERS:\n layer_outputs = ACTIVATION_LAYERS[lyr_name](name=lyr_config['name'])\n initiated_layers[lyr_config['name']] = {'layer': layer_outputs,\n 'named_outs': named_outs,\n 'call_args': call_args,\n 'inputs': lyr_inputs,\n 'tf_name': lyr_name}\n elif lyr_name in ['TimeDistributed', 'Bidirectional']:\n wrp_layer = LAYERS[lyr_name]\n # because wrapper layer name is property\n initiated_layers[lyr_config['name']] = {'layer': wrp_layer,\n 'tf_name': lyr_name}\n continue\n elif \"LAMBDA\" in lyr_name.upper():\n # lyr_config is serialized lambda layer, which needs to be deserialized\n initialized_layer = tf.keras.layers.deserialize(lyr_config)\n # layers_config['lambda']['config'] still contails lambda, so we need to replace the python\n # object (lambda) with the serialized version (lyr_config) so that it can be saved as json file.\n layers_config[lyr]['config'] = lyr_config\n initiated_layers[lyr_config['name']] = {'layer': initialized_layer,\n 'named_outs': named_outs,\n 'call_args': call_args,\n 'inputs': lyr_inputs,\n 'tf_name': lyr_name}\n else:\n if wrp_layer is not None:\n initialized_layer = wrp_layer(LAYERS[lyr_name](*args, **lyr_config))\n initiated_layers[lyr_config['name']] = {'layer': initialized_layer,\n 'named_outs': named_outs,\n 'call_args': call_args,\n 'inputs': lyr_inputs,\n 'tf_name': lyr_name}\n wrp_layer = None\n else:\n if lyr_name == \"TemporalFusionTransformer\":\n lyr_config['return_attention_components'] = True\n initialized_layer = LAYERS[lyr_name](*args, **lyr_config)\n initiated_layers[lyr_config['name']] = {'layer': initialized_layer,\n 'named_outs': named_outs,\n 'call_args': call_args,\n 'inputs': lyr_inputs,\n 'tf_name': lyr_name}\n\n else: # The inputs to this layer have been specified so they must exist in lyr_cache.\n # it is an executable\n if lyr_name in ACTIVATION_LAYERS:\n\n layer_outputs = ACTIVATION_LAYERS[lyr_name](name=lyr_config['name'])\n initiated_layers[lyr_config['name']] = {'layer': layer_outputs,\n 'named_outs': named_outs,\n 'call_args': call_args,\n 'inputs': lyr_inputs,\n 'tf_name': lyr_name}\n elif lyr_name in ['TimeDistributed', 'Bidirectional']:\n wrp_layer = LAYERS[lyr_name]\n # because wrapper layer name is property\n initiated_layers[lyr_config['name']] = {'layer': wrp_layer,\n 'tf_name': lyr_name}\n continue\n elif \"LAMBDA\" in lyr_name.upper():\n initialized_layer = tf.keras.layers.deserialize(lyr_config)\n layers_config[lyr]['config'] = lyr_config\n initiated_layers[lyr_config['name']] = {'layer': initialized_layer,\n 'named_outs': named_outs,\n 'call_args': call_args,\n 'inputs': lyr_inputs,\n 'tf_name': lyr_name}\n else:\n if wrp_layer is not None:\n initialized_layer = wrp_layer(LAYERS[lyr_name](*args, **lyr_config))\n initiated_layers[lyr_config['name']] = {'layer': initialized_layer,\n 'named_outs': named_outs,\n 'call_args': call_args,\n 'inputs': lyr_inputs,\n 'tf_name': lyr_name}\n wrp_layer = None\n else:\n layer_initialized = LAYERS[lyr_name](*args, **lyr_config)\n initiated_layers[lyr_config['name']] = {'layer': layer_initialized,\n 'named_outs': named_outs,\n 'call_args': call_args,\n 'inputs': lyr_inputs,\n 'tf_name': lyr_name}\n\n if activation is not None: # put the string back to dictionary to be saved in config file\n lyr_config['activation'] = activation\n\n first_layer = False\n\n self.jsonize_lyr_config(lyr_config)\n\n # inputs = [] todo, indentify input layers\n # for k,v in lyr_cache.items():\n # since the model is 
not build yet and we have access to only output tensors of each list, this is probably\n # # the only way to know that how many `Input` layers were encountered during the run of this method. Each\n # tensor (except TimeDistributed) has .op.inputs attribute, which is empty if a tensor represents output of Input layer.\n # if int(''.join(tf.__version__.split('.')[0:2]).ljust(3, '0')) < 240:\n # if k.upper() != \"TIMEDISTRIBUTED\" and hasattr(v, 'op'):\n # if hasattr(v.op, 'inputs'):\n # _ins = v.op.inputs\n # if len(_ins) == 0:\n # inputs.append(v)\n # else: # not sure if this is the proper way of checking if a layer receives an input or not!\n # if hasattr(v, '_keras_mask'):\n # inputs.append(v)\n\n setattr(self, 'initiated_layers', initiated_layers)\n setattr(self, 'input_lyrs', input_lyrs)\n\n\n # todo,\n # # for case when {Input -> Dense, Input_1}, this method wrongly makes Input_1 as output so in such case use\n # # {Input_1, Input -> Dense }, thus it makes Dense as output and first 2 as inputs, so throwing warning\n # if int(''.join(tf.__version__.split('.')[0:2]).ljust(3, '0')) < 240:\n # if len(layer_outputs.op.inputs) < 1:\n # print(\"Warning: the output is of Input tensor class type\")\n # else:\n # if 'op' not in dir(layer_outputs): # layer_outputs does not have `op`, which means it has no incoming node\n # print(\"Warning: the output is of Input tensor class type\")\n\n # outs = None\n #if BACKEND == 'tensorflow':\n # outs = self.call(input_lyrs)\n # setattr(self, 'output_lyrs', outs)\n # if BACKEND == 'tensorflow':\n # ## Reinitial\n # super(Model, self).__init__(\n # inputs=input_lyrs,\n # outputs=outs)\n #MODEL.__init__(self, inputs=inputs, outputs=outs)\n\n return input_lyrs # , outs", "def _make_layer(self, X, name, block, num_blocks, out_channels):\n\n for i in range(0, num_blocks):\n X = block(X, name = name + '_block{}'.format(i), out_channels=out_channels)\n return X", "def __init__(self, count, channels):\n super().__init__()\n self.count = count\n self.channels = channels\n # Organization of the distance data can be angles first, channels second,\n # or channels first, angles second.\n # E.g., for inputs with a shape of (angles, channels):\n # [ [23.0, 27.0], [1.0, 27.0], [23.0, 27.0] ]\n # would have three angles for rays detecting distances to two types of\n # objects.\n\n self.layers = []\n layer_width = self.count\n\n self.layers_parameters = [\n FeelersLayerParameters(kernel_size=5, kernel_count=3, pool_size=2),\n FeelersLayerParameters(kernel_size=5, kernel_count=3, pool_size=2),\n FeelersLayerParameters(kernel_size=5, kernel_count=2, pool_size=2),\n FeelersLayerParameters(kernel_size=5, kernel_count=2, pool_size=2),\n ]\n\n # Larger kernel sizes are appropriate for 1D convolutions.\n # Small number of filters to keep total parameter count low.\n for param in self.layers_parameters:\n width_after_next_layer = self._width_after_convolution(\n layer_width, param.kernel_size, param.pool_size)\n if width_after_next_layer < param.kernel_size:\n break\n self.layers.append(\n tf.keras.layers.Conv1D(\n filters=param.kernel_count, kernel_size=param.kernel_size))\n self.layers.append(\n tf.keras.layers.MaxPool1D(pool_size=param.pool_size))\n layer_width = width_after_next_layer", "def __init__(self, hparams):\n super(ThreeLayerClassifier, self).__init__()\n self.hparams = hparams\n self.layer_1 = torch.nn.Linear(self.hparams[\"input_size\"], 128)\n self.layer_2 = torch.nn.Linear(128, 256)\n self.layer_3 = torch.nn.Linear(256, self.hparams[\"targets\"])", "def layers(self, 
layers):\n self._layers = layers\n prev = None\n for layer in layers:\n if not layer.inputs and prev is not None:\n layer.inputs = [prev]\n prev = layer", "def layers(self, layers):\n\n self._layers = layers", "def _init_layers(self) -> None:\n self.convs_all_levels = nn.ModuleList()\n for i in range(self.start_level, self.end_level + 1):\n convs_per_level = nn.Sequential()\n convs_per_level.add_module(\n f'conv{i}',\n ConvModule(\n self.in_channels,\n self.feat_channels,\n 3,\n padding=1,\n conv_cfg=self.conv_cfg,\n norm_cfg=self.norm_cfg,\n inplace=False,\n bias=False))\n self.convs_all_levels.append(convs_per_level)\n\n conv_branch = []\n for _ in range(self.num_stacked_convs):\n conv_branch.append(\n ConvModule(\n self.feat_channels,\n self.feat_channels,\n 3,\n padding=1,\n conv_cfg=self.conv_cfg,\n norm_cfg=self.norm_cfg,\n bias=False))\n self.conv_branch = nn.Sequential(*conv_branch)\n\n self.conv_pred = nn.Conv2d(\n self.feat_channels, self.out_channels, 1, stride=1)", "def _create_layer() -> Image:\n data = np.random.random((32, 16))\n return Image(data)", "def forward_prop(x, layer_sizes=[], activations=[]):\n layers = x\n for j in range(len(layer_sizes)):\n layers = create_layer(layers, layer_sizes[j],\n activation=activations[j])\n return layers", "def __init__(self, momentum: float = .5):\n super(VanillaEncoder, self).__init__()\n self.conv1 = PointNetConv2Layer(64, momentum)\n self.conv2 = PointNetConv2Layer(64, momentum)\n self.conv3 = PointNetConv2Layer(64, momentum)\n self.conv4 = PointNetConv2Layer(128, momentum)\n self.conv5 = PointNetConv2Layer(1024, momentum)", "def __init__(self, in_channels=3, n_classes=21):\n super(SegNet_1, self).__init__()\n\n self.layer_1 = SegnetLayer_Encoder(in_channels, 64, 2)\n self.layer_2 = SegnetLayer_Encoder(64, 128, 2)\n self.layer_3 = SegnetLayer_Encoder(128, 256, 3)\n self.layer_4 = SegnetLayer_Encoder(256, 512, 3)\n self.layer_5 = SegnetLayer_Encoder(512, 1024, 3)\n self.layer_6 = SegnetLayer_Encoder(1024, 1024, 3)\n\n self.layer_7 = SegnetLayer_Decoder(1024, 1024, 3)\n self.layer_8 = SegnetLayer_Decoder(1024, 512, 3)\n self.layer_9 = SegnetLayer_Decoder(512, 256, 3)\n self.layer_10 = SegnetLayer_Decoder(256, 128, 3)\n self.layer_11 = SegnetLayer_Decoder(128, 64, 2)\n self.layer_12 = SegnetLayer_Decoder(64, n_classes, 2)", "def make_layers(self):\n w, h = self.image.get_size()\n shrink = pg.transform.smoothscale(self.image, (w//2, h//2))\n self.mid_image = tools.tile_surface((w,h), shrink, True)\n shrink = pg.transform.smoothscale(self.image, (w//4, h//4))\n self.base = tools.tile_surface(prepare.SCREEN_SIZE, shrink, True)", "def add_layers(self, layers):\n\n existing_layers = self.layers\n assert len(existing_layers) > 0\n for layer in layers:\n assert layer.get_mlp() is None\n layer.set_mlp(self)\n layer.set_input_space(existing_layers[-1].get_output_space())\n existing_layers.append(layer)\n assert layer.layer_name not in self.layer_names\n self.layer_names.add(layer.layer_name)", "def __init__(self, in_channels=3, n_classes=21):\n super(UpNet, self).__init__()\n\n self.layer_1 = UpNetLayer_ParticularEncoder_2(in_channels, 64, 2)\n self.layer_2 = UpNetLayer_Encoder(64, 128, 2)\n self.layer_3 = UpNetLayer_Encoder(128, 256, 3)\n self.layer_4 = UpNetLayer_Encoder(256, 512, 3)\n self.layer_6 = UpNetLayer_ParticularEncoder(512, 1024, 3)\n\n self.layer_inter = UpNetLayer_Dropout()\n\n self.layer_7 = UpNetLayer_Decoder_Particular(1024, 512, 3)\n self.layer_8 = UpNetLayer_Decoder(512, 256, 3)\n self.layer_9 = UpNetLayer_Decoder(256, 128, 
3)\n self.layer_10 = UpNetLayer_Decoder(128, 64, 2)\n self.layer_11 = UpNetLayer_Decoder_Particular_2(64, n_classes, 2)", "def __init__(\n self,\n name: str,\n num_nodes: int,\n layers: List[Layer],\n posteriors: Optional[List[Layer]] = None,\n log_level: int = logging.ERROR\n ):\n super().__init__(\n name=name,\n num_nodes=num_nodes,\n layers=layers,\n posteriors=None,\n log_level=log_level\n )", "def preview_layerset(ls, size = 100, spacing = 100):\n D = Device()\n scale = size/100\n num_layers = len(ls._layers)\n matrix_size = int(np.ceil(np.sqrt(num_layers)))\n sorted_layers = sorted(ls._layers.values(),\n key = lambda x: (x.gds_layer, x.gds_datatype))\n for n, layer in enumerate(sorted_layers):\n R = rectangle(size = (100*scale, 100*scale), layer = layer)\n T = text(text = '%s\\n%s / %s'\\\n % (layer.name, layer.gds_layer, layer.gds_datatype),\n size = 20*scale,\n justify = 'center',\n layer = layer)\n\n T.move((50*scale, -20*scale))\n xloc = n % matrix_size\n yloc = int(n // matrix_size)\n D.add_ref(R)\\\n .movex((100+spacing) * xloc*scale).movey(-(100+spacing) * yloc*scale)\n D.add_ref(T)\\\n .movex((100+spacing) * xloc*scale).movey(-(100+spacing) * yloc*scale)\n return D", "def constructWithLayers(layerclass, layersize, dimensions, name = None):\n c = lambda: layerclass(layersize)\n return ModuleMesh(c, dimensions, name)", "def _add_layer(self, layer_dict, layer_name, input_layers, merge_mode=None, share_params_with=None):\n util.colorprint(layer_name, 'teal')\n \n layer_dict = dict(layer_dict)\n util.colorprint(layer_dict, 'red')\n \n if share_params_with is not None:\n print \"Warning: ignoring share_params_with\"\n \n layer_options = layer_dict[\"options\"]\n layer=None\n if layer_dict[\"type\"]==\"conv2d\":\n #TODO: remove below\n nb_filter, nb_row, nb_col = 3,3,3\n layer = keras.layers.convolutional.Convolution2D(nb_filter, nb_row, nb_col, **layer_options)\n elif layer_dict[\"type\"]==\"dense\":\n dim = layer_dict[\"output_dim\"]\n # del layer_options[\"output_dim\"]\n layer = keras.layers.core.Dense(dim, **layer_options) \n else:\n print \"Ursol Major\"\n RaiseError()\n # TODO: one of the layers is a string\n if isinstance(input_layers, list):\n #this means that there is input from a loop to this layer\n self.model.add_node(layer, name=layer_name, inputs=input_layers, merge_mode=merge_mode)\n else:\n self.model.add_node(layer, name=layer_name, input=input_layers)\n\n return layer_name", "def setup_layers(self):\n if self.args.model == \"exact\":\n self.layer = PPNPLayer\n else:\n self.layer = APPNPLayer\n self.setup_layer_structure()", "def forward(self, inputs, inputs1):\n\n down1, indices_1, unpool_shape1 = self.layer_1(inputs=inputs,\n layer_size=2)\n down2, indices_2, unpool_shape2 = self.layer_2(inputs=down1,\n layer_size=2)\n down3, indices_3, unpool_shape3 = self.layer_3(inputs=down2,\n layer_size=3)\n down4, indices_4, unpool_shape4 = self.layer_4(inputs=down3,\n layer_size=3)\n down5, indices_5, unpool_shape5 = self.layer_5(inputs=down4,\n layer_size=3)\n\n up5 = self.layer_6(inputs=down5, indices=indices_5,\n output_shape=unpool_shape5, layer_size=3)\n up4 = self.layer_7(inputs=up5, indices=indices_4,\n output_shape=unpool_shape4, layer_size=3)\n up3 = self.layer_8(inputs=up4, indices=indices_3,\n output_shape=unpool_shape3, layer_size=3)\n up2 = self.layer_9(inputs=up3, indices=indices_2,\n output_shape=unpool_shape2, layer_size=2)\n output = self.layer_10(inputs=up2, indices=indices_1,\n output_shape=unpool_shape1, layer_size=2)\n\n # Second Modality\n\n down11, 
indices_11, unpool_shape11 = self.layer_11(inputs=inputs,\n layer_size=2)\n down12, indices_12, unpool_shape12 = self.layer_12(inputs=down1,\n layer_size=2)\n down13, indices_13, unpool_shape13 = self.layer_13(inputs=down2,\n layer_size=3)\n down14, indices_14, unpool_shape14 = self.layer_14(inputs=down3,\n layer_size=3)\n down15, indices_15, unpool_shape15 = self.layer_15(inputs=down4,\n layer_size=3)\n\n up15 = self.layer_16(inputs=down15, indices=indices_15,\n output_shape=unpool_shape15, layer_size=3)\n up14 = self.layer_17(inputs=up15, indices=indices_14,\n output_shape=unpool_shape4, layer_size=3)\n up13 = self.layer_18(inputs=up14, indices=indices_13,\n output_shape=unpool_shape13, layer_size=3)\n up12 = self.layer_19(inputs=up13, indices=indices_12,\n output_shape=unpool_shape12, layer_size=2)\n output1 = self.layer_110(inputs=up12, indices=indices_11,\n output_shape=unpool_shape11, layer_size=2)\n\n # End Pipe\n\n Concat = torch.cat((output, output1), 1)\n\n finalout = self.layer_1110(Concat)\n\n return finalout", "def wrapper(self, layer_list):\n\n if isinstance(layer_list, list) \\\n and all(isinstance(elem, layer.Layer) for elem in layer_list):\n return function(self, layer_list)\n \n else:\n raise TypeError('Invalid input! Argument must be a list of layer objects.')", "def _load_layer_arrays(\n cls,\n f_obj,\n model,\n nlay,\n ext_unit_dict,\n transient,\n laycon,\n ikvflag,\n ikcflag,\n iwdflg,\n ):\n sf1 = [0] * nlay\n tran = [0] * nlay\n hy = [0] * nlay\n if nlay > 1:\n vcont = [0] * (nlay - 1)\n else:\n vcont = [0] * nlay\n sf2 = [0] * nlay\n wetdry = [0] * nlay\n kv = [0] * nlay # mfusg\n\n for layer in range(nlay):\n util2d_shape = get_util2d_shape_for_layer(model, layer=layer)\n\n # sf1\n if transient:\n if model.verbose:\n print(f\" loading sf1 layer {layer + 1:3d}...\")\n sf1[layer] = Util2d.load(\n f_obj,\n model,\n util2d_shape,\n np.float32,\n \"sf1\",\n ext_unit_dict,\n )\n\n # hy/tran, and kv/vcont\n if ikcflag == 0:\n (\n hy[layer],\n tran[layer],\n kv[layer],\n vcont_k,\n ) = cls._load_hy_tran_kv_vcont(\n f_obj,\n model,\n (layer, laycon[layer]),\n ext_unit_dict,\n ikvflag,\n )\n if layer < nlay - 1:\n vcont[layer] = vcont_k\n\n # sf2\n if transient and (laycon[layer] in [2, 3, 4]):\n if model.verbose:\n print(f\" loading sf2 layer {layer + 1:3d}...\")\n sf2[layer] = Util2d.load(\n f_obj,\n model,\n util2d_shape,\n np.float32,\n \"sf2\",\n ext_unit_dict,\n )\n\n # wetdry\n if (iwdflg != 0) and (laycon[layer] in [1, 3]):\n if model.verbose:\n print(f\" loading sf2 layer {layer + 1:3d}...\")\n wetdry[layer] = Util2d.load(\n f_obj,\n model,\n util2d_shape,\n np.float32,\n \"wetdry\",\n ext_unit_dict,\n )\n\n return sf1, tran, hy, vcont, sf2, wetdry, kv", "def set_layers(self, sizes, init='random'):\n\t\tself.init_weights(sizes, init, None, None)", "def make_layer(basic_block, num_basic_block, **kwarg):\n layers = []\n for _ in range(num_basic_block):\n layers.append(basic_block(**kwarg))\n return nn.Sequential(*layers)", "def test_ww_stacked_layer_iterator(self):\n\t\t\t\t\n\t\tparams = DEFAULT_PARAMS.copy()\n\t\tparams['stacked'] = True\n\t\titerator = self.watcher.make_layer_iterator(model=self.model, params=params)\n\t\t#TODO: get this to work!\n\t\t#self.assertEqual(iterator.__class__.__name__, WWStackedLayerIterator)\n\t\tnum = 0\n\t\tfor ww_layer in iterator:\n\t\t\tnum+=1\n\t\t\t\n\t\tself.assertEqual(num,1)\n\t\tself.assertEqual(ww_layer.name, \"Stacked Layer\")\n\t\tself.assertEqual(ww_layer.layer_id,0)\n\t#\tself.assertEqual(ww_layer.N,29379) 
?\n\t#\tself.assertEqual(ww_layer.M,25088) ?\n\t\tself.assertEqual(ww_layer.rf,1)", "def define_layers(self):\n\n if self.D0Flag:\n self.d = self.h\n\n self.layers = [nn.Sequential(nn.Linear(self.d, self.h),\n nn.ReLU(), )] # nn.BatchNorm1d(self.h, affine=False))]\n for l in range(1, self.L):\n self.layers.append(nn.Sequential(nn.Linear(self.h, self.h),\n nn.ReLU(), )) # nn.BatchNorm1d(self.h, affine=False)))\n\n self.layers.append(nn.Linear(self.h, 1))", "def __init__(self):\n self.layers = []\n self.best_loss = None", "def make_layers(self, n_repetitions: int = 1) -> List[List[tuple]]:\n if n_repetitions <= 0:\n raise ValueError(\"The number of repetitions must be positve\")\n\n root = [self.items]\n graph_layers = [root] + [[]] * (self.depth * 2)\n\n for _ in range(n_repetitions):\n layers = self.random_layers()\n for h in range(1, len(layers)):\n graph_layers[h] = graph_layers[h] + layers[h]\n\n return graph_layers", "def consume_layer(self, reports):\n layer_list = []\n layer_count = 1\n for report in reports:\n layer = create_image_layer(report)\n layer.layer_index = layer_count\n layer_list.append(layer)\n layer_count += 1\n return layer_list", "def add_layer(self, layer_name, layer_def):\n\n layer_idx, datatype = layer_def.split(\"/\")\n layer_idx = int(layer_idx)\n datatype = int(datatype)\n self.layers[layer_name] = LayerInfo(layer_idx, datatype, layer_name)", "def append_layer(self, *args, **kwargs) :\n \n self.insert_layer(len(self._layers), *args, **kwargs)", "def iteration_layers(model, speedup, session, indepth_layer=None):\n if speedup is True:\n layer_names_reduced = ['conv2d1',\n 'conv2d2',\n 'mixed3b',\n 'mixed4b',\n 'mixed5b']\n layer_tensors = [session.graph.get_tensor_by_name(name + \":0\") for name in layer_names_reduced]\n else:\n layer_tensors = model.layer_tensors\n\n return layer_tensors", "def __init__(self):\n #conv1\n n = inp_width*inp_height\n #poczatkowe wagi sieci sa ustalane losowo z rozkladu normalnego. 
Umieszczane sa one na liscie matryc wag\n self.Weights = [np.random.randn(layers[0][1],inp_channels,layers[0][2],layers[0][2])/np.sqrt(n)]\n out_Size = inp_width - layers[0][2] + 1 #zmienna zawiera rozmiar wyjscia danej warstwy\n #inicjalizacja progow \n self.Biases = [initBias*np.ones( layers[0][1] )]\n #przypisanie parametrow warstwie poolingu\n self.poolParams = [(layers[1][1], layers[1][2])]\n out_Size = out_Size/2 \n #conv 2\n n = out_Size*out_Size*layers[0][1]\n self.Weights.append(np.random.randn(layers[2][1],layers[0][1],layers[2][2],layers[2][2])/np.sqrt(n))\n out_Size = out_Size - layers[2][2]+1\n self.Biases.append(initBias*np.ones(layers[2][1]))\n #pool 2\n self.poolParams.append((layers[3][1],layers[3][2]))\n out_Size = out_Size/2 \n #conv 3\n n = out_Size*out_Size*layers[2][1]\n self.Weights.append(np.random.randn(layers[4][1],layers[2][1],out_Size,out_Size)/np.sqrt(n))\n out_Size = 1\n self.Biases.append(initBias*np.ones(layers[4][1]))\n #fully connected 1\n n = layers[4][1]\n self.Weights.append(np.random.randn(layers[5][1],layers[4][1])/np.sqrt(n))\n self.Biases.append(initBias*np.ones(layers[5][1]))\n #fully connected 2\n n = layers[5][1]\n self.Weights.append(np.random.randn(layers[6][1],layers[5][1])/np.sqrt(n))\n self.Biases.append(initBias*np.ones(layers[6][1]))\n\n self.Weights = np.asarray(self.Weights)\n self.Biases = np.asarray(self.Biases)\n \n delta_W = []\n delta_B = []\n for i in range(5):\n delta_W.append(np.zeros(self.Weights[i].shape))\n delta_B.append(np.zeros(self.Biases[i].shape))\n self.delta_W = np.asarray(delta_W)\n self.delta_B = np.asarray(delta_B)", "def layer(i):\n with tf.name_scope('layer_{}'.format(i)):\n # displacement gate\n Dgate(tf.clip_by_value(d_r[i], -disp_clip, disp_clip), d_phi[i]) | q[0]\n # rotation gate\n Rgate(r1[i]) | q[0]\n # squeeze gate\n Sgate(tf.clip_by_value(sq_r[i], -sq_clip, sq_clip), sq_phi[i]) | q[0]\n # rotation gate\n Rgate(r2[i]) | q[0]\n # Kerr gate\n Kgate(tf.clip_by_value(kappa1[i], -kerr_clip, kerr_clip)) | q[0]", "def get_layers(self):\n layers = []\n\n for s in self.surfaces:\n n = self.miller_to_direction(s)\n r = np.dot(self.get_positions() - self.center, n).max()\n d = self.get_layer_distance(s, 2)\n l = 2 * np.round(r / d).astype(int)\n\n ls = np.arange(l-1,l+2)\n ds = np.array([self.get_layer_distance(s, i) for i in ls])\n\n mask = (np.abs(ds - r) < 1e-10)\n\n layers.append(ls[mask][0])\n\n return np.array(layers, int)", "def test_all_layer_types(self):\n\n\t\tdetails = self.watcher.describe()\n\t\t\n\t\tdenseLayers = details[details.layer_type==str(LAYER_TYPE.DENSE)]\n\t\tdenseCount = len(denseLayers)\n\t\tself.assertEqual(denseCount, 3, \"3 dense layers, but {} found\".format(denseCount))\t\t\n\t\n\t\tconv2DLayers = details[details.layer_type==str(LAYER_TYPE.CONV2D)]\n\t\tconv2DCount = len(conv2DLayers)\n\t\tself.assertEqual(conv2DCount, 8, \"8 conv2D layers, but {} found\".format(denseCount))", "def __dask_layers__(self) -> Sequence[str]:\n raise NotImplementedError(\"Inheriting class must implement this method.\")", "def _init_layers(self):\n self.relu = nn.ReLU(inplace=True)\n self.cls_convs = nn.ModuleList()\n self.reg_convs = nn.ModuleList()\n for i in range(self.stacked_convs):\n chn = self.in_channels if i == 0 else self.feat_channels\n self.cls_convs.append(\n ConvModule(\n chn,\n self.feat_channels,\n 3,\n stride=1,\n padding=1,\n conv_cfg=self.conv_cfg,\n norm_cfg=self.norm_cfg))\n self.reg_convs.append(\n ConvModule(\n chn,\n self.feat_channels,\n 3,\n stride=1,\n padding=1,\n 
conv_cfg=self.conv_cfg,\n norm_cfg=self.norm_cfg))\n self.retina_cls = nn.Conv2d(\n self.feat_channels,\n self.num_anchors * self.cls_out_channels,\n 3,\n padding=1)\n self.retina_reg = nn.Conv2d(\n self.feat_channels, self.num_anchors * 4, 3, padding=1)", "def makeMultiLayerMask( Tables ):\n\n # get data from the corresponding tables\n ElasticModulusData = Tables[ \"ElasticModulus\" ].getRawData( )\n ShearModulusData = Tables[ \"ShearModulus\" ].getRawData( )\n PoissonRatiosData = Tables[ \"PoissonRatios\" ].getRawData( )\n MaterialPropertiesData = Tables[ \"MaterialProperties\" ].getRawData( )\n\n # we're using implicit method to get value from tables since the\n # the last entry represents a string of layers thickness\n GeometryPropertiesData = [ [ Tables[ \"GeometryProperties\" ].getValue( 0, 0 ),\n Tables[ \"GeometryProperties\" ].getValue( 0, 1 ),\n Tables[ \"GeometryProperties\" ].getValue( 0, 2 ) ] ]\n\n\n Tables[ \"ElasticModulus\" ].setBufferData( \"Input\", ElasticModulusData )\n Tables[ \"ShearModulus\" ].setBufferData( \"Input\", ShearModulusData )\n Tables[ \"PoissonRatios\" ].setBufferData( \"Input\", PoissonRatiosData )\n Tables[ \"MaterialProperties\" ].setBufferData( \"Input\", MaterialPropertiesData )\n Tables[ \"GeometryProperties\" ].setBufferData( \"Input\", GeometryPropertiesData )", "def __init__(self,shape,layer_type='input',\\\r\n dtype=None,nsamp=None, name=None,\\\r\n layer_class_name='Layer', damp_neg=0.): \r\n if layer_type not in ['input', 'output', 'middle']:\r\n raise ValueError('Unknown layer type %s' % layer_type)\r\n self.layer_type = layer_type\r\n\r\n self.shape = shape \r\n self.nsamp = nsamp\r\n if (dtype == None):\r\n if (self.layer_type == 'input'):\r\n dtype = np.float\r\n else:\r\n dtype = [np.float, np.float]\r\n self.dtype = dtype\r\n \r\n # Set layer name\r\n if name is None:\r\n self.name = ('Layer %d' % SELayer.glob_layer_num )\r\n else:\r\n self.name = name\r\n SELayer.glob_layer_num += 1\r\n self.layer_class_name = layer_class_name\r\n \r\n # Other parameters\r\n self.pp_var_min = 1e-10\r\n self.damp_neg = damp_neg\r\n self.gam0_neg_last = None\r\n self.tau_last = None", "def GetLayers(self, *args):\n return _XCAFDoc.XCAFDoc_LayerTool_GetLayers(self, *args)", "def make(cls, *args, **kwargs):\n if not args and 'n_repeats' not in kwargs:\n return kwargs.pop('base_block', MultiLayer)(**kwargs)\n return cls(*args, **kwargs)", "def __call__(self, inputs, *args, **kwargs):\n out = inputs\n for i, layer in enumerate(self.layers):\n if i == 0:\n out = layer(out, *args, **kwargs)\n else:\n out = layer(out)\n return out", "def get_features(inp_layer, pad=0):\n # Note: tweaked to use average pooling instead of maxpooling\n net = OrderedDict()\n net['conv1_1'] = ConvLayer(inp_layer, 64, 3, pad=pad, flip_filters=False, nonlinearity=rectify)\n net['conv1_2'] = ConvLayer(net['conv1_1'], 64, 3, pad=pad, flip_filters=False, nonlinearity=rectify)\n net['pool1'] = PoolLayer(net['conv1_2'], 2, mode='average_exc_pad')\n net['conv2_1'] = ConvLayer(net['pool1'], 128, 3, pad=pad, flip_filters=False, nonlinearity=rectify)\n net['conv2_2'] = ConvLayer(net['conv2_1'], 128, 3, pad=pad, flip_filters=False, nonlinearity=rectify)\n net['pool2'] = PoolLayer(net['conv2_2'], 2, mode='average_exc_pad')\n net['conv3_1'] = ConvLayer(net['pool2'], 256, 3, pad=pad, flip_filters=False, nonlinearity=rectify)\n net['conv3_2'] = ConvLayer(net['conv3_1'], 256, 3, pad=pad, flip_filters=False, nonlinearity=rectify)\n net['conv3_3'] = ConvLayer(net['conv3_2'], 256, 3, pad=pad, 
flip_filters=False, nonlinearity=rectify)\n net['conv3_4'] = ConvLayer(net['conv3_3'], 256, 3, pad=pad, flip_filters=False, nonlinearity=rectify)\n net['pool3'] = PoolLayer(net['conv3_4'], 2, mode='average_exc_pad')\n net['conv4_1'] = ConvLayer(net['pool3'], 512, 3, pad=pad, flip_filters=False, nonlinearity=rectify)\n net['conv4_2'] = ConvLayer(net['conv4_1'], 512, 3, pad=pad, flip_filters=False, nonlinearity=rectify)\n net['conv4_3'] = ConvLayer(net['conv4_2'], 512, 3, pad=pad, flip_filters=False, nonlinearity=rectify)\n net['conv4_4'] = ConvLayer(net['conv4_3'], 512, 3, pad=pad, flip_filters=False, nonlinearity=rectify)\n #net['pool4'] = PoolLayer(net['conv4_4'], 2, mode='average_exc_pad')\n #net['conv5_1'] = ConvLayer(net['pool4'], 512, 3, pad=pad, flip_filters=False, nonlinearity=rectify)\n #net['conv5_2'] = ConvLayer(net['conv5_1'], 512, 3, pad=pad, flip_filters=False, nonlinearity=rectify)\n #net['conv5_3'] = ConvLayer(net['conv5_2'], 512, 3, pad=pad, flip_filters=False, nonlinearity=rectify)\n #net['conv5_4'] = ConvLayer(net['conv5_3'], 512, 3, pad=pad, flip_filters=False, nonlinearity=rectify)\n #net['pool5'] = PoolLayer(net['conv5_4'], 2, mode='average_exc_pad')\n\n nb_params = len(nn.layers.get_all_params(net['conv4_4']))\n\n values = pickle.load(open('weights/vgg19_normalized.pkl', 'rb'), encoding='latin1')['param values']\n nn.layers.set_all_param_values(net['conv4_4'], values[:nb_params])\n\n return net", "def layers(vgg_layer3_out, vgg_layer4_out, vgg_layer7_out, num_classes):\n # 1x1 conv layers\n l7_conv = tf.layers.conv2d(vgg_layer7_out, num_classes, (1,1), (1,1), kernel_initializer=kernel_initializer())\n l4_conv = tf.layers.conv2d(vgg_layer4_out, num_classes, (1,1), (1,1), kernel_initializer=kernel_initializer())\n l3_conv = tf.layers.conv2d(vgg_layer3_out, num_classes, (1,1), (1,1), kernel_initializer=kernel_initializer())\n\n # Deconv layers\n l7_deconv = tf.layers.conv2d_transpose(l7_conv, num_classes,(4,4), (2,2), padding='SAME', kernel_initializer=kernel_initializer())\n\n # Add l7 deconv output to l4 output\n l4_sum = tf.add(l7_deconv, l4_conv)\n l4_deconv = tf.layers.conv2d_transpose(l4_sum, num_classes, (4,4), (2,2), padding='SAME', kernel_initializer=kernel_initializer())\n\n # Add l4 deconv output to l3 output\n l3_sum = tf.add(l4_deconv, l3_conv)\n out = tf.layers.conv2d_transpose(l3_sum, num_classes, (16,16), (8,8), padding='SAME', kernel_initializer=kernel_initializer())\n\n return out", "def baseUNet(input_shape,conv_depth,n_classes,init_w,dropout):\n inputs = Input(input_shape)\n\n c1=Conv2D(conv_depth,\n (3,3),\n activation='relu',\n padding='same',\n kernel_initializer=init_w)(inputs)\n\n c1=Conv2D(conv_depth,\n (3,3),\n activation='relu',\n padding=\"same\",\n kernel_initializer=init_w)(c1)\n\n # pool down to next layer\n pool1 = MaxPooling2D((2,2),strides = (2,2))(c1)\n\n conv_depth *= 2\n\n # convolute down again\n conv2 = Conv2D(conv_depth,\n activation = 'relu',\n kernel_size = (3,3),\n strides = (1,1),\n padding = \"same\",\n kernel_initializer=init_w)(pool1)\n\n conv2 = Conv2D(conv_depth,\n activation = 'relu',\n kernel_size = (3,3),\n strides = (1,1),\n padding = \"same\",\n kernel_initializer=init_w)(conv2)\n \n # pool down again\n pool2 = MaxPooling2D((2,2),strides = (2,2))(conv2)\n\n conv_depth *= 2 \n\n # Convolution\n conv3 = Conv2D(conv_depth,\n activation = 'relu',\n kernel_size = (3,3),\n strides = (1,1),\n padding = \"same\",\n kernel_initializer=init_w)(pool2)\n\n conv3 = Conv2D(conv_depth,\n activation = 'relu',\n kernel_size = 
(3,3),\n strides = (1,1),\n padding = \"same\",\n kernel_initializer=init_w)(conv3)\n \n # pool down\n pool3 = MaxPooling2D((2,2),strides = (2,2))(conv3)\n\n conv_depth *= 2 \n # Convolution\n conv4 = Conv2D(conv_depth,\n activation = 'relu',\n kernel_size = (3,3),\n strides = (1,1),\n padding = \"same\",\n kernel_initializer=init_w)(pool3)\n\n conv4 = Conv2D(conv_depth,\n activation = 'relu',\n kernel_size = (3,3),\n strides = (1,1),\n padding = \"same\",\n kernel_initializer=init_w)(conv4)\n \n # pool down \n pool4 = MaxPooling2D((2,2),strides = (2,2))(conv4)\n\n conv_depth *=2 \n\n # Convolution\n conv5 = Conv2D(conv_depth,\n activation = 'relu',\n kernel_size = (3,3),\n strides = (1,1),\n padding = \"same\",\n kernel_initializer=init_w)(pool4)\n\n conv5 = Conv2D(conv_depth,\n activation = 'relu',\n kernel_size = (3,3),\n strides = (1,1),\n padding = \"same\",\n kernel_initializer=init_w)(conv5)\n\n drop = Dropout(dropout)(conv5)\n\n conv_depth /= 2\n conv_depth = int(conv_depth) \n # do upsampling\n up1 = UpSampling2D(size = (2,2))(drop)\n conv6 = Conv2D(conv_depth,\n activation = 'relu',\n kernel_size = (3,3),\n strides = (1,1),\n padding = \"same\",\n kernel_initializer=init_w)(up1)\n \n # add in skip info\n cat1 = concatenate([conv4,conv6],axis = 3)\n conv6 = Conv2D(conv_depth,\n activation = 'relu',\n kernel_size = (3,3),\n strides = (1,1),\n padding = \"same\",\n kernel_initializer=init_w)(cat1)\n\n conv6 = Conv2D(conv_depth,\n activation = 'relu',\n kernel_size = (3,3),\n strides = (1,1),\n padding = \"same\",\n kernel_initializer=init_w)(conv6)\n\n conv_depth /= 2\n conv_depth = int(conv_depth)\n # do upsampling\n up2 = UpSampling2D(size = (2,2))(conv6)\n conv7 = Conv2D(conv_depth,\n activation = 'relu',\n kernel_size = (3,3),\n strides = (1,1),\n padding = \"same\",\n kernel_initializer=init_w)(up2)\n \n # add in skip info\n cat2 = concatenate([conv3,conv7],axis = 3)\n conv7 = Conv2D(conv_depth,\n activation = 'relu',\n kernel_size = (3,3),\n strides = (1,1),\n padding = \"same\",\n kernel_initializer=init_w)(cat2)\n\n conv7 = Conv2D(conv_depth,\n activation = 'relu',\n kernel_size = (3,3),\n strides = (1,1),\n padding = \"same\",\n kernel_initializer=init_w)(conv7)\n \n conv_depth /= 2\n conv_depth = int(conv_depth)\n # do upsampling\n up3 = UpSampling2D(size = (2,2))(conv7)\n conv8 = Conv2D(conv_depth,\n activation ='relu',\n kernel_size=(3,3),\n strides = (1,1),\n padding = \"same\",\n kernel_initializer=init_w)(up3)\n \n # add in skip info\n cat3 = concatenate([conv2,conv8],axis = 3)\n conv8 = Conv2D(conv_depth,\n activation ='relu',\n kernel_size = (3,3),\n strides = (1,1),\n padding = \"same\",\n kernel_initializer=init_w)(cat3)\n\n conv8 = Conv2D(conv_depth,\n activation ='relu',\n kernel_size = (3,3),\n strides = (1,1),\n padding = \"same\",\n kernel_initializer=init_w)(conv8)\n \n conv_depth /= 2\n conv_depth = int(conv_depth)\n # do upsampling\n up4 = UpSampling2D(size = (2,2))(conv8)\n conv9 = Conv2D(conv_depth,\n activation ='relu',\n kernel_size = (3,3),\n strides = (1,1),\n padding = \"same\",\n kernel_initializer=init_w)(up4)\n \n # add in skip info\n cat4 = concatenate([c1,conv9],axis = 3)\n conv9 = Conv2D(conv_depth,\n activation ='relu',\n kernel_size = (3,3),\n strides = (1,1),\n padding = \"same\",\n kernel_initializer=init_w)(cat4)\n\n conv9 = Conv2D(conv_depth,\n activation ='relu',\n kernel_size = (3,3),\n strides = (1,1),\n padding = \"same\",\n kernel_initializer=init_w)(conv9)\n\n outputs = Conv2D(n_classes, 1, activation = 'softmax')(conv9)\n\n return 
outputs,inputs", "def __init__(self):\n self.layer_scope = None\n self.out = None", "def get_layer(self, i):\n return self.layers[i]", "def init_layer(layer):\n \n if layer.weight.ndimension() == 4:\n (n_out, n_in, height, width) = layer.weight.size()\n n = n_in * height * width\n \n elif layer.weight.ndimension() == 2:\n (n_out, n) = layer.weight.size()\n\n std = math.sqrt(2. / n)\n scale = std * math.sqrt(3.)\n layer.weight.data.uniform_(-scale, scale)\n\n if layer.bias is not None:\n layer.bias.data.fill_(0.)", "def test_layer_instantiation(self):\n model = self.get_layer(in_channels=2, out_channels=3, kernel_size=4)\n\n # Assert the number of elements of the weights.\n tile_weights, tile_biases = model.analog_tile.get_weights()\n\n self.assertEqual(tile_weights.numel(), 2*3*4*4)\n if model.use_bias:\n self.assertEqual(tile_biases.numel(), 3)", "def _make_layer(self, out_channels, num_blocks, stride):\n\n # we have num_block blocks per layer, the first block\n # could be 1 or 2, other blocks would always be 1\n strides = [stride] + [1] * (num_blocks - 1)\n layers = []\n for stride in strides:\n type(self).current_block += 1\n # use ParsevalBasicBlock for residual block that needs retraining\n # 9 is the total block of resnet18\n if type(self).current_block + self._k > 9:\n block = ParsevalBasicBlock\n else:\n block = BasicBlock\n layers.append(block(self.in_channels, out_channels, stride))\n self.in_channels = out_channels * block.expansion\n\n return nn.Sequential(*layers)", "def test_layer_instantiation(self):\n model = self.get_layer(in_channels=2, out_channels=3, kernel_size=4)\n\n # Assert the number of elements of the weights.\n tile_weights, tile_biases = model.analog_tile.get_weights()\n\n self.assertEqual(tile_weights.numel(), 2*3*4)\n if model.use_bias:\n self.assertEqual(tile_biases.numel(), 3)" ]
[ "0.75543296", "0.7163699", "0.7143102", "0.70626694", "0.698838", "0.66191417", "0.6470164", "0.64236826", "0.638904", "0.637632", "0.6366208", "0.6310667", "0.6310052", "0.6291664", "0.62838024", "0.6280735", "0.62782174", "0.6276819", "0.62729853", "0.62683105", "0.6253566", "0.6248039", "0.62453204", "0.6228589", "0.6225154", "0.6211683", "0.6189599", "0.61654085", "0.61502224", "0.61397445", "0.6138366", "0.61262256", "0.6119495", "0.60958135", "0.60958135", "0.6093636", "0.60907423", "0.6087395", "0.60708994", "0.6065052", "0.60610205", "0.6058552", "0.6051268", "0.60279834", "0.60242814", "0.60112524", "0.6008601", "0.59993464", "0.59921664", "0.59921587", "0.59860593", "0.5976291", "0.5975115", "0.59721255", "0.5968626", "0.5947901", "0.5938458", "0.5936365", "0.5925577", "0.5911061", "0.5909715", "0.59048307", "0.5890027", "0.5881964", "0.5879387", "0.58561116", "0.5854487", "0.58465666", "0.5844719", "0.58402115", "0.58311766", "0.58255297", "0.58084846", "0.58072245", "0.5807125", "0.5807", "0.58016247", "0.5800346", "0.57963985", "0.5794084", "0.5784811", "0.5783598", "0.57784384", "0.5773503", "0.57734156", "0.5773339", "0.57723325", "0.57714784", "0.5770004", "0.57679796", "0.57573134", "0.5753532", "0.5748224", "0.5742667", "0.57385516", "0.5735652", "0.5726378", "0.57204986", "0.5716165", "0.57153356", "0.57147133" ]
0.0
-1
Demonstrates how to train and test a Deep Belief Network, using MNIST as the example dataset.
def test_DBN(finetune_lr=0.2, pretraining_epochs=30, pretrain_lr=0.1, k=1, training_epochs=200, batch_size = 1): ################################################################################################ ###############################################################load data#################################################################### datasets = numpy.loadtxt("german2.csv", delimiter = "," , usecols=(range(24)) , dtype=theano.config.floatX) labelsets = numpy.loadtxt("german2.csv", delimiter = "," , usecols=(24,) , dtype=int) train_set_x = theano.shared(numpy.asarray(datasets[0:600], dtype=theano.config.floatX)) train_set_y = theano.shared(numpy.asarray(labelsets[0:600], dtype=int)) valid_set_x = theano.shared(numpy.asarray(datasets[0:600], dtype=theano.config.floatX)) valid_set_y = theano.shared(numpy.asarray(labelsets[0:600], dtype=int)) test_set_x = theano.shared(numpy.asarray(datasets[800:999], dtype=theano.config.floatX)) test_set_y = theano.shared(numpy.asarray(labelsets[800:999], dtype=int)) ################################################################################################################################################ # compute number of minibatches for training, validation and testing n_train_batches = train_set_x.get_value(borrow=True).shape[0] / batch_size # numpy random generator numpy_rng = numpy.random.RandomState(123) print '... building the model' # construct the Deep Belief Network ###########################################change feature size : n_ins and label size: n_outs################################################################ dbn = DBN(numpy_rng=numpy_rng, n_ins=24, hidden_layers_sizes=[24,18,12,6], n_outs=2) #print dbn.params[0].eval() ############################################################################################################################################### # start-snippet-2 ######################### # PRETRAINING THE MODEL # ######################### print '... getting the pretraining functions' pretraining_fns = dbn.pretraining_functions(train_set_x=train_set_x, batch_size=batch_size, k=k) print '... pre-training the model' start_time = timeit.default_timer() ## Pre-train layer-wise for i in range(dbn.n_layers): # go through pretraining epochs for epoch in range(pretraining_epochs): # go through the training set c = [] for batch_index in range(n_train_batches): c.append(pretraining_fns[i](index=batch_index, lr=pretrain_lr)) print 'Pre-training layer %i, epoch %d, cost ' % (i, epoch), print numpy.mean(c) RbmParamList = [] for i in dbn.params: print i.eval().shape RbmParamList.append(i.eval()) with open('RbmParamList.pkl', 'w') as f: pickle.dump(RbmParamList, f)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def main():\n dataset = MNIST(BATCH_SIZE)\n \n inputs = Value(type=tf.float32, shape=(None, 784), cls=None)\n targets = Value(type=tf.int64, shape=(None), cls=10)\n fc_hidden = FCHidden(weights=[300, 150])\n\n config = Config(inputs, targets, fc_hidden, LEARNING_RATE)\n\n network_builder = FFNetworkBuilder(config)\n hidden_builder = FFHiddenBuilder()\n _ = network_builder.build_network(hidden_builder)\n\n train_config = TrainerConfig(\n epochs = EPOCHS, display_after = DISPLAY_STEP, \n keep_prob = KEEP_PROB,checkpoint_path=None, \n summary_path=None\n )\n trainer = Trainer(network_builder, train_config)\n trainer.train(dataset)", "def main():\n\n dataset = ConvMNIST(64)\n print(dataset.get_train().x.shape)\n\n\n inputs = Value(type=tf.float32, shape=(None, 28, 28, 1), cls = None)\n targets = Value(type=tf.int64, shape=(None), cls = 10)\n learning_rate = 0.0001\n\n fc_hidden = [1024, 500]\n c_h = [\n (3, 3, 1, 32),\n (3, 3, 32, 64)\n ]\n conv_hidden = ConvHidden(conv_weights=c_h, fc_weights=fc_hidden)\n\n config = Config(inputs, targets, conv_hidden, learning_rate)\n\n network = ConvNetworkBuilder(config)\n hidden = FFConvHiddenBuilder()\n _ = network.build_network(hidden)\n\n\n train_config = TrainerConfig(\n epochs = EPOCHS, display_after = DISPLAY_STEP, \n keep_prob = KEEP_PROB,checkpoint_path=None, \n summary_path=None\n )\n\n trainer = Trainer(network, train_config)\n trainer.train(dataset)", "def test_train():\n set_seed(42) # Noqa\n transform = transforms.Compose([\n transforms.ToTensor(),\n ])\n mnist_train = MNIST(\"./\", download=True, train=False, transform=transform)\n model = SimpleNet()\n\n optimizer = torch.optim.SGD(model.parameters(), lr=1e-2, momentum=0.5)\n criterion = nn.CrossEntropyLoss()\n\n train_loader = DataLoader(mnist_train, batch_size=64, shuffle=True,\n num_workers=0)\n loss, accuracy = train(model, optimizer, criterion, train_loader,\n imshape=(-1, 28*28))\n\n assert type(loss) == torch.Tensor\n assert type(accuracy) == np.float64\n assert len(loss.shape) == 0", "def train(features, targets, weights, bias):\n # see gradient_descent for explanation\n epochs = 100\n learning_rate = 0.1\n\n picture_nb = 2\n\n # Print current accuracy. How many people have been classified as sick/healthy correctly?\n predictions = predict(features, weights, bias)\n print(\"Accuracy: \", np.mean(predictions == targets))\n\n for epoch in range(epochs):\n if epoch % 10 == 0:\n # get normalized scores\n predictions = activation(pre_activation(features, weights, bias))\n # compare with targets to see how bad our algorithm is\n print(\"Cost = %s\" % cost(predictions, targets))\n # Replot graph. Check in create_dataset for explanation of parameters\n if picture_nb == 2:\n plt.plot(features[:, 0], (weights[0] * features[:, 0] + bias) / -weights[1], color='red')\n elif picture_nb == 11:\n plt.plot(features[:, 0], (weights[0] * features[:, 0] + bias) / -weights[1], color='green')\n else:\n plt.plot(features[:, 0], (weights[0] * features[:, 0] + bias) / -weights[1], color='orange')\n picture_nb+=1\n\n # Initialize gradients\n # weights_gradients is 2D array with 2 values\n weights_gradients = np.zeros(weights.shape)\n bias_gradient = 0\n # Go through each row\n for feature, target in zip(features, targets):\n # Compute prediction\n z = pre_activation(feature, weights, bias)\n # Get normalized score\n y = activation(z)\n # Update gradients based on formulas established before. Look at gradient_descent to understand what we\n # are doing. 
Also, the formulas are below, just before the call of the function train.\n weights_gradients += (y - target) * derivative_activation(z) * feature\n # no multiplication of feature because it does not depend on some coordinates.\n bias_gradient += (y - target) * derivative_activation(z)\n\n # Update variables. These are the lines that result the cost to get reduced.\n weights = weights - learning_rate * weights_gradients\n bias = bias - learning_rate * bias_gradient\n\n # Print final accuracy. How many people have been classified as sick/healthy correctly?\n predictions = predict(features, weights, bias)\n print(\"Accuracy: \", np.mean(predictions == targets))\n\n plt.scatter(features[:, 0], features[:, 1], s=40, c=targets, cmap=plt.cm.Spectral)\n plt.savefig(\"DataPointsLineEvolution.png\")\n # legend for understanding\n plt.legend(['Original division', 'New division', 'New division', 'New division', 'New division', 'New division',\n 'New division', 'New division', 'New division', 'Final division'], loc='upper left')\n # save picture of data points drawn.\n plt.savefig(\"DataPointsLineEvolutionLegend.png\")", "def train_neural_network(x_train, train_labels, x_test, orig_test):\n train_features = np.array(x_train)\n test_features = np.array(x_test)\n train_labels = np.array(train_labels['Col2'])\n\n model = models.make_model(params=train_features,\n model_name='neural_network_1')\n\n checkpoint_cb, tensorboard_cb = models.callbacks(\n model_name='nn_submission03_s_1_m1_f_2165.ckpt')\n epochs = 6\n batch_size = 32\n\n history = model.fit(train_features,\n train_labels,\n batch_size=batch_size,\n epochs=epochs,\n callbacks=[checkpoint_cb, tensorboard_cb]\n # validation_data=(val_features, val_labels)\n )\n\n evaluation.evaluation(model, train_features, train_labels)\n evaluation.plot_metrices(epochs, history, if_val=False)\n evaluation.plot_confusion_matrix(model, train_features, train_labels)\n evaluation.submission_nn(model=model,\n test_features=test_features,\n orig_test_df=orig_test,\n submission_name='nn_submission03_s_1_m1_f_2165.csv')", "def trainNet():", "def test_DBN(finetune_lr=0.1, pretraining_epochs=100,\r\n pretrain_lr=0.01, k=1, training_epochs=1000,\r\n dataset='mnist.pkl.gz', batch_size=10):\r\n\r\n datasets = load_data(dataset)\r\n\r\n train_set_x, train_set_y = datasets[0]\r\n valid_set_x, valid_set_y = datasets[1]\r\n test_set_x, test_set_y = datasets[2]\r\n\r\n # compute number of minibatches for training, validation and testing\r\n n_train_batches = train_set_x.get_value(borrow=True).shape[0] / batch_size\r\n\r\n # numpy random generator\r\n numpy_rng = numpy.random.RandomState(123)\r\n print '... building the model'\r\n # construct the Deep Belief Network\r\n dbn = DBN(numpy_rng=numpy_rng, n_ins=28 * 28,\r\n hidden_layers_sizes=[1000, 1000, 1000],\r\n n_outs=10)\r\n\r\n #########################\r\n # PRETRAINING THE MODEL #\r\n #########################\r\n print '... getting the pretraining functions'\r\n pretraining_fns = dbn.pretraining_functions(train_set_x=train_set_x,\r\n batch_size=batch_size,\r\n k=k)\r\n\r\n print '... 
pre-training the model'\r\n start_time = time.clock()\r\n ## Pre-train layer-wise\r\n for i in xrange(dbn.n_layers):\r\n # go through pretraining epochs\r\n for epoch in xrange(pretraining_epochs):\r\n # go through the training set\r\n c = []\r\n for batch_index in xrange(n_train_batches):\r\n c.append(pretraining_fns[i](index=batch_index,\r\n lr=pretrain_lr))\r\n print 'Pre-training layer %i, epoch %d, cost ' % (i, epoch),\r\n print numpy.mean(c)\r\n\r\n end_time = time.clock()\r\n print >> sys.stderr, ('The pretraining code for file ' +\r\n os.path.split(__file__)[1] +\r\n ' ran for %.2fm' % ((end_time - start_time) / 60.))\r\n\r\n ########################\r\n # FINETUNING THE MODEL #\r\n ########################\r\n\r\n # get the training, validation and testing function for the model\r\n print '... getting the finetuning functions'\r\n train_fn, validate_model, test_model = dbn.build_finetune_functions(\r\n datasets=datasets, batch_size=batch_size,\r\n learning_rate=finetune_lr)\r\n\r\n print '... finetunning the model'\r\n # early-stopping parameters\r\n patience = 4 * n_train_batches # look as this many examples regardless\r\n patience_increase = 2. # wait this much longer when a new best is\r\n # found\r\n improvement_threshold = 0.995 # a relative improvement of this much is\r\n # considered significant\r\n validation_frequency = min(n_train_batches, patience / 2)\r\n # go through this many\r\n # minibatche before checking the network\r\n # on the validation set; in this case we\r\n # check every epoch\r\n\r\n best_params = None\r\n best_validation_loss = numpy.inf\r\n test_score = 0.\r\n start_time = time.clock()\r\n\r\n done_looping = False\r\n epoch = 0\r\n\r\n while (epoch < training_epochs) and (not done_looping):\r\n epoch = epoch + 1\r\n for minibatch_index in xrange(n_train_batches):\r\n\r\n minibatch_avg_cost = train_fn(minibatch_index)\r\n iter = (epoch - 1) * n_train_batches + minibatch_index\r\n\r\n if (iter + 1) % validation_frequency == 0:\r\n\r\n validation_losses = validate_model()\r\n this_validation_loss = numpy.mean(validation_losses)\r\n print('epoch %i, minibatch %i/%i, validation error %f %%' % \\\r\n (epoch, minibatch_index + 1, n_train_batches,\r\n this_validation_loss * 100.))\r\n\r\n # if we got the best validation score until now\r\n if this_validation_loss < best_validation_loss:\r\n\r\n #improve patience if loss improvement is good enough\r\n if (this_validation_loss < best_validation_loss *\r\n improvement_threshold):\r\n patience = max(patience, iter * patience_increase)\r\n\r\n # save best validation score and iteration number\r\n best_validation_loss = this_validation_loss\r\n best_iter = iter\r\n\r\n # test it on the test set\r\n test_losses = test_model()\r\n test_score = numpy.mean(test_losses)\r\n print((' epoch %i, minibatch %i/%i, test error of '\r\n 'best model %f %%') %\r\n (epoch, minibatch_index + 1, n_train_batches,\r\n test_score * 100.))\r\n\r\n if patience <= iter:\r\n done_looping = True\r\n break\r\n\r\n end_time = time.clock()\r\n print(('Optimization complete with best validation score of %f %%,'\r\n 'with test performance %f %%') %\r\n (best_validation_loss * 100., test_score * 100.))\r\n print >> sys.stderr, ('The fine tuning code for file ' +\r\n os.path.split(__file__)[1] +\r\n ' ran for %.2fm' % ((end_time - start_time)\r\n / 60.))", "def test_DBN(finetune_lr=0.1, pretraining_epochs=100,\n pretrain_lr=0.01, k=1, training_epochs=1000,\n dataset='mnist.pkl.gz', batch_size=10):\n\n datasets = load_data(dataset)\n\n train_set_x, 
train_set_y = datasets[0]\n valid_set_x, valid_set_y = datasets[1]\n test_set_x, test_set_y = datasets[2]\n\n # compute number of minibatches for training, validation and testing\n n_train_batches = train_set_x.get_value(borrow=True).shape[0] / batch_size\n\n # numpy random generator\n numpy_rng = numpy.random.RandomState(123)\n print('... building the model')\n # construct the Deep Belief Network\n dbn = DBN(numpy_rng=numpy_rng, n_ins=28 * 28,\n hidden_layers_sizes=[1000, 1000, 1000],\n n_outs=10)\n\n # start-snippet-2\n #########################\n # PRETRAINING THE MODEL #\n #########################\n print('... getting the pretraining functions')\n pretraining_fns = dbn.pretraining_functions(train_set_x=train_set_x,\n batch_size=batch_size,\n k=k)\n\n print('... pre-training the model')\n start_time = timeit.default_timer()\n ## Pre-train layer-wise\n for i in range(dbn.n_layers):\n # go through pretraining epochs\n for epoch in range(pretraining_epochs):\n # go through the training set\n c = []\n for batch_index in range(int(n_train_batches)):\n c.append(pretraining_fns[i](index=batch_index,\n lr=pretrain_lr))\n print('Pre-training layer %i, epoch %d, cost ' % (i, epoch), end=' ')\n print(numpy.mean(c))\n\n end_time = timeit.default_timer()\n # end-snippet-2\n print(('The pretraining code for file ' +\n os.path.split(__file__)[1] +\n ' ran for %.2fm' % ((end_time - start_time) / 60.)), file=sys.stderr)\n ########################\n # FINETUNING THE MODEL #\n ########################\n\n # get the training, validation and testing function for the model\n print('... getting the finetuning functions')\n train_fn, validate_model, test_model = dbn.build_finetune_functions(\n datasets=datasets,\n batch_size=batch_size,\n learning_rate=finetune_lr\n )\n\n print('... finetuning the model')\n # early-stopping parameters\n patience = 4 * n_train_batches # look as this many examples regardless\n patience_increase = 2. 
# wait this much longer when a new best is\n # found\n improvement_threshold = 0.995 # a relative improvement of this much is\n # considered significant\n validation_frequency = min(n_train_batches, patience / 2)\n # go through this many\n # minibatches before checking the network\n # on the validation set; in this case we\n # check every epoch\n\n best_validation_loss = numpy.inf\n test_score = 0.\n start_time = timeit.default_timer()\n\n done_looping = False\n epoch = 0\n\n while (epoch < training_epochs) and (not done_looping):\n epoch = epoch + 1\n for minibatch_index in range(int(n_train_batches)):\n\n minibatch_avg_cost = train_fn(minibatch_index)\n iter = (epoch - 1) * n_train_batches + minibatch_index\n\n if (iter + 1) % validation_frequency == 0:\n\n validation_losses = validate_model()\n this_validation_loss = numpy.mean(validation_losses)\n print((\n 'epoch %i, minibatch %i/%i, validation error %f %%'\n % (\n epoch,\n minibatch_index + 1,\n n_train_batches,\n this_validation_loss * 100.\n )\n ))\n\n # if we got the best validation score until now\n if this_validation_loss < best_validation_loss:\n\n #improve patience if loss improvement is good enough\n if (\n this_validation_loss < best_validation_loss *\n improvement_threshold\n ):\n patience = max(patience, iter * patience_increase)\n\n # save best validation score and iteration number\n best_validation_loss = this_validation_loss\n best_iter = iter\n\n # test it on the test set\n test_losses = test_model()\n test_score = numpy.mean(test_losses)\n print(((' epoch %i, minibatch %i/%i, test error of '\n 'best model %f %%') %\n (epoch, minibatch_index + 1, n_train_batches,\n test_score * 100.)))\n\n if patience <= iter:\n done_looping = True\n break\n\n end_time = timeit.default_timer()\n print((\n (\n 'Optimization complete with best validation score of %f %%, '\n 'obtained at iteration %i, '\n 'with test performance %f %%'\n ) % (best_validation_loss * 100., best_iter + 1, test_score * 100.)\n ))\n print(('The fine tuning code for file ' +\n os.path.split(__file__)[1] +\n ' ran for %.2fm' % ((end_time - start_time)\n / 60.)), file=sys.stderr)", "def main():\n # Load database\n (images_train, targets_train), (images_test, targets_test) = tf.keras.datasets.mnist.load_data()\n\n # Normalization\n images_train = images_train.reshape(-1, 784).astype(float)\n scaler = StandardScaler()\n images_train = scaler.fit_transform(images_train)\n images_test = images_test.reshape(-1, 784).astype(float)\n images_test = scaler.transform(images_test)\n\n images_train = images_train.reshape(-1, 28, 28, 1).astype(float)\n images_test = images_test.reshape(-1, 28, 28, 1).astype(float)\n\n # One hot encoding\n targets_train = tf.keras.utils.to_categorical(targets_train)\n targets_test = tf.keras.utils.to_categorical(targets_test)\n\n # Network architecture\n model = tf.keras.models.Sequential()\n model.add(tf.keras.layers.Conv2D(30, (5, 5), input_shape=(28, 28, 1), \\\n activation=\"relu\", padding='same'))\n model.add(tf.keras.layers.MaxPooling2D(pool_size=(2, 2)))\n model.add(tf.keras.layers.Conv2D(15, (3, 3), activation=\"relu\", padding='same'))\n model.add(tf.keras.layers.MaxPooling2D(pool_size=(2, 2)))\n model.add(tf.keras.layers.Flatten())\n model.add(tf.keras.layers.Dense(128, activation=\"relu\"))\n model.add(tf.keras.layers.Dense(50, activation=\"relu\"))\n model.add(tf.keras.layers.Dense(10, activation=\"softmax\"))\n\n # Learn\n optimizer = tf.keras.optimizers.SGD()\n\n @tf.function\n def train_step(images, targets):\n \"\"\"\n Define 
the training step by step\n \"\"\"\n # Save all operations\n with tf.GradientTape() as tape:\n # Make prediction\n predictions = model(images)\n # Compute loss\n loss = tf.keras.losses.categorical_crossentropy(targets, predictions)\n # Compute gradients\n gradients = tape.gradient(loss, model.trainable_variables)\n # Update model\n optimizer.apply_gradients(zip(gradients, model.trainable_variables))\n\n batch_size = 32\n epochs = 10\n images_per_epoch = len(images_train) // batch_size\n for _ in range(epochs):\n for i in range(images_per_epoch):\n start = i*batch_size\n train_step(images_train[start:start+batch_size], targets_train[start:start+batch_size])\n\n # Compile must be defined to use evaluate method\n model.compile(\n loss=\"categorical_crossentropy\",\n optimizer=\"sgd\",\n metrics=[\"accuracy\"])\n\n # Evaluate on the test database\n scores = model.evaluate(images_test, targets_test, verbose=0)\n print(scores)", "def main():\n # Import or download the mnist data, from target file path.\n mnist = input_data.read_data_sets(\"Data/\", one_hot=True)\n\n # Train and test model.\n train(mnist)", "def test_keras_mnist():\n data = fetch(\"mnist\")\n check(data, n_samples_train=60000, n_samples_test=10000, n_features=28 * 28)", "def test_multiclass_gradient_descent_blobs():\n from your_code import MultiClassGradientDescent\n\n np.random.seed(0)\n\n features, _, targets, _ = load_data('blobs')\n\n learner = MultiClassGradientDescent(loss='squared', regularization=None,\n learning_rate=0.01, reg_param=0.05)\n learner.fit(features, targets, batch_size=None, max_iter=1000)\n predictions = learner.predict(features)\n\n print(\"predictions: \", predictions)\n print(\"targets: \", targets)", "def test_simple_net_forward(self):\n net = ecn.NeuralNet(2, (2,), 1)\n net.weights = self._set_initial_weights()\n \n dataset = [[1, 1]]\n targets = [[0]]\n \n net.train(dataset, targets, 0.5, 1)\n self.assertTrue(net.fit_values[0] == [0.3, 1.4])\n self.assertTrue(net.outputs[0] == [0.5744, 0.8022])\n self.assertTrue(net.fit_values[1] == [0.1922])\n self.assertTrue(net.outputs[1] == [0.5479])\n print('Finished testing simple neural net forward\\n')", "def test_deep_learning_models():\n atom = ATOMClassifier(*mnist, n_rows=0.1, random_state=1)\n pytest.raises(PermissionError, atom.clean)\n atom.run(KerasClassifier(neural_network, epochs=1, batch_size=512, verbose=0))", "def deep_neural_network(X_train: np.ndarray, y_train: np.ndarray,\n X_test: np.ndarray = None, y_test: np.ndarray = None):\n # _check_deep_network_params(X_train, y_train)\n X_train, X_val, y_train, y_val = train_test_split(X_train, y_train,\n test_size=0.2,\n random_state=1, stratify=y_train)\n\n tf.random.set_seed(1)\n num_epochs = 95\n batch_size = 100\n steps_per_epoch = int(np.ceil(len(y_train) / batch_size))\n\n model = tf.keras.Sequential()\n model.add(tf.keras.layers.Dense(units=32,\n activation='tanh'))\n model.add(tf.keras.layers.Dense(units=256,\n activation='softsign'))\n model.add(tf.keras.layers.Dense(units=512,\n activation='tanh'))\n model.add(tf.keras.layers.Dense(units=256,\n activation='softsign'))\n model.add(tf.keras.layers.Dense(units=64,\n activation='softplus'))\n model.add(tf.keras.layers.Dense(units=1,\n activation='sigmoid'))\n model.build(input_shape=(None, len(X_train[0])))\n\n model.compile(optimizer='adam',\n loss=tf.keras.losses.binary_crossentropy,\n metrics=['accuracy'])\n\n hist = model.fit(X_train, y_train,\n batch_size=batch_size,\n epochs=num_epochs,\n steps_per_epoch=steps_per_epoch,\n 
validation_data=(X_val, y_val))\n plot_learning_history(hist.history)\n\n if X_test is not None and y_test is not None and len(X_test) == len(y_test):\n y_pred = model.predict(X_test)\n y_pred = list(map(lambda item: 0 if item[0] <= 0.5 else 1, y_pred))\n print(sum(y_pred), len(y_pred))\n print(f\"Deep Neural Network test accuracy: {accuracy_score(y_test, y_pred)}\")\n\n return model", "def test_machine_learning():", "def test_training():\n config = SmartDict()\n\n config.NETWORK_CLASS = LMBiSeNet\n config.DATASET_CLASS = DummyCamvid\n\n config.IS_DEBUG = False\n config.IMAGE_SIZE = [128, 160]\n config.BATCH_SIZE = 2\n config.TEST_STEPS = 1\n config.MAX_STEPS = 2\n config.SAVE_CHECKPOINT_STEPS = 1\n config.KEEP_CHECKPOINT_MAX = 5\n config.SUMMARISE_STEPS = 1\n config.IS_PRETRAIN = False\n config.TASK = Tasks.SEMANTIC_SEGMENTATION\n\n # network model config\n config.NETWORK = SmartDict()\n config.NETWORK.OPTIMIZER_CLASS = tf.train.AdamOptimizer\n config.NETWORK.OPTIMIZER_KWARGS = {\"learning_rate\": 0.001}\n config.NETWORK.IMAGE_SIZE = config.IMAGE_SIZE\n config.NETWORK.BATCH_SIZE = config.BATCH_SIZE\n config.NETWORK.DATA_FORMAT = \"NHWC\"\n\n # daasegt config\n config.DATASET = SmartDict()\n config.DATASET.PRE_PROCESSOR = Resize(config.IMAGE_SIZE)\n config.DATASET.BATCH_SIZE = config.BATCH_SIZE\n config.DATASET.DATA_FORMAT = \"NHWC\"\n\n environment.init(\"test_lm_bisenet\")\n prepare_dirs(recreate=True)\n start_training(config, profile_step=1)", "def train_neural_network(session, optimizer, keep_probability, feature_batch, label_batch):\n session.run(optimizer, feed_dict={y: label_batch, x: feature_batch, keep_prob: keep_probability})", "def test_net(network, model, mnist_path):\n print(\"============== Starting Testing ==============\")\n # load the saved model for evaluation\n param_dict = load_checkpoint(\"checkpoint_lenet-1_1875.ckpt\")\n # load parameter to the network\n load_param_into_net(network, param_dict)\n # load testing dataset\n ds_eval = create_dataset(os.path.join(mnist_path, \"test\"))\n acc = model.eval(ds_eval, dataset_sink_mode=False)\n print(\"============== Accuracy:{} ==============\".format(acc))", "def main():\n # Get datasets\n train_dataset, test_dataset = get_datasets()\n\n # Build neural network\n layers = [tf.keras.layers.Dense(22, activation='sigmoid'),\n tf.keras.layers.Dense(30, activation='sigmoid'),\n tf.keras.layers.Dense(1, activation='sigmoid')]\n\n model = tf.keras.models.Sequential(layers)\n model.compile(optimizer='adam', loss='binary_crossentropy',\n metrics=['accuracy'])\n\n model.fit(train_dataset, epochs=10)\n\n # Test model\n model.evaluate(test_dataset, verbose=2)", "def main():\n training_data, validation_data, test_data = mnist.load()\n\n model = nn.NeuralNetwork([784, 100, 10], learning_rate=0.01, batch_size=50)\n\n model_training = training.EarlyStoppingRegularization(model,\n training_data,\n validation_data,\n test_data,\n max_steps_without_progression=2)\n result = model_training.train()\n\n result.save('models/mnist')", "def test_n_and_train(self):\r\n\r\n n = NeuronNetwork(1,\r\n [1],\r\n [[[0.0,0.0]]],\r\n [[0.0]])\r\n\r\n inputs = [[0,0], [0,1], [1,0], [1,1]]\r\n targets = [[0], [0], [0], [1]]\r\n\r\n n.train(inputs,targets,1000,180)\r\n\r\n print(n)\r\n self.assertLess(n.feed_forward([0,0]), [0.001])\r\n self.assertGreater(n.feed_forward([1,0]), [0.001])\r\n self.assertGreater(n.feed_forward([0,1]), [0.001])\r\n self.assertGreater(n.feed_forward([1,1]), [0.9])", "def test_training(self):\n\t\tpass", "def MNIST_experiment():\n 
tsetlin_machine = TsetlinMachine(number_clauses=1000,\n number_action_states=1000,\n precision=3.0,\n threshold=10)\n\n X, y, val_X, val_y = MNIST()\n\n tsetlin_machine.fit(X, y, val_X, val_y, 300)\n print('Final training accuracy:', tsetlin_machine.accuracy(X, y))\n print('Final validation accuracy:', tsetlin_machine.accuracy(val_X, val_y))", "def train_neural_network(session, optimizer, keep_probability, feature_batch, label_batch, x, y, keep_prob):\n session.run(optimizer, feed_dict={x: feature_batch, y: label_batch, keep_prob: keep_probability})\n pass", "def run_test(filepath):\n num_class = 120 # dogbreeds class\n model = Resnet50MO(num_class, checkpoint_path=None)\n\n # image settings\n crop_size = model.input_size\n scale_size = model.input_size\n input_size = model.input_size\n input_mean = model.input_mean\n input_std = model.input_std\n\n # hyperparams settings\n epochs = 1\n batch_size = 32 # mini-batch-size\n learning_rate = 0.01\n momentum = 0.5\n decay_factor = 10\n eval_freq = 5 # in epochs\n\n # data generator settings: dataset and dataloader\n train_dataset = DogImageset(filepath, input_size,\n input_mean=input_mean, input_std=input_std)\n val_dataset = DogImageset(filepath, input_size,\n input_mean=input_mean, input_std=input_std)\n \n train_loader = DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=True)\n val_loader = DataLoader(dataset=val_dataset, batch_size=batch_size, shuffle=False)\n\n # Loss and backprop settings\n model.cuda()\n criterion = torch.nn.CrossEntropyLoss()\n optimizer = torch.optim.SGD(\n model.parameters(),\n lr=learning_rate,\n momentum=momentum\n )\n\n run_model_train_test(model, train_loader, criterion, optimizer)", "def train(self):\r\n raw_dataset = pd.read_csv(self.datafile, sep = ',', header = 0,\r\n na_values = '?', comment = '\\t',\r\n skipinitialspace = True)\r\n\r\n dataset = raw_dataset.copy()\r\n dataset.tail()\r\n\r\n # Clear unknown values\r\n dataset.isna().sum()\r\n dataset = dataset.dropna()\r\n\r\n # takes a sample of 80% of the data points\r\n train_dataset = dataset.sample(frac = 0.8, random_state = 0)\r\n test_dataset = dataset.drop(train_dataset.index)\r\n\r\n # Split features from labels for training and test datasets\r\n train_features = train_dataset.copy()\r\n test_features = test_dataset.copy()\r\n train_labels = train_features.pop('Quality')\r\n test_labels = test_features.pop('Quality')\r\n\r\n # normalize data\r\n normalizer = preprocessing.Normalization()\r\n normalizer.adapt(np.array(train_features))\r\n\r\n # builds the model\r\n def build_and_compile_model(norm):\r\n model = keras.Sequential([\r\n norm,\r\n layers.Dense(64, activation='relu'),\r\n layers.Dense(64, activation='relu'),\r\n layers.Dense(1)\r\n ])\r\n\r\n model.compile(loss='mean_absolute_error',\r\n optimizer=tf.keras.optimizers.Adam(0.001))\r\n return model\r\n\r\n deep_neural_network_model = build_and_compile_model(normalizer)\r\n\r\n history = deep_neural_network_model.fit(\r\n train_features, train_labels,\r\n validation_split=0.2,\r\n verbose=0, epochs=100)\r\n\r\n deep_neural_network_model.save('deep_neural_network_model')", "def run_test():\n # Get the sets of images and labels for training, validation, and\n # test on MNIST.\n train ,validation,test = datasets_mnist.read_data_sets(FLAGS.input_data_dir, FLAGS.fake_data)\n # Tell TensorFlow that the model will be built into the default Graph.\n with tf.Graph().as_default():\n # Generate placeholders for the images and labels.\n images_placeholder, labels_placeholder, phase_pl = 
placeholder_inputs(\n FLAGS.batch_size)\n\n # Build a Graph that computes predictions from the inference model.\n logits = mnist.inference(images_placeholder,\n FLAGS.hidden1,\n FLAGS.hidden2, \n phase_pl)\n\n eval_correct = mnist.evaluation(logits, labels_placeholder)\n # Add the variable initializer Op.\n all_variable = tf.global_variables()\n \n # Create a saver for writing training checkpoints.\n saver = tf.train.Saver()\n\n # Create a session for running Ops on the Graph.\n with tf.Session() as sess:\n\n saver.restore(sess, \"log/model.ckpt-1999\")\n for variable in all_variable:\n if \"moving\" in variable.name:\n print(variable.name, variable.eval())\n do_eval(sess,\n eval_correct,\n images_placeholder,\n labels_placeholder,\n phase_pl,\n test)", "def train(self, nsamples = 1, verbose = False, random = True):\n imgs, skels = self.images.get_batch(nimages = nsamples, random = random);\n self.trainer.run(session = self.session, feed_dict={self.input : imgs, self.skeleton : skels})\n if verbose:\n self.plot_results(imgs);", "def main():\n # initialize the class labels and set the seed of the pseudorandom\n # number generator so we can reproduce our results\n labels = [\"dog\", \"cat\", \"panda\"]\n np.random.seed(1)\n\n # be * learned * by our model, but for the sake of this example, let's use random values\n W = np.random.randn(3, 3072)\n b = np.random.randn(3)\n\n # load our example image, resize it, and then flatten it into our\n # \"feature vector\" representation\n orig = cv2.imread(\"beagle.png\")\n image = cv2.resize(orig, (32, 32)).flatten()\n\n # compute the output scores by taking the dot product between the\n # weight matrix and image pixels, followed by adding in the b\n scores = W.dot(image) + b\n\n # loop over the scores + labels and display them\n for (label, score) in zip(labels, scores):\n print(\"[INFO] {}: {:.2f}\".format(label, score))\n\n # draw the label with the highest score on the image as our prediction\n cv2.putText(\n orig, \"Label: {}\".format(labels[np.argmax(scores)]), (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.9, (0, 255, 0), 2\n )\n\n # display our input image\n cv2.imshow(\"Image\", orig)\n cv2.waitKey(0)", "def test_training(self):\n warnings.filterwarnings('ignore')\n example_args = example_args_parser()\n example_args.unittest = True\n # prepare data\n example_args.stage = 'prepare'\n example_wrapper(example_args)\n # train goalDNN model\n example_args.stage = 'train'\n example_args.model = 'goalDNN'\n example_wrapper(example_args)\n # train cVAE model\n example_args.model = 'cVAE'\n example_wrapper(example_args)\n # train gcVAE model\n example_args.model = 'gcVAE'\n example_wrapper(example_args)\n # cVAE harmonization\n example_args.stage = 'predict'\n example_args.model = 'cVAE'\n example_wrapper(example_args)\n # gcVAE harmonization\n example_args.model = 'gcVAE'\n example_wrapper(example_args)\n # goalDNN prediction\n example_args.model = 'goalDNN'\n example_wrapper(example_args)\n # XGBoost\n example_args.stage = 'train'\n example_args.model = 'XGBoost'\n example_wrapper(example_args)\n # compare with reference results\n check_args = check_results_args_parser()\n check_args.unittest = True\n check_reference_results(check_args)", "def train():\n\n ### DO NOT CHANGE SEEDS!\n # Set the random seeds for reproducibility\n np.random.seed(42)\n\n ## Prepare all functions\n # Get number of units in each hidden layer specified in the string such as 100,100\n if FLAGS.dnn_hidden_units:\n dnn_hidden_units = FLAGS.dnn_hidden_units.split(\",\")\n 
dnn_hidden_units = [int(dnn_hidden_unit_) for dnn_hidden_unit_ in dnn_hidden_units]\n else:\n dnn_hidden_units = []\n\n # Get negative slope parameter for LeakyReLU\n neg_slope = FLAGS.neg_slope\n\n ########################\n # PUT YOUR CODE HERE #\n #######################\n import matplotlib.pyplot as plt\n\n data = cifar10_utils.get_cifar10(FLAGS.data_dir)\n train = data['train']\n test = data['test']\n dim_x = train.images.shape[1]*train.images.shape[2]*train.images.shape[3]\n\n mlp = MLP(dim_x, dnn_hidden_units, train.labels.shape[1], neg_slope)\n loss_module = CrossEntropyModule()\n\n loss_train = np.zeros((int(np.floor(FLAGS.max_steps/FLAGS.eval_freq), )))\n loss_test = np.zeros((int(np.floor(FLAGS.max_steps/FLAGS.eval_freq), )))\n accuracy_test = np.zeros((int(np.floor(FLAGS.max_steps/FLAGS.eval_freq), )))\n\n images_test = test.images\n labels_test = test.labels\n images_test = np.reshape(images_test, (images_test.shape[0], dim_x))\n\n for i in range(0, FLAGS.max_steps):\n if PRINTS:\n print('iter', i+1, end='\\r')\n images, labels = train.next_batch(FLAGS.batch_size) \n images = np.reshape(images, (images.shape[0], dim_x))\n\n pred = mlp.forward(images)\n loss = loss_module.forward(pred, labels)\n loss_grad = loss_module.backward(pred, labels)\n mlp.backward(loss_grad)\n\n for module in reversed(mlp.modules):\n if isinstance(module, LinearModule):\n module.params['weight'] -= 1/FLAGS.batch_size*FLAGS.learning_rate*module.grads['weight']\n module.params['bias'] -= 1/FLAGS.batch_size*FLAGS.learning_rate*module.grads['bias']\n if (i+1) % FLAGS.eval_freq == 0:\n pred_test = mlp.forward(images_test)\n loss_train[i // FLAGS.eval_freq] = loss\n accuracy_test[i // FLAGS.eval_freq] = accuracy(pred_test, labels_test)\n loss_test[i // FLAGS.eval_freq] = loss_module.forward(pred_test, labels_test)\n if PRINTS:\n print()\n print('test_loss:', loss_test[i // FLAGS.eval_freq])\n print('test_accuracy:', accuracy_test[i // FLAGS.eval_freq])\n print('train_loss:', loss_train[i // FLAGS.eval_freq])\n\n if PLOTS:\n fig, ax = plt.subplots(1, 2, figsize=(10,5))\n fig.suptitle('Training curves for Numpy MLP\\nFinal test accuracy: {:0.4f}, default configuration'.format(accuracy_test[i // FLAGS.eval_freq]))\n\n ax[0].set_title('Loss')\n ax[0].set_ylabel('Loss value')\n ax[0].set_xlabel('No of batches seen x{}'.format(FLAGS.eval_freq))\n ax[0].plot(loss_train, label='Train')\n ax[0].plot(loss_test, label='Test')\n ax[0].legend()\n\n ax[1].set_title('Accuracy')\n ax[1].set_ylabel('Accuracy value')\n ax[1].set_xlabel('No of batches seen x{}'.format(FLAGS.eval_freq))\n ax[1].plot(accuracy_test, label='Test')\n ax[1].legend()\n plt.show()\n\n\n ########################\n # END OF YOUR CODE #\n #######################", "def test_mnist():\n env = os.environ.copy()\n if not \"CUDA_VISIBLE_DEVICES\" in env:\n env[\"CUDA_VISIBLE_DEVICES\"] = \"\"\n subprocess.run(\n \"edflow -b template_tfe/config.yaml -t --max_batcher_per_epoch --num_epochs 1\",\n shell=True,\n check=True,\n env=env,\n )", "def main():\n X_train, Y_train, y_train = load_batch(\"data_batch_1\")\n X_test, Y_test, y_test = load_batch(\"test_batch\")\n X_val, Y_val, y_val = load_batch((\"data_batch_2\"))\n\n X_train, X_train_mean, X_train_std = normalize(X_train)\n X_test = normalize_mean_std(X_test, X_train_mean, X_train_std)\n X_val = normalize_mean_std(X_val, X_train_mean, X_train_std)\n\n data = {\n \"X_train\": X_train,\n \"Y_train\": Y_train,\n \"y_train\": y_train,\n \"X_test\": X_test,\n \"Y_test\": Y_test,\n \"y_test\": y_test,\n \"X_val\": 
X_val,\n \"Y_val\": Y_val,\n \"y_val\": y_val,\n }\n\n network = Network(data)", "def train_neural_network(self, session, x, y, keep_prob, optimizer, keep_probability, feature_batch, label_batch):\n session.run(optimizer, feed_dict={x: feature_batch, y: label_batch, keep_prob: keep_probability})\n pass", "def train():\n # YOUR TRAINING CODE GOES HERE", "def train(self, features, labels, seed=None):\n raise NotImplementedError('Not implemented')", "def train_neural_network(session, optimizer, keep_probability, feature_batch, label_batch):\n # TODO: Implement Function\n session.run(optimizer, feed_dict={x:feature_batch, y:label_batch, keep_prob:keep_probability})", "def computeNN(train, test):\n \n shallow_NN = test[['user_id', 'movie_id']].copy()\n deep_NN = test[['user_id', 'movie_id']].copy()\n \n categorical_train_y = np.zeros([train.shape[0], 5])\n categorical_train_y[np.arange(train.shape[0]), train.rating - 1] = 1\n\n\n categorical_test_y = np.zeros([test.shape[0], 5])\n categorical_test_y[np.arange(test.shape[0]), test.rating - 1] = 1\n \n n_items = 1000\n n_users = 10000\n \n \n def shallow_net():\n features = 48\n\n input_i = layers.Input(shape=[1])\n i = layers.Embedding(n_items + 1, features)(input_i)\n i = layers.Flatten()(i)\n i = layers.normalization.BatchNormalization()(i)\n\n input_u = layers.Input(shape=[1])\n u = layers.Embedding(n_users + 1, features)(input_u)\n u = layers.Flatten()(u)\n u = layers.normalization.BatchNormalization()(u)\n\n nn = layers.concatenate([i, u])\n\n nn = layers.Dense(512, activation='relu')(nn)\n nn = layers.Dropout(0.5)(nn)\n nn = layers.normalization.BatchNormalization()(nn)\n\n nn = layers.Dense(128, activation='relu')(nn)\n\n output = layers.Dense(5, activation='softmax')(nn)\n\n model = models.Model([input_i, input_u], output)\n model.compile(optimizer='adamax', loss='categorical_crossentropy')\n return model\n \n def deep_net():\n features = 48\n\n input_i = layers.Input(shape=[1])\n i = layers.Embedding(n_items + 1, features)(input_i)\n i = layers.Flatten()(i)\n i = layers.normalization.BatchNormalization()(i)\n\n input_u = layers.Input(shape=[1])\n u = layers.Embedding(n_users + 1, features)(input_u)\n u = layers.Flatten()(u)\n u = layers.normalization.BatchNormalization()(u)\n\n nn = layers.concatenate([i, u])\n\n nn = layers.Dense(1024, activation='relu')(nn)\n nn = layers.Dropout(0.5)(nn)\n nn = layers.normalization.BatchNormalization()(nn)\n nn = layers.Dense(512, activation='relu')(nn)\n nn = layers.Dropout(0.5)(nn)\n nn = layers.normalization.BatchNormalization()(nn)\n nn = layers.Dense(256, activation='relu')(nn)\n nn = layers.Dropout(0.5)(nn)\n nn = layers.normalization.BatchNormalization()(nn)\n nn = layers.Dense(128, activation='relu')(nn)\n\n output = layers.Dense(5, activation='softmax')(nn)\n\n model = models.Model([input_i, input_u], output)\n model.compile(optimizer='adamax', loss='categorical_crossentropy')\n\n return model\n\n model_deep = deep_net()\n model_shallow = shallow_net()\n print (\"Starting to compute shallow neural network...\")\n model_shallow.fit([train.movie_id, train.user_id], y=categorical_train_y, batch_size=20480, epochs=20)\n pred_shallow = model_shallow.predict([test.movie_id, test.user_id])\n print (\"... Finished sucessfully\")\n \n print (\"Starting to compute deep neural network...\")\n model_deep.fit([train.movie_id, train.user_id], y=categorical_train_y, batch_size=20480, epochs=20)\n pred_deep = model_deep.predict([test.movie_id, test.user_id])\n print (\"... 
Finished sucessfully\")\n \n \n shallow_NN['NN_shallow_rating'] = np.dot(pred_shallow,[1,2, 3, 4, 5])\n deep_NN['NN_deep_rating'] = np.dot(pred_deep,[1,2, 3, 4, 5])\n \n NN_rating = shallow_NN\\\n .merge(deep_NN, on=['user_id', 'movie_id'])\n \n return NN_rating", "def main():\n # Initializing learning rate\n learning_rate = 0.0005\n # Initializing stopping criteria\n stopping_criteria = 0.01\n # load the data training data from a csv file with an url\n training_x,testing_x, training_y, testing_y,mean,sd= ai.store_data(\"https://github.com/santiagocantu98/K-Nearest-Neightbours/raw/master/diabetes.csv\",\"training\")\n normal_testing = np.copy(testing_x)\n\n # scalates the features of the testing data\n testing_data_scaled,mean,sd = ai.scale_features(testing_x,mean,sd)\n ai.print_scaled_data(testing_data_scaled,\"testing\")\n ai.calculate_euclidean_distance(training_x, training_y , testing_data_scaled, testing_y,normal_testing)", "def train():\n ### DO NOT CHANGE SEEDS!\n # Set the random seeds for reproducibility\n np.random.seed(42)\n\n ## Prepare all functions\n # Get number of units in each hidden layer specified in the string such as 100,100\n if FLAGS.dnn_hidden_units:\n dnn_hidden_units = FLAGS.dnn_hidden_units.split(\",\")\n dnn_hidden_units = [int(dnn_hidden_unit_) for dnn_hidden_unit_ in dnn_hidden_units]\n else:\n dnn_hidden_units = []\n\n ########################\n # PUT YOUR CODE HERE #\n #######################\n\n model = MLP(n_hidden=dnn_hidden_units,n_classes=10,batch_size=FLAGS.batch_size, input_dim=32*32*3, \n weight_decay=FLAGS.weight_reg_strength, weight_scale=FLAGS.weight_init_scale)\n\n Datasets = utils.get_cifar10(data_dir = DATA_DIR_DEFAULT, one_hot = True, validation_size = 0)\n \n for i in range(1500): #(FLAGS.max_steps):\n train_batch = Datasets.train.next_batch(batch_size = FLAGS.batch_size)\n #Get the model output\n logits = model.inference(x=train_batch[0].reshape([FLAGS.batch_size,32*32*3]))\n #Get the loss and let the model set the loss derivative.\n loss = model.loss(logits=logits, labels=train_batch[1])\n #Perform training step\n model.train_step(loss=loss, flags=FLAGS)\n\n #Every 100th iteratin print accuracy on the whole test set.\n if i % 100 == 0:\n # for layer in model.layers:\n test_batch = Datasets.test.next_batch(batch_size = 200) #Datasets.test.num_examples\n logits = model.inference(x=test_batch[0].reshape([200,32*32*3]))\n print('-- Step: ', i, \" accuracy: \",model.accuracy(logits=logits,labels=test_batch[1]),'loss', loss )\n\n ########################\n # END OF YOUR CODE #\n #######################", "def main():\n nn = CarsClassifierModel()\n train_x, train_y, test_x, test_y = nn.load_data_preprocess()\n history = nn.run(train_x,train_y)\n nn.evaluate(test_x, test_y)\n nn.save(\"keras_nn_5\")\n #nn.plots(history)\n #print(train_x.shape)\n #plt.imshow(train_x[52])\n #plt.title(\"Car\")\n #plt.show()\n #print(train_y[52])", "def mnist_v1(batch_size=128, epochs=20, kernel_size=3):\n (X_train, Y_train), (X_test, Y_test) = mnist.load_data()\n\n # Data preparation\n X_train = prepare(X_train)\n X_test = prepare(X_test)\n Y_train = np_utils.to_categorical(Y_train, 10) # 0..9\n Y_test = np_utils.to_categorical(Y_test, 10) # 0..9\n\n # Fitting the data to the augmentation data generator\n datagen = augmentedData(X_train)\n\n # --------------------\n # NEURAL NETWORK MODEL\n # --------------------\n\n # Model architecture\n model = Sequential()\n\n model.add(Conv2D(32, (kernel_size, kernel_size), activation='relu', input_shape=(1, 28, 28)))\n 
model.add(Conv2D(32, (kernel_size, kernel_size), activation='relu'))\n model.add(MaxPooling2D(pool_size=(2, 2)))\n model.add(Dropout(0.25))\n\n model.add(Flatten())\n model.add(Dense(128, activation='relu'))\n model.add(Dropout(0.5))\n model.add(Dense(10, activation='softmax'))\n\n # Model compilation\n model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\n\n #Tensor board saves\n now = datetime.datetime.now()\n tensorboard = TensorBoard(log_dir=\"logs_first/kernel_size:{}\".format(kernel_size))\n\n model.fit_generator(datagen.flow(X_train, Y_train, batch_size=batch_size), epochs=epochs, verbose=1, callbacks=[tensorboard])\n\n # Model saves\n now = datetime.datetime.now()\n model.save(\"sirr_HYPERPARAMETERS_mnist_first_\" + str(now.hour) + \"h\" + str(now.minute) + \".h5\")\n\n # Model evaluation\n return model.evaluate(X_test, Y_test, verbose=1)", "def run_mnist_test():\n mnist = input_data.read_data_sets(\"MNIST_data/\", one_hot=True)\n train_x, train_y = mnist.train.images, mnist.train.labels,\n test_x, test_y = mnist.test.images, mnist.test.labels\n # Reshape right off the bat to save some time.\n train_x = train_x.reshape(-1, 28, 28, 1)\n test_x = test_x.reshape(-1, 28, 28, 1)\n\n conv1 = LeNetClassifier.ConvLayer(kernel_width=5, kernel_height=5,\n feature_maps=1)\n conv2 = LeNetClassifier.ConvLayer(kernel_width=3, kernel_height=3,\n feature_maps=32)\n conv3 = LeNetClassifier.ConvLayer(kernel_width=3, kernel_height=3,\n feature_maps=64)\n network = LeNetClassifier((28, 28, 1), [conv1, conv2, conv3],\n [4 * 4 * 128, 625], 10, batch_size=128)\n\n saver = tf.train.Saver()\n\n sess = tf.Session()\n init = tf.initialize_all_variables()\n sess.run(init)\n\n writer = tf.train.SummaryWriter(\"mnist_logs\", sess.graph_def)\n\n print(\"Tensorflow: Starting MNIST test...\")\n\n accuracy = 0\n start_time = time.time()\n iterations = 0\n while iterations < 2000:\n if iterations % 500 == 0:\n test_batch = mnist.test.next_batch(128)\n result = sess.run(network.predict(),\n feed_dict={network.inputs(): test_batch[0],\n network.expected_outputs(): test_batch[1]})\n argmax = np.argmax(test_batch[1], axis=1)\n accuracy = np.mean(argmax == result)\n print(\"Tensorflow: step %d, testing accuracy %s\" % \\\n (iterations, accuracy))\n\n batch = mnist.train.next_batch(128)\n sess.run(network.train(), feed_dict={network.inputs(): batch[0],\n network.expected_outputs(): batch[1]})\n iterations += 1\n\n # Save the network at the end.\n #saver.save(sess, \"Variables/test.ckpt\")\n\n elapsed = time.time() - start_time\n speed = iterations / elapsed\n print(\"Tensorflow: Ran %d training iterations. 
(%f iter/s)\" % \\\n (iterations, speed))\n print(\"Tensorflow: MNIST test completed in %f seconds.\" % (elapsed))\n return (elapsed, speed)", "def train_neural_network(session, optimizer, keep_probability, feature_batch, label_batch):\n # TODO: Implement Function \n \n session.run(optimizer,feed_dict={x:feature_batch,y:label_batch, keep_prob:keep_probability})\n \n pass", "def train_and_test_model(In_train, Out_train, In_test, Out_test):\n\n # Naive Bayes Classifier\n print(\"Naive Bayes\")\n NB_classifier = MultinomialNB()\n NB_classifier.fit(In_train, Out_train)\n predictions = NB_classifier.predict(In_test)\n print(NB_classifier.score(In_test, Out_test))\n NB_Confusion_Matrix = confusion_matrix(Out_test, predictions)\n print(NB_Confusion_Matrix)\n plot_confusion_matrix(NB_Confusion_Matrix)\n print()\n\n # Stochastic Gradient Descent Classifier\n print(\"Stochastic Gradient Descent\")\n SGD_classifier = SGDClassifier()\n SGD_classifier.fit(In_train, Out_train)\n predictions = SGD_classifier.predict(In_test)\n print(SGD_classifier.score(In_test, Out_test))\n SGD_Confusion_Matrix = confusion_matrix(Out_test, predictions)\n print(SGD_Confusion_Matrix)\n plot_confusion_matrix(SGD_Confusion_Matrix)\n print()\n\n # MultiLayer Perceptron Classifier\n print(\"MultiLayer Perceptron\")\n MLP_classifier = MLPClassifier()\n MLP_classifier.fit(In_train, Out_train)\n predictions = MLP_classifier.predict(In_test)\n print(MLP_classifier.score(In_test, Out_test))\n MLP_Confusion_Matrix = confusion_matrix(Out_test, predictions)\n print(MLP_Confusion_Matrix)\n plot_confusion_matrix(MLP_Confusion_Matrix)\n print()\n\n # Random Forest Classifier\n print(\"Random Forest Classifier\")\n RF_classifier = RandomForestClassifier()\n RF_classifier.fit(In_train, Out_train)\n predictions = RF_classifier.predict(In_test)\n scores = cross_val_score(RF_classifier, In_test, Out_test)\n print(scores.mean())\n RF_Confusion_Matrix = confusion_matrix(Out_test, predictions)\n print(RF_Confusion_Matrix)\n plot_confusion_matrix(RF_Confusion_Matrix)\n print()\n\n # Decision Tree Classifier\n print(\"Decision Tree\")\n DT_classifier = tree.DecisionTreeClassifier()\n DT_classifier.fit(In_train, Out_train)\n predictions = RF_classifier.predict(In_test)\n print(DT_classifier.score(In_test, Out_test))\n DT_Confusion_Matrix = confusion_matrix(Out_test, predictions)\n print(DT_Confusion_Matrix)\n plot_confusion_matrix(DT_Confusion_Matrix)\n print()\n\n # K-Nearest Neighbors Classifier\n print(\"K-NN\")\n KNN_Classifier = KNeighborsClassifier()\n KNN_Classifier.fit(In_train, Out_train)\n predictions = KNN_Classifier.predict(In_test)\n print(KNN_Classifier.score(In_test, Out_test))\n KNN_Confusion_Matrix = confusion_matrix(Out_test, predictions)\n print(KNN_Confusion_Matrix)\n plot_confusion_matrix(KNN_Confusion_Matrix)\n print()\n\n # Support Vector Machines\n print(\"Support Vector Machines\")\n SVM_Classifier = svm.SVC()\n SVM_Classifier.fit(In_train, Out_train)\n predictions = KNN_Classifier.predict(In_test)\n print(SVM_Classifier.score(In_test, Out_test))\n SVM_Confusion_Matrix = confusion_matrix(Out_test, predictions)\n print(SVM_Confusion_Matrix)\n plot_confusion_matrix(SVM_Confusion_Matrix)\n print()\n\n return NB_classifier", "def neural_network(xtrain, ytrain, xtest, ytest,labels_mapping, scaled = False):\n if not scaled :\n scaler = StandardScaler()\n xtrain = scaler.fit_transform(xtrain)\n xtest = scaler.transform(xtest)\n\n nn = MLPClassifier() #hidden_layer_sizes=30, alpha=0.0001, early_stopping=True\n nn = 
__train_and_test(nn, xtrain, ytrain, xtest, ytest,labels_mapping)\n return nn", "def main() -> None:\n\n # Load pickled (adj, feat) tuple\n with open(os.path.join(NETWORK_DIR, PICKLE_FILE), \"rb\") as file:\n adj, features = pickle.load(file)\n\n g = nx.Graph(adj) # Recreate graph using node indices (0 to num_nodes-1)\n\n # Draw the network\n # nx.draw_networkx(g, with_labels=False, node_size=50, node_color=\"r\")\n # plt.show()\n\n # Preprocessing (train/test split)\n np.random.seed(0) # make sure train-test split is consistent\n adj_sparse = nx.to_scipy_sparse_matrix(g)\n\n # Perform train-test split\n (\n adj_train,\n train_edges,\n train_edges_false,\n val_edges,\n val_edges_false,\n test_edges,\n test_edges_false,\n ) = mask_test_edges(adj_sparse, test_frac=0.3, val_frac=0.1)\n\n # new graph object with only non-hidden edges\n g_train = nx.from_scipy_sparse_matrix(adj_train)\n\n # Inspect train/test split\n print(\"Total nodes:\", adj_sparse.shape[0])\n\n # adj is symmetric, so nnz (num non-zero) = 2 * num_edges\n print(\"Total edges:\", int(adj_sparse.nnz / 2))\n print(\"Training edges (positive):\", len(train_edges))\n print(\"Training edges (negative):\", len(train_edges_false))\n print(\"Validation edges (positive):\", len(val_edges))\n print(\"Validation edges (negative):\", len(val_edges_false))\n print(\"Test edges (positive):\", len(test_edges))\n print(\"Test edges (negative):\", len(test_edges_false))\n\n # Train node2vec (Learn Node Embeddings)\n\n # node2vec settings\n # NOTE: When p = q = 1, this is equivalent to DeepWalk\n\n P = 1 # Return hyperparameter\n Q = 1 # In-out hyperparameter\n WINDOW_SIZE = 10 # Context size for optimization\n NUM_WALKS = 10 # Number of walks per source\n WALK_LENGTH = 80 # Length of walk per source\n DIMENSIONS = 128 # Embedding dimension\n DIRECTED = False # Graph directed/undirected\n WORKERS = 8 # Num. 
parallel workers\n ITER = 1 # SGD epochs\n\n # Preprocessing, generate walks\n\n # create node2vec graph instance\n g_n2v = node2vec.Graph(g_train, DIRECTED, P, Q)\n g_n2v.preprocess_transition_probs()\n walks = g_n2v.simulate_walks(NUM_WALKS, WALK_LENGTH)\n walks = [list(map(str, walk)) for walk in walks]\n\n # Train skip-gram model\n model = Word2Vec(\n walks,\n size=DIMENSIONS,\n window=WINDOW_SIZE,\n min_count=0,\n sg=1,\n workers=WORKERS,\n iter=ITER,\n )\n\n # Store embeddings mapping\n emb_mappings = model.wv\n\n print(emb_mappings)\n\n # Create node embeddings matrix (rows = nodes, columns = embedding features)\n emb_list = []\n for node_index in range(0, adj_sparse.shape[0]):\n node_str = str(node_index)\n node_emb = emb_mappings[node_str]\n emb_list.append(node_emb)\n emb_matrix = np.vstack(emb_list)\n\n def get_edge_embeddings(edge_list):\n \"\"\"\n Generate bootstrapped edge embeddings (as is done in node2vec paper)\n Edge embedding for (v1, v2) = hadamard product of node embeddings for\n v1, v2.\n \"\"\"\n embs = []\n for edge in edge_list:\n node1 = edge[0]\n node2 = edge[1]\n emb1 = emb_matrix[node1]\n emb2 = emb_matrix[node2]\n edge_emb = np.multiply(emb1, emb2)\n embs.append(edge_emb)\n embs = np.array(embs)\n return embs\n\n # Train-set edge embeddings\n pos_train_edge_embs = get_edge_embeddings(train_edges)\n neg_train_edge_embs = get_edge_embeddings(train_edges_false)\n train_edge_embs = np.concatenate(\n [pos_train_edge_embs, neg_train_edge_embs]\n )\n\n # Create train-set edge labels: 1 = real edge, 0 = false edge\n train_edge_labels = np.concatenate(\n [np.ones(len(train_edges)), np.zeros(len(train_edges_false))]\n )\n\n # Val-set edge embeddings, labels\n pos_val_edge_embs = get_edge_embeddings(val_edges)\n neg_val_edge_embs = get_edge_embeddings(val_edges_false)\n val_edge_embs = np.concatenate([pos_val_edge_embs, neg_val_edge_embs])\n val_edge_labels = np.concatenate(\n [np.ones(len(val_edges)), np.zeros(len(val_edges_false))]\n )\n\n # Test-set edge embeddings, labels\n pos_test_edge_embs = get_edge_embeddings(test_edges)\n neg_test_edge_embs = get_edge_embeddings(test_edges_false)\n test_edge_embs = np.concatenate([pos_test_edge_embs, neg_test_edge_embs])\n\n # Create val-set edge labels: 1 = real edge, 0 = false edge\n test_edge_labels = np.concatenate(\n [np.ones(len(test_edges)), np.zeros(len(test_edges_false))]\n )\n\n # Train logistic regression classifier on train-set edge embeddings\n edge_classifier = LogisticRegression(random_state=0)\n edge_classifier.fit(train_edge_embs, train_edge_labels)\n\n # Predicted edge scores: probability of being of class \"1\" (real edge)\n val_preds = edge_classifier.predict_proba(val_edge_embs)[:, 1]\n val_roc = roc_auc_score(val_edge_labels, val_preds)\n val_ap = average_precision_score(val_edge_labels, val_preds)\n\n # Predicted edge scores: probability of being of class \"1\" (real edge)\n test_preds = edge_classifier.predict_proba(test_edge_embs)[:, 1]\n test_roc = roc_auc_score(test_edge_labels, test_preds)\n test_ap = average_precision_score(test_edge_labels, test_preds)\n\n print(\"node2vec Validation ROC score: \", str(val_roc))\n print(\"node2vec Validation AP score: \", str(val_ap))\n print(\"node2vec Test ROC score: \", str(test_roc))\n print(\"node2vec Test AP score: \", str(test_ap))", "def test_training(self):\n self.classifier.train(\"test\", self.message)", "def test_one_hidden(self) -> None:\n self.network = self.nn_class(self.n_features, True)\n accuracy = self.get_accuracy()\n self.assertTrue(\n accuracy 
> self.threshold,\n \"This implementation is most likely wrong since \"\n f\"the accuracy ({accuracy}) is less than {self.threshold}.\",\n )", "def test_init_net_simple(self):\n net = ecn.NeuralNet(2, (2,), 1)\n self.assertEqual(2, len(net.weights.keys()))\n self.assertEqual((2, 3), np.shape(net.weights['h0']))\n self.assertEqual((1, 3), np.shape(net.weights['y']))\n print('Finished testing simple neural net init\\n')", "def main():\n # construct the argument parse and parse the arguments\n args = argparse.ArgumentParser()\n args.add_argument(\"-o\", \"--output\", required=True, help=\"path to the output loss/accuracy plot\")\n args = vars(args.parse_args())\n\n # grab the MNIST dataset (if this is your first time using this\n # dataset then the 11MB download may take a minute)\n print(\"[INFO] accessing MNIST...\")\n ((train_x, train_y), (test_x, test_y)) = mnist.load_data()\n\n # each image in the MNIST dataset is represented as a 28x28x1\n # image, but in order to apply a standard neural network we must\n # first \"flatten\" the image to be simple list of 28x28=784 pixels\n train_x = train_x.reshape((train_x.shape[0], 28 * 28 * 1))\n test_x = test_x.reshape((test_x.shape[0], 28 * 28 * 1))\n # scale data to the range of [0, 1]\n train_x = train_x.astype(\"float32\") / 255.0\n test_x = test_x.astype(\"float32\") / 255.0\n\n # convert the labels from integers to vectors\n label_binarizer = LabelBinarizer()\n train_y = label_binarizer.fit_transform(train_y)\n test_y = label_binarizer.transform(test_y)\n\n # define the 784-256-128-10 architecture using Keras\n model = Sequential()\n model.add(Dense(256, input_shape=(784,), activation=\"sigmoid\"))\n model.add(Dense(128, activation=\"sigmoid\"))\n model.add(Dense(10, activation=\"softmax\"))\n\n # train the model using SGD\n print(\"[INFO] training network...\")\n sgd = SGD(0.01)\n model.compile(loss=\"categorical_crossentropy\", optimizer=sgd, metrics=[\"accuracy\"])\n model_fit = model.fit(train_x, train_y, validation_data=(test_x, test_y), epochs=100, batch_size=128)\n\n # evaluate the network\n print(\"[INFO] evaluating network...\")\n predictions = model.predict(test_x, batch_size=128)\n print(\n classification_report(\n test_y.argmax(axis=1), predictions.argmax(axis=1), target_names=[str(x) for x in label_binarizer.classes_]\n )\n )\n\n # plot the training loss and accuracy\n plt.style.use(\"ggplot\")\n plt.figure()\n plt.plot(np.arange(0, 100), model_fit.history[\"loss\"], label=\"train_loss\")\n plt.plot(np.arange(0, 100), model_fit.history[\"val_loss\"], label=\"val_loss\")\n plt.plot(np.arange(0, 100), model_fit.history[\"acc\"], label=\"train_acc\")\n plt.plot(np.arange(0, 100), model_fit.history[\"val_acc\"], label=\"val_acc\")\n plt.title(\"Training Loss and Accuracy\")\n plt.xlabel(\"Epoch #\")\n plt.ylabel(\"Loss/Accuracy\")\n plt.legend()\n plt.savefig(args[\"output\"])", "def train_classifier(data, n_iters=3, batch_size=100):\n tqdm.write(f'Training a dilated CNN classifier for {n_iters} iterations.')\n (trainx, trainy), (valx, valy), (testx, testy) = data\n train_size, val_size, test_size = trainx.shape[0], valx.shape[0], testx.shape[0]\n train_batches = (train_size - 1) // batch_size + 1\n val_batches = (val_size - 1) // batch_size + 1\n test_batches = (test_size - 1) // batch_size + 1\n\n model = Network()\n model.add_layer(ConvLayer(10, (3, 3), (1, 1), 2)) \\\n .add_layer(ReluLayer()) \\\n .add_layer(Pad2DLayer((2, 2))) \\\n .add_layer(ConvLayer(10, (3, 3), (1, 1), 2)) \\\n .add_layer(ReluLayer()) \\\n 
.add_layer(Pool2DLayer((2, 2))) \\\n .add_layer(ConvLayer(10, (3, 3), (1, 1), 2)) \\\n .add_layer(ReluLayer()) \\\n .add_layer(Pool2DLayer((2, 2))) \\\n .add_layer(FlattenLayer()) \\\n .add_layer(FCLayer(32)) \\\n .add_layer(ReluLayer()) \\\n .add_layer(FCLayer(10)) \\\n .add_layer(SoftmaxCELayer())\n for i in range(1, n_iters + 1):\n train_order = np.random.permutation(train_size)\n bar = trange(train_batches, file=sys.stdout)\n for j in bar:\n cost = model.forward(trainx[train_order[j * batch_size: (j + 1) * batch_size]],\n trainy[train_order[j * batch_size: (j + 1) * batch_size]])\n bar.set_description(f'Curr loss: {cost}')\n model.backward()\n model.adam_trainstep()\n correct = []\n for j in range(val_batches):\n res = model.run(valx[j * batch_size:(j + 1) * batch_size])\n correct.append(np.argmax(res, axis=1) == valy[j * batch_size:(j + 1) * batch_size])\n tqdm.write(f'Validation accuracy: {np.mean(correct)}')\n tqdm.write('-------------------------------------------------------')\n\n correct = []\n for i in range(test_batches):\n res = model.run(testx[i * batch_size:(i + 1) * batch_size])\n correct.append(np.argmax(res, axis=1) == testy[i * batch_size:(i + 1) * batch_size])\n tqdm.write(f'Test accuracy: {np.mean(correct)}')\n tqdm.write('-------------------------------------------------------')", "def nn(data):\n training_set = SupervisedDataSet*\n\n\n input_nodes = 3\n hidden_layer_1 = 10\n hidden_layer_2 = 10\n output_layer = 5\n\n net = buildNetwork(input_nodes, hidden_layer_1, hidden_layer_2, output_layer, bias=True, hiddenclass=TanhLayer)", "def test_net_backpropagation_four_inputs(self):\n net = ecn.NeuralNet(2, (2,), 1)\n net.weights = self._set_initial_weights()\n \n dataset = [[1, 1], [0, 0], [0, 1], [1, 0]]\n targets = [[0], [0], [1], [1]]\n \n net.train(dataset, targets, 0.5, 1)\n self.assertTrue((net.weights['h0'][0] == [-0.6018, 0.400, 0.499 ]).all())\n self.assertTrue((net.weights['h0'][1] == [-0.1969, 0.8027, 0.8028]).all())\n self.assertTrue((net.weights['y'][0] == [-0.2970, -0.3995, 0.9021]).all())\n print('Finished testing backpropagation four inputs\\n')", "def demonstrate(self, train_path):\n if not os.path.exists(train_path):\n print(\"training json file not exists, program quit\")\n sys.exit()\n with open(train_path) as f:\n json_data = json.load(f)\n self.train_time_stamp_list = json_data['time']\n self.train_image_path_list = json_data['image_path']\n self.train_position_list = json_data['position']\n self.train_angle_list = json_data['angle']\n self.train_semantic_tag_list = json_data['semantic_tag']\n num_images = len(self.train_image_path_list)\n\n # create nodes\n print(\"start demonstrating, totally {} images in demonstration set\".format(num_images))\n self.node_id_list = []\n self.node_semantic_tag_list = []\n self.node_metric_feature_list = []\n self.node_conv_feature_list = []\n last_node_position = np.array([float('inf'), float('inf'), float('inf')])\n for train_index in range(num_images):\n train_position = np.array(self.train_position_list[train_index])\n if np.sqrt(np.sum(np.square(train_position - last_node_position))) > self.min_node_distance:\n last_node_position = train_position\n self.node_id_list.append(train_index)\n train_semantic_tag = self.train_semantic_tag_list[train_index]\n self.node_semantic_tag_list.append(train_semantic_tag)\n node_image_path = self.train_image_path_list[train_index]\n node_image = cv2.imread(node_image_path)\n image_batch = self.process_batch([node_image])\n node_conv_feature, node_metric_feature = 
self.sess.run([self.conv_features,\n self.metric_features], feed_dict = {self.images_placeholder: image_batch})\n self.node_conv_feature_list.append(node_conv_feature[0])\n self.node_metric_feature_list.append(node_metric_feature[0])\n print(\"{}/{} demonstration image shown\".format(train_index+1, num_images))\n self.node_number = len(self.node_id_list)\n print(\"all nodes created, totally {} of nodes\".format(len(self.node_id_list)))", "def train():\n pass", "def get_mnist_cnn():\n # Set defaults.\n nb_classes = 10 #dataset dependent \n batch_size = 128\n epochs = 4\n \n # Input image dimensions\n img_rows, img_cols = 28, 28\n\n # Get the data.\n # the data, shuffled and split between train and test sets\n (x_train, y_train), (x_test, y_test) = mnist.load_data()\n \n if K.image_data_format() == 'channels_first':\n x_train = x_train.reshape(x_train.shape[0], 1, img_rows, img_cols)\n x_test = x_test.reshape(x_test.shape[0], 1, img_rows, img_cols)\n input_shape = (1, img_rows, img_cols)\n else:\n x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)\n x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)\n input_shape = (img_rows, img_cols, 1)\n\n #x_train = x_train.reshape(60000, 784)\n #x_test = x_test.reshape(10000, 784)\n \n x_train = x_train.astype('float32')\n x_test = x_test.astype('float32')\n x_train /= 255\n x_test /= 255\n\n #print('x_train shape:', x_train.shape)\n #print(x_train.shape[0], 'train samples')\n #print(x_test.shape[0], 'test samples')\n\n # convert class vectors to binary class matrices\n y_train = to_categorical(y_train, nb_classes)\n y_test = to_categorical(y_test, nb_classes)\n\n # convert class vectors to binary class matrices\n #y_train = keras.utils.to_categorical(y_train, nb_classes)\n #y_test = keras.utils.to_categorical(y_test, nb_classes)\n\n return (nb_classes, batch_size, input_shape, x_train, x_test, y_train, y_test, epochs)", "def test_trained_network(sess, validation_parameters):\n \n (_, image_lists, _, _, _, bottleneck_tensor, jpeg_data_tensor, _,\n bottleneck_input, ground_truth_input, keep_prob, evaluation_step,\n prediction_step, _, _) = validation_parameters\n bottleneck_params = (sess, image_lists, flags.test_batch_size, TESTING_CATEGORY,\n flags.bottleneck_dir, flags.image_dir, jpeg_data_tensor,\n bottleneck_tensor)\n (test_bottlenecks, test_ground_truth, _) = bottleneck.get_val_test_bottlenecks(bottleneck_params)\n (test_accuracy, _) = sess.run(\n [evaluation_step, prediction_step],\n feed_dict={bottleneck_input: test_bottlenecks,\n ground_truth_input: test_ground_truth,\n keep_prob: consts.KEEP_FULL_PROB})\n print('Final test accuracy = %.1f%%' % (test_accuracy * 100))", "def load_mnist_dataset(shape=(-1,784)):\n # We first define a download function, supporting both Python 2 and 3.\n if sys.version_info[0] == 2:\n from urllib import urlretrieve\n else:\n from urllib.request import urlretrieve\n\n def download(filename, source='http://yann.lecun.com/exdb/mnist/'):\n print(\"Downloading %s\" % filename)\n urlretrieve(source + filename, filename)\n\n # We then define functions for loading MNIST images and labels.\n # For convenience, they also download the requested files if needed.\n import gzip\n\n def load_mnist_images(filename):\n if not os.path.exists(filename):\n download(filename)\n # Read the inputs in Yann LeCun's binary format.\n with gzip.open(filename, 'rb') as f:\n data = np.frombuffer(f.read(), np.uint8, offset=16)\n # The inputs are vectors now, we reshape them to monochrome 2D images,\n # following the 
shape convention: (examples, channels, rows, columns)\n data = data.reshape(shape)\n # data = data.reshape(-1, 1, 28, 28) # for lasagne\n # data = data.reshape(-1, 28, 28, 1) # for tensorflow\n # data = data.reshape(-1, 784) # for tensorflow\n # The inputs come as bytes, we convert them to float32 in range [0,1].\n # (Actually to range [0, 255/256], for compatibility to the version\n # provided at http://deeplearning.net/data/mnist/mnist.pkl.gz.)\n return data / np.float32(256)\n\n def load_mnist_labels(filename):\n if not os.path.exists(filename):\n download(filename)\n # Read the labels in Yann LeCun's binary format.\n with gzip.open(filename, 'rb') as f:\n data = np.frombuffer(f.read(), np.uint8, offset=8)\n # The labels are vectors of integers now, that's exactly what we want.\n return data\n\n # We can now download and read the training and test set images and labels.\n ## you may want to change the path\n data_dir = '' #os.getcwd() + '/lasagne_tutorial/'\n # print('data_dir > %s' % data_dir)\n\n X_train = load_mnist_images(data_dir+'train-images-idx3-ubyte.gz')\n y_train = load_mnist_labels(data_dir+'train-labels-idx1-ubyte.gz')\n X_test = load_mnist_images(data_dir+'t10k-images-idx3-ubyte.gz')\n y_test = load_mnist_labels(data_dir+'t10k-labels-idx1-ubyte.gz')\n\n # We reserve the last 10000 training examples for validation.\n X_train, X_val = X_train[:-10000], X_train[-10000:]\n y_train, y_val = y_train[:-10000], y_train[-10000:]\n\n ## you may want to plot one example\n # print('X_train[0][0] >', X_train[0][0].shape, type(X_train[0][0])) # for lasagne\n # print('X_train[0] >', X_train[0].shape, type(X_train[0])) # for tensorflow\n # # exit()\n # # [[..],[..]] (28, 28) numpy.ndarray\n # # plt.imshow 只支持 (28, 28)格式,不支持 (1, 28, 28),所以用 [0][0]\n # fig = plt.figure()\n # #plotwindow = fig.add_subplot(111)\n # # plt.imshow(X_train[0][0], cmap='gray') # for lasagne (-1, 1, 28, 28)\n # plt.imshow(X_train[0].reshape(28,28), cmap='gray') # for tensorflow (-1, 28, 28, 1)\n # plt.title('A training image')\n # plt.show()\n\n # We just return all the arrays in order, as expected in main().\n # (It doesn't matter how we do this as long as we can read them again.)\n return X_train, y_train, X_val, y_val, X_test, y_test", "def train(args):\r\n print('Create generators')\r\n generators = train_valid_test_generators(\r\n valid_proportion=args.valid_proportion,\r\n test_proportion=args.test_proportion,\r\n seed=args.seed,\r\n shape=(args.height, args.width),\r\n batch_size=args.batch_size,\r\n shuffle=True\r\n )\r\n print('Create model')\r\n model = create_mobilenetv2(\r\n input_shape=(args.height, args.width, 3),\r\n alpha=args.alpha,\r\n depth_multiplier=args.depth_multiplier,\r\n l2_reg=args.l2_reg,\r\n seed=args.seed\r\n )\r\n\r\n print('Training freezed model')\r\n freeze_model(model, 'global_max_pooling2d_1')\r\n callbacks = callbacks_factory(\r\n callbacks_list=[\r\n 'early_stopping',\r\n 'tensorboard',\r\n ],\r\n model_mask='mobilenetv2_multiclassification_freezed'\r\n )\r\n model = train_pipeline(\r\n model,\r\n generators['hard_train_generator'],\r\n generators['valid_generator'],\r\n callbacks,\r\n optimizer_lr=args.optimizer_lr,\r\n optimizer_decay=args.optimizer_decay,\r\n epochs=args.epochs\r\n )\r\n\r\n print('Training unfreezed model')\r\n unfreeze_model(model)\r\n callbacks = callbacks_factory(\r\n callbacks_list=[\r\n 'best_model_checkpoint',\r\n 'early_stopping',\r\n 'tensorboard',\r\n 'learning_rate_scheduler'\r\n ],\r\n model_mask='mobilenetv2_multiclassification'\r\n )\r\n model 
= train_pipeline(\r\n model,\r\n generators['easy_train_generator'],\r\n generators['valid_generator'],\r\n callbacks,\r\n optimizer_lr=args.optimizer_lr,\r\n optimizer_decay=args.optimizer_decay,\r\n epochs=3 * args.epochs\r\n )\r\n\r\n print('Save test evaluation')\r\n results = model.evaluate_generator(generators['test_generator'])\r\n pd.DataFrame({\r\n 'MetricsNames': model.metrics_names,\r\n 'Results': results\r\n }).to_csv(os.path.join('../logs/solution_1_test_generator_evaluation.csv'), index=False)", "def neural_net_ex4_ng():\n # ==================\n # read data\n dataset = loadmat('data/ex4data1.mat')\n print(dataset.keys())\n\n y = dataset['y'] # 5000 x 1\n print('dims y: ', y.shape)\n # print('y[0]: ', y[0])\n\n X = dataset['X'] # 5000 x 400\n print('dims X: ', X.shape)\n # print('X[0]: ', X[0])\n\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42)\n\n num_samples_test = X_test.shape[0]\n\n # ==================\n # display data\n\n # pick 20 examples and visualize them\n fig = plt.figure(figsize=(10, 8), facecolor='white')\n fig.add_subplot(651)\n samples = np.random.choice(num_samples_test, 10)\n print('samples:', samples)\n plt.imshow(X_test[samples, :].reshape(-1, 20).T, cmap=\"Greys\")\n plt.axis('off')\n\n # ==================\n # run neural net\n hidden_layer_size = 25\n\n mlp = MLPClassifier(hidden_layer_sizes=(25,), max_iter=20, alpha=1e-4,\n solver='sgd', verbose=False, tol=1e-4, random_state=1,\n learning_rate_init=.1)\n mlp.fit(X_train, y_train.ravel())\n\n predictions = mlp.predict(X_test)\n print('Test set accuracy: {} %'.format(np.mean(predictions == y_test.ravel())*100))\n\n # print(confusion_matrix(y_test, predictions))\n # print(classification_report(y_test, predictions))\n print(\"Training set score: %f\" % mlp.score(X_train, y_train))\n print(\"Test set score: %f\" % mlp.score(X_test, y_test))\n print('coeffs shape', (mlp.coefs_[0]).shape)\n\n # ==================\n # display coefficients of hidden layer\n fig.add_subplot(652)\n plt.imshow(mlp.coefs_[0][:, 0].reshape(20, 20))\n plt.axis('off')\n\n gs = gridspec.GridSpec(6, 5)\n cur_img_idx = 5\n\n # use global min / max to ensure all weights are shown on the same scale\n vmin, vmax = mlp.coefs_[0].min(), mlp.coefs_[0].max()\n for coef, ax in zip(mlp.coefs_[0].T, range(hidden_layer_size)):\n fig.add_subplot(gs[cur_img_idx])\n plt.imshow(coef.reshape(20, 20), cmap=plt.cm.gray, vmin=.5 * vmin, vmax=.5 * vmax)\n plt.axis('off')\n cur_img_idx += 1\n\n plt.show()", "def main():\n parser = argparse.ArgumentParser(description='Implementation of the Naive Bayes and Perceptron classifiers')\n parser.add_argument('--statsmode', help='whether to gather stats or not', choices=['y','Y','N','n'], default='n')\n parser.add_argument('--classifier', help='classifier to use', choices=['BAYES', 'PERCEPTRON'], required=True)\n parser.add_argument('--mode', help='image class to test', choices=['VALIDATION', 'TEST'], default='TEST')\n parser.add_argument('--type', help='image type to train', choices=['DIGIT', 'FACE', 'MNIST'], required=True)\n parser.add_argument('--range', metavar=('START', 'END_EXCLUSIVE'), nargs=2, type=int, help='Range of data to test', default=[0, 100])\n parser.add_argument('--trainpercent', metavar='PERCENT', type=int, help='the percent of training data to use (int out of 100)', default=100, dest='percentage')\n parser.add_argument('--smoothing', type=int, help='Laplace smoothing constant (Naive Bayes)', default=2)\n parser.add_argument('--iterations', type=int, 
help='Number of times to iterate over training data (Perceptron)', default=5)\n parser.add_argument('--debug', help='Outputs more detailed information to stdout', action='store_true')\n parser.add_argument('--statloops', type=int, help='Number of times the classifier iterates over test data (Statistics only)', default=5)\n args = parser.parse_args()\n # image_type = ImageType.DIGIT if args.type == 'DIGIT' else ImageType.FACE\n image_type = None\n if args.type == 'DIGIT':\n image_type = ImageType.DIGIT\n elif args.type == 'FACE':\n image_type = ImageType.FACE\n else:\n image_type = ImageType.MNIST\n mode = Mode.TEST if args.mode == 'TEST' else Mode.VALIDATION\n if args.statsmode == 'y' or args.statsmode == 'Y':\n run_percentages_classifier(args.classifier, image_type, args)\n else:\n run = run_classifier_bayes if args.classifier == 'BAYES' else run_classifier_perceptron\n run(mode, image_type, args)", "def load_mnist(path='./', test_size=0.3, random_state = 123):\n \n np.random.seed(random_state)\n if 'X_train.npy' not in os.listdir(path=path) or 'y_train.npy' not in os.listdir(path=path):\n print (\"Train dataset not found. Downloading...\")\n os.system(\"curl -L -o train.zip {}\".format(TRAIN_DATA_LINK))\n os.system(\"unzip train.zip\")\n os.system(\"tar -xf trainingSet.tar.gz\")\n images = []\n labels = []\n for class_name in os.listdir('./trainingSet'):\n if 'ipynb' not in class_name and '.DS' not in class_name:\n for image_name in os.listdir('./trainingSet/{}'.format(class_name)):\n image = imread('./trainingSet/{}/{}'.format(class_name, image_name))\n images.append(image)\n labels.append(int(class_name))\n X_train = np.array(images)\n y_train = np.array(labels)\n\n permutation = np.random.permutation(X_train.shape[0])\n X_train = X_train[permutation]\n y_train = y_train[permutation]\n\n with open('X_train.npy', 'wb') as f:\n np.save(f, X_train)\n with open('y_train.npy', 'wb') as f:\n np.save(f, y_train)\n os.system(\"rm -rf trainingSet\")\n os.system(\"rm -rf train.zip\")\n os.system(\"rm -rf trainingSet.tar.gz\")\n else:\n X_train = np.load('X_train.npy')\n y_train = np.load('y_train.npy')\n\n if 'X_test.npy' not in os.listdir(path=path) or 'y_test.npy' not in os.listdir(path=path):\n print (\"Test dataset not found. 
Downloading...\")\n os.system(\"curl -L -o test.zip {}\".format(TEST_DATA_LINK))\n os.system(\"unzip test.zip\")\n os.system(\"tar -xf trainingSample.tar.gz\")\n images = []\n labels = []\n for class_name in os.listdir('./trainingSample'):\n if 'ipynb' not in class_name and '.DS' not in class_name:\n for image_name in os.listdir('./trainingSample/{}'.format(class_name)):\n image = imread('./trainingSample/{}/{}'.format(class_name, image_name))\n images.append(image)\n labels.append(int(class_name))\n X_test = np.array(images)\n y_test = np.array(labels)\n with open('X_test.npy', 'wb') as f:\n np.save(f, X_test)\n with open('y_test.npy', 'wb') as f:\n np.save(f, y_test)\n\n os.system(\"rm -rf trainingSample\")\n os.system(\"rm -rf test.zip\")\n os.system(\"rm -rf trainingSet.tar.gz\")\n\n else:\n X_test = np.load('X_test.npy')\n y_test = np.load('y_test.npy')\n\n return X_train, X_test, y_train, y_test", "def train_and_test(resume_training=False, tensorboard_debug=False, cli_debug=False):\r\n if tensorboard_debug:\r\n # Open tf debug session connected to tensor board, this only really works well on linux\r\n k.set_session(TensorBoardDebugWrapperSession(tf.Session(), '127.0.0.1:6064'))\r\n elif cli_debug:\r\n # Open tf debug session with local cli, run manually via ssh\r\n k.set_session(LocalCLIDebugWrapperSession(tf.Session()))\r\n\r\n if resume_training:\r\n checkpoint_dir = latest_checkpoint(\"colorizer\")\r\n print(f\"Latest checkpoint: {checkpoint_dir}\")\r\n model = load_model(str(checkpoint_dir)) if checkpoint_dir is not None else None\r\n else:\r\n model = None\r\n\r\n # Initialize image generators\r\n data_generator = ImageDataGenerator(validation_split=0.3)\r\n\r\n train_generator = BinnedImageGenerator(\r\n str(Config.data_folder),\r\n data_generator,\r\n target_size=(256, 256),\r\n batch_size=Config.batch_size,\r\n shuffle=True,\r\n subset=\"training\")\r\n\r\n test_generator = BinnedImageGenerator(\r\n str(Config.data_folder),\r\n data_generator,\r\n target_size=(256, 256),\r\n batch_size=Config.batch_size,\r\n subset=\"validation\")\r\n\r\n # Start training\r\n train_model(train_generator, test_generator, model)", "def train_model():\n return model.fit(train_images, train_labels, epochs=10, validation_data=(test_images, test_labels), shuffle='True')", "def simple_example():\n from sklearn.datasets import load_digits\n from sklearn.model_selection import train_test_split\n from sklearn.metrics import classification_report, accuracy_score\n\n utils.fix_random_seeds()\n\n digits = load_digits()\n X = digits.data\n y = digits.target\n\n X_train, X_test, y_train, y_test = train_test_split(\n X, y, test_size=0.33, random_state=42)\n\n mod = TorchShallowNeuralClassifier()\n\n print(mod)\n\n mod.fit(X_train, y_train)\n preds = mod.predict(X_test)\n\n print(\"\\nClassification report:\")\n\n print(classification_report(y_test, preds))\n\n return accuracy_score(y_test, preds)", "def train_naive(): # add arguments as needed\n pass", "def test_intent_classifier_train(self):\n pass", "def neural_network(X, Y, Xs_test, Ys_test):\n ## YOUR CODE HERE\n #################\n return 0", "def main(opts):\n\n # Create a dataloader for the training images\n train_dataloader, _ = get_emoji_loader(opts.emoji, opts)\n\n # Create checkpoint and sample directories\n utils.create_dir(opts.checkpoint_dir)\n utils.create_dir(opts.sample_dir)\n\n train(train_dataloader, opts)", "def train_mnist():\r\n # type: () -> None\r\n\r\n # Build dataset and model\r\n dataset = MNIST_TRAIN(path=Config.video_folder)\r\n 
model = LSAMNIST(input_shape=dataset.shape, code_length=64,\r\n cpd_channels=100).to(device).train()\r\n\r\n # Set up result helper and perform test\r\n helper = OneClassResultHelper(dataset, model,\r\n checkpoints_dir=Config.model_ckpt,\r\n output_file='mnist.txt')\r\n helper.train_one_class_classification()", "def train():\n rng = random.PRNGKey(0)\n\n # Get Zachary's karate club graph dataset.\n node_feats, node_labels, sources, targets = get_karate_club_data()\n\n # Create model and optimizer.\n _, initial_params = GNN.init(\n rng, node_x=node_feats, edge_x=None, sources=sources, targets=targets)\n model = nn.Model(GNN, initial_params)\n optimizer = optim.Adam(learning_rate=0.01).create(model)\n\n # Train for 20 iterations.\n for iteration in range(20):\n optimizer, loss = train_step(optimizer, node_feats, sources, targets)\n\n accuracy = eval_step( # Model is stored in `optimizer.target`.\n optimizer.target, node_feats, sources, targets, node_labels)\n\n print('iteration: %d, loss: %.4f, accuracy: %.2f'\n % (iteration+1, loss, accuracy * 100))", "def test_three():\n header, data = read_data(\"increment-3-bit.csv\", \",\")\n nn = NeuralNetwork([3, 6, 3])\n training = convert_data_to_pairs(data, header)\n print(nn.forward_propagate([1,0,1]))\n for epoch in range(5000):\n nn.back_propagation_learning(training)\n\n test_3_bit(nn, training)", "def main():\n test_runner = TestRunner(\n FLAGS.workspace, FLAGS.bench_home, imagenet_dir=FLAGS.train_data_dir)\n test_runner.run_tests(FLAGS.test_list.split(','))", "def mnist_training():\n mndata = MNIST(MNIST_PATH)\n train_ims, train_labels = mndata.load_training()\n train_X = np.array(train_ims).T\n train_y = np.array(train_labels).T\n return train_X, train_y", "def run_training():\n # Tell TensorFlow that the model will be built into the default Graph.\n with tf.Graph().as_default():\n # Generate placeholders for the images and labels.\n state_placeholder, action_placeholder = placeholder_inputs()\n game = input.Input(6, 1)\n\n def multilayer_perceptron(_X, _weights, _biases):\n layer_1 = tf.nn.relu(tf.add(tf.matmul(_X, _weights['h1']), _biases['b1'])) \n layer_2 = tf.nn.relu(tf.add(tf.matmul(layer_1, _weights['h2']), _biases['b2']))\n return tf.matmul(layer_2, _weights['out']) + _biases['out']\n # Store layers weight & bias\n weights = { \n 'h1': tf.Variable(tf.random_normal([75, 75])),\n 'h2': tf.Variable(tf.random_normal([75, 40])),\n 'out': tf.Variable(tf.random_normal([40, 3]))\n }\n biases = { \n 'b1': tf.Variable(tf.random_normal([75])),\n 'b2': tf.Variable(tf.random_normal([40])),\n 'out': tf.Variable(tf.random_normal([3]))\n }\n\n # Construct model\n pred = multilayer_perceptron(state_placeholder, weights, biases)\n\n # Build the summary operation based on the TF collection of Summaries.\n summary_op = tf.merge_all_summaries()\n\n # Add the variable initializer Op.\n init = tf.initialize_all_variables()\n\n # Create a saver for writing training checkpoints.\n saver = tf.train.Saver(tf.all_variables())\n\n # Create a session for running Ops on the Graph.\n sess = tf.Session()\n\n # And then after everything is built:\n\n # Run the Op to initialize the variables.\n sess.run(init)\n\n saver.restore(sess, \"data/model_2.ckpt\")\n epsilon = 1\n\n print('Start Training...')\n # Start the training loop.\n for step in xrange(EPOCHS):\n turns = 0\n game.restart()\n status = 1\n while status == 1:\n state = game.grid()\n qval = sess.run(pred, feed_dict = {state_placeholder : state.reshape(1, 75)})\n\n action = (np.argmax(qval))\n 
game.move(action)\n new_state = game.grid()\n reward = game.reward()\n turns += 1\n if turns % 1000 == 0:\n print(turns)\n if reward < -1 or turns >= 10000: \n status = 0\n print(turns, game.total_score)", "def mnist_testing(shuffled = True):\n mndata = MNIST(MNIST_PATH)\n test_ims, test_labels = mndata.load_testing()\n test_X = np.array(test_ims).T\n test_y = np.array(test_labels).T\n return test_X, test_y", "def test_network(self):\n train_accuracy = 100 - percentError(map(self.neural_result,\n self.train_inputs),\n self.train_outputs)\n print 'Train accuracy:', train_accuracy\n\n test_accuracy = 100 - percentError(map(self.neural_result,\n self.test_inputs),\n self.test_outputs)\n print 'Test accuracy:', test_accuracy\n\n print '#' * int(train_accuracy), 'TR'\n print '#' * int(test_accuracy), 'TE'", "def main():\n data = pd.read_csv('./house-votes-84.data', header = None)\n\n class_names = [\"republican\", \"democrat\"]\n\n print(\"\\n-- Train and Test with Winnow --\\n\")\n train_and_test_with_winnow(data, class_names)\n\n print(\"\\n-- Train and Test with Naive Bayes --\\n\")\n train_and_test_with_naive_bayes(data, class_names)", "def leea_experiment():\n layer_1_hidden_nodes = 80 ## Starting small so my computer can keep up with the ram requirements of LEEA :)\n\n (train_dataset, train_labels), (valid_dataset, valid_labels), (test_dataset, test_labels) = get_mnist()\n\n ## Copy pasted, I'm so sorry.\n graph = tf.Graph()\n with graph.as_default():\n ## Data variables.\n tf_train_dataset = tf.placeholder(tf.float32,\n shape=(Params.SAMPLE_COUNT, image_size * image_size))\n tf_train_labels = tf.placeholder(tf.float32, shape=(Params.SAMPLE_COUNT, num_labels))\n tf_valid_dataset = tf.constant(valid_dataset)\n tf_test_dataset = tf.constant(test_dataset)\n\n ## Weights describing single layer.\n weights1 = tf.Variable(\n tf.truncated_normal([image_size * image_size, layer_1_hidden_nodes])\n )\n biases1 = tf.Variable(tf.zeros([layer_1_hidden_nodes]))\n weights2 = tf.Variable(\n tf.truncated_normal([layer_1_hidden_nodes, num_labels])\n )\n biases2 = tf.Variable(tf.zeros([num_labels]))\n\n ## Training variables.\n lay1_train = tf.nn.relu(tf.matmul(tf_train_dataset, weights1) + biases1)\n logits = tf.matmul(lay1_train, weights2) + biases2\n loss = tf.reduce_mean(\n tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=tf_train_labels)\n )\n\n optimizer = tf.train.GradientDescentOptimizer(0.5).minimize(loss)\n\n train_prediction = tf.nn.softmax(logits)\n lay1_valid = tf.nn.relu(tf.matmul(tf_valid_dataset, weights1) + biases1)\n valid_prediction = tf.nn.softmax(tf.matmul(lay1_valid, weights2) + biases2)\n lay1_test = tf.nn.relu(tf.matmul(tf_test_dataset, weights1) + biases1)\n test_prediction = tf.nn.softmax(tf.matmul(lay1_test, weights2) + biases2)\n\n with tf.Session(graph=graph) as session:\n op = tf.variables_initializer(tf.trainable_variables())\n session.run(op)\n\n evaluator = Evaluator(session, loss, train_prediction) ## Take in the loss as a function (rather than TF operation).\n evolver = Evolver(tf.trainable_variables(), evaluator)\n\n for gen in range(Params.MAX_GENERATIONS):\n print(\"Generation: \", gen)\n offset = (gen * Params.SAMPLE_COUNT) % (train_labels.shape[0] - Params.SAMPLE_COUNT)\n\n batch_data = train_dataset[offset:(offset + Params.SAMPLE_COUNT), :]\n batch_labels = train_labels[offset:(offset + Params.SAMPLE_COUNT), :]\n\n evolver.doGeneration({tf_train_dataset: batch_data, tf_train_labels: batch_labels})\n\n best = evolver.getBest()\n\n 
evolver.restore_variables(evolver.variables,\n session,\n *evolver.unflatten_tensors(best.weights, evolver.variables))\n\n print(\"Minimum achieved loss: %f\" % (-1 * best.fitness))\n print(\"Validation accuracy: %.1f%%\" % accuracy(valid_prediction.eval(), valid_labels))\n print(\"Test accuracy: %f%%\" % accuracy(test_prediction.eval(), test_labels))", "def train_cnn(\n model,\n dataset,\n iterations=10,\n lr=0.001,\n batch_size=64,\n device='cpu',\n save_fn=\"mnist-cnn\",\n load_path=\"./models/saved_models/mnist-cnn.h5\"\n ):\n\n model.train()\n\n if load_path:\n if os.path.isfile(load_path):\n model.load_state_dict(torch.load(load_path, map_location=device))\n model.eval()\n return\n else:\n raise ValueError(\"invalid load path specified for classifier.\")\n\n # Initialize the device which to run the model on\n device = torch.device(device)\n\n # specify loss function\n criterion = nn.CrossEntropyLoss().to(device)\n\n # Setup the loss and optimizer\n optimizer = torch.optim.SGD(model.parameters(), lr=lr)\n\n for j in range(iterations):\n for step, (b_inputs, b_targets) in enumerate(dataset.train_loader):\n\n output = model.forward(b_inputs.to(device))\n\n optimizer.zero_grad()\n\n loss = criterion(output, b_targets.to(device))\n\n loss.backward()\n optimizer.step()\n\n if step % 100 == 0:\n print(\"loss after step {}:{} accuracy: {}\".format(\n step, loss, get_accuracy(output, b_targets)))\n\n print(\"done with iteration: {}/{}\".format(j, iterations))\n\n if save_fn:\n torch.save(model.state_dict(),\n './models/saved_models/' + save_fn + \".h5\")\n\n print('Done training.')\n return", "def setUp(self):\n # The short NSC used in this example\n self.net_nsc = [\n (1, 4, 0, 0, 0), # Layer 1: Identity(input)\n (2, 1, 1, 1, 0), # Layer 2: Convolution(Layer1)\n (3, 1, 3, 2, 0), # Layer 3: Convolution(Layer2)\n (4, 1, 1, 1, 0), # Layer 4: Convolution(Layer1)\n (5, 1, 5, 4, 0), # Layer 5: Convolution(Layer4)\n (6, 6, 0, 3, 5), # Layer 6: Concat(Layer3, Layer5)\n (7, 2, 3, 1, 0), # Layer 7: MaxPooling(Layer1)\n (8, 1, 1, 7, 0), # Layer 8: Convolution(Layer7)\n (9, 6, 0, 6, 8), # Layer 9: Concat(Layer6, Layer8)\n (10, 7, 0, 0, 0), # Layer 10: Terminal\n ]\n\n # Load training and eval data\n (train_data, train_labels), (eval_data, eval_labels) = \\\n tf.keras.datasets.mnist.load_data()\n\n # Fix the dataset\n self.train_data = normalize_dataset(dataset=train_data, baseline=255)\n self.train_labels = train_labels.astype(np.int32)\n\n self.eval_data = normalize_dataset(dataset=eval_data, baseline=255)\n self.eval_labels = eval_labels.astype(np.int32)\n\n # The batch size\n self.batch_size = 256\n\n # Workspace directory\n workspace_dir = \"./workspace\"\n self.training_dir = \"{workspace}/trainer_test\".format(\n workspace=workspace_dir\n )", "def run_neural_network(mode, arg_placeholders, arg_data, arg_hyperparams, arg_paths_extensions, **kwargs):\n\n\tif verbose: print('model_tensorflow.run_neural_network() called')\n\n\t# Placeholders\n\tx, y = arg_placeholders['x'], arg_placeholders['y'] \n\tkeep_prob = arg_placeholders['keep_prob']\n\t# Data\n\tx_trn, y_trn, x_vld, y_vld = (arg_data['x_trn'], arg_data['y_trn'], \n\t\t\t\t\t\t\t\t arg_data['x_vld'], arg_data['y_vld'])\n\tx_tst, y_tst = arg_data['x_tst'], arg_data['y_tst']\n\t# Hyperparameters\n\tuse_stored_weights, user_model = (arg_hyperparams['use_stored_weights'], \n\t\t\t\t\t\t\t\t\t arg_hyperparams['user_model'])\n\tlayer_sizes, val_perc, mini_batch_size, epochs, seed = (arg_hyperparams['layer_sizes'], 
\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\targ_hyperparams['val_perc'],\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\targ_hyperparams['mini_batch_size'], \n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\targ_hyperparams['epochs'], \n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\targ_hyperparams['seed'])\n\tlrn_rate, kp = arg_hyperparams['lrn_rate'], arg_hyperparams['kp']\n\t# Paths and extensions \n\tstore_path, out_ext, fv_ext = (arg_paths_extensions['store_path'], \n\t\t\t\t\t\t\t\t arg_paths_extensions['out_ext'], \n\t\t\t\t\t\t\t\t arg_paths_extensions['fv_ext'])\n\t# Weights\n\tweights_biases = {}\n\tif mode == trn or mode == tst:\n\t\tweights_biases = create_neural_network(mode, layer_sizes, use_stored_weights, store_path)\n\telif mode == app:\n\t\tweights_biases = kwargs['weights_biases']\n#\tprint('(1) initial weights W1:')\n#\tprint('W1', sess.run(weights_biases['weights']['W1'][0]))\n\n\t# Logits (linear output from the network's output layer), softmaxes, accuracy\n\tlogits = evaluate_neural_network(x, keep_prob, len(layer_sizes) - 1, seed,\n\t\t\t\t\t\t\t\t\t\t weights_biases['weights'], weights_biases['biases'])\n\tsoftm = tf.nn.softmax(logits)\n\tpred_class = tf.argmax(softm)\n\tcorrect = tf.equal(tf.argmax(logits, 1), tf.argmax(y, 1))\n\taccuracy = tf.reduce_mean(tf.cast(correct, 'float'))\n\n\tif mode == trn or mode == tst:\t\t\n\t\tif mode == trn:\n\t\t\t# Declare cost and optimizer here: optimizer has global variables that must be initialised (see below)\n\t\t\tcost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=y))\n\t\t\toptimizer = tf.train.AdamOptimizer(learning_rate=lrn_rate).minimize(cost)\n\n\t\t# Initialise all global variables that have not been initialised yet (e.g., variables for Adam). See \n\t\t# https://stackoverflow.com/questions/35164529/in-tensorflow-is-there-any-way-to-just-initialize-uninitialised-variables\n\t\t# (answer by Salvador Dali) \n\t\tglobal_vars = tf.global_variables()\n\t\tis_not_initialized = sess.run([tf.is_variable_initialized(var) for var in global_vars])\n\t\tnot_initialized_vars = [v for (v, f) in zip(global_vars, is_not_initialized) if not f]\n\t\tif verbose: print('uninitialised variables:', [str(i.name) for i in not_initialized_vars])\n\t\tif len(not_initialized_vars):\n\t\t\tsess.run(tf.variables_initializer(not_initialized_vars))\n\t\tif verbose: print('uninitialised variables:', sess.run(tf.report_uninitialized_variables()))\n\n\t\tsaver = tf.train.Saver()\n\n\t# Save weights and model output (softmaxes)\n\tif mode == trn:\n\t\tif val_perc != 0:\n\t\t\t# Logits (linear output from the network's output layer), softmaxes, accuracy\n\t\t\tlogits_vld = evaluate_neural_network(x, keep_prob, len(layer_sizes) - 1, seed,\n\t\t\t\t\t\t\t\t\t\t\t\t\t weights_biases['weights'], weights_biases['biases'])\n\t\t\tsoftm_vld = tf.nn.softmax(logits_vld)\n\t\t\tpred_class_vld = tf.argmax(softm_vld)\n\t\t\tcorrect_vld = tf.equal(tf.argmax(logits_vld, 1), tf.argmax(y, 1))\n\t\t\taccuracy_vld = tf.reduce_mean(tf.cast(correct_vld, 'float'))\n#\t\tprint('(2) weights W1 before training (should be the same as (1))')\n#\t\tprint('W1', sess.run(weights_biases['weights']['W1'][0]))\n\n\t\ttotal_cost = []\n\t\taccs_trn = []\n\t\taccs_vld = []\n\t\tbest_acc = 0.0\t\t\n\t\tfor epoch in range(epochs): # one epoch is one fwd-bwd propagation over the complete dataset\n\t\t\tepoch_loss = 0\n\t\t\tfor _ in range(int(len(x_trn)/mini_batch_size)):\n\t\t\t\tepoch_x, epoch_y = x_trn, y_trn\n\t\t\t\t_, c, acc_trn, sm_trn = sess.run([optimizer, cost, accuracy, softm], \n\t\t\t\t\t\t\t\t\t\t\t\t 
feed_dict = {x: epoch_x, y: epoch_y, keep_prob: kp})\n\t\t\t\tepoch_loss += c\n\n\t\t\t\tif check_accuracies and (epoch == 10 or epoch == 20):\n\t\t\t\t\tprint('Accuracy check (trn)')\n\t\t\t\t\tprint('acc_trn :', acc_trn)\n\t\t\t\t\tcheck_accuracy(epoch_x, epoch_y, sm_trn)\n#\t\t\tprint('(3) updated weights W1 after one training epoch (should be different from (2))')\n#\t\t\tprint('W1', sess.run(weights_biases['weights']['W1'][0]))\n\n\t\t\t# In case of mini-batch gradient descent, accumulate the results from the mini batches\n\t\t\t# acc_trn = ...\n\t\t\t# sm_trn_comb = ...\n\t\t\t# sm_trn = sm_trn_comb \n\n\t\t\tprint('epoch', str(epoch) + '/' + str(epochs), 'completed: loss =', epoch_loss, 'acc =', acc_trn)\n\n\t\t\t# Non-user model (model selection) case: save weights and softmaxes for the current epoch \n\t\t\t# if its acc_vld is the highest so far. Check acc_vld every tenth epoch\n\t\t\tif not user_model and epoch % 10 == 0:\n\t\t\t\ttotal_cost.append(epoch_loss)\n\t\t\t\taccs_trn.append(acc_trn)\n\t\t\t\tif val_perc != 0:\n\t\t\t\t\tif arg_hyperparams['ismir_2018']:\n\t\t\t\t\t\t# This is incorrect: sess.run() should not be run again (see loop over the mini \n\t\t\t\t\t\t# batches) on accuracy and softm, which are for calculating trn results, but on \n\t\t\t\t\t\t# accuracy_vld and softm_vld. Rerunning leads to unwanted changes in tensor calculations\n\t\t\t\t\t\t# NB: for the ISMIR paper, sm_vld is not calculated\n\t\t\t\t\t\tacc_vld, sm_vld = sess.run([accuracy, softm],\n\t\t\t\t\t\t\t\t\t\t\t \t\tfeed_dict={x: x_vld, y: y_vld, keep_prob: 1.0})\n\t\t\t\t\telse:\n\t\t\t\t\t\tacc_vld, sm_vld = sess.run([accuracy_vld, softm_vld],\n\t\t\t\t\t\t\t\t\t\t\t \t\tfeed_dict={x: x_vld, y: y_vld, keep_prob: 1.0})\n\t\t\t\t\taccs_vld.append(acc_vld)\n\n\t\t\t\t\tif check_accuracies and (epoch == 10 or epoch == 20):\n\t\t\t\t\t\tprint('Accuracy check (vld)')\n\t\t\t\t\t\tprint('acc_vld :', acc_vld)\n\t\t\t\t\t\tcheck_accuracy(x_vld, y_vld, sm_vld)\n\n\t\t\t\t\tif acc_vld > best_acc:\n\t\t\t\t\t\tbest_acc = acc_vld\n\t\t\t\t\t\t# Save weights\n\t\t\t\t\t\tsave_path = saver.save(sess, store_path + 'weights/' + 'trained.ckpt')\n\t\t\t\t\t\t# Save softmaxes (trn and vld)\n\t\t\t\t\t\tif arg_hyperparams['ismir_2018']:\n\t\t\t\t\t\t\t# This is incorrect: sess.run() should not be run again (see loop over the mini \n\t\t\t\t\t\t\t# batches) on softm. 
Rerunning leads to unwanted changes in tensor calculations \n\t\t\t\t\t\t\tsoftmaxes_trn = sess.run([softm, pred_class], \n\t\t\t\t\t\t\t\t\t\t\t\t\t feed_dict={x: x_trn, keep_prob: kp})[0]\n\t\t\t\t\t\t\tnp.savetxt(store_path + out_ext, softmaxes_trn, delimiter=',')\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tnp.savetxt(store_path + out_ext, sm_trn, delimiter=',')\n\t\t\t\t\t\tnp.savetxt(store_path + out_ext.replace('trn', 'vld'), sm_vld, delimiter=',')\n\t\t\t\t\t\t# Save best epoch\n\t\t\t\t\t\twith open(store_path + 'best_epoch.txt', 'w') as text_file:\n\t\t\t\t\t\t\ttext_file.write('highest accuracy on the validation set (' + \n\t\t\t\t\t\t\t\t\t\t\tstr(best_acc) + ') in epoch ' + str(epoch))\n\t\t\t\t\t\tnp.savetxt(store_path + 'best_epoch.csv', [[int(epoch), acc_vld]], delimiter=',')\n\n\t\t# User model case: save weights and softmaxes for the final epoch \n\t\tif user_model:\n\t\t\tsave_path = saver.save(sess, store_path + 'weights/' + 'trained.ckpt')\n\t\t\tnp.savetxt(store_path + out_ext, sm_trn, delimiter=',')\n\n\t\t# Plot the trn and vld accuracy\n\t\tif plot_or_not:\n\t\t\tplt.plot(np.squeeze(accs_trn))\n\t\t\tplt.plot(np.squeeze(accs_vld))\n\t\t\tplt.ylabel('acc')\n\t\t\tplt.xlabel('epochs (/10)')\n\t\t\tax = plt.subplot(111)\n\t\t\tax.set_prop_cycle('color', ['red', 'green'])\n#\t\t\tplt.gca().set_prop_cycle(['red', 'green'])\n\t\t\tplt.title('accuracy on training and validation set')\n\t\t\tplt.legend(['trn', 'vld'], loc='lower right')\n\t\t\tplt.savefig(store_path + 'trn_and_vld_acc.png')\n\n\t# Save model output (softmaxes)\n\tif mode == tst:\n\t\tacc_tst, sm_tst = sess.run([accuracy, softm], feed_dict={x: x_tst, y: y_tst, keep_prob: kp})\n\t\tnp.savetxt(store_path + out_ext, sm_tst, delimiter=',')\n\t\tif check_accuracies:\n\t\t\tprint('Accuracy check (tst)')\n\t\t\tprint('acc_tst :', acc_tst)\n\t\t\tcheck_accuracy(x_tst, y_tst, sm_tst)\n\n\t# Save or return model output (softmaxes)\n\tif mode == app:\n\t\tload_and_save_features = False\n\t\t# Get features and reshape to get required shape (1, number of features)\n\t\tx_app = (genfromtxt(store_path + fv_ext, delimiter=',') if load_and_save_features else \n\t\t\t\t np.array(kwargs['feature_vector']))\n\t\tx_app = x_app.reshape(1, -1)\n\t\tsm_app = sess.run(softm, feed_dict={x: x_app, keep_prob: kp})\n\t\tif load_and_save_features:\n\t\t\tnp.savetxt(store_path + out_ext, sm_app, delimiter=',')\n\t\telse:\n\t\t\treturn sm_app[0]", "def train_network(self):\n batch = self.memory.sample(self.batch_size)\n inputs = np.array([b[\"state\"] for b in batch]) #####\n actions = np.array([b[\"action\"] for b in batch])\n rewards = np.array([b[\"reward\"] for b in batch])\n next_inputs = np.array([b[\"next_state\"] for b in batch])\n\n actions_one_hot = np.eye(self.action_space_size)[actions]\n\n next_qvalues = np.squeeze(self.target_network.model(next_inputs))\n targets = rewards + self.discount * np.amax(next_qvalues, axis=-1)\n\n self.online_network.train_step(inputs, targets, actions_one_hot)", "def test_user_hidden_layers_input_acceptances():\n inputs_that_should_work = [[[\"linear\", 33]], [[\"linear\", 12]], [[\"gru\", 2]], [[\"lstm\", 2]], [[\"lstm\", 1]],\n [[\"gru\", 330]], [[\"gru\", 33], [\"linear\", 2]] ]\n for input in inputs_that_should_work:\n assert RNN(input_dim=1, layers_info=input, hidden_activations=\"relu\",\n output_activation=\"relu\")", "def train(train_dataset: torch.utils.data.Dataset, test_dataset: torch.utils.data.Dataset,\n training_config: dict = train_config, global_config: dict = global_config):\n\n for path in 
global_config.values():\n create_dirs(path)\n\n # wrap datasets with Dataloader classes\n train_loader = torch.utils.data.DataLoader(train_dataset,\n **training_config[\"DATA_LOADER_CONFIG\"])\n test_loader = torch.utils.data.DataLoader(test_dataset,\n **training_config[\"DATA_LOADER_CONFIG\"])\n\n # model name & paths\n name = \"_\".join([train_config[\"DATE\"], train_config[\"SESSION_NAME\"]])\n modelpath = os.path.join(global_config[\"WEIGHT_DIR\"], name)\n\n # instantiate model\n model = training_config[\"MODEL\"](**training_config[\"MODEL_CONFIG\"])\n\n optimizer = training_config[\"OPTIMIZER\"](model.parameters(),\n **training_config[\"OPTIMIZER_CONFIG\"])\n\n # set up ignite engine\n training_config[\"METRICS\"].update({\"loss\" : Loss(training_config[\"LOSS\"])})\n trainer = create_supervised_trainer(model=model, optimizer=optimizer,\n loss_fn=training_config[\"LOSS\"],\n device=training_config[\"DEVICE\"])\n evaluator = create_supervised_evaluator(model,\n metrics=training_config[\"METRICS\"],\n device=training_config[\"DEVICE\"])\n\n\n # tensorboardX setup\n log_dir = os.path.join(global_config[\"LOG_DIR\"], \"tensorboardx\", name)\n create_dirs(log_dir)\n writer = SummaryWriter(logdir=log_dir)\n\n # log using the logging tool\n logger = log.Log(training_config, run_name=train_config['SESSION_NAME'])\n\n @trainer.on(Events.ITERATION_COMPLETED)\n def log_training(engine):\n iteration = (engine.state.iteration - 1) % len(train_loader) + 1\n writer.add_scalar(\"training/loss\", engine.state.output, engine.state.iteration)\n if iteration % 4 == 0:\n print(\"\\repoch[{}] iteration[{}/{}] loss: {:.2f} \".format(engine.state.epoch,\n iteration, len(train_loader),\n engine.state.output), end=\"\")\n\n # generic evaluation function\n def evaluate(engine, loader):\n evaluator.run(loader)\n metrics = evaluator.state.metrics\n return metrics\n\n # training data metrics\n @trainer.on(Events.EPOCH_COMPLETED)\n def log_training_results(engine):\n print(\"\\ntraining results - epoch {}\".format(engine.state.epoch))\n metrics = evaluate(engine, train_loader)\n print(metrics)\n for key, value in metrics.items():\n logger.log_metric(key, value)\n writer.add_scalar(\"training/avg_{}\".format(key), value, engine.state.epoch)\n\n # test data metrics\n @trainer.on(Events.EPOCH_COMPLETED)\n def log_validation_results(engine):\n print(\"test results - epoch {}\".format(engine.state.epoch))\n metrics = evaluate(engine, test_loader)\n print(metrics)\n for key, value in metrics.items():\n writer.add_scalar(\"validation/avg_{}\".format(key), value, engine.state.epoch)\n\n # model checkpointing\n @trainer.on(Events.EPOCH_COMPLETED)\n def model_checkpoint(engine):\n torch.save(model.state_dict(), modelpath + \".pth\")\n print(\"Checkpoint saved to {}\".format(modelpath + \".pth\"))\n\n # training iteration\n try:\n trainer.run(train_loader, max_epochs=training_config[\"EPOCHS\"])\n except KeyboardInterrupt:\n torch.save(model.state_dict(), modelpath + \".pth\")\n print(\"Model saved to {}\".format(modelpath + \".pth\"))\n raise KeyboardInterrupt\n\n # write weights\n torch.save(model.state_dict(), modelpath + \".pth\")\n\n # write csv log file\n log_content = training_config.copy()\n evaluator.run(test_loader)\n log_content[\"VAL_METRICS\"] = evaluator.state.metrics\n log_path = os.path.join(global_config[\"LOG_DIR\"], training_config[\"LOGFILE\"])\n write_log(log_path, log_content)\n\n logger.end_run()\n \n return evaluator.state.metrics[\"training/avg_loss\"]", "def train():\n ### DO NOT CHANGE SEEDS!\n # 
Set the random seeds for reproducibility\n tf.set_random_seed(42)\n np.random.seed(42)\n\n ## Prepare all functions\n # Get number of units in each hidden layer specified in the string such as 100,100\n if FLAGS.dnn_hidden_units:\n dnn_hidden_units = FLAGS.dnn_hidden_units.split(\",\")\n dnn_hidden_units = [int(dnn_hidden_unit_) for dnn_hidden_unit_ in dnn_hidden_units]\n else:\n dnn_hidden_units = []\n\n ########################\n # PUT YOUR CODE HERE #\n #######################\n\n # Parameters\n input_dim = 3 * 32 * 32\n activation_fn, dropout_rate, weight_initializer, weight_regularizer, n_classes, optimizer, batch_size, max_steps, \\\n log_dir, data_dir = _parse_flags(\n FLAGS)\n\n # dataset\n cifar10 = cifar10_utils.get_cifar10(data_dir=data_dir)\n\n # Session\n tf.reset_default_graph()\n gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.99, allow_growth=True)\n session = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))\n\n # Placeholders for images, labels input.\n X = tf.placeholder(dtype=tf.float32, shape=[None, input_dim], name='inputs')\n y = tf.placeholder(dtype=tf.int32, shape=[None, n_classes], name='labels')\n\n # init network\n net = MLP(n_hidden=dnn_hidden_units, n_classes=n_classes, is_training=True,\n activation_fn=activation_fn, dropout_rate=dropout_rate,\n weight_initializer=weight_initializer,\n weight_regularizer=weight_regularizer)\n\n # Trainings ops\n global_step = tf.Variable(0, trainable=False, name='global_step')\n logits_op = net.inference(X)\n train_flags = {'optimizer': optimizer, 'global_step': global_step, 'grad_clipping': FLAGS.grad_clipping}\n loss_op = net.loss(logits_op, y)\n accuracy_op = net.accuracy(logits_op, y)\n train_op = net.train_step(loss_op, train_flags)\n confusion_matrix_op = net.confusion_matrix(logits=logits_op, labels=y)\n train_loss = train_accuracy = test_accuracy = test_loss = 0.\n\n # utility ops\n summary_op = tf.summary.merge_all()\n write_logs = FLAGS.log_dir is not None\n save_model = True\n\n if write_logs:\n train_log_path = os.path.join(log_dir, '{}_train'.format(FLAGS.model_name))\n test_log_path = os.path.join(log_dir, '{}_test'.format(FLAGS.model_name))\n _ensure_path_exists(train_log_path)\n _ensure_path_exists(test_log_path)\n train_log_writer = tf.summary.FileWriter('{}_train/'.format(train_log_path), graph=session.graph)\n test_log_writer = tf.summary.FileWriter('{}_test/'.format(test_log_path), graph=session.graph)\n\n # Initialize variables\n init_op = tf.global_variables_initializer()\n local_init_op = tf.local_variables_initializer()\n session.run(fetches=[init_op, local_init_op])\n\n # track losses\n stats = defaultdict(list)\n\n # loop over steps\n for _step in range(FLAGS.max_steps):\n\n # get batch of data\n X_train, y_train = cifar10.train.next_batch(batch_size)\n X_train = np.reshape(X_train, (batch_size, -1))\n # feed to model\n train_feed = {X: X_train, y: y_train, net.training_mode: True}\n fetches = [train_op, loss_op, accuracy_op]\n\n # Training set\n if _step % 13 == 0 and write_logs: # write summary\n fetches += [summary_op]\n _, train_loss, train_accuracy, train_summary = session.run(fetches=fetches, feed_dict=train_feed)\n train_log_writer.add_summary(train_summary, _step)\n else:\n _, train_loss, train_accuracy = session.run(fetches=fetches, feed_dict=train_feed)\n\n if _step % 10 == 0:\n print('Ep.{}: train_loss:{:+.4f}, train_accuracy:{:+.4f}'.format(_step, train_loss, train_accuracy))\n stats = _update_stats(stats, train_loss=train_loss, train_accuracy=train_accuracy)\n\n # 
Sanity check\n if np.isnan(train_loss):\n print('Warning: training loss is NaN.. ')\n break\n\n # Test set evaluation\n if (_step + 1) % 100 == 0:\n X_test, y_test = cifar10.test.images, cifar10.test.labels\n X_test = np.reshape(X_test, [X_test.shape[0], -1])\n test_feed = {X: X_test, y: y_test, net.training_mode: False}\n test_loss, test_accuracy, test_logits, test_confusion_matrix, test_summary = session.run(\n fetches=[loss_op, accuracy_op, logits_op,\n confusion_matrix_op, summary_op],\n feed_dict=test_feed)\n\n if write_logs:\n test_log_writer.add_summary(test_summary, _step)\n\n stats = _update_stats(stats, test_loss=test_loss, test_accuracy=test_accuracy,\n test_confusion_matrix=test_confusion_matrix)\n print('==> Ep.{}: test_loss:{:+.4f}, test_accuracy:{:+.4f}'.format(_step, test_loss, test_accuracy))\n print('==> Confusion Matrix on test set \\n {} \\n'.format(test_confusion_matrix))\n\n if _step > 1000 and test_accuracy < 0.25: # hopeless trials\n save_model = False\n break\n\n # Early stopping: if the last test accuracy is not above the mean of prev 10 epochs, stop\n delta = 1e-4 # accuracy is in decimals\n if _step > 1000:\n window = stats['test_accuracy'][-10:]\n window_accuracy = sum(window) / len(window)\n\n if abs(test_accuracy - window_accuracy) < delta:\n print(\n '\\n==> EARLY STOPPING with accuracy {} and moving-window mean accuracy {} \\n'.format(test_accuracy,\n window_accuracy))\n\n # save model\n if write_logs:\n train_log_writer.close()\n test_log_writer.close()\n\n if save_model:\n save_dir = os.path.join(FLAGS.save_path, FLAGS.model_name)\n saver = tf.train.Saver()\n _ensure_path_exists(save_dir)\n saver.save(session, save_path=os.path.join(save_dir, 'model.ckpt'))\n\n # save results for easy plotting\n results_dir = os.path.relpath('./results')\n _ensure_path_exists(results_dir)\n with open(os.path.join(results_dir, '{}.pkl'.format(FLAGS.model_name)), 'wb') as f:\n pickle.dump(stats, f)\n\n\n #######################\n # END OF YOUR CODE #\n #######################", "def test_net(model, val_loader=None, thresh=0.05):\n\n for iter, data in enumerate(val_loader):\n\n # one batch = data for one image\n image = data['image']\n target = data['label']\n wgt = data['wgt']\n rois = data['rois']\n gt_boxes = data['gt_boxes']\n gt_class_list = data['gt_classes']\n\n #TODO: perform forward pass, compute cls_probs\n\n\n # TODO: Iterate over each class (follow comments)\n for class_num in range(20): \n # get valid rois and cls_scores based on thresh\n \n # use NMS to get boxes and scores\n \n\n #TODO: visualize bounding box predictions when required\n #TODO: Calculate mAP on test set", "def train(self, epochs=5):\n x_train, y_train, x_test, y_test = self._load_data()\n x_train = tf.keras.utils.normalize(x_train, axis=1) # Scale between 0-1\n x_test = tf.keras.utils.normalize(x_test, axis=1)\n\n model = tf.keras.models.Sequential()\n # 28 x 28 (digits dimensions) -> flat 784\n model.add(tf.keras.layers.Flatten())\n model.add(tf.keras.layers.Dense(128, activation=tf.nn.relu))\n model.add(tf.keras.layers.Dense(128, activation=tf.nn.relu))\n # neurons -> number of classification\n model.add(tf.keras.layers.Dense(10, activation=tf.nn.softmax))\n\n dtnow = datetime.now().strftime(\"%Y-%m-%dT%H:%M\")\n tb_logs = self._artifact_repo.artifact_path(self._TENSORBOARD_LOGS)\n tensorboard = tf.keras.callbacks.TensorBoard(log_dir='{}/{}'.format(tb_logs, dtnow))\n model.compile(\n optimizer='adam',\n loss='sparse_categorical_crossentropy',\n metrics=['accuracy']\n )\n model.fit(x_train, 
y_train, epochs=int(epochs), validation_data=(x_test, y_test), callbacks=[tensorboard])\n\n # val_loss, val_acc = model.evaluate(x_test, y_test)\n\n # self._logger.info(\"Evaluation on test dataset: Loss: %s, Accuracy: %s\", val_loss, val_acc)\n\n path = self._artifact_repo.artifact_path(self._ARTIFACT_MODEL)\n model.save(path)", "def train_net(\n model, epoch_size, mnist_path, repeat_size, ckpoint_cb, sink_mode, callbacks\n):\n print(\"============== Starting Training ==============\")\n # load training dataset\n ds_train = create_dataset(os.path.join(mnist_path, \"train_lenet\"), 32, repeat_size)\n model.train_lenet(\n epoch_size,\n ds_train,\n callbacks=[ckpoint_cb, LossMonitor()] + callbacks,\n dataset_sink_mode=sink_mode,\n )", "def train(args):\r\n train_img = 'NEW DATA/TRAIN/'\r\n validation_img = 'NEW DATA/TEST/'\r\n \r\n nb_epoch = int(args.nb_epoch)\r\n nb_train_samples = get_nb_files(train_img)\r\n nb_classes = len(glob.glob(train_img + \"/*\"))\r\n # data prep\r\n train_datagen = ImageDataGenerator(\r\n rotation_range=40,\r\n width_shift_range=0.2,\r\n height_shift_range=0.2,\r\n rescale=1./255,\r\n shear_range=0.2,\r\n zoom_range=0.2,\r\n horizontal_flip=True,\r\n fill_mode='nearest')\r\n\r\n validation_datagen = ImageDataGenerator(\r\n rotation_range=40,\r\n width_shift_range=0.2,\r\n height_shift_range=0.2,\r\n rescale=1./255,\r\n shear_range=0.2,\r\n zoom_range=0.2,\r\n horizontal_flip=True,\r\n fill_mode='nearest')\r\n \r\n train_generator = train_datagen.flow_from_directory(\r\n\t\t\ttrain_img,\r\n\t\t\ttarget_size=(299, 299),\r\n\t\t\tbatch_size=10,\r\n\t\t\tclass_mode='categorical'\r\n\t\t\t)\r\n validation_generator = validation_datagen.flow_from_directory(\r\n\t\t\tvalidation_img,\r\n\t\t\ttarget_size=(299, 299),\r\n\t\t\tbatch_size=10,\r\n\t\t\tclass_mode='categorical'\r\n\t\t\t)\r\n if(K.image_dim_ordering() == 'th'):\r\n input_tensor = Input(shape=(3, 299, 299))\r\n else:\r\n input_tensor = Input(shape=(299, 299, 3))\r\n \r\n # setup model\r\n base_model = InceptionV3(input_tensor = input_tensor,weights='imagenet', include_top=False) #include_top=False excludes final FC layer\r\n model = add_new_last_layer(base_model, nb_classes)\r\n \r\n # transfer learning\r\n setup_to_transfer_learn(model, base_model)\r\n \r\n \r\n \r\n history_tl = model.fit_generator(train_generator,\r\n samples_per_epoch=200,\r\n nb_epoch=nb_epoch,\r\n validation_data=validation_generator,\r\n nb_val_samples=64) \r\n model.save(args.output_model_file)\r\n if args.plot:\r\n plot_training(history_tl)", "def train_and_test_with_naive_bayes(data, class_names):\n # Train data\n class_normalized_data = normalize_data(data, class_names[0])\n class_training_table = util.get_training_table(class_normalized_data, 0, get_training_index())\n class_model = nb.train(class_training_table[0], class_training_table[1])\n\n # Get Class Test Data\n class_index = 0\n class_test_feature_table = util.get_test_table(class_normalized_data, class_index, get_test_index())[0]\n class_test_classes = util.get_test_table(class_normalized_data, class_index, get_test_index())[1]\n\n original_indices = get_test_index()\n # Go through each line of test data and compare results.\n for index in range(len(class_test_classes)):\n class_features = util.get_test_features(class_test_feature_table, index)\n result_class = nb.get_classification(class_features, class_model) \n expected_class = class_test_classes[index]\n matched = result_class == expected_class\n util.print_test_result(original_indices[index], matched, [result_class], 
expected_class, class_names)", "def main() -> None:\n pl.seed_everything(\n seed=1,\n workers=True,\n )\n\n config = read_config(path=Path(\"configs/blobs.yml\"))\n\n datamodule = BlobsDataModule(config=config)\n model = BlobsClassifierModel(config=config)\n\n trainer = get_blobs_trainer_with_callbacks(config=config)\n\n trainer.fit(\n model=model,\n datamodule=datamodule,\n )\n print(\"Best checkpoint path:\", trainer.checkpoint_callback.best_model_path)\n\n trainer.test(\n model=model,\n datamodule=datamodule,\n ckpt_path=\"best\",\n verbose=True,\n )\n\n predictions_probabilities = trainer.predict(\n model=model,\n datamodule=datamodule,\n return_predictions=True,\n ckpt_path=\"best\",\n )\n print(predictions_probabilities)", "def test_cnn_metrics(self):\n metrics = ['accuracy', 'mae']\n model = modelgen.generate_CNN_model((None, 20, 3), 2, [32, 32],\n 100, metrics=metrics)\n assert model.metrics == metrics", "def train(args):\n\n nb_classes = 2\n neg_class_label = '0'\n pos_class_label = '1'\n nb_train_samples = get_nb_files(args.train_dir)\n nb_val_samples = get_nb_files(args.val_dir)\n batch_size = int(args.batch_size)\n\n nb_train_samples_neg = len([name for name in os.listdir(os.path.join(args.train_dir, neg_class_label))])\n nb_train_samples_pos = len([name for name in os.listdir(os.path.join(args.train_dir, pos_class_label))])\n print(\"Number of neg training examples is \", nb_train_samples_neg)\n print(\"Number of pos training examples is \", nb_train_samples_pos)\n\n\n # data prep\n train_datagen = ImageDataGenerator()\n test_datagen = ImageDataGenerator()\n\n train_generator = train_datagen.flow_from_directory(args.train_dir,\n target_size=(IM_WIDTH, IM_HEIGHT),\n batch_size=batch_size,\n class_mode='categorical',\n shuffle=True)\n\n validation_generator = test_datagen.flow_from_directory(args.val_dir,\n target_size=(IM_WIDTH, IM_HEIGHT),\n batch_size=batch_size,\n class_mode='categorical',\n shuffle=False)\n val_labels = validation_generator.classes\n # validation_labels = to_categorical(val_labels, num_classes=2)\n\n\n # setting up class weights for imbalanced dataset ...\n # The sum of the weights of all examples stays the same.\n\n\n total = nb_train_samples_neg + nb_train_samples_pos\n print('Examples:\\n Total: {}\\n Positive: {} ({:.2f}% of total)\\n'.format(total, nb_train_samples_pos, 100 * nb_train_samples_pos/total))\n\n\n weight_for_0 = total / nb_train_samples_neg\n weight_for_1 = total / nb_train_samples_pos\n\n class_weight = {0: weight_for_0, 1: weight_for_1}\n\n print('Weight for class 0: {:.2f}'.format(weight_for_0))\n print('Weight for class 1: {:.2f}'.format(weight_for_1))\n\n # setup model\n # model = keras.models.load_model('./experiment/vgg_ft_ni10.model')\n base_model = applications.Xception(weights='imagenet', include_top=False, input_shape=(300, 300, 3))\n print(\"Number of vgg layers :\", len(base_model.layers))\n\n # adding fully connected layer\n model = add_new_last_layer(base_model, nb_classes)\n\n # transfer learning...\n setup_to_transfer_learn(model, base_model)\n\n for i, layer in enumerate(model.layers):\n print(i, layer.name)\n print(layer.trainable)\n\n # checkpoint = ModelCheckpoint(\"ft_vgg16.h5\",\n # monitor=RocCallback(validation_generator), # not working with custom callbacks\n # verbose=1,\n # save_best_only=True,\n # save_weights_only=False,\n # mode='auto',\n # period=1)\n\n print('Transfer Learning is starting...')\n # model.save_weights('') # Please input weights if present\n history_tl = model.fit_generator(train_generator,\n 
nb_epoch=NB_EPOCHS_TL,\n samples_per_epoch=nb_train_samples,\n validation_data=validation_generator,\n nb_val_samples=nb_val_samples,\n class_weight=class_weight,\n callbacks=[RocCallback(validation_generator)])\n\n # fine-tuning ...\n setup_to_finetune(model)\n for i, layer in enumerate(model.layers):\n print(i, layer.name)\n print(layer.trainable)\n # model.load_weights('./experiment/vgg_ft_ni25.h5')\n # model.summary()\n\n print('Fine tuning is starting...')\n history_ft = model.fit_generator(train_generator,\n samples_per_epoch=nb_train_samples,\n nb_epoch=NB_EPOCHS_FT,\n validation_data=validation_generator,\n nb_val_samples=nb_val_samples,\n class_weight=class_weight,\n callbacks=[RocCallback(validation_generator)])\n\n # making predictions ...3\n model.save_weights('./experiment/vgg_ft_ni25.h5')\n # model.save('./experiment/vgg_ft_ni10.model')\n pred = model.predict_generator(validation_generator, steps=nb_val_samples//batch_size)\n\n # confusion_matrix(val_labels, pred)\n\n # F1Score ...\n predictions = np.argmax(pred, axis=1)\n\n\n # Storing Predictions as CSV ...\n filenames = validation_generator.filenames\n labels = validation_generator.class_indices\n labels = dict((v, k) for k, v in labels.items())\n predictions = [labels[k] for k in predictions]\n results = pd.DataFrame({\"Filename\": filenames,\n \"true\" : pred[:, 1],\n \"Predictions\": predictions,\n \"Label\": val_labels})\n # results.to_csv(\"./experiment/ft_ni_results15.csv\", index=False)\n # metric(\"./experiment/ft_ni_results15.csv\")\n\n # plotting data...\n if args.plot:\n plot_training(pred, val_labels)\n\n input_path = args.val_dir + pos_class_label\n output_path = './models/vgg/epoch_{}/'.format(NB_EPOCHS_TL+NB_EPOCHS_FT)\n\n\n # visualising activation maps of positive class ...\n for img in validation_generator.filenames:\n visualize_class_activation_map(model, input_path + img, output_path + img)", "def naiveBayes(x_train, x_test, y_train):\n gnb = GaussianNB()\n y_pred = gnb.fit(x_train, y_train).predict(x_test)\n return y_pred", "def load_mnist(fashion, onehot_encode=True, flatten_x=False, crop_x=0, classes=None):\n\tif not fashion:\n\t\t(x_train, y_train),(x_test, y_test) = tf.keras.datasets.mnist.load_data()\n\t\tx_train, x_test = x_train / 255.0, x_test / 255.0\n\telse:\n\t\t(x_train, y_train),(x_test, y_test) = tf.keras.datasets.fashion_mnist.load_data()\n\t\tx_train, x_test = x_train / 255.0, x_test / 255.0 \n \n\tdef crop(X, crop_size):\n\t\tassert crop_x < X.shape[1]/2\n\t\tassert crop_x < X.shape[2]/2\n\t\treturn X[:,crop_size:-crop_size,crop_size:-crop_size]\n\n\tif crop_x > 0:\n\t\tx_train = crop(x_train, crop_x)\n\t\tx_test = crop(x_test, crop_x)\n\n\t# Flatten to 2d arrays (each example 1d)\n\tdef flatten_image(X):\n\t return X.reshape(X.shape[0], X.shape[1]*X.shape[1])\n\tif flatten_x:\n\t\tx_train = flatten_image(x_train)\n\t\tx_test = flatten_image(x_test)\n\n\tif onehot_encode:\n\t\ty_train = onehot_encode_labels(y_train)\n\t\ty_test = onehot_encode_labels(y_test)\n\n\tif classes is not None:\n\t\tassert len(classes) == 2\n\t\tc0, c1 = classes\n\t\ttrain_idxs_to_keep = np.logical_or(y_train==c0, y_train==c1)\n\t\tx_train, y_train = x_train[train_idxs_to_keep,:], y_train[train_idxs_to_keep]\n\t\ttest_idxs_to_keep = np.logical_or(y_test==c0, y_test==c1)\n\t\tx_test, y_test = x_test[test_idxs_to_keep,:], y_test[test_idxs_to_keep]\n\n\t\ty_train = (y_train==c1).astype(int)[:,np.newaxis]\n\t\ty_test = (y_test==c1).astype(int)[:,np.newaxis]\n\n\treturn x_train, y_train, x_test, y_test", "def 
test_net(network, network_model, data_path):\n print(\"============== Starting Testing ==============\")\n # load the saved model for evaluation\n param_dict = load_checkpoint(\"checkpoint_lenet-1_1875.ckpt\")\n # load parameter to the network\n load_param_into_net(network, param_dict)\n # load testing dataset\n ds_eval = create_dataset(os.path.join(data_path, \"test\"))\n acc = network_model.eval(ds_eval, dataset_sink_mode=False)\n print(\"============== Accuracy:{} ==============\".format(acc))", "def setUp(self):\n # The short NSC used in this example\n self.net_nsc = [\n (1, 4, 0, 0, 0), # Layer 1: Identity(input)\n (2, 1, 1, 1, 0), # Layer 2: Convolution(Layer1)\n (3, 1, 3, 2, 0), # Layer 3: Convolution(Layer2)\n (4, 1, 1, 1, 0), # Layer 4: Convolution(Layer1)\n (5, 1, 5, 4, 0), # Layer 5: Convolution(Layer4)\n (6, 6, 0, 3, 5), # Layer 6: Concat(Layer3, Layer5)\n (7, 2, 3, 1, 0), # Layer 7: MaxPooling(Layer1)\n (8, 1, 1, 7, 0), # Layer 8: Convolution(Layer7)\n (9, 6, 0, 6, 8), # Layer 9: Concat(Layer6, Layer8)\n (10, 7, 0, 0, 0), # Layer 10: Terminal\n ]\n\n # Load training and eval data\n (train_data, train_labels), (eval_data, eval_labels) = \\\n tf.keras.datasets.mnist.load_data()\n\n # Fix the dataset\n self.train_data = normalize_dataset(dataset=train_data, baseline=255)\n self.train_labels = train_labels.astype(np.int32)\n\n self.eval_data = normalize_dataset(dataset=eval_data, baseline=255)\n self.eval_labels = eval_labels.astype(np.int32)\n\n # The batch size\n self.batch_size = 256\n\n # Workspace directory\n workspace_dir = \"./workspace\"\n self.training_dir = \"{workspace}/trainer_test_earlystop\".format(\n workspace=workspace_dir\n )" ]
[ "0.70206416", "0.6779854", "0.67381644", "0.6688426", "0.6619917", "0.66178626", "0.66168207", "0.65870345", "0.6558734", "0.6551392", "0.654411", "0.6518047", "0.6504657", "0.64782375", "0.64767015", "0.64680374", "0.6459553", "0.63857526", "0.63578683", "0.6350918", "0.6332328", "0.63176304", "0.62694967", "0.62664455", "0.62474203", "0.6240036", "0.623251", "0.62201905", "0.6207552", "0.62071264", "0.616219", "0.6149963", "0.6149755", "0.6148037", "0.61341333", "0.6130879", "0.6125021", "0.6119588", "0.611856", "0.6109548", "0.6101878", "0.609416", "0.6083016", "0.608285", "0.60803556", "0.607559", "0.6063013", "0.6043894", "0.6013155", "0.60112554", "0.5997679", "0.5996054", "0.5987015", "0.5975344", "0.597477", "0.5970435", "0.5967754", "0.5967715", "0.59611773", "0.5959008", "0.59526885", "0.59507656", "0.59497714", "0.5945692", "0.59275854", "0.5926593", "0.5919731", "0.5913318", "0.59010524", "0.58900344", "0.5880597", "0.5878744", "0.5863679", "0.58634293", "0.58537775", "0.5851286", "0.58429676", "0.5841951", "0.58418363", "0.5836029", "0.58343625", "0.58301926", "0.5830026", "0.5827624", "0.58271194", "0.5820378", "0.5818854", "0.58185875", "0.58166564", "0.58097124", "0.58077556", "0.5803313", "0.5802734", "0.5798123", "0.57882965", "0.5785612", "0.5784038", "0.5782685", "0.5780895", "0.5780077" ]
0.5869477
72
Return a mock component of a general model.
def mock_component():
    component = Mock()
    component.free_parameters = flex.double([1.0])
    component.free_parameter_esds = None
    component.n_params = 1
    component.var_cov_matrix = sparse.matrix(1, 1)
    return component
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_get_model(self) -> None:\n get_model()", "def real_model(request):\n return request.config.option.real_model", "def model(self) -> Type[Model]:", "def model_name(self) -> str:\n return \"mock-model-name\"", "def test_get_model_method(self):\n # arrange\n model_manager = ModelManager()\n\n model_manager.load_model(\"tests.mocks.MLModelMock\")\n\n # act\n exception_raised = False\n model = None\n try:\n model = model_manager.get_model(qualified_name=\"qualified_name\")\n except Exception as e:\n exception_raised = True\n\n # assert\n self.assertFalse(exception_raised)\n self.assertTrue(type(model) is MLModelMock)", "def test_get(self):\n self.assertEqual(self.expected_described_model, self.mapped_model.get(\"described_model_type\"))", "def testGetReigsteredModel(self):\n from soc.models.student import Student\n model = models_logic.getModel('soc.models.student.Student')\n self.assertEqual(model, Student)", "def get_model(model=gin.REQUIRED):\n return model", "def model() -> Model:\n return Model()", "def test_get_item(self):\n self.assertEqual(self.expected_described_model, self.mapped_model[\"described_model_type\"])", "def get_model():\n return UNISAL", "def get_main_model(self):\n return self", "def test_coupledmodels_get(self):\n pass", "def get_response_model_ctor(self):\n return self._response_model_ctor", "def test_get_object(self, detail_view, employee_model):\n\n employee = Mock()\n employee_model.objects.get.return_value = Mock()\n detail_view.get_object.return_value = employee\n\n emp = detail_view.get_object(1)\n self.assertEqual(employee, emp)", "def model(self) -> Model:\n return self.software_system.get_model()", "def _get_card_model(self, model: str) -> Any:\n return self.collection.models.byName(model)", "def create_model(self):\n self.skipTest(\"Base module should not be tested.\")", "def get_model(self):\n\t\treturn self.object.__class__", "def MakeModel(self):\n pass", "def get_model(*args):\n return Model()", "def test_get_model_component(requests_mock):\n from DarktraceMBs import Client, get_model_component_command\n\n # GIVEN an integration is configured and you would like to find similar devices\n mock_api_response = util_load_json('test_data/component.json')\n requests_mock.get('https://mock.darktrace.com/components?cid=254503',\n json=mock_api_response)\n\n client = Client(\n base_url='https://mock.darktrace.com',\n verify=False,\n auth=('examplepub', 'examplepri')\n )\n\n # WHEN the specified device id is 1 and there are 2 results max desired\n args = {\n 'cid': '254503'\n }\n\n # THEN the context will be updated and information about similar devices will be fetched and pulled\n integration_response = get_model_component_command(client, args)\n expected_response = util_load_json('test_data/formatted_component.json')\n\n assert integration_response.outputs == expected_response\n assert integration_response.outputs_prefix == 'Darktrace.Model.Component'", "def modelClass(self):\n raise NotImplementedError", "def get_model(self):\n return self.model", "def get_model(self):\n return self.model", "def get_model(self):\n return self.model", "def get_model(self):\n return self.model", "def get_model(self):\n return self.model", "def get_model(self):\n return self.model", "def get_model(self):\n return self.model", "def get_model(self):\n return self.model", "def get_model(self):\n return self.model", "def get_model(self):\n return self.model", "def test_magic_happens(self):\n class TestObjectFactory(base.Factory):\n class Meta:\n model = TestObject\n\n 
self.assertEqual(TestObject, TestObjectFactory._meta.model)\n obj = TestObjectFactory.build()\n self.assertFalse(hasattr(obj, '_meta'))", "def _default_make_sa_model(model):\n name = model._meta.object_name + \".__aldjemy__\"\n return type(name, (), {\"__module__\": model.__module__})", "def get_base_model(self) -> torch.nn.Module:\n pass", "def test_get_model_method_with_missing_model(self):\n # arrange\n model_manager = ModelManager()\n\n model_manager.load_model(\"tests.mocks.MLModelMock\")\n\n # act\n exception_raised = False\n exception_message = \"\"\n model = None\n try:\n model = model_manager.get_model(qualified_name=\"asdf\")\n except Exception as e:\n exception_raised = True\n exception_message = str(e)\n\n # assert\n self.assertTrue(exception_raised)\n self.assertTrue(exception_message == \"Instance of model 'asdf' not found in ModelManager.\")", "def mock_object(cls, profile=None):\n mo = ManagedObject()\n if profile:\n mo.profile = Profile.get_by_name(profile)\n mo.is_mock = True\n return mo", "def setUp(self):\n self.mock_model = Mock()", "def get_model(self):\n return self.chain.model", "def get_model(self):\n raise NotImplementedError(\n \"You must provide a 'get_model' method for the '%r' index.\" % self\n )", "def test_get_base_polymorphic_model(self):\n # Finds the base from every level (including lowest)\n self.assertIs(get_base_polymorphic_model(Model2D), Model2A)\n self.assertIs(get_base_polymorphic_model(Model2C), Model2A)\n self.assertIs(get_base_polymorphic_model(Model2B), Model2A)\n self.assertIs(get_base_polymorphic_model(Model2A), Model2A)\n\n # Properly handles multiple inheritance\n self.assertIs(get_base_polymorphic_model(Enhance_Inherit), Enhance_Base)\n\n # Ignores PolymorphicModel itself.\n self.assertIs(get_base_polymorphic_model(PolymorphicModel), None)", "def model(self) -> 'outputs.ModelDefinitionResponse':\n return pulumi.get(self, \"model\")", "def _getModel(self):\r\n \r\n return self._model", "def __init__(self, model: Type[ModelType]):\n self.model = model", "def __init__(self, model: Type[ModelType]):\n self.model = model", "def initialize_model(self):\n model = self.model_class()\n return model", "def test_model(base, fake_session):\n\n # Make a dummy model\n\n # these fields should be ignored and should not appear in the model\n ignored = (\"field1\", \"field2\", \"field3\")\n\n # these fields are in the model, but should not get dumped to json\n loadonly = (\"field6\", \"field7\")\n\n @add_schema\n class MyModel(base):\n fields = dict(ignore=ignored, load_only=loadonly)\n\n # load the model from dummy data\n values = range(10)\n keys = [\"field{}\".format(x) for x in values]\n data = dict(zip(keys, values))\n m = MyModel.load_from(data, fake_session)\n\n return m, ignored, loadonly, data, MyModel", "def pywemo_model_fixture():\n return \"LightSwitch\"", "def get_model(params):\r\n module_name, class_name = params.model.name.rsplit('.', 1)\r\n i = importlib.import_module(module_name)\r\n return getattr(i, class_name)", "def get_model(name):\n\n name_to_fun = {'audio': audio_model, 'video': video_model, 'both': combined_model}\n\n if name in name_to_fun:\n model = name_to_fun[name]\n else:\n raise ValueError('Requested name [{}] not a valid model'.format(name))\n\n def wrapper(*args, **kwargs):\n return recurrent_model(model(*args), **kwargs)\n\n return wrapper", "def get_model(model_name):\n module_name = 'strain.models.strain_' + model_name.lower()\n model_module = importlib.import_module(module_name)\n obj = getattr(model_module, 
model_name)\n return obj", "def model(self):", "def model(self):", "def model(self):", "def model(self):", "def model(self):", "def test_constructor(self):\n # Record the model types of all the models to be created\n all_model_types = model_type_to_display_name.keys()\n\n # Record the attribute / value pairs that are common to all models.\n common_attr_value_dict = {\"data\": self.fake_df,\n \"name_spec\": self.fake_names,\n \"design\": self.fake_design,\n \"ind_var_names\": self.fake_names[\"x\"],\n \"alt_id_col\": self.alt_id_col,\n \"obs_id_col\": self.obs_id_col,\n \"choice_col\": self.choice_col,\n \"specification\": self.fake_specification,\n \"alt_IDs\": self.fake_df[\"alt_id\"].values,\n \"choices\": self.fake_df[\"choice\"].values}\n\n # Create a shape name dictionary to relate the various models to the\n # names of their shape parameters.\n shape_name_dict = {\"MNL\": None,\n \"Asym\": self.fake_shape_names[:2],\n \"Cloglog\": None,\n \"Scobit\": self.fake_shape_names,\n \"Uneven\": self.fake_shape_names,\n \"Nested Logit\": None,\n \"Mixed Logit\": None}\n\n # Create a shape reference position dictionary to relate the various\n # models to their shape reference positions.\n shape_ref_dict = {}\n for key in shape_name_dict:\n shape_ref_dict[key] = (None if key != \"Asym\" else\n self.fake_shape_ref_pos)\n\n # Create an intercept_names and intercept_ref_position dictionary to\n # relate the various models to their respective kwargs.\n intercept_names_dict = {}\n intercept_ref_dict = {}\n for key in shape_name_dict:\n if key in [\"MNL\", \"Nested Logit\", \"Mixed Logit\"]:\n intercept_names_dict[key] = None\n intercept_ref_dict[key] = None\n else:\n intercept_names_dict[key] = self.fake_intercept_names\n intercept_ref_dict[key] = self.fake_intercept_ref_pos\n\n # Create a nest_names dictionary to relate the various models to their\n # nest_name attributes\n nest_name_dict = {}\n nest_spec_dict = {}\n for key in shape_name_dict:\n if key != \"Nested Logit\":\n nest_name_dict[key] = None\n nest_spec_dict[key] = None\n else:\n nest_name_dict[key] = list(self.fake_nest_spec.keys())\n nest_spec_dict[key] = self.fake_nest_spec\n\n # Create dictionaries for the mixing_id_col, mixing_vars, and\n # mixing_pos attributes\n mixing_id_col_dict = {}\n mixing_vars_dict = {}\n mixing_pos_dict = {}\n\n for key in shape_name_dict:\n if key != \"Mixed Logit\":\n mixing_id_col_dict[key] = None\n mixing_vars_dict[key] = None\n mixing_pos_dict[key] = None\n else:\n mixing_id_col_dict[key] = self.obs_id_col\n mixing_vars_dict[key] = self.fake_names[\"x\"]\n mixing_pos_dict[key] = [0]\n\n # Record the attribute / value pairs that vary across models\n varying_attr_value_dict = {\"model_type\": model_type_to_display_name,\n \"intercept_names\": intercept_names_dict,\n \"intercept_ref_position\":\n intercept_ref_dict,\n \"shape_names\": shape_name_dict,\n \"shape_ref_position\": shape_ref_dict,\n \"nest_names\": nest_name_dict,\n \"nest_spec\": nest_spec_dict,\n \"mixing_id_col\": mixing_id_col_dict,\n \"mixing_vars\": mixing_vars_dict,\n \"mixing_pos\": mixing_pos_dict}\n\n # Set up the keyword arguments that are needed for each of the model\n # types\n variable_kwargs = {}\n for model_name in all_model_types:\n variable_kwargs[model_name] = {}\n variable_kwargs[model_name][\"intercept_names\"] =\\\n intercept_names_dict[model_name]\n variable_kwargs[model_name][\"intercept_ref_pos\"] =\\\n intercept_ref_dict[model_name]\n variable_kwargs[model_name][\"shape_ref_pos\"] =\\\n shape_ref_dict[model_name]\n 
variable_kwargs[model_name][\"shape_names\"] =\\\n shape_name_dict[model_name]\n variable_kwargs[model_name][\"nest_spec\"] =\\\n nest_spec_dict[model_name]\n variable_kwargs[model_name][\"mixing_id_col\"] =\\\n mixing_id_col_dict[model_name]\n variable_kwargs[model_name][\"mixing_vars\"] =\\\n mixing_vars_dict[model_name]\n\n # Execute the test for each model type\n for model_name in all_model_types:\n # Update the model type in the list of constructor args\n self.constructor_args[-1] = model_name\n\n # Use this specific model's keyword arguments\n self.constructor_kwargs.update(variable_kwargs[model_name])\n\n # Construct the model object\n model_obj = pylogit.create_choice_model(*self.constructor_args,\n **self.constructor_kwargs)\n\n # Make sure that the constructor has all of the required attributes\n for attr in common_attr_value_dict:\n value = common_attr_value_dict[attr]\n if isinstance(value, pd.DataFrame):\n self.assertTrue(value.equals(model_obj.data))\n elif isinstance(value, np.ndarray):\n npt.assert_allclose(value,\n model_obj.__getattribute__(attr))\n else:\n self.assertEqual(value,\n model_obj.__getattribute__(attr))\n\n for attr in varying_attr_value_dict:\n value = varying_attr_value_dict[attr][model_name]\n\n self.assertEqual(value,\n model_obj.__getattribute__(attr))\n\n return None", "def get_model(self) -> torch.nn.Module:\n\n check.check_not_none(self.model)\n return cast(torch.nn.Module, self.model)", "def getModelObj(self):\n model_psp = self.getModelPsp()\n\n if not model_psp:\n log_func.warning(u'Not define model in <%s : %s>' % (self.getName(), self.getType()))\n return None\n model_obj = self.getKernel().createByPsp(psp=model_psp)\n return model_obj", "def test_add_model_components(self):\n create_abstract_model(\n prereq_modules=IMPORTED_PREREQ_MODULES,\n module_to_test=MODULE_BEING_TESTED,\n test_data_dir=TEST_DATA_DIRECTORY,\n subproblem=\"\",\n stage=\"\",\n )", "def test_add_model_components(self):\n create_abstract_model(\n prereq_modules=IMPORTED_PREREQ_MODULES,\n module_to_test=MODULE_BEING_TESTED,\n test_data_dir=TEST_DATA_DIRECTORY,\n subproblem=\"\",\n stage=\"\",\n )", "def test_get_model_moderator(self, *mocks):\n moderator = get_model_moderator(Article)\n self.assertIsNotNone(moderator)", "def test_load_model_method(self):\n # arrange\n # instantiating the model manager class\n model_manager = ModelManager()\n\n # adding the model\n model_manager.load_model(\"tests.mocks.MLModelMock\")\n\n # act\n exception_raised = False\n model_object = None\n # accessing the MLModelMock model object\n try:\n model_object = model_manager.get_model(qualified_name=\"qualified_name\")\n except Exception as e:\n exception_raised = True\n print_tb(e)\n\n # assert\n self.assertFalse(exception_raised)\n self.assertTrue(model_object is not None)", "def model(self):\n return self.model_", "def get_ori_model(model: nn.Module) -> nn.Module:\n if is_model_wrapper(model):\n return model.module\n else:\n return model", "def mock_bestbuy():\n bestbuy = BestBuy()\n bestbuy.potato1 = 'batata baroa'\n bestbuy.potato2 = 'batata inglesa'\n bestbuy.potato_number = 666\n return bestbuy", "def model_wrapper(cls):\n return _create_wrapper_cls(cls, reset_mutation_uid=True, stop_parsing=False)", "def get_fake_model(fields=None, model_base=PostgresModel, meta_options={}):\n\n model = define_fake_model(fields, model_base, meta_options)\n\n class TestProject:\n def clone(self, *_args, **_kwargs):\n return self\n\n @property\n def apps(self):\n return self\n\n class 
TestMigration(migrations.Migration):\n operations = [HStoreExtension()]\n\n with connection.schema_editor() as schema_editor:\n migration_executor = MigrationExecutor(schema_editor.connection)\n migration_executor.apply_migration(\n TestProject(), TestMigration(\"eh\", \"postgres_extra\")\n )\n\n schema_editor.create_model(model)\n\n return model", "def fake_model():\n app.config['FAKE_MODEL'] = True", "def model(self):\n return Product", "def get_model_reference(self, model_name):\n\n print_debug(\"Geting model :\" + model_name)\n model = ModelsFactory.get(model_name=model_name)\n return model", "def test_noarguments(self):\n self.assertEqual(BaseModel, type(BaseModel()))", "def _build_model(self):\n raise NotImplementedError()", "def test_test_client_model(self):\n pass", "def get_class(klass, kind):\n return getattr(sys.modules['model'], kind, None)", "def get_model():\n global model\n if model is None:\n model = AppModel()\n model.load_resources()\n return model", "def mock_data_manager(components):\n dm = Mock()\n dm.components = components\n dm.fixed_components = []\n return dm", "def test_model():\n pass", "def get_result_model(cls):\n raise NotImplementedError()", "def subject(\n decoy: Decoy,\n mock_engine_client: EngineClient,\n api_version: APIVersion,\n mock_sync_hardware_api: SyncHardwareAPI,\n) -> ProtocolCore:\n decoy.when(mock_engine_client.state.labware.get_fixed_trash_id()).then_return(\n \"fixed-trash-123\"\n )\n decoy.when(\n mock_engine_client.state.labware.get_definition(\"fixed-trash-123\")\n ).then_return(\n LabwareDefinition.construct(ordering=[[\"A1\"]]) # type: ignore[call-arg]\n )\n\n return ProtocolCore(\n engine_client=mock_engine_client,\n api_version=api_version,\n sync_hardware=mock_sync_hardware_api,\n )", "def get_model(self):\n return self._model", "def get_model(self):\n return self._model", "def get_non_wrapped_model(model: nn.Module) -> nn.Module:\n from torch.nn import DataParallel\n from torch.nn.parallel import DistributedDataParallel\n\n if not isinstance(model, nn.Module):\n raise RuntimeError(\"Input model must be a subclass of nn.Module.\")\n\n if isinstance(model, (DataParallel, DistributedDataParallel)):\n model = model.module\n\n return model", "def getModel(self):\n return self.model", "def model_class(self):\n return self.prop.composite_class", "def test_valid_model(self):\n model_cls = ModelContainer(APP_LABEL, TestModel2._meta.db_table).model_cls\n self.assertTrue(model_cls.__class__.__name__ is models.Model.__class__.__name__)", "def test_get_singular() -> None:\n mock_session = UnifiedAlchemyMagicMock()\n mock_session.add(Model(pk1=\"123\", name=\"test\"))\n user = mock_session.query(Model).get(\"123\")\n assert user is not None", "def __init__(self, model: object):\n self.model = model", "def build_model(self):\n raise NotImplementedError", "def get_model(self):\n return self.model.module if isinstance(self.model, DDP) else self.model", "def test_model_flow_node_model_flow_id_node_id_component_get(self):\n pass", "def get_model(*, name: str) -> typing.Optional[typing.Type]:\n return getattr(open_alchemy.models, name, None)", "def resolve(self, spec: \"ModelSpec\"):", "def _get_model(self, fl_ctx: FLContext):\n if isinstance(self.model, str):\n # treat it as model component ID\n model_component_id = self.model\n engine = fl_ctx.get_engine()\n self.model = engine.get_component(model_component_id)\n if not self.model:\n self.log_error(fl_ctx, f\"cannot find model component '{model_component_id}'\")\n return\n if self.model and 
isinstance(self.model, dict):\n # try building the model\n try:\n engine = fl_ctx.get_engine()\n # use provided or default optimizer arguments and add the model parameters\n if \"args\" not in self.model:\n self.model[\"args\"] = {}\n self.model = engine.build_component(self.model)\n except Exception as e:\n self.system_panic(\n f\"Exception while parsing `model`: \" f\"{self.model} with Exception {e}\",\n fl_ctx,\n )\n return\n if self.model and not isinstance(self.model, torch.nn.Module):\n self.system_panic(fl_ctx, f\"expect model to be torch.nn.Module but got {type(self.model)}: {self.model}\")\n return\n if self.model is None:\n self.system_panic(fl_ctx, f\"Model wasn't built correctly! It is {self.model}\")\n return\n self.log_info(fl_ctx, f\"Running model {self.model}\")", "def test_build_model(arguments):\n ...", "def entity():\n return Entity(\n u'Dummy', IDummy, 'icemac.addressbook.tests.test_entities.Dummy')", "def model(self):\n return self.__model", "def build_model(self):\n pass", "def build_model(self):\n pass" ]
[ "0.6227962", "0.61123717", "0.60828376", "0.60729104", "0.59207785", "0.58533525", "0.5756956", "0.5729371", "0.5705438", "0.5661981", "0.56480694", "0.55965275", "0.5589341", "0.55885005", "0.553621", "0.55356354", "0.5533403", "0.55187505", "0.55115426", "0.5495379", "0.5476795", "0.54682446", "0.5439469", "0.5429802", "0.5429802", "0.5429802", "0.5429802", "0.5429802", "0.5429802", "0.5429802", "0.5429802", "0.5429802", "0.5429802", "0.5396816", "0.5378857", "0.53721505", "0.53687066", "0.53545225", "0.5352151", "0.53397954", "0.53384", "0.5332573", "0.53307086", "0.5330229", "0.5329222", "0.5329222", "0.5321667", "0.5312478", "0.5312357", "0.53091466", "0.52926844", "0.52896047", "0.52705526", "0.52705526", "0.52705526", "0.52705526", "0.52705526", "0.52662826", "0.5262175", "0.5258191", "0.5255052", "0.5255052", "0.52545077", "0.52451235", "0.5239324", "0.5232564", "0.5230302", "0.5222266", "0.5222193", "0.5222081", "0.52177954", "0.52093303", "0.5207005", "0.5206166", "0.5202784", "0.52000844", "0.5192844", "0.51914465", "0.51842594", "0.5182707", "0.517658", "0.5175167", "0.5175167", "0.5167466", "0.51657736", "0.5159334", "0.5156495", "0.5153565", "0.5153187", "0.51385754", "0.513814", "0.513422", "0.51327586", "0.5131867", "0.5130601", "0.51299936", "0.5125022", "0.5124133", "0.5113761", "0.5113761" ]
0.63239694
0
Return a mock component of a general model.
def mock_scaling_component(n_refl):
    component = mock_component()
    component.calculate_scales.return_value = flex.double(n_refl, 1.0)
    component.n_refl = [n_refl]
    return component
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def mock_component():\n component = Mock()\n component.free_parameters = flex.double([1.0])\n component.free_parameter_esds = None\n component.n_params = 1\n component.var_cov_matrix = sparse.matrix(1, 1)\n return component", "def test_get_model(self) -> None:\n get_model()", "def real_model(request):\n return request.config.option.real_model", "def model(self) -> Type[Model]:", "def model_name(self) -> str:\n return \"mock-model-name\"", "def test_get_model_method(self):\n # arrange\n model_manager = ModelManager()\n\n model_manager.load_model(\"tests.mocks.MLModelMock\")\n\n # act\n exception_raised = False\n model = None\n try:\n model = model_manager.get_model(qualified_name=\"qualified_name\")\n except Exception as e:\n exception_raised = True\n\n # assert\n self.assertFalse(exception_raised)\n self.assertTrue(type(model) is MLModelMock)", "def test_get(self):\n self.assertEqual(self.expected_described_model, self.mapped_model.get(\"described_model_type\"))", "def testGetReigsteredModel(self):\n from soc.models.student import Student\n model = models_logic.getModel('soc.models.student.Student')\n self.assertEqual(model, Student)", "def get_model(model=gin.REQUIRED):\n return model", "def model() -> Model:\n return Model()", "def test_get_item(self):\n self.assertEqual(self.expected_described_model, self.mapped_model[\"described_model_type\"])", "def get_model():\n return UNISAL", "def get_main_model(self):\n return self", "def test_coupledmodels_get(self):\n pass", "def get_response_model_ctor(self):\n return self._response_model_ctor", "def test_get_object(self, detail_view, employee_model):\n\n employee = Mock()\n employee_model.objects.get.return_value = Mock()\n detail_view.get_object.return_value = employee\n\n emp = detail_view.get_object(1)\n self.assertEqual(employee, emp)", "def model(self) -> Model:\n return self.software_system.get_model()", "def _get_card_model(self, model: str) -> Any:\n return self.collection.models.byName(model)", "def create_model(self):\n self.skipTest(\"Base module should not be tested.\")", "def get_model(self):\n\t\treturn self.object.__class__", "def MakeModel(self):\n pass", "def get_model(*args):\n return Model()", "def test_get_model_component(requests_mock):\n from DarktraceMBs import Client, get_model_component_command\n\n # GIVEN an integration is configured and you would like to find similar devices\n mock_api_response = util_load_json('test_data/component.json')\n requests_mock.get('https://mock.darktrace.com/components?cid=254503',\n json=mock_api_response)\n\n client = Client(\n base_url='https://mock.darktrace.com',\n verify=False,\n auth=('examplepub', 'examplepri')\n )\n\n # WHEN the specified device id is 1 and there are 2 results max desired\n args = {\n 'cid': '254503'\n }\n\n # THEN the context will be updated and information about similar devices will be fetched and pulled\n integration_response = get_model_component_command(client, args)\n expected_response = util_load_json('test_data/formatted_component.json')\n\n assert integration_response.outputs == expected_response\n assert integration_response.outputs_prefix == 'Darktrace.Model.Component'", "def modelClass(self):\n raise NotImplementedError", "def get_model(self):\n return self.model", "def get_model(self):\n return self.model", "def get_model(self):\n return self.model", "def get_model(self):\n return self.model", "def get_model(self):\n return self.model", "def get_model(self):\n return self.model", "def get_model(self):\n return self.model", "def get_model(self):\n 
return self.model", "def get_model(self):\n return self.model", "def get_model(self):\n return self.model", "def test_magic_happens(self):\n class TestObjectFactory(base.Factory):\n class Meta:\n model = TestObject\n\n self.assertEqual(TestObject, TestObjectFactory._meta.model)\n obj = TestObjectFactory.build()\n self.assertFalse(hasattr(obj, '_meta'))", "def _default_make_sa_model(model):\n name = model._meta.object_name + \".__aldjemy__\"\n return type(name, (), {\"__module__\": model.__module__})", "def get_base_model(self) -> torch.nn.Module:\n pass", "def test_get_model_method_with_missing_model(self):\n # arrange\n model_manager = ModelManager()\n\n model_manager.load_model(\"tests.mocks.MLModelMock\")\n\n # act\n exception_raised = False\n exception_message = \"\"\n model = None\n try:\n model = model_manager.get_model(qualified_name=\"asdf\")\n except Exception as e:\n exception_raised = True\n exception_message = str(e)\n\n # assert\n self.assertTrue(exception_raised)\n self.assertTrue(exception_message == \"Instance of model 'asdf' not found in ModelManager.\")", "def mock_object(cls, profile=None):\n mo = ManagedObject()\n if profile:\n mo.profile = Profile.get_by_name(profile)\n mo.is_mock = True\n return mo", "def setUp(self):\n self.mock_model = Mock()", "def get_model(self):\n return self.chain.model", "def get_model(self):\n raise NotImplementedError(\n \"You must provide a 'get_model' method for the '%r' index.\" % self\n )", "def test_get_base_polymorphic_model(self):\n # Finds the base from every level (including lowest)\n self.assertIs(get_base_polymorphic_model(Model2D), Model2A)\n self.assertIs(get_base_polymorphic_model(Model2C), Model2A)\n self.assertIs(get_base_polymorphic_model(Model2B), Model2A)\n self.assertIs(get_base_polymorphic_model(Model2A), Model2A)\n\n # Properly handles multiple inheritance\n self.assertIs(get_base_polymorphic_model(Enhance_Inherit), Enhance_Base)\n\n # Ignores PolymorphicModel itself.\n self.assertIs(get_base_polymorphic_model(PolymorphicModel), None)", "def model(self) -> 'outputs.ModelDefinitionResponse':\n return pulumi.get(self, \"model\")", "def _getModel(self):\r\n \r\n return self._model", "def __init__(self, model: Type[ModelType]):\n self.model = model", "def __init__(self, model: Type[ModelType]):\n self.model = model", "def initialize_model(self):\n model = self.model_class()\n return model", "def test_model(base, fake_session):\n\n # Make a dummy model\n\n # these fields should be ignored and should not appear in the model\n ignored = (\"field1\", \"field2\", \"field3\")\n\n # these fields are in the model, but should not get dumped to json\n loadonly = (\"field6\", \"field7\")\n\n @add_schema\n class MyModel(base):\n fields = dict(ignore=ignored, load_only=loadonly)\n\n # load the model from dummy data\n values = range(10)\n keys = [\"field{}\".format(x) for x in values]\n data = dict(zip(keys, values))\n m = MyModel.load_from(data, fake_session)\n\n return m, ignored, loadonly, data, MyModel", "def pywemo_model_fixture():\n return \"LightSwitch\"", "def get_model(params):\r\n module_name, class_name = params.model.name.rsplit('.', 1)\r\n i = importlib.import_module(module_name)\r\n return getattr(i, class_name)", "def get_model(name):\n\n name_to_fun = {'audio': audio_model, 'video': video_model, 'both': combined_model}\n\n if name in name_to_fun:\n model = name_to_fun[name]\n else:\n raise ValueError('Requested name [{}] not a valid model'.format(name))\n\n def wrapper(*args, **kwargs):\n return 
recurrent_model(model(*args), **kwargs)\n\n return wrapper", "def get_model(model_name):\n module_name = 'strain.models.strain_' + model_name.lower()\n model_module = importlib.import_module(module_name)\n obj = getattr(model_module, model_name)\n return obj", "def model(self):", "def model(self):", "def model(self):", "def model(self):", "def model(self):", "def test_constructor(self):\n # Record the model types of all the models to be created\n all_model_types = model_type_to_display_name.keys()\n\n # Record the attribute / value pairs that are common to all models.\n common_attr_value_dict = {\"data\": self.fake_df,\n \"name_spec\": self.fake_names,\n \"design\": self.fake_design,\n \"ind_var_names\": self.fake_names[\"x\"],\n \"alt_id_col\": self.alt_id_col,\n \"obs_id_col\": self.obs_id_col,\n \"choice_col\": self.choice_col,\n \"specification\": self.fake_specification,\n \"alt_IDs\": self.fake_df[\"alt_id\"].values,\n \"choices\": self.fake_df[\"choice\"].values}\n\n # Create a shape name dictionary to relate the various models to the\n # names of their shape parameters.\n shape_name_dict = {\"MNL\": None,\n \"Asym\": self.fake_shape_names[:2],\n \"Cloglog\": None,\n \"Scobit\": self.fake_shape_names,\n \"Uneven\": self.fake_shape_names,\n \"Nested Logit\": None,\n \"Mixed Logit\": None}\n\n # Create a shape reference position dictionary to relate the various\n # models to their shape reference positions.\n shape_ref_dict = {}\n for key in shape_name_dict:\n shape_ref_dict[key] = (None if key != \"Asym\" else\n self.fake_shape_ref_pos)\n\n # Create an intercept_names and intercept_ref_position dictionary to\n # relate the various models to their respective kwargs.\n intercept_names_dict = {}\n intercept_ref_dict = {}\n for key in shape_name_dict:\n if key in [\"MNL\", \"Nested Logit\", \"Mixed Logit\"]:\n intercept_names_dict[key] = None\n intercept_ref_dict[key] = None\n else:\n intercept_names_dict[key] = self.fake_intercept_names\n intercept_ref_dict[key] = self.fake_intercept_ref_pos\n\n # Create a nest_names dictionary to relate the various models to their\n # nest_name attributes\n nest_name_dict = {}\n nest_spec_dict = {}\n for key in shape_name_dict:\n if key != \"Nested Logit\":\n nest_name_dict[key] = None\n nest_spec_dict[key] = None\n else:\n nest_name_dict[key] = list(self.fake_nest_spec.keys())\n nest_spec_dict[key] = self.fake_nest_spec\n\n # Create dictionaries for the mixing_id_col, mixing_vars, and\n # mixing_pos attributes\n mixing_id_col_dict = {}\n mixing_vars_dict = {}\n mixing_pos_dict = {}\n\n for key in shape_name_dict:\n if key != \"Mixed Logit\":\n mixing_id_col_dict[key] = None\n mixing_vars_dict[key] = None\n mixing_pos_dict[key] = None\n else:\n mixing_id_col_dict[key] = self.obs_id_col\n mixing_vars_dict[key] = self.fake_names[\"x\"]\n mixing_pos_dict[key] = [0]\n\n # Record the attribute / value pairs that vary across models\n varying_attr_value_dict = {\"model_type\": model_type_to_display_name,\n \"intercept_names\": intercept_names_dict,\n \"intercept_ref_position\":\n intercept_ref_dict,\n \"shape_names\": shape_name_dict,\n \"shape_ref_position\": shape_ref_dict,\n \"nest_names\": nest_name_dict,\n \"nest_spec\": nest_spec_dict,\n \"mixing_id_col\": mixing_id_col_dict,\n \"mixing_vars\": mixing_vars_dict,\n \"mixing_pos\": mixing_pos_dict}\n\n # Set up the keyword arguments that are needed for each of the model\n # types\n variable_kwargs = {}\n for model_name in all_model_types:\n variable_kwargs[model_name] = {}\n 
variable_kwargs[model_name][\"intercept_names\"] =\\\n intercept_names_dict[model_name]\n variable_kwargs[model_name][\"intercept_ref_pos\"] =\\\n intercept_ref_dict[model_name]\n variable_kwargs[model_name][\"shape_ref_pos\"] =\\\n shape_ref_dict[model_name]\n variable_kwargs[model_name][\"shape_names\"] =\\\n shape_name_dict[model_name]\n variable_kwargs[model_name][\"nest_spec\"] =\\\n nest_spec_dict[model_name]\n variable_kwargs[model_name][\"mixing_id_col\"] =\\\n mixing_id_col_dict[model_name]\n variable_kwargs[model_name][\"mixing_vars\"] =\\\n mixing_vars_dict[model_name]\n\n # Execute the test for each model type\n for model_name in all_model_types:\n # Update the model type in the list of constructor args\n self.constructor_args[-1] = model_name\n\n # Use this specific model's keyword arguments\n self.constructor_kwargs.update(variable_kwargs[model_name])\n\n # Construct the model object\n model_obj = pylogit.create_choice_model(*self.constructor_args,\n **self.constructor_kwargs)\n\n # Make sure that the constructor has all of the required attributes\n for attr in common_attr_value_dict:\n value = common_attr_value_dict[attr]\n if isinstance(value, pd.DataFrame):\n self.assertTrue(value.equals(model_obj.data))\n elif isinstance(value, np.ndarray):\n npt.assert_allclose(value,\n model_obj.__getattribute__(attr))\n else:\n self.assertEqual(value,\n model_obj.__getattribute__(attr))\n\n for attr in varying_attr_value_dict:\n value = varying_attr_value_dict[attr][model_name]\n\n self.assertEqual(value,\n model_obj.__getattribute__(attr))\n\n return None", "def get_model(self) -> torch.nn.Module:\n\n check.check_not_none(self.model)\n return cast(torch.nn.Module, self.model)", "def getModelObj(self):\n model_psp = self.getModelPsp()\n\n if not model_psp:\n log_func.warning(u'Not define model in <%s : %s>' % (self.getName(), self.getType()))\n return None\n model_obj = self.getKernel().createByPsp(psp=model_psp)\n return model_obj", "def test_add_model_components(self):\n create_abstract_model(\n prereq_modules=IMPORTED_PREREQ_MODULES,\n module_to_test=MODULE_BEING_TESTED,\n test_data_dir=TEST_DATA_DIRECTORY,\n subproblem=\"\",\n stage=\"\",\n )", "def test_add_model_components(self):\n create_abstract_model(\n prereq_modules=IMPORTED_PREREQ_MODULES,\n module_to_test=MODULE_BEING_TESTED,\n test_data_dir=TEST_DATA_DIRECTORY,\n subproblem=\"\",\n stage=\"\",\n )", "def test_get_model_moderator(self, *mocks):\n moderator = get_model_moderator(Article)\n self.assertIsNotNone(moderator)", "def test_load_model_method(self):\n # arrange\n # instantiating the model manager class\n model_manager = ModelManager()\n\n # adding the model\n model_manager.load_model(\"tests.mocks.MLModelMock\")\n\n # act\n exception_raised = False\n model_object = None\n # accessing the MLModelMock model object\n try:\n model_object = model_manager.get_model(qualified_name=\"qualified_name\")\n except Exception as e:\n exception_raised = True\n print_tb(e)\n\n # assert\n self.assertFalse(exception_raised)\n self.assertTrue(model_object is not None)", "def model(self):\n return self.model_", "def get_ori_model(model: nn.Module) -> nn.Module:\n if is_model_wrapper(model):\n return model.module\n else:\n return model", "def mock_bestbuy():\n bestbuy = BestBuy()\n bestbuy.potato1 = 'batata baroa'\n bestbuy.potato2 = 'batata inglesa'\n bestbuy.potato_number = 666\n return bestbuy", "def model_wrapper(cls):\n return _create_wrapper_cls(cls, reset_mutation_uid=True, stop_parsing=False)", "def get_fake_model(fields=None, 
model_base=PostgresModel, meta_options={}):\n\n model = define_fake_model(fields, model_base, meta_options)\n\n class TestProject:\n def clone(self, *_args, **_kwargs):\n return self\n\n @property\n def apps(self):\n return self\n\n class TestMigration(migrations.Migration):\n operations = [HStoreExtension()]\n\n with connection.schema_editor() as schema_editor:\n migration_executor = MigrationExecutor(schema_editor.connection)\n migration_executor.apply_migration(\n TestProject(), TestMigration(\"eh\", \"postgres_extra\")\n )\n\n schema_editor.create_model(model)\n\n return model", "def fake_model():\n app.config['FAKE_MODEL'] = True", "def model(self):\n return Product", "def get_model_reference(self, model_name):\n\n print_debug(\"Geting model :\" + model_name)\n model = ModelsFactory.get(model_name=model_name)\n return model", "def test_noarguments(self):\n self.assertEqual(BaseModel, type(BaseModel()))", "def _build_model(self):\n raise NotImplementedError()", "def test_test_client_model(self):\n pass", "def get_class(klass, kind):\n return getattr(sys.modules['model'], kind, None)", "def get_model():\n global model\n if model is None:\n model = AppModel()\n model.load_resources()\n return model", "def mock_data_manager(components):\n dm = Mock()\n dm.components = components\n dm.fixed_components = []\n return dm", "def test_model():\n pass", "def get_result_model(cls):\n raise NotImplementedError()", "def subject(\n decoy: Decoy,\n mock_engine_client: EngineClient,\n api_version: APIVersion,\n mock_sync_hardware_api: SyncHardwareAPI,\n) -> ProtocolCore:\n decoy.when(mock_engine_client.state.labware.get_fixed_trash_id()).then_return(\n \"fixed-trash-123\"\n )\n decoy.when(\n mock_engine_client.state.labware.get_definition(\"fixed-trash-123\")\n ).then_return(\n LabwareDefinition.construct(ordering=[[\"A1\"]]) # type: ignore[call-arg]\n )\n\n return ProtocolCore(\n engine_client=mock_engine_client,\n api_version=api_version,\n sync_hardware=mock_sync_hardware_api,\n )", "def get_model(self):\n return self._model", "def get_model(self):\n return self._model", "def get_non_wrapped_model(model: nn.Module) -> nn.Module:\n from torch.nn import DataParallel\n from torch.nn.parallel import DistributedDataParallel\n\n if not isinstance(model, nn.Module):\n raise RuntimeError(\"Input model must be a subclass of nn.Module.\")\n\n if isinstance(model, (DataParallel, DistributedDataParallel)):\n model = model.module\n\n return model", "def getModel(self):\n return self.model", "def model_class(self):\n return self.prop.composite_class", "def test_valid_model(self):\n model_cls = ModelContainer(APP_LABEL, TestModel2._meta.db_table).model_cls\n self.assertTrue(model_cls.__class__.__name__ is models.Model.__class__.__name__)", "def test_get_singular() -> None:\n mock_session = UnifiedAlchemyMagicMock()\n mock_session.add(Model(pk1=\"123\", name=\"test\"))\n user = mock_session.query(Model).get(\"123\")\n assert user is not None", "def __init__(self, model: object):\n self.model = model", "def build_model(self):\n raise NotImplementedError", "def get_model(self):\n return self.model.module if isinstance(self.model, DDP) else self.model", "def test_model_flow_node_model_flow_id_node_id_component_get(self):\n pass", "def get_model(*, name: str) -> typing.Optional[typing.Type]:\n return getattr(open_alchemy.models, name, None)", "def resolve(self, spec: \"ModelSpec\"):", "def _get_model(self, fl_ctx: FLContext):\n if isinstance(self.model, str):\n # treat it as model component ID\n model_component_id = 
self.model\n engine = fl_ctx.get_engine()\n self.model = engine.get_component(model_component_id)\n if not self.model:\n self.log_error(fl_ctx, f\"cannot find model component '{model_component_id}'\")\n return\n if self.model and isinstance(self.model, dict):\n # try building the model\n try:\n engine = fl_ctx.get_engine()\n # use provided or default optimizer arguments and add the model parameters\n if \"args\" not in self.model:\n self.model[\"args\"] = {}\n self.model = engine.build_component(self.model)\n except Exception as e:\n self.system_panic(\n f\"Exception while parsing `model`: \" f\"{self.model} with Exception {e}\",\n fl_ctx,\n )\n return\n if self.model and not isinstance(self.model, torch.nn.Module):\n self.system_panic(fl_ctx, f\"expect model to be torch.nn.Module but got {type(self.model)}: {self.model}\")\n return\n if self.model is None:\n self.system_panic(fl_ctx, f\"Model wasn't built correctly! It is {self.model}\")\n return\n self.log_info(fl_ctx, f\"Running model {self.model}\")", "def test_build_model(arguments):\n ...", "def entity():\n return Entity(\n u'Dummy', IDummy, 'icemac.addressbook.tests.test_entities.Dummy')", "def model(self):\n return self.__model", "def build_model(self):\n pass", "def build_model(self):\n pass" ]
[ "0.63239694", "0.6227962", "0.61123717", "0.60828376", "0.60729104", "0.59207785", "0.58533525", "0.5756956", "0.5729371", "0.5705438", "0.5661981", "0.56480694", "0.55965275", "0.5589341", "0.55885005", "0.553621", "0.55356354", "0.5533403", "0.55187505", "0.55115426", "0.5495379", "0.5476795", "0.54682446", "0.5439469", "0.5429802", "0.5429802", "0.5429802", "0.5429802", "0.5429802", "0.5429802", "0.5429802", "0.5429802", "0.5429802", "0.5429802", "0.5396816", "0.5378857", "0.53721505", "0.53687066", "0.53545225", "0.5352151", "0.53397954", "0.53384", "0.5332573", "0.53307086", "0.5330229", "0.5329222", "0.5329222", "0.5321667", "0.5312478", "0.5312357", "0.53091466", "0.52926844", "0.52896047", "0.52705526", "0.52705526", "0.52705526", "0.52705526", "0.52705526", "0.52662826", "0.5262175", "0.5258191", "0.5255052", "0.5255052", "0.52545077", "0.52451235", "0.5239324", "0.5232564", "0.5230302", "0.5222266", "0.5222193", "0.5222081", "0.52177954", "0.52093303", "0.5207005", "0.5206166", "0.5202784", "0.52000844", "0.5192844", "0.51914465", "0.51842594", "0.5182707", "0.517658", "0.5175167", "0.5175167", "0.5167466", "0.51657736", "0.5159334", "0.5156495", "0.5153565", "0.5153187", "0.51385754", "0.513814", "0.513422", "0.51327586", "0.5131867", "0.5130601", "0.51299936", "0.5125022", "0.5124133", "0.5113761", "0.5113761" ]
0.0
-1
Return a mock data manager of a general model.
def mock_data_manager(components):
    dm = Mock()
    dm.components = components
    dm.fixed_components = []
    return dm
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def setup_dummy_data_manager():\n import repoze.filesafe\n repoze.filesafe._local.manager = mgr = DummyDataManager()\n return mgr", "def _get_data_manager(self):\n\n ftype = self.conf['General']['save_as']\n if ftype == 'npz':\n return NPZDataManager(self.conf, self.log)\n elif ftype == 'hdf5':\n return HDF5DataManager(self.conf, self.log)\n else:\n raise ValueError('Invalid file type in config')", "def setUp(self):\n super().setUp()\n self.database.datamodels.find_one.return_value = self.DATA_MODEL", "def get_data_manager(self):\n\n return self._data_manager", "def setUp(self):\n self.mock_model = Mock()", "def data_manager_fixture():\n\n class DataManager:\n def __init__(self):\n self.gen = 1000\n self.cfg = get_cfg_defaults()\n mode = \"test_inference\"\n self.dataset = Dataset(None, self.cfg, mode)\n self.auto_anchors = AutoAnchors(self.dataset, self.cfg.model, self.gen)\n self.k_points = torch.ones((12, 2)) * 2.0\n self.wh = torch.ones((1000, 2)) * 2.0\n\n return DataManager()", "def manager(model):\n return model.objects", "def test_get_model_method(self):\n # arrange\n model_manager = ModelManager()\n\n model_manager.load_model(\"tests.mocks.MLModelMock\")\n\n # act\n exception_raised = False\n model = None\n try:\n model = model_manager.get_model(qualified_name=\"qualified_name\")\n except Exception as e:\n exception_raised = True\n\n # assert\n self.assertFalse(exception_raised)\n self.assertTrue(type(model) is MLModelMock)", "def test_default_manager(self):\n\n class Book(RestObject):\n pass\n\n class Author(RestObject):\n pass\n \n self.assertTrue(isinstance(Book.objects, RestManager))\n self.assertTrue(Book.objects.object_class, Book)\n\n self.assertTrue(isinstance(Author.objects, RestManager))\n self.assertTrue(Author.objects.object_class, Author)\n\n self.assertNotEqual(Book.objects, Author.objects)\n \n book = Book()\n # Cannot test AttributeError with self.assertRaises\n try:\n book.objects.all()\n except AttributeError, e:\n self.assertEqual('%s' % e, 'Manager is not accessible via Book instances')", "def make_test_object(self):\n return self.orm_cls.testing_create()", "def create_model(self):\n self.skipTest(\"Base module should not be tested.\")", "def as_manager(cls):\n manager = DefaultManager.from_queryset(cls)()\n manager._built_with_as_manager = True\n return manager", "def test_get_model(self) -> None:\n get_model()", "def get_data_manager(\n hass: HomeAssistantType, entry: ConfigEntry\n) -> WithingsDataManager:\n profile = entry.data.get(const.PROFILE)\n\n if not hass.data.get(const.DOMAIN):\n hass.data[const.DOMAIN] = {}\n\n if not hass.data[const.DOMAIN].get(const.DATA_MANAGER):\n hass.data[const.DOMAIN][const.DATA_MANAGER] = {}\n\n if not hass.data[const.DOMAIN][const.DATA_MANAGER].get(profile):\n hass.data[const.DOMAIN][const.DATA_MANAGER][\n profile\n ] = create_withings_data_manager(hass, entry)\n\n return hass.data[const.DOMAIN][const.DATA_MANAGER][profile]", "def test_model_manager_will_return_same_instance_when_instantiated_many_times(self):\n # arrange, act\n # instantiating the model manager class twice\n first_model_manager = ModelManager()\n second_model_manager = ModelManager()\n\n # loading the MLModel objects from configuration\n first_model_manager.load_model(\"tests.mocks.MLModelMock\")\n\n first_model_object = first_model_manager.get_model(qualified_name=\"qualified_name\")\n second_model_object = second_model_manager.get_model(qualified_name=\"qualified_name\")\n\n # assert\n self.assertTrue(str(first_model_manager) == 
str(second_model_manager))\n self.assertTrue(str(first_model_object) == str(second_model_object))", "def cleanup_dummy_data_manager():\n import repoze.filesafe\n manager = getattr(repoze.filesafe._local, 'manager', None)\n if isinstance(manager, DummyDataManager):\n del repoze.filesafe._local.manager\n return manager", "def get_fake_model(fields=None, model_base=PostgresModel, meta_options={}):\n\n model = define_fake_model(fields, model_base, meta_options)\n\n class TestProject:\n def clone(self, *_args, **_kwargs):\n return self\n\n @property\n def apps(self):\n return self\n\n class TestMigration(migrations.Migration):\n operations = [HStoreExtension()]\n\n with connection.schema_editor() as schema_editor:\n migration_executor = MigrationExecutor(schema_editor.connection)\n migration_executor.apply_migration(\n TestProject(), TestMigration(\"eh\", \"postgres_extra\")\n )\n\n schema_editor.create_model(model)\n\n return model", "def setUp(self):\n self.my_model1 = BaseModel()\n self.my_model1.name = \"hello\"\n self.my_model1.number = 9\n self.my_model2 = BaseModel()\n self.my_model2.name = \"goodbye\"\n self.my_model2.number = 19\n self.mock_stdin = create_autospec(sys.stdin)\n self.mock_stdout = create_autospec(sys.stdout)", "def test_get_model_moderator(self, *mocks):\n moderator = get_model_moderator(Article)\n self.assertIsNotNone(moderator)", "def test_load_model_method(self):\n # arrange\n # instantiating the model manager class\n model_manager = ModelManager()\n\n # adding the model\n model_manager.load_model(\"tests.mocks.MLModelMock\")\n\n # act\n exception_raised = False\n model_object = None\n # accessing the MLModelMock model object\n try:\n model_object = model_manager.get_model(qualified_name=\"qualified_name\")\n except Exception as e:\n exception_raised = True\n print_tb(e)\n\n # assert\n self.assertFalse(exception_raised)\n self.assertTrue(model_object is not None)", "def mock_rdata(): \n return {\n \"authors\": [{\"full_name\": \"N. Ame\"}],\n \"owners\": [{\"full_name\": \"N. Ame\"}],\n \"submitter\": {\"full_name\": \"N. 
Ame\"},\n \"paper_id\": \"1234.56789\",\n \"title\": \"some title\",\n \"abstract\": \"An abstract with math $/alpha * /alpha$ for you.\",\n }", "def test_model(base, fake_session):\n\n # Make a dummy model\n\n # these fields should be ignored and should not appear in the model\n ignored = (\"field1\", \"field2\", \"field3\")\n\n # these fields are in the model, but should not get dumped to json\n loadonly = (\"field6\", \"field7\")\n\n @add_schema\n class MyModel(base):\n fields = dict(ignore=ignored, load_only=loadonly)\n\n # load the model from dummy data\n values = range(10)\n keys = [\"field{}\".format(x) for x in values]\n data = dict(zip(keys, values))\n m = MyModel.load_from(data, fake_session)\n\n return m, ignored, loadonly, data, MyModel", "def mock_object(cls, profile=None):\n mo = ManagedObject()\n if profile:\n mo.profile = Profile.get_by_name(profile)\n mo.is_mock = True\n return mo", "def test_default_manager(self):\n self.assertIsInstance(FlatPage._default_manager, UrlNodeManager)\n self.assertIsInstance(FlatPage.objects.all(), UrlNodeQuerySet)", "def data_model(self) -> DataModel:\n return self._data_model", "def setUpClass(self):\n\n base_model = BaseModel()", "def setUp(self):\n self.base1 = BaseModel()", "def test_dataloader(self) -> DataLoader:\n return self._custom_data_loader()", "def mock_labware_data_provider(decoy: Decoy) -> LabwareDataProvider:\n return decoy.mock(cls=LabwareDataProvider)", "def test_get_object(self, detail_view, employee_model):\n\n employee = Mock()\n employee_model.objects.get.return_value = Mock()\n detail_view.get_object.return_value = employee\n\n emp = detail_view.get_object(1)\n self.assertEqual(employee, emp)", "def model() -> Model:\n return Model()", "def get_device_manager(device_model: str):\n return _get_device_handler_or_manager(device_model, True)", "def get_manager():\n return __manager__", "def get_metadata_manager(config):\n\n context = config.contextualization_type\n metadata_manager_class = '%sMetadataManager' % context\n if not (metadata_manager_class in globals()):\n raise NotImplementedError('Implementation for %s not available' % context)\n return (globals()[metadata_manager_class])(config)", "def resolve_dataset_manager() -> DatasetManager:\n _dataset_manager_class = conf.getimport(\n section=\"core\",\n key=\"dataset_manager_class\",\n fallback=\"airflow.datasets.manager.DatasetManager\",\n )\n _dataset_manager_kwargs = conf.getjson(\n section=\"core\",\n key=\"dataset_manager_kwargs\",\n fallback={},\n )\n return _dataset_manager_class(**_dataset_manager_kwargs)", "def dbm(cls):\n return cls.dbmanager", "def factory_manager():\n global _FACTORY_MANAGER\n\n if _FACTORY_MANAGER:\n return _FACTORY_MANAGER\n\n _FACTORY_MANAGER = Factories()\n\n return _FACTORY_MANAGER", "def new_manager() -> SyncManager:\n return Manager()", "def setUpTestData(cls):\n cls.emulate_off_api_manager_categories()\n cls.emulate_off_api_manager_products()\n cls.db_manager = Command()", "def db(self):\n return DbManager(self)", "def mgmt_client_fixture(mocker):\n mock_management_client = mocker.patch.object(ManagementClient, '__init__')\n mock_management_client.return_value = None\n return mock_management_client", "def test_settingmodel_init():\n SettingsModel()", "def get_model():\n global model_class\n if model_class is None:\n from fluent_comments.models import FluentComment\n\n # Our proxy model that performs select_related('user') for the comments\n model_class = FluentComment\n\n return model_class", "def testGetReigsteredModel(self):\n 
from soc.models.student import Student\n model = models_logic.getModel('soc.models.student.Student')\n self.assertEqual(model, Student)", "def setUp(self):\n self.database = Mock()", "def test_create_data_model(test_dao):\r\n DUT = dtmFunction(test_dao, test=True)\r\n\r\n assert isinstance(DUT, dtmFunction)\r\n assert isinstance(DUT.tree, Tree)\r\n assert isinstance(DUT.dao, DAO)", "def GetManager(self):\r\n\r\n return self.manager", "def mock_legacy_dataset(mock_dataset_with_cache_dir):\n archive_path = os.path.join(resource_filename('gtmcore.dataset.tests', 'data'), 'test-legacy-dataset.zip')\n temp_path = os.path.join(tempfile.gettempdir(), 'test-legacy-dataset.zip')\n shutil.copyfile(archive_path, temp_path)\n conf_file = mock_dataset_with_cache_dir[0].client_config.config_file\n import_dataset_from_zip(archive_path=temp_path, username=USERNAME,\n owner=USERNAME, config_file=conf_file)\n\n im = InventoryManager()\n ds = im.load_dataset(USERNAME, USERNAME, 'test-legacy-dataset')\n m = Manifest(ds, USERNAME)\n\n # yield dataset, manifest, working_dir\n yield ds, m, mock_dataset_with_cache_dir[1]", "def mock_unit_db(monkeypatch):\n mock_kv = mock.Mock()\n mock_kv.return_value = unitdata.Storage(path=\":memory:\")\n monkeypatch.setattr(\"libgitlab.unitdata.kv\", mock_kv)", "def setUp(self):\n self.factory = RequestFactory()\n StaffProfile.objects.rebuild()\n self.manager = mommy.make(\n \"auth.User\", first_name=\"Jane\", last_name=\"Ndoe\", email=\"[email protected]\"\n )\n self.user = mommy.make(\n \"auth.User\", first_name=\"Bob\", last_name=\"Ndoe\", email=\"[email protected]\"\n )\n manager_mommy = Recipe(StaffProfile, lft=None, rght=None, user=self.manager)\n staff_mommy = Recipe(StaffProfile, lft=None, rght=None, user=self.user)\n self.manager_profile = manager_mommy.make()\n self.staffprofile = staff_mommy.make()", "def test_single(engine, sessionmaker):\n # Defining specification\n spec = {\n \"components\": {\n \"schemas\": {\n \"Employee\": {\n \"properties\": {\n \"id\": {\"type\": \"integer\", \"x-primary-key\": True},\n \"name\": {\"type\": \"string\"},\n \"type\": {\"type\": \"string\"},\n },\n \"x-tablename\": \"employee\",\n \"type\": \"object\",\n \"x-kwargs\": {\n \"__mapper_args__\": {\n \"polymorphic_on\": \"type\",\n \"polymorphic_identity\": \"employee\",\n }\n },\n },\n \"Manager\": {\n \"allOf\": [\n {\"$ref\": \"#/components/schemas/Employee\"},\n {\n \"x-inherits\": \"Employee\",\n \"type\": \"object\",\n \"properties\": {\"manager_data\": {\"type\": \"string\"}},\n \"x-kwargs\": {\n \"__mapper_args__\": {\"polymorphic_identity\": \"manager\"}\n },\n },\n ]\n },\n \"Engineer\": {\n \"allOf\": [\n {\"$ref\": \"#/components/schemas/Employee\"},\n {\n \"x-inherits\": \"Employee\",\n \"type\": \"object\",\n \"properties\": {\"engineer_info\": {\"type\": \"string\"}},\n \"x-kwargs\": {\n \"__mapper_args__\": {\"polymorphic_identity\": \"engineer\"}\n },\n },\n ]\n },\n }\n }\n }\n # Creating model factory\n base = declarative.declarative_base()\n model_factory = open_alchemy.init_model_factory(spec=spec, base=base)\n employee = model_factory(name=\"Employee\")\n manager = model_factory(name=\"Manager\")\n engineer = model_factory(name=\"Engineer\")\n\n # Creating models\n base.metadata.create_all(engine)\n # Creating instance of models\n employee_instance = employee(id=1, name=\"employee 1\")\n manager_instance = manager(id=2, name=\"employee 2\", manager_data=\"manager data 2\")\n engineer_instance = engineer(\n id=3, name=\"employee 3\", engineer_info=\"engineer 
info 3\"\n )\n session = sessionmaker()\n session.add(employee_instance)\n session.add(manager_instance)\n session.add(engineer_instance)\n session.flush()\n\n # Querying session for employee\n queried_employee = session.query(employee).first()\n assert queried_employee.id == 1\n assert queried_employee.name == \"employee 1\"\n assert queried_employee.type == \"employee\"\n # Querying session for manager\n queried_manager = session.query(manager).first()\n assert queried_manager.id == 2\n assert queried_manager.name == \"employee 2\"\n assert queried_manager.type == \"manager\"\n assert queried_manager.manager_data == \"manager data 2\"\n # Querying session for engineer\n queried_engineer = session.query(engineer).first()\n assert queried_engineer.id == 3\n assert queried_engineer.name == \"employee 3\"\n assert queried_engineer.type == \"engineer\"\n assert queried_engineer.engineer_info == \"engineer info 3\"", "def testGetModelsData(self):\n models = models_logic._getModelsData()\n self.assertTrue(models)", "def load_test_budget_manager(cls) -> BudgetManager:\n manager = BudgetManager()\n for category in list(BudgetCategory):\n budget = Budget(category, 100)\n manager.add_budget(budget)\n return manager", "def get_db():\n # this is a bit of a hack, since it assumes all the models talk to the same\n # db. that said a lot of our code relies on that assumption.\n # this import is here because of annoying dependencies\n return Database(settings.COUCH_DATABASE)", "def test_save(self):\n\n base_class = BaseModel()", "def get_manager(self, name):\n\n if name == \"control\":\n manager = self._control_manager\n elif name == \"alarm\":\n manager = self._alarm_manager\n elif name == \"state\":\n manager = self._machine_manager\n else:\n manager = self._function_manager\n\n return manager", "def state_store(decoy: Decoy) -> StateStore:\n return decoy.mock(cls=StateStore)", "def setUp(self):\n self.model = sqlite_model()\n self.model.create_new(':memory:')", "def get_model(self):\n return QueryS", "def fake_model():\n app.config['FAKE_MODEL'] = True", "def patch_mongo(monkeypatch):\n mock_db = mongomock.MongoClient().todo_database\n\n def fake_get_db():\n return mock_db\n\n monkeypatch.setattr(main.data_access, \"get_db\", fake_get_db)", "def get_main_model(self):\n return self", "def setUp(self):\n self.data = DatabaseIntermediary()", "def model_name(self) -> str:\n return \"mock-model-name\"", "def default_create_test_data(self, db_name):\n anchor1 = AppDeleteAnchor1.objects.using(db_name).create(value=100)\n anchor2 = AppDeleteAnchor1.objects.using(db_name).create(value=100)\n\n model = AppDeleteBaseModel.objects.using(db_name).create(\n char_field='test',\n int_field=1,\n anchor_fk=anchor1)\n model.m2m.add(anchor2)", "def initialize_model(self):\n model = self.model_class()\n return model", "def mock_item(title='Item One', author='Author One', location='Location One'):\n\n\titem_data = {'title': title, 'author': author, 'location': location}\n\n\treturn models.new_item(item_data), title, author, location", "def test_create_get_delete_one_model(self):\n self.assertEqual(0, len(self.model_manager.models()),\n 'Expecting no models to exist')\n handle = self.model_manager.create(name='test_model')\n self.assertEqual([handle],\n [m.handle for m in self.model_manager.models()],\n 'Expecting the created model to be listed')\n self.model_manager.delete(handle)\n self.assertEqual(0, len(self.model_manager.models()),\n 'Expecting no models to exist after deletion')", "def mock_dataset_with_cache_dir():\n 
conf_file, working_dir = _create_temp_work_dir()\n with patch.object(Configuration, 'find_default_config', lambda self: conf_file):\n im = InventoryManager(conf_file)\n ds = im.create_dataset(USERNAME, USERNAME, 'dataset-1', description=\"my dataset 1\",\n storage_type=\"gigantum_object_v1\")\n\n yield ds, working_dir, ds.git.repo.head.commit.hexsha\n shutil.rmtree(working_dir)", "def core_config_mock(request):\n from unittest import mock\n from sqlalchemy import Column\n from rucio.common.utils import generate_uuid\n from rucio.db.sqla.models import String, PrimaryKeyConstraint\n from rucio.db.sqla.session import get_session\n\n # Get the fixture parameters\n table_content = []\n params = __get_fixture_param(request)\n if params:\n table_content = params.get(\"table_content\", table_content)\n\n InMemoryConfig = __create_in_memory_db_table(\n 'configs_' + generate_uuid(),\n Column('section', String(128)),\n Column('opt', String(128)),\n Column('value', String(4000)),\n table_args=(PrimaryKeyConstraint('section', 'opt', name='CONFIGS_PK'),),\n )\n\n # Fill the table with the requested mock data\n session = get_session()()\n for section, option, value in (table_content or []):\n InMemoryConfig(section=section, opt=option, value=value).save(flush=True, session=session)\n session.commit()\n\n with mock.patch('rucio.core.config.models.Config', new=InMemoryConfig):\n yield", "def manager():\n return gilded_rose.GildedRose(fixtures.FIXTURES[0], fixtures.RULES)", "def model():\n global _cached_model\n if _cached_model:\n return _cached_model\n model = models.Root(os.path.join(app.root_path, '..'))\n if not app.config['DEBUG']:\n _cached_model = model\n return model", "def test_customer_manager(self):\n \n class BookManager(RestManager):\n def filter_on_author(self, author_resource):\n return self.params([('author', author_resource),])\n \n class Book(RestObject):\n objects = BookManager()\n class Meta:\n list = (r'^book/$', 'book_set')\n item = r'^book/(?P<id>\\d)$'\n \n class Author(RestObject):\n class Meta:\n item = r'^book/(?P<id>\\d)$'\n\n\n self.assertTrue(isinstance(Book.objects, BookManager))\n self.assertTrue(hasattr(Book.objects, 'filter_on_author'))\n self.assertTrue(Book.objects.object_class, Book)\n\n self.assertTrue(isinstance(Author.objects, RestManager))\n self.assertTrue(Author.objects.object_class, Author)\n\n self.assertNotEqual(Book.objects, Author.objects)\n\n book = Book()\n # Cannot test AttributeError with self.assertRaises\n try:\n book.objects.all()\n except AttributeError, e:\n self.assertEqual('%s' % e, 'Manager is not accessible via Book instances')", "def test_get_singular() -> None:\n mock_session = UnifiedAlchemyMagicMock()\n mock_session.add(Model(pk1=\"123\", name=\"test\"))\n user = mock_session.query(Model).get(\"123\")\n assert user is not None", "def _retrieve_manager(provider_id):\n provider = _retrieve_provider(provider_id)\n MachineManager = provider.get_provider_manager()\n return MachineManager(provider)", "def __init__(self, dataset_name, teacher_model, students_model):\n self.data_manager = DataManager(dataset_name)\n self.dataset_name = dataset_name\n self.teacher_model = teacher_model\n self.student_model = students_model", "def test_test_client_model(self):\n pass", "def test_get_model_method_with_missing_model(self):\n # arrange\n model_manager = ModelManager()\n\n model_manager.load_model(\"tests.mocks.MLModelMock\")\n\n # act\n exception_raised = False\n exception_message = \"\"\n model = None\n try:\n model = 
model_manager.get_model(qualified_name=\"asdf\")\n except Exception as e:\n exception_raised = True\n exception_message = str(e)\n\n # assert\n self.assertTrue(exception_raised)\n self.assertTrue(exception_message == \"Instance of model 'asdf' not found in ModelManager.\")", "def test_dummydb_basic(self):\n db = DummyDB()", "def setUp(self):\n # Every test needs a client.\n self.client = Client()\n self.client, self.response = client_login(self.client, callback=reverse('tms_timesheets'))\n self.datetime = datetime.datetime.now\n self.user = new_test_user()\n self.job_model = any_model(Job, is_active=True)\n try:\n self.employee = any_model(Employee, user = self.user)\n except:\n #employee with this user already exists\n self.employee = Employee.objects.get(user=self.user)\n self.timesheet1 = any_model(Timesheet,\n employee=self.employee,\n is_submitted=False,\n is_billed=False,\n comment='Django_tests comment for timesheet model1')\n self.expense1 = any_model(Expense,\n comment='Django_tests comment for expense model1')", "def getDatastore(cls, d_type, log_level=logging.INFO):\n database_env_dir = '{}/{}_env'.format(DATASTORE_DIR, d_type)\n sys.path.append(database_env_dir)\n\n module_name = '{}_interface'.format(d_type)\n handle, path, description = imp.find_module(module_name)\n\n try:\n db_module = imp.load_module(module_name, handle, path, description)\n datastore = db_module.DatastoreProxy(log_level=log_level)\n finally:\n if handle:\n handle.close()\n\n return datastore", "def set_fake_model(cls, model):\n cls.fakes_model = model", "def test_model_class(self):\n db = Alchy(self.app)\n\n self.assertEquals(\n db.Model.__dict__['__init__'], alchy.model.ModelBase.__init__)\n self.assertIsInstance(\n db.Model.__dict__['query'], alchy.query.QueryProperty)", "def test_get_model_metadata_method_with_missing_model(self):\n # arrange\n model_manager = ModelManager()\n\n model_manager.load_model(\"tests.mocks.MLModelMock\")\n\n # act\n excpeption_raised = False\n exception_message = None\n try:\n model_metadata = model_manager.get_model_metadata(qualified_name=\"asdf\")\n except Exception as e:\n excpeption_raised = True\n exception_message = str(e)\n\n # assert\n self.assertTrue(excpeption_raised)\n self.assertTrue(exception_message == \"Instance of model 'asdf' not found in ModelManager.\")", "def __get_test_instance(username, user_id, group=None):\n with patch(\"ownbot.user.UserManager\") as usrmgr_mock:\n return User(username, user_id, group=group), usrmgr_mock", "def get_model(*args):\n return Model()", "def real_model(request):\n return request.config.option.real_model", "def getManager(self):\n return self._manager", "def mock(self, base_cls=None):\n if base_cls:\n return self.mocker.mock(base_cls)\n return self.mocker.mock()", "def mock_engine_client(decoy: Decoy) -> EngineClient:\n return decoy.mock(cls=EngineClient)", "def test_custom_querysets_managers_directly(self):\n\n class CustomQuerySetManager(QuerySetManager):\n @staticmethod\n def get_queryset(doc_cls, queryset):\n return queryset(is_published=True)\n\n class Post(Document):\n is_published = BooleanField(default=False)\n published = CustomQuerySetManager()\n\n Post.drop_collection()\n\n Post().save()\n Post(is_published=True).save()\n assert Post.objects.count() == 2\n assert Post.published.count() == 1\n\n Post.drop_collection()", "def get_model():\n global model\n if model is None:\n model = AppModel()\n model.load_resources()\n return model", "def setUp(self):\n self.model = ModelBase(\n '__TestModel__' + 
self.mixin.__name__,\n (self.mixin,),\n {'__module__': self.mixin.__module__}\n )\n\n with connection.schema_editor() as schema_editor:\n schema_editor.create_model(self.model)", "def mock_sync_hardware_api(decoy: Decoy) -> SyncHardwareAPI:\n return decoy.mock(cls=SyncHardwareAPI)", "def mock_msm(temp_dir):\n msm_mock = Mock(spec=MycroftSkillsManager)\n msm_mock.skills_dir = str(temp_dir)\n msm_mock.platform = 'test_platform'\n msm_mock.lock = Mock()\n msm_mock.repo = Mock(spec=SkillRepo)\n msm_mock.repo.get_default_skill_names = Mock(return_value=[\n ('default', ['time', 'weather']),\n ('test_platform', ['test_skill'])\n ])\n msm_mock.device_skill_state = dict(\n skills=[\n dict(name='test_skill', beta=False)\n ]\n )\n skill = Mock()\n skill.is_local = True\n skill.path = str(temp_dir)\n skill.skill_gid = 'test_skill|99.99'\n skill.meta_info = dict(display_name='Test Skill')\n msm_mock.list_all_defaults.return_value = [skill]\n msm_mock.default_skills = dict(test_skill=skill)\n msm_mock.all_skills = [skill]\n msm_mock.local_skills = dict(test_skill=skill)\n\n return msm_mock", "def test_model_manager_does_not_allow_duplicate_qualified_names(self):\n # arrange\n model_manager = ModelManager()\n\n # act\n # loading the first instance of the model object\n model_manager.load_model(\"tests.mocks.MLModelMock\")\n\n exception_raised = False\n exception_message = \"\"\n try:\n # loading it again\n model_manager.load_model(\"tests.mocks.MLModelMock\")\n except Exception as e:\n exception_raised = True\n exception_message = str(e)\n\n # assert\n self.assertTrue(exception_raised)\n self.assertTrue(exception_message == \"A model with the same qualified name is already in the ModelManager singleton.\")", "def manager_factory(manager_type):\n return {\n 'web': WebManager,\n 'github': GitHubManager,\n 'apkdownloadmirror': ApkDownloadMirrorManager,\n 'apkplz': ApkPlzManager,\n }[manager_type]", "def test_model():\n pass", "def mock_bestbuy():\n bestbuy = BestBuy()\n bestbuy.potato1 = 'batata baroa'\n bestbuy.potato2 = 'batata inglesa'\n bestbuy.potato_number = 666\n return bestbuy", "def setUpClass(cls):\n super().setUpClass() # creates the first object\n # create the second onject of the same model\n cls.second_object = TestModel.objects.create(\n name=\"SecondTestObject\"\n )\n # create an object of anther model\n cls.another_object = AnotherTestModel.objects.create(\n name=\"AnotherTestObject\"\n )" ]
[ "0.63638645", "0.62232745", "0.5958567", "0.5882889", "0.5777589", "0.57463723", "0.5660761", "0.5626389", "0.5614345", "0.5575118", "0.55164546", "0.5475766", "0.54707235", "0.54639786", "0.54465616", "0.5437837", "0.5368954", "0.53603864", "0.5357883", "0.5350542", "0.5341743", "0.5313242", "0.5307788", "0.5295979", "0.52796817", "0.5273456", "0.52717143", "0.5263591", "0.5258852", "0.52574456", "0.52463335", "0.5217076", "0.52156377", "0.5209094", "0.5188622", "0.51756763", "0.5170967", "0.5156873", "0.51467764", "0.51441026", "0.51208866", "0.5118613", "0.5103518", "0.50874007", "0.50741494", "0.5066411", "0.5052257", "0.5049291", "0.504777", "0.50443184", "0.50422126", "0.5039663", "0.50390625", "0.5032177", "0.50224257", "0.5002292", "0.49953845", "0.49940035", "0.49804625", "0.4971761", "0.49605992", "0.4960127", "0.49520963", "0.49478224", "0.49450552", "0.49366546", "0.49316543", "0.49311993", "0.4912256", "0.49076498", "0.4907481", "0.49067765", "0.4906468", "0.49049583", "0.48991048", "0.4898627", "0.48958865", "0.48838055", "0.48695853", "0.4868423", "0.48635843", "0.48617697", "0.4853745", "0.48516253", "0.4847353", "0.48413196", "0.48406175", "0.48372918", "0.48309132", "0.48219937", "0.48217314", "0.48211074", "0.48176894", "0.48134586", "0.48124596", "0.48091647", "0.47951755", "0.47907835", "0.4782463", "0.47813028" ]
0.6330217
1
Test for the general active_parameter_manager class.
def test_general_apm(): components = { "scale": mock_component(), "decay": mock_component(), "absorption": mock_component(), } apm = active_parameter_manager(components, ["scale", "decay"]) assert "decay" in apm.components_list assert "scale" in apm.components_list assert "absorption" not in apm.components_list assert apm.n_active_params == ( components["scale"].n_params + components["decay"].n_params ) n_cumul = 0 for component in apm.components: assert apm.components[component]["n_params"] == components[component].n_params assert apm.components[component]["start_idx"] == n_cumul assert ( apm.components[component]["end_idx"] == n_cumul + apm.components[component]["n_params"] ) n_cumul += apm.components[component]["n_params"] apm.set_param_vals(flex.double([2.0, 1.5])) assert apm.get_param_vals() == flex.double([2.0, 1.5]) # Test params were updated in components assert list(components["scale"].free_parameters) == [2.0] assert list(components["decay"].free_parameters) == [1.5] # Test selection of parameters decay_params = apm.select_parameters("decay") assert len(decay_params) == 1 assert decay_params[0] == 1.5 # Test calculate model state uncertainties var_cov = flex.double([1.0, 0.5, 0.5, 2.0]) var_cov.reshape(flex.grid(2, 2)) apm.calculate_model_state_uncertainties(var_cov) assert components["scale"].var_cov_matrix[0, 0] == 1.0 assert components["decay"].var_cov_matrix[0, 0] == 2.0 # Test set param esds. apm.set_param_esds(flex.double([0.1, 0.2])) assert components["scale"].free_parameter_esds == flex.double([0.1]) assert components["decay"].free_parameter_esds == flex.double([0.2])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_overridable_parameter() -> None:\n param_dict = ParamClass.get_overridable_parameters()\n assert \"name\" in param_dict\n assert \"flag\" in param_dict\n assert \"not_flag\" in param_dict\n assert \"seed\" in param_dict\n assert \"number\" in param_dict\n assert \"integers\" in param_dict\n assert \"optional_int\" in param_dict\n assert \"optional_float\" in param_dict\n assert \"tuple1\" in param_dict\n assert \"int_tuple\" in param_dict\n assert \"enum\" in param_dict\n assert \"readonly\" not in param_dict\n assert \"_non_override\" not in param_dict\n assert \"constant\" not in param_dict", "def test_scaling_active_parameter_manager():\n components_2 = {\"1\": mock_scaling_component(2), \"2\": mock_scaling_component(2)}\n scaling_apm = scaling_active_parameter_manager(components_2, [\"1\"])\n assert list(scaling_apm.constant_g_values[0]) == list(\n components_2[\"2\"].calculate_scales()\n )\n assert len(scaling_apm.constant_g_values) == 1\n assert scaling_apm.n_obs == [2]\n\n # Test that no constant_g_values if both components selected\n scaling_apm = scaling_active_parameter_manager(components_2, [\"1\", \"2\"])\n assert scaling_apm.constant_g_values is None\n\n # Check that one can't initialise with an unequal number of reflections,\n # either within the selection or overall.\n with pytest.raises(AssertionError):\n components_2 = {\"1\": mock_scaling_component(2), \"2\": mock_scaling_component(1)}\n scaling_apm = scaling_active_parameter_manager(components_2, [\"1\", \"2\"])\n with pytest.raises(AssertionError):\n components_2 = {\"1\": mock_scaling_component(2), \"2\": mock_scaling_component(1)}\n scaling_apm = scaling_active_parameter_manager(components_2, [\"1\"])\n\n data_manager = mock_data_manager(components_2)\n pmg = ScalingParameterManagerGenerator(\n [data_manager], target=ScalingTarget(), mode=\"concurrent\"\n )\n assert isinstance(pmg.apm_type, type(scaling_active_parameter_manager))", "def check_params(self):\n raise NotImplementedError", "def test_class_callparams(self):\n\n @Configurable(\n conf=[\n Parameter('test0', value=True),\n Parameter('test1', value=False)\n ]\n )\n class Test(object):\n\n def __init__(self, test0=None):\n\n super(Test, self).__init__()\n\n self.test0 = test0\n\n test = Test()\n\n self.assertTrue(test.test0)\n self.assertFalse(test.test1)", "def test_options(self):\n for module in Parameters.__modules__:\n m = getattr(Parameters, module)\n if type(m) == AnyOf:\n for o in m.options:\n setattr(self.p, module, o)\n Parameters(1, **{module: o})", "def _check_params(self):\n pass", "def Check(self, parameters):", "def test_parameters(self):\n self.assert_initialize_driver()\n #reply = self.driver_client.cmd_dvr('get_resource', Parameter.ALL)\n #self.assert_driver_parameters(reply, verify_sample_interval=True)", "def test_configure_to_reconfigure_param(self):\n\n class ToConfigure(object):\n \"\"\"Class to configure.\"\"\"\n\n def __init__(self):\n super(ToConfigure, self).__init__()\n self.test = None\n\n target = ToConfigure()\n\n param = 'test'\n\n conf = configuration(category('TEST', Parameter(param, value=True)))\n\n self.configurable.configure(conf=conf, targets=[target])\n self.assertTrue(target.test)", "def test_all_params(self):\n persistence_helper = PersistenceHelper(use_riak=True, is_sync=True)\n self.assertEqual(persistence_helper.use_riak, True)\n self.assertEqual(persistence_helper.is_sync, True)", "def _load_parameter(self):", "def test_provider(self):\n msg = 'Wrong number of processing algorithm loaded.'\n 
self.assertEqual(len(self.provider.alglist), 6, msg)\n\n msg = 'InaSAFE should be activated by default in Processing.'\n self.assertEqual(self.provider.activate, True, msg)\n\n msg = 'Wrong processing provide.'\n for algorithm in self.provider.alglist:\n self.assertEqual(algorithm.provider, self.provider, msg)", "def _validate_params(self, request_set, target_set=None, context=None):\n\n # Perform first-pass validation in Function.__init__():\n # - returns full set of params based on subclass paramClassDefaults\n super(Mechanism, self)._validate_params(request_set,target_set,context)\n\n params = target_set\n\n #region VALIDATE TIME SCALE\n try:\n param_value = params[TIME_SCALE]\n except KeyError:\n if COMMAND_LINE in context:\n pass\n else:\n self.timeScale = timeScaleSystemDefault\n else:\n if isinstance(param_value, TimeScale):\n self.timeScale = params[TIME_SCALE]\n else:\n if self.prefs.verbosePref:\n print(\"Value for {0} ({1}) param of {2} must be of type {3}; default will be used: {4}\".\n format(TIME_SCALE, param_value, self.name, type(TimeScale), timeScaleSystemDefault))\n #endregion\n\n #region VALIDATE INPUT STATE(S)\n\n # MODIFIED 6/10/16\n # FIX: SHOULD CHECK LENGTH OF INPUT_STATES PARAM (LIST OF NAMES OR SPECIFICATION DICT) AGAINST LENGTH OF\n # FIX: self.variable 2D ARRAY AND COMPARE variable SPECS, IF PROVIDED, WITH CORRESPONDING ELEMENTS OF\n # FIX: self.variable 2D ARRAY\n try:\n param_value = params[INPUT_STATES]\n\n except KeyError:\n if COMMAND_LINE in context:\n pass\n else:\n # INPUT_STATES not specified:\n # - set to None, so that it is set to default (self.variable) in instantiate_inputState\n # - if in VERBOSE mode, warn in instantiate_inputState, where default value is known\n params[INPUT_STATES] = None\n\n else:\n # INPUT_STATES is specified, so validate:\n # If it is a single item or a non-OrderedDict, place in a list (for use here and in instantiate_inputState)\n if not isinstance(param_value, (list, OrderedDict)):\n param_value = [param_value]\n # Validate each item in the list or OrderedDict\n # Note:\n # * number of inputStates is validated against length of the owner mechanism's execute method variable (EMV)\n # in instantiate_inputState, where an inputState is assigned to each item (value) of the EMV\n i = 0\n for key, item in param_value if isinstance(param_value, dict) else enumerate(param_value):\n from PsyNeuLink.Components.States.InputState import InputState\n # If not valid...\n if not ((isclass(item) and (issubclass(item, InputState) or # InputState class ref\n issubclass(item, Projection))) or # Project class ref\n isinstance(item, InputState) or # InputState object\n isinstance(item, dict) or # InputState specification dict\n isinstance(item, ParamValueProjection) or # ParamValueProjection tuple\n isinstance(item, str) or # Name (to be used as key in inputStates dict)\n iscompatible(item, **{kwCompatibilityNumeric: True})): # value\n # set to None, so it is set to default (self.variable) in instantiate_inputState\n param_value[key] = None\n if self.prefs.verbosePref:\n print(\"Item {0} of {1} param ({2}) in {3} is not a\"\n \" InputState, specification dict or value, nor a list of dict of them; \"\n \"variable ({4}) of execute method for {5} will be used\"\n \" to create a default outputState for {3}\".\n format(i,\n INPUT_STATES,\n param_value,\n self.__class__.__name__,\n self.variable,\n self.execute.__self__.name))\n i += 1\n params[INPUT_STATES] = param_value\n #endregion\n\n #region VALIDATE EXECUTE METHOD PARAMS\n try:\n 
function_param_specs = params[FUNCTION_PARAMS]\n except KeyError:\n if COMMAND_LINE in context:\n pass\n elif self.prefs.verbosePref:\n print(\"No params specified for {0}\".format(self.__class__.__name__))\n else:\n if not (isinstance(function_param_specs, dict)):\n raise MechanismError(\"{0} in {1} must be a dict of param specifications\".\n format(FUNCTION_PARAMS, self.__class__.__name__))\n # Validate params\n from PsyNeuLink.Components.States.ParameterState import ParameterState\n for param_name, param_value in function_param_specs.items():\n try:\n default_value = self.paramInstanceDefaults[FUNCTION_PARAMS][param_name]\n except KeyError:\n raise MechanismError(\"{0} not recognized as a param of execute method for {1}\".\n format(param_name, self.__class__.__name__))\n if not ((isclass(param_value) and\n (issubclass(param_value, ParameterState) or\n issubclass(param_value, Projection))) or\n isinstance(param_value, ParameterState) or\n isinstance(param_value, Projection) or\n isinstance(param_value, dict) or\n isinstance(param_value, ParamValueProjection) or\n iscompatible(param_value, default_value)):\n params[FUNCTION_PARAMS][param_name] = default_value\n if self.prefs.verbosePref:\n print(\"{0} param ({1}) for execute method {2} of {3} is not a ParameterState, \"\n \"projection, ParamValueProjection, or value; default value ({4}) will be used\".\n format(param_name,\n param_value,\n self.execute.__self__.componentName,\n self.__class__.__name__,\n default_value))\n #endregion\n # FIX: MAKE SURE OUTPUT OF EXECUTE FUNCTION / SELF.VALUE IS 2D ARRAY, WITH LENGTH == NUM OUTPUT STATES\n\n #region VALIDATE OUTPUT STATE(S)\n\n # FIX: MAKE SURE # OF OUTPUTS == LENGTH OF OUTPUT OF EXECUTE FUNCTION / SELF.VALUE\n try:\n param_value = params[OUTPUT_STATES]\n\n except KeyError:\n if COMMAND_LINE in context:\n pass\n else:\n # OUTPUT_STATES not specified:\n # - set to None, so that it is set to default (self.value) in instantiate_outputState\n # Notes:\n # * if in VERBOSE mode, warning will be issued in instantiate_outputState, where default value is known\n # * number of outputStates is validated against length of owner mechanism's execute method output (EMO)\n # in instantiate_outputState, where an outputState is assigned to each item (value) of the EMO\n params[OUTPUT_STATES] = None\n\n else:\n # OUTPUT_STATES is specified, so validate:\n # If it is a single item or a non-OrderedDict, place in a list (for use here and in instantiate_outputState)\n if not isinstance(param_value, (list, OrderedDict)):\n param_value = [param_value]\n # Validate each item in the list or OrderedDict\n i = 0\n for key, item in param_value if isinstance(param_value, dict) else enumerate(param_value):\n from PsyNeuLink.Components.States.OutputState import OutputState\n # If not valid...\n if not ((isclass(item) and issubclass(item, OutputState)) or # OutputState class ref\n isinstance(item, OutputState) or # OutputState object\n isinstance(item, dict) or # OutputState specification dict\n isinstance(item, str) or # Name (to be used as key in outputStates dict)\n iscompatible(item, **{kwCompatibilityNumeric: True})): # value\n # set to None, so it is set to default (self.value) in instantiate_outputState\n param_value[key] = None\n if self.prefs.verbosePref:\n print(\"Item {0} of {1} param ({2}) in {3} is not a\"\n \" OutputState, specification dict or value, nor a list of dict of them; \"\n \"output ({4}) of execute method for {5} will be used\"\n \" to create a default outputState for {3}\".\n format(i,\n 
OUTPUT_STATES,\n param_value,\n self.__class__.__name__,\n self.value,\n self.execute.__self__.name))\n i += 1\n params[OUTPUT_STATES] = param_value", "def _check_parameters_support(self, parameters=()):\n for parameter in parameters:\n assert parameter in self._supported, \"Estimation %s is not implemented yet\" % parameter", "def test_direct_access_params(self):\n self.assertTrue(self.driver._protocol._param_dict.get(\"foo\"), 10)\n self.assertTrue(self.driver._protocol._param_dict.get(\"bar\"), 15)\n # use real driver sets here, the direct poke of the param dict is just\n # a test-with-base-class thing\n self.driver._protocol._param_dict.update(\"bar=20\")\n self.assertTrue(self.driver._protocol._param_dict.get(\"bar\"), 20)\n\n # pretend to go into direct access mode,\n running_config = self.driver._protocol.get_cached_config()\n # make some changes to both, (foo to 100, bar to 200)\n self.driver._protocol._param_dict.update(\"foo=100\")\n self.driver._protocol._param_dict.update(\"bar=200\")\n # its like we came out of DA mode\n self.driver.restore_direct_access_params(running_config)\n\n # confirm that the default values were set back appropriately.\n self.assertTrue(self.driver._protocol._param_dict.get(\"foo\"), 10)\n self.assertTrue(self.driver._protocol._param_dict.get(\"bar\"), 200)", "def supported_parameters(cls):\n raise NotImplementedError()", "def test_checkCustoms(self):\n self.failUnlessEqual(self.nice.opts['myflag'], \"PONY!\")\n self.failUnlessEqual(self.nice.opts['myparam'], \"Tofu WITH A PONY!\")", "def test_parameter_read(request):\n print(\"\\n--Starting:\", request.node.name)\n\n params = Parameters()\n print(params.__dict__)\n ## todo write an assert that actually tests something", "def test_checkParameters(self):\n self.failUnlessEqual(self.nice.opts['long'], \"Alpha\")\n self.failUnlessEqual(self.nice.opts['another'], \"Beta\")\n self.failUnlessEqual(self.nice.opts['longonly'], \"noshort\")\n self.failUnlessEqual(self.nice.opts['shortless'], \"Gamma\")", "def define_parameters(self):", "def test_set_params():\n\n tpot_obj = TPOTClassifier()\n assert tpot_obj.set_params() is tpot_obj", "def test_multi_apm():\n\n components_1 = {\n \"scale\": mock_component(),\n \"decay\": mock_component(),\n \"absorption\": mock_component(),\n }\n components_2 = {\"scale\": mock_component(), \"decay\": mock_component()}\n\n multi_apm = multi_active_parameter_manager(\n ScalingTarget(),\n [components_1, components_2],\n [[\"scale\", \"decay\"], [\"scale\"]],\n active_parameter_manager,\n )\n\n # Test correct setup of apm_list attribute.\n for apm in multi_apm.apm_list:\n assert isinstance(apm, active_parameter_manager)\n assert len(multi_apm.apm_list) == 2\n assert multi_apm.components_list == [\"scale\", \"decay\", \"scale\"]\n assert multi_apm.n_active_params == 3\n assert multi_apm.apm_data[0] == {\"start_idx\": 0, \"end_idx\": 2}\n assert multi_apm.apm_data[1] == {\"start_idx\": 2, \"end_idx\": 3}\n\n # Test parameter selection.\n multi_apm.set_param_vals(flex.double([3.0, 2.5, 2.0]))\n assert multi_apm.get_param_vals() == flex.double([3.0, 2.5, 2.0])\n assert multi_apm.select_parameters(0) == flex.double([3.0, 2.5])\n assert multi_apm.select_parameters(1) == flex.double([2.0])\n\n # Test setting parameter esds.\n multi_apm.set_param_esds(flex.double([0.1, 0.2, 0.3]))\n assert components_1[\"scale\"].free_parameter_esds == flex.double([0.1])\n assert components_1[\"decay\"].free_parameter_esds == flex.double([0.2])\n assert components_2[\"scale\"].free_parameter_esds == 
flex.double([0.3])\n\n # Test setting var_cov matrices for each component.\n var_cov = flex.double([1.0, 0.5, 0.5, 0.5, 2.0, 0.5, 0.5, 0.5, 3.0])\n var_cov.reshape(flex.grid(3, 3))\n multi_apm.calculate_model_state_uncertainties(var_cov)\n assert components_1[\"scale\"].var_cov_matrix[0, 0] == 1.0\n assert components_1[\"decay\"].var_cov_matrix[0, 0] == 2.0\n assert components_2[\"scale\"].var_cov_matrix[0, 0] == 3.0", "def _validate_params(self):\n raise NotImplementedError('Must be implemented in subclasses.')", "def test_ptype(self):\n\n @Configurable(\n conf=[\n Parameter('test', ptype=int, svalue='1'),\n Parameter('ex', svalue='2', ptype=int)\n ]\n )\n class Test(object):\n\n def __init__(self, test=None, *args, **kwargs):\n\n super(Test, self).__init__(*args, **kwargs)\n\n self.testy = test\n\n test = Test()\n\n self.assertEqual(test.testy, 1)\n self.assertFalse(hasattr(test, 'test'))\n self.assertEqual(test.ex, 2)\n\n applyconfiguration(\n targets=[test], conf=[\n Parameter('test', svalue='2'),\n Parameter('ex', svalue='3')\n ]\n )\n\n self.assertEqual(test.testy, 1)\n self.assertEqual(test.test, 2)\n self.assertEqual(test.ex, 3)\n\n Configurable.get_annotations(test)[0].applyconfiguration(\n targets=[test], conf=[\n Parameter('test', svalue='3'),\n Parameter('ex', svalue='4', ptype=bool)\n ]\n )\n\n self.assertEqual(test.testy, 1)\n self.assertEqual(test.test, 3)\n self.assertTrue(test.ex)", "def test_set_virtualization_realm_active(self):\n pass", "def test_get_context_parameter(params, expected):\n assert get_context_parameter(params) == expected", "def potential_parameters(cls):\n raise NotImplementedError()", "def test_act_on_settings(self):\n pass # TODO(tlarsen)", "def test_act_on_settings(self):\n pass # TODO(tlarsen)", "def autotuned_kernel_parameter_check(instance, activate, all_optional=False):\n instance.tune_kernel_parameters()\n\n initial_kernel_parameters = instance.kernel_parameters\n\n if isinstance(instance._simulation.device, hoomd.device.CPU):\n # CPU instances have no parameters and are always complete.\n assert initial_kernel_parameters == {}\n assert instance.is_tuning_complete\n else:\n # GPU instances have parameters and start incomplete.\n assert initial_kernel_parameters != {}\n\n # is_tuning_complete is True when all tuners are optional.\n if not all_optional:\n assert not instance.is_tuning_complete\n\n activate()\n\n assert instance.kernel_parameters != initial_kernel_parameters\n\n # Note: It is not practical to automatically test that\n # `is_tuning_complete` is eventually achieved as failure results in an\n # infinite loop. Also, some objects (like neighbor lists) require\n # realistic simulation conditions to test adequately. 
`hoomd-benchmarks`\n # tests that tuning completes in all benchmarks.\n\n # Ensure that we can set parameters.\n instance.kernel_parameters = initial_kernel_parameters\n activate()\n assert instance.kernel_parameters == initial_kernel_parameters", "def param_check(self, params, func_name):\n help = None\n fun = getattr(self, func_name, None)\n if fun and getattr(fun, '__cement_meta__', None):\n help = fun.__cement_meta__['help']\n\n for p in params:\n param = getattr(self.app.pargs, p, None)\n if param is None:\n log.print_err(\"param {} miss, see help:\".format(p))\n if help:\n print(help)\n return False\n return True", "def checkParameters(self):\n EDVerbose.DEBUG(\"EDPluginControlAbsorptionv0_1.checkParameters\")\n self.checkMandatoryParameters(self.getDataInput(), \"Data Input is None\")", "def check_parameters():\r\n for par in PARAM:\r\n if isinstance(par, ExperimentFrame):\r\n EXP.change_variable(**par())\r\n else:\r\n EXP.change_variable(**par)", "def test_can_process(self):\n self.assertTrue(self.adapter.can_process(''))", "def _check_helper(self, value, raise_exceptions: bool = True) -> bool:\n return super(ParameterMixin, self)._check_helper(\n value, raise_exceptions=raise_exceptions\n )", "def _get_class():\n return ASParameters", "def test_cls_constants(self):\n for k, v in {\n 'empty': empty,\n 'POSITIONAL_ONLY': POSITIONAL_ONLY,\n 'POSITIONAL_OR_KEYWORD': POSITIONAL_OR_KEYWORD,\n 'VAR_POSITIONAL': VAR_POSITIONAL,\n 'KEYWORD_ONLY': KEYWORD_ONLY,\n 'VAR_KEYWORD': VAR_KEYWORD,\n }.items():\n assert getattr(FParameter, k) is v", "def check(self):\n if 'MISFIT' not in PAR:\n setattr(PAR, 'MISFIT', 'Waveform')\n\n if 'CHANNELS' not in PAR:\n raise ParameterError(PAR, 'CHANNELS')\n\n if 'READER' not in PAR:\n raise ParameterError(PAR, 'READER')\n\n if 'WRITER' not in PAR:\n setattr(PAR, 'WRITER', PAR.READER)\n\n if 'NORMALIZE' not in PAR:\n setattr(PAR, 'NORMALIZE', True)\n\n # mute settings\n if 'MUTE' not in PAR:\n setattr(PAR, 'MUTE', False)\n\n if 'MUTESLOPE' not in PAR:\n setattr(PAR, 'MUTESLOPE', 0.)\n\n if 'MUTECONST' not in PAR:\n setattr(PAR, 'MUTECONST', 0.)\n\n # filter settings\n if 'BANDPASS' not in PAR:\n setattr(PAR, 'BANDPASS', False)\n\n if 'FREQLO' not in PAR:\n setattr(PAR, 'FREQLO', 0.)\n\n if 'FREQHI' not in PAR:\n setattr(PAR, 'FREQHI', 0.)\n\n # assertions\n if PAR.READER not in dir(readers):\n print msg.ReaderError\n raise ParameterError()\n\n if PAR.WRITER not in dir(writers):\n print msg.WriterError\n raise ParameterError()", "def test_good_custom_params(self):\r\n self.xmodule.custom_parameters = ['test_custom_params=test_custom_param_value']\r\n self.xmodule.get_client_key_secret = Mock(return_value=('test_client_key', 'test_client_secret'))\r\n self.xmodule.oauth_params = Mock()\r\n self.xmodule.get_input_fields()\r\n self.xmodule.oauth_params.assert_called_with(\r\n {u'custom_test_custom_params': u'test_custom_param_value'},\r\n 'test_client_key', 'test_client_secret'\r\n )", "def identify_parameters(model: torch.nn.Module,\n type_mapping: Dict[str, Sequence],\n check_param_exist: bool = True):\n for module in model.modules():\n for _name, _types in type_mapping.items():\n if any([isinstance(module, _type) for _type in _types]):\n for param in module.parameters():\n if check_param_exist:\n assert not hasattr(param, _name)\n setattr(param, _name, True)", "def test_get_mt_settings(self):\n pass", "def _check_params(self):\n\t\tstrange_param_helper = False\n\t\tfor param in self.params:\n\t\t\n\t\t\t# It could be that the param encapsulates several 
values (e.g., \"FLUX_RADIUS(10)\")\n\t\t\t# So we have to dissect this\n\t\t\tmatch = re.compile(\"(\\w*)\\(\\d*\\)\").match(param)\n\t\t\tif match:\n\t\t\t\tcleanparam = match.group(1)\n\t\t\telse:\n\t\t\t\tcleanparam = param\n\t\t\t\t\n\t\t\tif cleanparam not in self.fullparamlist:\n\t\t\t\tlogger.warning(\"Parameter '%s' seems strange and might be unknown to SExtractor\" \\\n % (param))\n\t\t\t\tstrange_param_helper = True\n\t\t\t\t\n\t\tif strange_param_helper:\n\t\t\tlogger.warning(\"Known parameters are: %s\" % (self.fullparamtxt))", "def checkNeededParams(self):\n for clp,value in self.neededParamsNames.items():\n if value[0] not in self.neededParams:\n print >> sys.stderr, clp+\" is a mandatory parameter \"\n self.printUsage()\n sys.exit(1)", "def check_parameters_valid(self) :\n for check_parameter in self.parameters :\n if (not self.parameters[check_parameter]['set']) :\n error_message = \"Missing key -> '\" + check_parameter + \"'\"\n if (Config.logger) :\n dtpo_log('fatal', error_message)\n raise ParseError(error_message)\n\n if self.parameters[check_parameter]['type'] == 'dir' :\n value = self.parameters[check_parameter]['value']\n return_string = check_directory_permissions(value)\n if return_string :\n error_message = \"{0} not accessible \" \\\n \"-> {1}\".format(\n check_parameter,\n return_string)\n raise ParseError(error_message)\n elif self.parameters[check_parameter]['type'] == 'file' :\n value = self.parameters[check_parameter]['value']\n try :\n file_pointer = open(value)\n file_pointer.close()\n except IOError as io_error :\n error_message = \"File {0} not accessible -> {2}\" \\\n .format(\n check_parameter,\n self.parameters[check_parameter]['value'],\n str(io_error))\n raise ParseError(error_message)", "def test_good_custom_params(self):\n self.xmodule.custom_parameters = ['test_custom_params=test_custom_param_value']\n self.xmodule.get_client_key_secret = Mock(return_value=('test_client_key', 'test_client_secret'))\n self.xmodule.oauth_params = Mock()\n self.xmodule.get_input_fields()\n self.xmodule.oauth_params.assert_called_with(\n {'custom_test_custom_params': 'test_custom_param_value'},\n 'test_client_key', 'test_client_secret'\n )", "def _check_value(self,val,obj=None):\n if not val in self.objects:\n # CEBALERT: can be called before __init__ has called\n # super's __init__, i.e. 
before attrib_name has been set.\n try:\n attrib_name = self._attrib_name\n except AttributeError:\n attrib_name = \"\"\n raise ValueError(\"%s not in Parameter %s's list of possible objects\" \\\n %(val,attrib_name))", "def test_preferences_properties(self):\n with pytest.raises(AssertionError):\n self.preferences.exchange_params_by_currency_id\n with pytest.raises(AssertionError):\n self.preferences.utility_params_by_good_id", "def test_base_hyper_parameters_reg(self):\n hyper_parameter_set = modelgen.generate_base_hyper_parameter_set()\n assert 'regularization_rate' in hyper_parameter_set.keys()", "def test_configure_without_inheritance(self):\n\n @Configurable(conf=category('TEST', Parameter('test', value=True)))\n class BaseTest(object):\n \"\"\"base Class to configure.\"\"\"\n\n class Test(BaseTest):\n \"\"\"Class to configure.\"\"\"\n\n targets = Test()\n\n self.assertTrue(targets.test)", "def check_data(self):\n\n missing_params = {}\n flag = False\n\n missing_params['general'] = {}\n for name, param in self.params.items():\n if not param.check():\n missing_params['general'][name] = param.get_description()\n flag = True\n\n for component, comp_obj in self.components.items():\n missing_params[component], flag_comp = comp_obj.check_data()\n\n # Assign empty component parameters that have a general version:\n empty_general_params = set(missing_params[component]).intersection(\n set(self.params))\n for param in empty_general_params:\n comp_obj.change_param_object(param, self.params[param])\n del missing_params[component][param]\n\n if missing_params[component]:\n flag = True\n\n if flag:\n raise Exception('Following parameters are missing:\\n{}'\n .format(\n self._print_params(missing_params, disp=False)))\n\n return True", "def _check_valid_basic(self, get_params):\n try:\n if get_params(self.variable):\n return self.default\n except: # noqa e722\n pass\n return not self.default", "def test_ParameterVariable_init_basic_type(self):\n\n par = provide_parameter(\"double\", \"test\")\n\n self.assertEqual(par.name, \"test\")\n self.assertEqual(par.type, \"double\")", "def __mandatory_is_given(self):\n\n strTestName = 'Mandatory parameter is given (correct)'\n RxCSObject = _RxCSobject()\n\n RxCSObject.paramAddMan('mandatory_parameter', 'Mandatory parameter')\n RxCSObject.mandatory_parameter = 1\n self.__parametersCheck_error(RxCSObject, 'correct', strTestName)", "def check_parameters(self, parameter_set: CHARMMParameterFile) -> bool:\n return parameter_set.check_parameters(self)", "def _check_parameter(param: Dict, path_param):\n events = set()\n\n name = param.get(\"name\", \"unnamed-parameter\")\n required = param.get(\"required\", False)\n default = param.get(\"default\")\n _type = param.get(\"type\")\n format = param.get(\"format\")\n enum = param.get(\"enum\")\n\n # check if required=True and default are both given\n if required and default is not None:\n events.add(\n ParameterDefinitionValidationError(\n path=path_param,\n reason=f\"The parameter is required yet it has a default value\",\n parameter_name=name,\n )\n )\n\n # check if type==array that there is an items\n if _type == \"array\" and \"items\" not in param:\n events.add(\n ParameterDefinitionValidationError(\n path=path_param,\n reason=f\"The parameter is of type 'array' but is missing an 'items' field\",\n parameter_name=name,\n )\n )\n\n # check enum does not contain duplicates\n if enum:\n if len(set(enum)) != len(enum):\n events.add(\n ParameterDefinitionValidationError(\n path=path_param + (\"enum\",),\n reason=f\"The 
enum values {enum} contains duplicate values\",\n parameter_name=name,\n )\n )\n if default is not None and default not in enum:\n events.add(\n ParameterDefinitionValidationError(\n path=path_param + (\"default\",),\n reason=f\"The default value {repr(default)} is not one of the enum values {enum}\",\n parameter_name=name,\n )\n )\n\n # check type/format & default value in accordance with type/format\n # https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#data-types\n map_type2subtypes_pythontype = {\n (\"string\", None): str,\n (\"string\", \"byte\"): re.compile(\n r\"^(?:[A-Za-z0-9+/\\s]{4})*(?:[A-Za-z0-9+/\\s]{2}==|[A-Za-z0-9+/\\s]{3}=)?$\"\n ),\n (\"string\", \"binary\"): str,\n (\"string\", \"date\"): re.compile(r\"^([0-9]+)-(0[1-9]|1[012])-(0[1-9]|[12][0-9]|3[01])$\"),\n (\"string\", \"dateTime\"): re.compile(\n r\"^([0-9]+)-(0[1-9]|1[012])-(0[1-9]|[12][0-9]|3[01])\" # date\n r\"[Tt]\"\n r\"([01][0-9]|2[0-3]):([0-5][0-9]):([0-5][0-9]|60)(\\.[0-9]+)?\" # time\n r\"(([Zz])|([+|\\-]([01][0-9]|2[0-3]):[0-5][0-9]))$\" # offset\n ),\n (\"string\", \"password\"): str,\n (\"integer\", None): numbers.Integral,\n (\"integer\", \"int32\"): numbers.Integral,\n (\"integer\", \"int64\"): numbers.Integral,\n (\"number\", None): numbers.Real,\n (\"number\", \"float\"): numbers.Real,\n (\"number\", \"double\"): numbers.Real,\n (\"boolean\", None): bool,\n (\"array\", None): list,\n }\n if default is not None and _type:\n regexp_or_type = map_type2subtypes_pythontype.get((_type, format))\n # if no match with both _type, format, check if match only on _type (format being freeform)\n if not regexp_or_type:\n regexp_or_type = map_type2subtypes_pythontype.get((_type, None))\n\n if regexp_or_type:\n # the type & format matches one of the Swagger Specifications documented type & format combinations\n # we can check the default format\n\n # decompose regexp_or_type into type and RE expression\n if isinstance(regexp_or_type, type):\n # regexp_or_type is a standard python type\n re_pattern = None\n py_type = regexp_or_type\n else:\n # regexp_or_type is a regexp expression\n re_pattern = regexp_or_type\n py_type = str\n\n if not isinstance(default, py_type):\n events.add(\n ParameterDefinitionValidationError(\n path=path_param + (\"default\",),\n reason=f\"The default value {repr(default)} is not of the expected type '{_type}'\",\n parameter_name=name,\n )\n )\n\n # if a regexp matching string is expected\n if re_pattern is not None:\n if not (isinstance(default, str) and re_pattern.match(default)):\n events.add(\n ParameterDefinitionValidationError(\n path=path_param + (\"default\",),\n reason=f\"The default value '{default}' does not conform to the string format '{format}'\",\n parameter_name=name,\n )\n )\n\n return events", "def test_test_group_parameters(self):\n pass", "def test_checkLinkoStructure(self):\n self.performTestForParams()", "def test_01_CheckClassTool(self):\n portal = self.portal\n self.assertNotEqual(None,getattr(portal,'portal_classes',None))\n self.commit()\n # check if web UI works\n portal_classes = portal.portal_classes\n portal_classes.manage_viewDocumentList()\n portal_classes.manage_viewPropertySheetList()\n portal_classes.manage_viewConstraintList()\n portal_classes.manage_viewExtensionList()\n portal_classes.manage_viewTestList()", "def test_change_param(self):\n test_adc = ads1115_differential(assume_defaults)\n\n # Check to make sure an assertion is thrown if a position beyond 3 is\n # chosen.\n try:\n test_adc.change_param(4, 0)\n # If the assertion error 
hasn't been thrown, fail the test.\n self.fail()\n except AssertionError:\n # An assertion error is expected.\n pass\n\n # Reset the test_adc to make sure it's a clean slate after the expected\n # failure.\n test_adc = ads1115_single(assume_defaults)\n\n # Check to make sure assertion errors are still thrown for invalid\n # parameters. Only one check is done because test_device_creation\n # has already walked the check_params internal function to make sure\n # that all invalid parametes are failed.\n try:\n # Try changing address to an invalid value.\n test_adc.change_param(2, 0)\n # If the assertion error hasn't been thrown, fail the test.\n except AssertionError:\n # An assertion error is expected.\n pass\n\n # Reset the test_adc to make sure it's a clean slate after the expected\n # failure.\n test_adc = ads1115_single(assume_defaults)\n\n # Check to make sure that all parameters can be changed when the change\n # is valid.\n\n # Set channel to 3.\n test_adc.change_param(0, 3)\n self.assertEqual(test_adc.parameters[0], 3)\n\n # Set gain to 4.\n test_adc.change_param(1, 4)\n self.assertEqual(test_adc.parameters[1], 4)\n\n # Set address to 0x49.\n test_adc.change_param(2, 0x49)\n self.assertEqual(test_adc.parameters[2], 0x49)\n\n # Set busnum to 0.\n try:\n test_adc.change_param(3, 0)\n # If the 0th I2C bus exists, then assert that the parameter has\n # changed.\n self.assertEqual(test_adc.parameters[3], 0)\n except IOError:\n # This is just because the current system does not have a 0th I2C\n # bus.\n pass", "def test_create_hyperflex_feature_limit_internal(self):\n pass", "def test_set_params_2():\n tpot_obj = TPOTClassifier(generations=2)\n tpot_obj.set_params(generations=3)\n\n assert tpot_obj.generations == 3", "def test_change_param(self):\n test_adc = ads1115_single(assume_defaults)\n\n # Check to make sure an assertion is thrown if a position beyond 3 is\n # chosen.\n try:\n test_adc.change_param(4, 0)\n # If the assertion error hasn't been thrown, fail the test.\n self.fail()\n except AssertionError:\n # An assertion error is expected.\n pass\n\n # Reset the test_adc to make sure it's a clean slate after the expected\n # failure.\n test_adc = ads1115_single(assume_defaults)\n\n # Check to make sure assertion errors are still thrown for invalid\n # parameters. 
Only one check is done because test_device_creation\n # has already walked the check_params internal function to make sure\n # that all invalid parametes are failed.\n try:\n # Try changing address to an invalid value.\n test_adc.change_param(2, 0)\n # If the assertion error hasn't been thrown, fail the test.\n except AssertionError:\n # An assertion error is expected.\n pass\n\n # Reset the test_adc to make sure it's a clean slate after the expected\n # failure.\n test_adc = ads1115_single(assume_defaults)\n\n # Check to make sure that all parameters can be changed when the change\n # is valid.\n\n # Set channel to 3.\n test_adc.change_param(0, 3)\n self.assertEqual(test_adc.parameters[0], 3)\n\n # Set gain to 4.\n test_adc.change_param(1, 4)\n self.assertEqual(test_adc.parameters[1], 4)\n\n # Set address to 0x49.\n test_adc.change_param(2, 0x49)\n self.assertEqual(test_adc.parameters[2], 0x49)\n\n # Set busnum to 0.\n try:\n test_adc.change_param(3, 0)\n # If the 0th I2C bus exists, then assert that the parameter has\n # changed.\n self.assertEqual(test_adc.parameters[3], 0)\n except IOError:\n # This is just because the current system does not have a 0th I2C\n # bus.\n pass", "def __init__(self, param_name, required=True):\n super(ValidCommandValidator, self).__init__(param_name=param_name)\n\n self.required = required", "def default_capabilities(self):", "def testSetEnabled(self):\n self.mgr.enabled = True\n self.mgr.setGimbalEnabledParam()\n self.mgr.shotMgr.vehicle.message_factory.param_set_encode.assert_called_with(0, mavutil.mavlink.MAV_COMP_ID_GIMBAL, # target system, target component\n \"GMB_GP_CTRL\", 1.0, mavutil.mavlink.MAV_PARAM_TYPE_REAL32 )", "def get_param_info(self, param_name, provider=None, mode='simple'):\n\n if mode not in ['simple', 'complex']:\n log.error('mode must be either simple or complex')\n return None\n\n provider = self.get_provider(provider)\n if provider is None:\n return None\n\n r = requests.get(self._url('/dataproviders/{:s}/parameters'.format(provider)),\n headers={'Authorization': self.token},\n params={\n 'key': 'name',\n 'value': param_name,\n 'search': 'false',\n 'mode': mode.upper(),\n 'parameterType': 'TM'},\n proxies=self.proxy)\n r.raise_for_status()\n\n matches = r.json()\n if len(matches) == 0:\n log.warning('no matches found for parameter {:s}'.format(param_name))\n return None\n\n if mode=='simple':\n param = pd.Series(r.json())\n else:\n param = pd.DataFrame.from_dict(r.json()['metadata'])\n param.set_index('key', inplace=True, drop=True)\n param = param.squeeze()\n\n checks = pd.DataFrame.from_dict(r.json()['monitoringChecks'])\n\n if len(checks.useCalibrated.unique())>1:\n log.warning('mixed calibration type in checks, ignoring')\n param['check_cal'] = None\n else:\n param['check_cal'] = checks.useCalibrated.unique()[0]\n\n if len(checks.checkInterpretation.unique())>1:\n log.warning('mixed interpretation type in checks, ignoring')\n param['check_type'] = None\n else:\n param['check_type'] = checks.checkInterpretation.unique()[0]\n\n if len(checks)>2:\n log.warn('extracting limits for parameters with >2 checks no supported')\n else:\n for idx, check in checks.iterrows():\n details = check.checkDefinitions\n if details['type']=='SOFT':\n param['soft_low'] = float(details['lowValue'])\n param['soft_high'] = float(details['highValue'])\n elif details['type']=='HARD':\n param['hard_low'] = float(details['lowValue'])\n param['hard_high'] = float(details['highValue'])\n else:\n log.warn('unsupported check type')\n \n param['First Sample'] = 
pd.NaT if param['First Sample']=='N/A' else pd.to_datetime(param['First Sample'])\n param['Last Sample'] = pd.NaT if param['Last Sample']=='N/A' else pd.to_datetime(param['Last Sample'])\n\n\n log.info('parameter info for {:s} extracted'.format(param.Description))\n\n return param", "def test_expected_configurations_parameters(self):\n allowed_attrs = [\"configuration-parameters\"]\n instance_info.dbaas.configuration_parameters.parameters(\n instance_info.dbaas_datastore,\n instance_info.dbaas_datastore_version)\n resp, body = instance_info.dbaas.client.last_response\n attrcheck = AttrCheck()\n config_parameters_dict = json.loads(body.decode())\n attrcheck.contains_allowed_attrs(\n config_parameters_dict, allowed_attrs,\n msg=\"Configurations parameters\")\n # sanity check that a few options are in the list\n config_params_list = config_parameters_dict['configuration-parameters']\n config_param_keys = []\n for param in config_params_list:\n config_param_keys.append(param['name'])\n expected_configs = self.expected_default_datastore_configs()\n expected_config_params = expected_configs.get('parameters_list')\n # check for duplicate configuration parameters\n msg = \"check for duplicate configuration parameters\"\n assert_equal(len(config_param_keys), len(set(config_param_keys)), msg)\n for expected_config_item in expected_config_params:\n assert_true(expected_config_item in config_param_keys)", "def test_class_method(self):\n self.assertEqual(pyperry.Base.add_processor.im_self.__name__, 'Base')", "def test_create_hyperflex_capability_info(self):\n pass", "def check_the_parameters(parameter, name, type_of_parameter, min_value=None, max_value=None):\n # check the type\n if not isinstance(parameter, type_of_parameter):\n raise ValueError('%s must be of %s type. 
'\n '%s of type %s was passed.'\n % (name ,type_of_parameter,parameter, type(parameter)))\n # check the values\n if min_value:\n if parameter < min_value:\n raise ValueError('%s should be bigger than %s'\n % (name ,min_value))\n if max_value:\n if parameter > max_value:\n raise ValueError('%s should be smaller than %s'\n % (name ,max_value))", "def test_test_enum_parameters(self):\n pass", "def test_confirm_customization_details(self):\n pass", "def test_create_hyperflex_proxy_setting_policy(self):\n pass", "def check_params(self, model_params):\n return model_params", "def HasPerInstancePropertyProviders(self) -> bool:", "def has_parameter(self, name):\n for par in self.params:\n if par.name == name:\n return True\n return False", "def __init__(self, parameter_type):\n self.parameter_type = parameter_type", "def _setup_params(self,**params):\n ### a parameter might be passed in for one of the extra_pos;\n ### if a key in the params dict is not a *parameter* of this\n ### PO, then try it on the extra_pos\n for n,p in params.items():\n if n not in self.params():\n self.set_parameter_value(n,p)\n del params[n]\n\n Parameterized._setup_params(self,**params)", "def _check_allowed_values(self, parameters):\n for key, allowed_values in self.ALLOWED_VALUES:\n self.log([u\"Checking allowed values for parameter '%s'\", key])\n if key in parameters:\n value = parameters[key]\n if value not in allowed_values:\n self._failed(u\"Parameter '%s' has value '%s' which is not allowed.\" % (key, value))\n return\n self.log(u\"Passed\")", "def test_get_params():\n\n kwargs = {\n 'population_size': 500,\n 'generations': 1000,\n 'verbosity': 1\n }\n\n tpot_obj = TPOTClassifier(**kwargs)\n\n # Get default parameters of TPOT and merge with our specified parameters\n initializer = inspect.getargspec(TPOTBase.__init__)\n default_kwargs = dict(zip(initializer.args[1:], initializer.defaults))\n default_kwargs.update(kwargs)\n\n assert tpot_obj.get_params() == default_kwargs", "def __init__(self):\n self.params = arcpy.GetParameterInfo()", "def check_settings(self):\r\n pass", "def test_update_hyperflex_capability_info(self):\n pass", "def test_get_property_success(self):\r\n self.assertEqual(self.config.option1, 1337)", "def test_parameter_mode_invalid(self, mock_ghn, mock_grnam, mock_pwnam):\n # Should pass\n self.driver.check_for_setup_error()\n # Should throw exceptions\n self.configuration.hgst_space_mode = ''\n self.assertRaises(exception.VolumeDriverException,\n self.driver.check_for_setup_error)\n self.configuration.hgst_space_mode = 'Fred'\n self.assertRaises(exception.VolumeDriverException,\n self.driver.check_for_setup_error)", "def applicable(self, input_parameter: str) -> bool:\n raise NotImplementedError()", "def _check_parameter(self, data):\n return self._pre_process_record(data) is not None", "def test_intent_classifier_set_params(self):\n pass", "def _check_parameter(self, h, i, j, v, integral=False,\n name=None, sym=None):\n if name is None:\n name = self.PARAMETER\n if sym is None:\n sym = self.SYMBOL\n return ASParameters._check_parameter(h, i, j, v, integral=integral,\n name=name, sym=sym)", "def _check_config(self):", "def test_init_custom_parameters():\n\n tpot_obj = TPOTClassifier(population_size=500, generations=1000,\n mutation_rate=0.05, crossover_rate=0.9,\n scoring='accuracy', num_cv_folds=10,\n verbosity=1, random_state=42,\n disable_update_check=True)\n\n assert tpot_obj.population_size == 500\n assert tpot_obj.generations == 1000\n assert tpot_obj.mutation_rate == 0.05\n assert 
tpot_obj.crossover_rate == 0.9\n assert tpot_obj.scoring_function == 'accuracy'\n assert tpot_obj.num_cv_folds == 10\n assert tpot_obj.max_time_mins is None\n assert tpot_obj.verbosity == 1\n assert tpot_obj._optimized_pipeline is None\n assert tpot_obj._fitted_pipeline is None\n assert not (tpot_obj._pset is None)\n assert not (tpot_obj._toolbox is None)", "def params_ok(): \n \n if parameters['details'].lower() in ['true', 'yes', '1']:\n parameters['details'] = True\n elif parameters['details'].lower() in ['false', 'no', '0']:\n parameters['details'] = False\n else:\n print 'unrecognized input for details = %s, so set details=False' % parameters['details']\n parameters['details'] = False\n\n if not parameters['db_tables']:\n parameters['db_tables'] = DB_TABLES\n\n # FIXME ideally, pre-check for tables on hosts here before diving in\n\n return True # params are OK; otherwise, we returned False above", "def test_optional_arg(self):\n obj = Base()\n self.assertEqual(obj.id, 1)", "def test_native(self):\n kwargs = dict(\n kind=POSITIONAL_ONLY,\n name='a',\n interface_name='b',\n default=None,\n type=int,\n )\n param = FParameter(**kwargs).native\n assert param.kind == kwargs['kind']\n assert param.name == kwargs['name']\n assert param.default == kwargs['default']\n assert param.annotation == kwargs['type']", "def isActiveFitParam(param):\n return isFitParam(param) and param.isActive()", "def __getitem__(self, name: str) -> object:\n return super(Parameter, self).__getitem__(name)", "def check_valid_params(cls, **user_params):\n # Check that the appropriate number of params are provided\n if not all(key in user_params for key in cls.param.keys()):\n raise ValueError(f\"Missing parameter! Expected {cls.param.keys()} but was given {user_params.keys()}\")\n\n # Check parameter units and values\n for (key, allowed_params), user_param in zip(cls.param.items(), user_params.values()):\n\n # If both have units, check that the user param value is valid. If valid, continue. Else, error\n if type(user_param) == Quantity and type(allowed_params) == Quantity:\n if get_physical_type(user_param.unit) != get_physical_type(allowed_params.unit):\n raise UnitTypeError(f\"Incorrect units {user_param.unit} provided for parameter {key}, \"\n f\"expected {allowed_params.unit}\")\n\n elif np.isin(user_param.to(allowed_params.unit).value, allowed_params.value):\n continue\n else:\n raise ValueError(f\"Invalid value '{user_param}' provided for parameter {key}, \"\n f\"allowed value(s): {allowed_params}\")\n\n # If one only one has units, then error\n elif (type(user_param) == Quantity) ^ (type(allowed_params) == Quantity):\n # User param has units, model param is unitless\n if type(user_param) == Quantity:\n raise ValueError(f\"Invalid units {user_param.unit} for parameter {key} provided, expected None\")\n else:\n raise ValueError(f\"Missing units for parameter {key}, expected {allowed_params.unit}\")\n\n # Check that unitless user param value is valid. If valid, continue. Else, Error\n elif user_param in allowed_params:\n continue\n else:\n raise ValueError(f\"Invalid value '{user_param}' provided for parameter {key}, \"\n f\"allowed value(s): {allowed_params}\")\n\n # Check Combinations (Logic lives inside model subclasses under model.isvalid_param_combo)\n if user_params not in cls.get_param_combinations():\n raise ValueError(\n f\"Invalid parameter combination. 
See {cls.__class__.__name__}.get_param_combinations() for a \"\n \"list of allowed parameter combinations.\")", "def test_ParameterManagerGenerator_concurrent():\n components_1 = {\n \"scale\": mock_component(),\n \"decay\": mock_component(),\n \"absorption\": mock_component(),\n }\n data_manager = mock_data_manager(components_1)\n\n pmg = ParameterManagerGenerator(\n [data_manager],\n apm_type=active_parameter_manager,\n target=ScalingTarget(),\n mode=\"concurrent\",\n )\n apms = pmg.parameter_managers()\n assert len(apms) == 1\n apm = apms[0]\n assert isinstance(apm, multi_active_parameter_manager)\n assert \"scale\" in apm.components_list\n assert \"decay\" in apm.components_list\n assert \"absorption\" in apm.components_list\n\n components_1 = {\n \"scale\": mock_component(),\n \"decay\": mock_component(),\n \"absorption\": mock_component(),\n }\n components_2 = {\"1\": mock_component(), \"2\": mock_component()}\n data_manager_1 = mock_data_manager(components_1)\n data_manager_2 = mock_data_manager(components_2)\n\n pmg = ParameterManagerGenerator(\n [data_manager_1, data_manager_2],\n apm_type=active_parameter_manager,\n target=ScalingTarget(),\n mode=\"concurrent\",\n )\n multi_apms = pmg.parameter_managers()\n assert len(multi_apms) == 1\n multi_apm = multi_apms[0]\n assert isinstance(multi_apm, multi_active_parameter_manager)\n for apm in multi_apm.apm_list:\n assert isinstance(apm, active_parameter_manager)\n assert \"scale\" in multi_apm.apm_list[0].components_list\n assert \"decay\" in multi_apm.apm_list[0].components_list\n assert \"absorption\" in multi_apm.apm_list[0].components_list\n assert \"1\" in multi_apm.apm_list[1].components_list\n assert \"2\" in multi_apm.apm_list[1].components_list\n\n # now try fixing a component\n data_manager.fixed_components = [\"absorption\"]\n pmg = ParameterManagerGenerator(\n [data_manager],\n apm_type=active_parameter_manager,\n target=ScalingTarget(),\n mode=\"concurrent\",\n )\n apms = pmg.parameter_managers()\n assert len(apms) == 1\n apm = apms[0]\n assert isinstance(apm, multi_active_parameter_manager)\n assert \"scale\" in apm.components_list\n assert \"decay\" in apm.components_list\n assert \"absorption\" not in apm.components_list", "def test_base_command(self):\n c = SeqPrep()\n # test base command\n self.assertEqual(c.BaseCommand,\n ''.join(['cd \"', os.getcwd(), '/\"; ', 'SeqPrep']))\n # test turning on parameter\n c.Parameters['-O'].on('15')\n self.assertEqual(c.BaseCommand,\\\n ''.join(['cd \"', os.getcwd(), '/\"; ', 'SeqPrep -O 15']))", "def checkParameters(self):\n EDVerbose.DEBUG(\"EDPluginExecGnomv0_1.checkParameters\")\n self.checkMandatoryParameters(self.dataInput, \"Data Input is None\")\n self.checkMandatoryParameters(self.dataInput.experimentalDataQ, \"Scattering vector values are missing\")\n self.checkMandatoryParameters(self.dataInput.experimentalDataValues, \"Experimental intensity values are missing\")" ]
[ "0.62300676", "0.5915647", "0.5863919", "0.56514627", "0.55497897", "0.5497675", "0.5491017", "0.54151815", "0.53953904", "0.53859353", "0.5378423", "0.5352581", "0.53417635", "0.53193915", "0.52967614", "0.5275244", "0.5273615", "0.5203125", "0.51854485", "0.51483536", "0.51482415", "0.5141941", "0.5091039", "0.5084706", "0.50822324", "0.50508124", "0.5050335", "0.5049498", "0.5049498", "0.5030209", "0.5024523", "0.5017761", "0.50158185", "0.50077456", "0.50064856", "0.500617", "0.50055397", "0.49999005", "0.49964696", "0.4995469", "0.49861452", "0.49822345", "0.497638", "0.4969149", "0.49662736", "0.49604124", "0.49431625", "0.4937649", "0.4923091", "0.49210533", "0.4920313", "0.49025053", "0.4900065", "0.48979038", "0.48930317", "0.4889462", "0.48856762", "0.48771286", "0.48757893", "0.48734096", "0.48715448", "0.48715076", "0.48460117", "0.48384282", "0.48234978", "0.48215434", "0.48129502", "0.48104393", "0.48056448", "0.4801569", "0.47978738", "0.47974113", "0.4793318", "0.4785243", "0.4784954", "0.4782561", "0.47807583", "0.4780355", "0.4774191", "0.47692633", "0.47669536", "0.47634295", "0.47631904", "0.4757973", "0.4757721", "0.47557092", "0.47537753", "0.4750269", "0.4749907", "0.47448984", "0.47401807", "0.47375923", "0.47370532", "0.47286132", "0.47286043", "0.47246918", "0.47230425", "0.4722743", "0.47226483", "0.47225237" ]
0.5788472
3
Test for the general multi_active_parameter_manager class.
def test_multi_apm(): components_1 = { "scale": mock_component(), "decay": mock_component(), "absorption": mock_component(), } components_2 = {"scale": mock_component(), "decay": mock_component()} multi_apm = multi_active_parameter_manager( ScalingTarget(), [components_1, components_2], [["scale", "decay"], ["scale"]], active_parameter_manager, ) # Test correct setup of apm_list attribute. for apm in multi_apm.apm_list: assert isinstance(apm, active_parameter_manager) assert len(multi_apm.apm_list) == 2 assert multi_apm.components_list == ["scale", "decay", "scale"] assert multi_apm.n_active_params == 3 assert multi_apm.apm_data[0] == {"start_idx": 0, "end_idx": 2} assert multi_apm.apm_data[1] == {"start_idx": 2, "end_idx": 3} # Test parameter selection. multi_apm.set_param_vals(flex.double([3.0, 2.5, 2.0])) assert multi_apm.get_param_vals() == flex.double([3.0, 2.5, 2.0]) assert multi_apm.select_parameters(0) == flex.double([3.0, 2.5]) assert multi_apm.select_parameters(1) == flex.double([2.0]) # Test setting parameter esds. multi_apm.set_param_esds(flex.double([0.1, 0.2, 0.3])) assert components_1["scale"].free_parameter_esds == flex.double([0.1]) assert components_1["decay"].free_parameter_esds == flex.double([0.2]) assert components_2["scale"].free_parameter_esds == flex.double([0.3]) # Test setting var_cov matrices for each component. var_cov = flex.double([1.0, 0.5, 0.5, 0.5, 2.0, 0.5, 0.5, 0.5, 3.0]) var_cov.reshape(flex.grid(3, 3)) multi_apm.calculate_model_state_uncertainties(var_cov) assert components_1["scale"].var_cov_matrix[0, 0] == 1.0 assert components_1["decay"].var_cov_matrix[0, 0] == 2.0 assert components_2["scale"].var_cov_matrix[0, 0] == 3.0
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_scaling_active_parameter_manager():\n components_2 = {\"1\": mock_scaling_component(2), \"2\": mock_scaling_component(2)}\n scaling_apm = scaling_active_parameter_manager(components_2, [\"1\"])\n assert list(scaling_apm.constant_g_values[0]) == list(\n components_2[\"2\"].calculate_scales()\n )\n assert len(scaling_apm.constant_g_values) == 1\n assert scaling_apm.n_obs == [2]\n\n # Test that no constant_g_values if both components selected\n scaling_apm = scaling_active_parameter_manager(components_2, [\"1\", \"2\"])\n assert scaling_apm.constant_g_values is None\n\n # Check that one can't initialise with an unequal number of reflections,\n # either within the selection or overall.\n with pytest.raises(AssertionError):\n components_2 = {\"1\": mock_scaling_component(2), \"2\": mock_scaling_component(1)}\n scaling_apm = scaling_active_parameter_manager(components_2, [\"1\", \"2\"])\n with pytest.raises(AssertionError):\n components_2 = {\"1\": mock_scaling_component(2), \"2\": mock_scaling_component(1)}\n scaling_apm = scaling_active_parameter_manager(components_2, [\"1\"])\n\n data_manager = mock_data_manager(components_2)\n pmg = ScalingParameterManagerGenerator(\n [data_manager], target=ScalingTarget(), mode=\"concurrent\"\n )\n assert isinstance(pmg.apm_type, type(scaling_active_parameter_manager))", "def test_general_apm():\n components = {\n \"scale\": mock_component(),\n \"decay\": mock_component(),\n \"absorption\": mock_component(),\n }\n\n apm = active_parameter_manager(components, [\"scale\", \"decay\"])\n assert \"decay\" in apm.components_list\n assert \"scale\" in apm.components_list\n assert \"absorption\" not in apm.components_list\n assert apm.n_active_params == (\n components[\"scale\"].n_params + components[\"decay\"].n_params\n )\n n_cumul = 0\n for component in apm.components:\n assert apm.components[component][\"n_params\"] == components[component].n_params\n assert apm.components[component][\"start_idx\"] == n_cumul\n assert (\n apm.components[component][\"end_idx\"]\n == n_cumul + apm.components[component][\"n_params\"]\n )\n n_cumul += apm.components[component][\"n_params\"]\n\n apm.set_param_vals(flex.double([2.0, 1.5]))\n assert apm.get_param_vals() == flex.double([2.0, 1.5])\n # Test params were updated in components\n assert list(components[\"scale\"].free_parameters) == [2.0]\n assert list(components[\"decay\"].free_parameters) == [1.5]\n # Test selection of parameters\n decay_params = apm.select_parameters(\"decay\")\n assert len(decay_params) == 1\n assert decay_params[0] == 1.5\n\n # Test calculate model state uncertainties\n var_cov = flex.double([1.0, 0.5, 0.5, 2.0])\n var_cov.reshape(flex.grid(2, 2))\n apm.calculate_model_state_uncertainties(var_cov)\n assert components[\"scale\"].var_cov_matrix[0, 0] == 1.0\n assert components[\"decay\"].var_cov_matrix[0, 0] == 2.0\n\n # Test set param esds.\n apm.set_param_esds(flex.double([0.1, 0.2]))\n assert components[\"scale\"].free_parameter_esds == flex.double([0.1])\n assert components[\"decay\"].free_parameter_esds == flex.double([0.2])", "def test_options(self):\n for module in Parameters.__modules__:\n m = getattr(Parameters, module)\n if type(m) == AnyOf:\n for o in m.options:\n setattr(self.p, module, o)\n Parameters(1, **{module: o})", "def test_multi(self):\n self.assertEqual(6, foo.multi(2, 3))", "def test_class_callparams(self):\n\n @Configurable(\n conf=[\n Parameter('test0', value=True),\n Parameter('test1', value=False)\n ]\n )\n class Test(object):\n\n def __init__(self, test0=None):\n\n 
super(Test, self).__init__()\n\n self.test0 = test0\n\n test = Test()\n\n self.assertTrue(test.test0)\n self.assertFalse(test.test1)", "def test_test_group_parameters(self):\n pass", "def test_all_params(self):\n persistence_helper = PersistenceHelper(use_riak=True, is_sync=True)\n self.assertEqual(persistence_helper.use_riak, True)\n self.assertEqual(persistence_helper.is_sync, True)", "def test_overridable_parameter() -> None:\n param_dict = ParamClass.get_overridable_parameters()\n assert \"name\" in param_dict\n assert \"flag\" in param_dict\n assert \"not_flag\" in param_dict\n assert \"seed\" in param_dict\n assert \"number\" in param_dict\n assert \"integers\" in param_dict\n assert \"optional_int\" in param_dict\n assert \"optional_float\" in param_dict\n assert \"tuple1\" in param_dict\n assert \"int_tuple\" in param_dict\n assert \"enum\" in param_dict\n assert \"readonly\" not in param_dict\n assert \"_non_override\" not in param_dict\n assert \"constant\" not in param_dict", "def _validate_params(self, request_set, target_set=None, context=None):\n\n # Perform first-pass validation in Function.__init__():\n # - returns full set of params based on subclass paramClassDefaults\n super(Mechanism, self)._validate_params(request_set,target_set,context)\n\n params = target_set\n\n #region VALIDATE TIME SCALE\n try:\n param_value = params[TIME_SCALE]\n except KeyError:\n if COMMAND_LINE in context:\n pass\n else:\n self.timeScale = timeScaleSystemDefault\n else:\n if isinstance(param_value, TimeScale):\n self.timeScale = params[TIME_SCALE]\n else:\n if self.prefs.verbosePref:\n print(\"Value for {0} ({1}) param of {2} must be of type {3}; default will be used: {4}\".\n format(TIME_SCALE, param_value, self.name, type(TimeScale), timeScaleSystemDefault))\n #endregion\n\n #region VALIDATE INPUT STATE(S)\n\n # MODIFIED 6/10/16\n # FIX: SHOULD CHECK LENGTH OF INPUT_STATES PARAM (LIST OF NAMES OR SPECIFICATION DICT) AGAINST LENGTH OF\n # FIX: self.variable 2D ARRAY AND COMPARE variable SPECS, IF PROVIDED, WITH CORRESPONDING ELEMENTS OF\n # FIX: self.variable 2D ARRAY\n try:\n param_value = params[INPUT_STATES]\n\n except KeyError:\n if COMMAND_LINE in context:\n pass\n else:\n # INPUT_STATES not specified:\n # - set to None, so that it is set to default (self.variable) in instantiate_inputState\n # - if in VERBOSE mode, warn in instantiate_inputState, where default value is known\n params[INPUT_STATES] = None\n\n else:\n # INPUT_STATES is specified, so validate:\n # If it is a single item or a non-OrderedDict, place in a list (for use here and in instantiate_inputState)\n if not isinstance(param_value, (list, OrderedDict)):\n param_value = [param_value]\n # Validate each item in the list or OrderedDict\n # Note:\n # * number of inputStates is validated against length of the owner mechanism's execute method variable (EMV)\n # in instantiate_inputState, where an inputState is assigned to each item (value) of the EMV\n i = 0\n for key, item in param_value if isinstance(param_value, dict) else enumerate(param_value):\n from PsyNeuLink.Components.States.InputState import InputState\n # If not valid...\n if not ((isclass(item) and (issubclass(item, InputState) or # InputState class ref\n issubclass(item, Projection))) or # Project class ref\n isinstance(item, InputState) or # InputState object\n isinstance(item, dict) or # InputState specification dict\n isinstance(item, ParamValueProjection) or # ParamValueProjection tuple\n isinstance(item, str) or # Name (to be used as key in inputStates 
dict)\n iscompatible(item, **{kwCompatibilityNumeric: True})): # value\n # set to None, so it is set to default (self.variable) in instantiate_inputState\n param_value[key] = None\n if self.prefs.verbosePref:\n print(\"Item {0} of {1} param ({2}) in {3} is not a\"\n \" InputState, specification dict or value, nor a list of dict of them; \"\n \"variable ({4}) of execute method for {5} will be used\"\n \" to create a default outputState for {3}\".\n format(i,\n INPUT_STATES,\n param_value,\n self.__class__.__name__,\n self.variable,\n self.execute.__self__.name))\n i += 1\n params[INPUT_STATES] = param_value\n #endregion\n\n #region VALIDATE EXECUTE METHOD PARAMS\n try:\n function_param_specs = params[FUNCTION_PARAMS]\n except KeyError:\n if COMMAND_LINE in context:\n pass\n elif self.prefs.verbosePref:\n print(\"No params specified for {0}\".format(self.__class__.__name__))\n else:\n if not (isinstance(function_param_specs, dict)):\n raise MechanismError(\"{0} in {1} must be a dict of param specifications\".\n format(FUNCTION_PARAMS, self.__class__.__name__))\n # Validate params\n from PsyNeuLink.Components.States.ParameterState import ParameterState\n for param_name, param_value in function_param_specs.items():\n try:\n default_value = self.paramInstanceDefaults[FUNCTION_PARAMS][param_name]\n except KeyError:\n raise MechanismError(\"{0} not recognized as a param of execute method for {1}\".\n format(param_name, self.__class__.__name__))\n if not ((isclass(param_value) and\n (issubclass(param_value, ParameterState) or\n issubclass(param_value, Projection))) or\n isinstance(param_value, ParameterState) or\n isinstance(param_value, Projection) or\n isinstance(param_value, dict) or\n isinstance(param_value, ParamValueProjection) or\n iscompatible(param_value, default_value)):\n params[FUNCTION_PARAMS][param_name] = default_value\n if self.prefs.verbosePref:\n print(\"{0} param ({1}) for execute method {2} of {3} is not a ParameterState, \"\n \"projection, ParamValueProjection, or value; default value ({4}) will be used\".\n format(param_name,\n param_value,\n self.execute.__self__.componentName,\n self.__class__.__name__,\n default_value))\n #endregion\n # FIX: MAKE SURE OUTPUT OF EXECUTE FUNCTION / SELF.VALUE IS 2D ARRAY, WITH LENGTH == NUM OUTPUT STATES\n\n #region VALIDATE OUTPUT STATE(S)\n\n # FIX: MAKE SURE # OF OUTPUTS == LENGTH OF OUTPUT OF EXECUTE FUNCTION / SELF.VALUE\n try:\n param_value = params[OUTPUT_STATES]\n\n except KeyError:\n if COMMAND_LINE in context:\n pass\n else:\n # OUTPUT_STATES not specified:\n # - set to None, so that it is set to default (self.value) in instantiate_outputState\n # Notes:\n # * if in VERBOSE mode, warning will be issued in instantiate_outputState, where default value is known\n # * number of outputStates is validated against length of owner mechanism's execute method output (EMO)\n # in instantiate_outputState, where an outputState is assigned to each item (value) of the EMO\n params[OUTPUT_STATES] = None\n\n else:\n # OUTPUT_STATES is specified, so validate:\n # If it is a single item or a non-OrderedDict, place in a list (for use here and in instantiate_outputState)\n if not isinstance(param_value, (list, OrderedDict)):\n param_value = [param_value]\n # Validate each item in the list or OrderedDict\n i = 0\n for key, item in param_value if isinstance(param_value, dict) else enumerate(param_value):\n from PsyNeuLink.Components.States.OutputState import OutputState\n # If not valid...\n if not ((isclass(item) and issubclass(item, OutputState)) or # 
OutputState class ref\n isinstance(item, OutputState) or # OutputState object\n isinstance(item, dict) or # OutputState specification dict\n isinstance(item, str) or # Name (to be used as key in outputStates dict)\n iscompatible(item, **{kwCompatibilityNumeric: True})): # value\n # set to None, so it is set to default (self.value) in instantiate_outputState\n param_value[key] = None\n if self.prefs.verbosePref:\n print(\"Item {0} of {1} param ({2}) in {3} is not a\"\n \" OutputState, specification dict or value, nor a list of dict of them; \"\n \"output ({4}) of execute method for {5} will be used\"\n \" to create a default outputState for {3}\".\n format(i,\n OUTPUT_STATES,\n param_value,\n self.__class__.__name__,\n self.value,\n self.execute.__self__.name))\n i += 1\n params[OUTPUT_STATES] = param_value", "def test_configure_to_reconfigure_param(self):\n\n class ToConfigure(object):\n \"\"\"Class to configure.\"\"\"\n\n def __init__(self):\n super(ToConfigure, self).__init__()\n self.test = None\n\n target = ToConfigure()\n\n param = 'test'\n\n conf = configuration(category('TEST', Parameter(param, value=True)))\n\n self.configurable.configure(conf=conf, targets=[target])\n self.assertTrue(target.test)", "def check_params(self):\n raise NotImplementedError", "def test_multi(self):\n self.assertEqual(6, multi(2, 3))", "def test_ParameterManagerGenerator_concurrent():\n components_1 = {\n \"scale\": mock_component(),\n \"decay\": mock_component(),\n \"absorption\": mock_component(),\n }\n data_manager = mock_data_manager(components_1)\n\n pmg = ParameterManagerGenerator(\n [data_manager],\n apm_type=active_parameter_manager,\n target=ScalingTarget(),\n mode=\"concurrent\",\n )\n apms = pmg.parameter_managers()\n assert len(apms) == 1\n apm = apms[0]\n assert isinstance(apm, multi_active_parameter_manager)\n assert \"scale\" in apm.components_list\n assert \"decay\" in apm.components_list\n assert \"absorption\" in apm.components_list\n\n components_1 = {\n \"scale\": mock_component(),\n \"decay\": mock_component(),\n \"absorption\": mock_component(),\n }\n components_2 = {\"1\": mock_component(), \"2\": mock_component()}\n data_manager_1 = mock_data_manager(components_1)\n data_manager_2 = mock_data_manager(components_2)\n\n pmg = ParameterManagerGenerator(\n [data_manager_1, data_manager_2],\n apm_type=active_parameter_manager,\n target=ScalingTarget(),\n mode=\"concurrent\",\n )\n multi_apms = pmg.parameter_managers()\n assert len(multi_apms) == 1\n multi_apm = multi_apms[0]\n assert isinstance(multi_apm, multi_active_parameter_manager)\n for apm in multi_apm.apm_list:\n assert isinstance(apm, active_parameter_manager)\n assert \"scale\" in multi_apm.apm_list[0].components_list\n assert \"decay\" in multi_apm.apm_list[0].components_list\n assert \"absorption\" in multi_apm.apm_list[0].components_list\n assert \"1\" in multi_apm.apm_list[1].components_list\n assert \"2\" in multi_apm.apm_list[1].components_list\n\n # now try fixing a component\n data_manager.fixed_components = [\"absorption\"]\n pmg = ParameterManagerGenerator(\n [data_manager],\n apm_type=active_parameter_manager,\n target=ScalingTarget(),\n mode=\"concurrent\",\n )\n apms = pmg.parameter_managers()\n assert len(apms) == 1\n apm = apms[0]\n assert isinstance(apm, multi_active_parameter_manager)\n assert \"scale\" in apm.components_list\n assert \"decay\" in apm.components_list\n assert \"absorption\" not in apm.components_list", "def test_verify_set_multi(self):\n self._verify([self.applied_commands['setm']])", "def 
test_get_mt_settings(self):\n pass", "def test_set_params():\n\n tpot_obj = TPOTClassifier()\n assert tpot_obj.set_params() is tpot_obj", "def Check(self, parameters):", "def test_checkCustoms(self):\n self.failUnlessEqual(self.nice.opts['myflag'], \"PONY!\")\n self.failUnlessEqual(self.nice.opts['myparam'], \"Tofu WITH A PONY!\")", "def test_subsystems(self):\n pass", "def test_set_params_2():\n tpot_obj = TPOTClassifier(generations=2)\n tpot_obj.set_params(generations=3)\n\n assert tpot_obj.generations == 3", "def test_parameters(self):\n self.assert_initialize_driver()\n #reply = self.driver_client.cmd_dvr('get_resource', Parameter.ALL)\n #self.assert_driver_parameters(reply, verify_sample_interval=True)", "def test_provider(self):\n msg = 'Wrong number of processing algorithm loaded.'\n self.assertEqual(len(self.provider.alglist), 6, msg)\n\n msg = 'InaSAFE should be activated by default in Processing.'\n self.assertEqual(self.provider.activate, True, msg)\n\n msg = 'Wrong processing provide.'\n for algorithm in self.provider.alglist:\n self.assertEqual(algorithm.provider, self.provider, msg)", "def test_create_hyperflex_feature_limit_internal(self):\n pass", "def test_intent_classifier_set_params(self):\n pass", "def supported_parameters(cls):\n raise NotImplementedError()", "def define_parameters(self):", "def test_direct_access_params(self):\n self.assertTrue(self.driver._protocol._param_dict.get(\"foo\"), 10)\n self.assertTrue(self.driver._protocol._param_dict.get(\"bar\"), 15)\n # use real driver sets here, the direct poke of the param dict is just\n # a test-with-base-class thing\n self.driver._protocol._param_dict.update(\"bar=20\")\n self.assertTrue(self.driver._protocol._param_dict.get(\"bar\"), 20)\n\n # pretend to go into direct access mode,\n running_config = self.driver._protocol.get_cached_config()\n # make some changes to both, (foo to 100, bar to 200)\n self.driver._protocol._param_dict.update(\"foo=100\")\n self.driver._protocol._param_dict.update(\"bar=200\")\n # its like we came out of DA mode\n self.driver.restore_direct_access_params(running_config)\n\n # confirm that the default values were set back appropriately.\n self.assertTrue(self.driver._protocol._param_dict.get(\"foo\"), 10)\n self.assertTrue(self.driver._protocol._param_dict.get(\"bar\"), 200)", "def test_change_param(self):\n test_adc = ads1115_single(assume_defaults)\n\n # Check to make sure an assertion is thrown if a position beyond 3 is\n # chosen.\n try:\n test_adc.change_param(4, 0)\n # If the assertion error hasn't been thrown, fail the test.\n self.fail()\n except AssertionError:\n # An assertion error is expected.\n pass\n\n # Reset the test_adc to make sure it's a clean slate after the expected\n # failure.\n test_adc = ads1115_single(assume_defaults)\n\n # Check to make sure assertion errors are still thrown for invalid\n # parameters. 
Only one check is done because test_device_creation\n # has already walked the check_params internal function to make sure\n # that all invalid parametes are failed.\n try:\n # Try changing address to an invalid value.\n test_adc.change_param(2, 0)\n # If the assertion error hasn't been thrown, fail the test.\n except AssertionError:\n # An assertion error is expected.\n pass\n\n # Reset the test_adc to make sure it's a clean slate after the expected\n # failure.\n test_adc = ads1115_single(assume_defaults)\n\n # Check to make sure that all parameters can be changed when the change\n # is valid.\n\n # Set channel to 3.\n test_adc.change_param(0, 3)\n self.assertEqual(test_adc.parameters[0], 3)\n\n # Set gain to 4.\n test_adc.change_param(1, 4)\n self.assertEqual(test_adc.parameters[1], 4)\n\n # Set address to 0x49.\n test_adc.change_param(2, 0x49)\n self.assertEqual(test_adc.parameters[2], 0x49)\n\n # Set busnum to 0.\n try:\n test_adc.change_param(3, 0)\n # If the 0th I2C bus exists, then assert that the parameter has\n # changed.\n self.assertEqual(test_adc.parameters[3], 0)\n except IOError:\n # This is just because the current system does not have a 0th I2C\n # bus.\n pass", "def test_act_on_settings(self):\n pass # TODO(tlarsen)", "def test_act_on_settings(self):\n pass # TODO(tlarsen)", "def test_set_virtualization_realm_active(self):\n pass", "def test_good_custom_params(self):\r\n self.xmodule.custom_parameters = ['test_custom_params=test_custom_param_value']\r\n self.xmodule.get_client_key_secret = Mock(return_value=('test_client_key', 'test_client_secret'))\r\n self.xmodule.oauth_params = Mock()\r\n self.xmodule.get_input_fields()\r\n self.xmodule.oauth_params.assert_called_with(\r\n {u'custom_test_custom_params': u'test_custom_param_value'},\r\n 'test_client_key', 'test_client_secret'\r\n )", "def test_can_process(self):\n self.assertTrue(self.adapter.can_process(''))", "def _check_params(self):\n pass", "def test_get_property_success(self):\r\n self.assertEqual(self.config.option1, 1337)", "def _check_parameters_support(self, parameters=()):\n for parameter in parameters:\n assert parameter in self._supported, \"Estimation %s is not implemented yet\" % parameter", "def test_confirm_customization_details(self):\n pass", "def test_good_custom_params(self):\n self.xmodule.custom_parameters = ['test_custom_params=test_custom_param_value']\n self.xmodule.get_client_key_secret = Mock(return_value=('test_client_key', 'test_client_secret'))\n self.xmodule.oauth_params = Mock()\n self.xmodule.get_input_fields()\n self.xmodule.oauth_params.assert_called_with(\n {'custom_test_custom_params': 'test_custom_param_value'},\n 'test_client_key', 'test_client_secret'\n )", "def parse_params(self, ngpu=1, **kwargs):\n\n return_status = super(MadryEtAlMultiGPU, self).parse_params(**kwargs)\n self.ngpu = ngpu\n\n return return_status", "def test_change_param(self):\n test_adc = ads1115_differential(assume_defaults)\n\n # Check to make sure an assertion is thrown if a position beyond 3 is\n # chosen.\n try:\n test_adc.change_param(4, 0)\n # If the assertion error hasn't been thrown, fail the test.\n self.fail()\n except AssertionError:\n # An assertion error is expected.\n pass\n\n # Reset the test_adc to make sure it's a clean slate after the expected\n # failure.\n test_adc = ads1115_single(assume_defaults)\n\n # Check to make sure assertion errors are still thrown for invalid\n # parameters. 
Only one check is done because test_device_creation\n # has already walked the check_params internal function to make sure\n # that all invalid parametes are failed.\n try:\n # Try changing address to an invalid value.\n test_adc.change_param(2, 0)\n # If the assertion error hasn't been thrown, fail the test.\n except AssertionError:\n # An assertion error is expected.\n pass\n\n # Reset the test_adc to make sure it's a clean slate after the expected\n # failure.\n test_adc = ads1115_single(assume_defaults)\n\n # Check to make sure that all parameters can be changed when the change\n # is valid.\n\n # Set channel to 3.\n test_adc.change_param(0, 3)\n self.assertEqual(test_adc.parameters[0], 3)\n\n # Set gain to 4.\n test_adc.change_param(1, 4)\n self.assertEqual(test_adc.parameters[1], 4)\n\n # Set address to 0x49.\n test_adc.change_param(2, 0x49)\n self.assertEqual(test_adc.parameters[2], 0x49)\n\n # Set busnum to 0.\n try:\n test_adc.change_param(3, 0)\n # If the 0th I2C bus exists, then assert that the parameter has\n # changed.\n self.assertEqual(test_adc.parameters[3], 0)\n except IOError:\n # This is just because the current system does not have a 0th I2C\n # bus.\n pass", "def test_checkParameters(self):\n self.failUnlessEqual(self.nice.opts['long'], \"Alpha\")\n self.failUnlessEqual(self.nice.opts['another'], \"Beta\")\n self.failUnlessEqual(self.nice.opts['longonly'], \"noshort\")\n self.failUnlessEqual(self.nice.opts['shortless'], \"Gamma\")", "def check_parameters():\r\n for par in PARAM:\r\n if isinstance(par, ExperimentFrame):\r\n EXP.change_variable(**par())\r\n else:\r\n EXP.change_variable(**par)", "def test_display_all_credential(self):\n self.assertEqual(Credential.display_credential(),Credential.credential_list)", "def test_test_enum_parameters(self):\n pass", "def test_tooManyModeParameters(self):\n self._sendModeChange(\"+s\", \"wrong\")\n self._checkModeChange([])\n errors = self.flushLoggedErrors(irc.IRCBadModes)\n self.assertEqual(len(errors), 1)\n self.assertSubstring(\"Too many parameters\", errors[0].getErrorMessage())", "def test_update_hyperflex_feature_limit_internal(self):\n pass", "def test_ptype(self):\n\n @Configurable(\n conf=[\n Parameter('test', ptype=int, svalue='1'),\n Parameter('ex', svalue='2', ptype=int)\n ]\n )\n class Test(object):\n\n def __init__(self, test=None, *args, **kwargs):\n\n super(Test, self).__init__(*args, **kwargs)\n\n self.testy = test\n\n test = Test()\n\n self.assertEqual(test.testy, 1)\n self.assertFalse(hasattr(test, 'test'))\n self.assertEqual(test.ex, 2)\n\n applyconfiguration(\n targets=[test], conf=[\n Parameter('test', svalue='2'),\n Parameter('ex', svalue='3')\n ]\n )\n\n self.assertEqual(test.testy, 1)\n self.assertEqual(test.test, 2)\n self.assertEqual(test.ex, 3)\n\n Configurable.get_annotations(test)[0].applyconfiguration(\n targets=[test], conf=[\n Parameter('test', svalue='3'),\n Parameter('ex', svalue='4', ptype=bool)\n ]\n )\n\n self.assertEqual(test.testy, 1)\n self.assertEqual(test.test, 3)\n self.assertTrue(test.ex)", "def make_multiinstantiate(self, special_properties, name, parameters):\n PARAM_SUBSCRIPT = \"_p\"\n self._model_namespace[\"ct_populationname\"] = name+\"Multi\"\n multi_ct = lems.ComponentType(self._model_namespace[\"ct_populationname\"], extends=BASE_POPULATION)\n structure = lems.Structure()\n multi_ins = lems.MultiInstantiate(component_type=name,\n number=\"N\")\n param_dict = {}\n # number of neruons\n multi_ct.add(lems.Parameter(name=\"N\", dimension=\"none\"))\n # other parameters\n for 
sp in special_properties:\n if special_properties[sp] is None:\n multi_ct.add(lems.Parameter(name=sp+PARAM_SUBSCRIPT, dimension=self._all_params_unit[sp]))\n multi_ins.add(lems.Assign(property=sp, value=sp+PARAM_SUBSCRIPT))\n param_dict[sp] = parameters[sp]\n else:\n # multi_ct.add(lems.Parameter(name=sp, dimension=self._all_params_unit[sp]))\n # check if there are some units in equations\n equation = special_properties[sp]\n # add spaces around brackets to prevent mismatching\n equation = re.sub(\"\\(\", \" ( \", equation)\n equation = re.sub(\"\\)\", \" ) \", equation)\n for i in get_identifiers(equation):\n # iterator is a special case\n if i == \"i\":\n regexp_noletter = \"[^a-zA-Z0-9]\"\n equation = re.sub(\"{re}i{re}\".format(re=regexp_noletter),\n \" {} \".format(INDEX), equation)\n # here it's assumed that we don't use Netwton in neuron models\n elif i in name_to_unit and i != \"N\":\n const_i = i+'const'\n multi_ct.add(lems.Constant(name=const_i, symbol=const_i,\n dimension=self._all_params_unit[sp], value=\"1\"+i))\n equation = re.sub(i, const_i, equation)\n multi_ins.add(lems.Assign(property=sp, value=equation))\n structure.add(multi_ins)\n multi_ct.structure = structure\n self._model.add(multi_ct)\n param_dict = dict([(k+\"_p\", v) for k, v in param_dict.items()])\n param_dict[\"N\"] = self._nr_of_neurons\n self._model_namespace[\"populationname\"] = self._model_namespace[\"ct_populationname\"] + \"pop\"\n self._model_namespace[\"networkname\"] = self._model_namespace[\"ct_populationname\"] + \"Net\"\n self.add_population(self._model_namespace[\"networkname\"],\n self._model_namespace[\"populationname\"],\n self._model_namespace[\"ct_populationname\"],\n **param_dict)", "def test_create_hyperflex_feature_limit_external(self):\n pass", "def test_public_manager_call(self):\n\n # GIVEN public access method to model's manager\n\n # WHEN fetching data listing using unsupported call method\n response = self.api.GET(self.app_label, self.model_name2, params={'call': 'call2'})\n\n # THEN it should fail\n self.assertTrue(response.error)\n\n # -----\n\n # WHEN fetching data listing using supported call method\n response = self.api.GET(self.app_label, self.model_name2, params={'call': 'call'})\n\n # THEN it should succeed\n self.assertTrue(response.success)", "def test_list_options(self):\n pass", "def testIMembraneUserManagement(self):\n from Products.membrane.interfaces import IMembraneUserManagement\n from Products.membrane.at.interfaces import IUserAuthentication\n \n user = IMembraneUserManagement(self.person);\n auth = IUserAuthentication(self.person);\n \n #test setting password directly, verify that verifyCredentials works as expected\n fsd_tool = getToolByName(self.portal, TOOLNAME)\n self.person.setPassword('secret1')\n if fsd_tool.getUseInternalPassword():\n self.failUnless(auth.verifyCredentials({'login':'abc123','password':'secret1'}), \"failed to verify correct login and password, setting password directly\")\n else:\n self.failIf(auth.verifyCredentials({'login':'abc123','password':'secret1'}), \"internal password not used, method should return none, setting password directly. 
Value returned: %s\" % returnval)\n \n # now set password using the userChanger method and verify that it worked\n user.doChangeUser('abc123', 'secret2')\n fsd_tool = getToolByName(self.portal, TOOLNAME)\n if fsd_tool.getUseInternalPassword():\n self.failUnless(auth.verifyCredentials({'login':'abc123','password':'secret2'}), \"failed to verify correct login and password, testing doChangeUser()\")\n else:\n self.failIf(auth.verifyCredentials({'login':'abc123','password':'secret2'}), \"internal password not used, method should return none, testing doChangeUser(). Value returned: %s\" % returnval)\n \n # set password and some other value with doChangeUser, using keywords\n self.failIf(self.person.getEmail(), \"email already set, and it shouldn't be: %s\" % self.person.getEmail())\n user.doChangeUser('abc123','secret', email='[email protected]')\n self.failUnlessEqual(self.person.getEmail(), '[email protected]', msg=\"failed to update email via doChangeUser(): %s\" % self.person.getEmail())\n \n # now try to delete the user\n self.failUnless(hasattr(self.directory,'abc123'), \"directory does not have person\")\n user.doDeleteUser('abc123')\n self.failIf(hasattr(self.directory,'abc123'), \"directory still contains person\")\n \n # we should not be able to log in as this person anymore\n self.logout()\n try:\n self.login('abc123')\n except AttributeError:\n pass\n else:\n self.fail(\"still able to login: %s\" % self.portal.portal_membership.getAuthenticatedMember().id)", "def test_parameters(self):\n # Try to create a machine without an image.\n status = self.proxy.server.create(PROVIDER_ID)\n self.check_xmlrpc_command_result(status, has_error=True)\n\n # Try to create a machine without a flavor.\n status = self.proxy.server.create(PROVIDER_ID, IMAGE)\n self.check_xmlrpc_command_result(status, has_error=True)\n\n # Try to create a machine with wrong image format.\n status = self.proxy.server.create(\n PROVIDER_ID, [\"name=image\", \"size\"], \"flavor\"\n )\n self.check_xmlrpc_command_result(status, has_error=True)\n\n # Try to create a machine with wrong flavor format.\n status = self.proxy.server.create(\n PROVIDER_ID, [\"name=image\", \"size=20\"], \"flavor\"\n )\n self.check_xmlrpc_command_result(status, has_error=True)\n\n # Try to create a machine with wrong machine_numbers.\n status = self.proxy.server.create(\n PROVIDER_ID, [\"name=image\", \"size=20\"], [\"flavor=flavor\"], -1\n )\n self.check_xmlrpc_command_result(status, has_error=True)\n\n # Try to create a machine with wrong userdata.\n status = self.proxy.server.create(\n PROVIDER_ID, [\"name=image\", \"size=20\"], [\"name=flavor\"], 1,\n \"availability_zone\", \"key_name\", \"security_group\",\n \"private_network\", \"public_network\", \"userdata\"\n )\n self.check_xmlrpc_command_result(status, has_error=True)\n\n # Try to create a machine with wrong scheduler_hints.\n status = self.proxy.server.create(\n PROVIDER_ID, [\"name=image\", \"size=20\"], [\"name=flavor\"], 1,\n \"availability_zone\", \"key_name\", \"security_group\",\n \"private_network\", \"public_network\", \"setup.py\", \"swap\",\n \"scheduler_hints\"\n )\n self.check_xmlrpc_command_result(status, has_error=True)\n\n # Try to create a machine with wrong meta.\n status = self.proxy.server.create(\n PROVIDER_ID, [\"name=image\", \"size=20\"], [\"name=flavor\"], 1,\n \"availability_zone\", \"key_name\", \"security_group\",\n \"private_network\", \"public_network\", \"setup.py\", \"swap\",\n [\"name=scheduler_hints\"], [\"meta\"]\n )\n 
self.check_xmlrpc_command_result(status, has_error=True)\n\n # Try to create a machine with reserved meta.\n status = self.proxy.server.create(\n PROVIDER_ID, [\"name=image\", \"size=20\"], [\"name=flavor\"], 1,\n \"availability_zone\", \"key_name\", \"security_group\",\n \"private_network\", \"public_network\", \"setup.py\", \"swap\",\n [\"name=scheduler_hints\"], [\"mysql-fabric=True\"]\n )\n self.check_xmlrpc_command_result(status, has_error=True)\n\n # Create a machine.\n status = self.proxy.server.create(\n PROVIDER_ID, [\"name=image\", \"size=20\"], [\"name=flavor\"], 1,\n \"availability_zone\", \"key_name\", \"security_group\",\n \"private_network\", \"public_network\", \"setup.py\", \"swap\",\n [\"name=scheduler_hints\"], [\"name=meta\"]\n )\n self.check_xmlrpc_command_result(status)\n\n # TODO: Test other parameters that were included with database.", "def test_parameter_read(request):\n print(\"\\n--Starting:\", request.node.name)\n\n params = Parameters()\n print(params.__dict__)\n ## todo write an assert that actually tests something", "def test_base_hyper_parameters_reg(self):\n hyper_parameter_set = modelgen.generate_base_hyper_parameter_set()\n assert 'regularization_rate' in hyper_parameter_set.keys()", "def test_set_project_limits(self):\n pass", "def __init__(self, params={}, verbosity=0, testing_level=1, testing_verbosity=1):\r\n self.verbosity = verbosity\r\n self.testing_unit = UnitTests.ParticleSwarmUnitTests(testing_level=testing_level, verbosity=testing_verbosity)\r\n\r\n for key, val in params.items():\r\n self.set(key, val) # invoke set so that all continuous checking for changed parameters happens only once\r\n # place\r", "def test_getUserModeParams(self):\n add, remove = map(sorted, self.client.getUserModeParams())\n self.assertEqual(add, [])\n self.assertEqual(remove, [])", "def test_get_measure_parameters(self):\n pass", "def test_all_params(self):\n broker = FakeBroker()\n worker_helper = WorkerHelper(\"my_connector\", broker)\n self.assertEqual(worker_helper._connector_name, \"my_connector\")\n self.assertEqual(worker_helper.broker, broker)", "def test_user_can_change_inactive(self):\n self.assertTrue(self.story.user_can_change(self.user1))\n self.user1.is_active = False \n self.assertFalse(self.story.user_can_change(self.user1))", "def _load_parameter(self):", "def test_parameters(self):\n class Test(pyperry.Base): pass\n Test.add_processor('read', self.Processor)\n Test.add_processor('read', self.Processor, { 'foo': 'bar' })\n Test.add_processor('read', self.Processor, foo='bar')", "def test_update_hyperflex_feature_limit_external(self):\n pass", "def test_submithint_moderate(self):\r\n mock_module = CHModuleFactory.create(moderate='True')\r\n json_in = {'answer': '29.0', 'hint': 'This is a new hint.'}\r\n mock_module.submit_hint(json_in)\r\n self.assertTrue('29.0' not in mock_module.hints)\r\n self.assertTrue('29.0' in mock_module.mod_queue)", "def test_configure_without_inheritance(self):\n\n @Configurable(conf=category('TEST', Parameter('test', value=True)))\n class BaseTest(object):\n \"\"\"base Class to configure.\"\"\"\n\n class Test(BaseTest):\n \"\"\"Class to configure.\"\"\"\n\n targets = Test()\n\n self.assertTrue(targets.test)", "def test_save_multiple_credential(self):\n self.new_credential.save_credential()\n test_credential = Credential(\"winnie\",\"test\",\"login\",\"winnie\")\n test_credential.save_credential()\n self.assertEqual(len(Credential.credential_list),2)", "def test_default(self):\r\n self.assertEqual(self.option.default, 1234)", 
"def test_households_in_admin_unit(self):", "def test_checkLinkoStructure(self):\n self.performTestForParams()", "def _check_helper(self, value, raise_exceptions: bool = True) -> bool:\n return super(ParameterMixin, self)._check_helper(\n value, raise_exceptions=raise_exceptions\n )", "def test_get_context_parameter(params, expected):\n assert get_context_parameter(params) == expected", "def test_getboolean(self):\n self.assertEqual(self.config.getboolean('advanced','bool'),True)", "def test_create_hyperflex_proxy_setting_policy(self):\n pass", "def test_register_dynamic_plugin_manager1(self):\n pass", "def verify_common(self, tool_name, tool_instance):\n super().verify_common(tool_name, tool_instance)\n pos_args, kw_args = tool_instance.tool.call_args\n\n self.assertEqual(kw_args[\"options\"][\"oa3m\"], self.OUT_A3M_FILE)\n self.assertEqual(kw_args[\"options\"][\"output\"], self.OUT_REPORT)\n self.assertEqual(kw_args[\"options\"][\"database\"], self.DATABASE)", "def test_change_provisioned_throughput_usual_case():", "def test_framework_selections_post(self):\n pass", "def test_update_hyperflex_capability_info(self):\n pass", "def test_get_param_list(self):\n model = substitution_model.TimeReversibleNucleotide()\n self.assertEqual(model.get_param_list(), [])\n\n model = substitution_model.TimeReversibleNucleotide(\n predicates=[\"beta:transition\"]\n )\n self.assertEqual(model.get_param_list(), [\"beta\"])", "def check_valid_params(cls, **user_params):\n # Check that the appropriate number of params are provided\n if not all(key in user_params for key in cls.param.keys()):\n raise ValueError(f\"Missing parameter! Expected {cls.param.keys()} but was given {user_params.keys()}\")\n\n # Check parameter units and values\n for (key, allowed_params), user_param in zip(cls.param.items(), user_params.values()):\n\n # If both have units, check that the user param value is valid. If valid, continue. Else, error\n if type(user_param) == Quantity and type(allowed_params) == Quantity:\n if get_physical_type(user_param.unit) != get_physical_type(allowed_params.unit):\n raise UnitTypeError(f\"Incorrect units {user_param.unit} provided for parameter {key}, \"\n f\"expected {allowed_params.unit}\")\n\n elif np.isin(user_param.to(allowed_params.unit).value, allowed_params.value):\n continue\n else:\n raise ValueError(f\"Invalid value '{user_param}' provided for parameter {key}, \"\n f\"allowed value(s): {allowed_params}\")\n\n # If one only one has units, then error\n elif (type(user_param) == Quantity) ^ (type(allowed_params) == Quantity):\n # User param has units, model param is unitless\n if type(user_param) == Quantity:\n raise ValueError(f\"Invalid units {user_param.unit} for parameter {key} provided, expected None\")\n else:\n raise ValueError(f\"Missing units for parameter {key}, expected {allowed_params.unit}\")\n\n # Check that unitless user param value is valid. If valid, continue. Else, Error\n elif user_param in allowed_params:\n continue\n else:\n raise ValueError(f\"Invalid value '{user_param}' provided for parameter {key}, \"\n f\"allowed value(s): {allowed_params}\")\n\n # Check Combinations (Logic lives inside model subclasses under model.isvalid_param_combo)\n if user_params not in cls.get_param_combinations():\n raise ValueError(\n f\"Invalid parameter combination. 
See {cls.__class__.__name__}.get_param_combinations() for a \"\n \"list of allowed parameter combinations.\")", "def test_many_values(self):\n write this test!", "def check(self):\n if 'MISFIT' not in PAR:\n setattr(PAR, 'MISFIT', 'Waveform')\n\n if 'CHANNELS' not in PAR:\n raise ParameterError(PAR, 'CHANNELS')\n\n if 'READER' not in PAR:\n raise ParameterError(PAR, 'READER')\n\n if 'WRITER' not in PAR:\n setattr(PAR, 'WRITER', PAR.READER)\n\n if 'NORMALIZE' not in PAR:\n setattr(PAR, 'NORMALIZE', True)\n\n # mute settings\n if 'MUTE' not in PAR:\n setattr(PAR, 'MUTE', False)\n\n if 'MUTESLOPE' not in PAR:\n setattr(PAR, 'MUTESLOPE', 0.)\n\n if 'MUTECONST' not in PAR:\n setattr(PAR, 'MUTECONST', 0.)\n\n # filter settings\n if 'BANDPASS' not in PAR:\n setattr(PAR, 'BANDPASS', False)\n\n if 'FREQLO' not in PAR:\n setattr(PAR, 'FREQLO', 0.)\n\n if 'FREQHI' not in PAR:\n setattr(PAR, 'FREQHI', 0.)\n\n # assertions\n if PAR.READER not in dir(readers):\n print msg.ReaderError\n raise ParameterError()\n\n if PAR.WRITER not in dir(writers):\n print msg.WriterError\n raise ParameterError()", "def default_capabilities(self):", "def test_function_callparams(self):\n\n @Configurable(conf=category('', Parameter('test', value=True)))\n def twist(test=None):\n return test\n\n value = twist()\n\n self.assertTrue(value)\n\n value = twist(False)\n\n self.assertFalse(value)\n\n Configurable.get_annotations(twist)[0].conf['']['test'].value = 1\n\n value = twist()\n\n self.assertEqual(value, 1)", "def test_compatible_data(cps_subsample, puf_subsample,\n allparams, reform_xx,\n tc_objs, allparams_batch):\n # pylint: disable=too-many-arguments,too-many-locals\n # pylint: disable=too-many-statements,too-many-branches\n\n # Check NPARAMS value\n assert NPARAMS == len(allparams)\n\n # Get taxcalc objects from tc_objs fixture\n rec_xx, c_xx, puftest = tc_objs\n\n # These parameters are exempt because they are not active under\n # current law and activating them would deactivate other parameters,\n # or if it is difficult to devise a test for them.\n exempt_from_testing = [\n 'CG_ec', 'CG_reinvest_ec_rt',\n 'II_prt', 'ID_prt', 'ID_crt',\n 'CR_SchR_hc', 'ACTC_ChildNum'\n ]\n\n # Loop through the parameters in allparams_batch\n errmsg = 'ERROR: {} {}\\n'\n errors = ''\n for pname in allparams_batch:\n param = allparams_batch[pname]\n max_listed = param['valid_values']['max']\n # handle links to other params or self\n if isinstance(max_listed, str):\n if isinstance(allparams[max_listed]['value'][0], list):\n max_val = allparams[max_listed]['value'][0]\n else:\n max_val = float(allparams[max_listed]['value'][0])\n else:\n if isinstance(param['value'][0], list):\n max_val = [max_listed] * len(param['value'][0])\n else:\n max_val = max_listed\n min_listed = param['valid_values']['min']\n if isinstance(min_listed, str):\n if isinstance(allparams[min_listed]['value'][0], list):\n min_val = allparams[min_listed]['value'][0]\n else:\n min_val = float(allparams[min_listed]['value'][0])\n else:\n if isinstance(param['value'][0], list):\n min_val = [min_listed] * len(param['value'][0])\n else:\n min_val = min_listed\n # create reform dictionaries\n max_reform = copy.deepcopy(reform_xx)\n min_reform = copy.deepcopy(reform_xx)\n max_reform[XX_YEAR][str(pname)] = [max_val]\n min_reform[XX_YEAR][str(pname)] = [min_val]\n # assess whether max reform changes results\n if puftest:\n rec_yy = Records(data=puf_subsample)\n else:\n rec_yy = Records.cps_constructor(data=cps_subsample)\n p_yy = Policy()\n p_yy.implement_reform(max_reform, 
raise_errors=False)\n c_yy = Calculator(policy=p_yy, records=rec_yy, verbose=False)\n c_yy.advance_to_year(TEST_YEAR)\n c_yy.calc_all()\n if pname.startswith('BEN') and pname.endswith('_repeal'):\n max_reform_change = (\n c_yy.weighted_total('benefit_cost_total') -\n c_xx.weighted_total('benefit_cost_total')\n )\n else:\n max_reform_change = (\n c_yy.weighted_total('combined') -\n c_xx.weighted_total('combined')\n )\n min_reform_change = 0\n # assess whether min reform changes results, if max reform did not\n if max_reform_change == 0:\n p_yy = Policy()\n p_yy.implement_reform(min_reform, raise_errors=False)\n c_yy = Calculator(policy=p_yy, records=rec_xx)\n c_yy.advance_to_year(TEST_YEAR)\n c_yy.calc_all()\n if pname.startswith('BEN') and pname.endswith('_repeal'):\n min_reform_change = (\n c_yy.weighted_total('benefit_cost_total') -\n c_xx.weighted_total('benefit_cost_total')\n )\n else:\n min_reform_change = (\n c_yy.weighted_total('combined') -\n c_xx.weighted_total('combined')\n )\n if min_reform_change == 0 and pname not in exempt_from_testing:\n if puftest:\n if param['compatible_data']['puf'] is True:\n errors += errmsg.format(pname, 'is not True for puf')\n else:\n if param['compatible_data']['cps'] is True:\n errors += errmsg.format(pname, 'is not True for cps')\n if max_reform_change != 0 or min_reform_change != 0:\n if puftest:\n if param['compatible_data']['puf'] is False:\n errors += errmsg.format(pname, 'is not False for puf')\n else:\n if param['compatible_data']['cps'] is False:\n errors += errmsg.format(pname, 'is not False for cps')\n # test failure if any errors\n if errors:\n print(errors)\n assert 'compatible_data' == 'invalid'", "def test_api_calls_parameters(self):\n quantum_program = self._get_quantum_program()\n\n # Invoke with hub, group and project parameters.\n quantum_program.set_api(QE_TOKEN, QE_URL, QE_HUB, QE_GROUP, QE_PROJECT)\n\n self.log.info(quantum_program.online_backends())\n self.log.info(quantum_program.get_backend_parameters(self.backend))\n self.log.info(quantum_program.get_backend_calibration(self.backend))", "def test_subscribability(self):\n manager = ISubscriptionManager(self.root.document)\n self.assertEqual(manager.is_subscribable(), False)\n self.assertEqual(manager.subscribability, ACQUIRE_SUBSCRIBABILITY)\n\n # You can enable or disable that setting\n manager.subscribability = SUBSCRIBABLE\n self.assertEqual(manager.is_subscribable(), True)\n self.assertEqual(manager.subscribability, SUBSCRIBABLE)\n\n manager.subscribability = NOT_SUBSCRIBABLE\n self.assertEqual(manager.is_subscribable(), False)\n self.assertEqual(manager.subscribability, NOT_SUBSCRIBABLE)\n\n # You can set the setting on the parent root\n manager_root = ISubscriptionManager(self.root)\n self.assertEqual(manager_root.is_subscribable(), False)\n self.assertEqual(manager_root.subscribability, NOT_SUBSCRIBABLE)\n\n # You can change the setting. 
Not to acquired.\n manager_root.subscribability = SUBSCRIBABLE\n self.assertEqual(manager_root.is_subscribable(), True)\n self.assertEqual(manager_root.subscribability, SUBSCRIBABLE)\n\n self.assertRaises(\n AssertionError,\n setattr, manager_root, 'subscribability', ACQUIRE_SUBSCRIBABILITY)\n self.assertEqual(manager_root.is_subscribable(), True)\n self.assertEqual(manager_root.subscribability, SUBSCRIBABLE)\n\n # The setting was disabled on the document, it is still is.\n # However if we set it to acquired it will be enabled (since root is)\n self.assertEqual(manager.is_subscribable(), False)\n self.assertEqual(manager.subscribability, NOT_SUBSCRIBABLE)\n\n manager.subscribability = ACQUIRE_SUBSCRIBABILITY\n self.assertEqual(manager.is_subscribable(), True)\n self.assertEqual(manager.subscribability, ACQUIRE_SUBSCRIBABILITY)", "def test_create_hyperflex_capability_info(self):\n pass", "def test_text_classifier_set_params(self):\n pass", "def test_add_control(perfectModelEnsemble_initialized_control):\n assert perfectModelEnsemble_initialized_control.get_control()", "def identify_parameters(model: torch.nn.Module,\n type_mapping: Dict[str, Sequence],\n check_param_exist: bool = True):\n for module in model.modules():\n for _name, _types in type_mapping.items():\n if any([isinstance(module, _type) for _type in _types]):\n for param in module.parameters():\n if check_param_exist:\n assert not hasattr(param, _name)\n setattr(param, _name, True)", "def check_params_set():\n critical = {'machineinfo' : MACHINEID, \n 'error_serverinfo' : ERROR_SERVER, \n 'serverinfo' : SERVER}\n for i, val in critical.iteritems():\n if not val:\n print \"ERROR: Set value for \\\"%s\\\" in baseconfig.cfg file first\\n\" % i\n sys.exit(1)", "def test_register_dynamic_plugin_manager(self):\n pass", "def test_default(self):\r\n self.assertEqual(self.option.default, 'testing')", "def test_component_set_successful(self):\n # We create an instance of the panel so we can check existing values\n panel = ComponentAdminPanel(self.env)\n\n # Check the environment initially contains the default values.\n self.assertItemsEqual(panel.get_component_list(), self.default['component'])\n\n # create the section, option, and values in configuration\n self.env.config.set('ticket-field-config', 'component',\n ','.join(self.new['component']))\n # create component_owner option\n self.env.config.set('ticket-field-config','component_owner','test')\n\n admin_command = TicketFieldConfigCommand(self.env)\n\n # run our plugin\n admin_command.set_fields_from_config()\n\n self.assertItemsEqual(panel.get_component_list(), self.new['component'])", "def test_pwmerchactive_update_1(self, _enable_pwmerchactive):\n admin_payway = [pw['name'] for pw in admin.get_model(model='payway', _filter='is_active', value=True) if pw['is_public'] is True][0]\n admin.set_pwmerchactive(merch_id=user1.merchant1.id, payway_id=admin.payway_id[admin_payway], is_active=False)\n admin.set_pwmerchactive(merch_id=user1.merchant2.id, payway_id=admin.payway_id[admin_payway], is_active=False)\n admin.params['pw'] = {'m_id': user1.merchant2.id, 'pw_id': admin.payway_id[admin_payway]}\n user1.pwmerchactive(method='update', params={'m_lid': user1.merchant1.lid, 'payway': admin_payway, 'is_active': True})\n assert user1.resp_pwmerchactive == {'is_active': True, 'merchant_id': user1.merchant1.id, 'payway': admin_payway}\n assert admin.payway_id[admin_payway] in [pw['payway_id'] for pw in\n admin.get_model(model='pwmerchactive', _filter='merchant_id', value=user1.merchant1.id)\n 
if pw['is_active'] is True]\n assert admin.payway_id[admin_payway] in [pw['payway_id'] for pw in\n admin.get_model(model='pwmerchactive', _filter='merchant_id', value=user1.merchant2.id)\n if pw['is_active'] is False]", "def param_set(params, param):\n if param in params:\n if params[param] is True:\n return True\n return False", "def _validate_params(self):\n raise NotImplementedError('Must be implemented in subclasses.')", "def test_takes_param_list_attributes(self):\n class Test(pyperry.Base):\n def _config(cls):\n cls.attributes('id', 'name', 'poop')\n\n self.assertEqual(Test.defined_attributes, set(['id', 'name', 'poop']))" ]
[ "0.5979494", "0.564233", "0.56316334", "0.56300414", "0.55950177", "0.55662066", "0.5561874", "0.5522249", "0.55102235", "0.5508482", "0.5448773", "0.54069316", "0.5395452", "0.53479075", "0.5345451", "0.532887", "0.53067946", "0.5266664", "0.5219508", "0.52126247", "0.5177413", "0.51731807", "0.5144085", "0.51240355", "0.5108058", "0.5104126", "0.5083353", "0.50732505", "0.5071742", "0.5071742", "0.505964", "0.505935", "0.5043362", "0.5040058", "0.5037666", "0.503352", "0.50145143", "0.5014463", "0.5001931", "0.4989203", "0.4986626", "0.49851522", "0.4963252", "0.49514446", "0.49503118", "0.49493837", "0.49274692", "0.4927007", "0.491686", "0.49139982", "0.4913915", "0.4905218", "0.4897244", "0.4894103", "0.48920316", "0.48894763", "0.4887205", "0.4886015", "0.48840928", "0.48778442", "0.48762482", "0.48748744", "0.48739564", "0.4873743", "0.48724687", "0.48724598", "0.4859145", "0.48495668", "0.48491734", "0.4838136", "0.48354697", "0.48346055", "0.48334914", "0.4830935", "0.4826474", "0.48150682", "0.4810328", "0.48042092", "0.47909564", "0.47873652", "0.47872663", "0.47849438", "0.4782693", "0.4780189", "0.4779344", "0.47781095", "0.47762507", "0.47758943", "0.477251", "0.47717386", "0.47651368", "0.4759648", "0.47589335", "0.47574466", "0.47571617", "0.47565186", "0.4752311", "0.47506952", "0.47482082", "0.47451106" ]
0.5946211
1
Test the apm factory for concurrent refinement.
def test_ParameterManagerGenerator_concurrent(): components_1 = { "scale": mock_component(), "decay": mock_component(), "absorption": mock_component(), } data_manager = mock_data_manager(components_1) pmg = ParameterManagerGenerator( [data_manager], apm_type=active_parameter_manager, target=ScalingTarget(), mode="concurrent", ) apms = pmg.parameter_managers() assert len(apms) == 1 apm = apms[0] assert isinstance(apm, multi_active_parameter_manager) assert "scale" in apm.components_list assert "decay" in apm.components_list assert "absorption" in apm.components_list components_1 = { "scale": mock_component(), "decay": mock_component(), "absorption": mock_component(), } components_2 = {"1": mock_component(), "2": mock_component()} data_manager_1 = mock_data_manager(components_1) data_manager_2 = mock_data_manager(components_2) pmg = ParameterManagerGenerator( [data_manager_1, data_manager_2], apm_type=active_parameter_manager, target=ScalingTarget(), mode="concurrent", ) multi_apms = pmg.parameter_managers() assert len(multi_apms) == 1 multi_apm = multi_apms[0] assert isinstance(multi_apm, multi_active_parameter_manager) for apm in multi_apm.apm_list: assert isinstance(apm, active_parameter_manager) assert "scale" in multi_apm.apm_list[0].components_list assert "decay" in multi_apm.apm_list[0].components_list assert "absorption" in multi_apm.apm_list[0].components_list assert "1" in multi_apm.apm_list[1].components_list assert "2" in multi_apm.apm_list[1].components_list # now try fixing a component data_manager.fixed_components = ["absorption"] pmg = ParameterManagerGenerator( [data_manager], apm_type=active_parameter_manager, target=ScalingTarget(), mode="concurrent", ) apms = pmg.parameter_managers() assert len(apms) == 1 apm = apms[0] assert isinstance(apm, multi_active_parameter_manager) assert "scale" in apm.components_list assert "decay" in apm.components_list assert "absorption" not in apm.components_list
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_add_multiple_pis_simultaneously_to_vpg_check_reallocation(self):\n proj_obj, fabric_obj, pr_objs = self._create_prerequisites(\n create_second_pr=True)\n test_id = self.id()\n VPG_CLASS = self._api_server.get_resource_class('virtual-port-group')\n org_process_ae_id = VPG_CLASS._process_ae_id\n\n class MockVpg(VPG_CLASS):\n org_process_ae_id = VPG_CLASS._process_ae_id\n HOLD_API = True\n @classmethod\n def mock_process_ae_id(cls, db_obj_dict, vpg_name, obj_dict=None):\n while cls.HOLD_API:\n print('sleeping for HOLD_API to clear for '\n 'args = %s' % obj_dict)\n gevent.sleep(0.5)\n return cls.org_process_ae_id(db_obj_dict, vpg_name, obj_dict)\n\n def process_ae_ids(x):\n return [int(i) for i in sorted(x) if i is not None]\n\n def get_zk_ae_ids(prs=None):\n prefix = os.path.join(\n self.__class__.__name__,\n 'id', 'aggregated-ethernet')\n zk_client = self._api_server._db_conn._zk_db._zk_client._zk_client\n if not prs:\n prs = [os.path.join(prefix, pr.name) for pr in pr_objs]\n else:\n if not isinstance(prs, list):\n prs = [prs]\n prs = [os.path.join(prefix, pr) for pr in prs]\n ae_ids = {}\n for pr in prs:\n pr_org = os.path.split(pr)[-1]\n ae_ids[pr_org] = zk_client.get_children(pr)\n return ae_ids\n\n pi_per_pr = 6\n pi_objs = {}\n pr1_pi_names = ['%s_pr1_pi%d' % (test_id, i) for\n i in range(1, pi_per_pr + 1)]\n pr2_pi_names = ['%s_pr2_pi%d' % (test_id, i) for\n i in range(1, pi_per_pr + 1)]\n pr1_pi_objs = self._create_pi_objects(pr_objs[0], pr1_pi_names)\n pr2_pi_objs = self._create_pi_objects(pr_objs[1], pr2_pi_names)\n pi_objs.update(pr1_pi_objs)\n pi_objs.update(pr2_pi_objs)\n\n # create a VPG\n vpg_count = 3\n vpg_names = ['vpg_%s_%s' % (test_id, i) for i in range(\n 1, vpg_count + 1)]\n vpg_objs = self._create_vpgs(fabric_obj, vpg_names)\n\n # record AE-IDs in ZK before creating any VPG\n ae_ids = [x for x in get_zk_ae_ids().values() if x]\n self.assertEqual(len(ae_ids), 0)\n\n def _attach_pi_simultaneously(vpg_obj, pi_uuids):\n # Attach PIs from PR1 to VPG-1\n vpg_obj = self._vnc_lib.virtual_port_group_read(id=vpg_obj.uuid)\n try:\n # mock _process_ae_id at VPG resource\n VPG_CLASS._process_ae_id = MockVpg.mock_process_ae_id\n MockVpg.HOLD_API = True\n for pi_uuid in pi_uuids:\n gevent.spawn(\n self.api.ref_update,\n \"virtual-port-group\",\n vpg_obj.uuid,\n \"physical-interface\",\n pi_uuid,\n None,\n \"ADD\",\n None)\n gevent.sleep(2)\n MockVpg.HOLD_API = False\n gevent.sleep(3)\n except gevent.timeout.Timeout:\n self.assertFalse(\n False,\n '%s failed unexpectedly' % VPG_CLASS._process_ae_id)\n finally:\n # reset mock to original\n VPG_CLASS._process_ae_id = org_process_ae_id\n vpg_obj = self._vnc_lib.virtual_port_group_read(id=vpg_obj.uuid)\n pi_refs = vpg_obj.get_physical_interface_refs()\n return vpg_obj, pi_refs\n\n # Case 1\n # Attach 2 PIs from PR1 to VPG-1\n vpg_name = vpg_names[0]\n vpg_obj = vpg_objs[vpg_name]\n vpg_obj = self._vnc_lib.virtual_port_group_read(id=vpg_obj.uuid)\n pi_uuids = [pi.uuid for pi in list(pr1_pi_objs.values())[0:2]]\n vpg_obj, pi_refs = _attach_pi_simultaneously(vpg_obj, pi_uuids)\n # verify PI-refs are correct\n self.assertEqual(len(pi_refs), 2)\n vpg_ae_ids = {ref['href'].split('/')[-1]: ref['attr'].ae_num\n for ref in pi_refs}\n # verify all AE-IDs allocated per prouter are unique\n self.assertEqual(len(set(vpg_ae_ids.keys())), len(pi_refs))\n self.assertEqual(len(list(vpg_ae_ids.values())), 2)\n self.assertEqual(len(set(vpg_ae_ids.values())), 1)\n ae_id_sorted = process_ae_ids(vpg_ae_ids.values())\n self.assertEqual(ae_id_sorted, 
[0, 0])\n # verification at ZK for AE-IDs in Physical Routers\n pr_ae_ids = get_zk_ae_ids()\n self.assertEqual(len(pr_ae_ids[pr_objs[0].name]), 1)\n self.assertEqual(process_ae_ids(pr_ae_ids[pr_objs[0].name]), [0])\n self.assertEqual(len(pr_ae_ids[pr_objs[1].name]), 0)\n self.assertEqual(process_ae_ids(pr_ae_ids[pr_objs[1].name]), [])\n\n # Case 2\n # Attach 2 PIs from PR1 to VPG-2\n vpg_name = vpg_names[1]\n vpg_obj = vpg_objs[vpg_name]\n vpg_obj = self._vnc_lib.virtual_port_group_read(id=vpg_obj.uuid)\n pi_uuids = [pi.uuid for pi in list(pr1_pi_objs.values())[2:4]]\n vpg_obj, pi_refs = _attach_pi_simultaneously(vpg_obj, pi_uuids)\n # verify PI-refs are correct\n self.assertEqual(len(pi_refs), 2)\n vpg_ae_ids = {ref['href'].split('/')[-1]: ref['attr'].ae_num\n for ref in pi_refs}\n # verify all AE-IDs allocated per prouter are unique\n self.assertEqual(len(set(vpg_ae_ids.keys())), len(pi_refs))\n self.assertEqual(len(list(vpg_ae_ids.values())), 2)\n self.assertEqual(len(set(vpg_ae_ids.values())), 1)\n ae_id_sorted = process_ae_ids(vpg_ae_ids.values())\n self.assertEqual(ae_id_sorted, [1, 1])\n # verification at ZK for AE-IDs in Physical Routers\n pr_ae_ids = get_zk_ae_ids()\n self.assertEqual(len(pr_ae_ids[pr_objs[0].name]), 2)\n self.assertEqual(process_ae_ids(pr_ae_ids[pr_objs[0].name]), [0, 1])\n self.assertEqual(len(pr_ae_ids[pr_objs[1].name]), 0)\n self.assertEqual(process_ae_ids(pr_ae_ids[pr_objs[1].name]), [])\n\n # Case 3\n # Deattach 1 PIs from PR1 to VPG-1\n vpg_name = vpg_names[0]\n vpg_obj = vpg_objs[vpg_name]\n vpg_obj = self._vnc_lib.virtual_port_group_read(id=vpg_obj.uuid)\n pi_obj = list(pr1_pi_objs.values())[0]\n pi_obj = self._vnc_lib.physical_interface_read(id=pi_obj.uuid)\n vpg_obj.del_physical_interface(pi_obj)\n self.api.virtual_port_group_update(vpg_obj)\n vpg_obj = self._vnc_lib.virtual_port_group_read(id=vpg_obj.uuid)\n pi_refs = vpg_obj.get_physical_interface_refs()\n # verify PI-refs are correct\n self.assertEqual(len(pi_refs), 1)\n vpg_ae_ids = {ref['href'].split('/')[-1]: ref['attr'].ae_num\n for ref in pi_refs}\n # verify all AE-IDs allocated per prouter are unique\n self.assertEqual(len(set(vpg_ae_ids.keys())), len(pi_refs))\n self.assertIsNone(list(vpg_ae_ids.values())[0])\n ae_id_sorted = process_ae_ids(vpg_ae_ids.values())\n self.assertEqual(ae_id_sorted, [])\n # verification at ZK for AE-IDs in Physical Routers\n pr_ae_ids = get_zk_ae_ids()\n self.assertEqual(len(pr_ae_ids[pr_objs[0].name]), 1)\n self.assertEqual(process_ae_ids(pr_ae_ids[pr_objs[0].name]), [1])\n self.assertEqual(len(pr_ae_ids[pr_objs[1].name]), 0)\n self.assertEqual(process_ae_ids(pr_ae_ids[pr_objs[1].name]), [])\n\n # Case 4\n # Attach 2 PIs from PR1 to VPG-3\n vpg_name = vpg_names[2]\n vpg_obj = vpg_objs[vpg_name]\n vpg_obj = self._vnc_lib.virtual_port_group_read(id=vpg_obj.uuid)\n pi_uuids = [pi.uuid for pi in list(pr1_pi_objs.values())[4:6]]\n vpg_obj, pi_refs = _attach_pi_simultaneously(vpg_obj, pi_uuids)\n # verify PI-refs are correct\n self.assertEqual(len(pi_refs), 2)\n vpg_ae_ids = {ref['href'].split('/')[-1]: ref['attr'].ae_num\n for ref in pi_refs}\n # verify all AE-IDs allocated per prouter are unique\n self.assertEqual(len(set(vpg_ae_ids.keys())), len(pi_refs))\n self.assertEqual(len(list(vpg_ae_ids.values())), 2)\n self.assertEqual(len(set(vpg_ae_ids.values())), 1)\n ae_id_sorted = process_ae_ids(vpg_ae_ids.values())\n self.assertEqual(ae_id_sorted, [0, 0])\n # verification at ZK for AE-IDs in Physical Routers\n pr_ae_ids = get_zk_ae_ids()\n 
self.assertEqual(len(pr_ae_ids[pr_objs[0].name]), 2)\n self.assertEqual(process_ae_ids(pr_ae_ids[pr_objs[0].name]), [0, 1])\n self.assertEqual(len(pr_ae_ids[pr_objs[1].name]), 0)\n self.assertEqual(process_ae_ids(pr_ae_ids[pr_objs[1].name]), [])\n\n # Case 5\n # Attach 1 PIs from PR1 to VPG-1\n vpg_name = vpg_names[0]\n vpg_obj = vpg_objs[vpg_name]\n vpg_obj = self._vnc_lib.virtual_port_group_read(id=vpg_obj.uuid)\n pi_obj = list(pr1_pi_objs.values())[0]\n pi_obj = self._vnc_lib.physical_interface_read(id=pi_obj.uuid)\n vpg_obj.add_physical_interface(pi_obj)\n self._vnc_lib.virtual_port_group_update(vpg_obj)\n vpg_obj = self._vnc_lib.virtual_port_group_read(id=vpg_obj.uuid)\n pi_refs = vpg_obj.get_physical_interface_refs()\n # verify PI-refs are correct\n self.assertEqual(len(pi_refs), 2)\n vpg_ae_ids = {ref['href'].split('/')[-1]: ref['attr'].ae_num\n for ref in pi_refs}\n # verify all AE-IDs allocated per prouter are unique\n self.assertEqual(len(set(vpg_ae_ids.keys())), len(pi_refs))\n self.assertEqual(len(list(vpg_ae_ids.values())), 2)\n self.assertEqual(len(set(vpg_ae_ids.values())), 1)\n ae_id_sorted = process_ae_ids(vpg_ae_ids.values())\n self.assertEqual(ae_id_sorted, [2, 2])\n # verification at ZK for AE-IDs in Physical Routers\n pr_ae_ids = get_zk_ae_ids()\n self.assertEqual(len(pr_ae_ids[pr_objs[0].name]), 3)\n self.assertEqual(process_ae_ids(pr_ae_ids[pr_objs[0].name]), [0, 1, 2])\n self.assertEqual(len(pr_ae_ids[pr_objs[1].name]), 0)\n self.assertEqual(process_ae_ids(pr_ae_ids[pr_objs[1].name]), [])", "def test_multiple_factories(self, mocker):\n sdk_ready_flag = threading.Event()\n\n def _init(self, ready_flag, some, auth_api, streaming_enabled, telemetry_runtime_producer, telemetry_init_consumer, sse_url=None):\n self._ready_flag = ready_flag\n self._synchronizer = mocker.Mock(spec=Synchronizer)\n self._streaming_enabled = False\n self._telemetry_runtime_producer = telemetry_runtime_producer\n self._telemetry_init_consumer = telemetry_init_consumer\n mocker.patch('splitio.sync.manager.Manager.__init__', new=_init)\n\n def _start(self, *args, **kwargs):\n sdk_ready_flag.set()\n mocker.patch('splitio.sync.manager.Manager.start', new=_start)\n\n def _stop(self, *args, **kwargs):\n pass\n mocker.patch('splitio.sync.manager.Manager.stop', new=_stop)\n\n mockManager = Manager(sdk_ready_flag, mocker.Mock(), mocker.Mock(), False, mocker.Mock(), mocker.Mock())\n\n def _make_factory_with_apikey(apikey, *_, **__):\n return SplitFactory(apikey, {}, True, mocker.Mock(spec=ImpressionsManager), mockManager, mocker.Mock(), mocker.Mock(), mocker.Mock())\n\n factory_module_logger = mocker.Mock()\n build_in_memory = mocker.Mock()\n build_in_memory.side_effect = _make_factory_with_apikey\n build_redis = mocker.Mock()\n build_redis.side_effect = _make_factory_with_apikey\n build_localhost = mocker.Mock()\n build_localhost.side_effect = _make_factory_with_apikey\n mocker.patch('splitio.client.factory._LOGGER', new=factory_module_logger)\n mocker.patch('splitio.client.factory._build_in_memory_factory', new=build_in_memory)\n mocker.patch('splitio.client.factory._build_redis_factory', new=build_redis)\n mocker.patch('splitio.client.factory._build_localhost_factory', new=build_localhost)\n\n _INSTANTIATED_FACTORIES.clear() # Clear all factory counters for testing purposes\n\n factory1 = get_factory('some_api_key')\n assert _INSTANTIATED_FACTORIES['some_api_key'] == 1\n assert factory_module_logger.warning.mock_calls == []\n\n factory2 = get_factory('some_api_key')\n assert 
_INSTANTIATED_FACTORIES['some_api_key'] == 2\n assert factory_module_logger.warning.mock_calls == [mocker.call(\n \"factory instantiation: You already have %d %s with this SDK Key. \"\n \"We recommend keeping only one instance of the factory at all times \"\n \"(Singleton pattern) and reusing it throughout your application.\",\n 1,\n 'factory'\n )]\n\n factory_module_logger.reset_mock()\n factory3 = get_factory('some_api_key')\n assert _INSTANTIATED_FACTORIES['some_api_key'] == 3\n assert factory_module_logger.warning.mock_calls == [mocker.call(\n \"factory instantiation: You already have %d %s with this SDK Key. \"\n \"We recommend keeping only one instance of the factory at all times \"\n \"(Singleton pattern) and reusing it throughout your application.\",\n 2,\n 'factories'\n )]\n\n factory_module_logger.reset_mock()\n factory4 = get_factory('some_other_api_key')\n assert _INSTANTIATED_FACTORIES['some_api_key'] == 3\n assert _INSTANTIATED_FACTORIES['some_other_api_key'] == 1\n assert factory_module_logger.warning.mock_calls == [mocker.call(\n \"factory instantiation: You already have an instance of the Split factory. \"\n \"Make sure you definitely want this additional instance. \"\n \"We recommend keeping only one instance of the factory at all times \"\n \"(Singleton pattern) and reusing it throughout your application.\"\n )]\n\n event = threading.Event()\n factory1.destroy(event)\n event.wait()\n assert _INSTANTIATED_FACTORIES['some_other_api_key'] == 1\n assert _INSTANTIATED_FACTORIES['some_api_key'] == 2\n factory2.destroy()\n factory3.destroy()\n factory4.destroy()", "def test_add_multiple_pis_simultaneously_to_vpg_check_deallocation(self):\n proj_obj, fabric_obj, pr_objs = self._create_prerequisites(\n create_second_pr=True)\n test_id = self.id()\n VPG_CLASS = self._api_server.get_resource_class('virtual-port-group')\n org_process_ae_id = VPG_CLASS._process_ae_id\n\n class MockVpg(VPG_CLASS):\n org_process_ae_id = VPG_CLASS._process_ae_id\n HOLD_API = True\n @classmethod\n def mock_process_ae_id(cls, db_obj_dict, vpg_name, obj_dict=None):\n while cls.HOLD_API:\n print('sleeping for HOLD_API to clear for '\n 'args = %s' % obj_dict)\n gevent.sleep(0.5)\n return cls.org_process_ae_id(db_obj_dict, vpg_name, obj_dict)\n\n def process_ae_ids(x):\n return [int(i) for i in sorted(x) if i is not None]\n\n def get_zk_ae_ids(prs=None):\n prefix = os.path.join(\n self.__class__.__name__,\n 'id', 'aggregated-ethernet')\n zk_client = self._api_server._db_conn._zk_db._zk_client._zk_client\n if not prs:\n prs = [os.path.join(prefix, pr.name) for pr in pr_objs]\n else:\n if not isinstance(prs, list):\n prs = [prs]\n prs = [os.path.join(prefix, pr) for pr in prs]\n ae_ids = {}\n for pr in prs:\n pr_org = os.path.split(pr)[-1]\n ae_ids[pr_org] = zk_client.get_children(pr)\n return ae_ids\n\n pi_per_pr = 1\n pi_objs = {}\n pr1_pi_names = ['%s_pr1_pi%d' % (test_id, i) for\n i in range(1, pi_per_pr + 1)]\n pr2_pi_names = ['%s_pr2_pi%d' % (test_id, i) for\n i in range(1, pi_per_pr + 1)]\n pr1_pi_objs = self._create_pi_objects(pr_objs[0], pr1_pi_names)\n pr2_pi_objs = self._create_pi_objects(pr_objs[1], pr2_pi_names)\n pi_objs.update(pr1_pi_objs)\n pi_objs.update(pr2_pi_objs)\n\n # create a VPG\n vpg_count = 1\n vpg_names = ['vpg_%s_%s' % (test_id, i) for i in range(\n 1, vpg_count + 1)]\n vpg_objs = self._create_vpgs(fabric_obj, vpg_names)\n\n # record AE-IDs in ZK before creating any VPG\n ae_ids = [x for x in get_zk_ae_ids().values() if x]\n self.assertEqual(len(ae_ids), 0)\n\n def 
_attach_pi_simultaneously(vpg_obj, pi_uuids):\n # Attach PIs from PR1 to VPG-1\n vpg_obj = self._vnc_lib.virtual_port_group_read(id=vpg_obj.uuid)\n try:\n # mock _process_ae_id at VPG resource\n VPG_CLASS._process_ae_id = MockVpg.mock_process_ae_id\n MockVpg.HOLD_API = True\n for pi_uuid in pi_uuids:\n gevent.spawn(\n self.api.ref_update,\n \"virtual-port-group\",\n vpg_obj.uuid,\n \"physical-interface\",\n pi_uuid,\n None,\n \"ADD\",\n None)\n gevent.sleep(2)\n MockVpg.HOLD_API = False\n gevent.sleep(3)\n except gevent.timeout.Timeout:\n self.assertFalse(\n False,\n '%s failed unexpectedly' % VPG_CLASS._process_ae_id)\n finally:\n # reset mock to original\n VPG_CLASS._process_ae_id = org_process_ae_id\n vpg_obj = self._vnc_lib.virtual_port_group_read(id=vpg_obj.uuid)\n pi_refs = vpg_obj.get_physical_interface_refs()\n return vpg_obj, pi_refs\n\n # Case 1\n # Attach 2 PIs from PR1 to VPG-1\n vpg_name = vpg_names[0]\n vpg_obj = vpg_objs[vpg_name]\n vpg_obj = self._vnc_lib.virtual_port_group_read(id=vpg_obj.uuid)\n pr1_pi_uuids = list(pr1_pi_objs.values())[0].uuid\n pr2_pi_uuids = list(pr2_pi_objs.values())[0].uuid\n pi_uuids = [pr1_pi_uuids, pr2_pi_uuids]\n vpg_obj, pi_refs = _attach_pi_simultaneously(vpg_obj, pi_uuids)\n # verify PI-refs are correct\n self.assertEqual(len(pi_refs), 2)\n vpg_ae_ids = {ref['href'].split('/')[-1]: ref['attr'].ae_num\n for ref in pi_refs}\n # verify all AE-IDs allocated per prouter are unique\n self.assertEqual(len(set(vpg_ae_ids.keys())), len(pi_refs))\n self.assertEqual(len(list(vpg_ae_ids.values())), 2)\n self.assertEqual(len(set(vpg_ae_ids.values())), 1)\n ae_id_sorted = process_ae_ids(vpg_ae_ids.values())\n self.assertEqual(ae_id_sorted, [0, 0])\n # verification at ZK for AE-IDs in Physical Routers\n pr_ae_ids = get_zk_ae_ids()\n self.assertEqual(len(pr_ae_ids[pr_objs[0].name]), 1)\n self.assertEqual(process_ae_ids(pr_ae_ids[pr_objs[0].name]), [0])\n self.assertEqual(len(pr_ae_ids[pr_objs[1].name]), 1)\n self.assertEqual(process_ae_ids(pr_ae_ids[pr_objs[1].name]), [0])\n\n # Case 2\n # Deattach 1 PIs from PR1 to VPG-1\n vpg_name = vpg_names[0]\n vpg_obj = vpg_objs[vpg_name]\n vpg_obj = self._vnc_lib.virtual_port_group_read(id=vpg_obj.uuid)\n pi_obj = list(pr1_pi_objs.values())[0]\n pi_obj = self._vnc_lib.physical_interface_read(id=pi_obj.uuid)\n vpg_obj.del_physical_interface(pi_obj)\n self.api.virtual_port_group_update(vpg_obj)\n vpg_obj = self._vnc_lib.virtual_port_group_read(id=vpg_obj.uuid)\n pi_refs = vpg_obj.get_physical_interface_refs()\n # verify PI-refs are correct\n self.assertEqual(len(pi_refs), 1)\n vpg_ae_ids = {ref['href'].split('/')[-1]: ref['attr'].ae_num\n for ref in pi_refs}\n # verify all AE-IDs allocated per prouter are unique\n self.assertEqual(len(set(vpg_ae_ids.keys())), len(pi_refs))\n self.assertIsNone(list(vpg_ae_ids.values())[0])\n ae_id_sorted = process_ae_ids(vpg_ae_ids.values())\n self.assertEqual(ae_id_sorted, [])\n # verification at ZK for AE-IDs in Physical Routers\n pr_ae_ids = get_zk_ae_ids()\n self.assertEqual(len(pr_ae_ids[pr_objs[0].name]), 0)\n self.assertEqual(process_ae_ids(pr_ae_ids[pr_objs[0].name]), [])\n self.assertEqual(len(pr_ae_ids[pr_objs[1].name]), 0)\n self.assertEqual(process_ae_ids(pr_ae_ids[pr_objs[1].name]), [])", "def test_multi_apm():\n\n components_1 = {\n \"scale\": mock_component(),\n \"decay\": mock_component(),\n \"absorption\": mock_component(),\n }\n components_2 = {\"scale\": mock_component(), \"decay\": mock_component()}\n\n multi_apm = multi_active_parameter_manager(\n ScalingTarget(),\n 
[components_1, components_2],\n [[\"scale\", \"decay\"], [\"scale\"]],\n active_parameter_manager,\n )\n\n # Test correct setup of apm_list attribute.\n for apm in multi_apm.apm_list:\n assert isinstance(apm, active_parameter_manager)\n assert len(multi_apm.apm_list) == 2\n assert multi_apm.components_list == [\"scale\", \"decay\", \"scale\"]\n assert multi_apm.n_active_params == 3\n assert multi_apm.apm_data[0] == {\"start_idx\": 0, \"end_idx\": 2}\n assert multi_apm.apm_data[1] == {\"start_idx\": 2, \"end_idx\": 3}\n\n # Test parameter selection.\n multi_apm.set_param_vals(flex.double([3.0, 2.5, 2.0]))\n assert multi_apm.get_param_vals() == flex.double([3.0, 2.5, 2.0])\n assert multi_apm.select_parameters(0) == flex.double([3.0, 2.5])\n assert multi_apm.select_parameters(1) == flex.double([2.0])\n\n # Test setting parameter esds.\n multi_apm.set_param_esds(flex.double([0.1, 0.2, 0.3]))\n assert components_1[\"scale\"].free_parameter_esds == flex.double([0.1])\n assert components_1[\"decay\"].free_parameter_esds == flex.double([0.2])\n assert components_2[\"scale\"].free_parameter_esds == flex.double([0.3])\n\n # Test setting var_cov matrices for each component.\n var_cov = flex.double([1.0, 0.5, 0.5, 0.5, 2.0, 0.5, 0.5, 0.5, 3.0])\n var_cov.reshape(flex.grid(3, 3))\n multi_apm.calculate_model_state_uncertainties(var_cov)\n assert components_1[\"scale\"].var_cov_matrix[0, 0] == 1.0\n assert components_1[\"decay\"].var_cov_matrix[0, 0] == 2.0\n assert components_2[\"scale\"].var_cov_matrix[0, 0] == 3.0", "def acquire(ABC) -> bool:", "def test_run_alpha_rarefaction_parallel(self):\r\n\r\n run_alpha_rarefaction(\r\n self.test_data['biom'][0],\r\n self.test_data['map'][0],\r\n self.test_out,\r\n call_commands_serially,\r\n self.params,\r\n self.qiime_config,\r\n tree_fp=self.test_data['tree'][0],\r\n num_steps=5,\r\n parallel=True,\r\n min_rare_depth=3,\r\n max_rare_depth=18,\r\n status_update_callback=no_status_updates)\r\n\r\n html_fp = join(self.test_out, 'alpha_rarefaction_plots',\r\n 'rarefaction_plots.html')\r\n pd_averages_fp = join(self.test_out, 'alpha_rarefaction_plots',\r\n 'average_tables', 'PD_whole_treeSampleType.txt')\r\n pd_collated_fp = join(self.test_out, 'alpha_div_collated',\r\n 'PD_whole_tree.txt')\r\n\r\n # Confirm that palm and gut alpha diversities are different,\r\n # and suggestive of statistical significance (we only have a\r\n # few sequences, so we don't get significant results)\r\n ttest_res, alpha_avg = compare_alpha_diversities(open(pd_collated_fp),\r\n open(\r\n self.test_data[\r\n 'map'][0]),\r\n 'SampleType',\r\n 18,\r\n test_type='parametric')\r\n feces_palm_t = ttest_res[('feces', 'L_palm')][0]\r\n self.assertTrue(feces_palm_t < 0,\r\n \"t-statistic too high: %1.3f, but should be less than 0\"\r\n % feces_palm_t)\r\n\r\n # check that final output files have non-zero size\r\n self.assertTrue(getsize(html_fp) > 0)\r\n\r\n # Check that the log file is created and has size > 0\r\n log_fp = glob(join(self.test_out, 'log*.txt'))[0]\r\n self.assertTrue(getsize(log_fp) > 0)", "def test_general_apm():\n components = {\n \"scale\": mock_component(),\n \"decay\": mock_component(),\n \"absorption\": mock_component(),\n }\n\n apm = active_parameter_manager(components, [\"scale\", \"decay\"])\n assert \"decay\" in apm.components_list\n assert \"scale\" in apm.components_list\n assert \"absorption\" not in apm.components_list\n assert apm.n_active_params == (\n components[\"scale\"].n_params + components[\"decay\"].n_params\n )\n n_cumul = 0\n for component in 
apm.components:\n assert apm.components[component][\"n_params\"] == components[component].n_params\n assert apm.components[component][\"start_idx\"] == n_cumul\n assert (\n apm.components[component][\"end_idx\"]\n == n_cumul + apm.components[component][\"n_params\"]\n )\n n_cumul += apm.components[component][\"n_params\"]\n\n apm.set_param_vals(flex.double([2.0, 1.5]))\n assert apm.get_param_vals() == flex.double([2.0, 1.5])\n # Test params were updated in components\n assert list(components[\"scale\"].free_parameters) == [2.0]\n assert list(components[\"decay\"].free_parameters) == [1.5]\n # Test selection of parameters\n decay_params = apm.select_parameters(\"decay\")\n assert len(decay_params) == 1\n assert decay_params[0] == 1.5\n\n # Test calculate model state uncertainties\n var_cov = flex.double([1.0, 0.5, 0.5, 2.0])\n var_cov.reshape(flex.grid(2, 2))\n apm.calculate_model_state_uncertainties(var_cov)\n assert components[\"scale\"].var_cov_matrix[0, 0] == 1.0\n assert components[\"decay\"].var_cov_matrix[0, 0] == 2.0\n\n # Test set param esds.\n apm.set_param_esds(flex.double([0.1, 0.2]))\n assert components[\"scale\"].free_parameter_esds == flex.double([0.1])\n assert components[\"decay\"].free_parameter_esds == flex.double([0.2])", "def test_fleur_relax_continue_converged(self, run_with_cache, mock_code_factory):\n assert False", "def test_ipam_services_update(self):\n pass", "def testA_StraightThrough(self):\n # Do pre-submit job check\n nRunning = getCondorRunningJobs()\n self.assertEqual(nRunning, 0, \"User currently has %i running jobs. Test will not continue\" % (nRunning))\n\n myThread = threading.currentThread()\n workload = self.createTestWorkload()\n config = self.getConfig()\n\n\n name = 'WMAgent_Test1'\n site = self.sites[0]\n nSubs = 5\n nFiles = 10\n workloadPath = os.path.join(self.testDir, 'workloadTest',\n 'TestWorkload', 'WMSandbox',\n 'WMWorkload.pkl')\n\n # Create a collection of files\n self.createFileCollection(name = name, nSubs = nSubs,\n nFiles = nFiles,\n workflowURL = workloadPath,\n site = site)\n\n\n\n ############################################################\n # Test the JobCreator\n\n\n config.Agent.componentName = 'JobCreator'\n testJobCreator = JobCreatorPoller(config = config)\n\n testJobCreator.algorithm()\n time.sleep(5)\n\n\n # Did all jobs get created?\n getJobsAction = self.daoFactory(classname = \"Jobs.GetAllJobs\")\n result = getJobsAction.execute(state = 'Created', jobType = \"Processing\")\n self.assertEqual(len(result), nSubs*nFiles)\n\n\n # Count database objects\n result = myThread.dbi.processData('SELECT * FROM wmbs_sub_files_acquired')[0].fetchall()\n self.assertEqual(len(result), nSubs * nFiles)\n\n\n # Find the test directory\n testDirectory = os.path.join(self.testDir, 'TestWorkload', 'ReReco')\n self.assertTrue('JobCollection_1_0' in os.listdir(testDirectory))\n self.assertTrue(len(os.listdir(testDirectory)) <= 20)\n\n groupDirectory = os.path.join(testDirectory, 'JobCollection_1_0')\n\n # First job should be in here\n self.assertTrue('job_1' in os.listdir(groupDirectory))\n jobFile = os.path.join(groupDirectory, 'job_1', 'job.pkl')\n self.assertTrue(os.path.isfile(jobFile))\n with open(jobFile, 'rb') as f:\n job = pickle.load(f)\n\n\n self.assertEqual(job['workflow'], name)\n self.assertEqual(len(job['input_files']), 1)\n self.assertEqual(os.path.basename(job['sandbox']), 'TestWorkload-Sandbox.tar.bz2')\n\n\n\n\n\n\n\n\n\n\n ###############################################################\n # Now test the JobSubmitter\n\n 
config.Agent.componentName = 'JobSubmitter'\n testJobSubmitter = JobSubmitterPoller(config = config)\n\n\n testJobSubmitter.algorithm()\n\n\n # Check that jobs are in the right state\n result = getJobsAction.execute(state = 'Created', jobType = \"Processing\")\n self.assertEqual(len(result), 0)\n result = getJobsAction.execute(state = 'Executing', jobType = \"Processing\")\n self.assertEqual(len(result), nSubs * nFiles)\n\n\n\n # Check assigned locations\n getLocationAction = self.daoFactory(classname = \"Jobs.GetLocation\")\n for id in result:\n loc = getLocationAction.execute(jobid = id)\n self.assertEqual(loc, [[site]])\n\n\n # Check to make sure we have running jobs\n nRunning = getCondorRunningJobs()\n self.assertEqual(nRunning, nFiles * nSubs)\n\n\n #################################################################\n # Now the JobTracker\n\n\n config.Agent.componentName = 'JobTracker'\n testJobTracker = JobTrackerPoller(config = config)\n testJobTracker.setup()\n\n testJobTracker.algorithm()\n\n # Running the algo without removing the jobs should do nothing\n result = getJobsAction.execute(state = 'Executing', jobType = \"Processing\")\n self.assertEqual(len(result), nSubs * nFiles)\n\n\n condorRM()\n time.sleep(1)\n\n # All jobs gone?\n nRunning = getCondorRunningJobs()\n self.assertEqual(nRunning, 0)\n\n\n testJobTracker.algorithm()\n time.sleep(5)\n\n # Running the algo without removing the jobs should do nothing\n result = getJobsAction.execute(state = 'Executing', jobType = \"Processing\")\n self.assertEqual(len(result), 0)\n result = getJobsAction.execute(state = 'Complete', jobType = \"Processing\")\n self.assertEqual(len(result), nSubs * nFiles)\n\n\n\n\n #################################################################\n # Now the JobAccountant\n\n # First you need to load all jobs\n\n\n self.getFWJRAction = self.daoFactory(classname = \"Jobs.GetFWJRByState\")\n completeJobs = self.getFWJRAction.execute(state = \"complete\")\n\n\n # Create reports for all jobs\n self.createReports(jobs = completeJobs, retryCount = 0)\n\n\n\n\n\n\n config.Agent.componentName = 'JobAccountant'\n testJobAccountant = JobAccountantPoller(config = config)\n testJobAccountant.setup()\n\n\n # It should do something with the jobs\n testJobAccountant.algorithm()\n\n\n # All the jobs should be done now\n result = getJobsAction.execute(state = 'Complete', jobType = \"Processing\")\n self.assertEqual(len(result), 0)\n result = getJobsAction.execute(state = 'Success', jobType = \"Processing\")\n self.assertEqual(len(result), nSubs * nFiles)\n\n\n\n #######################################################################\n # Now the JobArchiver\n\n\n config.Agent.componentName = 'JobArchiver'\n testJobArchiver = JobArchiverPoller(config = config)\n\n\n testJobArchiver.algorithm()\n\n # All the jobs should be cleaned up\n result = getJobsAction.execute(state = 'Success', jobType = \"Processing\")\n self.assertEqual(len(result), 0)\n result = getJobsAction.execute(state = 'Cleanout', jobType = \"Processing\")\n self.assertEqual(len(result), nSubs * nFiles)\n\n\n logDir = os.path.join(self.testDir, 'logs')\n\n for job in completeJobs:\n self.assertFalse(os.path.exists(job['fwjr_path']))\n jobFolder = 'JobCluster_%i' \\\n % (int(job['id']/config.JobArchiver.numberOfJobsToCluster))\n jobPath = os.path.join(logDir, jobFolder, 'Job_%i.tar' %(job['id']))\n self.assertTrue(os.path.isfile(jobPath))\n self.assertTrue(os.path.getsize(jobPath) > 0)\n\n\n\n\n 
###########################################################################\n # Now the TaskAchiver\n\n\n config.Agent.componentName = 'TaskArchiver'\n testTaskArchiver = TaskArchiverPoller(config = config)\n\n\n testTaskArchiver.algorithm()\n\n\n result = getJobsAction.execute(state = 'Cleanout', jobType = \"Processing\")\n self.assertEqual(len(result), 0)\n\n\n for jdict in completeJobs:\n job = Job(id = jdict['id'])\n self.assertFalse(job.exists())\n\n\n\n\n\n if os.path.isdir('testDir'):\n shutil.rmtree('testDir')\n shutil.copytree('%s' %self.testDir, os.path.join(os.getcwd(), 'testDir'))\n\n\n\n\n return", "def test_active_inference_SPM_1b(self):", "def test_parego(facade, make_scenario, configspace):\n N_TRIALS = 64\n RETRAIN_AFTER = 8\n\n scenario: Scenario = make_scenario(configspace, use_multi_objective=True, n_trials=N_TRIALS)\n multi_objective_algorithm = WrapStrategy(ParEGO, scenario=scenario)\n intensifier = Intensifier(scenario, max_config_calls=1, max_incumbents=10)\n config_selector = ConfigSelector(scenario, retrain_after=RETRAIN_AFTER)\n initial_design = RandomInitialDesign(scenario, n_configs=1)\n\n smac = facade(\n scenario=scenario,\n target_function=tae,\n multi_objective_algorithm=multi_objective_algorithm,\n intensifier=intensifier,\n config_selector=config_selector,\n initial_design=initial_design,\n overwrite=True,\n )\n incumbents = smac.optimize()\n\n sorted_incumbents = []\n for incumbent in incumbents:\n x, y = func(incumbent[\"x\"])\n sorted_incumbents.append((x, y))\n\n sorted_incumbents = sorted(sorted_incumbents, key=lambda x: x[0])\n previous_y = np.inf\n for x, y in sorted_incumbents:\n assert y <= previous_y\n previous_y = y\n\n # We expect N_TRIALS/RETRAIN_AFTER updates\n assert multi_objective_algorithm._n_calls_update_on_iteration_start == int(N_TRIALS / RETRAIN_AFTER)", "def test_lama_job_runner():\n\n configs = registration_root.glob('*.toml')\n\n for cfg in configs:\n delete_previous_files()\n\n print(f\"\\n{'#'*8} Doing config {cfg.name} {'#'*8}\")\n\n lama_job_runner.lama_job_runner(cfg, wt_registration_dir, make_job_file=True, log_level=logging.ERROR)\n lama_job_runner.lama_job_runner(cfg, wt_registration_dir, log_level=logging.ERROR)\n\n lama_job_runner.lama_job_runner(cfg, mut_registration_dir, make_job_file=True, log_level=logging.ERROR)\n lama_job_runner.lama_job_runner(cfg, mut_registration_dir, log_level=logging.ERROR)\n # return # Just do the first", "def test_active(self):\n nodes = [create_node(\"a\", \"service1\"),\n create_node(\"b\", \"service2\")]\n static = StaticRoutes(nodes).create(self.disco, self.runtime)\n self.runtime.dispatcher.startActor(static)\n self.runtime.dispatcher.pump()\n\n self.assertEqual(knownNodes(self.disco, \"service1\", \"sandbox\"), [nodes[0]])\n self.assertEqual(knownNodes(self.disco, \"service2\", \"sandbox\"), [nodes[1]])", "def test_delete_pi_simultaneously_to_vpg_with_multiple_pi(self):\n proj_obj, fabric_obj, pr_objs = self._create_prerequisites(\n create_second_pr=True)\n test_id = self.id()\n VPG_CLASS = self._api_server.get_resource_class('virtual-port-group')\n org_process_ae_id = VPG_CLASS._process_ae_id\n\n class MockVpg(VPG_CLASS):\n org_process_ae_id = VPG_CLASS._process_ae_id\n HOLD_API = True\n @classmethod\n def mock_process_ae_id(cls, db_obj_dict, vpg_name, obj_dict=None):\n while cls.HOLD_API:\n print('sleeping for HOLD_API to clear for '\n 'args = %s' % obj_dict)\n gevent.sleep(0.5)\n return cls.org_process_ae_id(db_obj_dict, vpg_name, obj_dict)\n\n def process_ae_ids(x):\n return 
[int(i) for i in sorted(x) if i is not None]\n\n def get_zk_ae_ids(prs=None):\n prefix = os.path.join(\n self.__class__.__name__,\n 'id', 'aggregated-ethernet')\n zk_client = self._api_server._db_conn._zk_db._zk_client._zk_client\n if not prs:\n prs = [os.path.join(prefix, pr.name) for pr in pr_objs]\n else:\n if not isinstance(prs, list):\n prs = [prs]\n prs = [os.path.join(prefix, pr) for pr in prs]\n ae_ids = {}\n for pr in prs:\n pr_org = os.path.split(pr)[-1]\n ae_ids[pr_org] = zk_client.get_children(pr)\n return ae_ids\n\n pi_per_pr = 3\n pi_objs = {}\n pr1_pi_names = ['%s_pr1_pi%d' % (test_id, i) for\n i in range(1, pi_per_pr + 1)]\n pr2_pi_names = ['%s_pr2_pi%d' % (test_id, i) for\n i in range(1, pi_per_pr + 1)]\n pr1_pi_objs = self._create_pi_objects(pr_objs[0], pr1_pi_names)\n pr2_pi_objs = self._create_pi_objects(pr_objs[1], pr2_pi_names)\n pi_objs.update(pr1_pi_objs)\n pi_objs.update(pr2_pi_objs)\n\n # create a VPG\n vpg_count = 1\n vpg_names = ['vpg_%s_%s' % (test_id, i) for i in range(\n 1, vpg_count + 1)]\n vpg_objs = self._create_vpgs(fabric_obj, vpg_names)\n\n # record AE-IDs in ZK before creating any VPG\n ae_ids = [x for x in get_zk_ae_ids().values() if x]\n self.assertEqual(len(ae_ids), 0)\n\n def _attach_pi_simultaneously(\n vpg_obj, create_pi_uuids=None, delete_pi_uuids=None):\n if create_pi_uuids is None:\n create_pi_uuids = []\n if delete_pi_uuids is None:\n delete_pi_uuids = []\n # Attach PIs from PR1 to VPG-1\n vpg_obj = self._vnc_lib.virtual_port_group_read(id=vpg_obj.uuid)\n try:\n # mock _process_ae_id at VPG resource\n VPG_CLASS._process_ae_id = MockVpg.mock_process_ae_id\n # MockVpg.HOLD_API = True\n MockVpg.HOLD_API = False\n for pi_uuid in create_pi_uuids:\n gevent.spawn(\n self.api.ref_update,\n \"virtual-port-group\",\n vpg_obj.uuid,\n \"physical-interface\",\n pi_uuid,\n None,\n \"ADD\",\n None)\n for pi_uuid in delete_pi_uuids:\n gevent.spawn(\n self.api.ref_update,\n \"virtual-port-group\",\n vpg_obj.uuid,\n \"physical-interface\",\n pi_uuid,\n None,\n \"DELETE\",\n None)\n gevent.sleep(2)\n MockVpg.HOLD_API = False\n gevent.sleep(3)\n except gevent.timeout.Timeout:\n self.assertFalse(\n False,\n '%s failed unexpectedly' % VPG_CLASS._process_ae_id)\n finally:\n # reset mock to original\n VPG_CLASS._process_ae_id = org_process_ae_id\n vpg_obj = self._vnc_lib.virtual_port_group_read(id=vpg_obj.uuid)\n pi_refs = vpg_obj.get_physical_interface_refs()\n return vpg_obj, pi_refs\n\n # Case 1\n # Attach 3 PIs/PR1 and 3 PIs/PR2 to VPG1\n vpg_name = vpg_names[0]\n vpg_obj = vpg_objs[vpg_name]\n vpg_obj = self._vnc_lib.virtual_port_group_read(id=vpg_obj.uuid)\n pr1_pi_uuids = [pi_objs[pr1_pi_names[pi]].uuid for pi in range(3)]\n pr2_pi_uuids = [pi_objs[pr2_pi_names[pi]].uuid for pi in range(3)]\n pi_uuids = pr1_pi_uuids + pr2_pi_uuids\n vpg_obj, pi_refs = _attach_pi_simultaneously(vpg_obj, pi_uuids)\n # verify PI-refs are correct\n self.assertEqual(len(pi_refs), 6)\n vpg_ae_ids = {ref['href'].split('/')[-1]: ref['attr'].ae_num\n for ref in pi_refs}\n # verify all AE-IDs allocated per prouter are unique\n self.assertEqual(len(set(vpg_ae_ids.keys())), len(pi_refs))\n self.assertEqual(len(vpg_ae_ids.values()), 6)\n ae_id_sorted = process_ae_ids(vpg_ae_ids.values())\n self.assertEqual(ae_id_sorted, [0] * 6)\n # verification at Physical Routers\n pr_ae_ids = get_zk_ae_ids()\n self.assertEqual(len(pr_ae_ids[pr_objs[0].name]), 1)\n self.assertEqual(len(pr_ae_ids[pr_objs[1].name]), 1)\n self.assertEqual(process_ae_ids(pr_ae_ids[pr_objs[0].name]), [0])\n 
self.assertEqual(process_ae_ids(pr_ae_ids[pr_objs[1].name]), [0])\n\n # Case 2\n # Deattach PI-1/PR-1, PI-1/PR-2 from VPG-1\n vpg_name = vpg_names[0]\n vpg_obj = vpg_objs[vpg_name]\n vpg_obj = self._vnc_lib.virtual_port_group_read(id=vpg_obj.uuid)\n pi_uuids = [pr1_pi_uuids[0], pr2_pi_uuids[0]]\n vpg_obj, pi_refs = _attach_pi_simultaneously(\n vpg_obj, delete_pi_uuids=pi_uuids)\n # verify PI-refs are correct\n self.assertEqual(len(pi_refs), 4)\n vpg_ae_ids = {ref['href'].split('/')[-1]: ref['attr'].ae_num\n for ref in pi_refs}\n # verify all AE-IDs allocated per prouter are unique\n self.assertEqual(len(set(vpg_ae_ids.keys())), len(pi_refs))\n self.assertEqual(len(vpg_ae_ids.values()), 4)\n ae_id_sorted = process_ae_ids(vpg_ae_ids.values())\n self.assertEqual(ae_id_sorted, [0] * 4)\n # verification at Physical Routers\n pr_ae_ids = get_zk_ae_ids()\n self.assertEqual(len(pr_ae_ids[pr_objs[0].name]), 1)\n self.assertEqual(len(pr_ae_ids[pr_objs[1].name]), 1)\n self.assertEqual(process_ae_ids(pr_ae_ids[pr_objs[0].name]), [0])\n self.assertEqual(process_ae_ids(pr_ae_ids[pr_objs[1].name]), [0])\n\n # Case 3\n # Deattach all PIs/PR-1. AE-IDs at PR-1 to be de-allocated\n vpg_name = vpg_names[0]\n vpg_obj = vpg_objs[vpg_name]\n vpg_obj = self._vnc_lib.virtual_port_group_read(id=vpg_obj.uuid)\n pi_uuids = pr1_pi_uuids[1:3]\n vpg_obj, pi_refs = _attach_pi_simultaneously(\n vpg_obj, delete_pi_uuids=pi_uuids)\n # verify PI-refs are correct\n self.assertEqual(len(pi_refs), 2)\n vpg_ae_ids = {ref['href'].split('/')[-1]: ref['attr'].ae_num\n for ref in pi_refs}\n # verify all AE-IDs allocated per prouter are unique\n self.assertEqual(len(set(vpg_ae_ids.keys())), len(pi_refs))\n self.assertEqual(len(vpg_ae_ids.values()), 2)\n ae_id_sorted = process_ae_ids(vpg_ae_ids.values())\n self.assertEqual(ae_id_sorted, [0] * 2)\n # verification at Physical Routers\n pr_ae_ids = get_zk_ae_ids()\n self.assertEqual(len(pr_ae_ids[pr_objs[0].name]), 0)\n self.assertEqual(len(pr_ae_ids[pr_objs[1].name]), 1)\n self.assertEqual(process_ae_ids(pr_ae_ids[pr_objs[0].name]), [])\n self.assertEqual(process_ae_ids(pr_ae_ids[pr_objs[1].name]), [0])", "def test_raylet_infeasible_tasks(shutdown_only):\n addresses = ray.init(num_gpus=3)\n\n @ray.remote(num_gpus=5)\n class ActorRequiringGPU:\n def __init__(self):\n pass\n\n ActorRequiringGPU.remote()\n\n def test_infeasible_actor(ray_addresses):\n assert (wait_until_server_available(addresses[\"webui_url\"]) is True)\n webui_url = ray_addresses[\"webui_url\"].replace(\"localhost\",\n \"http://127.0.0.1\")\n raylet_info = requests.get(webui_url + \"/api/raylet_info\").json()\n actor_info = raylet_info[\"result\"][\"actors\"]\n assert len(actor_info) == 1\n\n _, infeasible_actor_info = actor_info.popitem()\n assert infeasible_actor_info[\"state\"] == -1\n assert infeasible_actor_info[\"invalidStateType\"] == \"infeasibleActor\"\n\n assert (wait_until_succeeded_without_exception(\n test_infeasible_actor,\n (AssertionError, requests.exceptions.ConnectionError),\n addresses,\n timeout_ms=30000,\n retry_interval_ms=1000) is True)", "def test_add_multiple_pis_simultaneously_to_vpg_with_1_pi(self):\n proj_obj, fabric_obj, pr_objs = self._create_prerequisites(\n create_second_pr=True)\n test_id = self.id()\n VPG_CLASS = self._api_server.get_resource_class('virtual-port-group')\n org_process_ae_id = VPG_CLASS._process_ae_id\n\n class MockVpg(VPG_CLASS):\n org_process_ae_id = VPG_CLASS._process_ae_id\n HOLD_API = True\n @classmethod\n def mock_process_ae_id(cls, db_obj_dict, vpg_name, 
obj_dict=None):\n while cls.HOLD_API:\n print('sleeping for HOLD_API to clear for '\n 'args = %s' % obj_dict)\n gevent.sleep(0.5)\n return cls.org_process_ae_id(db_obj_dict, vpg_name, obj_dict)\n\n def process_ae_ids(x):\n return [int(i) for i in sorted(x) if i is not None]\n\n def get_zk_ae_ids(prs=None):\n prefix = os.path.join(\n self.__class__.__name__,\n 'id', 'aggregated-ethernet')\n zk_client = self._api_server._db_conn._zk_db._zk_client._zk_client\n if not prs:\n prs = [os.path.join(prefix, pr.name) for pr in pr_objs]\n else:\n if not isinstance(prs, list):\n prs = [prs]\n prs = [os.path.join(prefix, pr) for pr in prs]\n ae_ids = {}\n for pr in prs:\n pr_org = os.path.split(pr)[-1]\n ae_ids[pr_org] = zk_client.get_children(pr)\n return ae_ids\n\n pi_per_pr = 150\n pi_objs = {}\n pr1_pi_names = ['%s_pr1_pi%d' % (test_id, i) for\n i in range(1, pi_per_pr + 1)]\n pr2_pi_names = ['%s_pr2_pi%d' % (test_id, i) for\n i in range(1, pi_per_pr + 1)]\n pr1_pi_objs = self._create_pi_objects(pr_objs[0], pr1_pi_names)\n pr2_pi_objs = self._create_pi_objects(pr_objs[1], pr2_pi_names)\n pi_objs.update(pr1_pi_objs)\n pi_objs.update(pr2_pi_objs)\n\n # create a VPG\n vpg_count = 1\n vpg_names = ['vpg_%s_%s' % (test_id, i) for i in range(\n 1, vpg_count + 1)]\n vpg_objs = self._create_vpgs(fabric_obj, vpg_names)\n\n # record AE-IDs in ZK before creating any VPG\n ae_ids = [x for x in get_zk_ae_ids().values() if x]\n self.assertEqual(len(ae_ids), 0)\n\n def _attach_pi_simultaneously(vpg_obj, pi_uuids):\n # Attach PIs from PR1 to VPG-1\n vpg_obj = self._vnc_lib.virtual_port_group_read(id=vpg_obj.uuid)\n try:\n # mock _process_ae_id at VPG resource\n VPG_CLASS._process_ae_id = MockVpg.mock_process_ae_id\n MockVpg.HOLD_API = True\n for pi_uuid in pi_uuids:\n gevent.spawn(\n self.api.ref_update,\n \"virtual-port-group\",\n vpg_obj.uuid,\n \"physical-interface\",\n pi_uuid,\n None,\n \"ADD\",\n None)\n gevent.sleep(2)\n MockVpg.HOLD_API = False\n gevent.sleep(6)\n except gevent.timeout.Timeout:\n self.assertFalse(\n False,\n '%s failed unexpectedly' % VPG_CLASS._process_ae_id)\n finally:\n # reset mock to original\n VPG_CLASS._process_ae_id = org_process_ae_id\n vpg_obj = self._vnc_lib.virtual_port_group_read(id=vpg_obj.uuid)\n pi_refs = vpg_obj.get_physical_interface_refs()\n return vpg_obj, pi_refs\n\n # Case 1\n # Attach 1 PIs from PR1 to VPG-1\n vpg_name = vpg_names[0]\n vpg_obj = vpg_objs[vpg_name]\n vpg_obj = self._vnc_lib.virtual_port_group_read(id=vpg_obj.uuid)\n pi_uuids = [pi_objs[pr1_pi_names[pi]].uuid for pi in range(1)]\n vpg_obj, pi_refs = _attach_pi_simultaneously(vpg_obj, pi_uuids)\n # verify PI-refs are correct\n self.assertEqual(len(pi_refs), 1)\n vpg_ae_ids = {ref['href'].split('/')[-1]: ref['attr'].ae_num\n for ref in pi_refs}\n # verify all AE-IDs allocated per prouter are unique\n self.assertEqual(len(set(vpg_ae_ids.keys())), len(pi_refs))\n self.assertIsNone(list(vpg_ae_ids.values())[0])\n ae_id_sorted = process_ae_ids(vpg_ae_ids.values())\n self.assertEqual(ae_id_sorted, [])\n # verification at ZK for AE-IDs in Physical Routers\n pr_ae_ids = get_zk_ae_ids()\n self.assertEqual(len(pr_ae_ids[pr_objs[0].name]), 0)\n self.assertEqual(process_ae_ids(pr_ae_ids[pr_objs[0].name]), [])\n\n # Case 2\n # Attach rest of 149 PIs from PR1 to VPG-1\n vpg_name = vpg_names[0]\n vpg_obj = vpg_objs[vpg_name]\n vpg_obj = self._vnc_lib.virtual_port_group_read(id=vpg_obj.uuid)\n pi_uuids = [pi_objs[pr1_pi_names[pi]].uuid for pi in range(1, 150)]\n vpg_obj, pi_refs = _attach_pi_simultaneously(vpg_obj, 
pi_uuids)\n # verify PI-refs are correct\n self.assertEqual(len(pi_refs), 150)\n vpg_ae_ids = {ref['href'].split('/')[-1]: ref['attr'].ae_num\n for ref in pi_refs}\n # verify all AE-IDs allocated per prouter are unique\n self.assertEqual(len(set(vpg_ae_ids.keys())), len(pi_refs))\n self.assertEqual(len(set(vpg_ae_ids.values())), 1)\n ae_id_sorted = process_ae_ids(vpg_ae_ids.values())\n self.assertEqual(ae_id_sorted, [0] * 150)\n # verification at ZK for AE-IDs in Physical Routers\n pr_ae_ids = get_zk_ae_ids()\n self.assertEqual(len(pr_ae_ids[pr_objs[0].name]), 1)\n self.assertEqual(process_ae_ids(pr_ae_ids[pr_objs[0].name]), [0])", "def __init__(self, asa_factory: AsaFactory):\n self.step_in_progress = False\n self.asa_factory = asa_factory", "def test_ipam_services_partial_update(self):\n pass", "def test_add_delete_a_pi_simultaneously_to_vpg_with_1_pi(self):\n proj_obj, fabric_obj, pr_objs = self._create_prerequisites(\n create_second_pr=True)\n test_id = self.id()\n VPG_CLASS = self._api_server.get_resource_class('virtual-port-group')\n org_process_ae_id = VPG_CLASS._process_ae_id\n\n class MockVpg(VPG_CLASS):\n org_process_ae_id = VPG_CLASS._process_ae_id\n HOLD_API = True\n @classmethod\n def mock_process_ae_id(cls, db_obj_dict, vpg_name, obj_dict=None):\n while cls.HOLD_API:\n print('sleeping for HOLD_API to clear for '\n 'args = %s' % obj_dict)\n gevent.sleep(0.5)\n return cls.org_process_ae_id(db_obj_dict, vpg_name, obj_dict)\n\n def process_ae_ids(x):\n return [int(i) for i in sorted(x) if i is not None]\n\n def get_zk_ae_ids(prs=None):\n prefix = os.path.join(\n self.__class__.__name__,\n 'id', 'aggregated-ethernet')\n zk_client = self._api_server._db_conn._zk_db._zk_client._zk_client\n if not prs:\n prs = [os.path.join(prefix, pr.name) for pr in pr_objs]\n else:\n if not isinstance(prs, list):\n prs = [prs]\n prs = [os.path.join(prefix, pr) for pr in prs]\n ae_ids = {}\n for pr in prs:\n pr_org = os.path.split(pr)[-1]\n ae_ids[pr_org] = zk_client.get_children(pr)\n return ae_ids\n\n pi_per_pr = 3\n pi_objs = {}\n pr1_pi_names = ['%s_pr1_pi%d' % (test_id, i) for\n i in range(1, pi_per_pr + 1)]\n pr1_pi_objs = self._create_pi_objects(pr_objs[0], pr1_pi_names)\n pi_objs.update(pr1_pi_objs)\n\n # create a VPG\n vpg_count = 1\n vpg_names = ['vpg_%s_%s' % (test_id, i) for i in range(\n 1, vpg_count + 1)]\n vpg_objs = self._create_vpgs(fabric_obj, vpg_names)\n\n # record AE-IDs in ZK before creating any VPG\n ae_ids = [x for x in get_zk_ae_ids().values() if x]\n self.assertEqual(len(ae_ids), 0)\n\n def _attach_pi_simultaneously(\n vpg_obj, create_pi_uuids, delete_pi_uuids=None):\n if delete_pi_uuids is None:\n delete_pi_uuids = []\n # Attach PIs from PR1 to VPG-1\n vpg_obj = self._vnc_lib.virtual_port_group_read(id=vpg_obj.uuid)\n try:\n # mock _process_ae_id at VPG resource\n VPG_CLASS._process_ae_id = MockVpg.mock_process_ae_id\n MockVpg.HOLD_API = True\n for pi_uuid in delete_pi_uuids:\n gevent.spawn(\n self.api.ref_update,\n \"virtual-port-group\",\n vpg_obj.uuid,\n \"physical-interface\",\n pi_uuid,\n None,\n \"DELETE\")\n for pi_uuid in create_pi_uuids:\n gevent.spawn(\n self.api.ref_update,\n \"virtual-port-group\",\n vpg_obj.uuid,\n \"physical-interface\",\n pi_uuid,\n None,\n \"ADD\",\n None)\n gevent.sleep(2)\n MockVpg.HOLD_API = False\n gevent.sleep(2)\n except gevent.timeout.Timeout:\n self.assertFalse(\n False,\n '%s failed unexpectedly' % VPG_CLASS._process_ae_id)\n finally:\n # reset mock to original\n VPG_CLASS._process_ae_id = org_process_ae_id\n vpg_obj = 
self._vnc_lib.virtual_port_group_read(id=vpg_obj.uuid)\n pi_refs = vpg_obj.get_physical_interface_refs()\n return vpg_obj, pi_refs\n\n # Case 1\n # Attach PI-1/PR-1 to VPG-1\n vpg_name = vpg_names[0]\n vpg_obj = vpg_objs[vpg_name]\n vpg_obj = self._vnc_lib.virtual_port_group_read(id=vpg_obj.uuid)\n pi_uuids = [list(pr1_pi_objs.values())[0].uuid]\n # vpg_obj, pi_refs = _attach_pi_simultaneously(vpg_obj, pi_uuids)\n pi_obj = self._vnc_lib.physical_interface_read(id=pi_uuids[0])\n vpg_obj.add_physical_interface(pi_obj)\n self.api.virtual_port_group_update(vpg_obj)\n vpg_obj = self._vnc_lib.virtual_port_group_read(id=vpg_obj.uuid)\n pi_refs = vpg_obj.get_physical_interface_refs()\n # verify PI-refs are correct\n self.assertEqual(len(pi_refs), 1)\n vpg_ae_ids = {ref['href'].split('/')[-1]: ref['attr'].ae_num\n for ref in pi_refs}\n # verify all AE-IDs allocated per prouter are unique\n self.assertEqual(len(set(vpg_ae_ids.keys())), len(pi_refs))\n self.assertIsNone(list(vpg_ae_ids.values())[0])\n ae_id_sorted = process_ae_ids(vpg_ae_ids.values())\n self.assertEqual(ae_id_sorted, [])\n # verification at ZK for AE-IDs in Physical Routers\n pr_ae_ids = get_zk_ae_ids()\n self.assertEqual(len(pr_ae_ids[pr_objs[0].name]), 0)\n self.assertEqual(process_ae_ids(pr_ae_ids[pr_objs[0].name]), [])\n\n # Case 2\n # Attach PI-2 from PR1 to VPG-1 and delete exiting PI-1/PR-1\n # simultaneously\n vpg_name = vpg_names[0]\n vpg_obj = vpg_objs[vpg_name]\n vpg_obj = self._vnc_lib.virtual_port_group_read(id=vpg_obj.uuid)\n pi_refs = vpg_obj.get_physical_interface_refs()\n existing_pi_uuids = [ref['uuid'] for ref in pi_refs]\n pi_uuids = [list(pr1_pi_objs.values())[1].uuid]\n vpg_obj, pi_refs = _attach_pi_simultaneously(\n vpg_obj, pi_uuids, existing_pi_uuids)\n # verify PI-refs are correct\n self.assertEqual(len(pi_refs), 1)\n vpg_ae_ids = {ref['href'].split('/')[-1]: ref['attr'].ae_num\n for ref in pi_refs}\n # verify all AE-IDs allocated per prouter are unique\n self.assertEqual(len(set(vpg_ae_ids.keys())), len(pi_refs))\n self.assertIsNone(list(vpg_ae_ids.values())[0])\n ae_id_sorted = process_ae_ids(vpg_ae_ids.values())\n self.assertEqual(ae_id_sorted, [])\n # verification at ZK for AE-IDs in Physical Routers\n pr_ae_ids = get_zk_ae_ids()\n self.assertEqual(len(pr_ae_ids[pr_objs[0].name]), 0)\n self.assertEqual(process_ae_ids(pr_ae_ids[pr_objs[0].name]), [])", "def test_update_instances_schedule_state(self):\n pass", "def test_active_inference_SPM_1a(self):\n array_path = os.path.join(os.getcwd(), DATA_PATH + \"vbx_test_1a.mat\")\n mat_contents = loadmat(file_name=array_path)\n\n A = mat_contents[\"A\"][0]\n B = mat_contents[\"B\"][0]\n C = to_arr_of_arr(mat_contents[\"C\"][0][0][:,0])\n obs_matlab = mat_contents[\"obs\"].astype(\"int64\")\n policy = mat_contents[\"policies\"].astype(\"int64\") - 1\n t_horizon = mat_contents[\"t_horizon\"][0, 0].astype(\"int64\")\n actions_matlab = mat_contents[\"actions\"].astype(\"int64\") - 1\n qs_matlab = mat_contents[\"qs\"][0]\n xn_matlab = mat_contents[\"xn\"][0]\n vn_matlab = mat_contents[\"vn\"][0]\n\n likelihoods_matlab = mat_contents[\"likelihoods\"][0]\n\n num_obs, num_states, _, num_factors = get_model_dimensions(A, B)\n obs = convert_observation_array(obs_matlab, num_obs)\n T = len(obs)\n\n agent = Agent(A=A, B=B, C=C, inference_algo=\"MMP\", policy_len=1, \n inference_horizon=t_horizon, use_BMA = False, \n policy_sep_prior = True)\n \n actions_python = np.zeros(T)\n\n for t in range(T):\n o_t = (np.where(obs[t])[0][0],)\n qx, xn_t, vn_t = 
agent.infer_states_test(o_t)\n q_pi, efe= agent.infer_policies()\n action = agent.sample_action()\n\n actions_python[t] = action\n\n xn_python = build_xn_vn_array(xn_t)\n vn_python = build_xn_vn_array(vn_t)\n\n if t == T-1:\n xn_python = xn_python[:,:,:-1,:]\n vn_python = vn_python[:,:,:-1,:]\n\n start_tstep = max(0, agent.curr_timestep - agent.inference_horizon)\n end_tstep = min(agent.curr_timestep + agent.policy_len, T)\n\n xn_validation = xn_matlab[0][:,:,start_tstep:end_tstep,t,:]\n vn_validation = vn_matlab[0][:,:,start_tstep:end_tstep,t,:]\n\n self.assertTrue(np.isclose(xn_python, xn_validation).all())\n self.assertTrue(np.isclose(vn_python, vn_validation).all())\n \n self.assertTrue(np.isclose(actions_matlab[0,:],actions_python[:-1]).all())", "def test_ipam_services_create(self):\n pass", "def test_ParameterManagerGenerator_consecutive():\n components_1 = {\n \"scale\": mock_component(),\n \"decay\": mock_component(),\n \"absorption\": mock_component(),\n }\n\n data_manager = mock_data_manager(components_1)\n data_manager.consecutive_refinement_order = [[\"scale\", \"decay\"], [\"absorption\"]]\n\n # Test single dataset case.\n pmg = ParameterManagerGenerator(\n [data_manager],\n apm_type=active_parameter_manager,\n target=ScalingTarget(),\n mode=\"consecutive\",\n )\n apms = list(pmg.parameter_managers())\n assert len(apms) == 2\n apm = apms[0]\n assert isinstance(apm, multi_active_parameter_manager)\n assert \"scale\" in apm.components_list\n assert \"decay\" in apm.components_list\n assert \"absorption\" not in apm.components_list\n apm = apms[1]\n assert isinstance(apm, multi_active_parameter_manager)\n assert \"scale\" not in apm.components_list\n assert \"decay\" not in apm.components_list\n assert \"absorption\" in apm.components_list\n\n # Test multi dataset case.\n components_2 = {\"1\": mock_component(), \"2\": mock_component()}\n data_manager_2 = mock_data_manager(components_2)\n data_manager_2.consecutive_refinement_order = [[\"1\"], [\"2\"]]\n\n pmg = ParameterManagerGenerator(\n [data_manager, data_manager_2],\n apm_type=active_parameter_manager,\n target=ScalingTarget(),\n mode=\"consecutive\",\n )\n apms = list(pmg.parameter_managers())\n assert len(apms) == 2\n multi_apm = apms[0]\n assert isinstance(multi_apm, multi_active_parameter_manager)\n apm_1 = multi_apm.apm_list[0]\n assert \"scale\" in apm_1.components_list\n assert \"decay\" in apm_1.components_list\n assert \"absorption\" not in apm_1.components_list\n assert multi_apm.apm_list[1].components_list == [\"1\"]\n multi_apm = apms[1]\n assert isinstance(multi_apm, multi_active_parameter_manager)\n assert multi_apm.apm_list[0].components_list == [\"absorption\"]\n assert multi_apm.apm_list[1].components_list == [\"2\"]\n\n # Test multi dataset case with different number of cycles for each data_manager.\n components_2 = {\"1\": mock_component()}\n data_manager_2 = mock_data_manager(components_2)\n data_manager_2.consecutive_refinement_order = [[\"1\"], [\"2\"]]\n pmg = ParameterManagerGenerator(\n [data_manager, data_manager_2],\n apm_type=active_parameter_manager,\n target=ScalingTarget(),\n mode=\"consecutive\",\n )\n assert pmg.param_lists[0] == [[\"scale\", \"decay\"], [\"absorption\"]]\n assert pmg.param_lists[1] == [[\"1\"]]\n apms = list(pmg.parameter_managers())\n assert len(apms) == 2\n multi_apm = apms[0]\n assert isinstance(multi_apm, multi_active_parameter_manager)\n apm_1 = multi_apm.apm_list[0]\n assert \"scale\" in apm_1.components_list\n assert \"decay\" in apm_1.components_list\n assert 
\"absorption\" not in apm_1.components_list\n assert multi_apm.apm_list[1].components_list == [\"1\"]\n multi_apm = apms[1]\n assert isinstance(multi_apm, multi_active_parameter_manager)\n assert multi_apm.apm_list[0].components_list == [\"absorption\"]\n # Only change relative to previous test case.\n assert multi_apm.apm_list[1].components_list == []\n\n # Test fixing the decay parameter.\n data_manager.fixed_components = [\"decay\"]\n pmg = ParameterManagerGenerator(\n [data_manager],\n apm_type=active_parameter_manager,\n target=ScalingTarget(),\n mode=\"consecutive\",\n )\n apms = list(pmg.parameter_managers())\n assert len(apms) == 2\n apm = apms[0]\n assert isinstance(apm, multi_active_parameter_manager)\n assert \"scale\" in apm.components_list\n assert \"decay\" not in apm.components_list\n assert \"absorption\" not in apm.components_list\n apm = apms[1]\n assert isinstance(apm, multi_active_parameter_manager)\n assert \"scale\" not in apm.components_list\n assert \"decay\" not in apm.components_list\n assert \"absorption\" in apm.components_list", "def case_real_runs(\n automl: AutoML,\n make_ensemble_builder_manager: Callable[..., EnsembleBuilderManager],\n) -> EnsembleBuilderManager:\n manager = make_ensemble_builder_manager(\n backend=automl._backend,\n metric=automl._metrics[0],\n task=automl._task,\n dataset_name=automl._dataset_name,\n seed=automl._seed,\n logger_port=automl._logger_port,\n random_state=DEFAULT_SEED,\n )\n return manager", "def test_destroy(self, mocker):\n def _split_task_init_mock(self, api, storage, period, event):\n self._task = mocker.Mock()\n self._api = api\n self._storage = storage\n self._period = period\n self._event = event\n event.set()\n mocker.patch('splitio.client.factory.SplitSynchronizationTask.__init__', new=_split_task_init_mock)\n\n def _segment_task_init_mock(self, api, storage, split_storage, period, event):\n self._task = mocker.Mock()\n self._worker_pool = mocker.Mock()\n self._api = api\n self._segment_storage = storage\n self._split_storage = split_storage\n self._period = period\n self._event = event\n event.set()\n mocker.patch('splitio.client.factory.SegmentSynchronizationTask.__init__', new=_segment_task_init_mock)\n\n imp_async_task_mock = mocker.Mock(spec=asynctask.AsyncTask)\n def _imppression_task_init_mock(self, api, storage, refresh_rate, bulk_size):\n self._logger = mocker.Mock()\n self._impressions_api = api\n self._storage = storage\n self._period = refresh_rate\n self._task = imp_async_task_mock\n self._failed = mocker.Mock()\n self._bulk_size = bulk_size\n mocker.patch('splitio.client.factory.ImpressionsSyncTask.__init__', new=_imppression_task_init_mock)\n\n evt_async_task_mock = mocker.Mock(spec=asynctask.AsyncTask)\n def _event_task_init_mock(self, api, storage, refresh_rate, bulk_size):\n self._logger = mocker.Mock()\n self._impressions_api = api\n self._storage = storage\n self._period = refresh_rate\n self._task = evt_async_task_mock\n self._failed = mocker.Mock()\n self._bulk_size = bulk_size\n mocker.patch('splitio.client.factory.EventsSyncTask.__init__', new=_event_task_init_mock)\n\n # Start factory and make assertions\n factory = get_factory('some_api_key')\n factory.block_until_ready()\n time.sleep(1) # give a chance for the bg thread to set the ready status\n assert factory.ready\n assert factory.destroyed is False\n\n factory.destroy()\n assert imp_async_task_mock.stop.mock_calls == [mocker.call(None)]\n assert evt_async_task_mock.stop.mock_calls == [mocker.call(None)]\n assert factory.destroyed is 
True", "def test_concurrent_instances(self):\n cm = contextlib.ExitStack() # TODO: clean this up\n\n work_dir1 = Path(cm.enter_context(tempfile.TemporaryDirectory())) # TODO: make these delete only if no exception occured\n work_dir2 = Path(cm.enter_context(tempfile.TemporaryDirectory()))\n\n archive = RemotePrometheusArchive.for_tag('latest').download()\n prometheus1: PrometheusInstance = cm.enter_context(PrometheusInstance(archive, work_dir1))\n prometheus2: PrometheusInstance = cm.enter_context(PrometheusInstance(archive, work_dir2))\n\n prometheus1.start()\n\n with self.assertRaisesRegex(Exception, 'certificate verify failed'):\n prometheus2.start()\n\n\n cm.close()", "def test_concurrent_access(self):\n num_threads = 4\n thread_pool = ThreadPool(num_threads)\n\n def test_func(x):\n \"\"\"Create, get, delete models.\"\"\"\n for i in range(32):\n handle = self.model_manager.create(name='%s-%s' % (x, i))\n self.assertTrue(\n handle in [m.handle for m in self.model_manager.models()])\n self.model_manager.delete(handle)\n self.assertTrue(\n handle not in\n [m.handle for m in self.model_manager.models()])\n return True\n for x in range(num_threads):\n thread_pool.add_func(test_func, x)\n thread_pool.join()\n self.assertTrue(len(self.model_manager.models()) == 0,\n 'Expecting no models to stick around')", "def test_run_started(self):", "async def test_change_pools_reorg(self, setup, trusted_and_fee, bt, self_hostname):\n trusted, fee = trusted_and_fee\n full_nodes, wallet_nodes, receive_address, client, rpc_cleanup = setup\n our_ph = receive_address[0]\n pool_a_ph = receive_address[1]\n wallets = [wallet_n.wallet_state_manager.main_wallet for wallet_n in wallet_nodes]\n pool_b_ph = await wallets[1].get_new_puzzlehash()\n full_node_api = full_nodes[0]\n WAIT_SECS = 30\n if trusted:\n wallet_nodes[0].config[\"trusted_peers\"] = {\n full_node_api.full_node.server.node_id.hex(): full_node_api.full_node.server.node_id.hex()\n }\n else:\n wallet_nodes[0].config[\"trusted_peers\"] = {}\n\n await wallet_nodes[0].server.start_client(\n PeerInfo(self_hostname, uint16(full_node_api.full_node.server._port)), None\n )\n\n try:\n assert len(await client.get_wallets(WalletType.POOLING_WALLET)) == 0\n\n async def have_chia():\n await farm_blocks(full_node_api, our_ph, 1)\n return (await wallets[0].get_confirmed_balance()) > 0\n\n await time_out_assert(timeout=WAIT_SECS, function=have_chia)\n await time_out_assert(20, wallet_is_synced, True, wallet_nodes[0], full_node_api)\n\n creation_tx: TransactionRecord = await client.create_new_pool_wallet(\n pool_a_ph, \"https://pool-a.org\", 5, f\"{self_hostname}:5000\", \"new\", \"FARMING_TO_POOL\", fee\n )\n\n await time_out_assert(\n 10,\n full_node_api.full_node.mempool_manager.get_spendbundle,\n creation_tx.spend_bundle,\n creation_tx.name,\n )\n\n await farm_blocks(full_node_api, our_ph, 6)\n assert full_node_api.full_node.mempool_manager.get_spendbundle(creation_tx.name) is None\n\n await time_out_assert(20, wallet_is_synced, True, wallet_nodes[0], full_node_api)\n\n summaries_response = await client.get_wallets(WalletType.POOLING_WALLET)\n assert len(summaries_response) == 1\n wallet_id: int = summaries_response[0][\"id\"]\n status: PoolWalletInfo = (await client.pw_status(wallet_id))[0]\n\n assert status.current.state == PoolSingletonState.FARMING_TO_POOL.value\n assert status.target is None\n\n async def status_is_farming_to_pool():\n await farm_blocks(full_node_api, our_ph, 1)\n pw_status: PoolWalletInfo = (await client.pw_status(wallet_id))[0]\n return 
pw_status.current.state == PoolSingletonState.FARMING_TO_POOL.value\n\n await time_out_assert(timeout=WAIT_SECS, function=status_is_farming_to_pool)\n\n pw_info: PoolWalletInfo = (await client.pw_status(wallet_id))[0]\n assert pw_info.current.pool_url == \"https://pool-a.org\"\n assert pw_info.current.relative_lock_height == 5\n\n join_pool_tx: TransactionRecord = (\n await client.pw_join_pool(\n wallet_id,\n pool_b_ph,\n \"https://pool-b.org\",\n 10,\n fee,\n )\n )[\"transaction\"]\n assert join_pool_tx is not None\n await time_out_assert(\n 10,\n full_node_api.full_node.mempool_manager.get_spendbundle,\n join_pool_tx.spend_bundle,\n join_pool_tx.name,\n )\n await farm_blocks(full_node_api, our_ph, 1)\n\n async def status_is_leaving_no_blocks():\n pw_status: PoolWalletInfo = (await client.pw_status(wallet_id))[0]\n return pw_status.current.state == PoolSingletonState.LEAVING_POOL.value\n\n async def status_is_farming_to_pool_no_blocks():\n pw_status: PoolWalletInfo = (await client.pw_status(wallet_id))[0]\n return pw_status.current.state == PoolSingletonState.FARMING_TO_POOL.value\n\n await time_out_assert(timeout=WAIT_SECS, function=status_is_leaving_no_blocks)\n\n current_blocks = await full_node_api.get_all_full_blocks()\n more_blocks = full_node_api.bt.get_consecutive_blocks(\n 3,\n farmer_reward_puzzle_hash=pool_a_ph,\n pool_reward_puzzle_hash=pool_b_ph,\n block_list_input=current_blocks[:-1],\n force_overflow=True,\n guarantee_transaction_block=True,\n seed=32 * b\"4\",\n transaction_data=join_pool_tx.spend_bundle,\n )\n\n for block in more_blocks[-3:]:\n await full_node_api.full_node.respond_block(RespondBlock(block))\n\n await asyncio.sleep(5)\n await time_out_assert(timeout=WAIT_SECS, function=status_is_leaving_no_blocks)\n\n # Eventually, leaves pool\n await time_out_assert(timeout=WAIT_SECS, function=status_is_farming_to_pool)\n\n finally:\n client.close()\n await client.await_closed()\n await rpc_cleanup()", "def test_async_config(self, affiliate_items):\n with mock.patch('chiton.rack.affiliates.bulk.BatchJob') as batch_job:\n bulk_update_affiliate_item_details(affiliate_items, workers=10, max_retries=20)\n\n call_kwargs = batch_job.call_args[1]\n assert call_kwargs['workers'] == 10\n assert call_kwargs['max_retries'] == 20", "def test_multiple_factories(self, mocker):\n def _make_factory_with_apikey(apikey, *_, **__):\n return SplitFactory(apikey, {}, True)\n\n factory_module_logger = mocker.Mock()\n build_in_memory = mocker.Mock()\n build_in_memory.side_effect = _make_factory_with_apikey\n build_redis = mocker.Mock()\n build_redis.side_effect = _make_factory_with_apikey\n build_uwsgi = mocker.Mock()\n build_uwsgi.side_effect = _make_factory_with_apikey\n build_localhost = mocker.Mock()\n build_localhost.side_effect = _make_factory_with_apikey\n mocker.patch('splitio.client.factory._LOGGER', new=factory_module_logger)\n mocker.patch('splitio.client.factory._build_in_memory_factory', new=build_in_memory)\n mocker.patch('splitio.client.factory._build_redis_factory', new=build_redis)\n mocker.patch('splitio.client.factory._build_uwsgi_factory', new=build_uwsgi)\n mocker.patch('splitio.client.factory._build_localhost_factory', new=build_localhost)\n\n _INSTANTIATED_FACTORIES.clear() # Clear all factory counters for testing purposes\n\n factory1 = get_factory('some_api_key')\n assert _INSTANTIATED_FACTORIES['some_api_key'] == 1\n assert factory_module_logger.warning.mock_calls == []\n\n factory2 = get_factory('some_api_key')\n assert _INSTANTIATED_FACTORIES['some_api_key'] == 2\n 
assert factory_module_logger.warning.mock_calls == [mocker.call(\n \"factory instantiation: You already have %d %s with this API Key. \"\n \"We recommend keeping only one instance of the factory at all times \"\n \"(Singleton pattern) and reusing it throughout your application.\",\n 1,\n 'factory'\n )]\n\n factory_module_logger.reset_mock()\n factory3 = get_factory('some_api_key')\n assert _INSTANTIATED_FACTORIES['some_api_key'] == 3\n assert factory_module_logger.warning.mock_calls == [mocker.call(\n \"factory instantiation: You already have %d %s with this API Key. \"\n \"We recommend keeping only one instance of the factory at all times \"\n \"(Singleton pattern) and reusing it throughout your application.\",\n 2,\n 'factories'\n )]\n\n factory_module_logger.reset_mock()\n factory4 = get_factory('some_other_api_key')\n assert _INSTANTIATED_FACTORIES['some_api_key'] == 3\n assert _INSTANTIATED_FACTORIES['some_other_api_key'] == 1\n assert factory_module_logger.warning.mock_calls == [mocker.call(\n \"factory instantiation: You already have an instance of the Split factory. \"\n \"Make sure you definitely want this additional instance. \"\n \"We recommend keeping only one instance of the factory at all times \"\n \"(Singleton pattern) and reusing it throughout your application.\"\n )]\n\n event = threading.Event()\n factory1.destroy(event)\n event.wait()\n assert _INSTANTIATED_FACTORIES['some_other_api_key'] == 1\n assert _INSTANTIATED_FACTORIES['some_api_key'] == 2\n factory2.destroy()\n factory3.destroy()\n factory4.destroy()", "def test_pm_Completeness(self):\n pass", "async def test_form_multiple_services(hass: HomeAssistant) -> None:\n result = await hass.config_entries.flow.async_init(\n DOMAIN, context={\"source\": config_entries.SOURCE_USER}\n )\n assert result[\"type\"] == RESULT_TYPE_FORM\n assert result[\"errors\"] is None\n\n with patch(\"aussiebb.asyncio.AussieBB.__init__\", return_value=None), patch(\n \"aussiebb.asyncio.AussieBB.login\", return_value=True\n ), patch(\"aussiebb.asyncio.AussieBB.get_services\", return_value=FAKE_SERVICES):\n result2 = await hass.config_entries.flow.async_configure(\n result[\"flow_id\"],\n FAKE_DATA,\n )\n await hass.async_block_till_done()\n\n assert result2[\"type\"] == RESULT_TYPE_FORM\n assert result2[\"step_id\"] == \"service\"\n assert result2[\"errors\"] is None\n\n with patch(\n \"homeassistant.components.aussie_broadband.async_setup_entry\",\n return_value=True,\n ) as mock_setup_entry:\n result3 = await hass.config_entries.flow.async_configure(\n result[\"flow_id\"],\n {CONF_SERVICES: [FAKE_SERVICES[1][\"service_id\"]]},\n )\n await hass.async_block_till_done()\n\n assert result3[\"type\"] == RESULT_TYPE_CREATE_ENTRY\n assert result3[\"title\"] == TEST_USERNAME\n assert result3[\"data\"] == FAKE_DATA\n assert result3[\"options\"] == {\n CONF_SERVICES: [FAKE_SERVICES[1][\"service_id\"]],\n }\n assert len(mock_setup_entry.mock_calls) == 1", "def test_rebuild(self):\n pacha.DB_DIR = '/tmp/pacha_test/db'\n pacha.DB_FILE ='/tmp/pacha_test/db/pacha_test.db' \n pacha.permissions.DB_FILE ='/tmp/pacha_test/db/pacha_test.db' \n pacha.sync.DB_FILE ='/tmp/pacha_test/db/pacha_test.db' \n pacha.hg.DB_FILE ='/tmp/pacha_test/db/pacha_test.db' \n pacha.database.DB_FILE = '/tmp/pacha_test/db/pacha_test.db'\n pacha.database.DB_DIR = '/tmp/pacha_test/db'\n pacha.daemon.PID_DIR = '/tmp/pacha_test'\n\n os.makedirs('/tmp/pacha_test/db')\n os.makedirs('/tmp/remote_pacha/hosts/%s' % host.hostname())\n cmd = pacha.PachaCommands(test=True, parse=False, 
db=ConfigMapper('/tmp/pacha_test/db/pacha_test.db'),\n db_file='/tmp/pacha_test/db/pacha_test.db')\n cmd.add_config('/tmp/pacha_test/pacha.conf')\n cmd.check_config()\n os.makedirs('/tmp/pacha_test/foo/bar')\n test_file = open('/tmp/pacha_test/foo/bar/test.txt', 'w')\n test_file.write('file should be rebuilt')\n test_file.close()\n cmd.watch('/tmp/pacha_test/foo/bar')\n\n\n # fake getting the db to the expected location \n shutil.copy('/tmp/pacha_test/db/pacha_test.db' , '/tmp/remote_pacha/hosts/%s/db/pacha.db' % host.hostname())\n shutil.rmtree('/tmp/pacha_test')\n\n server = \"%s@%s\" % (self.username, host.hostname()) \n run = rebuild.Rebuild(server=server,\n hostname=host.hostname(), \n source='/tmp/remote_pacha/hosts')\n run.retrieve_files()\n self.assertTrue(os.path.exists('/tmp/%s' % host.hostname()))\n self.assertFalse(os.path.exists('/tmp/pacha_test'))\n\n run.replace_manager()\n self.assertTrue(os.path.exists('/tmp/pacha_test/foo/bar'))\n self.assertEqual(open('/tmp/pacha_test/foo/bar/test.txt').readline(), 'file should be rebuilt')", "def test_scaling_active_parameter_manager():\n components_2 = {\"1\": mock_scaling_component(2), \"2\": mock_scaling_component(2)}\n scaling_apm = scaling_active_parameter_manager(components_2, [\"1\"])\n assert list(scaling_apm.constant_g_values[0]) == list(\n components_2[\"2\"].calculate_scales()\n )\n assert len(scaling_apm.constant_g_values) == 1\n assert scaling_apm.n_obs == [2]\n\n # Test that no constant_g_values if both components selected\n scaling_apm = scaling_active_parameter_manager(components_2, [\"1\", \"2\"])\n assert scaling_apm.constant_g_values is None\n\n # Check that one can't initialise with an unequal number of reflections,\n # either within the selection or overall.\n with pytest.raises(AssertionError):\n components_2 = {\"1\": mock_scaling_component(2), \"2\": mock_scaling_component(1)}\n scaling_apm = scaling_active_parameter_manager(components_2, [\"1\", \"2\"])\n with pytest.raises(AssertionError):\n components_2 = {\"1\": mock_scaling_component(2), \"2\": mock_scaling_component(1)}\n scaling_apm = scaling_active_parameter_manager(components_2, [\"1\"])\n\n data_manager = mock_data_manager(components_2)\n pmg = ScalingParameterManagerGenerator(\n [data_manager], target=ScalingTarget(), mode=\"concurrent\"\n )\n assert isinstance(pmg.apm_type, type(scaling_active_parameter_manager))", "def test_run():\n gid = 123\n azure = create_azure_mock('GROUP1', [1, 2, 4, 5, 6, 7])\n data = [create_everbridge_contacts([1, 2, 3, 5, 8], True)]\n delete_ids = [3, 8]\n update_ids = [1, 2]\n insert_ids = [4, 6, 7]\n modify_everbridge_data(data[0], update_ids, 'phone', '8087779999')\n modify_everbridge_data(data[0], delete_ids, 'groups', [gid])\n update_data = create_everbridge_contacts(update_ids, True)\n insert_data = create_everbridge_contacts(insert_ids, False)\n upsert_data = update_data + insert_data\n inserted_data = [create_everbridge_contacts(insert_ids, True)]\n inserted_exids = (\n '&[email protected]' +\n '&[email protected]' +\n '&[email protected]')\n ever = create_everbridge_mock(data)\n ever.get_contacts_by_external_ids = MagicMock(side_effect=inserted_data)\n app = Synchronizer(azure, ever)\n # Call run\n rslt = app.run([gid])\n # Tests each method call\n azure.get_group_name.assert_called_with(123)\n ever.get_group_id_by_name.assert_called_with('GROUP1')\n ever.add_group.assert_not_called()\n ever.delete_group.assert_not_called()\n ever.delete_members_from_group.assert_called_with(gid, delete_ids)\n 
ever.delete_contacts.assert_called_with(delete_ids)\n ever.upsert_contacts.assert_called_with(upsert_data)\n ever.get_contacts_by_external_ids.assert_called_with(inserted_exids)\n ever.add_members_to_group.assert_called_with(gid, insert_ids)\n assert rslt == {\n 'GROUP1': {\n 'azure_group_id': 123, 'everbridge_group_id': 123,\n 'azure_count': 6, 'everbridge_count': 5, 'error_contacts': 0,\n 'inserted_contacts': 3, 'updated_contacts': 2, 'removed_members': 2,\n 'deleted_contacts': 2, 'added_members': 3}\n }", "def test(self):\n \"\"\"WARNING: IT IS HIGHLY RECOMMENDED TO HAVE ONE TEST ONLY TO ISOLATE FUNCTIONAL TESTS FROM EACH OTHER. i.e. \n Start a new Python Interpreter and JVM for each test. In the end, it means only one test in this class. \"\"\"\n \n logger.info('**Starting test**')\n q = Queue()\n\n p = Process(target=self.client_process1, args=(q,))\n p.start()\n result = q.get()\n p.join()\n self.assertEqual(result, \"OK\") \n logger.debug(\"Restarting dataClay\")\n self.mock.mock.restartDataClay()\n p = Process(target=self.client_process2, args=(q,))\n p.start()\n result = q.get()\n p.join()\n self.assertEqual(result, \"OK\") \n\n logger.info(\"** Test OK!\")", "def test_factory(self):\n port = self.port(description=u'foo')\n port.startService()\n self.assertIdentical(self._service.factory, port.factory.realFactory)", "def test_parallel_alpha_diversity(self):\r\n params = {'metrics': 'observed_species,chao1,PD_whole_tree',\r\n 'tree_path': self.tree_fp,\r\n 'jobs_to_start': 2\r\n }\r\n app = ParallelAlphaDiversity()\r\n r = app(self.rt_fps,\r\n self.test_out,\r\n params,\r\n job_prefix='ATEST',\r\n poll_directly=True,\r\n suppress_submit_jobs=False)\r\n # confirm that the total number of output sequences equals the total\r\n # number of input sequences\r\n output_fps = glob(join(self.test_out, '*txt'))\r\n self.assertEqual(len(output_fps), len(self.rt_fps))", "def test_create_run(self):\n pass", "def testApmonInstance(self):\n with DashboardAPI() as dashboard:\n self.assertTrue(dashboard.apmon.initializedOK())", "def test_actor_matches_activity(self):", "def test_inmemory_client_creation(self, mocker):\n # Setup task mocks\n def _split_task_init_mock(self, api, storage, period, event):\n self._task = mocker.Mock()\n self._api = api\n self._storage = storage\n self._period = period\n self._event = event\n event.set()\n mocker.patch('splitio.client.factory.SplitSynchronizationTask.__init__', new=_split_task_init_mock)\n def _segment_task_init_mock(self, api, storage, split_storage, period, event):\n self._task = mocker.Mock()\n self._worker_pool = mocker.Mock()\n self._api = api\n self._segment_storage = storage\n self._split_storage = split_storage\n self._period = period\n self._event = event\n event.set()\n mocker.patch('splitio.client.factory.SegmentSynchronizationTask.__init__', new=_segment_task_init_mock)\n\n # Start factory and make assertions\n factory = get_factory('some_api_key')\n assert isinstance(factory._storages['splits'], inmemmory.InMemorySplitStorage)\n assert isinstance(factory._storages['segments'], inmemmory.InMemorySegmentStorage)\n assert isinstance(factory._storages['impressions'], inmemmory.InMemoryImpressionStorage)\n assert factory._storages['impressions']._impressions.maxsize == 10000\n assert isinstance(factory._storages['events'], inmemmory.InMemoryEventStorage)\n assert factory._storages['events']._events.maxsize == 10000\n assert isinstance(factory._storages['telemetry'], inmemmory.InMemoryTelemetryStorage)\n\n assert 
isinstance(factory._apis['splits'], SplitsAPI)\n assert factory._apis['splits']._client._timeout == 1.5\n assert isinstance(factory._apis['segments'], SegmentsAPI)\n assert factory._apis['segments']._client._timeout == 1.5\n assert isinstance(factory._apis['impressions'], ImpressionsAPI)\n assert factory._apis['impressions']._client._timeout == 1.5\n assert isinstance(factory._apis['events'], EventsAPI)\n assert factory._apis['events']._client._timeout == 1.5\n assert isinstance(factory._apis['telemetry'], TelemetryAPI)\n assert factory._apis['telemetry']._client._timeout == 1.5\n\n assert isinstance(factory._tasks['splits'], split_sync.SplitSynchronizationTask)\n assert factory._tasks['splits']._period == DEFAULT_CONFIG['featuresRefreshRate']\n assert factory._tasks['splits']._storage == factory._storages['splits']\n assert factory._tasks['splits']._api == factory._apis['splits']\n assert isinstance(factory._tasks['segments'], segment_sync.SegmentSynchronizationTask)\n assert factory._tasks['segments']._period == DEFAULT_CONFIG['segmentsRefreshRate']\n assert factory._tasks['segments']._segment_storage == factory._storages['segments']\n assert factory._tasks['segments']._split_storage == factory._storages['splits']\n assert factory._tasks['segments']._api == factory._apis['segments']\n assert isinstance(factory._tasks['impressions'], impressions_sync.ImpressionsSyncTask)\n assert factory._tasks['impressions']._period == DEFAULT_CONFIG['impressionsRefreshRate']\n assert factory._tasks['impressions']._storage == factory._storages['impressions']\n assert factory._tasks['impressions']._impressions_api == factory._apis['impressions']\n assert isinstance(factory._tasks['events'], events_sync.EventsSyncTask)\n assert factory._tasks['events']._period == DEFAULT_CONFIG['eventsPushRate']\n assert factory._tasks['events']._storage == factory._storages['events']\n assert factory._tasks['events']._events_api == factory._apis['events']\n assert isinstance(factory._tasks['telemetry'], telemetry_sync.TelemetrySynchronizationTask)\n assert factory._tasks['telemetry']._period == DEFAULT_CONFIG['metricsRefreshRate']\n assert factory._tasks['telemetry']._storage == factory._storages['telemetry']\n assert factory._tasks['telemetry']._api == factory._apis['telemetry']\n assert factory._labels_enabled is True\n factory.block_until_ready()\n time.sleep(1) # give a chance for the bg thread to set the ready status\n assert factory.ready\n factory.destroy()", "def test_AFQ_FA():\n _, bids_path, _ = get_temp_hardi()\n myafq = api.AFQ(\n bids_path=bids_path,\n dmriprep='vistasoft',\n reg_template='dti_fa_template',\n reg_subject='dti_fa_subject')\n myafq.rois", "async def test_update(airsensor, hass, config):\n\n feature_mock, entity_id = airsensor\n\n def initial_update():\n feature_mock.pm1 = 49\n feature_mock.pm2_5 = 222\n feature_mock.pm10 = 333\n\n feature_mock.async_update = AsyncMock(side_effect=initial_update)\n await async_setup_entity(hass, config, entity_id)\n\n state = hass.states.get(entity_id)\n\n assert state.attributes[ATTR_PM_0_1] == 49\n assert state.attributes[ATTR_PM_2_5] == 222\n assert state.attributes[ATTR_PM_10] == 333\n\n assert state.state == \"222\"", "async def test_service_calls_core(oppio_env, opp, aioclient_mock):\n assert await async_setup_component(opp, \"oppio\", {})\n\n aioclient_mock.post(\"http://127.0.0.1/openpeerpower/restart\", json={\"result\": \"ok\"})\n aioclient_mock.post(\"http://127.0.0.1/openpeerpower/stop\", json={\"result\": \"ok\"})\n\n await 
opp.services.async_call(\"openpeerpower\", \"stop\")\n await opp.async_block_till_done()\n\n assert aioclient_mock.call_count == 4\n\n await opp.services.async_call(\"openpeerpower\", \"check_config\")\n await opp.async_block_till_done()\n\n assert aioclient_mock.call_count == 4\n\n with patch(\n \"openpeerpower.config.async_check_op_config_file\", return_value=None\n ) as mock_check_config:\n await opp.services.async_call(\"openpeerpower\", \"restart\")\n await opp.async_block_till_done()\n assert mock_check_config.called\n\n assert aioclient_mock.call_count == 5", "def test_ipam_vrfs_create(self):\n pass", "def test_alive():\n pass", "def test_alive():\n pass", "def test_alive():\n pass", "def runtest(self):", "def testSingleton(self):\r\n self.assertEqual(id(self.res_mgr), id(ReservationManager()))", "def test_bookkeeping():\n\n ## CASE 1: alanine dipeptide in vacuum\n # Create vanilla system\n ala = AlanineDipeptideVacuum()\n system = ala.system\n positions = ala.positions\n\n # Create REST system\n system.removeForce(4)\n res1 = list(ala.topology.residues())[1]\n rest_atoms = [atom.index for atom in res1.atoms()]\n factory = RESTTopologyFactory(system, solute_region=rest_atoms)\n REST_system = factory.REST_system\n\n # Compare energy components\n compare_energy_components(REST_system, system, positions)\n\n ## CASE 2: alanine dipeptide in solvent\n # Create vanilla system\n ala = AlanineDipeptideExplicit()\n system = ala.system\n positions = ala.positions\n\n # Create REST system\n system.removeForce(4)\n res1 = list(ala.topology.residues())[1]\n rest_atoms = [atom.index for atom in res1.atoms()]\n factory = RESTTopologyFactory(system, solute_region=rest_atoms, use_dispersion_correction=True)\n REST_system = factory.REST_system\n\n # Compare energy components\n compare_energy_components(REST_system, system, positions)\n\n ## CASE 3: alanine dipeptide in solvent with repartitioned hybrid system\n # Create repartitioned hybrid system for lambda 0 endstate\n atp, system_generator = generate_atp(phase='solvent')\n htf = generate_dipeptide_top_pos_sys(atp.topology,\n new_res='THR',\n system=atp.system,\n positions=atp.positions,\n system_generator=system_generator,\n conduct_htf_prop=True,\n generate_repartitioned_hybrid_topology_factory=True,\n endstate=0,\n validate_endstate_energy=False)\n\n # Create REST-ified hybrid system\n res1 = list(htf.hybrid_topology.residues)[1]\n rest_atoms = [atom.index for atom in list(res1.atoms)]\n factory = RESTTopologyFactory(htf.hybrid_system, solute_region=rest_atoms, use_dispersion_correction=True)\n REST_system = factory.REST_system\n\n # Compare energy components\n compare_energy_components(REST_system, htf.hybrid_system, htf.hybrid_positions)", "def pytest_started_handling_group(session, worker):", "def init(self):\n \n self._nc_session = TestBedTests.TBNetconfSession(self.log, self.loop)\n self._nc_proxy = TestBedTests.TBNetconfProxy(self._nc_session, UtCompositeYang, self.log)\n self._netconf_test_objects = []\n self._pbreq_test_objects = []\n\n for cls in NETCONF_TESTS:\n obj = cls(self._dts, self.log, self._nc_proxy, self._loop)\n yield from obj.dts_self_register()\n self._netconf_test_objects.append(obj)\n\n for cls in PBREQ_TESTS:\n obj = cls(self._dts, self.log, self._nc_proxy, self._loop)\n yield from obj.dts_self_register()\n self._pbreq_test_objects.append(obj)\n\n @asyncio.coroutine\n def run_all_tests(xact_info, action, ks_path, msg):\n ro1 = yield from self.run_tests(self._netconf_test_objects, msg.continue_on_failure)\n if 
ro1.failed_count is 0 or msg.continue_on_failure is True:\n ro2 = yield from self.run_tests(self._pbreq_test_objects, msg.continue_on_failure)\n\n ro = RwAgentTestbedYang.AgentTestsOp()\n ro.total_tests = ro1.total_tests + ro2.total_tests\n ro.passed_count = ro1.passed_count + ro2.passed_count\n ro.failed_count = ro1.failed_count + ro2.failed_count\n #ro.failed_tests = ro1.failed_tests + ro2.failed_tests\n\n xpath = \"O,/agt-tb:agent-tests\"\n xact_info.respond_xpath(rwdts.XactRspCode.ACK, xpath, ro)\n\n @asyncio.coroutine\n def run_all_netconf_tests(xact_info, action, ks_path, msg):\n ro = yield from self.run_tests(self._netconf_test_objects)\n xpath = \"O,/agt-tb:agent-tests\"\n xact_info.respond_xpath(rwdts.XactRspCode.ACK, xpath, ro)\n\n @asyncio.coroutine\n def run_all_pbreqs_tests(xact_info, action, ks_path, msg):\n ro = yield from self.run_tests(self._pbreq_test_objects)\n xpath = \"O,/agt-tb:agent-tests\"\n xact_info.respond_xpath(rwdts.XactRspCode.ACK, xpath, ro)\n \n # Register for all test-cases\n yield from self._dts.register(\n xpath=\"I,/agt-tb:agent-tests/agt-tb:all\",\n flags=rwdts.Flag.PUBLISHER,\n handler=rift.tasklets.DTS.RegistrationHandler(on_prepare=run_all_tests))\n\n # Register for per category all test-cases\n yield from self._dts.register(\n xpath=\"I,/agt-tb:agent-tests/agt-tb:netconf-tests/agt-tb:all\",\n flags=rwdts.Flag.PUBLISHER,\n handler=rift.tasklets.DTS.RegistrationHandler(on_prepare=run_all_netconf_tests))\n\n yield from self._dts.register(\n xpath=\"I,/agt-tb:agent-tests/agt-tb:pb-request-tests/agt-tb:all\",\n flags=rwdts.Flag.PUBLISHER,\n handler=rift.tasklets.DTS.RegistrationHandler(on_prepare=run_all_pbreqs_tests))", "def test_fifo_sync_random():\n pass", "def test_mmp_active_inference(self):\n\n num_obs = [3, 2]\n num_states = [4, 3]\n num_control = [1, 3]\n A = random_A_matrix(num_obs, num_states)\n B = random_B_matrix(num_states, num_control)\n\n C = obj_array_zeros(num_obs)\n C[1][0] = 1.0 \n C[1][1] = -2.0 \n\n agent = Agent(A=A, B=B, C=C, control_fac_idx=[1], inference_algo=\"MMP\", policy_len=2, inference_horizon=3)\n\n T = 10\n\n for t in range(T):\n\n o = [np.random.randint(num_ob) for num_ob in num_obs] # just randomly generate observations at each timestep, no generative process\n qx = agent.infer_states(o)\n agent.infer_policies()\n action = agent.sample_action()\n \n print(agent.prev_actions)\n print(agent.prev_obs)", "def test_activate_mission(self):\n # Load sample data:\n TSStation.update_stations('TestTAMission.data/stations.xml')\n TASeries.import_xml('TestTAMission.data/series.xml')\n\n taskq = self.testbed.get_stub(testbed.TASKQUEUE_SERVICE_NAME)\n\n # FRS 10.5.1 If not specified, activate_mission must set nominalDate to the current date\n now = now_cet().replace(hour=2)\n mission = TAMission.get('nl.3046')\n mission.activate_mission()\n self.assertEqual(mission.nominalDate, now.date())\n taskq.FlushQueue('default')\n\n # FRS 10.5.2/3 activate_mission must generate origin_id, destination_id and stops\n test_set = ((mark_cet(datetime(2013, 2, 24, 2)), None, 0),\n (mark_cet(datetime(2013, 2, 18, 2)), 'nl.asd', 5),\n (mark_cet(datetime(2013, 2, 19, 2)), 'nl.amr', 8))\n for (testDate, destination, nr_of_stops) in test_set:\n mission.activate_mission(testDate)\n self.assertEqual(mission.destination_id, destination)\n self.assertEqual(len(mission.stops), nr_of_stops)\n mission.put()\n self.assertEqual(mission.origin_id, 'nl.ah')\n self.assertEqual(mission.last_stop.arrival_string, '15:48')\n\n # FRS 10.5.4 activated stops must get 
'planned' status, last stop 'finalDestination'\n for index in range(0, 6):\n self.assertEqual(mission.stops[index].status, StopStatuses.planned)\n self.assertEqual(mission.stops[7].status, StopStatuses.finalDestination)\n\n # FRS 10.5.5 TAMission must queue a check-task while awaking a mission.\n tasks = taskq.GetTasks('default')\n self.assertEqual(len(tasks), 2)\n self.assertEqual(tasks[1]['url'], '/TAMission/nl.3046')\n self.assertEqual(tasks[1]['name'], '19_1231_xx_check_3046')\n taskq.FlushQueue('default')\n\n # FRS 10.5.6 Mission must check announcement of stops\n check_time = mark_cet(datetime(2013, 2, 19, 13, 41, 22))\n mission.stops[0].status = StopStatuses.announced\n mission.check_mission_announcements(check_time)\n tasks = taskq.GetTasks('default')\n self.assertEqual(len(tasks), 2)\n self.assertEqual(tasks[0]['url'], '/agent/station/nl.ed')\n self.assertEqual(tasks[0]['name'], '19_1241_25_check_3046')\n self.assertEqual(tasks[1]['url'], '/TAMission/nl.3046')\n self.assertEqual(tasks[1]['name'], '19_1246_xx_check_3046')\n taskq.FlushQueue('default')\n\n check_time = mark_cet(datetime(2013, 2, 19, 14, 02, 22))\n mission.stops[0].status = StopStatuses.planned\n mission.stops[1].status = StopStatuses.announced\n mission.stops[2].status = StopStatuses.announced\n mission.stops[3].status = StopStatuses.announced\n mission.stops[4].status = StopStatuses.announced\n mission.check_mission_announcements(check_time)\n tasks = taskq.GetTasks('default')\n self.assertEqual(len(tasks), 1)\n self.assertEqual(tasks[0]['url'], '/TAMission/nl.3046')\n self.assertEqual(tasks[0]['name'], '19_1348_xx_check_3046')\n\n # FRS 10.5.7 Mission must provide status and delay\n (status, delay) = mission.status_at_time(mark_cet(datetime(2013,2,19,14,0)))\n self.assertEqual(status, MissionStatuses.inactive)\n self.assertEqual(delay, 0)\n mission.first_stop.status = StopStatuses.announced\n (status, delay) = mission.status_at_time(mark_cet(datetime(2013,2,19,14,0)))\n self.assertEqual(delay, 0)\n self.assertEqual(status, MissionStatuses.announced)\n (status, delay) = mission.status_at_time(mark_cet(datetime(2013,2,19,14,30)))\n self.assertEqual(status, MissionStatuses.running)\n self.assertEqual(delay, 0)\n (status, delay) = mission.status_at_time(mark_cet(datetime(2013,2,19,15,49)))\n self.assertEqual(mission.est_arrival_cet, mark_cet(datetime(2013,2,19,15,48)))\n self.assertEqual(status, MissionStatuses.arrived)\n self.assertEqual(MissionStatuses.s[status], 'arrived')\n self.assertEqual(delay, 0)", "def test_check_occurs_once(self, test_generator):\n feature = test_generator()\n with mock_availability_test(feature) as check:\n check.assert_not_called()\n if feature:\n pass\n check.assert_called_once()\n\n if feature:\n feature.require_now(\"no message\")\n feature.require_in_call(lambda: None)()\n feature.require_in_call(\"no message\")(lambda: None)()\n feature.require_in_instance(type(\"Dummy\", (), {}))()\n feature.require_in_instance(\"no message\")(type(\"Dummy\", (), {}))()\n\n check.assert_called_once()", "async def test_running(coresys: CoreSys):\n\n class TestClass:\n \"\"\"Test class.\"\"\"\n\n def __init__(self, coresys: CoreSys):\n \"\"\"Initialize the test class.\"\"\"\n self.coresys = coresys\n\n @Job(conditions=[JobCondition.RUNNING])\n async def execute(self):\n \"\"\"Execute the class method.\"\"\"\n return True\n\n test = TestClass(coresys)\n\n coresys.core.state = CoreState.RUNNING\n assert await test.execute()\n\n coresys.core.state = CoreState.FREEZE\n assert not await test.execute()", 
"def ParallelToserial(self):\n pass", "async def test_change_pools(self, setup, trusted_and_fee, self_hostname):\n trusted, fee = trusted_and_fee\n full_nodes, wallet_nodes, receive_address, client, rpc_cleanup = setup\n our_ph = receive_address[0]\n pool_a_ph = receive_address[1]\n wallets = [wallet_n.wallet_state_manager.main_wallet for wallet_n in wallet_nodes]\n pool_b_ph = await wallets[1].get_new_puzzlehash()\n full_node_api = full_nodes[0]\n\n if trusted:\n wallet_nodes[0].config[\"trusted_peers\"] = {\n full_node_api.full_node.server.node_id.hex(): full_node_api.full_node.server.node_id.hex()\n }\n else:\n wallet_nodes[0].config[\"trusted_peers\"] = {}\n\n await wallet_nodes[0].server.start_client(\n PeerInfo(self_hostname, uint16(full_node_api.full_node.server._port)), None\n )\n\n WAIT_SECS = 200\n try:\n assert len(await client.get_wallets(WalletType.POOLING_WALLET)) == 0\n\n async def have_chia():\n await farm_blocks(full_node_api, our_ph, 1)\n return (await wallets[0].get_confirmed_balance()) > 0\n\n await time_out_assert(timeout=WAIT_SECS, function=have_chia)\n await time_out_assert(20, wallet_is_synced, True, wallet_nodes[0], full_node_api)\n\n creation_tx: TransactionRecord = await client.create_new_pool_wallet(\n pool_a_ph, \"https://pool-a.org\", 5, f\"{self_hostname}:5000\", \"new\", \"FARMING_TO_POOL\", fee\n )\n\n await time_out_assert(\n 10,\n full_node_api.full_node.mempool_manager.get_spendbundle,\n creation_tx.spend_bundle,\n creation_tx.name,\n )\n\n await farm_blocks(full_node_api, our_ph, 6)\n assert full_node_api.full_node.mempool_manager.get_spendbundle(creation_tx.name) is None\n\n await time_out_assert(20, wallet_is_synced, True, wallet_nodes[0], full_node_api)\n\n summaries_response = await client.get_wallets(WalletType.POOLING_WALLET)\n assert len(summaries_response) == 1\n wallet_id: int = summaries_response[0][\"id\"]\n status: PoolWalletInfo = (await client.pw_status(wallet_id))[0]\n\n assert status.current.state == PoolSingletonState.FARMING_TO_POOL.value\n assert status.target is None\n\n async def status_is_farming_to_pool():\n await farm_blocks(full_node_api, our_ph, 1)\n pw_status: PoolWalletInfo = (await client.pw_status(wallet_id))[0]\n return pw_status.current.state == PoolSingletonState.FARMING_TO_POOL.value\n\n await time_out_assert(timeout=WAIT_SECS, function=status_is_farming_to_pool)\n\n pw_info: PoolWalletInfo = (await client.pw_status(wallet_id))[0]\n assert pw_info.current.pool_url == \"https://pool-a.org\"\n assert pw_info.current.relative_lock_height == 5\n status: PoolWalletInfo = (await client.pw_status(wallet_id))[0]\n\n join_pool_tx: TransactionRecord = (\n await client.pw_join_pool(\n wallet_id,\n pool_b_ph,\n \"https://pool-b.org\",\n 10,\n fee,\n )\n )[\"transaction\"]\n assert join_pool_tx is not None\n\n async def status_is_leaving():\n await farm_blocks(full_node_api, our_ph, 1)\n pw_status: PoolWalletInfo = (await client.pw_status(wallet_id))[0]\n return pw_status.current.state == PoolSingletonState.LEAVING_POOL.value\n\n await time_out_assert(timeout=WAIT_SECS, function=status_is_leaving)\n pw_info: PoolWalletInfo = (await client.pw_status(wallet_id))[0]\n\n await time_out_assert(timeout=WAIT_SECS, function=status_is_farming_to_pool)\n pw_info: PoolWalletInfo = (await client.pw_status(wallet_id))[0]\n assert pw_info.current.pool_url == \"https://pool-b.org\"\n assert pw_info.current.relative_lock_height == 10\n assert len(await wallets[0].wallet_state_manager.tx_store.get_unconfirmed_for_wallet(2)) == 0\n\n finally:\n 
client.close()\n await client.await_closed()\n await rpc_cleanup()", "def vera_component_factory():\n with patch(\"pyvera.init_controller\") as init_controller_mock:\n yield ComponentFactory(init_controller_mock)", "def test_update_instance_limit1(self):\n pass", "async def testcog_load_rescheduled(self):\n self.cog._reschedule = mock.create_autospec(self.cog._reschedule)\n await self.cog.cog_load()\n self.cog._reschedule.assert_awaited_once_with()", "def test_initialized() -> None:\n MapieRegressor()", "def test_uwsgi_forked_client_creation(self):\n # Invalid API Key with preforked should exit after 3 attempts.\n factory = get_factory('some_api_key', config={'preforkedInitialization': True})\n assert isinstance(factory._storages['splits'], inmemmory.InMemorySplitStorage)\n assert isinstance(factory._storages['segments'], inmemmory.InMemorySegmentStorage)\n assert isinstance(factory._storages['impressions'], inmemmory.InMemoryImpressionStorage)\n assert factory._storages['impressions']._impressions.maxsize == 10000\n assert isinstance(factory._storages['events'], inmemmory.InMemoryEventStorage)\n assert factory._storages['events']._events.maxsize == 10000\n\n assert isinstance(factory._sync_manager, Manager)\n\n assert isinstance(factory._recorder, StandardRecorder)\n assert isinstance(factory._recorder._impressions_manager, ImpressionsManager)\n assert isinstance(factory._recorder._event_sotrage, inmemmory.EventStorage)\n assert isinstance(factory._recorder._impression_storage, inmemmory.ImpressionStorage)\n\n assert factory._status == Status.WAITING_FORK\n factory.destroy()", "def test_ipam_vrfs_update(self):\n pass", "def test_api_new_game(self):\n\n with self.client as client:\n ...\n # write a test for this route", "def setUp(self):\n\n self.testInit = TestInit(__file__)\n self.testInit.setLogging()\n self.testInit.setDatabaseConnection()\n self.testInit.setSchema(customModules = [\"WMCore.WMBS\",'WMCore.MsgService',\n 'WMCore.ResourceControl', 'WMCore.ThreadPool',\n 'WMCore.Agent.Database'],\n useDefault = False)\n\n myThread = threading.currentThread()\n self.daoFactory = DAOFactory(package = \"WMCore.WMBS\",\n logger = myThread.logger,\n dbinterface = myThread.dbi)\n\n\n\n locationAction = self.daoFactory(classname = \"Locations.New\")\n pendingSlots = self.daoFactory(classname = \"Locations.SetPendingSlots\")\n\n\n for site in self.sites:\n locationAction.execute(siteName = site, pnn = 'se.%s' % (site), ceName = site)\n pendingSlots.execute(siteName = site, pendingSlots = 1000)\n\n\n #Create sites in resourceControl\n resourceControl = ResourceControl()\n for site in self.sites:\n resourceControl.insertSite(siteName = site, pnn = 'se.%s' % (site), ceName = site)\n resourceControl.insertThreshold(siteName = site, taskType = 'Processing', \\\n maxSlots = 10000, pendingSlots = 10000)\n\n\n self.testDir = self.testInit.generateWorkDir()\n\n\n # Set heartbeat\n for component in self.components:\n heartbeatAPI = HeartbeatAPI(component)\n heartbeatAPI.registerComponent()\n\n self.configFile = EmulatorSetup.setupWMAgentConfig()\n\n return", "async def test_internet(coresys: CoreSys):\n coresys.core.state = CoreState.RUNNING\n\n class TestClass:\n \"\"\"Test class.\"\"\"\n\n def __init__(self, coresys: CoreSys):\n \"\"\"Initialize the test class.\"\"\"\n self.coresys = coresys\n\n @Job(conditions=[JobCondition.INTERNET_HOST])\n async def execute_host(self):\n \"\"\"Execute the class method.\"\"\"\n return True\n\n @Job(conditions=[JobCondition.INTERNET_SYSTEM])\n async def 
execute_system(self):\n \"\"\"Execute the class method.\"\"\"\n return True\n\n test = TestClass(coresys)\n\n coresys.host.network._connectivity = True\n coresys.supervisor._connectivity = True\n assert await test.execute_host()\n assert await test.execute_system()\n\n coresys.host.network._connectivity = True\n coresys.supervisor._connectivity = False\n assert await test.execute_host()\n assert not await test.execute_system()\n\n coresys.host.network._connectivity = None\n coresys.supervisor._connectivity = True\n assert await test.execute_host()\n assert await test.execute_system()\n\n coresys.host.network._connectivity = False\n coresys.supervisor._connectivity = True\n assert not await test.execute_host()\n assert await test.execute_system()", "def test_async_config(self, affiliate_items):\n with mock.patch('chiton.rack.affiliates.bulk.BatchJob') as batch_job:\n bulk_update_affiliate_item_metadata(affiliate_items, workers=10, max_retries=20)\n\n call_kwargs = batch_job.call_args[1]\n assert call_kwargs['workers'] == 10\n assert call_kwargs['max_retries'] == 20", "def test_agent(AgentFactory, steps, envs, percepts):\n print ('RUN TEST AGENT')\n envs.add_thing(AgentFactory)\n #envs.run(steps)\n \n agent = AgentFactory\n agent.program(percept)\n #envs.run(steps)\n envs.runPLWumpus(agent, steps)\n #envs.runPLWumpus(steps)\n print(' ------------PLWumpus test agent KB-----------------------')\n print(agent.KB.clauses)\n #print envs.to_string()\n print('test_agent', envs)\n #print agent.KB.clauses\n return agent.performance\n\n def score(env):\n agent = AgentFactory()\n env.add_thing(agent)\n env.run(steps)\n print('test_agent' , env)\n return agent.performance\n\n #return mean(map(score, envs))\n return None", "async def test_exectution_limit_once(coresys: CoreSys, loop: asyncio.BaseEventLoop):\n\n class TestClass:\n \"\"\"Test class.\"\"\"\n\n def __init__(self, coresys: CoreSys):\n \"\"\"Initialize the test class.\"\"\"\n self.coresys = coresys\n self.run = asyncio.Lock()\n\n @Job(limit=JobExecutionLimit.ONCE, on_condition=JobException)\n async def execute(self, sleep: float):\n \"\"\"Execute the class method.\"\"\"\n assert not self.run.locked()\n async with self.run:\n await asyncio.sleep(sleep)\n\n test = TestClass(coresys)\n run_task = loop.create_task(test.execute(0.3))\n\n await asyncio.sleep(0.1)\n with pytest.raises(JobException):\n await test.execute(0.1)\n\n await run_task", "def test_run_alpha_rarefaction(self):\r\n\r\n run_alpha_rarefaction(\r\n self.test_data['biom'][0],\r\n self.test_data['map'][0],\r\n self.test_out,\r\n call_commands_serially,\r\n self.params,\r\n self.qiime_config,\r\n tree_fp=self.test_data['tree'][0],\r\n num_steps=5,\r\n parallel=False,\r\n min_rare_depth=3,\r\n max_rare_depth=18,\r\n status_update_callback=no_status_updates)\r\n\r\n html_fp = join(self.test_out, 'alpha_rarefaction_plots',\r\n 'rarefaction_plots.html')\r\n pd_averages_fp = join(self.test_out, 'alpha_rarefaction_plots',\r\n 'average_tables', 'PD_whole_treeSampleType.txt')\r\n pd_collated_fp = join(self.test_out, 'alpha_div_collated',\r\n 'PD_whole_tree.txt')\r\n\r\n # Confirm that palm and gut alpha diversities are different,\r\n # and suggestive of statistical significance (we only have a\r\n # few sequences, so we don't get significant results)\r\n ttest_res, alpha_avg = compare_alpha_diversities(open(pd_collated_fp),\r\n open(\r\n self.test_data[\r\n 'map'][0]),\r\n 'SampleType',\r\n 18,\r\n test_type='parametric')\r\n feces_palm_t = ttest_res[('feces', 'L_palm')][0]\r\n 
self.assertTrue(feces_palm_t < 0,\r\n \"t-statistic too high: %1.3f, but should be less than 0\"\r\n % feces_palm_t)\r\n\r\n # check that final output files have non-zero size\r\n self.assertTrue(getsize(html_fp) > 0)\r\n\r\n # Check that the log file is created and has size > 0\r\n log_fp = glob(join(self.test_out, 'log*.txt'))[0]\r\n self.assertTrue(getsize(log_fp) > 0)", "async def test_update(self):\n await self.populate_test_data() # Populate data in foglamp.scheduled_processes\n\n scheduler = Scheduler(_address, _m_port)\n await scheduler.start()\n\n interval_schedule = IntervalSchedule()\n interval_schedule.name = 'sleep10'\n interval_schedule.process_name = \"sleep10\"\n interval_schedule.repeat = datetime.timedelta(seconds=0)\n\n await scheduler.save_schedule(interval_schedule) # Save update on _scheduler\n\n await asyncio.sleep(1)\n # Assert only 1 task is running\n tasks = await scheduler.get_running_tasks()\n assert len(tasks) == 1\n\n # Update 'updated' schedule interval\n interval_schedule.name = 'updated'\n interval_schedule.process_name = \"sleep1\"\n interval_schedule.repeat = datetime.timedelta(seconds=5) # Set time interval to 5 sec\n\n await scheduler.save_schedule(interval_schedule) # Save update on _scheduler\n await asyncio.sleep(6)\n\n # Assert: only 1 task is running\n tasks = await scheduler.get_running_tasks() # list of current running tasks\n assert len(tasks) == 1\n\n interval_schedule.exclusive = False\n await scheduler.save_schedule(interval_schedule)\n\n # Check able to get same schedule after restart\n # Check fields have been modified\n await self.stop_scheduler(scheduler)\n scheduler = Scheduler(_address, _m_port)\n await scheduler.start()\n\n schedule = await scheduler.get_schedule(interval_schedule.schedule_id)\n\n # Make sure that the values used by schedule are as expected\n assert schedule.process_name == 'sleep1'\n assert schedule.name == 'updated'\n assert schedule.repeat.seconds == 5\n assert not schedule.exclusive\n\n await self.stop_scheduler(scheduler)", "def test_launch_composition(self):\n pass", "def test_leftover_single_pi_allocation(self):\n proj_obj, fabric_obj, pr_objs = self._create_prerequisites(\n create_second_pr=True)\n test_id = self.id()\n\n def process_ae_ids(x):\n return [int(i) for i in sorted(x)]\n\n def get_zk_ae_ids(prs=None):\n prefix = os.path.join(\n self.__class__.__name__,\n 'id', 'aggregated-ethernet')\n zk_client = self._api_server._db_conn._zk_db._zk_client._zk_client\n if not prs:\n prs = [os.path.join(prefix, pr.name) for pr in pr_objs]\n else:\n if not isinstance(prs, list):\n prs = [prs]\n prs = [os.path.join(prefix, pr) for pr in prs]\n ae_ids = {}\n for pr in prs:\n pr_org = os.path.split(pr)[-1]\n ae_ids[pr_org] = zk_client.get_children(pr)\n return ae_ids\n\n pi_per_pr = 3\n pi_objs = {}\n pr1_pi_names = ['%s_pr1_pi%d' % (test_id, i) for\n i in range(1, pi_per_pr + 1)]\n pr1_pi_objs = self._create_pi_objects(pr_objs[0], pr1_pi_names)\n pi_objs.update(pr1_pi_objs)\n\n # create one VPG\n vpg_count = 1\n vpg_names = ['vpg_%s_%s' % (test_id, i) for\n i in range(1, vpg_count + 1)]\n vpg_objs = self._create_vpgs(fabric_obj, vpg_names)\n\n # record AE-IDs in ZK before creating any VPG\n ae_ids = [x for x in get_zk_ae_ids().values() if x]\n self.assertEqual(len(ae_ids), 0)\n\n # attach only PI1/PR1 to VPG-1\n # no AE-ID to be allocated\n ae_ids = {}\n vpg_name = vpg_names[0]\n vpg_obj = vpg_objs[vpg_name]\n vpg_obj.add_physical_interface(pi_objs[pr1_pi_names[0]])\n self.api.virtual_port_group_update(vpg_obj)\n 
vpg_obj = self._vnc_lib.virtual_port_group_read(id=vpg_obj.uuid)\n\n pi_refs = vpg_obj.get_physical_interface_refs()\n ae_ids[vpg_name] = {ref['href'].split('/')[-1]: ref['attr'].ae_num\n for ref in pi_refs}\n # verify PI-refs are correct\n self.assertEqual(len(pi_refs), 1)\n # verify all AE-IDs allocated per prouter are unique\n self.assertEqual(len(set(ae_ids[vpg_name].keys())), len(pi_refs))\n self.assertEqual(len(set(ae_ids[vpg_name].values())), 1)\n self.assertIsNone(list(ae_ids[vpg_name].values())[0])\n\n # verification at Physical Routers\n pr_ae_ids = get_zk_ae_ids()\n self.assertEqual(len(pr_ae_ids[pr_objs[0].name]), 0)\n self.assertEqual(process_ae_ids(pr_ae_ids[pr_objs[0].name]), [])\n\n # Attach PI1/PR1 and PI2/PR1 to VPG-1\n ae_ids = {}\n vpg_name = vpg_names[0]\n vpg_obj = self._vnc_lib.virtual_port_group_read(id=vpg_obj.uuid)\n for pi in range(1, 3):\n vpg_obj.add_physical_interface(pi_objs[pr1_pi_names[pi]])\n self.api.virtual_port_group_update(vpg_obj)\n vpg_obj = self._vnc_lib.virtual_port_group_read(id=vpg_obj.uuid)\n\n pi_refs = vpg_obj.get_physical_interface_refs()\n ae_ids[vpg_name] = {ref['href'].split('/')[-1]: ref['attr'].ae_num\n for ref in pi_refs}\n # verify PI-refs are correct\n self.assertEqual(len(pi_refs), 3)\n # verify all AE-IDs allocated per prouter are unique\n self.assertEqual(len(set(ae_ids[vpg_name].keys())), len(pi_refs))\n self.assertEqual(len(set(ae_ids[vpg_name].values())), 1)\n\n # verification at Physical Routers\n pr_ae_ids = get_zk_ae_ids()\n self.assertEqual(len(pr_ae_ids[pr_objs[0].name]), 1)\n self.assertEqual(process_ae_ids(pr_ae_ids[pr_objs[0].name]), [0])", "def testInterrogate(self):\n\n flow_name = \"Interrogate\"\n\n with test_lib.Stubber(flow.GRRFlow, \"SendReply\", self.MockSendReply):\n # Run the flow in the simulated way\n for _ in test_lib.TestFlowHelper(flow_name, InterrogatedClient(),\n token=self.token,\n client_id=self.client_id):\n pass\n\n # Now check that the AFF4 object is properly set\n fd = aff4.FACTORY.Open(self.client_id, token=self.token)\n\n self.assertEqual(fd.Get(fd.Schema.HOSTNAME), \"test_node\")\n self.assertEqual(fd.Get(fd.Schema.SYSTEM), \"Linux\")\n self.assertEqual(fd.Get(fd.Schema.INSTALL_DATE), 100 * 1000000)\n\n # Check the client info\n info = fd.Get(fd.Schema.CLIENT_INFO)\n\n self.assertEqual(info.client_name, config_lib.CONFIG[\"Client.name\"])\n self.assertEqual(info.client_version,\n int(config_lib.CONFIG[\"Client.version_numeric\"]))\n self.assertEqual(info.build_time, config_lib.CONFIG[\"Client.build_time\"])\n\n # Check the client config\n config_info = fd.Get(fd.Schema.GRR_CONFIG)\n self.assertEqual(config_info.location, \"http://www.example.com\")\n self.assertEqual(config_info.poll_min, 1.0)\n\n # Check that the index has been updated.\n index_fd = aff4.FACTORY.Create(fd.Schema.client_index, \"AFF4Index\",\n mode=\"r\", token=self.token)\n\n self.assertEqual(\n [fd.urn],\n [x for x in index_fd.Query([fd.Schema.HOSTNAME], \".*test.*\")])\n\n # Check for notifications\n user_fd = aff4.FACTORY.Open(\"aff4:/users/test\", token=self.token)\n notifications = user_fd.Get(user_fd.Schema.PENDING_NOTIFICATIONS)\n\n self.assertEqual(len(notifications), 1)\n notification = notifications[0]\n\n self.assertEqual(notification.subject, rdfvalue.RDFURN(self.client_id))\n\n # Check that reply sent from the flow is correct\n self.assertEqual(self.flow_reply.client_info.client_name,\n config_lib.CONFIG[\"Client.name\"])\n self.assertEqual(self.flow_reply.client_info.client_version,\n 
int(config_lib.CONFIG[\"Client.version_numeric\"]))\n self.assertEqual(self.flow_reply.client_info.build_time,\n config_lib.CONFIG[\"Client.build_time\"])\n\n self.assertEqual(self.flow_reply.system_info.system, \"Linux\")\n self.assertEqual(self.flow_reply.system_info.node, \"test_node\")\n self.assertEqual(self.flow_reply.system_info.release, \"5\")\n self.assertEqual(self.flow_reply.system_info.version, \"2\")\n self.assertEqual(self.flow_reply.system_info.machine, \"i386\")\n\n users = list(fd.Get(fd.Schema.USER))\n self.assertEqual(len(users), 3)\n self.assertEqual(users[0].username, \"Foo\")\n self.assertEqual(users[1].username, \"Bar\")\n self.assertEqual(users[2].username, u\"文德文\")\n self.assertEqual(str(fd.Get(fd.Schema.USERNAMES)),\n \"Foo Bar 文德文\")\n\n net_fd = fd.OpenMember(\"network\")\n interfaces = list(net_fd.Get(net_fd.Schema.INTERFACES))\n self.assertEqual(interfaces[0].mac_address, \"123456\")\n self.assertEqual(interfaces[0].addresses[0].human_readable, \"127.0.0.1\")\n self.assertEqual(socket.inet_ntoa(interfaces[0].addresses[0].packed_bytes),\n \"127.0.0.1\")\n\n # Mac addresses should be available as hex for searching\n mac_addresses = fd.Get(fd.Schema.MAC_ADDRESS)\n self.assertTrue(\"123456\".encode(\"hex\") in str(mac_addresses))\n\n # Check that virtual directories exist for the mount points\n fd = aff4.FACTORY.Open(self.client_id.Add(\"fs/os/mnt/data\"),\n token=self.token)\n # But no directory listing exists yet - we will need to fetch a new one\n self.assertEqual(len(list(fd.OpenChildren())), 0)\n\n fd = aff4.FACTORY.Open(self.client_id.Add(\"fs/tsk/dev/sda\"),\n token=self.token)\n # But no directory listing exists yet - we will need to fetch a new one\n self.assertEqual(len(list(fd.OpenChildren())), 0)\n\n fd = aff4.FACTORY.Open(self.client_id.Add(\"devices/dev/sda\"),\n token=self.token)\n # But no directory listing exists yet - we will need to fetch a new one\n self.assertEqual(len(list(fd.OpenChildren())), 0)\n\n # Check flow's reply\n self.assertEqual(len(self.flow_reply.users), 3)\n self.assertEqual(self.flow_reply.users[0].username, \"Foo\")\n self.assertEqual(self.flow_reply.users[1].username, \"Bar\")\n self.assertEqual(self.flow_reply.users[2].username, u\"文德文\")\n\n self.assertEqual(len(self.flow_reply.interfaces), 1)\n self.assertEqual(self.flow_reply.interfaces[0].mac_address, \"123456\")\n\n # Check that the client summary was published to the event listener.\n self.assertEqual(DiscoveryTestEventListener.event.client_id, self.client_id)\n self.assertEqual(\n DiscoveryTestEventListener.event.interfaces[0].mac_address,\n \"123456\")\n\n # Check that label indexes are updated.\n self.assertEqual(\n list(search.SearchClients(\"label:Label2\", token=self.token)),\n [self.client_id])", "def test_solve_task(self):\n pass", "async def test_workflow(center, caplog, api, rename_image):\n # pylint: disable=too-many-locals,too-many-statements\n caplog.set_level(logging.DEBUG)\n config_path = Path(resource_filename(bootstrap.__name__, DEFAULT_CONFIG_TEMPLATE))\n config = await center.add_executor_job(load_config_file, config_path)\n config.pop(\"logging\")\n await bootstrap.setup_dict(center, config)\n rename_image_auto = center.data[\"automations\"][\"rename_image\"]\n assert rename_image_auto.enabled\n set_img_ok_auto = center.data[\"automations\"][\"set_img_ok\"]\n assert set_img_ok_auto.enabled\n assert not center.samples.leica.data\n assert api.start_imaging.call_count == 0\n assert api.stop_imaging.call_count == 0\n assert 
center.actions.actions.get(\"rename_image\", {}).get(\"rename_image\")\n\n event = CamAcqStartEvent()\n await center.bus.notify(event)\n await center.wait_for()\n\n well = center.samples.leica.get_sample(\"well\", plate_name=\"00\", well_x=0, well_y=0)\n assert well is not None\n assert api.send.call_args_list[0] == call(command=\"/cmd:deletelist\")\n assert api.send.call_args_list[1] == call(\n command=(\n \"/cmd:add /tar:camlist /exp:p10xgain /ext:af /slide:0 \"\n \"/wellx:1 /welly:1 /fieldx:1 /fieldy:2 /dxpos:0 /dypos:0\"\n )\n )\n assert api.send.call_args_list[2] == call(\n command=(\n \"/cmd:add /tar:camlist /exp:p10xgain /ext:af /slide:0 \"\n \"/wellx:1 /welly:1 /fieldx:2 /fieldy:2 /dxpos:0 /dypos:0\"\n )\n )\n assert not rename_image_auto.enabled\n assert not set_img_ok_auto.enabled\n assert api.start_imaging.call_count == 1\n assert api.send.call_args_list[3] == call(command=\"/cmd:startcamscan\")\n\n event = WorkflowImageEvent(\n {\n \"path\": \"test_path\",\n \"plate_name\": \"00\",\n \"well_x\": 0,\n \"well_y\": 0,\n \"field_x\": 1,\n \"field_y\": 1,\n \"z_slice_id\": 0,\n \"job_id\": 2,\n \"channel_id\": 31,\n }\n )\n await center.bus.notify(event)\n await center.wait_for()\n\n assert api.stop_imaging.call_count == 1\n assert api.send.call_args_list[4] == call(\n command=\"/cmd:adjust /tar:pmt /num:1 /exp:gain_job_1 /prop:gain /value:800\"\n )\n channel = center.samples.leica.get_sample(\n \"channel\", plate_name=\"00\", well_x=0, well_y=0, channel_id=3\n )\n assert channel.values.get(\"gain\") == 800\n assert channel.values.get(\"channel_name\") == \"red\"\n assert api.send.call_args_list[5] == call(command=\"/cmd:deletelist\")\n for idx, api_call in enumerate(api.send.call_args_list[6:12]):\n field_x = int(idx / 3) + 1\n field_y = idx % 3 + 1\n assert api_call == call(\n (\n \"/cmd:add /tar:camlist /exp:p10xexp /ext:af /slide:0 /wellx:1 \"\n f\"/welly:1 /fieldx:{field_x} /fieldy:{field_y} /dxpos:0 /dypos:0\"\n )\n )\n assert rename_image_auto.enabled\n assert set_img_ok_auto.enabled\n assert api.start_imaging.call_count == 2\n assert api.send.call_args_list[12] == call(command=\"/cmd:startcamscan\")\n\n for x_number in range(2):\n for y_number in range(3):\n event = WorkflowImageEvent(\n {\n \"path\": f\"test_path_{x_number}_{y_number}_C00\",\n \"plate_name\": \"00\",\n \"well_x\": 0,\n \"well_y\": 0,\n \"field_x\": x_number,\n \"field_y\": y_number,\n \"z_slice_id\": 0,\n \"job_id\": 4,\n \"channel_id\": 1,\n }\n )\n await center.bus.notify(event)\n await center.wait_for()\n\n for idx, rename_call in enumerate(rename_image.call_args_list[:6]):\n field_x = int(idx / 3)\n field_y = idx % 3\n assert rename_call == call(\n Path(f\"test_path_{field_x}_{field_y}_C00\"),\n Path(f\"test_path_{field_x}_{field_y}_C03\"),\n )\n\n fields = get_matched_samples(\n center.samples.leica,\n \"field\",\n attrs={\"plate_name\": \"00\", \"well_x\": 0, \"well_y\": 0},\n )\n assert len(fields) == 6\n assert all(field.values.get(\"field_img_ok\", False) for field in fields)\n assert api.stop_imaging.call_count == 2\n well_0_1 = center.samples.leica.get_sample(\n \"well\", plate_name=\"00\", well_x=0, well_y=1\n )\n assert well_0_1.well_x == 0\n assert well_0_1.well_y == 1\n assert api.send.call_args_list[13] == call(command=\"/cmd:deletelist\")\n assert api.send.call_args_list[14] == call(\n command=(\n \"/cmd:add /tar:camlist /exp:p10xgain /ext:af /slide:0 \"\n \"/wellx:1 /welly:2 /fieldx:1 /fieldy:2 /dxpos:0 /dypos:0\"\n )\n )\n assert api.send.call_args_list[15] == call(\n command=(\n 
\"/cmd:add /tar:camlist /exp:p10xgain /ext:af /slide:0 \"\n \"/wellx:1 /welly:2 /fieldx:2 /fieldy:2 /dxpos:0 /dypos:0\"\n )\n )\n assert not rename_image_auto.enabled\n assert not set_img_ok_auto.enabled\n assert api.start_imaging.call_count == 3\n assert api.send.call_args_list[16] == call(command=\"/cmd:startcamscan\")", "def setup(env, NUM_TRACKS, landtime, t_inter):\n # Create the airport\n airport = Airport(env, NUM_TRACKS, landtime)\n\n # Create 4 initial planes\n for i in range(1):\n env.process(plane(env, 'Aviao %d' % i, airport))\n\n # Create more planes while the simulation is running\n while True:\n yield env.timeout(random.randint(t_inter-2, t_inter+2))\n# yield env.timeout(random.expovariate(1.0 / t_inter))\n i += 1\n env.process(plane(env, 'Aviao %d' % i, airport))", "def testMaybeScheduleNewActors(self):\n from xgboost_ray.main import _TrainingState\n from xgboost_ray.elastic import _update_scheduled_actor_states\n from xgboost_ray.elastic import _maybe_schedule_new_actors\n\n # Three actors are dead\n actors = [\n MagicMock(), None,\n MagicMock(),\n MagicMock(), None,\n MagicMock(), None,\n MagicMock()\n ]\n\n # Mock training state\n state = _TrainingState(\n actors=actors,\n queue=MagicMock(),\n stop_event=MagicMock(),\n checkpoint=MagicMock(),\n additional_results={},\n failed_actor_ranks=set(),\n )\n\n created_actors = []\n\n def fake_create_actor(rank, *args, **kwargs):\n created_actors.append(rank)\n return MagicMock()\n\n with patch(\"xgboost_ray.elastic._create_actor\") as create_actor:\n create_actor.side_effect = fake_create_actor\n\n _maybe_schedule_new_actors(\n training_state=state,\n num_cpus_per_actor=8,\n num_gpus_per_actor=0,\n resources_per_actor={\"custom\": 1.0},\n load_data=[],\n ray_params=RayParams(\n num_actors=8,\n elastic_training=True,\n max_failed_actors=1,\n max_actor_restarts=2))\n\n # 3 new actors should have been created\n self.assertEqual(len(created_actors), 3)\n self.assertEqual(len(state.pending_actors), 3)\n\n # The number of created actors shouldn't change even\n # if we run this function again.\n _maybe_schedule_new_actors(\n training_state=state,\n num_cpus_per_actor=8,\n num_gpus_per_actor=0,\n resources_per_actor={\"custom\": 1.0},\n load_data=[],\n ray_params=RayParams(\n num_actors=8,\n elastic_training=True,\n max_failed_actors=1,\n max_actor_restarts=2))\n\n self.assertEqual(len(created_actors), 3)\n self.assertEqual(len(state.pending_actors), 3)\n\n # The actors have not yet been promoted because the\n # loading task has not finished.\n self.assertFalse(actors[1])\n self.assertFalse(actors[4])\n self.assertFalse(actors[6])\n\n # Update status, nothing should change\n _update_scheduled_actor_states(training_state=state)\n\n self.assertFalse(actors[1])\n self.assertFalse(actors[4])\n self.assertFalse(actors[6])\n\n # Set loading task status to finished, but only for first actor\n for _, (_, task) in state.pending_actors.items():\n task.ready = True\n break\n\n # Update status. 
This shouldn't raise RayXGBoostActorAvailable\n # because we still have a grace period to wait for the second\n # actor.\n _update_scheduled_actor_states(training_state=state)\n\n # Grace period is set through ELASTIC_RESTART_GRACE_PERIOD_S\n # Allow for some slack in test execution\n self.assertGreaterEqual(state.restart_training_at,\n time.time() + 22)\n\n # The first actor should have been promoted to full actor\n self.assertTrue(actors[1])\n self.assertFalse(actors[4])\n self.assertFalse(actors[6])\n\n # Set loading task status to finished for all actors\n for _, (_, task) in state.pending_actors.items():\n task.ready = True\n\n # Update status. This should now raise RayXGBoostActorAvailable\n # immediately as there are no pending actors left to wait for.\n with self.assertRaises(RayXGBoostActorAvailable):\n _update_scheduled_actor_states(training_state=state)\n\n # All restarted actors should have been promoted to full actors\n self.assertTrue(actors[1])\n self.assertTrue(actors[4])\n self.assertTrue(actors[6])", "def test_primary(self):\n st = ServiceTicketFactory(primary=True)\n self.assertTrue(st.is_primary())", "def testAPICleanup(self):\n\n api = API({})\n api.pool = ThreadPool()\n\n # pylint: disable=C2801\n api.__del__()\n\n self.assertIsNone(api.pool)", "def test_create_activity_occurrence(self):\n pass", "def test_set_timeout(init_process_group_mock):\n test_timedelta = timedelta(seconds=30)\n strategy = FSDPStrategy(timeout=test_timedelta, parallel_devices=[torch.device(\"cpu\")])\n strategy.cluster_environment = LightningEnvironment()\n strategy.accelerator = Mock()\n strategy.setup_environment()\n process_group_backend = strategy._get_process_group_backend()\n global_rank = strategy.cluster_environment.global_rank()\n world_size = strategy.cluster_environment.world_size()\n init_process_group_mock.assert_called_with(\n process_group_backend, rank=global_rank, world_size=world_size, timeout=test_timedelta\n )", "async def test_haos(coresys: CoreSys):\n\n class TestClass:\n \"\"\"Test class.\"\"\"\n\n def __init__(self, coresys: CoreSys):\n \"\"\"Initialize the test class.\"\"\"\n self.coresys = coresys\n\n @Job(conditions=[JobCondition.HAOS])\n async def execute(self):\n \"\"\"Execute the class method.\"\"\"\n return True\n\n test = TestClass(coresys)\n coresys.hassos._available = True\n assert await test.execute()\n\n coresys.hassos._available = False\n assert not await test.execute()", "def test_pvc_reattach_time_performance(self, pvc_factory, teardown_factory):\n\n kernel_url = \"https://cdn.kernel.org/pub/linux/kernel/v4.x/linux-4.19.5.tar.gz\"\n download_path = \"tmp\"\n # Number of times we copy the kernel\n copies = 3\n\n # Download a linux Kernel\n import os\n\n dir_path = os.path.join(os.getcwd(), download_path)\n file_path = os.path.join(dir_path, \"file.gz\")\n if not os.path.exists(dir_path):\n os.makedirs(dir_path)\n urllib.request.urlretrieve(kernel_url, file_path)\n\n worker_nodes_list = node.get_worker_nodes()\n assert len(worker_nodes_list) > 1\n node_one = worker_nodes_list[0]\n node_two = worker_nodes_list[1]\n\n # Create a PVC\n accessmode = constants.ACCESS_MODE_RWX\n if self.interface == constants.CEPHBLOCKPOOL:\n accessmode = constants.ACCESS_MODE_RWO\n pvc_obj = pvc_factory(\n interface=self.interface,\n access_mode=accessmode,\n status=constants.STATUS_BOUND,\n size=\"15\",\n )\n\n # Create a pod on one node\n logging.info(f\"Creating Pod with pvc {pvc_obj.name} on node {node_one}\")\n\n helpers.pull_images(constants.PERF_IMAGE)\n pod_obj1 = 
helpers.create_pod(\n interface_type=self.interface,\n pvc_name=pvc_obj.name,\n namespace=pvc_obj.namespace,\n node_name=node_one,\n pod_dict_path=constants.PERF_POD_YAML,\n )\n\n # Confirm that pod is running on the selected_nodes\n logging.info(\"Checking whether pods are running on the selected nodes\")\n helpers.wait_for_resource_state(\n resource=pod_obj1, state=constants.STATUS_RUNNING, timeout=120\n )\n\n pod_name = pod_obj1.name\n pod_path = \"/mnt\"\n\n _ocp = OCP(namespace=pvc_obj.namespace)\n\n rsh_cmd = f\"rsync {dir_path} {pod_name}:{pod_path}\"\n _ocp.exec_oc_cmd(rsh_cmd)\n\n rsh_cmd = f\"exec {pod_name} -- tar xvf {pod_path}/tmp/file.gz -C {pod_path}/tmp\"\n _ocp.exec_oc_cmd(rsh_cmd)\n\n for x in range(copies):\n rsh_cmd = f\"exec {pod_name} -- mkdir -p {pod_path}/folder{x}\"\n _ocp.exec_oc_cmd(rsh_cmd)\n rsh_cmd = f\"exec {pod_name} -- cp -r {pod_path}/tmp {pod_path}/folder{x}\"\n _ocp.exec_oc_cmd(rsh_cmd)\n rsh_cmd = f\"exec {pod_name} -- sync\"\n _ocp.exec_oc_cmd(rsh_cmd)\n\n log.info(\"Getting the amount of data written to the PVC\")\n rsh_cmd = f\"exec {pod_name} -- df -h {pod_path}\"\n data_written = _ocp.exec_oc_cmd(rsh_cmd).split()[-4]\n log.info(f\"The Amount of data that was written to the pod is {data_written}\")\n rsh_cmd = f\"delete pod {pod_name}\"\n _ocp.exec_oc_cmd(rsh_cmd)\n\n logging.info(f\"Creating Pod with pvc {pvc_obj.name} on node {node_two}\")\n\n pod_obj2 = helpers.create_pod(\n interface_type=self.interface,\n pvc_name=pvc_obj.name,\n namespace=pvc_obj.namespace,\n node_name=node_two,\n pod_dict_path=constants.PERF_POD_YAML,\n )\n\n start_time = time.time()\n\n pod_name = pod_obj2.name\n helpers.wait_for_resource_state(\n resource=pod_obj2, state=constants.STATUS_RUNNING, timeout=120\n )\n end_time = time.time()\n total_time = end_time - start_time\n if total_time > 60:\n raise ex.PerformanceException(\n f\"Pod creation time is {total_time} and greater than 60 seconds\"\n )\n logging.info(f\"Pod {pod_name} creation time took {total_time} seconds\")\n\n teardown_factory(pod_obj2)\n os.remove(file_path)\n os.rmdir(dir_path)", "def test_create_launch_expire_assignments(self):\n mock_data_array = self.get_mock_assignment_data_array()\n launcher = TaskLauncher(self.db, self.task_run, mock_data_array)\n launcher.create_assignments()\n\n self.assertEqual(\n len(launcher.assignments),\n len(mock_data_array),\n \"Inequal number of assignments existed than were launched\",\n )\n self.assertEqual(\n len(launcher.units),\n len(mock_data_array) * len(mock_data_array[0].unit_data),\n \"Inequal number of units created than were expected\",\n )\n\n for unit in launcher.units:\n self.assertEqual(unit.get_db_status(), AssignmentState.CREATED)\n for assignment in launcher.assignments:\n self.assertEqual(assignment.get_status(), AssignmentState.CREATED)\n\n launcher.launch_units(\"dummy-url:3000\")\n\n for unit in launcher.units:\n self.assertEqual(unit.get_db_status(), AssignmentState.LAUNCHED)\n time.sleep(WAIT_TIME_TILL_NEXT_UNIT)\n for assignment in launcher.assignments:\n self.assertEqual(assignment.get_status(), AssignmentState.LAUNCHED)\n\n launcher.expire_units()\n\n for unit in launcher.units:\n self.assertEqual(unit.get_db_status(), AssignmentState.EXPIRED)\n for assignment in launcher.assignments:\n self.assertEqual(assignment.get_status(), AssignmentState.EXPIRED)", "def minimal_test():\n # create an actor object in the actors_store\n actor = create_actor_object()\n aid = actor.db_id\n\n # send spawner a message to start a worker for a new actor\n worker_id 
= models.Worker.ensure_one_worker(aid, actor.tenant)\n ch = channels.CommandChannel()\n ch.put_cmd(actor_id=aid,\n worker_id=worker_id,\n image=actor.image,\n revision=actor.revision,\n tenant=actor.tenant,\n stop_existing=False)\n\n # send a message to the actor's inbox\n eid = create_execution_object(actor)\n ch = channels.ActorMsgChannel(actor=aid)\n d = {}\n d['_abaco_username'] = 'jstubbs'\n d['_abaco_api_server'] = actor.api_server\n d['_abaco_execution_id'] = eid\n d['_abaco_Content_Type'] = 'str'\n ch.put_msg(message='test', d=d)\n ch.close()\n\n # wait for execution to complete:\n wait_for_execution(aid, eid)", "def test_mcts_agent(self):\n logging.info(\"Starting test_mcts_agent\")\n dirname = os.path.dirname(__file__)\n filename = os.path.join(dirname, \"../configs/factory_floor_simple.yaml\")\n parameters = getParameters(filename)\n env = FactoryFloor(parameters)\n obs = env.reset()\n\n mctsAgents = []\n\n randomagent = 'aiagents.single.RandomAgent.RandomAgent'\n for robotId in env.action_space.spaces.keys():\n mctsparams = {'treeAgent':{'class': randomagent, 'id':robotId, 'parameters':{} },\n 'rolloutAgent':{'class': randomagent, 'id':robotId, 'parameters':{} }} \n mctsparams['simulator'] = dict(parameters)\n mctsparams['simulator']['fullname'] = \"aienvs.FactoryFloor.FactoryFloor.FactoryFloor\"\n \n mctsAgents.append(MctsAgent(robotId, env.action_space, env.observation_space , mctsparams))\n\n complexAgent = BasicComplexAgent(mctsAgents, env.action_space, env.observation_space)\n\n episode = Episode(complexAgent, env, obs, render=True)\n episode.run()", "async def test_fgsm(): \n #fgsm algo option:\n r = {}\n async with AsyncClient(app=app, base_url=\"http://test\") as ac:\n \n ALGO_NAME = AlteritAlgoName.fgsm_algo\n ffile = {'input_image': open(TEST_IMAGE_PATH, 'rb'),\n \"input_image_path\": TEST_IMAGE_PATH,\n \"alter_parameters\":json.dumps({\"acall\":True,\n \"epsilon\":0.01})\n }\n \n for epsilon_, result_ in zip([0.01, 0.1], ['saluki', 'weimaranner',]):\n r = await ac.post(f'/alterit/?algo_name={ALGO_NAME}', files = ffile)\n assert r.status_code == 200\n i1, i2, i3, i4 = await a_from_zip_stream_to_att_data(r)\n assert i1['0'][1] == result_ \n \n # async for i in mygen(5):\n # print(f'step {i}:')\n # #ALGO_NAME = AlteritAlgoName.fgsm_algo\n # ffile = {'input_image': open(TEST_IMAGE_PATH, 'rb'),\n # \"input_image_path\": TEST_IMAGE_PATH,\n # \"alter_parameters\":json.dumps({\"acall\":True,\n # \"epsilon\":0.01}),}\n # r[i] = await ac.post(f'/alterit/?algo_name={ALGO_NAME}', files = ffile)\n # assert r[i].status_code == 200\n # i1, i2, i3, i4 = await a_from_zip_stream_to_att_data(r[i])\n # assert i1['0'][1] == \"saluki\"", "def test_alchemical_phase_factory_building(self):\n with mmtools.utils.temporary_directory() as tmp_dir:\n template_script = self.get_implicit_template_script(tmp_dir)\n\n # AbsoluteAlchemicalFactory options.\n template_script['options']['alchemical_pme_treatment'] = 'exact'\n\n # Test that options are passed to AlchemicalPhaseFactory correctly.\n exp_builder = ExperimentBuilder(script=template_script)\n for experiment in exp_builder.build_experiments():\n for phase_factory in experiment.phases:\n assert phase_factory.alchemical_factory.alchemical_pme_treatment == 'exact'\n # Overwrite AbsoluteAlchemicalFactory default for disable_alchemical_dispersion_correction.\n assert phase_factory.alchemical_factory.disable_alchemical_dispersion_correction == True", "def __init__(self, asa_factory: AsaFactory):\n super().__init__(asa_factory) # initialize 
step_in_progress flag\n self.agent, self.sampler, self.algo = asa_factory()\n self.batch_spec = self.sampler.batch_spec\n self.grad = None\n self.traj_infos = None\n self.opt_info = None", "def test__call__(self):\n mock = Mock()\n factory = Factory(mock)\n factory()\n mock.assert_called_once_with()", "async def test_manual_schedule(self):\n await self.populate_test_data() # Populate data in foglamp.scheduled_processes\n\n scheduler = Scheduler(_address, _m_port)\n await scheduler.start()\n\n # Declare manual interval schedule\n manual_schedule = ManualSchedule()\n manual_schedule.name = 'manual task'\n manual_schedule.process_name = 'sleep10'\n manual_schedule.repeat = datetime.timedelta(seconds=0)\n\n await scheduler.save_schedule(manual_schedule)\n manual_schedule = await scheduler.get_schedule(manual_schedule.schedule_id)\n\n await scheduler.queue_task(manual_schedule.schedule_id) # Added a task to the _scheduler queue\n await asyncio.sleep(5)\n\n tasks = await scheduler.get_running_tasks()\n assert len(tasks) == 1\n\n await self.stop_scheduler(scheduler)", "def test_simple_multitask():\n bucket = []\n def _foo():\n for i in range(10):\n bucket.append(i)\n yield\n\n scheduler = Scheduler()\n scheduler.new(_foo())\n scheduler.new(_foo())\n scheduler.mainloop()\n\n expect_bucket = []\n for i in range(10):\n expect_bucket.append(i)\n expect_bucket.append(i)\n assert bucket == expect_bucket", "def test_setting_state_parallel(self):\n no_replicates = 25\n\n replicate(experiment, no_replicates, parallel=True, no_processes=2)\n for i in range(no_replicates):\n self.assertIn('result', state[SUBSTATE_KEY_PATTERN % i])\n self.assertEqual(state[SUBSTATE_KEY_PATTERN % i]['result'], \"bla\")" ]
[ "0.588525", "0.58404994", "0.5700447", "0.5668184", "0.5639154", "0.5630297", "0.5601512", "0.558073", "0.5506044", "0.5464134", "0.5464046", "0.5454045", "0.54427963", "0.5430056", "0.54291415", "0.54067427", "0.54062814", "0.5388401", "0.53826404", "0.53818905", "0.53586805", "0.5358201", "0.5352884", "0.5345808", "0.53335357", "0.5314364", "0.5313943", "0.5313571", "0.5288687", "0.52776027", "0.52668977", "0.5266032", "0.5265183", "0.52645206", "0.5261797", "0.5259988", "0.52554256", "0.52491117", "0.52449685", "0.5240393", "0.52390605", "0.5232866", "0.5223555", "0.522115", "0.52162236", "0.521335", "0.52100945", "0.5192864", "0.5190344", "0.5190344", "0.5190344", "0.51878834", "0.51785225", "0.5169783", "0.5167435", "0.5166169", "0.5161918", "0.51615053", "0.51610714", "0.515381", "0.51465863", "0.51432294", "0.5131348", "0.51308197", "0.51281583", "0.5127267", "0.51249254", "0.51223195", "0.5122036", "0.511912", "0.5114319", "0.5108922", "0.5108439", "0.5103648", "0.50965863", "0.5093904", "0.5091811", "0.5091136", "0.50908566", "0.5090352", "0.50881034", "0.50804317", "0.50779563", "0.5071871", "0.5066679", "0.5065903", "0.5064502", "0.5055533", "0.5054716", "0.5052806", "0.5048427", "0.50431824", "0.5034935", "0.5033872", "0.50317407", "0.5030574", "0.5030496", "0.50294787", "0.50231147", "0.50199294" ]
0.63022053
0
Test the apm factory for consecutive refinement.
def test_ParameterManagerGenerator_consecutive():
    components_1 = {
        "scale": mock_component(),
        "decay": mock_component(),
        "absorption": mock_component(),
    }
    data_manager = mock_data_manager(components_1)
    data_manager.consecutive_refinement_order = [["scale", "decay"], ["absorption"]]

    # Test single dataset case.
    pmg = ParameterManagerGenerator(
        [data_manager],
        apm_type=active_parameter_manager,
        target=ScalingTarget(),
        mode="consecutive",
    )
    apms = list(pmg.parameter_managers())
    assert len(apms) == 2
    apm = apms[0]
    assert isinstance(apm, multi_active_parameter_manager)
    assert "scale" in apm.components_list
    assert "decay" in apm.components_list
    assert "absorption" not in apm.components_list
    apm = apms[1]
    assert isinstance(apm, multi_active_parameter_manager)
    assert "scale" not in apm.components_list
    assert "decay" not in apm.components_list
    assert "absorption" in apm.components_list

    # Test multi dataset case.
    components_2 = {"1": mock_component(), "2": mock_component()}
    data_manager_2 = mock_data_manager(components_2)
    data_manager_2.consecutive_refinement_order = [["1"], ["2"]]
    pmg = ParameterManagerGenerator(
        [data_manager, data_manager_2],
        apm_type=active_parameter_manager,
        target=ScalingTarget(),
        mode="consecutive",
    )
    apms = list(pmg.parameter_managers())
    assert len(apms) == 2
    multi_apm = apms[0]
    assert isinstance(multi_apm, multi_active_parameter_manager)
    apm_1 = multi_apm.apm_list[0]
    assert "scale" in apm_1.components_list
    assert "decay" in apm_1.components_list
    assert "absorption" not in apm_1.components_list
    assert multi_apm.apm_list[1].components_list == ["1"]
    multi_apm = apms[1]
    assert isinstance(multi_apm, multi_active_parameter_manager)
    assert multi_apm.apm_list[0].components_list == ["absorption"]
    assert multi_apm.apm_list[1].components_list == ["2"]

    # Test multi dataset case with different number of cycles for each data_manager.
    components_2 = {"1": mock_component()}
    data_manager_2 = mock_data_manager(components_2)
    data_manager_2.consecutive_refinement_order = [["1"], ["2"]]
    pmg = ParameterManagerGenerator(
        [data_manager, data_manager_2],
        apm_type=active_parameter_manager,
        target=ScalingTarget(),
        mode="consecutive",
    )
    assert pmg.param_lists[0] == [["scale", "decay"], ["absorption"]]
    assert pmg.param_lists[1] == [["1"]]
    apms = list(pmg.parameter_managers())
    assert len(apms) == 2
    multi_apm = apms[0]
    assert isinstance(multi_apm, multi_active_parameter_manager)
    apm_1 = multi_apm.apm_list[0]
    assert "scale" in apm_1.components_list
    assert "decay" in apm_1.components_list
    assert "absorption" not in apm_1.components_list
    assert multi_apm.apm_list[1].components_list == ["1"]
    multi_apm = apms[1]
    assert isinstance(multi_apm, multi_active_parameter_manager)
    assert multi_apm.apm_list[0].components_list == ["absorption"]
    # Only change relative to previous test case.
    assert multi_apm.apm_list[1].components_list == []

    # Test fixing the decay parameter.
    data_manager.fixed_components = ["decay"]
    pmg = ParameterManagerGenerator(
        [data_manager],
        apm_type=active_parameter_manager,
        target=ScalingTarget(),
        mode="consecutive",
    )
    apms = list(pmg.parameter_managers())
    assert len(apms) == 2
    apm = apms[0]
    assert isinstance(apm, multi_active_parameter_manager)
    assert "scale" in apm.components_list
    assert "decay" not in apm.components_list
    assert "absorption" not in apm.components_list
    apm = apms[1]
    assert isinstance(apm, multi_active_parameter_manager)
    assert "scale" not in apm.components_list
    assert "decay" not in apm.components_list
    assert "absorption" in apm.components_list
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_general_apm():\n components = {\n \"scale\": mock_component(),\n \"decay\": mock_component(),\n \"absorption\": mock_component(),\n }\n\n apm = active_parameter_manager(components, [\"scale\", \"decay\"])\n assert \"decay\" in apm.components_list\n assert \"scale\" in apm.components_list\n assert \"absorption\" not in apm.components_list\n assert apm.n_active_params == (\n components[\"scale\"].n_params + components[\"decay\"].n_params\n )\n n_cumul = 0\n for component in apm.components:\n assert apm.components[component][\"n_params\"] == components[component].n_params\n assert apm.components[component][\"start_idx\"] == n_cumul\n assert (\n apm.components[component][\"end_idx\"]\n == n_cumul + apm.components[component][\"n_params\"]\n )\n n_cumul += apm.components[component][\"n_params\"]\n\n apm.set_param_vals(flex.double([2.0, 1.5]))\n assert apm.get_param_vals() == flex.double([2.0, 1.5])\n # Test params were updated in components\n assert list(components[\"scale\"].free_parameters) == [2.0]\n assert list(components[\"decay\"].free_parameters) == [1.5]\n # Test selection of parameters\n decay_params = apm.select_parameters(\"decay\")\n assert len(decay_params) == 1\n assert decay_params[0] == 1.5\n\n # Test calculate model state uncertainties\n var_cov = flex.double([1.0, 0.5, 0.5, 2.0])\n var_cov.reshape(flex.grid(2, 2))\n apm.calculate_model_state_uncertainties(var_cov)\n assert components[\"scale\"].var_cov_matrix[0, 0] == 1.0\n assert components[\"decay\"].var_cov_matrix[0, 0] == 2.0\n\n # Test set param esds.\n apm.set_param_esds(flex.double([0.1, 0.2]))\n assert components[\"scale\"].free_parameter_esds == flex.double([0.1])\n assert components[\"decay\"].free_parameter_esds == flex.double([0.2])", "def test_multi_apm():\n\n components_1 = {\n \"scale\": mock_component(),\n \"decay\": mock_component(),\n \"absorption\": mock_component(),\n }\n components_2 = {\"scale\": mock_component(), \"decay\": mock_component()}\n\n multi_apm = multi_active_parameter_manager(\n ScalingTarget(),\n [components_1, components_2],\n [[\"scale\", \"decay\"], [\"scale\"]],\n active_parameter_manager,\n )\n\n # Test correct setup of apm_list attribute.\n for apm in multi_apm.apm_list:\n assert isinstance(apm, active_parameter_manager)\n assert len(multi_apm.apm_list) == 2\n assert multi_apm.components_list == [\"scale\", \"decay\", \"scale\"]\n assert multi_apm.n_active_params == 3\n assert multi_apm.apm_data[0] == {\"start_idx\": 0, \"end_idx\": 2}\n assert multi_apm.apm_data[1] == {\"start_idx\": 2, \"end_idx\": 3}\n\n # Test parameter selection.\n multi_apm.set_param_vals(flex.double([3.0, 2.5, 2.0]))\n assert multi_apm.get_param_vals() == flex.double([3.0, 2.5, 2.0])\n assert multi_apm.select_parameters(0) == flex.double([3.0, 2.5])\n assert multi_apm.select_parameters(1) == flex.double([2.0])\n\n # Test setting parameter esds.\n multi_apm.set_param_esds(flex.double([0.1, 0.2, 0.3]))\n assert components_1[\"scale\"].free_parameter_esds == flex.double([0.1])\n assert components_1[\"decay\"].free_parameter_esds == flex.double([0.2])\n assert components_2[\"scale\"].free_parameter_esds == flex.double([0.3])\n\n # Test setting var_cov matrices for each component.\n var_cov = flex.double([1.0, 0.5, 0.5, 0.5, 2.0, 0.5, 0.5, 0.5, 3.0])\n var_cov.reshape(flex.grid(3, 3))\n multi_apm.calculate_model_state_uncertainties(var_cov)\n assert components_1[\"scale\"].var_cov_matrix[0, 0] == 1.0\n assert components_1[\"decay\"].var_cov_matrix[0, 0] == 2.0\n assert 
components_2[\"scale\"].var_cov_matrix[0, 0] == 3.0", "def test_active_inference_SPM_1b(self):", "def test_add_multiple_pis_simultaneously_to_vpg_check_reallocation(self):\n proj_obj, fabric_obj, pr_objs = self._create_prerequisites(\n create_second_pr=True)\n test_id = self.id()\n VPG_CLASS = self._api_server.get_resource_class('virtual-port-group')\n org_process_ae_id = VPG_CLASS._process_ae_id\n\n class MockVpg(VPG_CLASS):\n org_process_ae_id = VPG_CLASS._process_ae_id\n HOLD_API = True\n @classmethod\n def mock_process_ae_id(cls, db_obj_dict, vpg_name, obj_dict=None):\n while cls.HOLD_API:\n print('sleeping for HOLD_API to clear for '\n 'args = %s' % obj_dict)\n gevent.sleep(0.5)\n return cls.org_process_ae_id(db_obj_dict, vpg_name, obj_dict)\n\n def process_ae_ids(x):\n return [int(i) for i in sorted(x) if i is not None]\n\n def get_zk_ae_ids(prs=None):\n prefix = os.path.join(\n self.__class__.__name__,\n 'id', 'aggregated-ethernet')\n zk_client = self._api_server._db_conn._zk_db._zk_client._zk_client\n if not prs:\n prs = [os.path.join(prefix, pr.name) for pr in pr_objs]\n else:\n if not isinstance(prs, list):\n prs = [prs]\n prs = [os.path.join(prefix, pr) for pr in prs]\n ae_ids = {}\n for pr in prs:\n pr_org = os.path.split(pr)[-1]\n ae_ids[pr_org] = zk_client.get_children(pr)\n return ae_ids\n\n pi_per_pr = 6\n pi_objs = {}\n pr1_pi_names = ['%s_pr1_pi%d' % (test_id, i) for\n i in range(1, pi_per_pr + 1)]\n pr2_pi_names = ['%s_pr2_pi%d' % (test_id, i) for\n i in range(1, pi_per_pr + 1)]\n pr1_pi_objs = self._create_pi_objects(pr_objs[0], pr1_pi_names)\n pr2_pi_objs = self._create_pi_objects(pr_objs[1], pr2_pi_names)\n pi_objs.update(pr1_pi_objs)\n pi_objs.update(pr2_pi_objs)\n\n # create a VPG\n vpg_count = 3\n vpg_names = ['vpg_%s_%s' % (test_id, i) for i in range(\n 1, vpg_count + 1)]\n vpg_objs = self._create_vpgs(fabric_obj, vpg_names)\n\n # record AE-IDs in ZK before creating any VPG\n ae_ids = [x for x in get_zk_ae_ids().values() if x]\n self.assertEqual(len(ae_ids), 0)\n\n def _attach_pi_simultaneously(vpg_obj, pi_uuids):\n # Attach PIs from PR1 to VPG-1\n vpg_obj = self._vnc_lib.virtual_port_group_read(id=vpg_obj.uuid)\n try:\n # mock _process_ae_id at VPG resource\n VPG_CLASS._process_ae_id = MockVpg.mock_process_ae_id\n MockVpg.HOLD_API = True\n for pi_uuid in pi_uuids:\n gevent.spawn(\n self.api.ref_update,\n \"virtual-port-group\",\n vpg_obj.uuid,\n \"physical-interface\",\n pi_uuid,\n None,\n \"ADD\",\n None)\n gevent.sleep(2)\n MockVpg.HOLD_API = False\n gevent.sleep(3)\n except gevent.timeout.Timeout:\n self.assertFalse(\n False,\n '%s failed unexpectedly' % VPG_CLASS._process_ae_id)\n finally:\n # reset mock to original\n VPG_CLASS._process_ae_id = org_process_ae_id\n vpg_obj = self._vnc_lib.virtual_port_group_read(id=vpg_obj.uuid)\n pi_refs = vpg_obj.get_physical_interface_refs()\n return vpg_obj, pi_refs\n\n # Case 1\n # Attach 2 PIs from PR1 to VPG-1\n vpg_name = vpg_names[0]\n vpg_obj = vpg_objs[vpg_name]\n vpg_obj = self._vnc_lib.virtual_port_group_read(id=vpg_obj.uuid)\n pi_uuids = [pi.uuid for pi in list(pr1_pi_objs.values())[0:2]]\n vpg_obj, pi_refs = _attach_pi_simultaneously(vpg_obj, pi_uuids)\n # verify PI-refs are correct\n self.assertEqual(len(pi_refs), 2)\n vpg_ae_ids = {ref['href'].split('/')[-1]: ref['attr'].ae_num\n for ref in pi_refs}\n # verify all AE-IDs allocated per prouter are unique\n self.assertEqual(len(set(vpg_ae_ids.keys())), len(pi_refs))\n self.assertEqual(len(list(vpg_ae_ids.values())), 2)\n 
self.assertEqual(len(set(vpg_ae_ids.values())), 1)\n ae_id_sorted = process_ae_ids(vpg_ae_ids.values())\n self.assertEqual(ae_id_sorted, [0, 0])\n # verification at ZK for AE-IDs in Physical Routers\n pr_ae_ids = get_zk_ae_ids()\n self.assertEqual(len(pr_ae_ids[pr_objs[0].name]), 1)\n self.assertEqual(process_ae_ids(pr_ae_ids[pr_objs[0].name]), [0])\n self.assertEqual(len(pr_ae_ids[pr_objs[1].name]), 0)\n self.assertEqual(process_ae_ids(pr_ae_ids[pr_objs[1].name]), [])\n\n # Case 2\n # Attach 2 PIs from PR1 to VPG-2\n vpg_name = vpg_names[1]\n vpg_obj = vpg_objs[vpg_name]\n vpg_obj = self._vnc_lib.virtual_port_group_read(id=vpg_obj.uuid)\n pi_uuids = [pi.uuid for pi in list(pr1_pi_objs.values())[2:4]]\n vpg_obj, pi_refs = _attach_pi_simultaneously(vpg_obj, pi_uuids)\n # verify PI-refs are correct\n self.assertEqual(len(pi_refs), 2)\n vpg_ae_ids = {ref['href'].split('/')[-1]: ref['attr'].ae_num\n for ref in pi_refs}\n # verify all AE-IDs allocated per prouter are unique\n self.assertEqual(len(set(vpg_ae_ids.keys())), len(pi_refs))\n self.assertEqual(len(list(vpg_ae_ids.values())), 2)\n self.assertEqual(len(set(vpg_ae_ids.values())), 1)\n ae_id_sorted = process_ae_ids(vpg_ae_ids.values())\n self.assertEqual(ae_id_sorted, [1, 1])\n # verification at ZK for AE-IDs in Physical Routers\n pr_ae_ids = get_zk_ae_ids()\n self.assertEqual(len(pr_ae_ids[pr_objs[0].name]), 2)\n self.assertEqual(process_ae_ids(pr_ae_ids[pr_objs[0].name]), [0, 1])\n self.assertEqual(len(pr_ae_ids[pr_objs[1].name]), 0)\n self.assertEqual(process_ae_ids(pr_ae_ids[pr_objs[1].name]), [])\n\n # Case 3\n # Deattach 1 PIs from PR1 to VPG-1\n vpg_name = vpg_names[0]\n vpg_obj = vpg_objs[vpg_name]\n vpg_obj = self._vnc_lib.virtual_port_group_read(id=vpg_obj.uuid)\n pi_obj = list(pr1_pi_objs.values())[0]\n pi_obj = self._vnc_lib.physical_interface_read(id=pi_obj.uuid)\n vpg_obj.del_physical_interface(pi_obj)\n self.api.virtual_port_group_update(vpg_obj)\n vpg_obj = self._vnc_lib.virtual_port_group_read(id=vpg_obj.uuid)\n pi_refs = vpg_obj.get_physical_interface_refs()\n # verify PI-refs are correct\n self.assertEqual(len(pi_refs), 1)\n vpg_ae_ids = {ref['href'].split('/')[-1]: ref['attr'].ae_num\n for ref in pi_refs}\n # verify all AE-IDs allocated per prouter are unique\n self.assertEqual(len(set(vpg_ae_ids.keys())), len(pi_refs))\n self.assertIsNone(list(vpg_ae_ids.values())[0])\n ae_id_sorted = process_ae_ids(vpg_ae_ids.values())\n self.assertEqual(ae_id_sorted, [])\n # verification at ZK for AE-IDs in Physical Routers\n pr_ae_ids = get_zk_ae_ids()\n self.assertEqual(len(pr_ae_ids[pr_objs[0].name]), 1)\n self.assertEqual(process_ae_ids(pr_ae_ids[pr_objs[0].name]), [1])\n self.assertEqual(len(pr_ae_ids[pr_objs[1].name]), 0)\n self.assertEqual(process_ae_ids(pr_ae_ids[pr_objs[1].name]), [])\n\n # Case 4\n # Attach 2 PIs from PR1 to VPG-3\n vpg_name = vpg_names[2]\n vpg_obj = vpg_objs[vpg_name]\n vpg_obj = self._vnc_lib.virtual_port_group_read(id=vpg_obj.uuid)\n pi_uuids = [pi.uuid for pi in list(pr1_pi_objs.values())[4:6]]\n vpg_obj, pi_refs = _attach_pi_simultaneously(vpg_obj, pi_uuids)\n # verify PI-refs are correct\n self.assertEqual(len(pi_refs), 2)\n vpg_ae_ids = {ref['href'].split('/')[-1]: ref['attr'].ae_num\n for ref in pi_refs}\n # verify all AE-IDs allocated per prouter are unique\n self.assertEqual(len(set(vpg_ae_ids.keys())), len(pi_refs))\n self.assertEqual(len(list(vpg_ae_ids.values())), 2)\n self.assertEqual(len(set(vpg_ae_ids.values())), 1)\n ae_id_sorted = process_ae_ids(vpg_ae_ids.values())\n 
self.assertEqual(ae_id_sorted, [0, 0])\n # verification at ZK for AE-IDs in Physical Routers\n pr_ae_ids = get_zk_ae_ids()\n self.assertEqual(len(pr_ae_ids[pr_objs[0].name]), 2)\n self.assertEqual(process_ae_ids(pr_ae_ids[pr_objs[0].name]), [0, 1])\n self.assertEqual(len(pr_ae_ids[pr_objs[1].name]), 0)\n self.assertEqual(process_ae_ids(pr_ae_ids[pr_objs[1].name]), [])\n\n # Case 5\n # Attach 1 PIs from PR1 to VPG-1\n vpg_name = vpg_names[0]\n vpg_obj = vpg_objs[vpg_name]\n vpg_obj = self._vnc_lib.virtual_port_group_read(id=vpg_obj.uuid)\n pi_obj = list(pr1_pi_objs.values())[0]\n pi_obj = self._vnc_lib.physical_interface_read(id=pi_obj.uuid)\n vpg_obj.add_physical_interface(pi_obj)\n self._vnc_lib.virtual_port_group_update(vpg_obj)\n vpg_obj = self._vnc_lib.virtual_port_group_read(id=vpg_obj.uuid)\n pi_refs = vpg_obj.get_physical_interface_refs()\n # verify PI-refs are correct\n self.assertEqual(len(pi_refs), 2)\n vpg_ae_ids = {ref['href'].split('/')[-1]: ref['attr'].ae_num\n for ref in pi_refs}\n # verify all AE-IDs allocated per prouter are unique\n self.assertEqual(len(set(vpg_ae_ids.keys())), len(pi_refs))\n self.assertEqual(len(list(vpg_ae_ids.values())), 2)\n self.assertEqual(len(set(vpg_ae_ids.values())), 1)\n ae_id_sorted = process_ae_ids(vpg_ae_ids.values())\n self.assertEqual(ae_id_sorted, [2, 2])\n # verification at ZK for AE-IDs in Physical Routers\n pr_ae_ids = get_zk_ae_ids()\n self.assertEqual(len(pr_ae_ids[pr_objs[0].name]), 3)\n self.assertEqual(process_ae_ids(pr_ae_ids[pr_objs[0].name]), [0, 1, 2])\n self.assertEqual(len(pr_ae_ids[pr_objs[1].name]), 0)\n self.assertEqual(process_ae_ids(pr_ae_ids[pr_objs[1].name]), [])", "def test_add_multiple_pis_simultaneously_to_vpg_check_deallocation(self):\n proj_obj, fabric_obj, pr_objs = self._create_prerequisites(\n create_second_pr=True)\n test_id = self.id()\n VPG_CLASS = self._api_server.get_resource_class('virtual-port-group')\n org_process_ae_id = VPG_CLASS._process_ae_id\n\n class MockVpg(VPG_CLASS):\n org_process_ae_id = VPG_CLASS._process_ae_id\n HOLD_API = True\n @classmethod\n def mock_process_ae_id(cls, db_obj_dict, vpg_name, obj_dict=None):\n while cls.HOLD_API:\n print('sleeping for HOLD_API to clear for '\n 'args = %s' % obj_dict)\n gevent.sleep(0.5)\n return cls.org_process_ae_id(db_obj_dict, vpg_name, obj_dict)\n\n def process_ae_ids(x):\n return [int(i) for i in sorted(x) if i is not None]\n\n def get_zk_ae_ids(prs=None):\n prefix = os.path.join(\n self.__class__.__name__,\n 'id', 'aggregated-ethernet')\n zk_client = self._api_server._db_conn._zk_db._zk_client._zk_client\n if not prs:\n prs = [os.path.join(prefix, pr.name) for pr in pr_objs]\n else:\n if not isinstance(prs, list):\n prs = [prs]\n prs = [os.path.join(prefix, pr) for pr in prs]\n ae_ids = {}\n for pr in prs:\n pr_org = os.path.split(pr)[-1]\n ae_ids[pr_org] = zk_client.get_children(pr)\n return ae_ids\n\n pi_per_pr = 1\n pi_objs = {}\n pr1_pi_names = ['%s_pr1_pi%d' % (test_id, i) for\n i in range(1, pi_per_pr + 1)]\n pr2_pi_names = ['%s_pr2_pi%d' % (test_id, i) for\n i in range(1, pi_per_pr + 1)]\n pr1_pi_objs = self._create_pi_objects(pr_objs[0], pr1_pi_names)\n pr2_pi_objs = self._create_pi_objects(pr_objs[1], pr2_pi_names)\n pi_objs.update(pr1_pi_objs)\n pi_objs.update(pr2_pi_objs)\n\n # create a VPG\n vpg_count = 1\n vpg_names = ['vpg_%s_%s' % (test_id, i) for i in range(\n 1, vpg_count + 1)]\n vpg_objs = self._create_vpgs(fabric_obj, vpg_names)\n\n # record AE-IDs in ZK before creating any VPG\n ae_ids = [x for x in get_zk_ae_ids().values() if 
x]\n self.assertEqual(len(ae_ids), 0)\n\n def _attach_pi_simultaneously(vpg_obj, pi_uuids):\n # Attach PIs from PR1 to VPG-1\n vpg_obj = self._vnc_lib.virtual_port_group_read(id=vpg_obj.uuid)\n try:\n # mock _process_ae_id at VPG resource\n VPG_CLASS._process_ae_id = MockVpg.mock_process_ae_id\n MockVpg.HOLD_API = True\n for pi_uuid in pi_uuids:\n gevent.spawn(\n self.api.ref_update,\n \"virtual-port-group\",\n vpg_obj.uuid,\n \"physical-interface\",\n pi_uuid,\n None,\n \"ADD\",\n None)\n gevent.sleep(2)\n MockVpg.HOLD_API = False\n gevent.sleep(3)\n except gevent.timeout.Timeout:\n self.assertFalse(\n False,\n '%s failed unexpectedly' % VPG_CLASS._process_ae_id)\n finally:\n # reset mock to original\n VPG_CLASS._process_ae_id = org_process_ae_id\n vpg_obj = self._vnc_lib.virtual_port_group_read(id=vpg_obj.uuid)\n pi_refs = vpg_obj.get_physical_interface_refs()\n return vpg_obj, pi_refs\n\n # Case 1\n # Attach 2 PIs from PR1 to VPG-1\n vpg_name = vpg_names[0]\n vpg_obj = vpg_objs[vpg_name]\n vpg_obj = self._vnc_lib.virtual_port_group_read(id=vpg_obj.uuid)\n pr1_pi_uuids = list(pr1_pi_objs.values())[0].uuid\n pr2_pi_uuids = list(pr2_pi_objs.values())[0].uuid\n pi_uuids = [pr1_pi_uuids, pr2_pi_uuids]\n vpg_obj, pi_refs = _attach_pi_simultaneously(vpg_obj, pi_uuids)\n # verify PI-refs are correct\n self.assertEqual(len(pi_refs), 2)\n vpg_ae_ids = {ref['href'].split('/')[-1]: ref['attr'].ae_num\n for ref in pi_refs}\n # verify all AE-IDs allocated per prouter are unique\n self.assertEqual(len(set(vpg_ae_ids.keys())), len(pi_refs))\n self.assertEqual(len(list(vpg_ae_ids.values())), 2)\n self.assertEqual(len(set(vpg_ae_ids.values())), 1)\n ae_id_sorted = process_ae_ids(vpg_ae_ids.values())\n self.assertEqual(ae_id_sorted, [0, 0])\n # verification at ZK for AE-IDs in Physical Routers\n pr_ae_ids = get_zk_ae_ids()\n self.assertEqual(len(pr_ae_ids[pr_objs[0].name]), 1)\n self.assertEqual(process_ae_ids(pr_ae_ids[pr_objs[0].name]), [0])\n self.assertEqual(len(pr_ae_ids[pr_objs[1].name]), 1)\n self.assertEqual(process_ae_ids(pr_ae_ids[pr_objs[1].name]), [0])\n\n # Case 2\n # Deattach 1 PIs from PR1 to VPG-1\n vpg_name = vpg_names[0]\n vpg_obj = vpg_objs[vpg_name]\n vpg_obj = self._vnc_lib.virtual_port_group_read(id=vpg_obj.uuid)\n pi_obj = list(pr1_pi_objs.values())[0]\n pi_obj = self._vnc_lib.physical_interface_read(id=pi_obj.uuid)\n vpg_obj.del_physical_interface(pi_obj)\n self.api.virtual_port_group_update(vpg_obj)\n vpg_obj = self._vnc_lib.virtual_port_group_read(id=vpg_obj.uuid)\n pi_refs = vpg_obj.get_physical_interface_refs()\n # verify PI-refs are correct\n self.assertEqual(len(pi_refs), 1)\n vpg_ae_ids = {ref['href'].split('/')[-1]: ref['attr'].ae_num\n for ref in pi_refs}\n # verify all AE-IDs allocated per prouter are unique\n self.assertEqual(len(set(vpg_ae_ids.keys())), len(pi_refs))\n self.assertIsNone(list(vpg_ae_ids.values())[0])\n ae_id_sorted = process_ae_ids(vpg_ae_ids.values())\n self.assertEqual(ae_id_sorted, [])\n # verification at ZK for AE-IDs in Physical Routers\n pr_ae_ids = get_zk_ae_ids()\n self.assertEqual(len(pr_ae_ids[pr_objs[0].name]), 0)\n self.assertEqual(process_ae_ids(pr_ae_ids[pr_objs[0].name]), [])\n self.assertEqual(len(pr_ae_ids[pr_objs[1].name]), 0)\n self.assertEqual(process_ae_ids(pr_ae_ids[pr_objs[1].name]), [])", "def test_alchemical_phase_factory_building(self):\n with mmtools.utils.temporary_directory() as tmp_dir:\n template_script = self.get_implicit_template_script(tmp_dir)\n\n # AbsoluteAlchemicalFactory options.\n 
template_script['options']['alchemical_pme_treatment'] = 'exact'\n\n # Test that options are passed to AlchemicalPhaseFactory correctly.\n exp_builder = ExperimentBuilder(script=template_script)\n for experiment in exp_builder.build_experiments():\n for phase_factory in experiment.phases:\n assert phase_factory.alchemical_factory.alchemical_pme_treatment == 'exact'\n # Overwrite AbsoluteAlchemicalFactory default for disable_alchemical_dispersion_correction.\n assert phase_factory.alchemical_factory.disable_alchemical_dispersion_correction == True", "def test_02(self, test):\r\n\r\n return test.MANUAL()", "def test_04(self, test):\r\n globalConfig.test = test\r\n\r\n self.check_delayed_activation(SCHEDULED_ABSOLUTE_ACTIVATION)\r\n\r\n return test.PASS()", "def test_bookkeeping():\n\n ## CASE 1: alanine dipeptide in vacuum\n # Create vanilla system\n ala = AlanineDipeptideVacuum()\n system = ala.system\n positions = ala.positions\n\n # Create REST system\n system.removeForce(4)\n res1 = list(ala.topology.residues())[1]\n rest_atoms = [atom.index for atom in res1.atoms()]\n factory = RESTTopologyFactory(system, solute_region=rest_atoms)\n REST_system = factory.REST_system\n\n # Compare energy components\n compare_energy_components(REST_system, system, positions)\n\n ## CASE 2: alanine dipeptide in solvent\n # Create vanilla system\n ala = AlanineDipeptideExplicit()\n system = ala.system\n positions = ala.positions\n\n # Create REST system\n system.removeForce(4)\n res1 = list(ala.topology.residues())[1]\n rest_atoms = [atom.index for atom in res1.atoms()]\n factory = RESTTopologyFactory(system, solute_region=rest_atoms, use_dispersion_correction=True)\n REST_system = factory.REST_system\n\n # Compare energy components\n compare_energy_components(REST_system, system, positions)\n\n ## CASE 3: alanine dipeptide in solvent with repartitioned hybrid system\n # Create repartitioned hybrid system for lambda 0 endstate\n atp, system_generator = generate_atp(phase='solvent')\n htf = generate_dipeptide_top_pos_sys(atp.topology,\n new_res='THR',\n system=atp.system,\n positions=atp.positions,\n system_generator=system_generator,\n conduct_htf_prop=True,\n generate_repartitioned_hybrid_topology_factory=True,\n endstate=0,\n validate_endstate_energy=False)\n\n # Create REST-ified hybrid system\n res1 = list(htf.hybrid_topology.residues)[1]\n rest_atoms = [atom.index for atom in list(res1.atoms)]\n factory = RESTTopologyFactory(htf.hybrid_system, solute_region=rest_atoms, use_dispersion_correction=True)\n REST_system = factory.REST_system\n\n # Compare energy components\n compare_energy_components(REST_system, htf.hybrid_system, htf.hybrid_positions)", "def run(self):\n\n from dials.algorithms.refinement.refiner import phil_scope\n params = phil_scope.fetch(source=phil.parse('')).extract()\n\n # disable outlier rejection for speed of refiner construction\n params.refinement.reflections.outlier.algorithm='null'\n\n refiner = RefinerFactory.from_parameters_data_experiments(params,\n self._reflections, self._experiments)\n\n d1 = self._experiments[0].detector\n d2 = refiner.get_experiments()[0].detector\n\n assert d1.is_similar_to(d2)\n print \"OK\"\n return", "def test_1(self):\n # Generate constraint periods\n constr = {\"program\": {\"version\": \"python\"}}\n # Generate random initialization file\n params_spec, options_spec = generate_random_model(point_constr=constr)\n respy_obj = RespyCls(params_spec, options_spec)\n respy_obj = simulate_observed(respy_obj)\n\n # Extract class attributes\n (\n 
state_space,\n states_all,\n mapping_state_idx,\n periods_rewards_systematic,\n periods_emax,\n num_periods,\n num_draws_emax,\n edu_spec,\n optim_paras,\n num_types,\n ) = dist_class_attributes(\n respy_obj,\n \"state_space\",\n \"states_all\",\n \"mapping_state_idx\",\n \"periods_rewards_systematic\",\n \"periods_emax\",\n \"num_periods\",\n \"num_draws_emax\",\n \"edu_spec\",\n \"optim_paras\",\n \"num_types\",\n )\n\n # Sample draws\n draws_emax_standard = np.random.multivariate_normal(\n np.zeros(4), np.identity(4), num_draws_emax\n )\n draws_emax_risk = transform_disturbances(\n draws_emax_standard, np.zeros(4), optim_paras[\"shocks_cholesky\"]\n )\n\n # Sampling of random period and admissible state index\n period = np.random.choice(range(num_periods))\n k = np.random.choice(range(state_space.states_per_period[period]))\n\n # Select systematic rewards\n rewards_systematic = periods_rewards_systematic[period, k, :]\n\n # Evaluation of simulated expected future values. Limit to one individual as the\n # Fortran version.\n rewards_period = state_space.get_attribute_from_period(\"rewards\", period)[k]\n emaxs_period = state_space.get_attribute_from_period(\"emaxs\", period)[k, :4]\n max_education_period = (\n state_space.get_attribute_from_period(\"states\", period)[k, 3]\n >= edu_spec[\"max\"]\n )\n\n py = construct_emax_risk(\n rewards_period[-2:],\n rewards_period[:4],\n emaxs_period,\n draws_emax_risk,\n optim_paras[\"delta\"],\n max_education_period,\n )\n\n f90 = fort_debug.wrapper_construct_emax_risk(\n num_periods,\n num_draws_emax,\n period,\n k,\n draws_emax_risk,\n rewards_systematic,\n periods_emax,\n states_all,\n mapping_state_idx,\n edu_spec[\"start\"],\n edu_spec[\"max\"],\n optim_paras[\"delta\"],\n optim_paras[\"coeffs_common\"],\n optim_paras[\"coeffs_a\"],\n optim_paras[\"coeffs_b\"],\n num_types,\n )\n\n assert_allclose(py, f90)", "def test_parego(facade, make_scenario, configspace):\n N_TRIALS = 64\n RETRAIN_AFTER = 8\n\n scenario: Scenario = make_scenario(configspace, use_multi_objective=True, n_trials=N_TRIALS)\n multi_objective_algorithm = WrapStrategy(ParEGO, scenario=scenario)\n intensifier = Intensifier(scenario, max_config_calls=1, max_incumbents=10)\n config_selector = ConfigSelector(scenario, retrain_after=RETRAIN_AFTER)\n initial_design = RandomInitialDesign(scenario, n_configs=1)\n\n smac = facade(\n scenario=scenario,\n target_function=tae,\n multi_objective_algorithm=multi_objective_algorithm,\n intensifier=intensifier,\n config_selector=config_selector,\n initial_design=initial_design,\n overwrite=True,\n )\n incumbents = smac.optimize()\n\n sorted_incumbents = []\n for incumbent in incumbents:\n x, y = func(incumbent[\"x\"])\n sorted_incumbents.append((x, y))\n\n sorted_incumbents = sorted(sorted_incumbents, key=lambda x: x[0])\n previous_y = np.inf\n for x, y in sorted_incumbents:\n assert y <= previous_y\n previous_y = y\n\n # We expect N_TRIALS/RETRAIN_AFTER updates\n assert multi_objective_algorithm._n_calls_update_on_iteration_start == int(N_TRIALS / RETRAIN_AFTER)", "def test_multiple_factories(self, mocker):\n sdk_ready_flag = threading.Event()\n\n def _init(self, ready_flag, some, auth_api, streaming_enabled, telemetry_runtime_producer, telemetry_init_consumer, sse_url=None):\n self._ready_flag = ready_flag\n self._synchronizer = mocker.Mock(spec=Synchronizer)\n self._streaming_enabled = False\n self._telemetry_runtime_producer = telemetry_runtime_producer\n self._telemetry_init_consumer = telemetry_init_consumer\n 
mocker.patch('splitio.sync.manager.Manager.__init__', new=_init)\n\n def _start(self, *args, **kwargs):\n sdk_ready_flag.set()\n mocker.patch('splitio.sync.manager.Manager.start', new=_start)\n\n def _stop(self, *args, **kwargs):\n pass\n mocker.patch('splitio.sync.manager.Manager.stop', new=_stop)\n\n mockManager = Manager(sdk_ready_flag, mocker.Mock(), mocker.Mock(), False, mocker.Mock(), mocker.Mock())\n\n def _make_factory_with_apikey(apikey, *_, **__):\n return SplitFactory(apikey, {}, True, mocker.Mock(spec=ImpressionsManager), mockManager, mocker.Mock(), mocker.Mock(), mocker.Mock())\n\n factory_module_logger = mocker.Mock()\n build_in_memory = mocker.Mock()\n build_in_memory.side_effect = _make_factory_with_apikey\n build_redis = mocker.Mock()\n build_redis.side_effect = _make_factory_with_apikey\n build_localhost = mocker.Mock()\n build_localhost.side_effect = _make_factory_with_apikey\n mocker.patch('splitio.client.factory._LOGGER', new=factory_module_logger)\n mocker.patch('splitio.client.factory._build_in_memory_factory', new=build_in_memory)\n mocker.patch('splitio.client.factory._build_redis_factory', new=build_redis)\n mocker.patch('splitio.client.factory._build_localhost_factory', new=build_localhost)\n\n _INSTANTIATED_FACTORIES.clear() # Clear all factory counters for testing purposes\n\n factory1 = get_factory('some_api_key')\n assert _INSTANTIATED_FACTORIES['some_api_key'] == 1\n assert factory_module_logger.warning.mock_calls == []\n\n factory2 = get_factory('some_api_key')\n assert _INSTANTIATED_FACTORIES['some_api_key'] == 2\n assert factory_module_logger.warning.mock_calls == [mocker.call(\n \"factory instantiation: You already have %d %s with this SDK Key. \"\n \"We recommend keeping only one instance of the factory at all times \"\n \"(Singleton pattern) and reusing it throughout your application.\",\n 1,\n 'factory'\n )]\n\n factory_module_logger.reset_mock()\n factory3 = get_factory('some_api_key')\n assert _INSTANTIATED_FACTORIES['some_api_key'] == 3\n assert factory_module_logger.warning.mock_calls == [mocker.call(\n \"factory instantiation: You already have %d %s with this SDK Key. \"\n \"We recommend keeping only one instance of the factory at all times \"\n \"(Singleton pattern) and reusing it throughout your application.\",\n 2,\n 'factories'\n )]\n\n factory_module_logger.reset_mock()\n factory4 = get_factory('some_other_api_key')\n assert _INSTANTIATED_FACTORIES['some_api_key'] == 3\n assert _INSTANTIATED_FACTORIES['some_other_api_key'] == 1\n assert factory_module_logger.warning.mock_calls == [mocker.call(\n \"factory instantiation: You already have an instance of the Split factory. \"\n \"Make sure you definitely want this additional instance. 
\"\n \"We recommend keeping only one instance of the factory at all times \"\n \"(Singleton pattern) and reusing it throughout your application.\"\n )]\n\n event = threading.Event()\n factory1.destroy(event)\n event.wait()\n assert _INSTANTIATED_FACTORIES['some_other_api_key'] == 1\n assert _INSTANTIATED_FACTORIES['some_api_key'] == 2\n factory2.destroy()\n factory3.destroy()\n factory4.destroy()", "def test_01_lighting(self):", "def test_4(self):\n params_spec, options_spec = generate_random_model()\n respy_obj = RespyCls(params_spec, options_spec)\n\n # Ensure that backward induction routines use the same grid for the\n # interpolation.\n write_interpolation_grid(respy_obj)\n\n # Extract class attributes\n (\n num_periods,\n edu_spec,\n optim_paras,\n num_draws_emax,\n seed_emax,\n is_debug,\n is_interpolated,\n num_points_interp,\n optimizer_options,\n file_sim,\n num_types,\n ) = dist_class_attributes(\n respy_obj,\n \"num_periods\",\n \"edu_spec\",\n \"optim_paras\",\n \"num_draws_emax\",\n \"seed_emax\",\n \"is_debug\",\n \"is_interpolated\",\n \"num_points_interp\",\n \"optimizer_options\",\n \"file_sim\",\n \"num_types\",\n )\n\n shocks_cholesky = optim_paras[\"shocks_cholesky\"]\n coeffs_common = optim_paras[\"coeffs_common\"]\n coeffs_home = optim_paras[\"coeffs_home\"]\n coeffs_edu = optim_paras[\"coeffs_edu\"]\n coeffs_a = optim_paras[\"coeffs_a\"]\n coeffs_b = optim_paras[\"coeffs_b\"]\n delta = optim_paras[\"delta\"]\n\n type_spec_shifts = optim_paras[\"type_shifts\"]\n type_spec_shares = optim_paras[\"type_shares\"]\n\n min_idx = edu_spec[\"max\"] + 1\n\n # Check the state space creation.\n state_space = StateSpace(\n num_periods, num_types, edu_spec[\"start\"], edu_spec[\"max\"], optim_paras\n )\n\n states_all, mapping_state_idx, _, _ = state_space._get_fortran_counterparts()\n\n pyth = (\n states_all,\n state_space.states_per_period,\n mapping_state_idx,\n state_space.states_per_period.max(),\n )\n\n f2py = fort_debug.wrapper_create_state_space(\n num_periods, num_types, edu_spec[\"start\"], edu_spec[\"max\"], min_idx\n )\n for i in range(4):\n # Slice Fortran output to shape of Python output.\n if isinstance(f2py[i], np.ndarray):\n f2py_reduced = f2py[i][tuple(map(slice, pyth[i].shape))]\n else:\n f2py_reduced = f2py[i]\n\n assert_allclose(pyth[i], f2py_reduced)\n\n _, _, pyth, _ = state_space._get_fortran_counterparts()\n\n f2py = fort_debug.wrapper_calculate_rewards_systematic(\n num_periods,\n state_space.states_per_period,\n states_all,\n state_space.states_per_period.max(),\n coeffs_common,\n coeffs_a,\n coeffs_b,\n coeffs_edu,\n coeffs_home,\n type_spec_shares,\n type_spec_shifts,\n )\n\n assert_allclose(pyth, f2py)\n\n # Carry some results from the systematic rewards calculation for future use and\n # create the required set of disturbances.\n periods_draws_emax = create_draws(\n num_periods, num_draws_emax, seed_emax, is_debug\n )\n\n # Save result for next test.\n periods_rewards_systematic = pyth.copy()\n\n # Fix for hardcoded myopic agents.\n optim_paras[\"delta\"] = 0.00000000000000001\n\n # Check backward induction procedure.\n state_space = pyth_backward_induction(\n periods_draws_emax,\n state_space,\n is_debug,\n is_interpolated,\n num_points_interp,\n optim_paras,\n file_sim,\n False,\n )\n _, _, _, pyth = state_space._get_fortran_counterparts()\n\n f2py = fort_debug.wrapper_backward_induction(\n num_periods,\n False,\n state_space.states_per_period.max(),\n periods_draws_emax,\n num_draws_emax,\n state_space.states_per_period,\n 
periods_rewards_systematic,\n mapping_state_idx,\n states_all,\n is_debug,\n is_interpolated,\n num_points_interp,\n edu_spec[\"start\"],\n edu_spec[\"max\"],\n shocks_cholesky,\n delta,\n coeffs_common,\n coeffs_a,\n coeffs_b,\n file_sim,\n False,\n )\n\n assert_allclose(pyth, f2py)", "def test_bare_pass_manager_multiple(self):\n qc0 = QuantumCircuit(1)\n qc1 = QuantumCircuit(2)\n\n pm = PassManager([])\n result = pm.run([qc0, qc1])\n\n self.assertIsInstance(result, list)\n self.assertEqual(len(result), 2)\n\n for qc, new_qc in zip([qc0, qc1], result):\n self.assertIsInstance(new_qc, QuantumCircuit)\n self.assertEqual(new_qc, qc) # pm has no passes", "async def test_signal_repetitions_alternation(hass: HomeAssistant, monkeypatch) -> None:\n config = {\n \"rflink\": {\"port\": \"/dev/ttyABC0\"},\n DOMAIN: {\n \"platform\": \"rflink\",\n \"devices\": {\n \"protocol_0_0\": {\"name\": \"test\", \"signal_repetitions\": 2},\n \"protocol_0_1\": {\"name\": \"test1\", \"signal_repetitions\": 2},\n },\n },\n }\n\n # setup mocking rflink module\n _, _, protocol, _ = await mock_rflink(hass, config, DOMAIN, monkeypatch)\n\n await hass.services.async_call(\n DOMAIN, SERVICE_CLOSE_COVER, {ATTR_ENTITY_ID: f\"{DOMAIN}.test\"}\n )\n await hass.services.async_call(\n DOMAIN, SERVICE_CLOSE_COVER, {ATTR_ENTITY_ID: f\"{DOMAIN}.test1\"}\n )\n\n await hass.async_block_till_done()\n\n assert protocol.send_command_ack.call_args_list[0][0][0] == \"protocol_0_0\"\n assert protocol.send_command_ack.call_args_list[1][0][0] == \"protocol_0_1\"\n assert protocol.send_command_ack.call_args_list[2][0][0] == \"protocol_0_0\"\n assert protocol.send_command_ack.call_args_list[3][0][0] == \"protocol_0_1\"", "async def test_form_multiple_services(hass: HomeAssistant) -> None:\n result = await hass.config_entries.flow.async_init(\n DOMAIN, context={\"source\": config_entries.SOURCE_USER}\n )\n assert result[\"type\"] == RESULT_TYPE_FORM\n assert result[\"errors\"] is None\n\n with patch(\"aussiebb.asyncio.AussieBB.__init__\", return_value=None), patch(\n \"aussiebb.asyncio.AussieBB.login\", return_value=True\n ), patch(\"aussiebb.asyncio.AussieBB.get_services\", return_value=FAKE_SERVICES):\n result2 = await hass.config_entries.flow.async_configure(\n result[\"flow_id\"],\n FAKE_DATA,\n )\n await hass.async_block_till_done()\n\n assert result2[\"type\"] == RESULT_TYPE_FORM\n assert result2[\"step_id\"] == \"service\"\n assert result2[\"errors\"] is None\n\n with patch(\n \"homeassistant.components.aussie_broadband.async_setup_entry\",\n return_value=True,\n ) as mock_setup_entry:\n result3 = await hass.config_entries.flow.async_configure(\n result[\"flow_id\"],\n {CONF_SERVICES: [FAKE_SERVICES[1][\"service_id\"]]},\n )\n await hass.async_block_till_done()\n\n assert result3[\"type\"] == RESULT_TYPE_CREATE_ENTRY\n assert result3[\"title\"] == TEST_USERNAME\n assert result3[\"data\"] == FAKE_DATA\n assert result3[\"options\"] == {\n CONF_SERVICES: [FAKE_SERVICES[1][\"service_id\"]],\n }\n assert len(mock_setup_entry.mock_calls) == 1", "def test_check_occurs_once(self, test_generator):\n feature = test_generator()\n with mock_availability_test(feature) as check:\n check.assert_not_called()\n if feature:\n pass\n check.assert_called_once()\n\n if feature:\n feature.require_now(\"no message\")\n feature.require_in_call(lambda: None)()\n feature.require_in_call(\"no message\")(lambda: None)()\n feature.require_in_instance(type(\"Dummy\", (), {}))()\n feature.require_in_instance(\"no message\")(type(\"Dummy\", (), {}))()\n\n 
check.assert_called_once()", "def test_ParameterManagerGenerator_concurrent():\n components_1 = {\n \"scale\": mock_component(),\n \"decay\": mock_component(),\n \"absorption\": mock_component(),\n }\n data_manager = mock_data_manager(components_1)\n\n pmg = ParameterManagerGenerator(\n [data_manager],\n apm_type=active_parameter_manager,\n target=ScalingTarget(),\n mode=\"concurrent\",\n )\n apms = pmg.parameter_managers()\n assert len(apms) == 1\n apm = apms[0]\n assert isinstance(apm, multi_active_parameter_manager)\n assert \"scale\" in apm.components_list\n assert \"decay\" in apm.components_list\n assert \"absorption\" in apm.components_list\n\n components_1 = {\n \"scale\": mock_component(),\n \"decay\": mock_component(),\n \"absorption\": mock_component(),\n }\n components_2 = {\"1\": mock_component(), \"2\": mock_component()}\n data_manager_1 = mock_data_manager(components_1)\n data_manager_2 = mock_data_manager(components_2)\n\n pmg = ParameterManagerGenerator(\n [data_manager_1, data_manager_2],\n apm_type=active_parameter_manager,\n target=ScalingTarget(),\n mode=\"concurrent\",\n )\n multi_apms = pmg.parameter_managers()\n assert len(multi_apms) == 1\n multi_apm = multi_apms[0]\n assert isinstance(multi_apm, multi_active_parameter_manager)\n for apm in multi_apm.apm_list:\n assert isinstance(apm, active_parameter_manager)\n assert \"scale\" in multi_apm.apm_list[0].components_list\n assert \"decay\" in multi_apm.apm_list[0].components_list\n assert \"absorption\" in multi_apm.apm_list[0].components_list\n assert \"1\" in multi_apm.apm_list[1].components_list\n assert \"2\" in multi_apm.apm_list[1].components_list\n\n # now try fixing a component\n data_manager.fixed_components = [\"absorption\"]\n pmg = ParameterManagerGenerator(\n [data_manager],\n apm_type=active_parameter_manager,\n target=ScalingTarget(),\n mode=\"concurrent\",\n )\n apms = pmg.parameter_managers()\n assert len(apms) == 1\n apm = apms[0]\n assert isinstance(apm, multi_active_parameter_manager)\n assert \"scale\" in apm.components_list\n assert \"decay\" in apm.components_list\n assert \"absorption\" not in apm.components_list", "def test_mutate(self):\n f0 = 5 * (np.random.rand(10, 5) - 0.5)\n ga = population.Evolver(f0, eval_one_max)\n\n self.assertFalse(ga.generations[-1].new)\n\n for i in range(10):\n ga.mutate()\n\n self.assertTrue(ga.generations[-1].new)", "def __init__(self, asa_factory: AsaFactory):\n self.step_in_progress = False\n self.asa_factory = asa_factory", "def test_02_visit_again(self):", "def test_fleur_relax_continue_converged(self, run_with_cache, mock_code_factory):\n assert False", "def test_setup_multiple_parameters_system():\n with mmtools.utils.temporary_directory() as tmp_dir:\n yaml_script = get_template_script(tmp_dir)\n\n # Force antechamber parametrization of benzene to output frcmod file\n exp_builder = ExperimentBuilder(yaml_script)\n exp_builder._db._setup_molecules('benzene')\n benzene_dir = exp_builder._db.get_molecule_dir('benzene')\n frcmod_path = os.path.join(benzene_dir, 'benzene.frcmod')\n benzene_path = os.path.join(benzene_dir, 'benzene.gaff.mol2')\n\n # Redefine benzene to use leaprc.gaff and benzene.frcmod\n # and set up system for hydration free energy calculation\n yaml_script['molecules'] = {\n 'benzene-frcmod': {'filepath': benzene_path,\n 'leap': {'parameters': ['leaprc.gaff', frcmod_path]}}}\n yaml_script['systems'] = {\n 'system':\n {'solute': 'benzene-frcmod', 'solvent1': 'PME', 'solvent2': 'vacuum',\n 'leap': {'parameters': 'oldff/leaprc.ff14SB'}}\n }\n 
del yaml_script['experiments']\n\n exp_builder = ExperimentBuilder(yaml_script)\n system_files_path = exp_builder._db.get_system('system')\n\n # Check that output exist:\n for phase in system_files_path:\n assert os.path.exists(phase.parameters_path)\n assert os.path.exists(phase.position_path)\n assert os.path.getsize(phase.parameters_path) > 0\n assert os.path.getsize(phase.position_path) > 0", "def test_ajuste(self):\n\n def test(clk, nrst, tick, ajuste, ajuste_hora, ajuste_min, ajuste_seg, hora, min, seg):\n\n yield delay(tick_period * randint(60, 180))\n ajuste.next = 1\n ajuste_hora.next = 5\n ajuste_min.next = 10\n ajuste_seg.next = 0\n\n yield delay(tick_period*2)\n self.assertEqual(5, hora)\n self.assertEqual(10, min)\n self.assertEqual(0, seg)\n\n ajuste.next = 0\n yield delay(tick_period)\n self.assertEqual(5, hora)\n self.assertEqual(10, min)\n self.assertEqual(1, seg)\n\n runSim(test, 60*60*3*tick_period)", "def test_AFQ_FA():\n _, bids_path, _ = get_temp_hardi()\n myafq = api.AFQ(\n bids_path=bids_path,\n dmriprep='vistasoft',\n reg_template='dti_fa_template',\n reg_subject='dti_fa_subject')\n myafq.rois", "def test_ipam_services_partial_update(self):\n pass", "def test_forces_and_energies(simulation_factory, lattice_snapshot_factory,\n external_params):\n # unpack parameters\n cls_obj, param_attr, list_params, evaluator = external_params\n\n for param in list_params:\n # create class instance\n obj_instance = cls_obj()\n getattr(obj_instance, param_attr)['A'] = param\n\n # set up simulation and run a bit\n snap = lattice_snapshot_factory(n=2)\n if snap.communicator.rank == 0:\n snap.particles.charge[:] = np.random.random(\n snap.particles.N) * 2 - 1\n sim = simulation_factory(snap)\n sim.operations.integrator = hoomd.md.Integrator(dt=0.001)\n sim.operations.integrator.forces.append(obj_instance)\n sim.run(10)\n\n # test energies\n new_snap = sim.state.get_snapshot()\n forces = sim.operations.integrator.forces[0].forces\n energies = sim.operations.integrator.forces[0].energies\n if new_snap.communicator.rank == 0:\n expected_forces, expected_energies = evaluator(new_snap, param)\n # Set atol as the energies and forces very close to 0.\n # It would be better to run a test that applies appreciable forces\n # and energies.\n np.testing.assert_allclose(expected_forces, forces, atol=1e-5)\n np.testing.assert_allclose(expected_energies, energies, atol=1e-5)", "def test_10(self, test):\r\n return test.MANUAL()", "def test_ipam_services_create(self):\n pass", "def test_ipam_vrfs_create(self):\n pass", "async def test_flow_two_bridges_discovered_one_new(\n hass: HomeAssistant, aioclient_mock: AiohttpClientMocker\n) -> None:\n create_mock_api_discovery(aioclient_mock, [(\"1.2.3.4\", \"bla\"), (\"5.6.7.8\", \"beer\")])\n MockConfigEntry(\n domain=\"hue\", unique_id=\"bla\", data={\"host\": \"1.2.3.4\"}\n ).add_to_hass(hass)\n\n result = await hass.config_entries.flow.async_init(\n const.DOMAIN, context={\"source\": config_entries.SOURCE_USER}\n )\n\n assert result[\"type\"] == \"form\"\n assert result[\"step_id\"] == \"init\"\n assert result[\"data_schema\"]({\"id\": \"beer\"})\n assert result[\"data_schema\"]({\"id\": \"manual\"})\n with pytest.raises(vol.error.MultipleInvalid):\n assert not result[\"data_schema\"]({\"id\": \"bla\"})", "def testBeliefs1sk(self):", "def test_order(self):\n class Mock(object):\n def __init__(self):\n self.spike = [False]\n\n def evolve(self, t, dt):\n self.spike = [True]\n\n G0 = Mock()\n M0 = simulation.EventMonitor(G0)\n sim = simulation.Simulation(G0, 
M0)\n sim.run(sim.dt)\n\n self.assertEqual(len(M0.t), 1)\n self.assertEqual(len(M0.i), 1)", "def suite_ended(self, module):", "def setup(env, NUM_TRACKS, landtime, t_inter):\n # Create the airport\n airport = Airport(env, NUM_TRACKS, landtime)\n\n # Create 4 initial planes\n for i in range(1):\n env.process(plane(env, 'Aviao %d' % i, airport))\n\n # Create more planes while the simulation is running\n while True:\n yield env.timeout(random.randint(t_inter-2, t_inter+2))\n# yield env.timeout(random.expovariate(1.0 / t_inter))\n i += 1\n env.process(plane(env, 'Aviao %d' % i, airport))", "def test_ipam_services_update(self):\n pass", "def case_real_runs(\n automl: AutoML,\n make_ensemble_builder_manager: Callable[..., EnsembleBuilderManager],\n) -> EnsembleBuilderManager:\n manager = make_ensemble_builder_manager(\n backend=automl._backend,\n metric=automl._metrics[0],\n task=automl._task,\n dataset_name=automl._dataset_name,\n seed=automl._seed,\n logger_port=automl._logger_port,\n random_state=DEFAULT_SEED,\n )\n return manager", "def test_success(database):\n tas = TASFactory()\n database.session.add(tas)\n database.session.flush()\n\n ap = AppropriationFactory(account_num=tas.account_num, deobligations_recoveries_r_cpe=8)\n # Contributes 4\n op_1 = ObjectClassProgramActivityFactory(\n account_num=tas.account_num, ussgl487100_downward_adjus_cpe=1, ussgl497100_downward_adjus_cpe=1,\n ussgl487200_downward_adjus_cpe=1, ussgl497200_downward_adjus_cpe=1)\n # Contributes another 4\n op_2 = ObjectClassProgramActivityFactory(\n account_num=tas.account_num, ussgl487100_downward_adjus_cpe=1, ussgl497100_downward_adjus_cpe=1,\n ussgl487200_downward_adjus_cpe=1, ussgl497200_downward_adjus_cpe=1)\n\n assert number_of_errors(_FILE, database, models=[ap, op_1, op_2]) == 0", "def test_add_multiple_pis_simultaneously_to_vpg_with_1_pi(self):\n proj_obj, fabric_obj, pr_objs = self._create_prerequisites(\n create_second_pr=True)\n test_id = self.id()\n VPG_CLASS = self._api_server.get_resource_class('virtual-port-group')\n org_process_ae_id = VPG_CLASS._process_ae_id\n\n class MockVpg(VPG_CLASS):\n org_process_ae_id = VPG_CLASS._process_ae_id\n HOLD_API = True\n @classmethod\n def mock_process_ae_id(cls, db_obj_dict, vpg_name, obj_dict=None):\n while cls.HOLD_API:\n print('sleeping for HOLD_API to clear for '\n 'args = %s' % obj_dict)\n gevent.sleep(0.5)\n return cls.org_process_ae_id(db_obj_dict, vpg_name, obj_dict)\n\n def process_ae_ids(x):\n return [int(i) for i in sorted(x) if i is not None]\n\n def get_zk_ae_ids(prs=None):\n prefix = os.path.join(\n self.__class__.__name__,\n 'id', 'aggregated-ethernet')\n zk_client = self._api_server._db_conn._zk_db._zk_client._zk_client\n if not prs:\n prs = [os.path.join(prefix, pr.name) for pr in pr_objs]\n else:\n if not isinstance(prs, list):\n prs = [prs]\n prs = [os.path.join(prefix, pr) for pr in prs]\n ae_ids = {}\n for pr in prs:\n pr_org = os.path.split(pr)[-1]\n ae_ids[pr_org] = zk_client.get_children(pr)\n return ae_ids\n\n pi_per_pr = 150\n pi_objs = {}\n pr1_pi_names = ['%s_pr1_pi%d' % (test_id, i) for\n i in range(1, pi_per_pr + 1)]\n pr2_pi_names = ['%s_pr2_pi%d' % (test_id, i) for\n i in range(1, pi_per_pr + 1)]\n pr1_pi_objs = self._create_pi_objects(pr_objs[0], pr1_pi_names)\n pr2_pi_objs = self._create_pi_objects(pr_objs[1], pr2_pi_names)\n pi_objs.update(pr1_pi_objs)\n pi_objs.update(pr2_pi_objs)\n\n # create a VPG\n vpg_count = 1\n vpg_names = ['vpg_%s_%s' % (test_id, i) for i in range(\n 1, vpg_count + 1)]\n vpg_objs = 
self._create_vpgs(fabric_obj, vpg_names)\n\n # record AE-IDs in ZK before creating any VPG\n ae_ids = [x for x in get_zk_ae_ids().values() if x]\n self.assertEqual(len(ae_ids), 0)\n\n def _attach_pi_simultaneously(vpg_obj, pi_uuids):\n # Attach PIs from PR1 to VPG-1\n vpg_obj = self._vnc_lib.virtual_port_group_read(id=vpg_obj.uuid)\n try:\n # mock _process_ae_id at VPG resource\n VPG_CLASS._process_ae_id = MockVpg.mock_process_ae_id\n MockVpg.HOLD_API = True\n for pi_uuid in pi_uuids:\n gevent.spawn(\n self.api.ref_update,\n \"virtual-port-group\",\n vpg_obj.uuid,\n \"physical-interface\",\n pi_uuid,\n None,\n \"ADD\",\n None)\n gevent.sleep(2)\n MockVpg.HOLD_API = False\n gevent.sleep(6)\n except gevent.timeout.Timeout:\n self.assertFalse(\n False,\n '%s failed unexpectedly' % VPG_CLASS._process_ae_id)\n finally:\n # reset mock to original\n VPG_CLASS._process_ae_id = org_process_ae_id\n vpg_obj = self._vnc_lib.virtual_port_group_read(id=vpg_obj.uuid)\n pi_refs = vpg_obj.get_physical_interface_refs()\n return vpg_obj, pi_refs\n\n # Case 1\n # Attach 1 PIs from PR1 to VPG-1\n vpg_name = vpg_names[0]\n vpg_obj = vpg_objs[vpg_name]\n vpg_obj = self._vnc_lib.virtual_port_group_read(id=vpg_obj.uuid)\n pi_uuids = [pi_objs[pr1_pi_names[pi]].uuid for pi in range(1)]\n vpg_obj, pi_refs = _attach_pi_simultaneously(vpg_obj, pi_uuids)\n # verify PI-refs are correct\n self.assertEqual(len(pi_refs), 1)\n vpg_ae_ids = {ref['href'].split('/')[-1]: ref['attr'].ae_num\n for ref in pi_refs}\n # verify all AE-IDs allocated per prouter are unique\n self.assertEqual(len(set(vpg_ae_ids.keys())), len(pi_refs))\n self.assertIsNone(list(vpg_ae_ids.values())[0])\n ae_id_sorted = process_ae_ids(vpg_ae_ids.values())\n self.assertEqual(ae_id_sorted, [])\n # verification at ZK for AE-IDs in Physical Routers\n pr_ae_ids = get_zk_ae_ids()\n self.assertEqual(len(pr_ae_ids[pr_objs[0].name]), 0)\n self.assertEqual(process_ae_ids(pr_ae_ids[pr_objs[0].name]), [])\n\n # Case 2\n # Attach rest of 149 PIs from PR1 to VPG-1\n vpg_name = vpg_names[0]\n vpg_obj = vpg_objs[vpg_name]\n vpg_obj = self._vnc_lib.virtual_port_group_read(id=vpg_obj.uuid)\n pi_uuids = [pi_objs[pr1_pi_names[pi]].uuid for pi in range(1, 150)]\n vpg_obj, pi_refs = _attach_pi_simultaneously(vpg_obj, pi_uuids)\n # verify PI-refs are correct\n self.assertEqual(len(pi_refs), 150)\n vpg_ae_ids = {ref['href'].split('/')[-1]: ref['attr'].ae_num\n for ref in pi_refs}\n # verify all AE-IDs allocated per prouter are unique\n self.assertEqual(len(set(vpg_ae_ids.keys())), len(pi_refs))\n self.assertEqual(len(set(vpg_ae_ids.values())), 1)\n ae_id_sorted = process_ae_ids(vpg_ae_ids.values())\n self.assertEqual(ae_id_sorted, [0] * 150)\n # verification at ZK for AE-IDs in Physical Routers\n pr_ae_ids = get_zk_ae_ids()\n self.assertEqual(len(pr_ae_ids[pr_objs[0].name]), 1)\n self.assertEqual(process_ae_ids(pr_ae_ids[pr_objs[0].name]), [0])", "def test_09(self, test):\r\n return test.MANUAL()", "def test_run_alpha_rarefaction(self):\r\n\r\n run_alpha_rarefaction(\r\n self.test_data['biom'][0],\r\n self.test_data['map'][0],\r\n self.test_out,\r\n call_commands_serially,\r\n self.params,\r\n self.qiime_config,\r\n tree_fp=self.test_data['tree'][0],\r\n num_steps=5,\r\n parallel=False,\r\n min_rare_depth=3,\r\n max_rare_depth=18,\r\n status_update_callback=no_status_updates)\r\n\r\n html_fp = join(self.test_out, 'alpha_rarefaction_plots',\r\n 'rarefaction_plots.html')\r\n pd_averages_fp = join(self.test_out, 'alpha_rarefaction_plots',\r\n 'average_tables', 
'PD_whole_treeSampleType.txt')\r\n pd_collated_fp = join(self.test_out, 'alpha_div_collated',\r\n 'PD_whole_tree.txt')\r\n\r\n # Confirm that palm and gut alpha diversities are different,\r\n # and suggestive of statistical significance (we only have a\r\n # few sequences, so we don't get significant results)\r\n ttest_res, alpha_avg = compare_alpha_diversities(open(pd_collated_fp),\r\n open(\r\n self.test_data[\r\n 'map'][0]),\r\n 'SampleType',\r\n 18,\r\n test_type='parametric')\r\n feces_palm_t = ttest_res[('feces', 'L_palm')][0]\r\n self.assertTrue(feces_palm_t < 0,\r\n \"t-statistic too high: %1.3f, but should be less than 0\"\r\n % feces_palm_t)\r\n\r\n # check that final output files have non-zero size\r\n self.assertTrue(getsize(html_fp) > 0)\r\n\r\n # Check that the log file is created and has size > 0\r\n log_fp = glob(join(self.test_out, 'log*.txt'))[0]\r\n self.assertTrue(getsize(log_fp) > 0)", "def test_bed(self):\n #TODO write bed tests", "def test_am_basic(Simulator, plt, seed, rng):\n\n d = 64\n vocab = Vocabulary(d, pointer_gen=rng)\n vocab.populate('A; B; C; D')\n\n with spa.Network('model', seed=seed) as m:\n m.am = ThresholdingAssocMem(threshold=0.3, input_vocab=vocab,\n mapping=vocab.keys(),\n function=filtered_step_fn)\n spa.sym.A >> m.am\n\n in_p = nengo.Probe(m.am.input)\n out_p = nengo.Probe(m.am.output, synapse=0.03)\n\n with Simulator(m) as sim:\n sim.run(0.2)\n t = sim.trange()\n\n plt.subplot(3, 1, 1)\n plt.plot(t, similarity(sim.data[in_p], vocab))\n plt.ylabel(\"Input\")\n plt.ylim(top=1.1)\n plt.subplot(3, 1, 2)\n plt.plot(t, similarity(sim.data[out_p], vocab))\n plt.plot(t[t > 0.15], np.ones(t.shape)[t > 0.15] * 0.95, c='g', lw=2)\n plt.ylabel(\"Output\")\n\n assert_sp_close(t, sim.data[in_p], vocab['A'], skip=0.15, atol=0.05)\n assert_sp_close(t, sim.data[out_p], vocab['A'], skip=0.15)", "def test_multiple_factories(self, mocker):\n def _make_factory_with_apikey(apikey, *_, **__):\n return SplitFactory(apikey, {}, True)\n\n factory_module_logger = mocker.Mock()\n build_in_memory = mocker.Mock()\n build_in_memory.side_effect = _make_factory_with_apikey\n build_redis = mocker.Mock()\n build_redis.side_effect = _make_factory_with_apikey\n build_uwsgi = mocker.Mock()\n build_uwsgi.side_effect = _make_factory_with_apikey\n build_localhost = mocker.Mock()\n build_localhost.side_effect = _make_factory_with_apikey\n mocker.patch('splitio.client.factory._LOGGER', new=factory_module_logger)\n mocker.patch('splitio.client.factory._build_in_memory_factory', new=build_in_memory)\n mocker.patch('splitio.client.factory._build_redis_factory', new=build_redis)\n mocker.patch('splitio.client.factory._build_uwsgi_factory', new=build_uwsgi)\n mocker.patch('splitio.client.factory._build_localhost_factory', new=build_localhost)\n\n _INSTANTIATED_FACTORIES.clear() # Clear all factory counters for testing purposes\n\n factory1 = get_factory('some_api_key')\n assert _INSTANTIATED_FACTORIES['some_api_key'] == 1\n assert factory_module_logger.warning.mock_calls == []\n\n factory2 = get_factory('some_api_key')\n assert _INSTANTIATED_FACTORIES['some_api_key'] == 2\n assert factory_module_logger.warning.mock_calls == [mocker.call(\n \"factory instantiation: You already have %d %s with this API Key. 
\"\n \"We recommend keeping only one instance of the factory at all times \"\n \"(Singleton pattern) and reusing it throughout your application.\",\n 1,\n 'factory'\n )]\n\n factory_module_logger.reset_mock()\n factory3 = get_factory('some_api_key')\n assert _INSTANTIATED_FACTORIES['some_api_key'] == 3\n assert factory_module_logger.warning.mock_calls == [mocker.call(\n \"factory instantiation: You already have %d %s with this API Key. \"\n \"We recommend keeping only one instance of the factory at all times \"\n \"(Singleton pattern) and reusing it throughout your application.\",\n 2,\n 'factories'\n )]\n\n factory_module_logger.reset_mock()\n factory4 = get_factory('some_other_api_key')\n assert _INSTANTIATED_FACTORIES['some_api_key'] == 3\n assert _INSTANTIATED_FACTORIES['some_other_api_key'] == 1\n assert factory_module_logger.warning.mock_calls == [mocker.call(\n \"factory instantiation: You already have an instance of the Split factory. \"\n \"Make sure you definitely want this additional instance. \"\n \"We recommend keeping only one instance of the factory at all times \"\n \"(Singleton pattern) and reusing it throughout your application.\"\n )]\n\n event = threading.Event()\n factory1.destroy(event)\n event.wait()\n assert _INSTANTIATED_FACTORIES['some_other_api_key'] == 1\n assert _INSTANTIATED_FACTORIES['some_api_key'] == 2\n factory2.destroy()\n factory3.destroy()\n factory4.destroy()", "def test_ex_2_3(self):\n\n wam = WAM()\n wam.execute(self.fig_2_3_instrs)\n aW = wam.deref_reg(5)\n aZ = wam.deref_reg(2)\n wam.execute(self.fig_2_4_instrs)\n aX = wam.deref_reg(5)\n aY = wam.deref_reg(4)\n self.assertEqual(wam.get_term_repr(aW), 'f(a)')\n self.assertEqual(wam.get_term_repr(aX), 'f(a)')\n self.assertEqual(wam.get_term_repr(aY), 'f(f(a))')\n self.assertEqual(wam.get_term_repr(aZ), 'f(f(a))')", "def TestOneStep(self):\n pass", "def test_generated_protocol_end_to_end(self):\n # AEA components\n ledger_apis = LedgerApis({}, FETCHAI)\n\n wallet_1 = Wallet({FETCHAI: FETCHAI_PRIVATE_KEY_FILE})\n wallet_2 = Wallet({FETCHAI: FETCHAI_PRIVATE_KEY_FILE})\n\n identity_1 = Identity(\n name=\"my_aea_1\",\n address=wallet_1.addresses.get(FETCHAI),\n default_address_key=FETCHAI,\n )\n identity_2 = Identity(\n name=\"my_aea_2\",\n address=wallet_2.addresses.get(FETCHAI),\n default_address_key=FETCHAI,\n )\n\n oef_connection_1 = OEFConnection(\n address=identity_1.address, oef_addr=HOST, oef_port=PORT\n )\n oef_connection_2 = OEFConnection(\n address=identity_2.address, oef_addr=HOST, oef_port=PORT\n )\n\n resources_1 = Resources()\n resources_2 = Resources()\n\n # add generated protocols to resources\n generated_protocol_configuration = ProtocolConfig.from_json(\n yaml.safe_load(\n open(\n os.path.join(\n self.cwd,\n \"tests\",\n \"data\",\n \"generator\",\n \"two_party_negotiation\",\n \"protocol.yaml\",\n )\n )\n )\n )\n generated_protocol = Protocol(\n TwoPartyNegotiationMessage.protocol_id,\n TwoPartyNegotiationSerializer(),\n generated_protocol_configuration,\n )\n resources_1.protocol_registry.register(\n TwoPartyNegotiationMessage.protocol_id, generated_protocol\n )\n resources_2.protocol_registry.register(\n TwoPartyNegotiationMessage.protocol_id, generated_protocol\n )\n\n # create AEAs\n aea_1 = AEA(identity_1, [oef_connection_1], wallet_1, ledger_apis, resources_1)\n aea_2 = AEA(identity_2, [oef_connection_2], wallet_2, ledger_apis, resources_2)\n\n inform_number = tuple((1370, 1991, 1, 4, 17, 6))\n # message 1\n message = TwoPartyNegotiationMessage(\n message_id=1,\n 
dialogue_reference=(str(0), \"\"),\n target=0,\n performative=TwoPartyNegotiationMessage.Performative.INFORM,\n inform_number=inform_number,\n )\n encoded_message_in_bytes = TwoPartyNegotiationSerializer().encode(message)\n envelope = Envelope(\n to=identity_2.address,\n sender=identity_1.address,\n protocol_id=TwoPartyNegotiationMessage.protocol_id,\n message=encoded_message_in_bytes,\n )\n # message 2\n reply_message = {1: \"number one\", 2: \"number two\", 7: \"number seven\"}\n message_2 = TwoPartyNegotiationMessage(\n message_id=2,\n dialogue_reference=(str(0), \"\"),\n target=1,\n performative=TwoPartyNegotiationMessage.Performative.INFORM_REPLY,\n reply_message=reply_message,\n )\n encoded_message_2_in_bytes = TwoPartyNegotiationSerializer().encode(message_2)\n\n # add handlers to AEA resources\n agent_1_handler = Agent1Handler(\n skill_context=SkillContext(aea_1.context), name=\"fake_skill\"\n )\n resources_1.handler_registry.register(\n (\n PublicId.from_str(\"fetchai/fake_skill:0.1.0\"),\n TwoPartyNegotiationMessage.protocol_id,\n ),\n agent_1_handler,\n )\n agent_2_handler = Agent2Handler(\n encoded_messsage=encoded_message_2_in_bytes,\n skill_context=SkillContext(aea_2.context),\n name=\"fake_skill\",\n )\n resources_2.handler_registry.register(\n (\n PublicId.from_str(\"fetchai/fake_skill:0.1.0\"),\n TwoPartyNegotiationMessage.protocol_id,\n ),\n agent_2_handler,\n )\n\n # add error skill to AEAs\n error_skill_1 = Skill.from_dir(\n os.path.join(AEA_DIR, \"skills\", \"error\"), aea_1.context\n )\n resources_1.add_skill(error_skill_1)\n\n error_skill_2 = Skill.from_dir(\n os.path.join(AEA_DIR, \"skills\", \"error\"), aea_2.context\n )\n resources_2.add_skill(error_skill_2)\n\n # Start threads\n t_1 = Thread(target=aea_1.start)\n t_2 = Thread(target=aea_2.start)\n try:\n t_1.start()\n t_2.start()\n time.sleep(1.0)\n aea_1.outbox.put(envelope)\n time.sleep(5.0)\n assert (\n agent_2_handler.handled_message.message_id == message.message_id\n ), \"Message from Agent 1 to 2: message ids do not match\"\n assert (\n agent_2_handler.handled_message.dialogue_reference\n == message.dialogue_reference\n ), \"Message from Agent 1 to 2: dialogue references do not match\"\n assert (\n agent_2_handler.handled_message.dialogue_reference[0]\n == message.dialogue_reference[0]\n ), \"Message from Agent 1 to 2: dialogue reference[0]s do not match\"\n assert (\n agent_2_handler.handled_message.dialogue_reference[1]\n == message.dialogue_reference[1]\n ), \"Message from Agent 1 to 2: dialogue reference[1]s do not match\"\n assert (\n agent_2_handler.handled_message.target == message.target\n ), \"Message from Agent 1 to 2: targets do not match\"\n assert (\n agent_2_handler.handled_message.performative == message.performative\n ), \"Message from Agent 1 to 2: performatives do not match\"\n assert (\n agent_2_handler.handled_message.inform_number == message.inform_number\n ), \"Message from Agent 1 to 2: inform_numbers do not match\"\n\n assert (\n agent_1_handler.handled_message.message_id == message_2.message_id\n ), \"Message from Agent 1 to 2: dialogue references do not match\"\n assert (\n agent_1_handler.handled_message.dialogue_reference\n == message_2.dialogue_reference\n ), \"Message from Agent 2 to 1: dialogue references do not match\"\n assert (\n agent_1_handler.handled_message.dialogue_reference[0]\n == message_2.dialogue_reference[0]\n ), \"Message from Agent 2 to 1: dialogue reference[0]s do not match\"\n assert (\n agent_1_handler.handled_message.dialogue_reference[1]\n == 
message_2.dialogue_reference[1]\n ), \"Message from Agent 2 to 1: dialogue reference[1]s do not match\"\n assert (\n agent_1_handler.handled_message.target == message_2.target\n ), \"Message from Agent 2 to 1: targets do not match\"\n assert (\n agent_1_handler.handled_message.performative == message_2.performative\n ), \"Message from Agent 2 to 1: performatives do not match\"\n assert (\n agent_1_handler.handled_message.reply_message == message_2.reply_message\n ), \"Message from Agent 1 to 2: reply_messages do not match\"\n time.sleep(2.0)\n finally:\n aea_1.stop()\n aea_2.stop()\n t_1.join()\n t_2.join()", "def test_actor_matches_activity(self):", "def test_order(self):\n n = 3\n was_called = n*[False]\n class Mock(object):\n def __init__(self, i):\n self.i = i\n\n def evolve(self1, t, dt):\n was_called[self1.i] = True\n self.assertTrue(all(was_called[:self1.i]))\n\n sim = simulation.Simulation(*[Mock(_) for _ in xrange(n)])\n sim.run(sim.dt)", "def test_make_amr(self):\n basic_test_runner(self, 'amrs', nrows=0)", "def test_scaling_active_parameter_manager():\n components_2 = {\"1\": mock_scaling_component(2), \"2\": mock_scaling_component(2)}\n scaling_apm = scaling_active_parameter_manager(components_2, [\"1\"])\n assert list(scaling_apm.constant_g_values[0]) == list(\n components_2[\"2\"].calculate_scales()\n )\n assert len(scaling_apm.constant_g_values) == 1\n assert scaling_apm.n_obs == [2]\n\n # Test that no constant_g_values if both components selected\n scaling_apm = scaling_active_parameter_manager(components_2, [\"1\", \"2\"])\n assert scaling_apm.constant_g_values is None\n\n # Check that one can't initialise with an unequal number of reflections,\n # either within the selection or overall.\n with pytest.raises(AssertionError):\n components_2 = {\"1\": mock_scaling_component(2), \"2\": mock_scaling_component(1)}\n scaling_apm = scaling_active_parameter_manager(components_2, [\"1\", \"2\"])\n with pytest.raises(AssertionError):\n components_2 = {\"1\": mock_scaling_component(2), \"2\": mock_scaling_component(1)}\n scaling_apm = scaling_active_parameter_manager(components_2, [\"1\"])\n\n data_manager = mock_data_manager(components_2)\n pmg = ScalingParameterManagerGenerator(\n [data_manager], target=ScalingTarget(), mode=\"concurrent\"\n )\n assert isinstance(pmg.apm_type, type(scaling_active_parameter_manager))", "def test_factory(self):\n port = self.port(description=u'foo')\n port.startService()\n self.assertIdentical(self._service.factory, port.factory.realFactory)", "def setup_test():\n test_airplane = Airplane(env, 'Test Airplane', 3, 2, 1, False)\n seats = test_airplane.get_seats()\n passengers = []\n\n for j in range(0, test_airplane.get_number_of_seats()):\n passenger = Passenger(env, seats[j], test_airplane)\n passengers.append(passenger)\n\n _algorithms = BoardingAlgorithm(env, test_airplane, passengers)\n\n return test_airplane, passengers, _algorithms", "def test_PoissonRegression_solver_step(self):\n for solver in PoissonRegression._solvers.keys():\n if solver == 'bfgs':\n learner = PoissonRegression(solver=solver)\n self.assertIsNone(learner.step)\n learner = PoissonRegression(solver=solver, step=self.float_1)\n self.assertIsNone(learner.step)\n msg = '^Solver \"bfgs\" has no settable step$'\n with self.assertWarnsRegex(RuntimeWarning, msg):\n learner.step = self.float_2\n self.assertIsNone(learner.step)\n else:\n learner = PoissonRegression(solver=solver, step=self.float_1)\n self.assertEqual(learner.step, self.float_1)\n 
self.assertEqual(learner._solver_obj.step, self.float_1)\n learner.step = self.float_2\n self.assertEqual(learner.step, self.float_2)\n self.assertEqual(learner._solver_obj.step, self.float_2)", "def test_bare_pass_manager_single(self):\n qc = QuantumCircuit(1)\n pm = PassManager([])\n new_qc = pm.run(qc)\n self.assertIsInstance(new_qc, QuantumCircuit)\n self.assertEqual(qc, new_qc) # pm has no passes", "def test_leftover_single_pi_deallocation(self):\n proj_obj, fabric_obj, pr_objs = self._create_prerequisites(\n create_second_pr=True)\n test_id = self.id()\n\n def process_ae_ids(x):\n return [int(i) for i in sorted(x)]\n\n def get_zk_ae_ids(prs=None):\n prefix = os.path.join(\n self.__class__.__name__,\n 'id', 'aggregated-ethernet')\n zk_client = self._api_server._db_conn._zk_db._zk_client._zk_client\n if not prs:\n prs = [os.path.join(prefix, pr.name) for pr in pr_objs]\n else:\n if not isinstance(prs, list):\n prs = [prs]\n prs = [os.path.join(prefix, pr) for pr in prs]\n ae_ids = {}\n for pr in prs:\n pr_org = os.path.split(pr)[-1]\n ae_ids[pr_org] = zk_client.get_children(pr)\n return ae_ids\n\n pi_per_pr = 2\n pi_objs = {}\n pr1_pi_names = ['%s_pr1_pi%d' % (test_id, i) for\n i in range(1, pi_per_pr + 1)]\n pr1_pi_objs = self._create_pi_objects(pr_objs[0], pr1_pi_names)\n pi_objs.update(pr1_pi_objs)\n\n # create one VPG\n vpg_count = 1\n vpg_names = ['vpg_%s_%s' % (test_id, i) for\n i in range(1, vpg_count + 1)]\n vpg_objs = self._create_vpgs(fabric_obj, vpg_names)\n\n # record AE-IDs in ZK before creating any VPG\n ae_ids = [x for x in get_zk_ae_ids().values() if x]\n self.assertEqual(len(ae_ids), 0)\n\n # Attach PI1/PR1 and PI2/PR1 to VPG-1\n ae_ids = {}\n vpg_name = vpg_names[0]\n vpg_obj = vpg_objs[vpg_name]\n for pi in range(2):\n vpg_obj.add_physical_interface(pi_objs[pr1_pi_names[pi]])\n self.api.virtual_port_group_update(vpg_obj)\n vpg_obj = self._vnc_lib.virtual_port_group_read(id=vpg_obj.uuid)\n\n pi_refs = vpg_obj.get_physical_interface_refs()\n ae_ids[vpg_name] = {ref['href'].split('/')[-1]: ref['attr'].ae_num\n for ref in pi_refs}\n # verify PI-refs are correct\n self.assertEqual(len(pi_refs), 2)\n # verify all AE-IDs allocated per prouter are unique\n self.assertEqual(len(set(ae_ids[vpg_name].keys())), len(pi_refs))\n self.assertEqual(len(set(ae_ids[vpg_name].values())), 1)\n\n # verification at Physical Routers\n pr_ae_ids = get_zk_ae_ids()\n self.assertEqual(len(pr_ae_ids[pr_objs[0].name]), 1)\n self.assertEqual(process_ae_ids(pr_ae_ids[pr_objs[0].name]), [0])\n\n # Delete PI1/PR1\n vpg_obj = self._vnc_lib.virtual_port_group_read(id=vpg_obj.uuid)\n vpg_obj.del_physical_interface(pr1_pi_objs[pr1_pi_names[0]])\n self.api.virtual_port_group_update(vpg_obj)\n vpg_obj = self._vnc_lib.virtual_port_group_read(id=vpg_obj.uuid)\n\n # verify PI-refs are correct\n pi_refs = vpg_obj.get_physical_interface_refs()\n self.assertEqual(len(pi_refs), 1)\n # verify all AE-IDs allocated per prouter are unique\n self.assertEqual(len(set(ae_ids[vpg_name].values())), 1)\n\n # verification at Physical Routers\n pr_ae_ids = get_zk_ae_ids()\n self.assertEqual(len(pr_ae_ids[pr_objs[0].name]), 0)\n self.assertEqual(process_ae_ids(pr_ae_ids[pr_objs[0].name]), [])", "def part2a_0():\n xs = exampleInput\n phi = Counter({('-BEGIN-', '-FEAT-'): 1.0, ('-FEAT-', 'Beautiful'): 1.0, ('-FEAT-', 'PREV:-BEGIN-'): 1.0, ('-FEAT-', 'NEXT:2'): 1.0, ('-FEAT-', '-CAPITALIZED-'): 1.0, ('-FEAT-', '-POST-CAPITALIZED-'): 0.0})\n phi_ = submission.nerFeatureFunction(0, '-BEGIN-', '-FEAT-', xs)\n grader.requireIsTrue( 
Counters.approximateEquals(phi, phi_) )\n\n phi = Counter({('-FEAT-', '-SIZE-'): 1.0, ('-SIZE-', 'PREV:Beautiful'): 1.0, ('-SIZE-', 'NEXT:bedroom'): 1.0, ('-SIZE-', '-PRE-CAPITALIZED-'): 1.0, ('-SIZE-', '2'): 1.0, ('-SIZE-', '-POST-CAPITALIZED-'): 0.0, ('-SIZE-', '-CAPITALIZED-'): 0.0})\n phi_ = submission.nerFeatureFunction(1, '-FEAT-', '-SIZE-', xs)\n grader.requireIsTrue( Counters.approximateEquals(phi, phi_) )\n \n phi = Counter({('-SIZE-', '-SIZE-'): 1.0, ('-SIZE-', 'PREV:2'): 1.0, ('-SIZE-', 'bedroom'): 1.0, ('-SIZE-', 'NEXT:-END-'): 1.0, ('-SIZE-', '-CAPITALIZED-'): 0.0, ('-SIZE-', '-PRE-CAPITALIZED-'): 0.0})\n phi_ = submission.nerFeatureFunction(2, '-SIZE-', '-SIZE-', xs)\n grader.requireIsTrue( Counters.approximateEquals(phi, phi_) )", "def test_T01():", "def test_register_route_factory():\n\n current_factory = application_services.get_current_route_factory()\n application_services.register_route_factory(mock_route_factory)\n assert application_services.get_current_route_factory() == mock_route_factory\n application_services.register_route_factory(current_factory)", "def test_theft_and_stealing(self):", "def test_three_arms_two_winners(self):\n self._test_three_arms_two_winners()", "def test_solution_usage(self, test_data):\n for finput in test_data[\"EKFSLAM.EKFSLAM.f\"][:1]:\n params = finput\n\n solution.used[\"EKFSLAM.EKFSLAM.f\"] = False\n\n EKFSLAM.EKFSLAM.f(**params)\n\n assert not solution.used[\"EKFSLAM.EKFSLAM.f\"], \"The function uses the solution\"", "def test_solution_usage(self, test_data):\n for finput in test_data[\"EKFSLAM.EKFSLAM.add_landmarks\"][:1]:\n params = finput\n\n solution.used[\"EKFSLAM.EKFSLAM.add_landmarks\"] = False\n\n EKFSLAM.EKFSLAM.add_landmarks(**params)\n\n assert not solution.used[\"EKFSLAM.EKFSLAM.add_landmarks\"], \"The function uses the solution\"", "def test_mgre(self):\n\n for itf in self.pg_interfaces[3:]:\n #\n # one underlay nh for each overlay/tunnel peer\n #\n itf.generate_remote_hosts(4)\n itf.configure_ipv4_neighbors()\n\n #\n # Create an L3 GRE tunnel.\n # - set it admin up\n # - assign an IP Addres\n # - Add a route via the tunnel\n #\n gre_if = VppGreInterface(\n self,\n itf.local_ip4,\n \"0.0.0.0\",\n mode=(VppEnum.vl_api_tunnel_mode_t.TUNNEL_API_MODE_MP),\n )\n gre_if.add_vpp_config()\n gre_if.admin_up()\n gre_if.config_ip4()\n gre_if.generate_remote_hosts(4)\n\n self.logger.info(self.vapi.cli(\"sh adj\"))\n self.logger.info(self.vapi.cli(\"sh ip fib\"))\n\n #\n # ensure we don't match to the tunnel if the source address\n # is all zeros\n #\n tx = self.create_tunnel_stream_4o4(\n self.pg0,\n \"0.0.0.0\",\n itf.local_ip4,\n self.pg0.local_ip4,\n self.pg0.remote_ip4,\n )\n self.send_and_assert_no_replies(self.pg0, tx)\n\n #\n # for-each peer\n #\n for ii in range(1, 4):\n route_addr = \"4.4.4.%d\" % ii\n tx_e = self.create_stream_ip4(self.pg0, \"5.5.5.5\", route_addr)\n\n #\n # route traffic via the peer\n #\n route_via_tun = VppIpRoute(\n self,\n route_addr,\n 32,\n [VppRoutePath(gre_if._remote_hosts[ii].ip4, gre_if.sw_if_index)],\n )\n route_via_tun.add_vpp_config()\n\n # all packets dropped at this point\n rx = self.send_and_assert_no_replies(self.pg0, tx_e)\n\n gre_if.admin_down()\n gre_if.admin_up()\n rx = self.send_and_assert_no_replies(self.pg0, tx_e)\n\n #\n # Add a TEIB entry resolves the peer\n #\n teib = VppTeib(\n self,\n gre_if,\n gre_if._remote_hosts[ii].ip4,\n itf._remote_hosts[ii].ip4,\n )\n teib.add_vpp_config()\n\n #\n # Send a packet stream that is routed into the tunnel\n # - packets are GRE encapped\n #\n 
rx = self.send_and_expect(self.pg0, tx_e, itf)\n self.verify_tunneled_4o4(\n self.pg0, rx, tx_e, itf.local_ip4, itf._remote_hosts[ii].ip4\n )\n\n tx_i = self.create_tunnel_stream_4o4(\n self.pg0,\n itf._remote_hosts[ii].ip4,\n itf.local_ip4,\n self.pg0.local_ip4,\n self.pg0.remote_ip4,\n )\n rx = self.send_and_expect(self.pg0, tx_i, self.pg0)\n self.verify_decapped_4o4(self.pg0, rx, tx_i)\n\n #\n # delete and re-add the TEIB\n #\n teib.remove_vpp_config()\n self.send_and_assert_no_replies(self.pg0, tx_e)\n self.send_and_assert_no_replies(self.pg0, tx_i)\n\n teib.add_vpp_config()\n rx = self.send_and_expect(self.pg0, tx_e, itf)\n self.verify_tunneled_4o4(\n self.pg0, rx, tx_e, itf.local_ip4, itf._remote_hosts[ii].ip4\n )\n rx = self.send_and_expect(self.pg0, tx_i, self.pg0)\n self.verify_decapped_4o4(self.pg0, rx, tx_i)\n\n #\n # bounce the interface state and try packets again\n #\n gre_if.admin_down()\n gre_if.admin_up()\n rx = self.send_and_expect(self.pg0, tx_e, itf)\n self.verify_tunneled_4o4(\n self.pg0, rx, tx_e, itf.local_ip4, itf._remote_hosts[ii].ip4\n )\n rx = self.send_and_expect(self.pg0, tx_i, self.pg0)\n self.verify_decapped_4o4(self.pg0, rx, tx_i)\n\n gre_if.admin_down()\n gre_if.unconfig_ip4()", "def test_generate_with_putaway(self):\n nbre_of_lines = 4\n shelf_location = self.env['stock.location'].create({\n 'name': 'shelf1',\n 'usage': 'internal',\n 'location_id': self.location_dest.id,\n })\n\n # Checks a first time without putaway...\n move = self.get_new_move(nbre_of_lines)\n form_wizard = Form(self.env['stock.assign.serial'].with_context(\n default_move_id=move.id,\n ))\n form_wizard.next_serial_count = nbre_of_lines\n form_wizard.next_serial_number = '001'\n wiz = form_wizard.save()\n wiz.generate_serial_numbers()\n\n for move_line in move.move_line_nosuggest_ids:\n self.assertEqual(move_line.qty_done, 1)\n # The location dest must be the default one.\n self.assertEqual(move_line.location_dest_id.id, self.location_dest.id)\n\n # We need to activate multi-locations to use putaway rules.\n grp_multi_loc = self.env.ref('stock.group_stock_multi_locations')\n self.env.user.write({'groups_id': [(4, grp_multi_loc.id)]})\n # Creates a putaway rule\n putaway_product = self.env['stock.putaway.rule'].create({\n 'product_id': self.product_serial.id,\n 'location_in_id': self.location_dest.id,\n 'location_out_id': shelf_location.id,\n })\n\n # Checks now with putaway...\n move = self.get_new_move(nbre_of_lines)\n form_wizard = Form(self.env['stock.assign.serial'].with_context(\n default_move_id=move.id,\n ))\n form_wizard.next_serial_count = nbre_of_lines\n form_wizard.next_serial_number = '001'\n wiz = form_wizard.save()\n wiz.generate_serial_numbers()\n\n for move_line in move.move_line_nosuggest_ids:\n self.assertEqual(move_line.qty_done, 1)\n # The location dest must be now the one from the putaway.\n self.assertEqual(move_line.location_dest_id.id, shelf_location.id)", "def test_active_inference_SPM_1a(self):\n array_path = os.path.join(os.getcwd(), DATA_PATH + \"vbx_test_1a.mat\")\n mat_contents = loadmat(file_name=array_path)\n\n A = mat_contents[\"A\"][0]\n B = mat_contents[\"B\"][0]\n C = to_arr_of_arr(mat_contents[\"C\"][0][0][:,0])\n obs_matlab = mat_contents[\"obs\"].astype(\"int64\")\n policy = mat_contents[\"policies\"].astype(\"int64\") - 1\n t_horizon = mat_contents[\"t_horizon\"][0, 0].astype(\"int64\")\n actions_matlab = mat_contents[\"actions\"].astype(\"int64\") - 1\n qs_matlab = mat_contents[\"qs\"][0]\n xn_matlab = mat_contents[\"xn\"][0]\n vn_matlab = 
mat_contents[\"vn\"][0]\n\n likelihoods_matlab = mat_contents[\"likelihoods\"][0]\n\n num_obs, num_states, _, num_factors = get_model_dimensions(A, B)\n obs = convert_observation_array(obs_matlab, num_obs)\n T = len(obs)\n\n agent = Agent(A=A, B=B, C=C, inference_algo=\"MMP\", policy_len=1, \n inference_horizon=t_horizon, use_BMA = False, \n policy_sep_prior = True)\n \n actions_python = np.zeros(T)\n\n for t in range(T):\n o_t = (np.where(obs[t])[0][0],)\n qx, xn_t, vn_t = agent.infer_states_test(o_t)\n q_pi, efe= agent.infer_policies()\n action = agent.sample_action()\n\n actions_python[t] = action\n\n xn_python = build_xn_vn_array(xn_t)\n vn_python = build_xn_vn_array(vn_t)\n\n if t == T-1:\n xn_python = xn_python[:,:,:-1,:]\n vn_python = vn_python[:,:,:-1,:]\n\n start_tstep = max(0, agent.curr_timestep - agent.inference_horizon)\n end_tstep = min(agent.curr_timestep + agent.policy_len, T)\n\n xn_validation = xn_matlab[0][:,:,start_tstep:end_tstep,t,:]\n vn_validation = vn_matlab[0][:,:,start_tstep:end_tstep,t,:]\n\n self.assertTrue(np.isclose(xn_python, xn_validation).all())\n self.assertTrue(np.isclose(vn_python, vn_validation).all())\n \n self.assertTrue(np.isclose(actions_matlab[0,:],actions_python[:-1]).all())", "def test_leftover_single_pi_allocation(self):\n proj_obj, fabric_obj, pr_objs = self._create_prerequisites(\n create_second_pr=True)\n test_id = self.id()\n\n def process_ae_ids(x):\n return [int(i) for i in sorted(x)]\n\n def get_zk_ae_ids(prs=None):\n prefix = os.path.join(\n self.__class__.__name__,\n 'id', 'aggregated-ethernet')\n zk_client = self._api_server._db_conn._zk_db._zk_client._zk_client\n if not prs:\n prs = [os.path.join(prefix, pr.name) for pr in pr_objs]\n else:\n if not isinstance(prs, list):\n prs = [prs]\n prs = [os.path.join(prefix, pr) for pr in prs]\n ae_ids = {}\n for pr in prs:\n pr_org = os.path.split(pr)[-1]\n ae_ids[pr_org] = zk_client.get_children(pr)\n return ae_ids\n\n pi_per_pr = 3\n pi_objs = {}\n pr1_pi_names = ['%s_pr1_pi%d' % (test_id, i) for\n i in range(1, pi_per_pr + 1)]\n pr1_pi_objs = self._create_pi_objects(pr_objs[0], pr1_pi_names)\n pi_objs.update(pr1_pi_objs)\n\n # create one VPG\n vpg_count = 1\n vpg_names = ['vpg_%s_%s' % (test_id, i) for\n i in range(1, vpg_count + 1)]\n vpg_objs = self._create_vpgs(fabric_obj, vpg_names)\n\n # record AE-IDs in ZK before creating any VPG\n ae_ids = [x for x in get_zk_ae_ids().values() if x]\n self.assertEqual(len(ae_ids), 0)\n\n # attach only PI1/PR1 to VPG-1\n # no AE-ID to be allocated\n ae_ids = {}\n vpg_name = vpg_names[0]\n vpg_obj = vpg_objs[vpg_name]\n vpg_obj.add_physical_interface(pi_objs[pr1_pi_names[0]])\n self.api.virtual_port_group_update(vpg_obj)\n vpg_obj = self._vnc_lib.virtual_port_group_read(id=vpg_obj.uuid)\n\n pi_refs = vpg_obj.get_physical_interface_refs()\n ae_ids[vpg_name] = {ref['href'].split('/')[-1]: ref['attr'].ae_num\n for ref in pi_refs}\n # verify PI-refs are correct\n self.assertEqual(len(pi_refs), 1)\n # verify all AE-IDs allocated per prouter are unique\n self.assertEqual(len(set(ae_ids[vpg_name].keys())), len(pi_refs))\n self.assertEqual(len(set(ae_ids[vpg_name].values())), 1)\n self.assertIsNone(list(ae_ids[vpg_name].values())[0])\n\n # verification at Physical Routers\n pr_ae_ids = get_zk_ae_ids()\n self.assertEqual(len(pr_ae_ids[pr_objs[0].name]), 0)\n self.assertEqual(process_ae_ids(pr_ae_ids[pr_objs[0].name]), [])\n\n # Attach PI1/PR1 and PI2/PR1 to VPG-1\n ae_ids = {}\n vpg_name = vpg_names[0]\n vpg_obj = 
self._vnc_lib.virtual_port_group_read(id=vpg_obj.uuid)\n for pi in range(1, 3):\n vpg_obj.add_physical_interface(pi_objs[pr1_pi_names[pi]])\n self.api.virtual_port_group_update(vpg_obj)\n vpg_obj = self._vnc_lib.virtual_port_group_read(id=vpg_obj.uuid)\n\n pi_refs = vpg_obj.get_physical_interface_refs()\n ae_ids[vpg_name] = {ref['href'].split('/')[-1]: ref['attr'].ae_num\n for ref in pi_refs}\n # verify PI-refs are correct\n self.assertEqual(len(pi_refs), 3)\n # verify all AE-IDs allocated per prouter are unique\n self.assertEqual(len(set(ae_ids[vpg_name].keys())), len(pi_refs))\n self.assertEqual(len(set(ae_ids[vpg_name].values())), 1)\n\n # verification at Physical Routers\n pr_ae_ids = get_zk_ae_ids()\n self.assertEqual(len(pr_ae_ids[pr_objs[0].name]), 1)\n self.assertEqual(process_ae_ids(pr_ae_ids[pr_objs[0].name]), [0])", "def test_agent(AgentFactory, steps, envs, percepts):\n print ('RUN TEST AGENT')\n envs.add_thing(AgentFactory)\n #envs.run(steps)\n \n agent = AgentFactory\n agent.program(percept)\n #envs.run(steps)\n envs.runPLWumpus(agent, steps)\n #envs.runPLWumpus(steps)\n print(' ------------PLWumpus test agent KB-----------------------')\n print(agent.KB.clauses)\n #print envs.to_string()\n print('test_agent', envs)\n #print agent.KB.clauses\n return agent.performance\n\n def score(env):\n agent = AgentFactory()\n env.add_thing(agent)\n env.run(steps)\n print('test_agent' , env)\n return agent.performance\n\n #return mean(map(score, envs))\n return None", "def visitspec(load,plate,mjd,fiber,gridfile='apg_rvsynthgrid',apstar=False) :\n grid = fits.open(os.environ['APOGEE_DIR']+'/data/synthgrid/'+gridfile+'.fits')\n if gridfile == 'apg_rvsynthgrid' : hdu=1\n elif gridfile == 'apg_rvsynthgrid_v2': hdu=0\n elif apstar : hdu=2\n else : hdu=1\n gridspec=grid[hdu].data\n gridwave = 10.**spectra.fits2vector(grid[hdu].header,2)\n griderr = np.ones(gridspec.shape[0])\n #for ispec in range(gridspec.shape[1]) :\n # cont = norm.cont(gridspec[:,ispec],griderr)\n # gridspec[:,ispec] /= cont\n\n data = load.apVisit(plate,mjd,fiber)\n\n # compare with DR14 \n comp(a,b,domatch=False,out='plots/dr14all')\n grid.append(['dr14all_1.png',''])\n xtit.append('all stars: DR14 (dotted) and test DR16 (solid)')\n\n comp(a,b,domatch=True,out='plots/dr14match')\n grid.append(['dr14match_1.png','dr14match_2.png'])\n xtit.append('same stars: DR14 (dotted) and test DR16 (solid)')\n # set bad pixels to nan\n shape=data[1].data.shape\n spec = copy.copy(data[1].data).flatten()\n specerr = copy.copy(data[2].data)\n specwave=data[4].data\n pixmask=bitmask.PixelBitMask()\n bd = np.where( ((data[3].data & pixmask.badval()) > 0) | \n ((data[3].data & pixmask.getval('SIG_SKYLINE')) > 0) ) [0]\n spec[bd] = np.nan\n spec = spec.reshape(shape)\n\n # continuum normalize and sample to grid\n outspec = np.full(len(gridwave),np.nan)\n if not apstar :\n # apVisit wavelengths are reversed\n spec=np.flip(spec)\n specwave=np.flip(specwave)\n specerr=np.flip(specerr)\n for ichip in range(3) :\n cont = norm.cont(spec[ichip,:],specerr[ichip,:])\n spec[ichip,:] /= cont\n gd=np.where(np.isfinite(spec[ichip,:]))[0]\n ip= interpolate.InterpolatedUnivariateSpline(specwave[ichip,gd],spec[ichip,gd],k=3)\n out = ip(gridwave)\n gd = np.where( (gridwave > specwave[ichip,0]) & (gridwave < specwave[ichip,-1]) )[0]\n outspec[gd] = out[gd]\n plt.plot(specwave[ichip,:],spec[ichip,:])\n plt.plot(gridwave[gd],out[gd])\n plt.show()\n\n for ispec in range(gridspec.shape[1]) :\n print(ispec)\n bd=np.where(np.isnan(outspec))\n outspec[bd]=1.\n 
out=correlate(outspec,gridspec[:,ispec])\n pdb.set_trace()", "def test6():\n setLogLevel(\"info\")\n info(\"Configuracion Unidad experimental\")\n \"\"\" 1 -> Definicion de la topologia \"\"\"\n t1 = Topologia1()\n ue1 = UnidadExperimental()\n ue1.setTopo(t1)\n ue1.definirNodosClaves(A = 'h1', C='h2', V='h3') # Caso solo para trafico normal\n ue1.setController('ryu', 'simple_switch_13.py,ofctl_rest.py')\n info(\"Configuracion del experimento\")\n \"\"\" 3. Confiracion del experimento \"\"\"\n exp1 = Experimento()\n exp1.configureParams(ue1)\n exp1.configurarTrafico('ataque')\n \"\"\" 4. Inicio del experimento \"\"\"\n exp1.startTest()\n \"\"\" 5. Aplicacion de pruebas \"\"\"\n exp1.trafico.iperfMeasure()\n exp1.trafico.iperfMeasure(filename='iperf_ataque_test.log')\n \"\"\" 6. Fin del experimento \"\"\"\n exp1.endTest()\n info(\"Removiendo la topologia\\n\")\n exp1.killTest()\n info(\"Removiendo el controlador\\n\")\n exp1.killController() # Si no se pone no se finaliza el controlador", "def test_run_alpha_rarefaction_parallel(self):\r\n\r\n run_alpha_rarefaction(\r\n self.test_data['biom'][0],\r\n self.test_data['map'][0],\r\n self.test_out,\r\n call_commands_serially,\r\n self.params,\r\n self.qiime_config,\r\n tree_fp=self.test_data['tree'][0],\r\n num_steps=5,\r\n parallel=True,\r\n min_rare_depth=3,\r\n max_rare_depth=18,\r\n status_update_callback=no_status_updates)\r\n\r\n html_fp = join(self.test_out, 'alpha_rarefaction_plots',\r\n 'rarefaction_plots.html')\r\n pd_averages_fp = join(self.test_out, 'alpha_rarefaction_plots',\r\n 'average_tables', 'PD_whole_treeSampleType.txt')\r\n pd_collated_fp = join(self.test_out, 'alpha_div_collated',\r\n 'PD_whole_tree.txt')\r\n\r\n # Confirm that palm and gut alpha diversities are different,\r\n # and suggestive of statistical significance (we only have a\r\n # few sequences, so we don't get significant results)\r\n ttest_res, alpha_avg = compare_alpha_diversities(open(pd_collated_fp),\r\n open(\r\n self.test_data[\r\n 'map'][0]),\r\n 'SampleType',\r\n 18,\r\n test_type='parametric')\r\n feces_palm_t = ttest_res[('feces', 'L_palm')][0]\r\n self.assertTrue(feces_palm_t < 0,\r\n \"t-statistic too high: %1.3f, but should be less than 0\"\r\n % feces_palm_t)\r\n\r\n # check that final output files have non-zero size\r\n self.assertTrue(getsize(html_fp) > 0)\r\n\r\n # Check that the log file is created and has size > 0\r\n log_fp = glob(join(self.test_out, 'log*.txt'))[0]\r\n self.assertTrue(getsize(log_fp) > 0)", "def test_ANOVA_one_way(self):\r\n g1 = array([10.0, 11.0, 10.0, 5.0, 6.0])\r\n g2 = array([1.0, 2.0, 3.0, 4.0, 1.0, 2.0])\r\n g3 = array([6.0, 7.0, 5.0, 6.0, 7.0])\r\n i = [g1, g2, g3]\r\n # dfn, dfd, F, between_MS, within_MS, group_means, prob = ANOVA_one_way(i)\r\n F, pval = ANOVA_one_way(i)\r\n # self.assertEqual(dfn, 2)\r\n # self.assertEqual(dfd, 13)\r\n self.assertFloatEqual(F, 18.565450643776831)\r\n # self.assertFloatEqual(between_MS, 55.458333333333343)\r\n # self.assertFloatEqual(within_MS, 2.9871794871794868)\r\n # self.assertFloatEqual(group_means, [8.4000000000000004, 2.1666666666666665, 6.2000000000000002])\r\n self.assertFloatEqual(pval, 0.00015486238993089464)", "def test_activate_mission(self):\n # Load sample data:\n TSStation.update_stations('TestTAMission.data/stations.xml')\n TASeries.import_xml('TestTAMission.data/series.xml')\n\n taskq = self.testbed.get_stub(testbed.TASKQUEUE_SERVICE_NAME)\n\n # FRS 10.5.1 If not specified, activate_mission must set nominalDate to the current date\n now = 
now_cet().replace(hour=2)\n mission = TAMission.get('nl.3046')\n mission.activate_mission()\n self.assertEqual(mission.nominalDate, now.date())\n taskq.FlushQueue('default')\n\n # FRS 10.5.2/3 activate_mission must generate origin_id, destination_id and stops\n test_set = ((mark_cet(datetime(2013, 2, 24, 2)), None, 0),\n (mark_cet(datetime(2013, 2, 18, 2)), 'nl.asd', 5),\n (mark_cet(datetime(2013, 2, 19, 2)), 'nl.amr', 8))\n for (testDate, destination, nr_of_stops) in test_set:\n mission.activate_mission(testDate)\n self.assertEqual(mission.destination_id, destination)\n self.assertEqual(len(mission.stops), nr_of_stops)\n mission.put()\n self.assertEqual(mission.origin_id, 'nl.ah')\n self.assertEqual(mission.last_stop.arrival_string, '15:48')\n\n # FRS 10.5.4 activated stops must get 'planned' status, last stop 'finalDestination'\n for index in range(0, 6):\n self.assertEqual(mission.stops[index].status, StopStatuses.planned)\n self.assertEqual(mission.stops[7].status, StopStatuses.finalDestination)\n\n # FRS 10.5.5 TAMission must queue a check-task while awaking a mission.\n tasks = taskq.GetTasks('default')\n self.assertEqual(len(tasks), 2)\n self.assertEqual(tasks[1]['url'], '/TAMission/nl.3046')\n self.assertEqual(tasks[1]['name'], '19_1231_xx_check_3046')\n taskq.FlushQueue('default')\n\n # FRS 10.5.6 Mission must check announcement of stops\n check_time = mark_cet(datetime(2013, 2, 19, 13, 41, 22))\n mission.stops[0].status = StopStatuses.announced\n mission.check_mission_announcements(check_time)\n tasks = taskq.GetTasks('default')\n self.assertEqual(len(tasks), 2)\n self.assertEqual(tasks[0]['url'], '/agent/station/nl.ed')\n self.assertEqual(tasks[0]['name'], '19_1241_25_check_3046')\n self.assertEqual(tasks[1]['url'], '/TAMission/nl.3046')\n self.assertEqual(tasks[1]['name'], '19_1246_xx_check_3046')\n taskq.FlushQueue('default')\n\n check_time = mark_cet(datetime(2013, 2, 19, 14, 02, 22))\n mission.stops[0].status = StopStatuses.planned\n mission.stops[1].status = StopStatuses.announced\n mission.stops[2].status = StopStatuses.announced\n mission.stops[3].status = StopStatuses.announced\n mission.stops[4].status = StopStatuses.announced\n mission.check_mission_announcements(check_time)\n tasks = taskq.GetTasks('default')\n self.assertEqual(len(tasks), 1)\n self.assertEqual(tasks[0]['url'], '/TAMission/nl.3046')\n self.assertEqual(tasks[0]['name'], '19_1348_xx_check_3046')\n\n # FRS 10.5.7 Mission must provide status and delay\n (status, delay) = mission.status_at_time(mark_cet(datetime(2013,2,19,14,0)))\n self.assertEqual(status, MissionStatuses.inactive)\n self.assertEqual(delay, 0)\n mission.first_stop.status = StopStatuses.announced\n (status, delay) = mission.status_at_time(mark_cet(datetime(2013,2,19,14,0)))\n self.assertEqual(delay, 0)\n self.assertEqual(status, MissionStatuses.announced)\n (status, delay) = mission.status_at_time(mark_cet(datetime(2013,2,19,14,30)))\n self.assertEqual(status, MissionStatuses.running)\n self.assertEqual(delay, 0)\n (status, delay) = mission.status_at_time(mark_cet(datetime(2013,2,19,15,49)))\n self.assertEqual(mission.est_arrival_cet, mark_cet(datetime(2013,2,19,15,48)))\n self.assertEqual(status, MissionStatuses.arrived)\n self.assertEqual(MissionStatuses.s[status], 'arrived')\n self.assertEqual(delay, 0)", "def _test_example_eda_adf():\n main([\"pnictogen/repo/split.ADF.in\", \"data/water-dimer.xyz\"])\n assert_equals(\n open(\"data/water-dimer_eda.in\").read(),\n \"\"\"TITLE data/water-dimer.xyz eda\n\nCHARGE 0 0\n\nNumber of atoms\n 
6\n\nATOMS Cartesian\nO 0.12908 -0.26336 0.64798 f=f1\nH 0.89795 0.28805 0.85518 f=f1\nH 0.10833 -0.20468 -0.33302 f=f1\nO 0.31020 0.07569 -2.07524 f=f2\nH -0.26065 0.64232 -2.62218 f=f2\nH 0.64083 -0.57862 -2.71449 f=f2\nEnd\n\nFragments\n f1 data/water-dimer_f1.t21\n f2 data/water-dimer_f2.t21\nEnd\n\nBasis\nEnd\n\nGeometry\nEnd\n\n\"\"\",\n )\n assert_equals(\n open(\"data/water-dimer_f1.in\").read(),\n \"\"\"TITLE data/water-dimer.xyz f1\n\nCHARGE 0 0\n\nNumber of atoms\n 3\n\nATOMS Cartesian\nO 0.12908 -0.26336 0.64798\nH 0.89795 0.28805 0.85518\nH 0.10833 -0.20468 -0.33302\nEnd\n\nBasis\nEnd\n\nGeometry\nEnd\n\n\"\"\",\n )\n assert_equals(\n open(\"data/water-dimer_f2.in\").read(),\n \"\"\"TITLE data/water-dimer.xyz f2\n\nCHARGE 0 0\n\nNumber of atoms\n 3\n\nATOMS Cartesian\nO 0.31020 0.07569 -2.07524\nH -0.26065 0.64232 -2.62218\nH 0.64083 -0.57862 -2.71449\nEnd\n\nBasis\nEnd\n\nGeometry\nEnd\n\n\"\"\",\n )", "def test_apply_endorsements(self):", "async def test_signal_repetitions(hass: HomeAssistant, monkeypatch) -> None:\n config = {\n \"rflink\": {\"port\": \"/dev/ttyABC0\"},\n DOMAIN: {\n \"platform\": \"rflink\",\n \"device_defaults\": {\"signal_repetitions\": 3},\n \"devices\": {\n \"protocol_0_0\": {\"name\": \"test\", \"signal_repetitions\": 2},\n \"protocol_0_1\": {\"name\": \"test1\"},\n },\n },\n }\n\n # setup mocking rflink module\n _, _, protocol, _ = await mock_rflink(hass, config, DOMAIN, monkeypatch)\n\n # test if signal repetition is performed according to configuration\n await hass.services.async_call(\n DOMAIN, SERVICE_OPEN_COVER, {ATTR_ENTITY_ID: f\"{DOMAIN}.test\"}\n )\n\n # wait for commands and repetitions to finish\n await hass.async_block_till_done()\n\n assert protocol.send_command_ack.call_count == 2\n\n # test if default apply to configured devices\n await hass.services.async_call(\n DOMAIN, SERVICE_OPEN_COVER, {ATTR_ENTITY_ID: f\"{DOMAIN}.test1\"}\n )\n\n # wait for commands and repetitions to finish\n await hass.async_block_till_done()\n\n assert protocol.send_command_ack.call_count == 5", "def test_5(self):\n params_spec, options_spec = generate_random_model()\n respy_obj = RespyCls(params_spec, options_spec)\n\n # Ensure that backward induction routines use the same grid for the\n # interpolation.\n max_states_period = write_interpolation_grid(respy_obj)\n\n # Extract class attributes\n (\n num_periods,\n edu_spec,\n optim_paras,\n num_draws_emax,\n is_debug,\n is_interpolated,\n num_points_interp,\n is_myopic,\n num_agents_sim,\n num_draws_prob,\n tau,\n seed_sim,\n num_agents_est,\n optimizer_options,\n file_sim,\n num_types,\n num_paras,\n ) = dist_class_attributes(\n respy_obj,\n \"num_periods\",\n \"edu_spec\",\n \"optim_paras\",\n \"num_draws_emax\",\n \"is_debug\",\n \"is_interpolated\",\n \"num_points_interp\",\n \"is_myopic\",\n \"num_agents_sim\",\n \"num_draws_prob\",\n \"tau\",\n \"seed_sim\",\n \"num_agents_est\",\n \"optimizer_options\",\n \"file_sim\",\n \"num_types\",\n \"num_paras\",\n )\n\n min_idx = edu_spec[\"max\"] + 1\n shocks_cholesky = optim_paras[\"shocks_cholesky\"]\n coeffs_common = optim_paras[\"coeffs_common\"]\n coeffs_home = optim_paras[\"coeffs_home\"]\n coeffs_edu = optim_paras[\"coeffs_edu\"]\n coeffs_a = optim_paras[\"coeffs_a\"]\n coeffs_b = optim_paras[\"coeffs_b\"]\n delta = optim_paras[\"delta\"]\n\n type_spec_shares = optim_paras[\"type_shares\"]\n type_spec_shifts = optim_paras[\"type_shifts\"]\n\n # Write out random components and interpolation grid to align the three\n # implementations.\n max_draws = 
max(num_agents_sim, num_draws_emax, num_draws_prob)\n write_types(type_spec_shares, num_agents_sim)\n write_edu_start(edu_spec, num_agents_sim)\n write_draws(num_periods, max_draws)\n write_lagged_start(num_agents_sim)\n\n # It is critical that the model is simulated after all files have been written\n # to the disk because they are picked up in the subroutines.\n respy_obj = simulate_observed(respy_obj)\n\n periods_draws_emax = read_draws(num_periods, num_draws_emax)\n periods_draws_prob = read_draws(num_periods, num_draws_prob)\n periods_draws_sims = read_draws(num_periods, num_agents_sim)\n\n fort, _ = resfort_interface(respy_obj, \"simulate\")\n\n state_space = pyth_solve(\n is_interpolated,\n num_points_interp,\n num_periods,\n is_debug,\n periods_draws_emax,\n edu_spec,\n optim_paras,\n file_sim,\n num_types,\n )\n\n (\n states_all,\n mapping_state_idx,\n periods_rewards_systematic,\n periods_emax,\n ) = state_space._get_fortran_counterparts()\n\n py = (\n periods_rewards_systematic,\n state_space.states_per_period,\n mapping_state_idx,\n periods_emax,\n states_all,\n )\n\n f2py = fort_debug.wrapper_solve(\n is_interpolated,\n num_points_interp,\n num_draws_emax,\n num_periods,\n is_myopic,\n is_debug,\n periods_draws_emax,\n min_idx,\n edu_spec[\"start\"],\n edu_spec[\"max\"],\n coeffs_common,\n coeffs_a,\n coeffs_b,\n coeffs_edu,\n coeffs_home,\n shocks_cholesky,\n delta,\n file_sim,\n max_states_period,\n num_types,\n type_spec_shares,\n type_spec_shifts,\n )\n\n assert_allclose(py[0], fort[0])\n assert_allclose(py[1], fort[1])\n assert_allclose(py[2], fort[2])\n assert_allclose(py[3], fort[3])\n assert_allclose(py[4], fort[4])\n\n assert_allclose(py[0], f2py[0])\n assert_allclose(py[1], f2py[1])\n assert_allclose(py[2], f2py[2])\n assert_allclose(py[3], f2py[3])\n assert_allclose(py[4], f2py[4])\n\n (\n states_all,\n mapping_state_idx,\n periods_rewards_systematic,\n periods_emax,\n ) = state_space._get_fortran_counterparts()\n\n simulated_data = pyth_simulate(\n state_space,\n num_agents_sim,\n periods_draws_sims,\n seed_sim,\n file_sim,\n edu_spec,\n optim_paras,\n is_debug,\n )\n py = simulated_data.copy().fillna(MISSING_FLOAT).values\n\n data_array = process_dataset(respy_obj).to_numpy()\n\n # Is is very important to cut the data array down to the size of the estimation\n # sample for the calculation of contributions.\n data_array = py[: num_agents_est * num_periods, :]\n\n f2py = fort_debug.wrapper_simulate(\n periods_rewards_systematic,\n mapping_state_idx,\n periods_emax,\n states_all,\n num_periods,\n num_agents_sim,\n periods_draws_sims,\n seed_sim,\n file_sim,\n edu_spec[\"start\"],\n edu_spec[\"max\"],\n edu_spec[\"share\"],\n edu_spec[\"lagged\"],\n optim_paras[\"coeffs_common\"],\n optim_paras[\"coeffs_a\"],\n optim_paras[\"coeffs_b\"],\n shocks_cholesky,\n delta,\n num_types,\n type_spec_shares,\n type_spec_shifts,\n is_debug,\n )\n assert_allclose(py, f2py)\n\n # We have to cut the simulated data to `num_agents_est` as the Python\n # implementation calculates the likelihood contributions for all agents in the\n # data.\n simulated_data = simulated_data.loc[\n simulated_data.Identifier.lt(num_agents_est)\n ]\n\n py = pyth_contributions(\n state_space, simulated_data, periods_draws_prob, tau, optim_paras\n )\n\n num_obs_agent = np.bincount(simulated_data.Identifier.to_numpy())\n\n f2py = fort_debug.wrapper_contributions(\n periods_rewards_systematic,\n mapping_state_idx,\n periods_emax,\n states_all,\n data_array,\n periods_draws_prob,\n tau,\n num_periods,\n 
num_draws_prob,\n num_agents_est,\n num_obs_agent,\n num_types,\n edu_spec[\"start\"],\n edu_spec[\"max\"],\n shocks_cholesky,\n delta,\n type_spec_shares,\n type_spec_shifts,\n )\n\n assert_allclose(py, f2py)\n\n # Evaluation of criterion function\n x0 = get_optim_paras(optim_paras, num_paras, \"all\", is_debug)\n\n py = pyth_criterion(\n x0,\n is_interpolated,\n num_points_interp,\n is_debug,\n simulated_data,\n tau,\n periods_draws_emax,\n periods_draws_prob,\n state_space,\n )\n\n f2py = fort_debug.wrapper_criterion(\n x0,\n is_interpolated,\n num_draws_emax,\n num_periods,\n num_points_interp,\n is_myopic,\n is_debug,\n data_array,\n num_draws_prob,\n tau,\n periods_draws_emax,\n periods_draws_prob,\n states_all,\n state_space.states_per_period,\n mapping_state_idx,\n max_states_period,\n num_agents_est,\n num_obs_agent,\n num_types,\n edu_spec[\"start\"],\n edu_spec[\"max\"],\n edu_spec[\"share\"],\n type_spec_shares,\n type_spec_shifts,\n num_paras,\n )\n\n assert_allclose(py, f2py)", "def test_ipam_vrfs_update(self):\n pass", "def test4():\n setLogLevel(\"info\")\n info(\"Configuracion Unidad experimental\")\n \"\"\" 1 -> Definicion de la topologia \"\"\"\n t1 = Topologia1()\n ue1 = UnidadExperimental()\n ue1.setTopo(t1)\n ue1.definirNodosClaves(C='h2', V='h3') # Caso solo para trafico normal\n ue1.setController('ryu', 'simple_switch_13.py,ofctl_rest.py')\n info(\"Configuracion del experimento\")\n \"\"\" 3. Confiracion del experimento \"\"\"\n exp1 = Experimento()\n exp1.configureParams(ue1)\n exp1.configurarTrafico('normal')\n \"\"\" 4. Inicio del experimento \"\"\"\n exp1.startTest()\n \"\"\" 5. Aplicacion de pruebas \"\"\"\n exp1.trafico.iperfMeasure()\n exp1.trafico.iperfMeasure(filename='iperf_normal_test.log')\n \"\"\" 6. Fin del experimento \"\"\"\n exp1.endTest()\n info(\"Removiendo la topologia\\n\")\n exp1.killTest()\n info(\"Removiendo el controlador\\n\")\n exp1.killController() # Si no se pone no se finaliza el controlador", "def test_fleur_relax_structure_Si(self, run_with_cache, mock_code_factory):\n assert False", "def test_repeatable(self):\n\n def run(seed, ModelClass=Model):\n \"\"\"Return the history of a run\"\"\"\n model = ModelClass(random_seed=seed)\n return model.one_trial(1, 10)\n\n self.assertEqual(run(0, ModelClass=Model).data, run(0, ModelClass=Model).data)\n self.assertEqual(run(0, ModelClass=ReplicatedModel).data, run(0, ModelClass=ReplicatedModel).data)", "def test_add_delete_a_pi_simultaneously_to_vpg_with_1_pi(self):\n proj_obj, fabric_obj, pr_objs = self._create_prerequisites(\n create_second_pr=True)\n test_id = self.id()\n VPG_CLASS = self._api_server.get_resource_class('virtual-port-group')\n org_process_ae_id = VPG_CLASS._process_ae_id\n\n class MockVpg(VPG_CLASS):\n org_process_ae_id = VPG_CLASS._process_ae_id\n HOLD_API = True\n @classmethod\n def mock_process_ae_id(cls, db_obj_dict, vpg_name, obj_dict=None):\n while cls.HOLD_API:\n print('sleeping for HOLD_API to clear for '\n 'args = %s' % obj_dict)\n gevent.sleep(0.5)\n return cls.org_process_ae_id(db_obj_dict, vpg_name, obj_dict)\n\n def process_ae_ids(x):\n return [int(i) for i in sorted(x) if i is not None]\n\n def get_zk_ae_ids(prs=None):\n prefix = os.path.join(\n self.__class__.__name__,\n 'id', 'aggregated-ethernet')\n zk_client = self._api_server._db_conn._zk_db._zk_client._zk_client\n if not prs:\n prs = [os.path.join(prefix, pr.name) for pr in pr_objs]\n else:\n if not isinstance(prs, list):\n prs = [prs]\n prs = [os.path.join(prefix, pr) for pr in prs]\n ae_ids = {}\n for pr 
in prs:\n pr_org = os.path.split(pr)[-1]\n ae_ids[pr_org] = zk_client.get_children(pr)\n return ae_ids\n\n pi_per_pr = 3\n pi_objs = {}\n pr1_pi_names = ['%s_pr1_pi%d' % (test_id, i) for\n i in range(1, pi_per_pr + 1)]\n pr1_pi_objs = self._create_pi_objects(pr_objs[0], pr1_pi_names)\n pi_objs.update(pr1_pi_objs)\n\n # create a VPG\n vpg_count = 1\n vpg_names = ['vpg_%s_%s' % (test_id, i) for i in range(\n 1, vpg_count + 1)]\n vpg_objs = self._create_vpgs(fabric_obj, vpg_names)\n\n # record AE-IDs in ZK before creating any VPG\n ae_ids = [x for x in get_zk_ae_ids().values() if x]\n self.assertEqual(len(ae_ids), 0)\n\n def _attach_pi_simultaneously(\n vpg_obj, create_pi_uuids, delete_pi_uuids=None):\n if delete_pi_uuids is None:\n delete_pi_uuids = []\n # Attach PIs from PR1 to VPG-1\n vpg_obj = self._vnc_lib.virtual_port_group_read(id=vpg_obj.uuid)\n try:\n # mock _process_ae_id at VPG resource\n VPG_CLASS._process_ae_id = MockVpg.mock_process_ae_id\n MockVpg.HOLD_API = True\n for pi_uuid in delete_pi_uuids:\n gevent.spawn(\n self.api.ref_update,\n \"virtual-port-group\",\n vpg_obj.uuid,\n \"physical-interface\",\n pi_uuid,\n None,\n \"DELETE\")\n for pi_uuid in create_pi_uuids:\n gevent.spawn(\n self.api.ref_update,\n \"virtual-port-group\",\n vpg_obj.uuid,\n \"physical-interface\",\n pi_uuid,\n None,\n \"ADD\",\n None)\n gevent.sleep(2)\n MockVpg.HOLD_API = False\n gevent.sleep(2)\n except gevent.timeout.Timeout:\n self.assertFalse(\n False,\n '%s failed unexpectedly' % VPG_CLASS._process_ae_id)\n finally:\n # reset mock to original\n VPG_CLASS._process_ae_id = org_process_ae_id\n vpg_obj = self._vnc_lib.virtual_port_group_read(id=vpg_obj.uuid)\n pi_refs = vpg_obj.get_physical_interface_refs()\n return vpg_obj, pi_refs\n\n # Case 1\n # Attach PI-1/PR-1 to VPG-1\n vpg_name = vpg_names[0]\n vpg_obj = vpg_objs[vpg_name]\n vpg_obj = self._vnc_lib.virtual_port_group_read(id=vpg_obj.uuid)\n pi_uuids = [list(pr1_pi_objs.values())[0].uuid]\n # vpg_obj, pi_refs = _attach_pi_simultaneously(vpg_obj, pi_uuids)\n pi_obj = self._vnc_lib.physical_interface_read(id=pi_uuids[0])\n vpg_obj.add_physical_interface(pi_obj)\n self.api.virtual_port_group_update(vpg_obj)\n vpg_obj = self._vnc_lib.virtual_port_group_read(id=vpg_obj.uuid)\n pi_refs = vpg_obj.get_physical_interface_refs()\n # verify PI-refs are correct\n self.assertEqual(len(pi_refs), 1)\n vpg_ae_ids = {ref['href'].split('/')[-1]: ref['attr'].ae_num\n for ref in pi_refs}\n # verify all AE-IDs allocated per prouter are unique\n self.assertEqual(len(set(vpg_ae_ids.keys())), len(pi_refs))\n self.assertIsNone(list(vpg_ae_ids.values())[0])\n ae_id_sorted = process_ae_ids(vpg_ae_ids.values())\n self.assertEqual(ae_id_sorted, [])\n # verification at ZK for AE-IDs in Physical Routers\n pr_ae_ids = get_zk_ae_ids()\n self.assertEqual(len(pr_ae_ids[pr_objs[0].name]), 0)\n self.assertEqual(process_ae_ids(pr_ae_ids[pr_objs[0].name]), [])\n\n # Case 2\n # Attach PI-2 from PR1 to VPG-1 and delete exiting PI-1/PR-1\n # simultaneously\n vpg_name = vpg_names[0]\n vpg_obj = vpg_objs[vpg_name]\n vpg_obj = self._vnc_lib.virtual_port_group_read(id=vpg_obj.uuid)\n pi_refs = vpg_obj.get_physical_interface_refs()\n existing_pi_uuids = [ref['uuid'] for ref in pi_refs]\n pi_uuids = [list(pr1_pi_objs.values())[1].uuid]\n vpg_obj, pi_refs = _attach_pi_simultaneously(\n vpg_obj, pi_uuids, existing_pi_uuids)\n # verify PI-refs are correct\n self.assertEqual(len(pi_refs), 1)\n vpg_ae_ids = {ref['href'].split('/')[-1]: ref['attr'].ae_num\n for ref in pi_refs}\n # verify all 
AE-IDs allocated per prouter are unique\n self.assertEqual(len(set(vpg_ae_ids.keys())), len(pi_refs))\n self.assertIsNone(list(vpg_ae_ids.values())[0])\n ae_id_sorted = process_ae_ids(vpg_ae_ids.values())\n self.assertEqual(ae_id_sorted, [])\n # verification at ZK for AE-IDs in Physical Routers\n pr_ae_ids = get_zk_ae_ids()\n self.assertEqual(len(pr_ae_ids[pr_objs[0].name]), 0)\n self.assertEqual(process_ae_ids(pr_ae_ids[pr_objs[0].name]), [])", "def test_generate_all_testing(self):\n pass", "def test_relaxation_end(self):\n tau = 50.0\n mrate = 40.0\n Mrate = 120.0\n\n tmax = 50.0\n dt = 0.1\n relaxation = 20.0\n\n tutor = SimpleNeurons(2, out_fct=lambda _: Mrate*np.random.rand())\n reward = MockReward(lambda t: np.sin(10*t/tmax))\n tutor_rule = ReinforcementTutorRule(tutor, reward, tau=tau,\n constrain_rates=True, min_rate=mrate, max_rate=Mrate,\n learning_rate=0.1, relaxation=relaxation, use_tutor_baseline=False)\n\n # reproducible arbitrariness\n np.random.seed(1)\n\n M = simulation.StateMonitor(tutor_rule, 'out')\n\n sim = simulation.Simulation(tutor, reward, tutor_rule, M, dt=dt)\n sim.run(tmax)\n\n mask = (M.t > tmax - relaxation/2)\n mavg = 0.5*(mrate + Mrate)\n\n self.assertAlmostEqual(np.mean(np.abs(M.out[:, mask] - mavg)), 0.0)", "def test_fleur_relax_structure_Si_film(self, run_with_cache, mock_code_factory):\n assert False", "def test_ex_3_1(self):\n fact1, fact2, query, scope = parse(PROLOG_OPS, 'q(a, b)', 'r(b, c)', 'p(U, V)')\n\n compiler = Compiler()\n fact1_instrs = compiler.compile_rule(fact1, [])\n fact2_instrs = compiler.compile_rule(fact2, [])\n reg_allocation = RegisterAllocation()\n query_instrs = compiler.compile_query_m1(query, reg_allocation)\n\n wam = WAM()\n wam.load(('p', 2), self.fig_3_1_instrs)\n wam.load(('q', 2), fact1_instrs)\n wam.load(('r', 2), fact2_instrs)\n query_offset = wam.load(None, query_instrs)\n wam.p = query_offset\n\n wam.run()\n\n aU = wam.deref_reg(reg_allocation[scope.var('U')])\n self.assertEqual(wam.get_term_repr(aU), 'a')\n aV = wam.deref_reg(reg_allocation[scope.var('V')])\n self.assertEqual(wam.get_term_repr(aV), 'c')", "def test_mmp_active_inference(self):\n\n num_obs = [3, 2]\n num_states = [4, 3]\n num_control = [1, 3]\n A = random_A_matrix(num_obs, num_states)\n B = random_B_matrix(num_states, num_control)\n\n C = obj_array_zeros(num_obs)\n C[1][0] = 1.0 \n C[1][1] = -2.0 \n\n agent = Agent(A=A, B=B, C=C, control_fac_idx=[1], inference_algo=\"MMP\", policy_len=2, inference_horizon=3)\n\n T = 10\n\n for t in range(T):\n\n o = [np.random.randint(num_ob) for num_ob in num_obs] # just randomly generate observations at each timestep, no generative process\n qx = agent.infer_states(o)\n agent.infer_policies()\n action = agent.sample_action()\n \n print(agent.prev_actions)\n print(agent.prev_obs)", "def test_ap_hs20_gas_while_associated_with_pmf(dev, apdev):\n bssid = apdev[0]['bssid']\n params = hs20_ap_params()\n params['hessid'] = bssid\n hostapd.add_ap(apdev[0]['ifname'], params)\n\n bssid2 = apdev[1]['bssid']\n params = hs20_ap_params()\n params['hessid'] = bssid2\n params['nai_realm'] = [ \"0,no-match.example.org,13[5:6],21[2:4][5:7]\" ]\n hostapd.add_ap(apdev[1]['ifname'], params)\n\n dev[0].hs20_enable()\n dev[0].request(\"SET pmf 2\")\n id = dev[0].add_cred_values({ 'realm': \"example.com\",\n 'username': \"hs20-test\",\n 'password': \"password\",\n 'domain': \"example.com\" })\n interworking_select(dev[0], bssid, \"home\", freq=\"2412\")\n interworking_connect(dev[0], bssid, \"TTLS\")\n\n logger.info(\"Verifying GAS query 
while associated\")\n dev[0].request(\"FETCH_ANQP\")\n for i in range(0, 2 * 6):\n ev = dev[0].wait_event([\"RX-ANQP\"], timeout=5)\n if ev is None:\n raise Exception(\"Operation timed out\")", "def test_FEMM_periodicity_angle():\n\n SPMSM_015 = load(join(DATA_DIR, \"Machine\", \"SPMSM_015.json\"))\n\n assert SPMSM_015.comp_periodicity() == (9, False, 9, True)\n\n simu = Simu1(name=\"test_FEMM_periodicity_angle\", machine=SPMSM_015)\n\n # Definition of the enforced output of the electrical module\n I0_rms = 250 / sqrt(2)\n Phi0 = 140 * pi / 180 # Maximum Torque Per Amp\n\n Id_ref = (I0_rms * exp(1j * Phi0)).real\n Iq_ref = (I0_rms * exp(1j * Phi0)).imag\n\n simu.input = InputCurrent(\n Id_ref=Id_ref,\n Iq_ref=Iq_ref,\n Na_tot=252 * 9,\n Nt_tot=4 * 9,\n N0=1000,\n )\n\n # Definition of the magnetic simulation: with periodicity\n simu.mag = MagFEMM(\n type_BH_stator=1,\n type_BH_rotor=1,\n is_periodicity_a=True,\n is_periodicity_t=False,\n nb_worker=cpu_count(),\n Kmesh_fineness=2,\n )\n simu.force = ForceMT()\n\n # Definition of the magnetic simulation: no periodicity\n # Definition of the magnetic simulation: no periodicity\n simu2 = simu.copy()\n simu2.mag.is_periodicity_a = False\n\n simu2.force = ForceMT()\n\n # Run simulations\n out = Output(simu=simu)\n simu.run()\n\n out2 = Output(simu=simu2)\n simu2.run()\n\n # Plot the result\n out.mag.B.plot_2D_Data(\n \"time\",\n \"angle[0]{°}\",\n data_list=[out2.mag.B],\n legend_list=[\"Periodic\", \"Full\"],\n save_path=join(save_path, simu.name + \"_B_time.png\"),\n is_show_fig=False,\n **dict_2D\n )\n\n out.mag.B.plot_2D_Data(\n \"angle{°}\",\n \"time[1]\",\n data_list=[out2.mag.B],\n legend_list=[\"Periodic\", \"Full\"],\n save_path=join(save_path, simu.name + \"_B_space.png\"),\n is_show_fig=False,\n **dict_2D\n )\n\n out.force.AGSF.plot_2D_Data(\n \"wavenumber=[0,100]\",\n \"time[0]\",\n data_list=[out2.force.AGSF],\n legend_list=[\"Periodic\", \"Full\"],\n save_path=join(save_path, simu.name + \"_P_space_fft.png\"),\n is_show_fig=False,\n **dict_2D\n )\n\n out.force.AGSF.plot_2D_Data(\n \"freqs\",\n \"angle[0]\",\n data_list=[out2.force.AGSF],\n legend_list=[\"Periodic\", \"Full\"],\n save_path=join(save_path, simu.name + \"_P_fft2.png\"),\n is_show_fig=False,\n **dict_2D\n )\n\n out.mag.Tem.plot_2D_Data(\n \"time\",\n data_list=[out2.mag.Tem],\n legend_list=[\"Periodic\", \"Full\"],\n save_path=join(save_path, simu.name + \"_Tem_time.png\"),\n is_show_fig=False,\n **dict_2D\n )\n\n out.mag.Phi_wind_stator.plot_2D_Data(\n \"time\",\n \"phase\",\n data_list=[out2.mag.Phi_wind_stator],\n legend_list=[\"Periodic\", \"Full\"],\n save_path=join(save_path, simu.name + \"_Phi_wind_stator_time.png\"),\n is_show_fig=False,\n **dict_2D\n )\n\n # Compare both simu\n Bflux = out.mag.B\n arg_list = [\"angle\"]\n result = Bflux.get_rphiz_along(*arg_list)\n Brad = result[\"radial\"]\n angle = result[\"angle\"]\n\n Bflux2 = out2.mag.B\n arg_list = [\"angle\"]\n result2 = Bflux2.get_rphiz_along(*arg_list)\n Brad2 = result2[\"radial\"]\n\n assert_array_almost_equal(Brad, Brad2, decimal=1)\n\n return out, out2", "def test_strategy(self):\n self.first_play_test(C)", "def test_lama_job_runner():\n\n configs = registration_root.glob('*.toml')\n\n for cfg in configs:\n delete_previous_files()\n\n print(f\"\\n{'#'*8} Doing config {cfg.name} {'#'*8}\")\n\n lama_job_runner.lama_job_runner(cfg, wt_registration_dir, make_job_file=True, log_level=logging.ERROR)\n lama_job_runner.lama_job_runner(cfg, wt_registration_dir, log_level=logging.ERROR)\n\n 
lama_job_runner.lama_job_runner(cfg, mut_registration_dir, make_job_file=True, log_level=logging.ERROR)\n lama_job_runner.lama_job_runner(cfg, mut_registration_dir, log_level=logging.ERROR)\n # return # Just do the first", "def test_7(self):\n # Impose constraints\n point_constr = {\"num_periods\": np.random.randint(2, 5)}\n\n params_spec, options_spec = generate_random_model(point_constr=point_constr)\n respy_obj = RespyCls(params_spec, options_spec)\n\n # Extract class attributes\n is_debug, num_periods = dist_class_attributes(\n respy_obj, \"is_debug\", \"num_periods\"\n )\n\n # Write out a grid for the interpolation\n max_states_period = write_interpolation_grid(respy_obj)\n\n # Draw random request for testing\n num_states = np.random.randint(1, max_states_period)\n candidates = list(range(num_states))\n\n period = np.random.randint(1, num_periods)\n num_points_interp = np.random.randint(1, num_states + 1)\n\n # Check function for random choice and make sure that there are no duplicates.\n args = (candidates, num_states, num_points_interp)\n f90 = fort_debug.wrapper_random_choice(*args)\n assert_equal(len(set(f90)), len(f90))\n assert_equal(len(f90), num_points_interp)\n\n # Check the standard cases of the function.\n args = (num_points_interp, num_states, period, is_debug, num_periods)\n f90 = fort_debug.wrapper_get_simulated_indicator(*args)\n\n assert_equal(len(f90), num_states)\n assert_equal(np.all(f90) in [0, 1], True)\n\n # Test the standardization across PYTHON, F2PY, and FORTRAN implementations.\n # This is possible as we write out an interpolation grid to disk which is used\n # for both functions.\n base_args = (num_points_interp, num_states, period, is_debug)\n args = base_args\n py = get_simulated_indicator(*args)\n args = base_args + (num_periods,)\n f90 = fort_debug.wrapper_get_simulated_indicator(*args)\n assert_array_equal(f90, 1 * py)\n os.unlink(\".interpolation.respy.test\")\n\n # Special case where number of interpolation points are same as the number of\n # candidates. 
In that case the returned indicator should be all TRUE.\n args = (num_states, num_states, period, True, num_periods)\n f90 = fort_debug.wrapper_get_simulated_indicator(*args)\n assert_equal(sum(f90), num_states)", "def test_shapes_and_exceptions(self):\n output_path = FLAGS.test_tmpdir\n output_name = 'temp'\n equation_name = 'advection_diffusion'\n discretization = 'finite_volume'\n dataset_type = 'all_derivatives'\n high_resolution = 125\n low_resolution = 25\n shards = 2\n example_num_time_steps = 3\n batch_size = 4\n diffusion_coefficient = 0.3\n\n expected_equation = advection_equations.FiniteVolumeAdvectionDiffusion(\n diffusion_coefficient=diffusion_coefficient)\n\n # create a temporary dataset\n with flagsaver.flagsaver(\n dataset_path=output_path,\n dataset_name=output_name,\n equation_name=equation_name,\n discretization=discretization,\n simulation_grid_size=high_resolution,\n output_grid_size=low_resolution,\n equation_kwargs=str(dict(diffusion_coefficient=diffusion_coefficient)),\n dataset_type=dataset_type,\n num_shards=shards,\n total_time_steps=10,\n example_num_time_steps=example_num_time_steps,\n time_step_interval=5,\n num_seeds=4,\n ):\n create_training_data.main([], runner=beam.runners.DirectRunner())\n\n metadata_path = os.path.join(output_path, output_name + '.metadata.json')\n self.assertTrue(gfile.exists(metadata_path))\n dataset_metadata = readers.load_metadata(metadata_path)\n low_res_grid = readers.get_output_grid(dataset_metadata)\n high_res_grid = readers.get_simulation_grid(dataset_metadata)\n equation = readers.get_equation(dataset_metadata)\n\n self.assertEqual(low_res_grid.size_x, low_resolution)\n self.assertEqual(low_res_grid.size_y, low_resolution)\n self.assertEqual(high_res_grid.size_x, high_resolution)\n self.assertEqual(high_res_grid.size_y, high_resolution)\n self.assertAlmostEqual(high_res_grid.step, 2 * np.pi / high_resolution)\n self.assertAlmostEqual(\n equation.diffusion_coefficient, diffusion_coefficient)\n self.assertIs(type(equation), type(expected_equation))\n\n state_keys = expected_equation.key_definitions\n valid_data_keys = ((state_keys['concentration'].exact(),),\n (state_keys['concentration_edge_x'].exact(),\n state_keys['concentration_y_edge_y'].exact()))\n invalid_data_keys = ((state_keys['concentration'],\n state_keys['concentration_edge_x']),\n (state_keys['concentration_edge_x'],))\n valid_data_grids = (low_res_grid, low_res_grid)\n invalid_data_grids = (low_res_grid, high_res_grid)\n\n with self.assertRaises(ValueError):\n readers.initialize_dataset(\n dataset_metadata, invalid_data_keys, valid_data_grids)\n with self.assertRaises(ValueError):\n readers.initialize_dataset(\n dataset_metadata, valid_data_keys, invalid_data_grids)\n with self.assertRaises(ValueError):\n readers.initialize_dataset(\n dataset_metadata, invalid_data_keys, invalid_data_grids)\n\n dataset = readers.initialize_dataset(\n dataset_metadata, valid_data_keys, valid_data_grids)\n dataset = dataset.repeat()\n dataset = dataset.batch(batch_size)\n\n [(first_state, second_state)] = dataset.take(1)\n self.assertEqual(set(first_state.keys()), set(valid_data_keys[0]))\n self.assertEqual(set(second_state.keys()), set(valid_data_keys[1]))\n first_state_shape = np.shape(first_state[valid_data_keys[0][0]])\n second_state_shape = np.shape(second_state[valid_data_keys[1][0]])\n expected_shape = (\n batch_size, example_num_time_steps, low_resolution, low_resolution)\n self.assertEqual(first_state_shape, expected_shape)\n self.assertEqual(second_state_shape, 
expected_shape)", "def test_ex_2_1(self):\n\n wam = WAM()\n wam.execute(self.fig_2_3_instrs)\n #s = wam.get_term_repr(wam.deref_reg(0))\n s = wam.get_term_repr(7)\n self.assertEqual(s, 'p(_G2, h(_G2, _G3), f(_G3))')", "def test(self):\n # -- Test --\n\n # (1)\n\n # (2)\n\n # (3)\n\n # (4)\n # -- Test --", "def test_timestep(self):\n class Mock(object):\n def __init__(self):\n self.t = 0.0\n self.dt = None\n\n def evolve(self1, t, dt):\n if self1.dt is not None:\n self.assertAlmostEqual(self1.dt, dt)\n else:\n self1.dt = dt\n\n self.assertAlmostEqual(self1.t, t)\n\n self1.t += self1.dt\n\n t_max = 10.0\n dt = 0.2\n\n G = Mock()\n simulation.Simulation(G, dt=dt).run(t_max)\n self.assertAlmostEqual(G.dt, dt)", "def test_delete_pi_simultaneously_to_vpg_with_multiple_pi(self):\n proj_obj, fabric_obj, pr_objs = self._create_prerequisites(\n create_second_pr=True)\n test_id = self.id()\n VPG_CLASS = self._api_server.get_resource_class('virtual-port-group')\n org_process_ae_id = VPG_CLASS._process_ae_id\n\n class MockVpg(VPG_CLASS):\n org_process_ae_id = VPG_CLASS._process_ae_id\n HOLD_API = True\n @classmethod\n def mock_process_ae_id(cls, db_obj_dict, vpg_name, obj_dict=None):\n while cls.HOLD_API:\n print('sleeping for HOLD_API to clear for '\n 'args = %s' % obj_dict)\n gevent.sleep(0.5)\n return cls.org_process_ae_id(db_obj_dict, vpg_name, obj_dict)\n\n def process_ae_ids(x):\n return [int(i) for i in sorted(x) if i is not None]\n\n def get_zk_ae_ids(prs=None):\n prefix = os.path.join(\n self.__class__.__name__,\n 'id', 'aggregated-ethernet')\n zk_client = self._api_server._db_conn._zk_db._zk_client._zk_client\n if not prs:\n prs = [os.path.join(prefix, pr.name) for pr in pr_objs]\n else:\n if not isinstance(prs, list):\n prs = [prs]\n prs = [os.path.join(prefix, pr) for pr in prs]\n ae_ids = {}\n for pr in prs:\n pr_org = os.path.split(pr)[-1]\n ae_ids[pr_org] = zk_client.get_children(pr)\n return ae_ids\n\n pi_per_pr = 3\n pi_objs = {}\n pr1_pi_names = ['%s_pr1_pi%d' % (test_id, i) for\n i in range(1, pi_per_pr + 1)]\n pr2_pi_names = ['%s_pr2_pi%d' % (test_id, i) for\n i in range(1, pi_per_pr + 1)]\n pr1_pi_objs = self._create_pi_objects(pr_objs[0], pr1_pi_names)\n pr2_pi_objs = self._create_pi_objects(pr_objs[1], pr2_pi_names)\n pi_objs.update(pr1_pi_objs)\n pi_objs.update(pr2_pi_objs)\n\n # create a VPG\n vpg_count = 1\n vpg_names = ['vpg_%s_%s' % (test_id, i) for i in range(\n 1, vpg_count + 1)]\n vpg_objs = self._create_vpgs(fabric_obj, vpg_names)\n\n # record AE-IDs in ZK before creating any VPG\n ae_ids = [x for x in get_zk_ae_ids().values() if x]\n self.assertEqual(len(ae_ids), 0)\n\n def _attach_pi_simultaneously(\n vpg_obj, create_pi_uuids=None, delete_pi_uuids=None):\n if create_pi_uuids is None:\n create_pi_uuids = []\n if delete_pi_uuids is None:\n delete_pi_uuids = []\n # Attach PIs from PR1 to VPG-1\n vpg_obj = self._vnc_lib.virtual_port_group_read(id=vpg_obj.uuid)\n try:\n # mock _process_ae_id at VPG resource\n VPG_CLASS._process_ae_id = MockVpg.mock_process_ae_id\n # MockVpg.HOLD_API = True\n MockVpg.HOLD_API = False\n for pi_uuid in create_pi_uuids:\n gevent.spawn(\n self.api.ref_update,\n \"virtual-port-group\",\n vpg_obj.uuid,\n \"physical-interface\",\n pi_uuid,\n None,\n \"ADD\",\n None)\n for pi_uuid in delete_pi_uuids:\n gevent.spawn(\n self.api.ref_update,\n \"virtual-port-group\",\n vpg_obj.uuid,\n \"physical-interface\",\n pi_uuid,\n None,\n \"DELETE\",\n None)\n gevent.sleep(2)\n MockVpg.HOLD_API = False\n gevent.sleep(3)\n except gevent.timeout.Timeout:\n 
self.assertFalse(\n False,\n '%s failed unexpectedly' % VPG_CLASS._process_ae_id)\n finally:\n # reset mock to original\n VPG_CLASS._process_ae_id = org_process_ae_id\n vpg_obj = self._vnc_lib.virtual_port_group_read(id=vpg_obj.uuid)\n pi_refs = vpg_obj.get_physical_interface_refs()\n return vpg_obj, pi_refs\n\n # Case 1\n # Attach 3 PIs/PR1 and 3 PIs/PR2 to VPG1\n vpg_name = vpg_names[0]\n vpg_obj = vpg_objs[vpg_name]\n vpg_obj = self._vnc_lib.virtual_port_group_read(id=vpg_obj.uuid)\n pr1_pi_uuids = [pi_objs[pr1_pi_names[pi]].uuid for pi in range(3)]\n pr2_pi_uuids = [pi_objs[pr2_pi_names[pi]].uuid for pi in range(3)]\n pi_uuids = pr1_pi_uuids + pr2_pi_uuids\n vpg_obj, pi_refs = _attach_pi_simultaneously(vpg_obj, pi_uuids)\n # verify PI-refs are correct\n self.assertEqual(len(pi_refs), 6)\n vpg_ae_ids = {ref['href'].split('/')[-1]: ref['attr'].ae_num\n for ref in pi_refs}\n # verify all AE-IDs allocated per prouter are unique\n self.assertEqual(len(set(vpg_ae_ids.keys())), len(pi_refs))\n self.assertEqual(len(vpg_ae_ids.values()), 6)\n ae_id_sorted = process_ae_ids(vpg_ae_ids.values())\n self.assertEqual(ae_id_sorted, [0] * 6)\n # verification at Physical Routers\n pr_ae_ids = get_zk_ae_ids()\n self.assertEqual(len(pr_ae_ids[pr_objs[0].name]), 1)\n self.assertEqual(len(pr_ae_ids[pr_objs[1].name]), 1)\n self.assertEqual(process_ae_ids(pr_ae_ids[pr_objs[0].name]), [0])\n self.assertEqual(process_ae_ids(pr_ae_ids[pr_objs[1].name]), [0])\n\n # Case 2\n # Deattach PI-1/PR-1, PI-1/PR-2 from VPG-1\n vpg_name = vpg_names[0]\n vpg_obj = vpg_objs[vpg_name]\n vpg_obj = self._vnc_lib.virtual_port_group_read(id=vpg_obj.uuid)\n pi_uuids = [pr1_pi_uuids[0], pr2_pi_uuids[0]]\n vpg_obj, pi_refs = _attach_pi_simultaneously(\n vpg_obj, delete_pi_uuids=pi_uuids)\n # verify PI-refs are correct\n self.assertEqual(len(pi_refs), 4)\n vpg_ae_ids = {ref['href'].split('/')[-1]: ref['attr'].ae_num\n for ref in pi_refs}\n # verify all AE-IDs allocated per prouter are unique\n self.assertEqual(len(set(vpg_ae_ids.keys())), len(pi_refs))\n self.assertEqual(len(vpg_ae_ids.values()), 4)\n ae_id_sorted = process_ae_ids(vpg_ae_ids.values())\n self.assertEqual(ae_id_sorted, [0] * 4)\n # verification at Physical Routers\n pr_ae_ids = get_zk_ae_ids()\n self.assertEqual(len(pr_ae_ids[pr_objs[0].name]), 1)\n self.assertEqual(len(pr_ae_ids[pr_objs[1].name]), 1)\n self.assertEqual(process_ae_ids(pr_ae_ids[pr_objs[0].name]), [0])\n self.assertEqual(process_ae_ids(pr_ae_ids[pr_objs[1].name]), [0])\n\n # Case 3\n # Deattach all PIs/PR-1. 
AE-IDs at PR-1 to be de-allocated\n vpg_name = vpg_names[0]\n vpg_obj = vpg_objs[vpg_name]\n vpg_obj = self._vnc_lib.virtual_port_group_read(id=vpg_obj.uuid)\n pi_uuids = pr1_pi_uuids[1:3]\n vpg_obj, pi_refs = _attach_pi_simultaneously(\n vpg_obj, delete_pi_uuids=pi_uuids)\n # verify PI-refs are correct\n self.assertEqual(len(pi_refs), 2)\n vpg_ae_ids = {ref['href'].split('/')[-1]: ref['attr'].ae_num\n for ref in pi_refs}\n # verify all AE-IDs allocated per prouter are unique\n self.assertEqual(len(set(vpg_ae_ids.keys())), len(pi_refs))\n self.assertEqual(len(vpg_ae_ids.values()), 2)\n ae_id_sorted = process_ae_ids(vpg_ae_ids.values())\n self.assertEqual(ae_id_sorted, [0] * 2)\n # verification at Physical Routers\n pr_ae_ids = get_zk_ae_ids()\n self.assertEqual(len(pr_ae_ids[pr_objs[0].name]), 0)\n self.assertEqual(len(pr_ae_ids[pr_objs[1].name]), 1)\n self.assertEqual(process_ae_ids(pr_ae_ids[pr_objs[0].name]), [])\n self.assertEqual(process_ae_ids(pr_ae_ids[pr_objs[1].name]), [0])", "def test_ex_2_7(self):\n\n wam = WAM()\n wam.execute(self.fig_2_9_instrs[:-1]) # last instruction is call; remove it\n wam.execute(self.fig_2_10_instrs)\n aW = wam.deref_reg(4)\n aX = wam.deref_reg(4)\n aY = wam.deref_reg(5)\n aZ = wam.deref_reg(1)\n self.assertEqual(wam.get_term_repr(aW), 'f(a)')\n self.assertEqual(wam.get_term_repr(aX), 'f(a)')\n self.assertEqual(wam.get_term_repr(aY), 'f(f(a))')\n self.assertEqual(wam.get_term_repr(aZ), 'f(f(a))')" ]
[ "0.5912187", "0.57896715", "0.57230085", "0.56915814", "0.5550722", "0.55056584", "0.5462291", "0.54244", "0.54005516", "0.53685725", "0.53471875", "0.53402764", "0.5339042", "0.53294677", "0.528481", "0.5281426", "0.52730227", "0.5270677", "0.52555203", "0.5252101", "0.5242871", "0.5232969", "0.5232711", "0.522285", "0.5222638", "0.52195626", "0.5218014", "0.52177167", "0.5209131", "0.5196099", "0.51912427", "0.5181915", "0.51718205", "0.51712227", "0.517081", "0.51693344", "0.5168884", "0.5163019", "0.5152028", "0.51491207", "0.51438004", "0.51329917", "0.5132944", "0.5118188", "0.5112732", "0.5108157", "0.5106667", "0.51035416", "0.50985056", "0.5095022", "0.509158", "0.5078657", "0.5078106", "0.5076484", "0.5075959", "0.5075509", "0.5070435", "0.5061978", "0.50600624", "0.5053043", "0.50440055", "0.50423706", "0.5034533", "0.5034473", "0.5032851", "0.5032591", "0.503189", "0.5029734", "0.50290465", "0.5027805", "0.5026731", "0.5022638", "0.50191027", "0.5017695", "0.5016448", "0.5014899", "0.50147796", "0.5008581", "0.5004991", "0.50029886", "0.5000019", "0.49992153", "0.49981996", "0.498693", "0.49864954", "0.498061", "0.49782473", "0.49772155", "0.49766353", "0.49757475", "0.49728337", "0.4971988", "0.4969695", "0.4968611", "0.49633172", "0.4960702", "0.4960542", "0.49594507", "0.49592444", "0.49570966" ]
0.6325197
0
Test the scaling-specific parameter manager.
def test_scaling_active_parameter_manager():
    components_2 = {"1": mock_scaling_component(2), "2": mock_scaling_component(2)}
    scaling_apm = scaling_active_parameter_manager(components_2, ["1"])
    assert list(scaling_apm.constant_g_values[0]) == list(
        components_2["2"].calculate_scales()
    )
    assert len(scaling_apm.constant_g_values) == 1
    assert scaling_apm.n_obs == [2]

    # Test that no constant_g_values if both components selected
    scaling_apm = scaling_active_parameter_manager(components_2, ["1", "2"])
    assert scaling_apm.constant_g_values is None

    # Check that one can't initialise with an unequal number of reflections,
    # either within the selection or overall.
    with pytest.raises(AssertionError):
        components_2 = {"1": mock_scaling_component(2), "2": mock_scaling_component(1)}
        scaling_apm = scaling_active_parameter_manager(components_2, ["1", "2"])
    with pytest.raises(AssertionError):
        components_2 = {"1": mock_scaling_component(2), "2": mock_scaling_component(1)}
        scaling_apm = scaling_active_parameter_manager(components_2, ["1"])

    data_manager = mock_data_manager(components_2)
    pmg = ScalingParameterManagerGenerator(
        [data_manager], target=ScalingTarget(), mode="concurrent"
    )
    assert isinstance(pmg.apm_type, type(scaling_active_parameter_manager))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_get_measure_parameters(self):\n pass", "def test_scale(app):\n\n assert False", "def test_general_apm():\n components = {\n \"scale\": mock_component(),\n \"decay\": mock_component(),\n \"absorption\": mock_component(),\n }\n\n apm = active_parameter_manager(components, [\"scale\", \"decay\"])\n assert \"decay\" in apm.components_list\n assert \"scale\" in apm.components_list\n assert \"absorption\" not in apm.components_list\n assert apm.n_active_params == (\n components[\"scale\"].n_params + components[\"decay\"].n_params\n )\n n_cumul = 0\n for component in apm.components:\n assert apm.components[component][\"n_params\"] == components[component].n_params\n assert apm.components[component][\"start_idx\"] == n_cumul\n assert (\n apm.components[component][\"end_idx\"]\n == n_cumul + apm.components[component][\"n_params\"]\n )\n n_cumul += apm.components[component][\"n_params\"]\n\n apm.set_param_vals(flex.double([2.0, 1.5]))\n assert apm.get_param_vals() == flex.double([2.0, 1.5])\n # Test params were updated in components\n assert list(components[\"scale\"].free_parameters) == [2.0]\n assert list(components[\"decay\"].free_parameters) == [1.5]\n # Test selection of parameters\n decay_params = apm.select_parameters(\"decay\")\n assert len(decay_params) == 1\n assert decay_params[0] == 1.5\n\n # Test calculate model state uncertainties\n var_cov = flex.double([1.0, 0.5, 0.5, 2.0])\n var_cov.reshape(flex.grid(2, 2))\n apm.calculate_model_state_uncertainties(var_cov)\n assert components[\"scale\"].var_cov_matrix[0, 0] == 1.0\n assert components[\"decay\"].var_cov_matrix[0, 0] == 2.0\n\n # Test set param esds.\n apm.set_param_esds(flex.double([0.1, 0.2]))\n assert components[\"scale\"].free_parameter_esds == flex.double([0.1])\n assert components[\"decay\"].free_parameter_esds == flex.double([0.2])", "def test_parameters(self):\n self.assert_initialize_driver()\n #reply = self.driver_client.cmd_dvr('get_resource', Parameter.ALL)\n #self.assert_driver_parameters(reply, verify_sample_interval=True)", "def test_get_all_scaled_scores_success(self):\n with mock.patch('score.ScoresGenerator.split_data') as mock_split_data:\n with mock.patch('score.ScoresGenerator.create_category_scaled_score') \\\n as mock_scaled_category:\n with mock.patch('score.ScoresGenerator.create_total_scaled_score') \\\n as mock_scaled_total:\n for test in self.success_get_all_scaled_score_test_params:\n score_test = score.ScoresGenerator()\n score_test.get_all_scaled_scores(test[KEY_INPUT])\n self.assertDictEqual(score_test.SCALED_SCORES, test[KEY_EXPECTED])", "def test_arguments(self):\n\n h.test_function_arguments(\n func=ScalingTransformer.__init__,\n expected_arguments=[\"self\", \"columns\", \"scaler\", \"scaler_kwargs\"],\n expected_default_values=({},),\n )", "def test_arguments(self):\n\n h.test_function_arguments(\n func=ScalingTransformer.fit,\n expected_arguments=[\"self\", \"X\", \"y\"],\n expected_default_values=(None,),\n )", "def is_scale_enabled(self) -> bool:\r\n ...", "async def test_floating_point_scale(hass, mock_hub):\n register_config = {\n CONF_COUNT: 1,\n CONF_DATA_TYPE: DATA_TYPE_INT,\n CONF_SCALE: 2.4,\n CONF_OFFSET: 0,\n CONF_PRECISION: 2,\n }\n await run_test(\n hass,\n mock_hub,\n register_config,\n SENSOR_DOMAIN,\n register_words=[1],\n expected=\"2.40\",\n )", "def test_multi_apm():\n\n components_1 = {\n \"scale\": mock_component(),\n \"decay\": mock_component(),\n \"absorption\": mock_component(),\n }\n components_2 = {\"scale\": mock_component(), \"decay\": mock_component()}\n\n 
multi_apm = multi_active_parameter_manager(\n ScalingTarget(),\n [components_1, components_2],\n [[\"scale\", \"decay\"], [\"scale\"]],\n active_parameter_manager,\n )\n\n # Test correct setup of apm_list attribute.\n for apm in multi_apm.apm_list:\n assert isinstance(apm, active_parameter_manager)\n assert len(multi_apm.apm_list) == 2\n assert multi_apm.components_list == [\"scale\", \"decay\", \"scale\"]\n assert multi_apm.n_active_params == 3\n assert multi_apm.apm_data[0] == {\"start_idx\": 0, \"end_idx\": 2}\n assert multi_apm.apm_data[1] == {\"start_idx\": 2, \"end_idx\": 3}\n\n # Test parameter selection.\n multi_apm.set_param_vals(flex.double([3.0, 2.5, 2.0]))\n assert multi_apm.get_param_vals() == flex.double([3.0, 2.5, 2.0])\n assert multi_apm.select_parameters(0) == flex.double([3.0, 2.5])\n assert multi_apm.select_parameters(1) == flex.double([2.0])\n\n # Test setting parameter esds.\n multi_apm.set_param_esds(flex.double([0.1, 0.2, 0.3]))\n assert components_1[\"scale\"].free_parameter_esds == flex.double([0.1])\n assert components_1[\"decay\"].free_parameter_esds == flex.double([0.2])\n assert components_2[\"scale\"].free_parameter_esds == flex.double([0.3])\n\n # Test setting var_cov matrices for each component.\n var_cov = flex.double([1.0, 0.5, 0.5, 0.5, 2.0, 0.5, 0.5, 0.5, 3.0])\n var_cov.reshape(flex.grid(3, 3))\n multi_apm.calculate_model_state_uncertainties(var_cov)\n assert components_1[\"scale\"].var_cov_matrix[0, 0] == 1.0\n assert components_1[\"decay\"].var_cov_matrix[0, 0] == 2.0\n assert components_2[\"scale\"].var_cov_matrix[0, 0] == 3.0", "def test_scaling(self):\n def runs_successfully(use_scal, coeffs):\n prob = om.Problem()\n prob.model.add_subsystem('row1', ScalingTestComp(row=1, coeffs=coeffs,\n use_scal=use_scal))\n prob.model.add_subsystem('row2', ScalingTestComp(row=2, coeffs=coeffs,\n use_scal=use_scal))\n prob.model.connect('row1.y', 'row2.x')\n prob.model.connect('row2.y', 'row1.x')\n prob.model.nonlinear_solver = om.NewtonSolver(solve_subsystems=False, maxiter=2, atol=1e-5, rtol=0)\n prob.model.nonlinear_solver.linear_solver = om.ScipyKrylov(maxiter=1)\n\n prob.set_solver_print(level=0)\n\n prob.setup()\n prob.run_model()\n\n return np.linalg.norm(prob.model._residuals.asarray()) < 1e-5\n\n # ---------------------------\n # coeffs: r1, r2, c1, c2\n coeffs = [1.e0, 1.e0, 1.e0, 1.e0]\n\n # Don't use scaling - but there's no need\n use_scal = False\n self.assertTrue(runs_successfully(use_scal, coeffs))\n # Use scaling - but there's no need\n use_scal = True\n self.assertTrue(runs_successfully(use_scal, coeffs))\n\n # ---------------------------\n # coeffs: r1, r2, c1, c2 - test output scaling:\n coeffs = [1.e0, 1.e0, 1.e10, 1.e0]\n\n # Don't use scaling - but output scaling needed\n use_scal = False\n self.assertTrue(not runs_successfully(use_scal, coeffs))\n # Use scaling - output scaling works successfully\n use_scal = True\n self.assertTrue(runs_successfully(use_scal, coeffs))\n\n # ---------------------------\n # coeffs: r1, r2, c1, c2 - test residual scaling:\n coeffs = [1.e10, 1.e0, 1.e10, 1.e0]\n\n # Don't use scaling - but residual scaling needed\n use_scal = False\n self.assertTrue(not runs_successfully(use_scal, coeffs))\n # Use scaling - residual scaling works successfully\n use_scal = True\n self.assertTrue(runs_successfully(use_scal, coeffs))", "def _validate_params(self, request_set, target_set=None, context=None):\n\n # Perform first-pass validation in Function.__init__():\n # - returns full set of params based on subclass 
paramClassDefaults\n super(Mechanism, self)._validate_params(request_set,target_set,context)\n\n params = target_set\n\n #region VALIDATE TIME SCALE\n try:\n param_value = params[TIME_SCALE]\n except KeyError:\n if COMMAND_LINE in context:\n pass\n else:\n self.timeScale = timeScaleSystemDefault\n else:\n if isinstance(param_value, TimeScale):\n self.timeScale = params[TIME_SCALE]\n else:\n if self.prefs.verbosePref:\n print(\"Value for {0} ({1}) param of {2} must be of type {3}; default will be used: {4}\".\n format(TIME_SCALE, param_value, self.name, type(TimeScale), timeScaleSystemDefault))\n #endregion\n\n #region VALIDATE INPUT STATE(S)\n\n # MODIFIED 6/10/16\n # FIX: SHOULD CHECK LENGTH OF INPUT_STATES PARAM (LIST OF NAMES OR SPECIFICATION DICT) AGAINST LENGTH OF\n # FIX: self.variable 2D ARRAY AND COMPARE variable SPECS, IF PROVIDED, WITH CORRESPONDING ELEMENTS OF\n # FIX: self.variable 2D ARRAY\n try:\n param_value = params[INPUT_STATES]\n\n except KeyError:\n if COMMAND_LINE in context:\n pass\n else:\n # INPUT_STATES not specified:\n # - set to None, so that it is set to default (self.variable) in instantiate_inputState\n # - if in VERBOSE mode, warn in instantiate_inputState, where default value is known\n params[INPUT_STATES] = None\n\n else:\n # INPUT_STATES is specified, so validate:\n # If it is a single item or a non-OrderedDict, place in a list (for use here and in instantiate_inputState)\n if not isinstance(param_value, (list, OrderedDict)):\n param_value = [param_value]\n # Validate each item in the list or OrderedDict\n # Note:\n # * number of inputStates is validated against length of the owner mechanism's execute method variable (EMV)\n # in instantiate_inputState, where an inputState is assigned to each item (value) of the EMV\n i = 0\n for key, item in param_value if isinstance(param_value, dict) else enumerate(param_value):\n from PsyNeuLink.Components.States.InputState import InputState\n # If not valid...\n if not ((isclass(item) and (issubclass(item, InputState) or # InputState class ref\n issubclass(item, Projection))) or # Project class ref\n isinstance(item, InputState) or # InputState object\n isinstance(item, dict) or # InputState specification dict\n isinstance(item, ParamValueProjection) or # ParamValueProjection tuple\n isinstance(item, str) or # Name (to be used as key in inputStates dict)\n iscompatible(item, **{kwCompatibilityNumeric: True})): # value\n # set to None, so it is set to default (self.variable) in instantiate_inputState\n param_value[key] = None\n if self.prefs.verbosePref:\n print(\"Item {0} of {1} param ({2}) in {3} is not a\"\n \" InputState, specification dict or value, nor a list of dict of them; \"\n \"variable ({4}) of execute method for {5} will be used\"\n \" to create a default outputState for {3}\".\n format(i,\n INPUT_STATES,\n param_value,\n self.__class__.__name__,\n self.variable,\n self.execute.__self__.name))\n i += 1\n params[INPUT_STATES] = param_value\n #endregion\n\n #region VALIDATE EXECUTE METHOD PARAMS\n try:\n function_param_specs = params[FUNCTION_PARAMS]\n except KeyError:\n if COMMAND_LINE in context:\n pass\n elif self.prefs.verbosePref:\n print(\"No params specified for {0}\".format(self.__class__.__name__))\n else:\n if not (isinstance(function_param_specs, dict)):\n raise MechanismError(\"{0} in {1} must be a dict of param specifications\".\n format(FUNCTION_PARAMS, self.__class__.__name__))\n # Validate params\n from PsyNeuLink.Components.States.ParameterState import ParameterState\n for param_name, 
param_value in function_param_specs.items():\n try:\n default_value = self.paramInstanceDefaults[FUNCTION_PARAMS][param_name]\n except KeyError:\n raise MechanismError(\"{0} not recognized as a param of execute method for {1}\".\n format(param_name, self.__class__.__name__))\n if not ((isclass(param_value) and\n (issubclass(param_value, ParameterState) or\n issubclass(param_value, Projection))) or\n isinstance(param_value, ParameterState) or\n isinstance(param_value, Projection) or\n isinstance(param_value, dict) or\n isinstance(param_value, ParamValueProjection) or\n iscompatible(param_value, default_value)):\n params[FUNCTION_PARAMS][param_name] = default_value\n if self.prefs.verbosePref:\n print(\"{0} param ({1}) for execute method {2} of {3} is not a ParameterState, \"\n \"projection, ParamValueProjection, or value; default value ({4}) will be used\".\n format(param_name,\n param_value,\n self.execute.__self__.componentName,\n self.__class__.__name__,\n default_value))\n #endregion\n # FIX: MAKE SURE OUTPUT OF EXECUTE FUNCTION / SELF.VALUE IS 2D ARRAY, WITH LENGTH == NUM OUTPUT STATES\n\n #region VALIDATE OUTPUT STATE(S)\n\n # FIX: MAKE SURE # OF OUTPUTS == LENGTH OF OUTPUT OF EXECUTE FUNCTION / SELF.VALUE\n try:\n param_value = params[OUTPUT_STATES]\n\n except KeyError:\n if COMMAND_LINE in context:\n pass\n else:\n # OUTPUT_STATES not specified:\n # - set to None, so that it is set to default (self.value) in instantiate_outputState\n # Notes:\n # * if in VERBOSE mode, warning will be issued in instantiate_outputState, where default value is known\n # * number of outputStates is validated against length of owner mechanism's execute method output (EMO)\n # in instantiate_outputState, where an outputState is assigned to each item (value) of the EMO\n params[OUTPUT_STATES] = None\n\n else:\n # OUTPUT_STATES is specified, so validate:\n # If it is a single item or a non-OrderedDict, place in a list (for use here and in instantiate_outputState)\n if not isinstance(param_value, (list, OrderedDict)):\n param_value = [param_value]\n # Validate each item in the list or OrderedDict\n i = 0\n for key, item in param_value if isinstance(param_value, dict) else enumerate(param_value):\n from PsyNeuLink.Components.States.OutputState import OutputState\n # If not valid...\n if not ((isclass(item) and issubclass(item, OutputState)) or # OutputState class ref\n isinstance(item, OutputState) or # OutputState object\n isinstance(item, dict) or # OutputState specification dict\n isinstance(item, str) or # Name (to be used as key in outputStates dict)\n iscompatible(item, **{kwCompatibilityNumeric: True})): # value\n # set to None, so it is set to default (self.value) in instantiate_outputState\n param_value[key] = None\n if self.prefs.verbosePref:\n print(\"Item {0} of {1} param ({2}) in {3} is not a\"\n \" OutputState, specification dict or value, nor a list of dict of them; \"\n \"output ({4}) of execute method for {5} will be used\"\n \" to create a default outputState for {3}\".\n format(i,\n OUTPUT_STATES,\n param_value,\n self.__class__.__name__,\n self.value,\n self.execute.__self__.name))\n i += 1\n params[OUTPUT_STATES] = param_value", "def test_get_measure_parameters_by_id(self):\n pass", "def test_sample_quality(self):\r\n self.assertEqual(self.test_sample.quality, 'medium')", "def __init__(self, params={}, verbosity=0, testing_level=1, testing_verbosity=1):\r\n self.verbosity = verbosity\r\n self.testing_unit = UnitTests.ParticleSwarmUnitTests(testing_level=testing_level, 
verbosity=testing_verbosity)\r\n\r\n for key, val in params.items():\r\n self.set(key, val) # invoke set so that all continuous checking for changed parameters happens only once\r\n # place\r", "def test_test_group_parameters(self):\n pass", "def describe_scaling_parameters(DomainName=None):\n pass", "def plane_scale(self, scale):\n cmd = '{}testPlaneScale {}'.format(self.console, scale)\n self.write_command(cmd)", "def test_get_scale_factors(generate_workchain, generate_eos_inputs, scaling_inputs, expected):\n inputs = generate_eos_inputs()\n\n # This conditional and conversion is necessary because for `aiida-core<2.0` the `list` type is not automatically\n # serialized to a `List` node. Once we require `aiida-core>=2.0`, this can be removed. The reason we couldn't\n # already simply turn the ``scaling_inputs`` into a ``orm.List`` is that during the parametrization done by pytest\n # no AiiDA profile will have been loaded yet and so creating a node will raise an exception.\n if 'scale_factors' in scaling_inputs and isinstance(scaling_inputs['scale_factors'], list):\n scaling_inputs['scale_factors'] = orm.List(list=scaling_inputs['scale_factors'])\n\n inputs.update(scaling_inputs)\n process = generate_workchain('common_workflows.eos', inputs)\n assert process.get_scale_factors() == expected", "def test_SMEL_args():\n testing_function('sme', bilinear=False)", "def test_arguments(self):\n\n h.test_function_arguments(\n func=ScalingTransformer.transform,\n expected_arguments=[\"self\", \"X\"],\n expected_default_values=None,\n )", "def mock_scaling_component(n_refl):\n component = mock_component()\n component.calculate_scales.return_value = flex.double(n_refl, 1.0)\n component.n_refl = [n_refl]\n return component", "def testSimParametersCorrectlyStored(self):\n sim_pars = self.tree.get_simulation_parameters()\n self.assertEqual(10 ** -8, sim_pars[\"m_probability\"])\n self.assertEqual(160, sim_pars[\"cutoff\"])", "async def test_scale_and_offset(hass, mock_hub):\n register_config = {\n CONF_COUNT: 1,\n CONF_DATA_TYPE: DATA_TYPE_INT,\n CONF_SCALE: 3,\n CONF_OFFSET: 13,\n CONF_PRECISION: 0,\n }\n await run_test(\n hass,\n mock_hub,\n register_config,\n SENSOR_DOMAIN,\n register_words=[7],\n expected=\"34\",\n )", "def testSimParamsStored(self):\n params = self.tree.get_simulation_parameters()\n actual_sim_parameters = dict(\n seed=1,\n task=30,\n output_dir=\"output\",\n speciation_rate=0.1,\n sigma=4.0,\n tau=4.0,\n deme=1,\n sample_size=0.1,\n max_time=2.0,\n dispersal_relative_cost=1.0,\n min_num_species=1,\n habitat_change_rate=0.0,\n gen_since_historical=0.0,\n time_config_file=\"null\",\n coarse_map_file=\"sample/SA_sample_coarse.tif\",\n coarse_map_x=35,\n coarse_map_y=41,\n coarse_map_x_offset=11,\n coarse_map_y_offset=14,\n coarse_map_scale=1.0,\n fine_map_file=\"sample/SA_sample_fine.tif\",\n fine_map_x=13,\n fine_map_y=13,\n fine_map_x_offset=0,\n fine_map_y_offset=0,\n sample_file=\"null\",\n grid_x=13,\n grid_y=13,\n sample_x=13,\n sample_y=13,\n sample_x_offset=0,\n sample_y_offset=0,\n historical_coarse_map=\"none\",\n historical_fine_map=\"none\",\n sim_complete=1,\n dispersal_method=\"normal\",\n m_probability=0.0,\n cutoff=0.0,\n landscape_type=\"tiled_coarse\",\n protracted=0,\n min_speciation_gen=0.0,\n max_speciation_gen=0.0,\n dispersal_map=\"none\",\n )\n for key in params.keys():\n self.assertEqual(params[key], actual_sim_parameters[key], msg=\"Error in {}\".format(key))\n self.assertEqual(self.tree.get_job()[0], 1)\n self.assertEqual(self.tree.get_job()[1], 30)", "def 
test_sample_one_quality(self):\r\n self.assertEqual(self.test_sample.quality, 'medium')", "def testParamsAreStrings(self):\n self.chart.display.extra_params['test'] = 32\n self.assertEqual(self.Param('test'), '32')", "def test_base_hyper_parameters_reg(self):\n hyper_parameter_set = modelgen.generate_base_hyper_parameter_set()\n assert 'regularization_rate' in hyper_parameter_set.keys()", "def test_get_mt_settings(self):\n pass", "def test_set_params_2():\n tpot_obj = TPOTClassifier(generations=2)\n tpot_obj.set_params(generations=3)\n\n assert tpot_obj.generations == 3", "def test_options(self):\n for module in Parameters.__modules__:\n m = getattr(Parameters, module)\n if type(m) == AnyOf:\n for o in m.options:\n setattr(self.p, module, o)\n Parameters(1, **{module: o})", "def testSimParamsStored(self):\n params = self.tree.get_simulation_parameters()\n actual_sim_parameters = dict(\n seed=1,\n task=29,\n output_dir=\"output\",\n speciation_rate=0.1,\n sigma=4.0,\n tau=4.0,\n deme=1,\n sample_size=0.1,\n max_time=2.0,\n dispersal_relative_cost=1.0,\n min_num_species=1,\n habitat_change_rate=0.0,\n gen_since_historical=0.0,\n time_config_file=\"null\",\n coarse_map_file=\"none\",\n coarse_map_x=13,\n coarse_map_y=13,\n coarse_map_x_offset=0,\n coarse_map_y_offset=0,\n coarse_map_scale=1.0,\n fine_map_file=\"sample/SA_sample_fine.tif\",\n fine_map_x=13,\n fine_map_y=13,\n fine_map_x_offset=0,\n fine_map_y_offset=0,\n sample_file=\"null\",\n grid_x=13,\n grid_y=13,\n sample_x=13,\n sample_y=13,\n sample_x_offset=0,\n sample_y_offset=0,\n historical_coarse_map=\"none\",\n historical_fine_map=\"none\",\n sim_complete=1,\n dispersal_method=\"normal\",\n m_probability=0.0,\n cutoff=0.0,\n landscape_type=\"tiled_fine\",\n protracted=0,\n min_speciation_gen=0.0,\n max_speciation_gen=0.0,\n dispersal_map=\"none\",\n )\n for key in params.keys():\n self.assertEqual(params[key], actual_sim_parameters[key], msg=\"Error in {}\".format(key))\n self.assertEqual(self.tree.get_job()[0], 1)\n self.assertEqual(self.tree.get_job()[1], 29)", "def GetScale(self):\n ...", "def test_parameter_read(request):\n print(\"\\n--Starting:\", request.node.name)\n\n params = Parameters()\n print(params.__dict__)\n ## todo write an assert that actually tests something", "def testSimParamsStored(self):\n params = self.tree.get_simulation_parameters()\n actual_sim_parameters = dict(\n seed=1,\n task=1,\n output_dir=\"output\",\n speciation_rate=0.1,\n sigma=4.0,\n tau=4.0,\n deme=1,\n sample_size=1.0,\n max_time=2.0,\n dispersal_relative_cost=1.0,\n min_num_species=1,\n habitat_change_rate=0.0,\n gen_since_historical=0.0,\n time_config_file=\"null\",\n coarse_map_file=\"null\",\n coarse_map_x=20,\n coarse_map_y=20,\n coarse_map_x_offset=0,\n coarse_map_y_offset=0,\n coarse_map_scale=1.0,\n fine_map_file=\"null\",\n fine_map_x=10,\n fine_map_y=10,\n fine_map_x_offset=0,\n fine_map_y_offset=0,\n sample_file=\"null\",\n grid_x=10,\n grid_y=10,\n sample_x=10,\n sample_y=10,\n sample_x_offset=0,\n sample_y_offset=0,\n historical_coarse_map=\"none\",\n historical_fine_map=\"none\",\n sim_complete=1,\n dispersal_method=\"fat-tail\",\n m_probability=0.0,\n cutoff=0.0,\n landscape_type=\"infinite\",\n protracted=0,\n min_speciation_gen=0.0,\n max_speciation_gen=0.0,\n dispersal_map=\"none\",\n )\n for key in params.keys():\n self.assertEqual(params[key], actual_sim_parameters[key])\n self.assertEqual(self.tree.get_job()[0], 1)\n self.assertEqual(self.tree.get_job()[1], 1)", "def testSimParamsStored(self):\n params = 
self.tree.get_simulation_parameters()\n actual_sim_parameters = dict(\n seed=3,\n task=3,\n output_dir=\"output\",\n speciation_rate=0.1,\n sigma=4.0,\n tau=4.0,\n deme=1,\n sample_size=0.1,\n max_time=2.0,\n dispersal_relative_cost=1.0,\n min_num_species=1,\n habitat_change_rate=0.0,\n gen_since_historical=0.0,\n time_config_file=\"null\",\n coarse_map_file=\"none\",\n coarse_map_x=13,\n coarse_map_y=13,\n coarse_map_x_offset=0,\n coarse_map_y_offset=0,\n coarse_map_scale=1.0,\n fine_map_file=\"sample/SA_sample_fine.tif\",\n fine_map_x=13,\n fine_map_y=13,\n fine_map_x_offset=0,\n fine_map_y_offset=0,\n sample_file=\"null\",\n grid_x=13,\n grid_y=13,\n sample_x=13,\n sample_y=13,\n sample_x_offset=0,\n sample_y_offset=0,\n historical_coarse_map=\"none\",\n historical_fine_map=\"none\",\n sim_complete=1,\n dispersal_method=\"normal\",\n m_probability=0.0,\n cutoff=0.0,\n landscape_type=\"closed\",\n protracted=0,\n min_speciation_gen=0.0,\n max_speciation_gen=0.0,\n dispersal_map=\"none\",\n )\n for key in params.keys():\n self.assertEqual(params[key], actual_sim_parameters[key], msg=\"Error in {}\".format(key))\n self.assertEqual(self.tree.get_job()[0], 3)\n self.assertEqual(self.tree.get_job()[1], 3)", "def c_test_set_inp(self, param, value):\r\n ret = 1\r\n if \"__hash__\" not in dir(param): # param must be hashable\r\n ret = 0\r\n if self.verbosity > 0:\r\n print(\"ERROR: \" + param + \" is not hashable. It will be unable to be set in a dict.\")\r\n else:\r\n if self.verbosity > 1:\r\n print(param + \" is hashable.\")\r\n if param in [\"population_size\", \"time_constraint\", \"generations\", \"point_count\",\r\n \"PSO_VELOCITY_WEIGHT\", \"PSO_INDIVIDUAL_WEIGHT\", \"PSO_GROUP_WEIGHT\"]:\r\n if not ((isinstance(value, int) or\r\n isinstance(value, float) or\r\n isinstance(value, long))):\r\n if self.verbosity > 0:\r\n print(\"ERROR: \" + param + \" needs to be of a number. It is \" + str(value))\r\n ret = 0\r\n else:\r\n if self.verbosity > 1:\r\n print(param + \" is correctly set to a number.\")\r\n if value < 0:\r\n ret = 0\r\n if self.verbosity > 0:\r\n print(\"ERROR: \" + param + \" needs to be greater than zero.\")\r\n else:\r\n if self.verbosity > 1:\r\n print(param + \" is greater than zero.\")\r\n if param in [\"population_size\", \"generations\", \"point_count\"]:\r\n if not isinstance(value, int):\r\n ret = 0\r\n if self.verbosity > 0:\r\n print(\"ERROR: \" + param + \" needs to be an integer. 
It is \" + str(value))\r\n else:\r\n if self.verbosity > 1:\r\n print(param + \" is an integer.\")\r\n if param in [\"fitness_function\", \"weighting_bias\"]:\r\n if not callable(value):\r\n ret = 0\r\n if self.verbosity > 0:\r\n print(\"ERROR: \" + param + \" needs to be a callable function.\")\r\n else:\r\n if self.verbosity > 1:\r\n print(param + \" is a callable function.\")\r\n if param == \"end_condition\":\r\n if value not in [\"time_constraint\", \"generations\"]:\r\n ret = 0\r\n if self.verbosity > 0:\r\n print(\"ERROR: \" + param + \" needs to be 'time_constraint' or 'generations'\")\r\n else:\r\n if self.verbosity > 1:\r\n print(\"ERROR: \" + param + \" is a correct string.\")\r\n if param == \"seed\":\r\n if not (value is None or isinstance(value, int)):\r\n ret = 0\r\n if self.verbosity > 0:\r\n print(\"ERROR: \" + param + \" is incorrectly set.\")\r\n else:\r\n if self.verbosity > 1:\r\n print(param + \" is correctly set.\")\r\n return ret", "def test_reset_params(self):\n\t\t\n\t\tparams = DEFAULT_PARAMS.copy()\n\t\tparams['fit']=PL\n\t\tvalid = self.watcher.valid_params(params)\n\t\tself.assertTrue(valid)\n\t\tparams = self.watcher.normalize_params(params)\n\t\tself.assertEqual(params['fit'], POWER_LAW)\n\t\t\n\t\tparams = DEFAULT_PARAMS.copy()\n\t\tparams['fit']=TPL\n\t\tvalid = self.watcher.valid_params(params)\n\t\tself.assertFalse(valid)\n\t\t\n\t\tparams = DEFAULT_PARAMS.copy()\n\t\tparams['fit']=TPL\n\t\tparams[PL_PACKAGE]=POWERLAW_PACKAGE\n\t\tparams[XMAX]=XMAX_FORCE\n\n\t\tvalid = self.watcher.valid_params(params)\n\t\tself.assertTrue(valid)\n\t\tparams = self.watcher.normalize_params(params)\n\t\tself.assertEqual(params['fit'], TRUNCATED_POWER_LAW)", "def test_parameter(self):\n # Setup test\n infilename = os.path.join(_SAMPLE_FILES_DIR, \"reg_good_simple.xml\")\n filename = os.path.join(_TMP_DIR, \"reg_parameter.xml\")\n out_source_name = \"physics_types_parameter\"\n in_source = os.path.join(_SAMPLE_FILES_DIR, out_source_name + '.F90')\n out_source = os.path.join(_TMP_DIR, out_source_name + '.F90')\n in_meta = os.path.join(_SAMPLE_FILES_DIR, out_source_name + '.meta')\n out_meta = os.path.join(_TMP_DIR, out_source_name + '.meta')\n remove_files([out_source, out_meta])\n tree, root = read_xml_file(infilename)\n # Change output filename and add a parameter with an initial value\n for obj in root:\n oname = obj.get('name')\n if (obj.tag == 'file') and (oname == 'physics_types_simple'):\n # Reset the filename\n obj.set('name', out_source_name)\n # Add a new variable with an unknown dimension\n new_var = ET.SubElement(obj, \"variable\")\n new_var.set(\"local_name\", \"pver\")\n new_var.set(\"standard_name\", \"vertical_layer_dimension\")\n new_var.set(\"units\", \"count\")\n new_var.set(\"type\", \"integer\")\n new_var.set(\"allocatable\", \"parameter\")\n dims_elem = ET.SubElement(new_var, \"initial_value\")\n dims_elem.text = '42'\n break\n # End if\n # End for\n tree.write(filename)\n # Run test\n retcode, files = gen_registry(filename, 'eul', {}, _TMP_DIR, 2,\n _SRC_MOD_DIR, _CAM_ROOT,\n loglevel=logging.ERROR,\n error_on_no_validate=True)\n # Check return code\n amsg = \"Test failure: retcode={}\".format(retcode)\n self.assertEqual(retcode, 0, msg=amsg)\n flen = len(files)\n amsg = \"Test failure: Found {} files, expected 1\".format(flen)\n self.assertEqual(flen, 1, msg=amsg)\n # Make sure each output file was created\n self.assertTrue(os.path.exists(out_meta))\n self.assertTrue(os.path.exists(out_source))\n # For each output file, make sure it matches input 
file\n amsg = \"{} does not match {}\".format(in_meta, out_meta)\n self.assertTrue(filecmp.cmp(in_meta, out_meta, shallow=False), msg=amsg)\n amsg = \"{} does not match {}\".format(in_source, out_source)\n self.assertTrue(filecmp.cmp(in_source, out_source, shallow=False),\n msg=amsg)", "def testSimParamsStored(self):\n params = self.tree.get_simulation_parameters()\n actual_sim_parameters = dict(\n seed=2,\n task=2,\n output_dir=\"output\",\n speciation_rate=0.1,\n sigma=4.0,\n tau=4.0,\n deme=1,\n sample_size=1.0,\n max_time=2.0,\n dispersal_relative_cost=1.0,\n min_num_species=1,\n habitat_change_rate=0.0,\n gen_since_historical=0.0,\n time_config_file=\"null\",\n coarse_map_file=\"null\",\n coarse_map_x=20,\n coarse_map_y=20,\n coarse_map_x_offset=0,\n coarse_map_y_offset=0,\n coarse_map_scale=1.0,\n fine_map_file=\"null\",\n fine_map_x=10,\n fine_map_y=10,\n fine_map_x_offset=0,\n fine_map_y_offset=0,\n sample_file=\"null\",\n grid_x=10,\n grid_y=10,\n sample_x=10,\n sample_y=10,\n sample_x_offset=0,\n sample_y_offset=0,\n historical_coarse_map=\"none\",\n historical_fine_map=\"none\",\n sim_complete=1,\n dispersal_method=\"normal\",\n m_probability=1.0,\n cutoff=0.0,\n landscape_type=\"infinite\",\n protracted=0,\n min_speciation_gen=0.0,\n max_speciation_gen=0.0,\n dispersal_map=\"none\",\n )\n for key in params.keys():\n self.assertEqual(params[key], actual_sim_parameters[key], msg=\"Error in {}\".format(key))\n self.assertEqual(self.tree.get_job()[0], 2)\n self.assertEqual(self.tree.get_job()[1], 2)", "def test_all_params(self):\n persistence_helper = PersistenceHelper(use_riak=True, is_sync=True)\n self.assertEqual(persistence_helper.use_riak, True)\n self.assertEqual(persistence_helper.is_sync, True)", "def check(self):\n if 'MISFIT' not in PAR:\n setattr(PAR, 'MISFIT', 'Waveform')\n\n if 'CHANNELS' not in PAR:\n raise ParameterError(PAR, 'CHANNELS')\n\n if 'READER' not in PAR:\n raise ParameterError(PAR, 'READER')\n\n if 'WRITER' not in PAR:\n setattr(PAR, 'WRITER', PAR.READER)\n\n if 'NORMALIZE' not in PAR:\n setattr(PAR, 'NORMALIZE', True)\n\n # mute settings\n if 'MUTE' not in PAR:\n setattr(PAR, 'MUTE', False)\n\n if 'MUTESLOPE' not in PAR:\n setattr(PAR, 'MUTESLOPE', 0.)\n\n if 'MUTECONST' not in PAR:\n setattr(PAR, 'MUTECONST', 0.)\n\n # filter settings\n if 'BANDPASS' not in PAR:\n setattr(PAR, 'BANDPASS', False)\n\n if 'FREQLO' not in PAR:\n setattr(PAR, 'FREQLO', 0.)\n\n if 'FREQHI' not in PAR:\n setattr(PAR, 'FREQHI', 0.)\n\n # assertions\n if PAR.READER not in dir(readers):\n print msg.ReaderError\n raise ParameterError()\n\n if PAR.WRITER not in dir(writers):\n print msg.WriterError\n raise ParameterError()", "def test_api_calls_parameters(self):\n quantum_program = self._get_quantum_program()\n\n # Invoke with hub, group and project parameters.\n quantum_program.set_api(QE_TOKEN, QE_URL, QE_HUB, QE_GROUP, QE_PROJECT)\n\n self.log.info(quantum_program.online_backends())\n self.log.info(quantum_program.get_backend_parameters(self.backend))\n self.log.info(quantum_program.get_backend_calibration(self.backend))", "def test_read_namespaced_scale_scale(self):\n pass", "def test_patch_namespaced_scale_scale(self):\n pass", "def testSimParamsStored(self):\n params = self.tree.get_simulation_parameters()\n actual_sim_parameters = dict(\n seed=4,\n task=4,\n output_dir=\"output\",\n speciation_rate=0.1,\n sigma=4.0,\n tau=4.0,\n deme=1,\n sample_size=0.01,\n max_time=2.0,\n dispersal_relative_cost=1.0,\n min_num_species=1,\n habitat_change_rate=0.0,\n 
gen_since_historical=0.0,\n time_config_file=\"null\",\n coarse_map_file=\"sample/SA_sample_coarse.tif\",\n coarse_map_x=35,\n coarse_map_y=41,\n coarse_map_x_offset=11,\n coarse_map_y_offset=14,\n coarse_map_scale=1.0,\n fine_map_file=\"sample/SA_sample_fine.tif\",\n fine_map_x=13,\n fine_map_y=13,\n fine_map_x_offset=0,\n fine_map_y_offset=0,\n sample_file=\"null\",\n grid_x=13,\n grid_y=13,\n sample_x=13,\n sample_y=13,\n sample_x_offset=0,\n sample_y_offset=0,\n historical_coarse_map=\"none\",\n historical_fine_map=\"none\",\n sim_complete=1,\n dispersal_method=\"fat-tail\",\n m_probability=0.0,\n cutoff=0.0,\n landscape_type=\"closed\",\n protracted=0,\n min_speciation_gen=0.0,\n max_speciation_gen=0.0,\n dispersal_map=\"none\",\n )\n for key in params.keys():\n self.assertEqual(\n params[key],\n actual_sim_parameters[key],\n msg=\"Error in {}: {}!={}\".format(key, params[key], actual_sim_parameters[key]),\n )\n self.assertEqual(self.tree.get_job()[0], 4, msg=\"Job number not stored correctly.\")\n self.assertEqual(self.tree.get_job()[1], 4, msg=\"Job number not stored correctly.\")", "def test_get_scaling_policy(self):\n self.assertEquals(\n self.get_policy_response.status_code, 200,\n msg='Get scaling policy failed with {0} for group {1}'\n .format(self.get_policy_response.status_code,\n self.group.id))\n self.validate_headers(self.get_policy_response.headers)\n self.assert_get_policy(self.policy, self.get_policy)", "def param_vals_test(param_dict):\n file_msg = param_dict['Prog_msg']\n ##\n ## Testing if `wget` exists in the system\n if is_tool('wget'):\n pass\n else:\n msg = '{0} You need to have `wget` installed in your system to run '\n msg += 'this script. You can download the entire dataset at {1}.\\n\\t\\t'\n msg += 'Exiting....'\n msg = msg.format(file_msg, param_dict['url_catl'])\n raise ValueError(msg)\n ##\n ## Checking that Esmeralda is not ran when doing 'SO' halos\n if (param_dict['halotype'] == 'so') and (param_dict['sample'] == 20):\n msg = '{0} The `halotype`==`so` and `sample`==`20` are no compatible '\n msg += 'input parameters.\\n\\t\\t'\n msg += 'Exiting...'\n msg = msg.format(file_msg)\n raise ValueError(msg)\n ##\n ## Checking that `hod_model_n` is set to zero for FoF-Halos\n if (param_dict['halotype'] == 'fof') and (param_dict['hod_n'] != 0):\n msg = '{0} The `halotype`==`{1}` and `hod_n`==`{2}` are no compatible '\n msg += 'input parameters.\\n\\t\\t'\n msg += 'Exiting...'\n msg = msg.format( file_msg,\n param_dict['halotype'],\n param_dict['hod_n'])\n raise ValueError(msg)\n ##\n ## Checking input different types of `test_train_opt`\n #\n # `sample_frac`\n if (param_dict['test_train_opt'] == 'sample_frac'):\n # `sample_frac`\n if not ((param_dict['sample_frac'] > 0) and\n (param_dict['sample_frac'] <= 1.)):\n msg = '{0} `sample_frac` ({1}) must be between (0,1]'.format(\n file_msg, param_dict['sample_frac'])\n raise ValueError(msg)\n # `test_size`\n if not ((param_dict['test_size'] > 0) and\n (param_dict['test_size'] < 1)):\n msg = '{0} `test_size` ({1}) must be between (0,1)'.format(\n file_msg, param_dict['test_size'])\n raise ValueError(msg)\n #\n # boxes_n\n if (param_dict['test_train_opt'] == 'boxes_n'):\n box_n_arr = num.array(param_dict['box_idx'].split('_')).astype(int)\n box_n_diff = num.diff(box_n_arr)\n # Larger than zero\n if not (all(box_n_arr >= 0)):\n msg = '{0} All values in `box_idx` ({1}) must be larger than 0!'\n msg = msg.format(file_msg, box_n_arr)\n raise ValueError(msg)\n # Difference between elements\n if not (all(box_n_diff > 0)):\n 
msg = '{0} The value of `box_idx` ({1}) is not valid!'.format(\n file_msg, param_dict['box_idx'])\n raise ValueError(msg)\n #\n # `box_test`\n if (param_dict['test_train_opt'] == 'box_sample_frac'):\n # Value of `box_test`\n if not (param_dict['box_test'] >= 0):\n msg = '{0} `box_test` ({1}) must be larger or equal to `0`.'\n msg = msg.format(file_msg, param_dict['box_test'])\n raise ValueError(msg)\n # Testing `test_size`\n # `test_size`\n if not ((param_dict['test_size'] > 0) and\n (param_dict['test_size'] < 1)):\n msg = '{0} `test_size` ({1}) must be between (0,1)'.format(\n file_msg, param_dict['test_size'])\n raise ValueError(msg)\n ##\n ## Checking that `kf_splits` is larger than `2`\n if (param_dict['kf_splits'] < 2):\n msg = '{0} The value for `kf_splits` ({1}) must be LARGER than `2`'\n msg += 'Exiting...'\n msg = msg.format(param_dict['Prog_msg'], param_dict['kf_splits'])\n raise ValueError(msg)\n ##\n ## Checking that `n_predict` is not smaller than `1`.\n if (param_dict['n_predict'] < 1):\n msg = '{0} The value for `n_predict` ({1}) must be LARGER than `1`'\n msg += 'Exiting...'\n msg = msg.format(param_dict['Prog_msg'], param_dict['n_predict'])\n raise ValueError(msg)", "def test_randomize_mp_fits(self):\n\t\tdetails = self.watcher.analyze(mp_fit=True, randomize=True, pool=True)\n\t\tself.assertTrue((details.rand_sigma_mp < 1.10).all())\n\t\tself.assertTrue((details.rand_sigma_mp > 0.96).all())\n\t\tself.assertTrue((details.rand_num_spikes.to_numpy() < 80).all())", "def test_configure_to_reconfigure_param(self):\n\n class ToConfigure(object):\n \"\"\"Class to configure.\"\"\"\n\n def __init__(self):\n super(ToConfigure, self).__init__()\n self.test = None\n\n target = ToConfigure()\n\n param = 'test'\n\n conf = configuration(category('TEST', Parameter(param, value=True)))\n\n self.configurable.configure(conf=conf, targets=[target])\n self.assertTrue(target.test)", "def testDispersalParamStorage(self):\n t = CoalescenceTree(self.c)\n self.assertEqual(t.get_simulation_parameters()[\"dispersal_map\"], \"sample/dispersal_fine.tif\")", "def test_to_scaler_non_allowed_value_error(self):\n\n with pytest.raises(\n ValueError,\n match=r\"\"\"scaler should be one of; \\['min_max', 'max_abs', 'standard'\\]\"\"\",\n ):\n\n ScalingTransformer(columns=\"b\", scaler=\"zzz\", scaler_kwargs={\"a\": 1})", "def test_ParameterWidget(self):\n\n instrument = setup_populated_instr_McStas()\n\n instrument.add_parameter(\"string\", \"choice\", options=[\"A\", \"B\", \"Long\"])\n\n parameters = {}\n # get default parameters from instrument\n for parameter in instrument.parameters:\n if parameter_has_default(parameter):\n parameters[parameter.name] = get_parameter_default(parameter)\n\n parameter_widgets = []\n parameterwidget_objects = []\n for parameter in instrument.parameters:\n par_widget = ParameterWidget(parameter, parameters)\n parameterwidget_objects.append(par_widget)\n parameter_widgets.append(par_widget.make_widget())\n\n self.assertEqual(parameterwidget_objects[0].name, \"theta\")\n self.assertEqual(parameterwidget_objects[0].default_value, None)\n # Parameter does not exist in parameter dict yet\n with self.assertRaises(KeyError):\n parameters[parameterwidget_objects[0].name]\n\n change = FakeChange(new=222)\n parameterwidget_objects[0].update(change)\n self.assertEqual(parameters[parameterwidget_objects[0].name], 222)\n\n self.assertEqual(parameterwidget_objects[1].name, \"has_default\")\n self.assertEqual(parameterwidget_objects[1].default_value, 37)\n 
self.assertEqual(parameters[parameterwidget_objects[1].name], 37)\n\n change = FakeChange(new=227)\n parameterwidget_objects[1].update(change)\n self.assertEqual(parameters[parameterwidget_objects[1].name], 227)\n\n self.assertEqual(parameterwidget_objects[2].name, \"choice\")\n self.assertEqual(parameterwidget_objects[2].default_value, None)\n with self.assertRaises(KeyError):\n parameters[parameterwidget_objects[2].name]\n\n change = FakeChange(new=\"test\")\n parameterwidget_objects[2].update(change) # Should add necessary extra quotation marks\n self.assertEqual(parameters[parameterwidget_objects[2].name], \"\\\"test\\\"\")", "def test_get_all_scaled_scores_failure(self):\n with mock.patch('score.ScoresGenerator.split_data') \\\n as mock_split_data:\n with mock.patch('score.ScoresGenerator.create_category_scaled_score') \\\n as mock_scaled_category:\n with mock.patch('score.ScoresGenerator.create_total_scaled_score') \\\n as mock_scaled_total:\n for test in self.failure_get_all_scaled_score_test_params:\n score_test = score.ScoresGenerator()\n score_test.get_all_scaled_scores(test[KEY_INPUT])\n self.assertNotAlmostEqual(score_test.SCORES[KEY_WORK_LOAD],\n test[KEY_EXPECTED][KEY_WORK_LOAD], places=1)\n self.assertNotAlmostEqual(score_test.SCORES[KEY_INDEPENDENCE],\n test[KEY_EXPECTED][KEY_INDEPENDENCE], places=1)\n self.assertNotAlmostEqual(score_test.SCORES[KEY_LEADER_SUPPORT],\n test[KEY_EXPECTED][KEY_LEADER_SUPPORT], places=1)\n self.assertNotAlmostEqual(score_test.SCORES[KEY_PEER_RELATIONSHIPS],\n test[KEY_EXPECTED][KEY_PEER_RELATIONSHIPS],\n places=1)\n self.assertNotAlmostEqual(\n score_test.SCORES[KEY_CONTRIBUTION_IMPACT],\n test[KEY_EXPECTED][KEY_CONTRIBUTION_IMPACT], places=1)\n self.assertNotAlmostEqual(\n score_test.SCORES[KEY_DEVELOPMENT],\n test[KEY_EXPECTED][KEY_DEVELOPMENT], places=1)", "def test_get_params(self):\n\n credentials = Mock(base_url=\"\")\n manager = Manager(\"reports\", credentials)\n\n # test no parameters or headers sent by default\n uri, params, method, body, headers, singleobject = manager._get(\"ProfitAndLoss\")\n self.assertEqual(params, {}, \"test params not sent by default\")\n\n # test params can be provided\n passed_params = {\n \"fromDate\": \"2015-01-01\",\n \"toDate\": \"2015-01-15\",\n }\n uri, params, method, body, headers, singleobject = manager._get(\n \"ProfitAndLoss\", params=passed_params\n )\n self.assertEqual(params, passed_params, \"test params can be set\")\n\n # test params respect, but can override, existing configuration\n manager = Manager(\"reports\", credentials, unit_price_4dps=True)\n uri, params, method, body, headers, singleobject = manager._get(\n \"ProfitAndLoss\", params=passed_params\n )\n self.assertEqual(params, {\n \"fromDate\": \"2015-01-01\",\n \"toDate\": \"2015-01-15\",\n \"unitdp\": 4,\n }, \"test params respects existing values\")", "def testSimParamsStored(self):\n params = self.tree.get_simulation_parameters()\n actual_sim_parameters = dict(\n seed=7,\n task=4,\n output_dir=\"output\",\n speciation_rate=0.1,\n sigma=4.0,\n tau=1.0,\n deme=0.01,\n sample_size=1.5,\n max_time=3600.0,\n dispersal_relative_cost=1.0,\n min_num_species=1,\n habitat_change_rate=0.0,\n gen_since_historical=0.0,\n time_config_file=\"null\",\n coarse_map_file=\"sample/SA_sample_coarse.tif\",\n coarse_map_x=35,\n coarse_map_y=41,\n coarse_map_x_offset=11,\n coarse_map_y_offset=14,\n coarse_map_scale=1.0,\n fine_map_file=\"sample/SA_sample_fine.tif\",\n fine_map_x=13,\n fine_map_y=13,\n fine_map_x_offset=0,\n fine_map_y_offset=0,\n 
sample_file=\"null\",\n grid_x=13,\n grid_y=13,\n sample_x=13,\n sample_y=13,\n sample_x_offset=0,\n sample_y_offset=0,\n historical_coarse_map=\"none\",\n historical_fine_map=\"none\",\n sim_complete=1,\n dispersal_method=\"normal\",\n m_probability=0.0,\n cutoff=0.0,\n landscape_type=\"closed\",\n protracted=0,\n min_speciation_gen=0.0,\n max_speciation_gen=0.0,\n dispersal_map=\"none\",\n )\n for key in params.keys():\n self.assertEqual(\n params[key],\n actual_sim_parameters[key],\n msg=\"Error in {}: {}!={}\".format(key, params[key], actual_sim_parameters[key]),\n )\n self.assertEqual(self.tree.get_job()[0], 7, msg=\"Seed not stored correctly.\")\n self.assertEqual(self.tree.get_job()[1], 4, msg=\"Job number not stored correctly.\")", "def test_checkParameters(self):\n self.failUnlessEqual(self.nice.opts['long'], \"Alpha\")\n self.failUnlessEqual(self.nice.opts['another'], \"Beta\")\n self.failUnlessEqual(self.nice.opts['longonly'], \"noshort\")\n self.failUnlessEqual(self.nice.opts['shortless'], \"Gamma\")", "def testSimParamsStored(self):\n params = self.tree.get_simulation_parameters()\n actual_sim_parameters = dict(\n seed=5,\n task=4,\n output_dir=\"output\",\n speciation_rate=0.1,\n sigma=4.0,\n tau=1.0,\n deme=0.25,\n sample_size=1.0,\n max_time=3600.0,\n dispersal_relative_cost=1.0,\n min_num_species=1,\n habitat_change_rate=0.0,\n gen_since_historical=0.0,\n time_config_file=\"null\",\n coarse_map_file=\"sample/SA_sample_coarse.tif\",\n coarse_map_x=35,\n coarse_map_y=41,\n coarse_map_x_offset=11,\n coarse_map_y_offset=14,\n coarse_map_scale=1.0,\n fine_map_file=\"sample/SA_sample_fine.tif\",\n fine_map_x=13,\n fine_map_y=13,\n fine_map_x_offset=0,\n fine_map_y_offset=0,\n sample_file=\"null\",\n grid_x=13,\n grid_y=13,\n sample_x=13,\n sample_y=13,\n sample_x_offset=0,\n sample_y_offset=0,\n historical_coarse_map=\"none\",\n historical_fine_map=\"none\",\n sim_complete=1,\n dispersal_method=\"normal\",\n m_probability=0.0,\n cutoff=0.0,\n landscape_type=\"closed\",\n protracted=0,\n min_speciation_gen=0.0,\n max_speciation_gen=0.0,\n dispersal_map=\"none\",\n )\n for key in params.keys():\n self.assertEqual(\n params[key],\n actual_sim_parameters[key],\n msg=\"Error in {}: {}!={}\".format(key, params[key], actual_sim_parameters[key]),\n )\n self.assertEqual(self.tree.get_job()[0], 5, msg=\"Seed not stored correctly.\")\n self.assertEqual(self.tree.get_job()[1], 4, msg=\"Job number not stored correctly.\")", "def testSimParamsStored(self):\n params = self.tree.get_simulation_parameters()\n actual_sim_parameters = dict(\n seed=2,\n task=5,\n output_dir=\"output\",\n speciation_rate=0.1,\n sigma=4.0,\n tau=4.0,\n deme=1,\n sample_size=1.0,\n max_time=2.0,\n dispersal_relative_cost=1.0,\n min_num_species=1,\n habitat_change_rate=0.0,\n gen_since_historical=0.0,\n time_config_file=\"null\",\n coarse_map_file=\"null\",\n coarse_map_x=20,\n coarse_map_y=20,\n coarse_map_x_offset=0,\n coarse_map_y_offset=0,\n coarse_map_scale=1.0,\n fine_map_file=\"null\",\n fine_map_x=10,\n fine_map_y=10,\n fine_map_x_offset=0,\n fine_map_y_offset=0,\n sample_file=\"null\",\n grid_x=10,\n grid_y=10,\n sample_x=10,\n sample_y=10,\n sample_x_offset=0,\n sample_y_offset=0,\n historical_coarse_map=\"none\",\n historical_fine_map=\"none\",\n sim_complete=1,\n dispersal_method=\"normal\",\n m_probability=0.0,\n cutoff=0.0,\n landscape_type=\"closed\",\n protracted=0,\n min_speciation_gen=0.0,\n max_speciation_gen=0.0,\n dispersal_map=\"none\",\n )\n for key in params.keys():\n 
self.assertEqual(params[key], actual_sim_parameters[key])\n # self.assertDictEqual(params, actual_sim_parameters)\n self.assertEqual(self.tree.get_job()[0], 2)\n self.assertEqual(self.tree.get_job()[1], 5)", "def test_getfloat(self):\n self.assertEqual(self.config.getfloat('advanced','m'),42.0)", "def camera_scale(self, camera):\n cmd = '{}testCameraViewScale {}'.format(self.console, camera)\n self.write_command(cmd)", "def test_test_property():\n atom = ATOMClassifier(X_bin, y_bin, random_state=1)\n atom.run([\"MNB\", \"LR\"])\n assert atom.test.equals(atom.mnb.test)\n assert check_scaling(atom.lr.test)", "def test_pvresize():\n pvdisplay = MagicMock(return_value=False)\n with patch(\"salt.modules.linux_lvm.pvdisplay\", pvdisplay):\n mock = MagicMock(return_value=True)\n with patch.dict(linux_lvm.__salt__, {\"lvm.pvdisplay\": mock}):\n ret = {\n \"stdout\": \"saltines\",\n \"stderr\": \"cheese\",\n \"retcode\": 0,\n \"pid\": \"1337\",\n }\n mock = MagicMock(return_value=ret)\n with patch.dict(linux_lvm.__salt__, {\"cmd.run_all\": mock}):\n assert linux_lvm.pvresize(\"A\") is True", "def testConfigG(self):\n assert type(self.config['maxfps']) == int", "def test_generators_multiple_scales(self, generators, scale_list=None, failure_threshold=0, verbose=True,\n visualisations: str = None):\n # -1 indicates a pass at no scales.\n results_dict = {rng.get_name(): {\n 'max_scale': -1,\n 'results': []\n } for rng in generators}\n total_start = time.time()\n original_scale = self.scale\n if scale_list is None:\n scale_list = [1.0]\n for scale in scale_list:\n # break if no more generators.\n if len(generators) == 0:\n break\n if verbose:\n print(\"Scale:\", scale)\n self.scale = scale\n self.filtration_range = self.create_filtration_range(scale)\n for rng in list(generators):\n start = time.time()\n passes = self.perform_test(rng)\n end = time.time()\n # If the end of file is reached, then set the value to EOF.\n if passes < 0:\n results_dict[rng.get_name()]['max_scale'] = 'EOF'\n elif verbose:\n print('{}:{}/{}'.format(rng.get_name(), passes, self.runs))\n print(\"Time elapsed:\", end - start)\n # add this result to the output.\n results_dict[rng.get_name()]['results'].append('{}/{}'.format(passes, self.runs))\n # produce visualisation if all are wanted.\n if visualisations == 'all':\n self.visualise_failure(rng, os.environ['OUTPUTDIR'])\n if passes <= failure_threshold:\n # produce visualisation if they are created on failure.\n if visualisations == 'fail':\n self.visualise_failure(rng, os.environ['OUTPUTDIR'])\n generators.remove(rng)\n if verbose:\n print(\"Generator removed.\")\n else:\n # record this as the new scale passed at.\n results_dict[rng.get_name()]['max_scale'] = scale\n total_end = time.time()\n total_time = total_end - total_start\n if verbose:\n print(\"Done, total time:\", total_time)\n # return to previous values.\n self.scale = original_scale\n self.filtration_range = self.create_filtration_range(self.scale)\n return results_dict", "def test_set_params():\n\n tpot_obj = TPOTClassifier()\n assert tpot_obj.set_params() is tpot_obj", "def handle_scaling():\n os_type = platform.system()\n if os_type == \"Windows\":\n from ctypes import windll\n windll.user32.SetProcessDPIAware()", "def scaling_enabled(self):\n return False", "def test_set_parameters_in_a_cell():\n with pytest.raises(ValueError):\n topo.Jungle.set_parameters({\"fmax\": 100})\n with pytest.raises(ValueError):\n topo.Jungle.set_parameters({\"f_max\": -100})\n with pytest.raises(ValueError):\n 
topo.Savanna.set_parameters({\"fmax\": 100})\n with pytest.raises(ValueError):\n topo.Savanna.set_parameters({\"f_max\": -100})\n topo.Jungle.set_parameters({\"f_max\": 800})\n topo.Savanna.set_parameters({\"f_max\": 300})\n assert topo.Jungle.parameters[\"f_max\"] == 800\n assert topo.Savanna.parameters[\"f_max\"] == 300", "def check_parameters():\r\n for par in PARAM:\r\n if isinstance(par, ExperimentFrame):\r\n EXP.change_variable(**par())\r\n else:\r\n EXP.change_variable(**par)", "def set_model_parameters(test_type: str, parameter_value: float):\n # assigning default parameters for the model\n som_width = Config.som_width\n som_height = Config.som_height\n n_iter = Config.n_iter\n sigma = Config.sigma\n learning_rate = Config.learning_rate\n\n # assign testing parameter to the model parameter basing on test_parameter value\n if test_type == 'map_size':\n som_width = parameter_value\n som_height = parameter_value\n if test_type == 'n_iter':\n n_iter = parameter_value\n if test_type == 'learning_rate':\n learning_rate = parameter_value / 1000\n if test_type == 'sigma':\n sigma = parameter_value / 100\n return som_width, som_height, n_iter, sigma, learning_rate", "def testSimulationParametersStored(self):\n simulation_parameters = self.tree.get_simulation_parameters()\n actual_sim_parameters = dict(\n seed=6,\n task=6,\n output_dir=\"output\",\n speciation_rate=0.5,\n sigma=4.0,\n tau=4.0,\n deme=1,\n sample_size=0.1,\n max_time=10.0,\n dispersal_relative_cost=1.0,\n min_num_species=1,\n habitat_change_rate=0.0,\n gen_since_historical=0.0,\n time_config_file=\"set\",\n coarse_map_file=\"sample/SA_sample_coarse.tif\",\n coarse_map_x=35,\n coarse_map_y=41,\n coarse_map_x_offset=11,\n coarse_map_y_offset=14,\n coarse_map_scale=1.0,\n fine_map_file=\"sample/SA_sample_fine.tif\",\n fine_map_x=13,\n fine_map_y=13,\n fine_map_x_offset=0,\n fine_map_y_offset=0,\n sample_file=\"null\",\n grid_x=13,\n grid_y=13,\n sample_x=13,\n sample_y=13,\n sample_x_offset=0,\n sample_y_offset=0,\n historical_coarse_map=\"none\",\n historical_fine_map=\"none\",\n sim_complete=1,\n dispersal_method=\"normal\",\n m_probability=0.0,\n cutoff=0.0,\n landscape_type=\"closed\",\n protracted=0,\n min_speciation_gen=0.0,\n max_speciation_gen=0.0,\n dispersal_map=\"none\",\n )\n for key in simulation_parameters.keys():\n self.assertEqual(simulation_parameters[key], actual_sim_parameters[key])\n self.assertDictEqual(simulation_parameters, actual_sim_parameters)\n # self.assertListEqual(simulation_parameters, actual_sim_parameters)\n self.assertEqual(self.tree.get_job()[0], 6)\n self.assertEqual(self.tree.get_job()[1], 6)", "def voltmeter_settings(self, scalefactor, offset):\n if scalefactor is not None and offset is not None:\n if self._request('SM', str(scalefactor), str(offset))[0]:\n return scalefactor, offset\n else:\n done, data = self._request('GM')\n if done:\n return int(data[0]), int(data[1])\n\n raise EvseError", "def ConfigureCustomImageSettings(cam_params, nodemap):\r\n\tprint('\\n*** CONFIGURING CUSTOM IMAGE SETTINGS *** \\n')\r\n\ttry:\r\n\t\tresult = True\r\n\t\twidth_to_set = cam_params[\"frameWidth\"]\r\n\t\theight_to_set = cam_params[\"frameHeight\"]\r\n\r\n\t\t# Set maximum width\r\n\t\t#\r\n\t\t# *** NOTES ***\r\n\t\t# Other nodes, such as those corresponding to image width and height,\r\n\t\t# might have an increment other than 1. In these cases, it can be\r\n\t\t# important to check that the desired value is a multiple of the\r\n\t\t# increment. 
However, as these values are being set to the maximum,\r\n\t\t# there is no reason to check against the increment.\r\n\t\tnode_width = PySpin.CIntegerPtr(nodemap.GetNode('Width'))\r\n\t\tif PySpin.IsAvailable(node_width) and PySpin.IsWritable(node_width):\r\n\t\t\t# width_to_set = node_width.GetMax()\r\n\t\t\twidth_to_set = cam_params[\"frameWidth\"]\r\n\t\t\tnode_width.SetValue(width_to_set)\r\n\t\t\tprint('Width set to %i...' % node_width.GetValue())\r\n\t\telse:\r\n\t\t\t print('Width not available...')\r\n\r\n\t\t# Set maximum height\r\n\t\t# *** NOTES ***\r\n\t\t# A maximum is retrieved with the method GetMax(). A node's minimum and\r\n\t\t# maximum should always be a multiple of its increment.\r\n\t\tnode_height = PySpin.CIntegerPtr(nodemap.GetNode('Height'))\r\n\t\tif PySpin.IsAvailable(node_height) and PySpin.IsWritable(node_height):\r\n\t\t\t# height_to_set = node_height.GetMax()\r\n\t\t\theight_to_set = cam_params[\"frameHeight\"]\r\n\t\t\tnode_height.SetValue(height_to_set)\r\n\t\t\tprint('Height set to %i...' % node_height.GetValue())\r\n\t\telse:\r\n\t\t\tprint('Height not available...')\r\n\r\n\texcept PySpin.SpinnakerException as ex:\r\n\t\tprint('Error: %s' % ex)\r\n\t\treturn False\r\n\r\n\treturn result, width_to_set, height_to_set", "def scale_parameter(self):\n return self._scale_parameter", "def scale_settings(self) -> Optional[pulumi.Input['ScaleSettingsArgs']]:\n return pulumi.get(self, \"scale_settings\")", "def test_parameter_mode_invalid(self, mock_ghn, mock_grnam, mock_pwnam):\n # Should pass\n self.driver.check_for_setup_error()\n # Should throw exceptions\n self.configuration.hgst_space_mode = ''\n self.assertRaises(exception.VolumeDriverException,\n self.driver.check_for_setup_error)\n self.configuration.hgst_space_mode = 'Fred'\n self.assertRaises(exception.VolumeDriverException,\n self.driver.check_for_setup_error)", "def test_properties(self):\n self.assertEqual(LENGTH_KILOMETERS, METRIC_SYSTEM.length_unit)\n self.assertEqual(TEMP_CELSIUS, METRIC_SYSTEM.temperature_unit)\n self.assertEqual(MASS_GRAMS, METRIC_SYSTEM.mass_unit)\n self.assertEqual(VOLUME_LITERS, METRIC_SYSTEM.volume_unit)", "def test_active_inference_SPM_1b(self):", "def c_test_set_inp(self, param, value):\r\n ret = 1\r\n if \"__hash__\" not in dir(param): # param must be hashable\r\n ret = 0\r\n if self.verbosity > 0:\r\n print(\"ERROR: \" + param + \" is not hashable. It will be unable to be set in a dict.\")\r\n else:\r\n if self.verbosity > 1:\r\n print(param + \" is hashable.\")\r\n if param in [\"population_size\", \"time_constraint\", \"generations\", \"point_count\"]:\r\n if not ((isinstance(value, int) or\r\n isinstance(value, float) or\r\n isinstance(value, long))):\r\n if self.verbosity > 0:\r\n print(\"ERROR: \" + param + \" needs to be of a number. It is \" + str(value))\r\n ret = 0\r\n else:\r\n if self.verbosity > 1:\r\n print(param + \" is correctly set to a number.\")\r\n if value < 0:\r\n ret = 0\r\n if self.verbosity > 0:\r\n print(\"ERROR: \" + param + \" needs to be greater than zero.\")\r\n else:\r\n if self.verbosity > 1:\r\n print(param + \" is greater than zero.\")\r\n if param in [\"population_size\", \"generations\", \"point_count\"]:\r\n if not isinstance(value, int):\r\n ret = 0\r\n if self.verbosity > 0:\r\n print(\"ERROR: \" + param + \" needs to be an integer. 
It is \" + str(value))\r\n else:\r\n if self.verbosity > 1:\r\n print(param + \" is an integer.\")\r\n if param in [\"fitness_function\", \"population_function\",\r\n \"mutate_function\", \"cross_function\", \"weighting_bias\"]:\r\n if not callable(value):\r\n ret = 0\r\n if self.verbosity > 0:\r\n print(\"ERROR: \" + param + \" needs to be a callable function.\")\r\n else:\r\n if self.verbosity > 1:\r\n print(param + \" is a callable function.\")\r\n if param == \"end_condition\":\r\n if value not in [\"time_constraint\", \"generations\"]:\r\n ret = 0\r\n if self.verbosity > 0:\r\n print(\"ERROR: \" + param + \" needs to be 'time_constraint' or 'generations'\")\r\n else:\r\n if self.verbosity > 1:\r\n print(\"ERROR: \" + param + \" is a correct string.\")\r\n if param == \"seed\":\r\n if not (value is None or isinstance(value, int)):\r\n ret = 0\r\n if self.verbosity > 0:\r\n print(\"ERROR: \" + param + \" is incorrectly set.\")\r\n else:\r\n if self.verbosity > 1:\r\n print(param + \" is correctly set.\")\r\n return ret", "def param_scale_check(shape_x, shape_scale):\n\n length_x = len(shape_x)\n length_scale = len(shape_scale)\n\n if not(length_scale == 1 and shape_scale[0] == 1):\n if length_x != length_scale:\n raise RuntimeError(\n \"length_x and length_scale must be equal\")\n for i in range(length_scale):\n if shape_scale[i] != shape_x[i] and shape_scale[i] != 1:\n raise RuntimeError(\n \"shape_scale is not match to broadcast\")", "def tune_and_find_parameter(self,algo_name, algo, rating_data,param_grid):\n\n\n print(\"tuning for\", algo_name, \"hyperparameters\")\n\n # algo: algo class name\n grid_search = GridSearchCV(algo, param_grid, measures=['rmse', 'mae'])\n grid_search.fit(rating_data)\n\n print('best RMSE for ', algo_name, ' ', grid_search.best_score['rmse'])\n\n best_params = grid_search.best_params['rmse']\n # print the best set of parameters\n print(\"best params:\", best_params)\n return best_params", "def testParametersCorrectlyStored(self):\n params = self.tree.get_community_parameters(1)\n self.assertEqual(0.1, params[\"speciation_rate\"])\n self.assertEqual(0, params[\"metacommunity_reference\"])\n params = self.tree.get_community_parameters(2)\n self.assertEqual(0.5, params[\"speciation_rate\"])\n self.assertEqual(0, params[\"metacommunity_reference\"])\n params = self.tree.get_community_parameters(3)\n self.assertEqual(0.9, params[\"speciation_rate\"])\n self.assertEqual(0, params[\"metacommunity_reference\"])\n params = self.tree.get_community_parameters(4)\n self.assertEqual(0.1, params[\"speciation_rate\"])\n self.assertEqual(1, params[\"metacommunity_reference\"])\n params = self.tree.get_community_parameters(5)\n self.assertEqual(0.5, params[\"speciation_rate\"])\n self.assertEqual(1, params[\"metacommunity_reference\"])\n params = self.tree.get_community_parameters(6)\n self.assertEqual(0.9, params[\"speciation_rate\"])\n self.assertEqual(1, params[\"metacommunity_reference\"])", "def _set_params(self, estimator_args, scaler_args, execution_args, metric_args=None, dim_reduction_args=None):\n \n # Set default values which will be used if execution arguments are not passed\n \n # Default parameters:\n self.model.overwrite = True\n self.model.debug = False\n self.model.test_size = 0.33\n self.model.cv = 0\n self.model.time_series_split = 0\n self.model.max_train_size = None\n self.model.random_state = 42\n self.model.compress = 3\n self.model.retain_data = False\n self.model.scale_hashed = True\n self.model.scale_vectors = True\n self.model.scaler = 
\"StandardScaler\"\n self.model.scaler_kwargs = {}\n self.model.estimator_kwargs = {}\n self.model.missing = \"zeros\"\n self.model.calc_feature_importances = False\n self.model.importances_n_repeats = 30\n self.model.lags= None\n self.model.lag_target = False\n self.model.scale_target = False\n self.model.scale_lag_target= True\n self.model.make_stationary = None\n self.model.stationarity_lags = [1]\n self.model.using_keras = False\n self.model.current_sample_as_input = True\n self.model.prediction_periods = 1\n \n # Default metric parameters:\n if metric_args is None:\n self.model.metric_args = {}\n \n # Set execution parameters\n \n # If the execution key word arguments were included in the request, get the parameters and values\n if len(execution_args) > 0:\n \n # Transform the string of arguments into a dictionary\n execution_args = utils.get_kwargs(execution_args)\n \n # Set the overwite parameter if any existing model with the specified name should be overwritten\n if 'overwrite' in execution_args:\n self.model.overwrite = 'true' == execution_args['overwrite'].lower()\n \n # Set the test_size parameter that will be used to split the samples into training and testing data sets\n # Default value is 0.33, i.e. we use 66% of the samples for training and 33% for testing\n if 'test_size' in execution_args:\n self.model.test_size = utils.atof(execution_args['test_size'])\n\n # Enable K-fold cross validation. For more information see: http://scikit-learn.org/stable/modules/cross_validation.html#multimetric-cross-validation\n # Default value is 0 in which case a simple holdout strategy based on the test_size parameter is used.\n # If cv > 0 then the model is validated used K = cv folds and the test_size parameter is ignored.\n if 'cv' in execution_args:\n self.model.cv = utils.atoi(execution_args['cv'])\n \n # Enable timeseries backtesting using TimeSeriesSplit. https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.TimeSeriesSplit.html\n # This will select the a validation strategy appropriate for time series and sequential data.\n # The feature definitions must include an 'identifier' field which can be used to sort the series into the correct order.\n # The integer supplied in this parameter will split the data into the given number of subsets for training and testing.\n if 'time_series_split' in execution_args:\n self.model.time_series_split = utils.atoi(execution_args['time_series_split'])\n\n # This parameter can be used together with time_series_split.\n # It specifies the maximum samples to be used for training in each split, which allows for rolling/ walk forward validation.\n if 'max_train_size' in execution_args:\n self.model.max_train_size = utils.atoi(execution_args['max_train_size'])\n\n # Add lag observations to the feature matrix. Only applicable for Keras models.\n # An identifier field must be included in the feature definitions to correctly sort the data for this capability.\n # For e.g. 
if lags=2, features from the previous two samples will be concatenated as input features for the current sample.\n # This is useful for framing timeseries and sequence prediction problems into 3D or 4D data required for deep learning.\n if 'lags' in execution_args:\n self.model.lags = utils.atoi(execution_args['lags'])\n\n # Include targets in the lag observations\n # If True an additional feature will be created for each sample using the previous value of y \n if 'lag_target' in execution_args:\n self.model.lag_target = 'true' == execution_args['lag_target'].lower()\n \n # Scale the target before fitting\n # The scaling will be inversed before predictions so they are returned in the original scale \n if 'scale_target' in execution_args:\n self.model.scale_target = 'true' == execution_args['scale_target'].lower()\n\n # Scale lag values of the targets before fitting\n # Even if scale_target is set to false, the lag values of targets being used as features can be scaled by setting this to true \n if 'scale_lag_target' in execution_args:\n self.model.scale_lag_target = 'true' == execution_args['scale_lag_target'].lower()\n\n # Make the target series more stationary. This only applies to sequence prediction problems.\n # Valid values are 'log' in which case we apply a logarithm to the target values,\n # or 'difference' in which case we transform the targets into variance from the previous value.\n # The transformation will be reversed before returning predictions.\n if 'make_stationary' in execution_args:\n self.model.make_stationary = execution_args['make_stationary'].lower()\n\n # Provide lags periods for differencing\n # By default the difference will be done with lag = 1. Alternate lags can be provided by passing a list of lags as a list.\n # e.g. 'stationarity_lags=1;12|list|int'\n if 'stationarity_lags' in execution_args:\n self.model.stationarity_lags = utils.get_kwargs_by_type({'stationarity_lags': execution_args['stationarity_lags']})['stationarity_lags']\n\n # Specify if the current sample should be used as input to the model\n # This is to allow for models that only use lag observations to make future predictions\n if 'current_sample_as_input' in execution_args:\n self.model.current_sample_as_input = 'true' == execution_args['current_sample_as_input'].lower()\n\n # Specify the number of predictions expected from the model\n # This can be used to get a model to predict the next m periods given inputs for the previous n periods.\n # This is only valid for Keras models which have a final output layer with more than one node\n if 'prediction_periods' in execution_args:\n self.model.prediction_periods = utils.atoi(execution_args['prediction_periods'])\n \n # Seed used by the random number generator when generating the training testing split\n if 'random_state' in execution_args:\n self.model.random_state = utils.atoi(execution_args['random_state'])\n \n # Compression level between 1-9 used by joblib when saving the model\n if 'compress' in execution_args:\n self.model.compress = utils.atoi(execution_args['compress'])\n \n # Flag to determine if the training and test data should be saved in the model\n if 'retain_data' in execution_args:\n self.model.retain_data = 'true' == execution_args['retain_data'].lower()\n\n # Flag to determine if feature importances should be calculated when the fit method is called\n if 'calculate_importances' in execution_args:\n self.model.calc_feature_importances = 'true' == execution_args['calculate_importances'].lower()\n\n # Sets the number of times a 
feature is randomly shuffled during the feature importance calculation\n if 'importances_n_repeats' in execution_args:\n self.model.importances_n_repeats = utils.atoi(execution_args['importances_n_repeats'])\n \n # Set the debug option for generating execution logs\n # Valid values are: true, false\n if 'debug' in execution_args:\n self.model.debug = 'true' == execution_args['debug'].lower()\n \n # Additional information is printed to the terminal and logs if the paramater debug = true\n if self.model.debug:\n # Increment log counter for the class. Each instance of the class generates a new log.\n self.__class__.log_no += 1\n\n # Create a log file for the instance\n # Logs will be stored in ..\\logs\\SKLearn Log <n>.txt\n self.logfile = os.path.join(os.getcwd(), 'logs', 'SKLearn Log {}.txt'.format(self.log_no))\n \n # Create dictionary of parameters to display for debug\n self.exec_params = {\"overwrite\":self.model.overwrite, \"test_size\":self.model.test_size, \"cv\":self.model.cv,\\\n \"time_series_split\": self.model.time_series_split, \"max_train_size\":self.model.max_train_size, \"lags\":self.model.lags,\\\n \"lag_target\":self.model.lag_target, \"scale_target\":self.model.scale_target, \"make_stationary\":self.model.make_stationary,\\\n \"random_state\":self.model.random_state, \"compress\":self.model.compress, \"retain_data\":self.model.retain_data,\\\n \"calculate_importances\": self.model.calc_feature_importances, \"importances_n_repeats\": self.model.importances_n_repeats,\\\n \"debug\":self.model.debug}\n\n self._print_log(1)\n \n # If the scaler key word arguments were included in the request, get the parameters and values\n if len(scaler_args) > 0:\n \n # Transform the string of arguments into a dictionary\n scaler_args = utils.get_kwargs(scaler_args)\n \n # Set scaler arguments that will be used when preprocessing the data\n # Valid values are: StandardScaler, MinMaxScaler, MaxAbsScaler, RobustScaler and QuantileTransformer\n # More information here: http://scikit-learn.org/stable/modules/preprocessing.html\n if 'scaler' in scaler_args:\n self.model.scaler = scaler_args.pop('scaler')\n \n if 'missing' in scaler_args:\n self.model.missing = scaler_args.pop('missing').lower()\n \n if 'scale_hashed' in scaler_args:\n self.model.scale_hashed = 'true' == scaler_args.pop('scale_hashed').lower()\n \n if 'scale_vectors' in scaler_args:\n self.model.scale_vectors = 'true' == scaler_args.pop('scale_vectors').lower()\n \n # Get the rest of the scaler parameters, converting values to the correct data type\n self.model.scaler_kwargs = utils.get_kwargs_by_type(scaler_args) \n else:\n err = \"Arguments for scaling did not include the scaler name e.g StandardScaler\"\n raise Exception(err)\n \n # If the estimator key word arguments were included in the request, get the parameters and values\n if len(estimator_args) > 0:\n \n # Transform the string of arguments into a dictionary\n estimator_args = utils.get_kwargs(estimator_args)\n \n # Set estimator arguments that will be used when preprocessing the data\n # The parameters available will depend on the selected estimator\n # More information here: http://scikit-learn.org/stable/modules/classes.html#api-reference\n if 'estimator' in estimator_args:\n self.model.estimator = estimator_args.pop('estimator')\n \n # Set the estimator type for the model\n if self.model.estimator in self.classifiers:\n self.model.estimator_type = \"classifier\"\n elif self.model.estimator in self.regressors:\n self.model.estimator_type = \"regressor\"\n elif 
self.model.estimator in self.decomposers:\n self.model.estimator_type = \"decomposer\"\n elif self.model.estimator in self.clusterers:\n self.model.estimator_type = \"clusterer\"\n else:\n err = \"Unknown estimator class: {0}\".format(self.model.estimator)\n raise Exception(err)\n\n # Get the rest of the estimator parameters, converting values to the correct data type\n self.model.estimator_kwargs = utils.get_kwargs_by_type(estimator_args) \n else:\n err = \"Arguments for estimator did not include the estimator class e.g. RandomForestClassifier\"\n raise Exception(err)\n \n # If key word arguments for model evaluation metrics are included in the request, get the parameters and values\n if metric_args is not None and len(metric_args) > 0:\n # Transform the string of arguments into a dictionary\n metric_args = utils.get_kwargs(metric_args)\n \n # Get the metric parameters, converting values to the correct data type\n self.model.metric_args = utils.get_kwargs_by_type(metric_args) \n \n # If key word arguments for dimensionality reduction are included in the request, get the parameters and values\n if dim_reduction_args is not None and len(dim_reduction_args) > 0:\n # Transform the string of arguments into a dictionary\n dim_reduction_args = utils.get_kwargs(dim_reduction_args)\n \n # Set dim_reduction arguments that will be used after preprocessing the data\n # The parameters available will depend on the selected dimensionality reduction method\n # Acceptable classes are PCA, KernelPCA, IncrementalPCA, TruncatedSVD\n # More information here: http://scikit-learn.org/stable/modules/classes.html#api-reference\n if 'reduction' in dim_reduction_args:\n self.model.reduction = dim_reduction_args.pop('reduction')\n \n # Get the rest of the dim_reduction parameters, converting values to the correct data type\n self.model.dim_reduction_args = utils.get_kwargs_by_type(dim_reduction_args) \n else:\n err = \"Arguments for dimensionality reduction did not include the class e.g. 
PCA\"\n raise Exception(err)\n \n # Debug information is printed to the terminal and logs if the paramater debug = true\n if self.model.debug:\n self._print_log(2)", "def test019(testDir, dirDict, pflag):\n shutil.copy(osp.join(dirDict[\"baseConfig\"], \"params_system.cfg\"), testDir)\n shutil.copy(osp.join(dirDict[\"baseConfig\"], \"params_c.cfg\"), testDir)\n psys = osp.join(testDir, \"params_system.cfg\")\n ptrode = osp.join(testDir, \"params_c.cfg\")\n P_s = IO.get_config(psys)\n P_s.set(\"Sim Params\", \"Nvol_c\", \"3\")\n P_s.set(\"Sim Params\", \"Nvol_s\", \"3\")\n P_s.set(\"Geometry\", \"L_c\", \"120e-6\")\n P_s.set(\"Geometry\", \"L_s\", \"90e-6\")\n P_s.set(\"Electrolyte\", \"elyteModelType\", \"SM\")\n IO.write_config_file(P_s, psys)\n P = IO.get_config(ptrode)\n P.set(\"Particles\", \"type\", \"homog\")\n P.set(\"Material\", \"muRfunc\", \"LiFePO4\")\n IO.write_config_file(P, ptrode)\n main.main(psys, keepArchive=False)\n shutil.move(dirDict[\"simOut\"], testDir)\n if pflag:\n corePlots(testDir, dirDict)\n elytePlots(testDir, dirDict)\n electrodePlots(testDir, dirDict, \"c\")", "def get_params(self, test):\n super(DaosServer, self).get_params(test)\n self.yaml_params.get_params(test)", "def test_options(fixture_code, generate_structure):\n code = fixture_code('quantumespresso.pw')\n structure = generate_structure()\n\n queue_name = 'super-fast'\n withmpi = False # The protocol default is ``True``\n\n options = {'queue_name': queue_name, 'withmpi': withmpi}\n builder = PwBandsWorkChain.get_builder_from_protocol(code, structure, options=options)\n\n for subspace in (\n builder.relax.base.pw.metadata,\n builder.scf.pw.metadata, # pylint: disable=no-member\n builder.bands.pw.metadata, # pylint: disable=no-member\n ):\n assert subspace['options']['queue_name'] == queue_name, subspace", "def _check_log_params(self):\n steps_per_stats = self.configs['steps_per_stats']\n if not steps_per_stats or steps_per_stats < 0:\n steps_per_stats = 100\n steps_per_eval = self.configs['steps_per_eval']\n if not steps_per_eval:\n steps_per_eval = 10 * steps_per_stats\n steps_per_external_eval = self.configs['steps_per_external_eval']\n if not steps_per_external_eval:\n steps_per_external_eval = 5 * steps_per_eval\n self.configs['steps_per_stats'] = steps_per_stats\n self.configs['steps_per_eval'] = steps_per_eval\n self.configs['steps_per_external_eval'] = steps_per_external_eval", "def testDispersalParamStorage(self):\n t = CoalescenceTree(self.c)\n self.assertEqual(t.get_simulation_parameters()[\"dispersal_map\"], \"sample/dispersal_fine_nodata.tif\")", "def update_scaling_parameters(DomainName=None, ScalingParameters=None):\n pass", "def set_parameters(api_name='',\r\n targeted_flag='true',\r\n tv_flag='false',\r\n hinge_flag='true',\r\n cos_flag='false',\r\n interpolation='bilinear',\r\n model_type='large',\r\n loss_type='triplet',\r\n dataset_type='vgg',\r\n target_model='large',\r\n target_loss='center',\r\n target_dataset='VGG',\r\n attack='CW',\r\n norm='2',\r\n epsilon=0.1,\r\n iterations=20,\r\n binary_steps=5,\r\n learning_rate=0.01,\r\n epsilon_steps=0.01,\r\n init_const=0.3,\r\n mean_loss='embeddingmean',\r\n batch_size=-1,\r\n margin=15.0,\r\n amplification=6.0,\r\n granularity='normal',\r\n whitebox_target=False,\r\n pair_flag='false'):\r\n \r\n params = {}\r\n params['model_type'] = model_type\r\n params['loss_type'] = loss_type\r\n params['dataset_type'] = dataset_type\r\n params['target_model'] = target_model\r\n params['target_loss'] = target_loss\r\n params['target_dataset'] = 
target_dataset\r\n params['attack'] = attack\r\n params['norm'] = norm\r\n params['epsilon'] = epsilon\r\n params['iterations'] = iterations\r\n params['binary_steps'] = binary_steps\r\n params['learning_rate'] = learning_rate\r\n params['epsilon_steps'] = epsilon_steps\r\n params['init_const'] = init_const\r\n params['mean_loss'] = mean_loss\r\n params['batch_size'] = batch_size\r\n params['test_dir'] = TEST_DIR\r\n params['full_dir'] = FULL_DIR\r\n params['whitebox_target'] = whitebox_target\r\n params['targeted_flag'] = string_to_bool(targeted_flag)\r\n params['tv_flag'] = string_to_bool(tv_flag)\r\n params['hinge_flag'] = string_to_bool(hinge_flag)\r\n params['cos_flag'] = string_to_bool(cos_flag)\r\n params['pair_flag'] = string_to_bool(pair_flag)\r\n params['api_name'] = api_name\r\n\r\n if model_type == 'small' and loss_type == 'center':\r\n params['pixel_max'] = 1.0\r\n params['pixel_min'] = -1.0\r\n else:\r\n params['pixel_max'] = 1.0\r\n params['pixel_min'] = 0.0\r\n\r\n if dataset_type == 'vggsmall' and not whitebox_target:\r\n params['align_dir'] = VGG_ALIGN_160_DIR\r\n params['test_dir'] = VGG_TEST_DIR\r\n elif model_type == 'large' or dataset_type == 'casia':\r\n params['align_dir'] = ALIGN_160_DIR\r\n elif model_type == 'small':\r\n params['align_dir'] = ALIGN_96_DIR\r\n else:\r\n ValueError('ValueError: Argument must be either \"small\" or \"large\".')\r\n \r\n if interpolation == 'nearest':\r\n params['interpolation'] = cv2.INTER_NEAREST\r\n elif interpolation == 'bilinear':\r\n params['interpolation'] = cv2.INTER_LINEAR\r\n elif interpolation == 'bicubic':\r\n params['interpolation'] = cv2.INTER_CUBIC\r\n elif interpolation == 'lanczos':\r\n params['interpolation'] = cv2.INTER_LANCZOS4\r\n elif interpolation == 'super':\r\n ValueError('ValueError: Super interpolation not yet implemented.')\r\n else:\r\n raise ValueError('ValueError: Argument must be of the following, [nearest, bilinear, bicubic, lanczos, super].')\r\n\r\n if granularity == 'fine':\r\n params['margin_list'] = np.arange(0.0, margin, margin / 20.0)\r\n params['amp_list'] = np.arange(1.0, amplification, 0.2)\r\n elif granularity == 'normal':\r\n params['margin_list'] = np.arange(0.0, margin, margin / 10.0)\r\n params['amp_list'] = np.arange(1.0, amplification, 0.5)\r\n elif granularity == 'coarse':\r\n params['margin_list'] = np.arange(0.0, margin, margin / 5.0)\r\n params['amp_list'] = np.arange(1.0, amplification, 1.0)\r\n elif granularity == 'coarser':\r\n params['margin_list'] = np.arange(0.0, margin, margin / 3.0)\r\n params['amp_list'] = np.arange(1.0, amplification, 0.2)\r\n elif granularity == 'coarsest':\r\n params['margin_list'] = np.arange(0.0, margin, margin / 3.0)\r\n params['amp_list'] = np.arange(1.0, amplification, 1.0)\r\n elif granularity == 'single':\r\n params['margin_list'] = np.array([margin])\r\n params['amp_list'] = np.array([amplification])\r\n elif granularity == 'fine-tuned':\r\n params['margin_list'] = np.arange(10.0, margin, 1.0)\r\n params['amp_list'] = np.arange(1.0, amplification, 0.2)\r\n elif granularity == 'coarse-single':\r\n params['margin_list'] = np.arange(0.0, margin, margin / 3.0)\r\n params['amp_list'] = np.array([1.0])\r\n elif granularity == 'api-eval':\r\n params['margin_list'] = np.arange(0.0, margin, margin / 3.0)\r\n params['amp_list'] = np.arange(1.0, amplification, 0.8)\r\n else:\r\n raise ValueError('ValueError: Argument must be of the following, [fine, normal, coarse, coarser, single].')\r\n\r\n if params['hinge_flag']:\r\n params['attack_loss'] = 
'hinge'\r\n else:\r\n params['attack_loss'] = 'target'\r\n if not params['targeted_flag']:\r\n params['attack_loss'] = 'target'\r\n if norm == 'inf':\r\n norm_name = 'i'\r\n else:\r\n norm_name = '2'\r\n if params['tv_flag']:\r\n tv_name = '_tv'\r\n else:\r\n tv_name = ''\r\n if params['cos_flag']:\r\n cos_name = '_cos'\r\n else:\r\n cos_name = ''\r\n\r\n params['model_name'] = '{}_{}'.format(model_type, loss_type)\r\n if dataset_type == 'casia' or dataset_type == 'vggsmall':\r\n params['model_name'] = dataset_type\r\n params['target_model_name'] = '{}_{}_{}'.format(target_model, target_loss, target_dataset)\r\n params['attack_name'] = '{}_l{}{}{}'.format(attack.lower(), norm_name, tv_name, cos_name)\r\n params['directory_path'] = os.path.join(ROOT,\r\n OUT_DIR,\r\n params['attack_name'],\r\n params['model_name'],\r\n '{}_loss/full'.format(params['attack_loss']))\r\n params['directory_path_crop'] = os.path.join(ROOT,\r\n OUT_DIR,\r\n params['attack_name'],\r\n params['model_name'],\r\n '{}_loss/crop'.format(params['attack_loss']))\r\n params['directory_path_npz'] = os.path.join(ROOT,\r\n OUT_DIR,\r\n params['attack_name'],\r\n params['model_name'],\r\n '{}_loss/npz'.format(params['attack_loss']))\r\n params['api_path'] = os.path.join(ROOT,\r\n API_DIR,\r\n params['attack_name'],\r\n params['model_name'],\r\n '{}_loss/npz'.format(params['attack_loss']))\r\n if params['mean_loss'] == 'embedding':\r\n params['directory_path'] += '_mean'\r\n params['directory_path_crop'] += '_mean'\r\n params['directory_path_npz'] += '_mean'\r\n params['api_path'] += '_mean'\r\n\r\n return params", "def scaling(self) -> Optional['outputs.AiFeatureStoreOnlineServingConfigScaling']:\n return pulumi.get(self, \"scaling\")", "def test_validate_scale_factors(ctx):\n assert eos.validate_scale_factors(None, ctx) is None\n assert eos.validate_scale_factors(orm.List(list=[0.98, 1, 1.02]), ctx) is None\n assert eos.validate_scale_factors(orm.List(list=[0, 1]), ctx) == 'need at least 3 scaling factors.'", "def get_params(self, test):\n server_params = [\"debug\", \"sudo\", \"srv_timeout\"]\n server_start_params = [\"insecure\", \"recreate\"]\n runner_params = [\"enable_recovery\", \"export\", \"report_uri\"]\n super(ServerManager, self).get_params(test)\n self.runner.job.yaml_params.get_params(test)\n self.runner.get_params(test)\n for name in self.get_param_names():\n if name in server_params:\n if name == \"sudo\":\n setattr(self.runner.job, name, getattr(self, name).value)\n elif name == \"srv_timeout\":\n setattr(\n self.runner.job, \"timeout\", getattr(self, name).value)\n else:\n getattr(\n self.runner.job, name).value = getattr(self, name).value\n if name in server_start_params:\n getattr(self.runner.job.action_command, name).value = \\\n getattr(self, name).value\n if name in runner_params:\n getattr(self.runner, name).value = getattr(self, name).value\n\n # Run daos_server with test variant specific log file names if specified\n self.runner.job.yaml_params.update_log_files(\n getattr(test, \"control_log\"),\n getattr(test, \"helper_log\"),\n getattr(test, \"server_log\")\n )", "def test_SVM_test_C_parameter(params, X_train, X_test, y_train, y_test):", "def test_SMEB_args():\n testing_function('sme_bl', bilinear=True)", "def test_invalid_ssm_parameter_without_default():\n with pytest.raises(GetParameterError):\n _ = get_ssm_parameter(ssm_parameter_key=\"/oops/not/valid\")", "def get_parameter_estimation_parameters(self, friendly=True):\n #Get the sensitivities task:\n fitTask=self._getTask('parameterFitting')\n 
fitProblem = fitTask.find(xmlns + 'Problem')\n optimizationItems = fitProblem.find(xmlns + 'ParameterGroup')\n parameters = []\n for subGroup in optimizationItems:\n name = None\n lowerBound = None\n upperBound = None\n startValue = None\n \n for item in subGroup:\n if item.attrib['name'] == 'ObjectCN':\n name = item.attrib['value']\n elif item.attrib['name'] == 'UpperBound':\n upperBound = item.attrib['value']\n elif item.attrib['name'] == 'LowerBound':\n lowerBound = item.attrib['value']\n elif item.attrib['name'] == 'StartValue':\n startValue = item.attrib['value']\n assert name !=None\n assert lowerBound != None\n assert upperBound != None\n assert startValue != None\n \n if friendly:\n #Construct a user-friendly name for the parameter name using regexs\n #Look for a match for global parameters: Vector=Values[Test parameter],\n global_string = r'.*Vector=Values\\[(?P<name>.*)\\].*'\n global_string_re = re.compile(global_string)\n global_match = re.match(global_string_re, name)\n \n if global_match:\n name = global_match.group('name')\n \n #else check for a local match.\n #Vector=Reactions[Reaction] Parameter=k1\n local_string = r'.*Vector=Reactions\\[(?P<reaction>.*)\\].*Parameter=(?P<parameter>.*),Reference=Value.*'\n local_string_re = re.compile(local_string)\n local_match = re.match(local_string_re, name)\n \n if local_match:\n reaction = local_match.group('reaction')\n parameter = local_match.group('parameter')\n name = '(%s).%s'%(reaction, parameter)\n\n parameters.append((name, lowerBound, upperBound, startValue))\n\n return parameters", "def test_get_voltage_maps(self):\n pass", "def c_test_set_inp(self, param, value):\r\n ret = 1\r\n if \"__hash__\" not in dir(param): # param must be hashable\r\n ret = 0\r\n if self.verbosity > 0:\r\n print(\"ERROR: \" + param + \" is not hashable. It will be unable to be set in a dict.\")\r\n else:\r\n if self.verbosity > 1:\r\n print(param + \" is hashable.\")\r\n if param in [\"population_size\", \"time_constraint\", \"generations\"]:\r\n if not ((isinstance(value, int) or\r\n isinstance(value, float) or\r\n isinstance(value, long))):\r\n if self.verbosity > 0:\r\n print(\"ERROR: \" + param + \" needs to be of a number. It is \" + str(value))\r\n ret = 0\r\n else:\r\n if self.verbosity > 1:\r\n print(param + \" is correctly set to a number.\")\r\n if value < 0:\r\n ret = 0\r\n if self.verbosity > 0:\r\n print(\"ERROR: \" + param + \" needs to be greater than zero.\")\r\n else:\r\n if self.verbosity > 1:\r\n print(param + \" is greater than zero.\")\r\n if param in [\"population_size\", \"generations\"]:\r\n if not isinstance(value, int):\r\n ret = 0\r\n if self.verbosity > 0:\r\n print(\"ERROR: \" + param + \" needs to be an integer. 
It is \" + str(value))\r\n else:\r\n if self.verbosity > 1:\r\n print(param + \" is an integer.\")\r\n if param in [\"fitness_function\", \"population_function\",\r\n \"mutate_function\", \"cross_function\", \"weighting_bias\"]:\r\n if not callable(value):\r\n ret = 0\r\n if self.verbosity > 0:\r\n print(\"ERROR: \" + param + \" needs to be a callable function.\")\r\n else:\r\n if self.verbosity > 1:\r\n print(param + \" is a callable function.\")\r\n if param == \"end_condition\":\r\n if value not in [\"time_constraint\", \"generations\"]:\r\n ret = 0\r\n if self.verbosity > 0:\r\n print(\"ERROR: \" + param + \" needs to be 'time_constraint' or 'generations'\")\r\n else:\r\n if self.verbosity > 1:\r\n print(\"ERROR: \" + param + \" is a correct string.\")\r\n if param == \"seed\":\r\n if not (value is None or isinstance(value, int)):\r\n ret = 0\r\n if self.verbosity > 0:\r\n print(\"ERROR: \" + param + \" is incorrectly set.\")\r\n else:\r\n if self.verbosity > 1:\r\n print(param + \" is correctly set.\")\r\n return ret" ]
[ "0.6702554", "0.6387912", "0.6046434", "0.6030869", "0.59028023", "0.5901838", "0.5870547", "0.58489865", "0.5837942", "0.583072", "0.5788725", "0.57385606", "0.57318795", "0.5716319", "0.56859964", "0.56810164", "0.56458664", "0.5612986", "0.56105846", "0.5608693", "0.5561435", "0.55288047", "0.55212134", "0.55166095", "0.5496967", "0.5490376", "0.54730386", "0.54667073", "0.5458826", "0.5451223", "0.5446257", "0.5444624", "0.54413307", "0.5440897", "0.54358923", "0.5431287", "0.5428664", "0.5427187", "0.5420457", "0.5413307", "0.53964394", "0.53763807", "0.53745073", "0.53718966", "0.5365436", "0.53613746", "0.5342829", "0.53338516", "0.5332781", "0.53325194", "0.5332353", "0.5320746", "0.5320055", "0.53162694", "0.5312617", "0.53116304", "0.53084856", "0.5303006", "0.5298525", "0.5297812", "0.52968407", "0.5293842", "0.528509", "0.52658314", "0.52628213", "0.52565914", "0.523887", "0.5233039", "0.52275896", "0.5226751", "0.52259004", "0.5223511", "0.5222215", "0.52217597", "0.5212537", "0.5208675", "0.52078086", "0.52073383", "0.5201019", "0.5196791", "0.5187541", "0.51779306", "0.51739603", "0.51732606", "0.51728386", "0.51685464", "0.51651245", "0.51586115", "0.5157784", "0.5157644", "0.5152149", "0.5152054", "0.51482236", "0.51466846", "0.5146298", "0.5146084", "0.51459575", "0.51425004", "0.5141817", "0.51400256" ]
0.7040565
0
Sentiment stats which displays buzz, news score, articles last week, articles weekly average, bullish vs bearish percentages, sector average bullish percentage, and sector average news score
def sentiment_stats(other_args: List[str], ticker: str): parser = argparse.ArgumentParser( add_help=False, formatter_class=argparse.ArgumentDefaultsHelpFormatter, prog="stats", description=""" Sentiment stats which displays buzz, news score, articles last week, articles weekly average, bullish vs bearish percentages, sector average bullish percentage, and sector average news score. [Source: https://finnhub.io] """, ) try: ns_parser = parse_known_args_and_warn(parser, other_args) if not ns_parser: return d_stats = get_sentiment_stats(ticker) if d_stats: print(f"Buzz: {round(100*d_stats['buzz']['buzz'],2)} %") print(f"News Score: {round(100*d_stats['companyNewsScore'],2)} %") print("") print(f"Articles Last Week: {d_stats['buzz']['articlesInLastWeek']}") print(f"Articles Weekly Average: {d_stats['buzz']['weeklyAverage']}") print("") print(f"Bullish: {round(100*d_stats['sentiment']['bullishPercent'],2)} %") print(f"Bearish: {round(100*d_stats['sentiment']['bearishPercent'],2)} %") print("") print( f"Sector Average Bullish: {round(100*d_stats['sectorAverageBullishPercent'],2)} %" ) print( f"Sector Average News Score: {round(100*d_stats['sectorAverageNewsScore'],2)} %" ) else: print("No sentiment stats found.") print("") except Exception as e: print(e, "\n")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def sentiment(self) -> Dict[str, float]:", "def overall_sentiment(self, _testing=False):\n df = self.df.copy()\n\n sentiment_scores = df[self.review_column].apply(self.sentiment_for_one_comment)\n self.sentiment_scores_all = sentiment_scores\n print(\"Average sentiment score: {}\".format(round(sentiment_scores.mean(), 2)))\n print(\"{}% of the comments are positive,; {}% of the comments are neutral; {}% of the comments are negative\".\n format(\n round(100 * sum(sentiment_scores > 0) / len(sentiment_scores), 2),\n round(100 * sum(sentiment_scores == 0) / len(sentiment_scores), 2),\n round((100 * sum(sentiment_scores < 0) / len(sentiment_scores)), 2)\n )\n )\n plt.figure(figsize=(5, 5))\n plt.rc('xtick', labelsize=15)\n plt.rc('ytick', labelsize=15)\n\n fig, ax = plt.subplots()\n ax.hist(sentiment_scores)\n ax.set_title('Sentiment scores of all comments (avg: {})'.format(round(sentiment_scores.mean(), 2)),\n fontsize = 20)\n\n if not _testing:\n plt.show()\n else:\n return fig", "def text_analytics(self):\n\n headers = {\n # Request headers\n 'Content-Type': 'application/json',\n 'Ocp-Apim-Subscription-Key': self.keys['text_analytics'],\n }\n \n sentiment_url = 'https://westus.api.cognitive.microsoft.com/text/analytics/v2.0/sentiment'\n \n raw_text = self.article_params['text']\n\n # Build post for sentiment\n try:\n sentences = tokenize.sent_tokenize(str(raw_text))\n content = []\n for i, sentence in enumerate(sentences):\n content.append({'id': str(i), 'language': 'en', 'text': sentence})\n body = json.dumps({\"documents\": content}).encode('utf-8')\n\n request = urllib.request.Request(sentiment_url, body, headers)\n response = urllib.request.urlopen(request)\n json_response = json.loads(response.read().decode('utf-8'))\n \n # A list of dictionaries, with each dictionary containing a sentence\n # sentiment score\n sentiments_list = json_response['documents']\n\n # Calculate the articles average sentiment from all the sentences\n cumulative_sentiment_score = 0\n for sent in sentiments_list:\n cumulative_sentiment_score += sent['score']\n avg_article_sentiment = cumulative_sentiment_score/len(sentiments_list)\n\n # Put article sentiments in bucket from 1 to 5, with 1 being very\n # negative and 5 being very positive\n if avg_article_sentiment < 0.2:\n sentiment = 1\n elif 0.2 <= avg_article_sentiment < 0.4:\n sentiment = 2\n elif 0.4 <= avg_article_sentiment < 0.6:\n sentiment = 3\n elif 0.6 <= avg_article_sentiment < 0.8:\n sentiment = 4\n else:\n sentiment = 5\n\n except Exception as e:\n print('Unable to process sentiment for article. 
Assuming '\n 'sentiment is neutral.')\n sentiment = 3\n\n return sentiment", "def sentiment():\r\n scores = []\r\n for index, row in topics_data.iterrows():\r\n if index in actual_list:\r\n scores.append(row['score'])\r\n\r\n sentiments = []\r\n for index, row in topics_data.iterrows():\r\n if index in actual_list:\r\n url = row['url']\r\n if 'newsweek' or 'democracynow' in url:\r\n user_agent = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/85.0.4183.121 Safari/537.36'\r\n config = Config()\r\n config.browser_user_agent = user_agent\r\n article = Article(url, config=config)\r\n else:\r\n article = Article(url)\r\n article.download()\r\n article.parse()\r\n article.nlp()\r\n text = article.summary\r\n obj = TextBlob(text)\r\n subjectivity = obj.sentiment.subjectivity\r\n sentiment = obj.sentiment.polarity\r\n sentiments.append(sentiment)\r\n\r\n plt.figure(figsize=(50, 10))\r\n plt.scatter(sentiments, scores)\r\n plt.xlabel('Sentiments')\r\n plt.ylabel('Score')\r\n plt.title('Posts in r/politics')\r\n plt.show()", "def sentiment(text):\n words = pattern_split.split(text.lower())\n sentiments = map(lambda word: afinn.get(word, 0), words)\n if sentiments:\n # How should you weight the individual word sentiments? \n # You could do N, sqrt(N) or 1 for example. Here I use sqrt(N)\n sentiment = float(sum(sentiments))/math.sqrt(len(sentiments))\n \n else:\n sentiment = 0\n return sentiment", "def print_sentiment_summary(self, sentiment_data):\n\n self.print_recent_tweets('positive')\n self.print_recent_tweets('negative')\n self.print_recent_tweets('neutral')\n\n self.print_extreme_tweets('positive', num_score=True)\n self.print_extreme_tweets('negative', num_score=True)\n\n self.print_objective_tweets(count=5)\n self.print_objective_tweets(count=5, objective=False)", "def averages():\r\n totalsubs = 0\r\n for sub in subs:\r\n totalsubs += sub\r\n avgsubs = totalsubs / len(subs)\r\n\r\n totalsent = 0\r\n for sent in sentiments:\r\n totalsent += sent\r\n avgsent = totalsent / len(sentiments)\r\n print('The average subjectivity is: ' + str(avgsubs))\r\n print('The average sentiment is: ' + str(avgsent))", "def aggregate_sentiment(tweets):\r\n\r\n positive = 0\r\n negative = 0\r\n neutral = 0\r\n\r\n for tweet in tweets:\r\n if tweet.sentiment_type == \"positive\":\r\n positive += 1\r\n elif tweet.sentiment_type == \"negative\":\r\n negative += 1\r\n else:\r\n neutral += 1\r\n\r\n result = [[\"Positive\", positive], [\"Neutral\", neutral], [\"Negative\", negative]]\r\n return result", "def process_sentiment(self):\r\n\r\n\r\n print(\"Beginning sentiment analysis\")\r\n # textblob time\r\n #tweet_sentiment = [TextBlob(tweet['filtered_text']).sentiment for index, tweet in self.tweet_dataframe.iterrows()]\r\n #self.tweet_dataframe['polarity'] = [i.polarity for i in tweet_sentiment]\r\n #self.tweet_dataframe['subjectivity'] = [i.subjectivity for i in tweet_sentiment]\r\n\r\n #vader time\r\n #http://t-redactyl.io/blog/2017/04/applying-sentiment-analysis-with-vader-and-the-twitter-api.html\r\n sentiment = []\r\n\r\n analyzer = SentimentIntensityAnalyzer()\r\n\r\n for tweet in self.tweet_dataframe['filtered_text']:\r\n vs = analyzer.polarity_scores(tweet)\r\n sentiment.append(vs['compound'])\r\n\r\n self.tweet_dataframe['vader_polarity'] = pd.Series(sentiment)", "def test_get_average_of_sentiment_scores():\n\n dict_of_avg_scores = get_average_of_sentiment_scores(\n 'politics_30_months_comments_cleaned_standardized_vader_flair.csv')\n print('average sentiment 
scores all comments')\n for key, value in dict_of_avg_scores.items():\n print(key, value)\n print()", "def do_sentiment_analysis(self):\n\n tweets_sentiment = []\n\n for tweet in self.tweets:\n parsed_tweet = {}\n parsed_tweet['text'] = tweet\n sentiment_data = self.tweet_sentiment_analysis(tweet)\n parsed_tweet['sentiment'] = sentiment_data[0]\n parsed_tweet['polarity'] = sentiment_data[1]\n parsed_tweet['subjectivity'] = sentiment_data[2]\n\n tweets_sentiment.append(parsed_tweet)\n\n self.sentiment_data = tweets_sentiment\n self.positive_tweets = [tweet for tweet in self.sentiment_data if tweet['sentiment'] == 'Positive']\n self.negative_tweets = [tweet for tweet in self.sentiment_data if tweet['sentiment'] == 'Negative']\n self.neutral_tweets = [tweet for tweet in self.sentiment_data if tweet['sentiment'] == 'Neutral']\n\n return tweets_sentiment", "def get_overall_sentiment(text):\n return alchemy_language.sentiment(text=text)", "def get_query_sentiment_avg(tweets):\r\n\r\n total = 0\r\n count = len(tweets)\r\n\r\n for tweet in tweets:\r\n total += tweet.sentiment_score\r\n\r\n # Calculate average\r\n avg = total / count\r\n avg = float(\"{0:.2f}\".format((float(avg))))\r\n\r\n return avg", "def sentiment(data_list):\n for x in data_list:\n print(x)\n analysis = TextBlob(x)\n print(analysis.sentiment)", "def sentiment_score(review):\n return sum([sentence_score(sentence, None, 0.0) for sentence in review])", "def analyze_sentiment(blob):\n intensity = list(blob.sentiment)[0]\n if intensity > 0:\n sentiment = 'pos'\n elif intensity < 0:\n sentiment = 'neg'\n else:\n sentiment = 'neu'\n\n return sentiment", "def question_sentiment_analysis(self):\n sentiments = get_sentiments()\n student_data = self.responses\n question_text = 'In one word'\n\n # Set up data for calculations\n num_scores = 0\n sentiment_sum = 0\n score_list = list()\n\n for response in student_data:\n\n if question_text in response.question.text:\n words = response.response.lower().split()\n\n # Find the sentiment score for each word, and add it to our data\n for word in words:\n # Ignore the word if it's not in the sentiment dictionary\n if word in sentiments:\n sentiment_sum += sentiments[word]\n num_scores += 1\n score_list.append(sentiments[word])\n\n average = sentiment_sum / num_scores\n standard_dev = statistics.stdev(score_list)\n\n return average, standard_dev", "def tweet_df(n):\n # Retrieve the tweet contents\n first_tweet = get_value(df_1t, n)\n second_tweet = get_value(df_2t, n) \n third_tweet = get_value(df_3t, n)\n fourth_tweet = get_value(df_4t, n)\n fifth_tweet = get_value(df_5t, n)\n sixth_tweet = get_value(df_6t, n)\n seventh_tweet = get_value(df_7t, n)\n eighth_tweet = get_value(df_8t, n)\n nineth_tweet = get_value(df_9t, n)\n tenth_tweet = get_value(df_10t, n) \n \n # Sentiment of each tweet\n sa_first_tweet = sentiment_analyzer_scores(first_tweet)\n sa_second_tweet = sentiment_analyzer_scores(second_tweet)\n sa_third_tweet = sentiment_analyzer_scores(third_tweet)\n sa_fourth_tweet = sentiment_analyzer_scores(fourth_tweet)\n sa_fifth_tweet = sentiment_analyzer_scores(fifth_tweet)\n sa_sixth_tweet = sentiment_analyzer_scores(sixth_tweet)\n sa_seventh_tweet = sentiment_analyzer_scores(seventh_tweet)\n sa_eighth_tweet = sentiment_analyzer_scores(eighth_tweet)\n sa_nineth_tweet = sentiment_analyzer_scores(nineth_tweet)\n sa_tenth_tweet = sentiment_analyzer_scores(tenth_tweet)\n \n # Compute the compound score for obtaining a sentiment class\n compound_score_first_tweet = 
sentiment_logic((list(sa_first_tweet.values())[list(sa_first_tweet.keys()).index('compound')] ))\n compound_score_second_tweet = sentiment_logic((list(sa_second_tweet.values())[list(sa_second_tweet.keys()).index('compound')] )) \n compound_score_third_tweet = sentiment_logic((list(sa_third_tweet.values())[list(sa_third_tweet.keys()).index('compound')] ))\n compound_score_fourth_tweet = sentiment_logic((list(sa_fourth_tweet.values())[list(sa_fourth_tweet.keys()).index('compound')] ))\n compound_score_fifth_tweet = sentiment_logic((list(sa_fifth_tweet.values())[list(sa_fifth_tweet.keys()).index('compound')] ))\n compound_score_sixth_tweet = sentiment_logic((list(sa_sixth_tweet.values())[list(sa_sixth_tweet.keys()).index('compound')] ))\n compound_score_seventh_tweet = sentiment_logic((list(sa_seventh_tweet.values())[list(sa_seventh_tweet.keys()).index('compound')] ))\n compound_score_eighth_tweet = sentiment_logic((list(sa_eighth_tweet.values())[list(sa_eighth_tweet.keys()).index('compound')] ))\n compound_score_nineth_tweet = sentiment_logic((list(sa_nineth_tweet.values())[list(sa_nineth_tweet.keys()).index('compound')] ))\n compound_score_tenth_tweet = sentiment_logic((list(sa_tenth_tweet.values())[list(sa_tenth_tweet.keys()).index('compound')] ))\n \n # Create a new temporary dataframe for the tweet contents and sentiment\n compound_score_list = [compound_score_first_tweet, compound_score_second_tweet,\n compound_score_third_tweet, compound_score_fourth_tweet,\n compound_score_fifth_tweet, compound_score_sixth_tweet, \n compound_score_seventh_tweet, compound_score_eighth_tweet,\n compound_score_nineth_tweet, compound_score_tenth_tweet]\n \n \n first_col = [first_tweet, second_tweet,\n third_tweet, fourth_tweet,\n fifth_tweet, sixth_tweet,\n seventh_tweet, eighth_tweet,\n nineth_tweet, tenth_tweet]\n \n second_col = compound_score_list\n \n tmp_df = pd.DataFrame(data = {'Tweets' : first_col, \n 'Sentiment' : second_col})\n \n \n return tmp_df.to_json(date_format = 'iso', orient = 'split')", "def get_query_statistics(tweets, sentiment_aggregate_list):\r\n\r\n total = len(tweets)\r\n positive_percentage = float(\"{0:.2f}\".format((float(sentiment_aggregate_list[0][1]/total*100))))\r\n neutral_percentage = float(\"{0:.2f}\".format((float(sentiment_aggregate_list[1][1]/total*100))))\r\n negative_percentage = float(\"{0:.2f}\".format((float(sentiment_aggregate_list[2][1]/total*100))))\r\n\r\n result = {\"%Positive\": positive_percentage, \"%Neutral\": neutral_percentage, \"%Negative\": negative_percentage, \"Total\": total}\r\n return result", "def statistics(all_new_tweets, all_retweets, all_quote_tweets):\n length_all_quote_tweets = len(all_quote_tweets)\n length_all_retweets = len(all_retweets)\n length_all_tweets = len(all_new_tweets)\n\n # print(db_twitter.collections.stats())\n total_tweets = length_all_quote_tweets + length_all_retweets + length_all_tweets\n print(\n f\"Number of all tweets via streaming collected: {total_tweets - return_rest_tweets_number()}\"\n )\n print(f\"Number of new tweets collected: {length_all_tweets}\")\n print(f\"Number of retweets collected: {length_all_retweets}\")\n print(f\"Number of quote tweets collected: {length_all_quote_tweets}\")\n print(f\"Number of tweets collected via rest is {return_rest_tweets_number()}\")\n\n # Calculates mean sentiment, where 1 is very positive, -1 is very negative\n mean_sentiment = 0.0\n\n for tweet in all_new_tweets:\n mean_sentiment += tweet[\"sentiment_polarity\"]\n mean_sentiment = mean_sentiment / length_all_tweets\n 
print(\"The mean sentiment of tweets is: \", mean_sentiment)\n\n # Calculates mean subjectivity, where 1 is very subjective, -1 is very objective\n mean_subjectivity = 0.0\n\n for tweet in all_new_tweets:\n mean_subjectivity += tweet[\"subjectivity\"]\n mean_subjectivity = mean_subjectivity / length_all_tweets\n print(\"The mean subjectivity of retweets is: \", mean_subjectivity)\n return mean_sentiment, mean_subjectivity, total_tweets", "def get_sentiment(self, sentances):\n sentiment_total = 0\n # Add each sentances combined sentiment to a total tally\n for sentance in sentances:\n sentiment = self.sentiment_analyzer.polarity_scores(sentance)\n sentiment_total += sentiment['compound']\n return sentiment_total / len(sentances)", "def analyze(self, text): #takes the text to be analyzed for sentiment\n #initialize inicial score to 0\n score = 0\n #Create tokenizer instance\n tokenizer = nltk.tokenize.TweetTokenizer()\n #create list of words in a tweets\n tokens = tokenizer.tokenize(text)\n \n #iterate over tokens(list of words)\n for word in tokens:\n #check if word is positive or negative\n if word.lower() in self.positives_words:\n score+=1\n if word.lower() in self.negatives_words:\n score-=1\n #neutral if its neither, doesnt add anything, 0\n return score", "def get_sentiment():\n # USER REQUEST PARAMETERS\n hashtag = request.args.get('hashtag', '')\n if hashtag == \"\":\n return \"Please specify a non null hashtag\"\n nb_days = request.args.get('nb_days', 7,type=int)\n nb_days = int(min(max(nb_days, 1), 7))\n nb_tweets = max(request.args.get('nb_tweets', nb_days * 10), nb_days,type=int)\n get_topic_words = bool(int(request.args.get('get_topic_words',\"1\")))\n n_topics = request.args.get('n_topics', 1,type=int)\n n_words_per_topic = request.args.get('n_words_per_topic', 10,type=int)\n lda_passes = request.args.get('lda_passes', 4,type=int)\n return_tweets = bool(int(request.args.get('return_tweets', \"0\")))\n language = request.args.get('language', \"en\")\n\n # TWITTER REQUEST PARAMETERS\n days_offsets = range(-nb_days + 1, 1)\n query_key_value = \" -is:retweet -is:quote lang:\" + language\n tweet_fields = \"created_at,public_metrics,author_id\"\n max_nb_tweets_per_day = nb_tweets // len(days_offsets)\n query_string = \"#\" + hashtag.strip() + query_key_value\n\n # COMPUTE RESULTS\n tweets = get_tweets(query_string, days_offsets, tweet_fields,\n max_nb_tweets_per_day, nb_tweets, search_tweets_args)\n sentiments_df, cleaned_tweets_texts, filtered_tweets_df = compute_sentiment(\n tweets, model, tokenizer)\n\n if get_topic_words:\n top_topics = get_topics_from_tweets(NLTK_DATA_PATH, cleaned_tweets_texts, n_topics=n_topics,\n n_words_per_topic=n_words_per_topic, n_passes=lda_passes,\n force_download=False)\n\n if return_tweets:\n sentiments_tweets_df = pd.concat(\n (sentiments_df, filtered_tweets_df.reset_index(drop=True)), axis=1)\n\n results = {\"sentiments_json\": sentiments_tweets_df.to_json()}\n else:\n results = {\"sentiments_json\": sentiments_df.to_json()}\n\n if get_topic_words:\n results[\"top_topics_json\"] = top_topics.to_json()\n\n return json.dumps(results)", "def stockSentiment(stockName, numTweets=100):\n\n listOfTweets = user.search(stockName, count=numTweets)\n threshold = posSentTweet = negSentTweet = 0\n\n for tweet in listOfTweets:\n analysis = TextBlob(tweet.text)\n if analysis.sentiment.polarity >= threshold:\n posSentTweet = posSentTweet + 1\n else:\n negSentTweet = negSentTweet + 1\n\n if posSentTweet > negSentTweet:\n print(\"Overall Positive\")\n return True\n 
else:\n print(\"Overall Negative\")\n return False", "def createReport(query):\n sentiments = get_sentiments(query)\n print(\"Based on the query, %s has an average sentiment value of %d\", query, sentiments)", "def display_sentiment(ticker: str, n_tweets: int, n_days_past: int, export: str = \"\"):\n # Date format string required by twitter\n dtformat = \"%Y-%m-%dT%H:%M:%SZ\"\n\n # Algorithm to extract\n dt_recent = datetime.now() - timedelta(seconds=20)\n dt_old = dt_recent - timedelta(days=n_days_past)\n print(\n f\"From {dt_recent.date()} retrieving {n_tweets*24} tweets ({n_tweets} tweets/hour)\"\n )\n\n df_tweets = pd.DataFrame(\n columns=[\n \"created_at\",\n \"text\",\n \"sentiment\",\n \"positive\",\n \"negative\",\n \"neutral\",\n ]\n )\n while True:\n # Iterate until we haven't passed the old number of days\n if dt_recent < dt_old:\n break\n # Update past datetime\n dt_past = dt_recent - timedelta(minutes=60)\n\n temp = twitter_model.load_analyze_tweets(\n ticker,\n n_tweets,\n start_time=dt_past.strftime(dtformat),\n end_time=dt_recent.strftime(dtformat),\n )\n\n if temp.empty:\n return\n\n df_tweets = pd.concat([df_tweets, temp])\n\n if dt_past.day < dt_recent.day:\n print(\n f\"From {dt_past.date()} retrieving {n_tweets*24} tweets ({n_tweets} tweets/hour)\"\n )\n\n # Update recent datetime\n dt_recent = dt_past\n\n # Sort tweets per date\n df_tweets.sort_index(ascending=False, inplace=True)\n df_tweets[\"cumulative_compound\"] = df_tweets[\"sentiment\"].cumsum()\n df_tweets[\"prob_sen\"] = 1\n\n # df_tweets.to_csv(r'notebooks/tweets.csv', index=False)\n df_tweets.reset_index(inplace=True)\n df_tweets[\"Month\"] = pd.to_datetime(df_tweets[\"created_at\"]).apply(\n lambda x: x.month\n )\n df_tweets[\"Day\"] = pd.to_datetime(df_tweets[\"created_at\"]).apply(lambda x: x.day)\n df_tweets[\"date\"] = pd.to_datetime(df_tweets[\"created_at\"])\n df_tweets = df_tweets.sort_values(by=\"date\")\n df_tweets[\"cumulative_compound\"] = df_tweets[\"sentiment\"].cumsum()\n _, ax = plt.subplots(2, 1, figsize=plot_autoscale(), dpi=cfg_plot.PLOT_DPI)\n ax[0].plot(\n pd.to_datetime(df_tweets[\"created_at\"]),\n df_tweets[\"cumulative_compound\"].values,\n lw=3,\n c=\"cyan\",\n )\n ax[0].set_ylabel(\"Cumulative VADER Sentiment\")\n xlocations = []\n xlabels = []\n for _, day_df in df_tweets.groupby(by=\"Day\"):\n day_df[\"time\"] = pd.to_datetime(day_df[\"created_at\"])\n day_df = day_df.sort_values(by=\"time\")\n ax[0].plot(day_df[\"time\"], day_df[\"sentiment\"].cumsum(), c=\"tab:blue\")\n xlocations.append(day_df.time.values[0])\n xlabels.append(day_df[\"time\"].apply(lambda x: x.strftime(\"%m-%d\")).values[0])\n\n ax[1].bar(df_tweets[\"date\"], df_tweets[\"positive\"], color=\"green\", width=0.02)\n ax[1].bar(df_tweets[\"date\"], -1 * df_tweets[\"negative\"], color=\"red\", width=0.02)\n ax[0].grid(b=True, which=\"major\", color=\"#666666\", linestyle=\"-\", lw=1.5, alpha=0.5)\n ax[0].minorticks_on()\n ax[0].grid(b=True, which=\"minor\", color=\"#999999\", linestyle=\"-\", alpha=0.2)\n ax[0].set_xticks(xlocations)\n ax[0].set_xticklabels(xlabels)\n\n ax[1].grid(b=True, which=\"major\", color=\"#666666\", linestyle=\"-\", lw=1.5, alpha=0.5)\n ax[1].minorticks_on()\n ax[1].grid(b=True, which=\"minor\", color=\"#999999\", linestyle=\"-\", alpha=0.2)\n ax[1].set_ylabel(\"VADER Polarity Scores\")\n ax[1].set_xticks(xlocations)\n ax[1].set_xticklabels(xlabels)\n plt.suptitle(\n f\"Twitter's {ticker} total compound sentiment over time is {np.sum(df_tweets['sentiment'])}\"\n )\n if gtff.USE_ION:\n 
plt.ion()\n plt.show()\n print(\"\")\n export_data(\n export, os.path.dirname(os.path.abspath(__file__)), \"sentiment\", df_tweets\n )", "def feat_eng(self, tweets):\n self.tweets['emojis'] = get_emojis(self.tweets['text']) # get emojis as text\n self.tweets['polarity'] = self.tweets['text'].map(\n lambda x: TextBlob(x).sentiment.polarity)\n self.tweets['word_count'] = self.tweets['text'].map(lambda x: len(str(x).split()))", "def getSentiment(tweets, location):\n sentiment = [0, 0, 0]\n for tweet in tweets:\n analyser(tweets[tweet], sentiment,location)\n return sentiment", "def get_sentiment_data():\n params = request.args\n result = None\n\n def set_result(x):\n nonlocal result # This is ugly, ew, gotta fix this\n result = x\n\n pipeline_zoo.get_sentiment_analysis_pipeline(set_result).feed_data((params, None))\n return jsonify({\n 'sentiment_score': result\n })", "def article_stats(s_list,subject):\n word_dicts = {languages[0]:{},languages[1]:{}}\n stats = [subject]\n for i,article in enumerate(s_list):\n word_dicts[languages[i]] = get_words(article)\n wc = total_wc(word_dicts[languages[i]])\n stats.append((wc,avg_word_length(article, wc),avg_par_length(article)))\n stats.append(compute_similarity(word_dicts[languages[0]],word_dicts[languages[1]]))\n return stats", "def process_sentiment(self, sentiment_data):\n new_utts_dict = {'1':[], '2':[], '3':[], '4':[], '5':[]}\n for l in sentiment_data:\n title = [\"<s>\"] + l[0] + [\"</s>\"]\n context = [\"<s>\"] + l[1] + [\"</s>\"]\n target = [\"<s>\"] + l[2] + [\"</s>\"]\n sentiment = l[3][0]\n new_utts_dict[sentiment].append([title, context, target, sentiment])\n return new_utts_dict", "def update_tweets_feed(n):\n \n # Retrieve the tweets\n first_tweet = get_value(df_1t, n)\n second_tweet = get_value(df_2t, n) \n third_tweet = get_value(df_3t, n)\n fourth_tweet = get_value(df_4t, n)\n fifth_tweet = get_value(df_5t, n)\n sixth_tweet = get_value(df_6t, n)\n seventh_tweet = get_value(df_7t, n)\n eighth_tweet = get_value(df_8t, n)\n nineth_tweet = get_value(df_9t, n)\n tenth_tweet = get_value(df_10t, n) \n \n # Compute the sentiment of each tweet\n sa_first_tweet = sentiment_analyzer_scores(first_tweet)\n sa_second_tweet = sentiment_analyzer_scores(second_tweet)\n sa_third_tweet = sentiment_analyzer_scores(third_tweet)\n sa_fourth_tweet = sentiment_analyzer_scores(fourth_tweet)\n sa_fifth_tweet = sentiment_analyzer_scores(fifth_tweet)\n sa_sixth_tweet = sentiment_analyzer_scores(sixth_tweet)\n sa_seventh_tweet = sentiment_analyzer_scores(seventh_tweet)\n sa_eighth_tweet = sentiment_analyzer_scores(eighth_tweet)\n sa_nineth_tweet = sentiment_analyzer_scores(nineth_tweet)\n sa_tenth_tweet = sentiment_analyzer_scores(tenth_tweet)\n \n # Return the tweet contents and a pie graph of the sentiment.\n \n return html.Div([\n html.Div([\n\n# First Tweet\n html.Div([\n html.Div([\n html.Pre(str(first_tweet)),\n ], \n className = 'ten columns',\n style = {\n 'backgroundColor': 'white',\n 'box-shadow': '2px 2px 10px #ccc',\n 'padding': '10px',\n 'padding-bottom': '25px',\n 'margin': '30px',\n 'overflowX': 'scroll',\n 'fontSize': '22px',\n }\n ),\n html.Div([\n dcc.Graph(figure = piegraph_asset(sa_first_tweet))\n ],\n className = 'nine columns',\n style = {\"padding-left\": \"550px\", }\n ),\n ], \n className = 'row' \n ),\n \n# Second Tweet\n \n html.Div([\n html.Div([\n html.Pre(str(second_tweet)),\n ], \n className = 'ten columns',\n style = {\n 'backgroundColor': 'white',\n 'box-shadow': '3px 3px 10px #ccc',\n 'padding': '10px',\n 'padding-bottom': 
'25px',\n 'margin': '30px',\n 'overflowX': 'scroll',\n 'fontSize': '22px'}\n ),\n html.Div([\n dcc.Graph(figure = piegraph_asset(sa_second_tweet))\n ],\n className = 'nine columns',\n style = {\"padding-left\": \"550px\"}\n ),\n ], \n className = 'row' \n ),\n \n # Third Tweet\n \n html.Div([\n html.Div([\n html.Pre(str(third_tweet)),\n ], \n className = 'ten columns',\n style = {\n 'backgroundColor': 'white',\n 'box-shadow': '3px 3px 10px #ccc',\n 'padding': '10px',\n 'padding-bottom': '25px',\n 'margin': '30px',\n 'overflowX': 'scroll',\n 'fontSize': '22px'}\n ),\n html.Div([\n dcc.Graph(figure = piegraph_asset(sa_third_tweet))\n ],\n className = 'nine columns',\n style = {\"padding-left\": \"550px\"}\n ),\n ], \n className = 'row' \n ),\n \n # Fourth Tweet\n \n html.Div([\n html.Div([\n html.Pre(str(fourth_tweet)),\n ], \n className = 'ten columns',\n style = {\n 'backgroundColor': 'white',\n 'box-shadow': '3px 3px 10px #ccc',\n 'padding': '10px',\n 'padding-bottom': '25px',\n 'margin': '30px',\n 'overflowX': 'scroll',\n 'fontSize': '22px'}\n ),\n html.Div([\n dcc.Graph(figure = piegraph_asset(sa_fourth_tweet))\n ],\n className = 'nine columns',\n style = {\"padding-left\": \"550px\"}\n ),\n ], \n className = 'row' \n ),\n\n\n # Fifth Tweet\n \n html.Div([\n html.Div([\n html.Pre(str(fifth_tweet)),\n ], \n className = 'ten columns',\n style = {\n 'backgroundColor': 'white',\n 'box-shadow': '3px 3px 10px #ccc',\n 'padding': '10px',\n 'padding-bottom': '25px',\n 'margin': '30px',\n 'overflowX': 'scroll',\n 'fontSize': '22px'}\n ),\n html.Div([\n dcc.Graph(figure = piegraph_asset(sa_fifth_tweet))\n ],\n className = 'nine columns',\n style = {\"padding-left\": \"550px\"}\n ),\n ], \n className = 'row' \n ),\n \n\n # Sixth Tweet\n html.Div([\n html.Div([\n html.Pre(str(sixth_tweet)),\n ], \n className = 'ten columns',\n style = {\n 'backgroundColor': 'white',\n 'box-shadow': '3px 3px 10px #ccc',\n 'padding': '10px',\n 'padding-bottom': '25px',\n 'margin': '30px',\n 'overflowX': 'scroll',\n 'fontSize': '22px'}\n ),\n html.Div([\n dcc.Graph(figure = piegraph_asset(sa_sixth_tweet))\n ],\n className = 'nine columns',\n style = {\"padding-left\": \"550px\"}\n ),\n ], \n className = 'row' \n ),\n \n # Seventh Tweet\n \n html.Div([\n html.Div([\n html.Pre(str(seventh_tweet)),\n ], \n className = 'ten columns',\n style = {\n 'backgroundColor': 'white',\n 'box-shadow': '3px 3px 10px #ccc',\n 'padding': '10px',\n 'padding-bottom': '25px',\n 'margin': '30px',\n 'overflowX': 'scroll',\n 'fontSize': '22px'}\n ),\n \n html.Div([\n dcc.Graph(figure = piegraph_asset(sa_seventh_tweet))\n ],\n className = 'nine columns',\n style = {\"padding-left\": \"550px\"}\n ),\n ], \n className = 'row' \n ),\n\n # Eighth Tweet\n \n html.Div([\n html.Div([\n html.Pre(str(eighth_tweet)),\n ], \n className = 'ten columns',\n style = {\n 'backgroundColor': 'white',\n 'box-shadow': '3px 3px 10px #ccc',\n 'padding': '10px',\n 'padding-bottom': '25px',\n 'margin': '30px',\n 'overflowX': 'scroll',\n 'fontSize': '22px'}\n ),\n \n html.Div([\n dcc.Graph(figure = piegraph_asset(sa_eighth_tweet))\n ],\n className = 'nine columns',\n style = {\"padding-left\": \"550px\"}\n ),\n ], \n className = 'row' \n ),\n\n # Nineth\n \n html.Div([\n html.Div([\n html.Pre(str(nineth_tweet)),\n ], \n className = 'ten columns',\n style = {\n 'backgroundColor': 'white',\n 'box-shadow': '3px 3px 10px #ccc',\n 'padding': '10px',\n 'padding-bottom': '25px',\n 'margin': '30px',\n 'overflowX': 'scroll',\n 'fontSize': '22px'}\n ),\n html.Div([\n 
dcc.Graph(figure = piegraph_asset(sa_nineth_tweet))\n ],\n className = 'nine columns',\n style = {\"padding-left\": \"550px\"}\n ),\n ], \n className = 'row' \n ),\n\n # Tenth Tweet\n \n html.Div([\n html.Div([\n html.Pre(str(tenth_tweet)),\n ], \n className = 'ten columns',\n style = {\n 'backgroundColor': 'white',\n 'box-shadow': '3px 3px 10px #ccc',\n 'padding': '10px',\n 'padding-bottom': '25px',\n 'margin': '30px',\n 'overflowX': 'scroll',\n 'fontSize': '22px'}\n ),\n html.Div([\n dcc.Graph(figure = piegraph_asset(sa_tenth_tweet))\n ],\n className = 'nine columns',\n style = {\"padding-left\": \"550px\"}\n ),\n ], \n className = 'row' \n ),\n ], style = {'overflowY': 'scroll', 'overflowX': 'hidden',\n 'maxHeight': '105ex', 'backgroundColor' : '#eaeaea'}\n ),\n \n ])", "def sentiment(text):\n\n sentiment_dict = TextBlob(text).sentiment._asdict()\n return sentiment_dict", "def get_average_of_sentiment_scores(input_file_name):\n\n subreddit_name = input_file_name.split('_')[0]\n\n list_of_columns_to_be_graphed = ['vader_compound_score', 'vader_negative_score', 'vader_neutral_score',\n 'vader_positive_score', 'whole_comment_sentiment_flair']\n\n avg_scores = {'avg_vader_compound_score': 0, 'avg_vader_negative_score': 0, 'avg_vader_neutral_score': 0,\n 'avg_vader_positive_score': 0, 'avg_whole_comment_sentiment_flair': 0}\n\n # gets the dataframe\n df = get_df_from_csv(input_file_name)\n\n # creates date object column for matplotlib\n df['date'] = df['created_utc'].apply(lambda x: mdate.epoch2num(x))\n\n # sorts df according to created_utc\n df = df.sort_values(by=['date'])\n\n # get total number of comments\n num_comments = len(df)\n\n # avg_vader_compound_score = df['vader_compound_score'].mean()\n # avg_vader_negative_score = df['vader_negative_score'].mean()\n # avg_vader_neutral_score = df['vader_neutral_score'].mean()\n # avg_vader_positive_score = df['vader_positive_score'].mean()\n # avg_whole_comment_sentiment_flair = df['whole_comment_sentiment_flair'].mean()\n\n for col in list_of_columns_to_be_graphed:\n # print('Average ' + col + ':', df[col].mean())\n avg_scores['avg_' + col] = df[col].mean()\n\n return avg_scores", "def sentiment_analysis(self):\n train_pos = pd.read_csv(\"data/train_Arabic_tweets_positive_20190413.tsv\", sep='\\t', names=[\"label\", \"tweet\"])\n train_neg = pd.read_csv(\"data/train_Arabic_tweets_negative_20190413.tsv\", sep='\\t', names=[\"label\", \"tweet\"])\n train = pd.concat([train_pos, train_neg])\n train.tweet = train.tweet.apply(self.preprocessor).apply(tokenization).apply(lambda x: x.tokens[0])\n le = LabelEncoder()\n le.fit(train.label)\n train.label = le.transform(train.label)\n\n sentence_inds, vocab, self.num_tokens, word_index, index_word = helper.encode_tokens(train.tweet.values)\n\n\n self.embeddings_matrix = helper.load_embedding_matrix(self.num_tokens, self.embedding_size, \n word_index, self.embeddings_index)\n\n\n train_padded = pad_sequences(sentence_inds, padding=\"post\", truncating=\"post\", maxlen=100)\n\n self.X_train, self.X_valid, self.y_train, self.y_valid = train_test_split(train_padded, train.label.values, test_size=0.5,random_state=0, stratify=train.label.values)\n\n model = self.train_model()\n y_pred = model.predict(self.X_valid)\n return (np.argmax(y_pred, axis=1) == self.y_valid).sum() / self.y_valid.shape[0]", "def overall(update: Update, context: CallbackContext) -> None:\n\n today, _ = get_latest_stats_from_db()\n seven_day, rolling_avg = return_weekly_figure()\n \n logger.info(\"Getting overall stats for \" + 
str(update.message.chat_id))\n \n text = \\\n (\n \"📊*Overall stats as of \" + today['date'] + \"*\\n\\n\"\n + \"\\t\\t\\t🔢 Overall Total - \" + str('{:,}'.format(today['totalVaccinations']))\n + \"\\n\\n\\t\\t\\t🅿️ Pfizer : \" + str('{:,}'.format(today['pfizer']))\n + \"\\n\\t\\t\\t🅰️ AstraZeneca : \" + str('{:,}'.format(today['astraZeneca']))\n + \"\\n\\t\\t\\tⓂ️ Moderna : \" + str('{:,}'.format(today['moderna']))\n + \"\\n\\t\\t\\t🇯 J&J - \" + str('{:,}'.format(today['jj'])) + \"\\n\\n\"\n + \"*🧑 Total population vaccinated*\\n\\n\"\n + \"\\t\\t\\t🌓 First dose (of a two dose vaccine) - \" + str('{0:.2%}'.format(today['firstDose']/4977400)) + \"\\n\"\n + \"\\t\\t\\t🌓 Single dose vaccine - \" + str('{0:.2%}'.format(today['jj']/4977400)) + \"\\n\"\n + \"\\t\\t\\t🌝 Fully vaccinated - \" + str('{0:.2%}'.format(today['secondDose']/4977400)) + \"\\n\"\n +\"\\n\\n*🧑 12+ population vaccinated*\\n\"\n +\"\\n\\t\\t\\t🌓 First dose (of a two dose vaccine) - \" + str('{0:.2%}'.format(today['firstDose']/4183700))\n +\"\\n\\t\\t\\t🌓 Single dose vaccine - \" + str('{0:.2%}'.format(today['jj']/4183700))\n +\"\\n\\t\\t\\t🌝 Fully vaccinated - \" + str('{0:.2%}'.format(today['secondDose']/4183700)) + \"\\n\"\n + \"\\n📅 *Rolling 7 Day Stats*\\n\" \n + \"\\n\\t\\t\\t📈 Rolling 7 Day Doses - \" + str('{:,}'.format(seven_day))\n + \"\\n\\t\\t\\t💉 Average Daily Doses - \" + str('{:,}'.format(rolling_avg))\n + \"\\n\\n👇* Commands *\"\n + \"\\n\\n\\t\\t\\t/daily - Subscribe for daily updates\"\n + \"\\n\\n\\t\\t\\t/unsubscribe - Unsubscribe from updates\"\n + \"\\n\\n\\t\\t\\t/start - See all commands\"\n )\n\n update.message.reply_markdown(text)", "def analyze(self, text):\n\n tokenizer = nltk.tokenize.TweetTokenizer()\n \n tokens = tokenizer.tokenize(text)\n \n sentiment = 0\n \n for word in tokens:\n if word in self.__positives:\n sentiment += 1\n elif word in self.__negatives:\n sentiment -= 1\n \n return sentiment", "def get_average_sentiment(self, list_sentiments):\n average_polarity = 0\n for sentiment in list_sentiments: \n polarity = sentiment[1]\n average_polarity += polarity \n average_polarity /= len(list_sentiments)\n return average_polarity", "def getSentiment(s):\n headers = {\"Ocp-Apim-Subscription-Key\" : \"4c28d3a67a12442cad6666a3200c49f5\",\n \"Content-Type\" : \"application/json\", \"Accept\" : \"application/json\"}\n url = \"https://westus.api.cognitive.microsoft.com/text/analytics/v2.0/sentiment\"\n json = {\"documents\": [{\"language\": \"en\", \"id\" : \"1\"}]}\n json['documents'][0]['text'] = s\n sentiment = r.post(url, headers = headers, json = json)\n sentiment = j.loads(sentiment.text)\n return sentiment['documents'][0]['score']", "def calculate_sentiment(positive, negative):\n denominator = (positive - negative)\n numerator = (positive + negative)\n if numerator == 0:\n return 0\n return 0.268 * (denominator / numerator)", "def overall_polarity(self):\r\n d = {}\r\n d['subjectivity'] = mean(self.subjectivity)\r\n d['neg'] = mean(self.negative_sentiment)\r\n d['neu'] = mean(self.neutral_sentiment)\r\n d['pos'] = mean(self.positive_sentiment)\r\n return d", "def sentiment_analyzer(text):\n\n\tlower_text = text.lower()\n\t\t\n\thashtag_scaling = 0.3\n\texclamation_scaling = 0.5\n\tuppercase_scaling = 0.2\n\n\n\tsent_index = 0\n\n\tfor x in range(len(positive_words)):\n\t\tsent_index += lower_text.count(positive_words[x])\n\tfor x in range(len(negative_words)):\n\t\tsent_index -= lower_text.count(negative_words[x])\n\tif '!' 
in text:\n\t\tsent_index *= exclamation_scaling * lower_text.count('!') + 1\n\tif '#' in text:\n\t\tsent_index *= hashtag_scaling * lower_text.count('#') + 1\n\tsent_index *= uppercase_scaling * sum(1 for c in text if c.isupper())\n\t\t\n\treturn sent_index", "def LM_sentiment(news_df):#be sure to set tick as an argument after testing\r\n OUTPUT_FILE = f'Sentiment_Data/test_file.csv' # User defined output file to write data to\r\n L=[]\r\n #D.append(OUTPUT_FIELDS)\r\n \r\n for i in range(len(news_df)): # Uses date in DataFrame as indexing loop\r\n #print(\"Sources for this day are: \"+news_df.loc[DATE]['Media']) # getting the news sources (Find better way of Collecting financial news)\r\n articles=news_df.iloc[i]['Article'] # get articles from specified date\r\n articles= re.sub('(May|MAY)', ' ', articles) # drop all May month references; avoid conflicting with \"may\" a modal word\r\n articles=articles.upper() # make everything uppercase\r\n output_data=get_data(articles) # returning sentiment scores from function as a list \r\n output_data[0]=news_df.iloc[i].name # storing the date of articles as first entry of list \r\n L.append(output_data) # appending article info to list\r\n L=pd.DataFrame(L,columns=OUTPUT_FIELDS) # constructing DataFrame from article data\r\n L.set_index('date',inplace=True) # setting the index in place\r\n return L # returning the DataFrame\r", "def test_get_avg_link_sentiment_scores():\n print('average sentiment values when grouped by link_id')\n avg_scores = get_avg_link_sentiment_scores('politics_30_months_comments_cleaned_standardized_vader_flair.csv')\n for key, value in avg_scores.items():\n print(key, value)\n print()", "def sentiment_analyzer_scores(sentence):\n score = get_sentiment_analyzer().polarity_scores(sentence)\n return 'Negative Score:', score['neg'], 'Neutral Score:', score['neu'], 'Positive Score:', score['pos'], 'Compound Score:', score['compound']", "def test_print_rolling_average_of_sentiment_scores():\n # sizes = [10, 100, 500, 1000, 5000, 10000]\n sizes = [500]\n for size in sizes:\n print_rolling_average_of_sentiment_scores('politics_30_months_comments_cleaned_standardized_vader_flair.csv',\n size)", "def generateSentimentAnalysis(self, fs_db, cleaned_submissions, cleaned_tweets):\n all_posts = []\n\n for p in range(len(cleaned_submissions)):\n print('reddit', self.clean(cleaned_submissions[p][3]))\n all_posts.append(self.clean(cleaned_submissions[p][3]))\n\n for t in range(len(cleaned_tweets)):\n print('twitter', self.clean(cleaned_tweets[t][2]))\n all_posts.append(self.clean(cleaned_tweets[t][2]))\n \n if len(all_posts) == 0:\n raise Exception(\"No crawled data\")\n\n count = 0\n\n for c in all_posts:\n blob = TextBlob(c)\n\n polarity = blob.sentiment.polarity\n subjectivity = blob.sentiment.subjectivity\n\n doc_ref = fs_db.collection(u'sentimentAnalysis').document('first')\n if (polarity != 0 and subjectivity != 0):\n count += 1\n doc_ref.set({str(count): {'post': c, 'polarity': polarity, 'subjectivity':subjectivity}}, merge=True)\n\n with open('wc.txt', 'w') as output:\n for data in all_posts:\n output.write('%s\\n' % data)", "def sentiment_aspects(docs: Iterable[tokens.Doc]) -> List[collections.Counter]:\n sent_dict_list = []\n start_time = time.time()\n\n for doc in docs:\n sent_dict = collections.Counter()\n for token in doc:\n # check if the word is an opinion word, then assign sentiment\n if token.text.lower() in _OPINION_WORDS:\n sentiment = 1 if token.text.lower() in _POS_WORDS else -1\n if (token.dep_ == \"advmod\"):\n # if 
target is an adverb modifier (i.e. pretty, highly, etc.)\n # but happens to be an opinion word, ignore and pass\n continue\n\n elif (token.dep_ == \"amod\"):\n sent_dict[token.head.text.lower()] += sentiment\n\n else:\n for child in token.children:\n # if there's a adj modifier (i.e. very, pretty, etc.) add\n # more weight to sentiment\n # This could be better updated for modifiers that either\n # positively or negatively emphasize\n if _is_opinion_mod(child):\n sentiment *= 1.5\n # check for negation words and flip the sign of sentiment\n if child.dep_ == \"neg\":\n sentiment *= -1\n for child in token.children:\n if (token.pos_ == \"VERB\") & (child.dep_ == \"dobj\"):\n # if verb, check if there's a direct object\n sent_dict[child.text.lower()] += sentiment\n # check for conjugates (a AND b), then add both to dictionary\n subchildren = []\n conj = 0\n for subchild in child.children:\n if subchild.text.lower() == \"and\": conj=1\n if (conj == 1) and (subchild.text.lower() != \"and\"):\n subchildren.append(subchild.text.lower())\n conj = 0\n for subchild in subchildren:\n sent_dict[subchild] += sentiment\n\n # check for negation\n for child in token.head.children:\n noun = \"\"\n if _is_opinion_mod(child):\n sentiment *= 1.5\n if (child.dep_ == \"neg\"):\n # check for negation words and flip the sign of sentiment\n sentiment *= -1\n\n # check for nouns\n for child in token.head.children:\n noun = \"\"\n if (child.pos_ == \"NOUN\") and (child.text not in sent_dict):\n noun = child.text.lower()\n # Check for compound nouns\n for subchild in child.children:\n if subchild.dep_ == \"compound\":\n noun = subchild.text.lower() + \" \" + noun\n sent_dict[noun] += sentiment\n sent_dict_list.append(collections.Counter(sent_dict))\n\n print(\"\\nFound aspects on {} reviews.\".format(len(sent_dict_list)))\n print(time.time() - start_time)\n return sent_dict_list", "def tweet_sentiment_analysis(self, tweet):\n analysis = TextBlob(self.clean_tweet(tweet))\n\n if analysis.sentiment.polarity > 0:\n return ['Positive', analysis.sentiment.polarity, analysis.sentiment.subjectivity]\n elif analysis.sentiment.polarity == 0:\n return ['Neutral', analysis.sentiment.polarity, analysis.sentiment.subjectivity]\n else:\n return ['Negative', analysis.sentiment.polarity, analysis.sentiment.subjectivity]", "def predictionSentiment(company):\n #change the key for the API in here. 
This is the AlchemyDataNews\n KEY = '2190f450728492113ce4e5b880a72eefbea73308'\n alchemy_data_news = AlchemyDataNewsV1(api_key=KEY)\n timeBegin ='now-2d'\n timeEnd = 'now'\n company_query = '|text=' + company + ',type=company|'\n results = alchemy_data_news.get_news_documents(\n start=timeBegin,\n end=timeEnd,\n return_fields=['enriched.url.title',\n 'enriched.url.entities.entity.sentiment.type',\n 'enriched.url.entities.entity.sentiment.score'\n ],\n query_fields={'q.enriched.url.enrichedTitle.entities.entity': company_query})\n r = json.dumps(results, indent=2)\n f = open(\"/home/kid/Github/Oracle/watson/jsonp2.json\", 'w')\n f.write(str(r))", "def sentiment_analysis(con, cur):\n # Retrieve data from DB\n description = np.array(select(cur,\"DESCRIPTION\", \"data11\"))\n description_trans = np.array(select(cur,\"DESCRIPTION_TRANSLATED\", \"data11\")) \n \n description_list = []\n sentimentscore_list=[]\n magnitude_list=[]\n sentences_score_list=[]\n sentences_magnitude_list=[]\n sum= 0\n \n # Create a Language client\n language_client = google.cloud.language.LanguageServiceClient()\n \n # Check whether to use original or translated description\n for i in range(len(description)):\n if description_trans[i] == '':\n descr = description[i]\n else:\n descr = description_trans[i]\n \n document = google.cloud.language.types.Document(\n content=descr,\n type=google.cloud.language.enums.Document.Type.PLAIN_TEXT)\n # Use Language to detect the sentiment of the text\n try:\n response = language_client.analyze_sentiment(document=document)\n except InvalidArgument as e:\n print(\"Invalid: \", i)\n sum += 1\n continue\n \n #SAVE SENTENCE ATTRIBUTES\n score_all=[]\n magnitude_all=[]\n for y in range(len(response.sentences)):\n score_all.append((response.sentences[y].sentiment.score))\n magnitude_all.append((response.sentences[y].sentiment.magnitude))\n \n sentences_score_list.append(repr(score_all))\n sentences_magnitude_list.append(repr(magnitude_all))\n # use eval() to turn it back into a list of floats\n \n description_list.append(descr)\n sentiment = response.document_sentiment\n sentimentscore_list.append(sentiment.score)\n magnitude_list.append(sentiment.magnitude)\n print ('Progress: {}/{} rows processed'.format(i, len(description)))\n \n # Save all scores to the DB\n print(\"Sum of skipped rows: \", sum)\n cur.execute(\"DROP TABLE IF EXISTS temp\")\n cur.execute(\"CREATE TABLE temp(DESCRIPTIONS text, SENTIMENTSCORE numeric, MAGNITUDE numeric, SENTENCESCORES text, SENTENCEMAGNITUDES text)\")\n \n def insert(d, ss, m, sens, senm):\n cur.execute(\"INSERT INTO temp (DESCRIPTIONS, SENTIMENTSCORE, MAGNITUDE, SENTENCESCORES, SENTENCEMAGNITUDES) VALUES (?, ?, ?, ?, ?)\", (d, ss, m, sens, senm))\n \n for d, ss, m, sens, senm in zip(description_list, sentimentscore_list, magnitude_list, sentences_score_list, sentences_magnitude_list):\n insert(d, ss, m, sens, senm)\n \n cur.execute(\"DROP TABLE IF EXISTS data22\")\n cur.execute(\"CREATE TABLE data22 AS SELECT success.*, temp.SENTIMENTSCORE, temp.MAGNITUDE, temp.SENTENCESCORES, temp.SENTENCEMAGNITUDES FROM success, temp WHERE temp.DESCRIPTIONS IN (success.DESCRIPTION, success.DESCRIPTION_TRANSLATED)\")\n con.commit()", "def analyse_reddit(self):\r\n currency_codes = ['ARS', 'BHD', 'BWP', 'BRL', 'BND', 'BGN', 'CLP', 'CNY', 'COP', 'HRK', 'CZK', 'DKK', 'HKD',\r\n 'ISK', 'IDR', 'IRR', 'ILS', 'KZT', 'KRW', 'KWD', 'LYD', 'MYR', 'MUR', 'MXN', 'NPR', 'NZD',\r\n 'NOK', 'OMR', 'PKR', 'PHP', 'PLN', 'QAR', 'RON', 'RUB', 'SAR', 'ZAR', 'LKR', 'SEK', 'CHF',\r\n 'TWD', 
'THB', 'TTD', 'TRY', 'AED', 'VEF', 'AUD', 'CAD', 'EUR', 'HUF', 'INR', 'JPY', 'GBP',\r\n 'USD']\r\n\r\n # Word to determine the post\r\n word = ['all time high', 'positive', 'high', 'back up', 'peak', 'bounding off', 'playing well', 'drop',\r\n 'skyrocket']\r\n counter = 0\r\n total = 0\r\n today = datetime.date.today()\r\n first = today.replace(day=1)\r\n thisMonth = today.strftime(\"%b\")\r\n lastMonth = (first - datetime.timedelta(days=1)).strftime(\"%b\")\r\n # Reddit posts from this month and last month are accounted for analysis\r\n if self.choice in currency_codes:\r\n with open('forex.csv', \"rt\", encoding='utf-8') as f:\r\n reader = csv.DictReader(f, delimiter=\",\")\r\n for row in reader:\r\n if self.choice in row['Currency']:\r\n if thisMonth or lastMonth in row['Date']:\r\n # Count the total number of post scraped\r\n total += 1\r\n for i in word:\r\n # If the post contain the word, increase the counter\r\n if i in row[\"Title\"]:\r\n counter += 1\r\n # Calculate the percentage\r\n if counter != 0:\r\n percentage = (counter / total) * 100\r\n percentage = round(percentage, 2)\r\n return percentage\r\n else:\r\n return 0", "def sentiment_analysis(name, dictionary):\n\ttone_analyzer = ToneAnalyzerV3(\n\t\t username='2ed2f0c6-1722-472d-9126-224897b991af',\n\t\t password='UcuSde1YmeK6',\n\t\t version='2016-05-19')\n\tl = open(name + '.txt')\n\tlines = l.readlines()\n\tfeel_dict = {'Anger':1.0,'Fear':2.0, 'Sadness':3.0, 'Disgust':4.0,'Joy':5.0, 'Excitement':6.0}\n\tdictionary[name] = []\n\tfor i in lines:\n\t\t#print('-----------------')\n\t\t#print(i)\n\t\tmax_score = 0.0\n\t\tmax_feel = ''\n\t\ttone = tone_analyzer.tone(i, 'emotion')\n\t\tfor feel in tone['document_tone']['tone_categories']:\n\t\t\tfor feeling in feel['tones']:\n\t\t\t\tif feeling['score'] > max_score:\n\t\t\t\t\tmax_score = feeling['score']\n\t\t\t\t\tmax_feel = feeling['tone_name']\n\t\t#print(max_score, max_feel)\n\t\t#blob1 = TextBlob(i, pos_tagger=PatternTagger(), analyzer=PatternAnalyzer())\n\t\tif max_feel != '':\n\t\t\ttweet_tbu = db.Tweet.objects(rating=feel_dict[max_feel]).first()\n\t\t\tdict_tbu = {}\n\t\t\tif tweet_tbu:\n\t\t\t\tdict_tbu = mongo_to_dict(tweet_tbu)\n\t\t\t\tprint('exists')\n\t\t\t\tprint(dict_tbu)\n\t\t\t\tif max_feel != '':\n\t\t\t\t\tnew_dict = {}\n\t\t\t\t\tnew_dict['tweet'] = dict_tbu['tweet']\n\t\t\t\t\tnew_dict['tweet'].append(i[0:-2])\n\t\t\t\t\ttweet_tbu.update(**new_dict)\n\t\t\t\t\ttweet_tbu.reload()\n\t\t\telse:\n\t\t\t\tprint('not exists - with max')\n\t\t\t\tnew_dict = {}\n\t\t\t\tnew_dict['tweet'] = [i[0:-1]]\n\t\t\t\tif max_feel != '':\n\t\t\t\t\tnew_dict['rating'] = feel_dict[max_feel]\n\t\t\t\telse:\n\t\t\t\t\tnew_dict['rating'] = 0.0\n\t\t\t\tprint(new_dict)\n\t\t\t\tnew_tweet = db.Tweet(**new_dict)\n\t\t\t\tnew_tweet.save()\n\t\telse:\n\t\t\tprint('not exists - without')\n\t\t\tnew_dict = {}\n\t\t\tnew_dict['tweet'] = [i[0:-1]]\n\t\t\tif max_feel != '':\n\t\t\t\tnew_dict['rating'] = feel_dict[max_feel]\n\t\t\telse:\n\t\t\t\tnew_dict['rating'] = 0.0\n\t\t\tprint(new_dict)\n\t\t\tnew_tweet = db.Tweet(**new_dict)\n\t\t\tnew_tweet.save()\n\tresult = db.Tweet.objects()\n\treturn(result)", "def get_feature_set_SC(tweet, sentimentvalues):\n pos_tag_freq = {}\n additional_freq = {}\n for phrase in tweet.tagged_words:\n for word in phrase:\n try:\n tag = word['pos']\n pos_tag_freq[tag] = pos_tag_freq.get(tag, 0) + 1\n# if tag=='PRtinf':\n# pos_tag_freq[tag] = pos_tag_freq.get(tag, 0) + 1\n# elif tag=='ADJS':\n# pos_tag_freq[tag] = pos_tag_freq.get(tag, 0) + 1\n# elif 
tag=='ADJ':\n# pos_tag_freq[tag] = pos_tag_freq.get(tag, 0) + 1\n# elif tag=='NP':\n# pos_tag_freq[tag] = pos_tag_freq.get(tag, 0) + 1\n# elif tag=='DET':\n# pos_tag_freq[tag] = pos_tag_freq.get(tag, 0) + 1\n# elif tag=='P':\n# pos_tag_freq[tag] = pos_tag_freq.get(tag, 0) + 1\n if tag in ADJECTIVES:\n additional_freq['adjectives'] = additional_freq.get(tag, 0) + 1\n elif tag in ADVERBS: \n additional_freq['adverbs'] = additional_freq.get(tag, 0) + 1\n elif tag in PRONOUNS:\n additional_freq['pronoun'] = 1\n except KeyError:\n continue\n for key in pos_tag_freq.keys():\n pos_tag_freq[key] = pos_tag_freq[key]*1.0\n #number of adjectives in sentence, number of adverbs in sentence(except ikke), pronoun in sentence(binary) \n #Number of exclamation marks, number of emoticons,\n emoticons = tweet.nrof_happyemoticons+tweet.nrof_sademoticons\n if emoticons>0:\n additional_freq['emoticons'] = emoticons*1.0\n if tweet.nrof_exclamations>0:\n additional_freq['exclamations'] = tweet.nrof_exclamations*1.0\n \n #Add lexicon values\n #total subjectivity score from word polarities, total objectivity score, number of subjective words, number of objective words, e\n sub_score = 0.0\n obj_score = 0.0\n nrof_subwords = 0\n nrof_objwords = 0\n for word in sentimentvalues.keys():\n if sentimentvalues[word][0]>0:\n sub_score = sub_score + sentimentvalues[word][0]\n nrof_subwords = nrof_subwords + 1\n if sentimentvalues[word][1]>0:\n sub_score = sub_score + sentimentvalues[word][1]\n nrof_subwords = nrof_subwords + 1\n if sentimentvalues[word][2]>0:\n obj_score = obj_score + sentimentvalues[word][2]\n nrof_objwords = nrof_objwords + 1\n if sub_score>0:\n additional_freq[\"sub_score\"] = sub_score+1.0\n if obj_score>0:\n additional_freq[\"obj_score\"] = obj_score+1.0\n if nrof_subwords>0:\n additional_freq[\"subjective_words\"] = nrof_subwords*1.0\n if nrof_objwords>0:\n additional_freq[\"objective_words\"] = nrof_objwords*1.0\n \n #Concatenate the dicts\n features= dict(pos_tag_freq.items() + additional_freq.items())\n \n return features", "def analyze(self, text):\n tknzr = nltk.tokenize.casual.TweetTokenizer(preserve_case=True, reduce_len=False, strip_handles=False)\n tknTxt = tknzr.tokenize(text)\n sentiment = 0\n \n for i in range(len(tknTxt)):\n if tknTxt[i] in self.posTxt:\n #print(\"POS\")\n #print(tknTxt[i])\n sentiment += 1\n elif tknTxt[i] in self.negTxt:\n #print(\"NEG\")\n #print(tknTxt[i])\n sentiment -= 1\n \n return sentiment", "def __classify_using_historical_data(self, text: str) -> tuple:\n score = self.__calculate_gcloud_score(text=text)\n sentiment = \"\"\n\n if self.__threshold_pos is not None and self.__threshold_neg is not None:\n sentiment = InterfaceLabel.label_sentiment(score=score,\n thresholds=(self.__threshold_neg, self.__threshold_pos))\n\n return score, sentiment", "def get_sentiment(sentence):\n\tblob = tb.TextBlob(sentence.decode('utf-8','ignore'))\n\treturn blob.sentiment[0]", "def get_sentiment_trends(order):\r\n\r\n # Get date seven days ago\r\n seven_days_ago = datetime.now() - timedelta(days=7)\r\n\r\n # Get raw PyMongo collection\r\n collection = Tweet._get_collection()\r\n\r\n # Perform aggregate query\r\n result = collection.aggregate([\r\n {\r\n \"$match\":\r\n {\r\n \"tweet_time\": {\"$gt\": seven_days_ago}\r\n }\r\n },\r\n {\r\n \"$group\":\r\n {\r\n \"_id\": \"$keyword_search_term\",\r\n \"average\":\r\n {\r\n \"$avg\": \"$sentiment_score\"\r\n }\r\n }\r\n },\r\n {\r\n \"$sort\":\r\n {\r\n \"average\": order\r\n }\r\n },\r\n {\r\n \"$limit\": 10\r\n }\r\n ])\r\n\r\n 
return result", "def __sentiment_scan(self, title, text):\n\n return (pattern.en.sentiment(title), pattern.en.sentiment(text))", "def test(self, tweets, without_neutral=True):\n correct = 0\n total = 0\n for tweet in tweets:\n assert tweet.polarity is not None\n if tweet.is_neutral() and without_neutral:\n continue\n\n if tweet.polarity == self.predict_sentiment_enum(tweet, without_neutral):\n correct += 1\n\n total += 1\n\n print(\"correct = \", correct, \"total = \", total)\n return correct / total", "def analyze_sentiment(test_files_list: list, classification_dict: dict):\n\n # Lexicon words used for sentiment analysis\n pos_lex_words = get_lexicon_words(POS_LEXICON_DIR_PATH)\n neg_lex_words = get_lexicon_words(NEG_LEXICON_DIR_PATH)\n\n classification_scores = []\n true_labels = []\n\n for file in test_files_list:\n \n # Read the file to analyze\n with open(file) as f:\n sentences = f.readlines()\n\n # tokenize the sentences in the file\n tokens = []\n for sentence in sentences:\n tokens += tokenize(sentence) # Do not want to remove duplicate words, so we have more data\n \n # Get number of positive and negative words found in the file\n positive_words, negative_words = get_pos_neg_word_count(tokens, pos_lex_words, neg_lex_words)\n \n # Keep an array of all the scores we have (negative, positive)\n classification_score = [negative_words, positive_words]\n classification_scores.append(classification_score)\n \n # Maintain the true answer (negative, positive)\n true_label = [0, 0]\n if file.split('/')[1] == 'pos': true_label[1] += 1\n else: true_label[0] += 1\n true_labels.append(true_label)\n\n # Print for submitting assignment\n if true_label[0]: #file is actually negative\n classification_dict['neg'][file.split('/')[2]] = 'neutral'\n if positive_words > negative_words: classification_dict['neg'][file.split('/')[2]] = 'positive'\n else: classification_dict['neg'][file.split('/')[2]] = 'negative'\n else:\n classification_dict['pos'][file.split('/')[2]] = 'neutral'\n if positive_words > negative_words: classification_dict['pos'][file.split('/')[2]] = 'positive'\n else: classification_dict['pos'][file.split('/')[2]] = 'negative'\n\n \n return np.array(classification_scores), np.array(true_labels)", "def sentiment_score(text, loaded_model = loaded_model, vectorizer = tokenizer):\n # tweet_tf_idf = vect_char.transform(text)\n tweet_token = tokenizer.texts_to_sequences(text)\n tweet_token = pad_sequences(tweet_token, maxlen = 40)\n sentiment = loaded_model.predict_proba(tweet_token)\n neg_prob = sentiment[0][0]\n pos_prob = sentiment[0][1]\n return neg_prob, pos_prob", "def get_whole_and_per_sentence_flair_sentiments(list_of_comments):\n\n for comment in list_of_comments:\n result_sum = get_whole_flair_sentiment(comment)\n print(comment)\n print('Whole comment sentiment:', result_sum)\n print()\n sentence_score_list = get_sentence_sentiments(comment)\n print(comment)\n print('per sentence sentiment:', sentence_score_list)\n print()", "def _evaluate_sentiment(self, text):\n na_record = {\n 'probability': {\n 'neg': numpy.nan, \n 'pos': numpy.nan, \n 'neutral': numpy.nan},\n 'label': numpy.nan} \n if text is not numpy.nan:\n payload = {'text': text}\n r = requests.post(\"http://text-processing.com/api/sentiment/\", data=payload)\n if int(r.status_code) == 503:\n print(\"We're being throttled! 
Going to sleep for 55672 seconds.\")\n time.sleep(55672) # delays for 5 seconds\n sentiment_data = json.loads(r.text)\n #except ValueError:\n #print(text)\n #print(r.status_code)\n #print(r.text)\n #return na_record\n \n self.record += 1\n return sentiment_data\n else:\n print(text)\n print(type(text))\n return na_record", "def classify(tweets, positives, negatives):\n sentiment_list = makelist(tweets, positives, negatives)\n n_positives = 0\n n_negatives = 0\n n_neutral = 0\n\n # Counts the amount of times each number is in sentiment_list\n for i in sentiment_list:\n if i == 1:\n n_positives += 1\n elif i == -1:\n n_negatives += 1\n else:\n n_neutral += 1\n\n print(\"Trump's tweets classified:\")\n print(\" positive: {}\".format(n_positives))\n print(\" negative: {}\".format(n_negatives))\n print(\" neutral : {}\".format(n_neutral))", "def sent_features(tweet):\n twitter_objs = count_twitter_objs(tweet)\n tweet=clean_tweet(tweet) \n sentiment = sentiment_analyzer.polarity_scores(tweet)\n #Get text only\n words = preprocess(tweet) \n syllables = textstat.syllable_count(words)\n num_chars = sum(len(w) for w in words)\n num_chars_total = len(tweet)\n num_terms = len(tweet.split())\n num_words = len(words.split())\n avg_syl = round(float((syllables+0.001))/float(num_words+0.001),4)\n num_unique_terms = len(set(words.split()))\n \n ###Modified FK grade, where avg words per sentence is just num words/1\n FKRA = round(float(0.39 * float(num_words)/1.0) + float(11.8 * avg_syl) - 15.59,1)\n ##Modified FRE score, where sentence fixed to 1\n FRE = round(206.835 - 1.015*(float(num_words)/1.0) - (84.6*float(avg_syl)),2)\n \n \\\n retweet = 0\n if \"rt\" in words:\n retweet = 1\n features = [FKRA, FRE,syllables, avg_syl, num_chars, num_chars_total, num_terms, num_words,\n num_unique_terms, sentiment['neg'], sentiment['pos'], sentiment['neu'], sentiment['compound'],\n twitter_objs[2], twitter_objs[1],\n twitter_objs[0], retweet]\n return features", "def get_sentiment(ticker_symbol, page=None):\n if page is None:\n page = scrape_page(BASE_URL + ticker_symbol)\n\n #get strings\n bullish_sentiment = get_bullish_sentiment(ticker_symbol, page)\n bearish_sentiment = get_bearish_sentiment(ticker_symbol, page)\n price = get_price(ticker_symbol, page)\n name = get_name(ticker_symbol, page)\n\n title = get_title(ticker_symbol, page)\n article = get_article(ticker_symbol, page)\n link = get_link(ticker_symbol, page)\n\n my_trader = Robinhood()\n logged_in = my_trader.login(username=username, password=password)\n description = my_trader.get_fundamentals(ticker_symbol)\n news = my_trader.get_news(ticker_symbol)\n\n #see strings for verification\n #print(bullish_sentiment);\n #print(bearish_sentiment);\n\n #find digits in string\n bull=int(''.join(list(filter(str.isdigit, bullish_sentiment))))\n bear=int(''.join(list(filter(str.isdigit, bearish_sentiment))))\n #price=int(''.join(list(filter(str.isdigit, price))))\n #print(bull)\n #print(bear)\n\n\n\n return Response({\"bullish\": bull, \"bearish\": bear, \"price\":price, \"name\":name, \"description\":description, \"news\":news})\n\n '''\n if bull>bear:\n print(\"bull!\")\n import example\n else:\n return None\n '''\n #if bullish_sentiment:\n # return bullish_sentiment, get_bearish_sentiment(ticker_symbol, page)\n\n #else:\n # return None", "def sentiment():\n\n request_json = request.json\n power = request_json['power']\n angle = request_json['angle']\n\n print(power, angle)\n\n resp_dict = dict()\n resp_dict['kick'] = 'ok'\n\n resp = Response(json.dumps(resp_dict), 
status=200)\n\n return resp", "def get_tweet_sentiment(self, tweet):\n\n analyzer = SentimentIntensityAnalyzer()\n vs = analyzer.polarity_scores(tweet)\n # set sentiment\n if vs['compound'] >= 0.05:\n return 'positive'\n elif -0.5 < vs['compound'] < 0.05:\n return 'neutral'\n else:\n return 'negative'", "def sentiment(sentences: List[str]) -> List[List[float]]:\n try:\n _create_unverified_https_context = ssl._create_unverified_context\n except AttributeError:\n pass\n else:\n ssl._create_default_https_context = _create_unverified_https_context\n\n nltk.download('vader_lexicon')\n darth = SentimentIntensityAnalyzer()\n collector = []\n for sentence in sentences:\n ss = darth.polarity_scores(sentence)\n temp = []\n for k in ss.values():\n temp.append(k)\n collector.append(temp)\n return collector", "def display_inference(ticker: str, num: int, export: str = \"\"):\n df_tweets = twitter_model.load_analyze_tweets(ticker, num)\n\n if df_tweets.empty:\n return\n\n # Parse tweets\n dt_from = dparse.parse(df_tweets[\"created_at\"].values[-1])\n dt_to = dparse.parse(df_tweets[\"created_at\"].values[0])\n print(f\"From: {dt_from.strftime('%Y-%m-%d %H:%M:%S')}\")\n print(f\"To: {dt_to.strftime('%Y-%m-%d %H:%M:%S')}\")\n\n print(f\"{len(df_tweets)} tweets were analyzed.\")\n dt_delta = dt_to - dt_from\n n_freq = dt_delta.total_seconds() / len(df_tweets)\n print(f\"Frequency of approx 1 tweet every {round(n_freq)} seconds.\")\n\n pos = df_tweets[\"positive\"]\n neg = df_tweets[\"negative\"]\n\n percent_pos = len(np.where(pos > neg)[0]) / len(df_tweets)\n percent_neg = len(np.where(pos < neg)[0]) / len(df_tweets)\n total_sent = np.round(np.sum(df_tweets[\"sentiment\"]), 2)\n mean_sent = np.round(np.mean(df_tweets[\"sentiment\"]), 2)\n print(f\"The summed compound sentiment of {ticker} is: {total_sent}\")\n print(f\"The average compound sentiment of {ticker} is: {mean_sent}\")\n print(\n f\"Of the last {len(df_tweets)} tweets, {100*percent_pos:.2f} % had a higher positive sentiment\"\n )\n print(\n f\"Of the last {len(df_tweets)} tweets, {100*percent_neg:.2f} % had a higher negative sentiment\"\n )\n print(\"\")\n export_data(export, os.path.dirname(os.path.abspath(__file__)), \"infer\", df_tweets)", "def add_sentiment(self):\n self.record = 0\n letter_series = self.dataframe.letter \n sentiment_call = lambda letter_text: self._evaluate_sentiment(letter_text)\n sentiment_data = letter_series.map(sentiment_call)\n self.dataframe['sentiment'] = sentiment_data\n self._unpack_sentiment_data()", "def get_sentiment(desc):\n # create TextBlob object of passed tweet text\n analysis = TextBlob(desc)\n # set sentiment\n if analysis.sentiment.polarity > 0:\n return 'positive'\n elif analysis.sentiment.polarity == 0:\n return 'neutral'\n else:\n return 'negative'", "def sentiment_analysis(df):\n analyzer = SentimentIntensityAnalyzer()\n polarity = []\n for tweet in df['clean_text'].astype(str):\n sentiment = analyzer.polarity_scores(tweet)\n polarity.append(sentiment['compound'])\n df['sentiment'] = pd.Series(polarity)\n return df", "def analyse_tweet(self, tweet):\r\n sentiment = 0\r\n subjects = []\r\n\r\n is_comparison = False # sentiment will be the LHS of the comparison\r\n seen_not = False\r\n for word in myparser.parse(tweet,self.company_names,True):\r\n if word == \"not\" or word == \"don't\":\r\n seen_not = True\r\n elif word in self.positive_words:\r\n sentiment = sentiment + 1\r\n elif word in self.negative_words:\r\n sentiment = sentiment - 1\r\n if word in self.company_names:\r\n subjects += [word]\r\n 
for (p, c) in self.product_names:\r\n if word == p:\r\n subjects += [c]\r\n for (c,s) in self.comparisons:\r\n if word == c:\r\n sentiment = s\r\n is_comparison = True\r\n if seen_not:\r\n sentiment = -sentiment\r\n\r\n #print((tweet, subjects, sentiment, is_comparison))\r\n\r\n if is_comparison:\r\n subjects += [None, None]\r\n return[(subjects[0], sentiment), (subjects[1], -sentiment)]\r\n else:\r\n return [(sub, sentiment) for sub in subjects]", "def analyze():\n content = request.get_json()\n if model is None:\n return\n max_seq_length = model.max_seq_length\n test_data = content['text']\n data, seq_lengths, targets = prepare_text(\n test_data, max_seq_length, vocab_mapping)\n input_feed = {}\n input_feed[model.seq_input.name] = data\n input_feed[model.target.name] = targets\n input_feed[model.seq_lengths.name] = seq_lengths\n output_feed = [model.y]\n outputs = sess.run(output_feed, input_feed)\n score = np.argmax(outputs[0])\n probability = outputs[0].max(axis=1)[0]\n message = 'Value of sentiment: '\n if score > 0:\n message = message + 'positive'\n else:\n message = message + 'negative'\n message = message + ' with probability: ' + str(probability)\n result = json.dumps({\n 'score': str(score),\n 'probability': str(probability)\n })\n\n resp = Response(response=result, status=200, mimetype='application/json')\n\n return resp", "def sentiment(sense, out_scores, out_labels, model, max_decimals=6, lexicon=None):\n\n if not lexicon:\n lexicon = util.PickledLexicon(model)\n # Otherwise use pre-loaded lexicon (from catapult)\n\n sense = util.read_annotation(sense)\n result_scores = {}\n result_labels = {}\n\n for token in sense:\n # Get set of senses for each token and sort them according to their probabilities\n token_senses = [tuple(s.rsplit(util.SCORESEP, 1)) if util.SCORESEP in s else (s, -1.0)\n for s in sense[token].split(util.DELIM) if s]\n token_senses.sort(key=lambda x: x[1], reverse=True)\n\n # Lookup the sentiment score for the most probable sense and assign a sentiment label\n if token_senses:\n best_sense = token_senses[0][0]\n score = lexicon.lookup(best_sense, None)\n else:\n score = None\n\n if score:\n result_scores[token] = score\n result_labels[token] = SENTIMENT_LABLES.get(int(score))\n else:\n result_scores[token] = None\n result_labels[token] = None\n\n util.write_annotation(out_scores, result_scores)\n util.write_annotation(out_labels, result_labels)", "def run(self, input_type, file_name):\n data = self.get_data(file_name)\n\n sentiment = dict()\n mood = dict()\n emoticon = dict()\n\n for line in data:\n weight = 1\n # Twitter data has a weight defined before the |\n if input_type == \"Twitter\":\n columns = line.split(\"|\")\n weight += int(columns[0])\n # Everything but the weight at the beginning\n line = '|'.join(columns[1:])\n\n # Prepare data for analysis\n sentances = self.prepare_data(line)\n\n # Perform analysis\n sentiment_val = self.get_sentiment(sentances)\n mood_val = self.get_mood(sentances)\n emoticon_val = self.get_emoticons_value(line)\n\n # Add each sentiment value to a dictionary along with its weight\n sentiment[sentiment_val] = weight if sentiment_val not in sentiment else sentiment[sentiment_val] + weight\n # Add results to mood totals\n for m, count in mood_val.items():\n mood[m] = count if m not in mood else mood[m] + count\n # Add results to emote totals\n for e in emoticon_val:\n emoticon[e] = 1 if e not in emoticon else emoticon[e] + 1\n\n return sentiment, mood, emoticon", "def get_tweet_sentiment(self, tweet):\n # create TextBlob 
object of passed tweet text\n analysis = TextBlob(self.clean_tweet(tweet))\n\n # set sentiment\n if analysis.sentiment.polarity > 0:\n return 'positive'\n elif analysis.sentiment.polarity == 0:\n return 'neutral'\n else:\n return 'negative'", "def get_tweet_sentiment(self, tweet):\r\n # create TextBlob object of passed tweet text\r\n polarity = TextBlob(self.clean_tweet(tweet)).sentiment.polarity\r\n if polarity > 0:\r\n return 1.0\r\n if polarity < 0:\r\n return -1.0\r\n return 0", "def get_feature_set_SC2(tweet, sentimentvalues):\n pos_tag_freq = {}\n additional_freq = {}\n for phrase in tweet.tagged_words:\n for word in phrase:\n try:\n tag = word['pos']\n pos_tag_freq[tag] = pos_tag_freq.get(tag, 0) + 1\n if tag in ADJECTIVES:\n additional_freq['adjectives'] = additional_freq.get(tag, 0) + 1\n elif tag in ADVERBS: \n additional_freq['adverbs'] = additional_freq.get(tag, 0) + 1\n elif tag in PRONOUNS:\n additional_freq['pronoun'] = 1\n except KeyError:\n continue\n for key in pos_tag_freq.keys():\n pos_tag_freq[key] = pos_tag_freq[key]*1.0\n #number of adjectives in sentence, number of adverbs in sentence(except ikke), pronoun in sentence(binary) \n #Number of exclamation marks, number of emoticons,\n emoticons = tweet.nrof_happyemoticons+tweet.nrof_sademoticons\n if emoticons>0:\n additional_freq['emoticons'] = emoticons*1.0\n if tweet.nrof_exclamations>0:\n additional_freq['exclamations'] = tweet.nrof_exclamations*1.0\n \n #Add lexicon values\n #total subjectivity score from word polarities, total objectivity score, number of subjective words, number of objective words, e\n sub_score = sentimentvalues[0]+sentimentvalues[1]\n obj_score = sentimentvalues[2]\n if sub_score>0:\n additional_freq[\"sub_score\"] = sub_score+1.0\n if obj_score>0:\n additional_freq[\"obj_score\"] = obj_score+1.0\n \n #Concatenate the dicts\n features= dict(pos_tag_freq.items() + additional_freq.items())\n \n return features", "def performSupervisedSentimentAnalysis(data):\n processed_tweets = data[4] # get the complete dataset to work on\n labels = getTweetsLabels()[1] # get the labels of the dataset\n tweets = []\n for pos in range(len(processed_tweets)):\n tweets.append(\n (processed_tweets[pos], labels[pos])) # store each tweet and its corresponding sentiment label in one list\n split_perc = 0.1 # specify the percentage of the dataset splitting into train and test sets (10% for training set)\n split_size = int(len(tweets) * split_perc) # specify the size of the split\n train_tweets, test_tweets = tweets[split_size:], tweets[:split_size] # split the dataset into train and test sets\n sentimentAnalysis(train_tweets, test_tweets) # perform the sentiment analysis based on supervised approaches", "def analyze(content):\r\n client = language.LanguageServiceClient()\r\n\r\n document = types.Document(\r\n content=content,\r\n type=enums.Document.Type.PLAIN_TEXT)\r\n annotations = client.analyze_sentiment(document=document)\r\n\r\n # Write results to GCS \r\n return annotations.document_sentiment.score", "def analyze(text):\n client = language_service_client.LanguageServiceClient()\n\n # with open(movie_review_filename, 'r') as review_file:\n # Instantiates a plain text document.\n \n # content = text.read()\n content=text\n document = language_v1.types.Document(\n content=content,\n type=enums.Document.Type.PLAIN_TEXT,\n language='en'\n )\n # type='PLAIN_TEXT',\n # )\n \n try:\n response = client.analyze_sentiment(\n document=document,\n encoding_type='UTF32',\n )\n sentiment = response.document_sentiment\n return 
(sentiment.score)\n except InvalidArgument:\n sentiment=0.0\n return sentiment", "def get_historical_sentiment_avg(search_term, location=None):\r\n\r\n total = 0\r\n\r\n if location:\r\n tweets = Tweet.objects(Q(keyword_search_term=search_term) & Q(location_address=location))\r\n count = len(tweets)\r\n else:\r\n tweets = Tweet.objects(Q(keyword_search_term=search_term))\r\n count = len(tweets)\r\n\r\n for tweet in tweets:\r\n total += tweet.sentiment_score\r\n\r\n # Calculate average\r\n avg = total / count\r\n avg = float(\"{0:.2f}\".format((float(avg))))\r\n\r\n return avg", "def naive_sentiment_analyzer(text: str) -> dict:\n result = {}\n text_blob = TextBlob(text, analyzer=NaiveBayesAnalyzer())\n result['overall'] = text_blob.sentiment\n for sentence in text_blob.sentences:\n result[sentence] = sentence.sentiment\n\n return result", "def predominant_sentiment(sentiment_aggregate_list):\r\n\r\n positive = int(sentiment_aggregate_list[0][1])\r\n neutral = int(sentiment_aggregate_list[1][1])\r\n negative = int(sentiment_aggregate_list[2][1])\r\n\r\n if positive > neutral and positive > negative:\r\n return \"positive\"\r\n elif neutral > positive and neutral > negative:\r\n return \"neutral\"\r\n elif negative > positive and negative > neutral:\r\n return \"negative\"\r\n else:\r\n return \"mixed\"", "def app_view(request):\n prior_queries = (request.dbsession.query(Sentiments, User)\n .join(User)\n .filter(User.username == request.authenticated_userid)\n .order_by(Sentiments.id.desc())\n .all())\n sentient_bodies = (query[0].body for query in prior_queries)\n sentimental_parts = (percentage(query[0].negative_sentiment) for query in prior_queries)\n logical_bits = (percentage(query[0].positive_sentiment) for query in prior_queries)\n sublime_insight = zip(sentient_bodies, sentimental_parts, logical_bits)\n if request.method == \"POST\":\n text_body = request.POST['body']\n url = \"http://text-processing.com/api/sentiment/\"\n payload = {'text': text_body}\n response = requests.request('POST', url, data=payload, headers=None)\n response_dict = json.loads(response.text)\n user_query = request.dbsession.query(User).filter(User.username == request.authenticated_userid).one().id\n sentiment_entry = Sentiments(\n body=text_body,\n negative_sentiment=response_dict['probability']['neg'],\n positive_sentiment=response_dict['probability']['pos'],\n user_id=user_query\n )\n request.dbsession.add(sentiment_entry)\n response_dict['probability']['neg'] = percentage(response_dict['probability']['neg'])\n response_dict['probability']['pos'] = percentage(response_dict['probability']['pos'])\n return {'response_dict': response_dict,\n 'text_body': text_body,\n 'consummate_awareness': sentient_bodies,\n 'conscious whole': sentimental_parts,\n 'divine oneness': logical_bits,\n 'hallowed_provenance': sublime_insight}\n return {'consummate_awareness': sentient_bodies,\n 'conscious whole': sentimental_parts,\n 'divine oneness': logical_bits,\n 'hallowed_provenance': sublime_insight}", "def test_msg_sentiment_analysis(self):\n\n pos_messages = Message.query.filter_by(message_id=101).all()\n analyzed_messages = sentiment_analysis.analyze_messages(pos_messages)\n self.assertTrue(analyzed_messages[0].sentiment.polarity > 0.3)\n\n neg_messages = Message.query.filter_by(message_id=104).all()\n analyzed_messages = sentiment_analysis.analyze_messages(neg_messages)\n self.assertTrue(analyzed_messages[0].sentiment.polarity < -0.3)", "def AnalyzeSentiment(self, request, context):\n 
context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details(\"Method not implemented!\")\n raise NotImplementedError(\"Method not implemented!\")", "def analyze_data(df, sentiment_col, tweet_col, path):\n\n # create empty dictionaries to store all encountered words and their frequencies\n all_dict = {}\n pos_dict = {}\n neg_dict = {}\n neu_dict = {}\n # initialize counters to counter total number of tweets based on their emotion\n pos_count = 0\n neg_count = 0\n neu_count = 0\n\n # iterate through each row of the df\n for index, row in df.iterrows():\n if row[sentiment_col] == \"positive\":\n pos_count = iterate_words(\n pos_count, row[tweet_col], all_dict, pos_dict)\n\n if row[sentiment_col] == \"negative\":\n neg_count = iterate_words(\n neg_count, row[tweet_col], all_dict, neg_dict)\n\n if row[sentiment_col] == \"neutral\":\n neu_count = iterate_words(\n neu_count, row[tweet_col], all_dict, neu_dict)\n\n # visualize statistics\n visualize_stats(all_dict, 'all_plot.png', 'all_cloud.png',\n 'Word frequency in all tweets', path)\n visualize_stats(pos_dict, 'pos_plot.png', 'pos_cloud.png',\n 'Word frequency in positive tweets', path)\n visualize_stats(neg_dict, 'neg_plot.png', 'neg_cloud.png',\n 'Word frequency in negative tweets', path)\n visualize_stats(neu_dict, 'neu_plot.png', 'neu_cloud.png',\n 'Word frequency in neutral tweets', path)\n\n # make plot for emotion frequency\n emotions = ('Positive', 'Negative', 'Neutral')\n freq = [pos_count, neg_count, neu_count]\n sns.set_style(\"darkgrid\")\n ax = plt.figure().gca()\n ax.xaxis.grid(False)\n ax.yaxis.set_major_locator(MaxNLocator(integer=True))\n plt.bar(range(len(emotions)), freq, align='center',\n color=['forestgreen', 'firebrick', 'goldenrod'])\n plt.xticks(range(len(emotions)), emotions)\n plt.title('Tweet frequency based on emotion')\n plt.savefig(path + 'emotion_plot.png')\n plt.close()\n\n # make pie for emotion frequency\n sizes = [pos_count / len(df.index), neg_count /\n len(df.index), neu_count / len(df.index)]\n colors = ['forestgreen', 'firebrick', 'goldenrod']\n plt.pie(sizes, labels=emotions, colors=colors,\n autopct='%1.1f%%', startangle=140)\n plt.title('Tweet frequency based on emotion')\n plt.axis('equal')\n plt.savefig(path + 'emotion_pie.png')\n plt.close()", "def classify(tweets,table,positives,negatives,p_tweets,n_tweets):\n\n\n st = LancasterStemmer()\n\n n_words = len(table)\n in_table = 0\n not_in_table = 0\n\n\n y_pred = np.zeros(len(tweets)).astype('int32')\n\n for i in range(len(tweets)):\n likelihood_pos = 0\n likelihood_neg = 0\n \n # MAP negatives and positives\n for word in tweets[i].split():\n word = st.stem(word.decode('utf-8'))\n if word in table:\n in_table += 1\n likelihood_pos += m.log((table[word][0]+1)/float(positives + 1*n_words))\n likelihood_neg += m.log((table[word][1]+1)/float(negatives + 1*n_words))\n \n else:\n not_in_table += 1\n likelihood_pos += m.log(1/float(positives + 1*n_words))\n likelihood_neg += m.log(1/float(negatives + 1*n_words))\n\n likelihood_pos += m.log(p_tweets/float(p_tweets + n_tweets))\n likelihood_neg += m.log(n_tweets/float(p_tweets + n_tweets))\n\n\n\n # Classify as positive or negative\n if likelihood_neg < likelihood_pos: \n y_pred[i] = 1\n\n prediction = np.bincount(y_pred)\n\n print \"Known words: %d\" % in_table\n print \"Unknown words %d\\n\" % not_in_table\n\n positive_ratio = prediction[1]/float(prediction[1] + prediction[0])\n\n group = \"Positive\" if positive_ratio > 0.5 else \"Negative\" \n\n\n return positive_ratio,group", "def 
sentiment_plot(self, top_words=25):\n if top_words > 25:\n warnings.warn('Including more than 25 words on the X-axis will cause words to be excluded from the axis')\n\n daily_comments = self.comments[(self.comments['days_after_release'].\\\n isin(list(range(self.day_window[0], self.day_window[1] + 1))))]\n if len(daily_comments) == 0:\n warnings.warn('No comments found for this day, trying future dates until comments are found')\n\n while len(daily_comments) == 0:\n if self.day_window[1] > self.comments['days_after_release'].max():\n raise KeyError('Reached bounds of comment dates available. Make sure all comments are present')\n self.day_window[1] += 1\n daily_comments = self.comments[(self.comments['days_after_release'].\\\n isin(list(range(self.day_window[0], self.day_window[1] + 1))))]\n\n print('Now looking at {} to {} days after release'.format(self.day_window[0], self.day_window[1]))\n\n if 'pos' not in daily_comments['sentiment'].values or 'neu' not in daily_comments['sentiment'].values or \\\n 'neg' not in daily_comments['sentiment'].values:\n warnings.warn('No negative or positive sentiments found on this day, trying future dates until positive or negative comments are found')\n\n while 'pos' not in daily_comments['sentiment'].values or 'neu' not in daily_comments['sentiment'].values or \\\n 'neg' not in daily_comments['sentiment'].values:\n if self.day_window[1] > self.comments['days_after_release'].max():\n raise KeyError('Reached bounds of comment dates available. Make sure all comments are present')\n self.day_window[1] += 1\n daily_comments = self.comments[(self.comments['days_after_release']. \\\n isin(list(range(self.day_window[0], self.day_window[1] + 1))))]\n\n print('Now looking at {} to {} days after release'.format(self.day_window[0], self.day_window[1]))\n\n res_positive = daily_comments[(daily_comments['sentiment']=='pos')]['comment_message'].str.split(expand=True)\\\n .stack().value_counts().to_dict()\n res_neutral = daily_comments[(daily_comments['sentiment']=='neu')]['comment_message'].str.split(expand=True)\\\n .stack().value_counts().to_dict()\n res_negative = daily_comments[daily_comments['sentiment']=='neg']['comment_message'].str.split(expand=True)\\\n .stack().value_counts().to_dict()\n\n fig = make_subplots(rows=3, cols=1,\n y_title='Count',\n subplot_titles=('Positive', 'Neutral', 'Negative'))\n trace = fig.add_trace(px.bar(x=list(res_positive.keys())[:top_words], y=list(res_positive.values())[:top_words]).data[0],\n row=1, col=1)\n fig.append_trace(px.bar(x=list(res_neutral.keys())[:top_words], y=list(res_neutral.values())[:top_words]).data[0],\n row=2, col=1)\n fig.append_trace(px.bar(x=list(res_negative.keys())[:top_words], y=list(res_negative.values())[:top_words]).data[0],\n row=3, col=1)\n\n left = np.where(self.day_window[0] < 0, 'Before', 'After')\n right = np.where(self.day_window[1] < 0, 'Before', 'After')\n fig.update_layout(\n title='Top {} Words at {} Days {} Release to {} Days {} Release'.format(top_words,\n self.day_window[0], left,\n self.day_window[1], right)\n )\n fig.show()", "def stat(data, data_word, data_sent):\n #basic counts\n sent = len(data_sent)\n syll = textstat.syllable_count(data)\n word = len(data_word)\n\n #average calcs\n avg_syll = syll / word\n avg_word = word / sent\n read_time = word/265\n\n #advance stat\n flesch_kincaid_grade = fkg(int(word), int(sent), int(syll))\n verbose = len([word for word in data_word if textstat.syllable_count(word) > 3])\n\n wordy = 0\n for item in data_sent:\n token = word_tokenize(item)\n 
if len(token) > 40:\n wordy += 1\n #writing to list\n stats = [syll,word,sent,avg_syll,avg_word,read_time,flesch_kincaid_grade, verbose, wordy]\n\n return stats", "def get_feature_set_PC(tweet, sentimentvalues):\n features= {\n 'text_length': np.log(len(tweet.text))\n } #ADD ADDITIONAL FEATURES\n if tweet.nrof_sademoticons>0:\n features['sademoticons'] = tweet.nrof_sademoticons\n if tweet.nrof_happyemoticons>0:\n features['happyemoticons'] = tweet.nrof_happyemoticons\n \n for phrase in tweet.tagged_words:\n for word in phrase:\n try:\n tag = word['pos']\n features[tag] = features.get(tag, 0) + 1\n if tag in ADJECTIVES:\n features['adjectives'] = features.get(tag, 0) + 1\n elif tag in ADVERBS: \n features['adverbs'] = features.get(tag, 0) + 1\n elif tag in PRONOUNS:\n features['pronoun'] = 1\n except KeyError:\n continue\n for key in features.keys():\n features[key] = features[key]*1.0\n \n #Add lexical features\n # total polarity score, number of positive words, number of negative words\n pos_score = 0\n neg_score = 0\n nrof_pos_words = 0\n nrof_neg_words = 0\n for word in sentimentvalues.keys():\n if sentimentvalues[word][0]>0:\n nrof_pos_words = nrof_pos_words + 1\n pos_score = pos_score + sentimentvalues[word][0]\n if sentimentvalues[word][1]>0:\n nrof_neg_words = nrof_neg_words + 1\n neg_score = neg_score + sentimentvalues[word][1]\n\n if neg_score>0:\n features['neg_score'] = neg_score+1.0\n if pos_score>0:\n features['pos_score'] = pos_score+1.0\n if nrof_pos_words>0:\n features['positive_words'] = nrof_pos_words*1.0\n if nrof_neg_words>0:\n features['negative_words'] = nrof_neg_words*1.0\n \n return features", "def subjectivity():\r\n scores = []\r\n for index, row in topics_data.iterrows():\r\n if index in actual_list:\r\n scores.append(row['score'])\r\n\r\n subs = []\r\n for index, row in topics_data.iterrows():\r\n if index in actual_list:\r\n url = row['url']\r\n if 'newsweek' or 'democracynow' in url:\r\n user_agent = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/85.0.4183.121 Safari/537.36'\r\n config = Config()\r\n config.browser_user_agent = user_agent\r\n article = Article(url, config=config)\r\n else:\r\n article = Article(url)\r\n article.download()\r\n article.parse()\r\n article.nlp()\r\n text = article.summary\r\n obj = TextBlob(text)\r\n subjectivity = obj.sentiment.subjectivity\r\n subs.append(subjectivity)\r\n\r\n plt.figure(figsize=(50, 10))\r\n plt.scatter(subs, scores)\r\n plt.xlabel('Subjectivity')\r\n plt.ylabel('Score')\r\n plt.title('Posts in r/politics')\r\n plt.show()", "def analyze(self, text):\n #Check each word in text\n tokenizer = nltk.tokenize.TweetTokenizer()\n tokens = tokenizer.tokenize(text)\n total_score = 0\n #Sum the total score\n for token in tokens:\n token = token.lower()\n if token in self.positives:\n total_score = total_score + 1\n elif token in self.negatives:\n total_score = total_score - 1\n else:\n total_score = total_score + 0\n \n return total_score", "def analyze(self, text):\n #analize every word in the text a value -1, 1 or 0 and calculate total score\n #tokens allow us to split words in single tokens we can initialize tokens like this:\n\n tokenizer = nltk.tokenize.TweetTokenizer()\n tokens = tokenizer.tokenize(text.lower())\n\n score = 0\n\n if tokens[0] in self.negatives:\n score =- 1\n elif tokens[0] in self.positives:\n score =+ 1\n else:\n score = 0\n\n #print('', text)\n\n return score", "def AnalyzeSentiment(self, request, context):\n 
context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def AnalyzeSentiment(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')" ]
[ "0.71262354", "0.6855994", "0.67449534", "0.6719744", "0.66272366", "0.6555126", "0.6548427", "0.65435684", "0.6531834", "0.6526213", "0.64024013", "0.63911426", "0.6358385", "0.6352042", "0.6334273", "0.6325912", "0.6321714", "0.6317291", "0.62689185", "0.62674236", "0.6253891", "0.61877346", "0.6172615", "0.6144188", "0.61255646", "0.6122504", "0.6101083", "0.6093699", "0.60681355", "0.60482466", "0.604005", "0.6027582", "0.5995888", "0.59897333", "0.5987245", "0.59784245", "0.5976432", "0.59748775", "0.59471846", "0.59378964", "0.593113", "0.5920002", "0.59148765", "0.5912756", "0.59085447", "0.5892421", "0.588905", "0.5888797", "0.58851093", "0.58820087", "0.5875807", "0.5870617", "0.5858646", "0.5848941", "0.5847757", "0.583608", "0.58332604", "0.58317816", "0.58304673", "0.58194274", "0.5807167", "0.5805803", "0.5795612", "0.57947046", "0.5779564", "0.5771822", "0.576806", "0.576602", "0.5748053", "0.5744892", "0.57405114", "0.5739689", "0.57358974", "0.57276016", "0.57180446", "0.570987", "0.56979024", "0.5675246", "0.56701857", "0.5664505", "0.56623524", "0.5654774", "0.5641793", "0.56310934", "0.56306416", "0.5610858", "0.5607269", "0.5598266", "0.55940074", "0.5593714", "0.55855685", "0.5583452", "0.5574599", "0.55699813", "0.55689645", "0.55654967", "0.5560872", "0.5559381", "0.5559041", "0.5559041" ]
0.60600317
29
_deserialize defines a custom Marshmallow Schema Field that deserializes multi-type input data into app-level objects.
def _deserialize( self, value: Any, attr: str = None, data: Mapping[str, Any] = None, **kwargs ): errors = [] # iterate through the types being passed into UnionField via val_types for field in self.valid_types: try: # inherit deserialize method from Fields class return field.deserialize(value, attr, data, **kwargs) # if error, add error message to error list except ValidationError as error: errors.append(error.messages) raise ValidationError(errors)
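For reference, a minimal usage sketch (not part of the dataset record) of how a union-style field built around the `_deserialize` method above might be declared and exercised in a marshmallow Schema. The `UnionField` wrapper class, its `valid_types` argument, and `ItemSchema` are illustrative assumptions introduced here, not taken from the record:

```python
from typing import Any, Mapping
from marshmallow import Schema, fields, ValidationError


class UnionField(fields.Field):
    """Accepts a value matching any one of several candidate marshmallow fields."""

    def __init__(self, valid_types, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.valid_types = valid_types  # e.g. [fields.Integer(), fields.String()]

    def _deserialize(self, value: Any, attr: str = None, data: Mapping[str, Any] = None, **kwargs):
        errors = []
        # Try each candidate field in turn; the first successful deserialization wins.
        for candidate in self.valid_types:
            try:
                return candidate.deserialize(value, attr, data, **kwargs)
            except ValidationError as error:
                errors.append(error.messages)
        # Every candidate rejected the value: surface all collected messages at once.
        raise ValidationError(errors)


class ItemSchema(Schema):
    # `identifier` may be given either as an integer or as a string.
    identifier = UnionField(valid_types=[fields.Integer(), fields.String()])


print(ItemSchema().load({"identifier": 42}))     # {'identifier': 42}
print(ItemSchema().load({"identifier": "abc"}))  # {'identifier': 'abc'}
```

The design point illustrated: the field returns the result of the first candidate whose `deserialize` call does not raise `ValidationError`; only if every candidate fails are the collected messages re-raised together.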
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _post_deserialize (self):\n pass", "def deserialize(self, data):", "def deserialize(self, data, schema, **kwargs):\n return self.serializer.load(data, schema, **kwargs)", "def SplitDataclassField(default: str):\n\n\n class SplitMarshmallowField(fields.Field):\n \"\"\"Custom marshmallow field that deserializes a dict for a valid split config from the split_registry and\n creates a corresponding JSON schema for external usage.\"\"\"\n\n def _deserialize(self, value, attr, data, **kwargs):\n if value is None:\n return None\n if isinstance(value, dict):\n if TYPE in value and value[TYPE] in split_config_registry.data:\n split_class = split_config_registry.data[value[TYPE]]\n try:\n return split_class.get_schema_cls().Schema().load(value)\n except (TypeError, ValidationError) as error:\n raise ValidationError(f'Invalid split params: {value}, see `{split_class}` definition. Error: {error}')\n raise ValidationError(f'Invalid params for splitter: {value}, expected dict with at least a valid `type` attribute.')\n raise ValidationError('Field should be None or dict')\n\n @staticmethod\n def _jsonschema_type_mapping():\n return {'type': 'object', 'properties': {'type': {'type': 'string', 'enum': list(split_config_registry.data.keys()), 'default': default}}, 'title': 'split_options', 'allOf': get_split_conds()}\n try:\n splitter = split_config_registry.data[default]\n load_default = splitter.Schema().load({'type': default})\n dump_default = splitter.Schema().dump({'type': default})\n return field(metadata={'marshmallow_field': SplitMarshmallowField(allow_none=False, dump_default=dump_default, load_default=load_default)}, default_factory=lambda : load_default)\n except Exception as e:\n raise ValidationError(f'Unsupported splitter type: {default}. See split_registry. Details: {e}')", "def DecoderDataclassField(feature_type: str, default: str):\n\n\n class DecoderMarshmallowField(fields.Field):\n \"\"\"Custom marshmallow field that deserializes a dict for a valid decoder config from the decoder_registry\n and creates a corresponding `oneOf` JSON schema for external usage.\"\"\"\n\n def _deserialize(self, value, attr, data, **kwargs):\n if value is None:\n return None\n if isinstance(value, dict):\n if TYPE in value and value[TYPE] in get_decoder_classes(feature_type):\n dec = get_decoder_cls(feature_type, value[TYPE])\n try:\n return dec.Schema().load(value)\n except (TypeError, ValidationError) as error:\n raise ValidationError(f'Invalid decoder params: {value}, see `{dec}` definition. Error: {error}')\n raise ValidationError(f'Invalid params for decoder: {value}, expect dict with at least a valid `type` attribute.')\n raise ValidationError('Field should be None or dict')\n\n @staticmethod\n def _jsonschema_type_mapping():\n decoder_classes = list(get_decoder_classes(feature_type).keys())\n return {'type': 'object', 'properties': {'type': {'type': 'string', 'enum': decoder_classes, 'default': default}}, 'title': 'decoder_options', 'allOf': get_decoder_conds(feature_type)}\n try:\n decoder = get_decoder_cls(feature_type, default)\n load_default = decoder.Schema().load({'type': default})\n dump_default = decoder.Schema().dump({'type': default})\n return field(metadata={'marshmallow_field': DecoderMarshmallowField(allow_none=False, dump_default=dump_default, load_default=load_default)}, default_factory=lambda : load_default)\n except Exception as e:\n raise ValidationError(f'Unsupported decoder type: {default}. See decoder_registry. 
Details: {e}')", "def deserialize(self, value):\n raise NotImplementedError", "def deserialize(self, data):\n return NotImplementedError", "def getDeserializer():", "def deserialize(cls, field_name, field_sig_dict, sig_version):\n field_type = field_sig_dict['field_type']\n field_attrs = {}\n\n for attr in cls._iter_attrs_for_field_type(field_type):\n if hasattr(cls, attr):\n # This is stored on the field signature class itself, so\n # it's not attribute data we want to load.\n continue\n\n alias = cls._ATTRIBUTE_ALIASES.get(attr)\n\n if alias and alias in field_sig_dict:\n value = field_sig_dict[alias]\n elif attr in field_sig_dict:\n value = field_sig_dict[attr]\n else:\n # The signature didn't contain a value for this attribute.\n continue\n\n field_attrs[attr] = value\n\n return cls(field_name=field_name,\n field_type=field_type,\n field_attrs=field_attrs,\n related_model=field_sig_dict.get('related_model'))", "def SplitDataclassField(default: str):\n\n class SplitMarshmallowField(fields.Field):\n \"\"\"Custom marshmallow field that deserializes a dict for a valid split config from the split_registry and\n creates a corresponding JSON schema for external usage.\"\"\"\n\n def _deserialize(self, value, attr, data, **kwargs):\n if value is None:\n return None\n if isinstance(value, dict):\n if TYPE in value and value[TYPE] in split_config_registry.data:\n split_class = split_config_registry.data[value[TYPE]]\n try:\n return split_class.get_schema_cls().Schema().load(value)\n except (TypeError, ValidationError) as error:\n raise ValidationError(\n f\"Invalid split params: {value}, see `{split_class}` definition. Error: {error}\"\n )\n raise ValidationError(\n f\"Invalid params for splitter: {value}, expected dict with at least a valid `type` attribute.\"\n )\n raise ValidationError(\"Field should be None or dict\")\n\n @staticmethod\n def _jsonschema_type_mapping():\n return {\n \"type\": \"object\",\n \"properties\": {\n \"type\": {\"type\": \"string\", \"enum\": list(split_config_registry.data.keys()), \"default\": default},\n },\n \"title\": \"split_options\",\n \"allOf\": get_split_conds(),\n }\n\n try:\n splitter = split_config_registry.data[default]\n load_default = splitter.Schema().load({\"type\": default})\n dump_default = splitter.Schema().dump({\"type\": default})\n\n return field(\n metadata={\n \"marshmallow_field\": SplitMarshmallowField(\n allow_none=False,\n dump_default=dump_default,\n load_default=load_default,\n )\n },\n default_factory=lambda: load_default,\n )\n except Exception as e:\n raise ValidationError(f\"Unsupported splitter type: {default}. See split_registry. 
\" f\"Details: {e}\")", "def unmarshal(self):\n ...", "def deserialize(self, obj):\n raise NotImplementedError", "def DefaultsDataclassField(feature_type: str):\n\n\n class DefaultMarshmallowField(fields.Field):\n \"\"\"Custom marshmallow field that deserializes a dict for a valid defaults config from the feature_registry\n and creates a corresponding JSON schema for external usage.\"\"\"\n\n def _deserialize(self, value, attr, data, **kwargs):\n if value is None:\n return None\n if isinstance(value, dict):\n input_feature_class = input_mixin_registry[feature_type]\n output_feature_class = output_mixin_registry.get(feature_type, None)\n try:\n input_schema = input_feature_class.Schema().load(value)\n if output_feature_class:\n output_schema = output_feature_class.Schema().load(value)\n combined = input_schema + output_schema\n else:\n combined = input_schema\n return combined\n except (TypeError, ValidationError) as error:\n raise ValidationError(f'Invalid params: {value}, see `{attr}` definition. Error: {error}')\n raise ValidationError(f'Invalid params: {value}')\n\n @staticmethod\n def _jsonschema_type_mapping():\n input_feature_cls = input_mixin_registry.get(feature_type)\n output_feature_cls = output_mixin_registry.get(feature_type, None)\n input_props = schema_utils.unload_jsonschema_from_marshmallow_class(input_feature_cls)['properties']\n if output_feature_cls:\n output_props = schema_utils.unload_jsonschema_from_marshmallow_class(output_feature_cls)['properties']\n combined_props = {**output_props, **input_props}\n else:\n combined_props = input_props\n return {'type': 'object', 'properties': combined_props, 'additionalProperties': False, 'title': 'defaults_options'}\n try:\n input_cls = input_mixin_registry[feature_type]\n output_cls = output_mixin_registry.get(feature_type, None)\n dump_default = input_cls.Schema().dump({'type': feature_type})\n if output_cls:\n output_dump = output_cls.Schema().dump({'type': feature_type})\n dump_default = {**output_dump, **dump_default}\n load_default = input_cls.Schema().load({'type': feature_type})\n if output_cls:\n output_load = output_cls.Schema().load({'type': feature_type})\n for k in dump_default.keys():\n if getattr(load_default, k, -1) == -1:\n setattr(load_default, k, getattr(output_load, k))\n return field(metadata={'marshmallow_field': DefaultMarshmallowField(allow_none=False, dump_default=dump_default, load_default=load_default)}, default_factory=lambda : load_default)\n except Exception as e:\n raise ValidationError(f'Unsupported feature type: {feature_type}. See input_type_registry. 
Details: {e}')", "def deserialize(cls, serialized):\n if serialized.WhichOneof(\"layer_data\") == \"argmax_data\":\n return cls()\n return None", "def _deserialize_data(self, schema, datum):\n if schema.type in AvroJsonDeserializer.PRIMITIVE_CONVERTERS:\n return datum\n\n if schema.type in AvroJsonDeserializer.COMPLEX_CONVERTERS:\n return self.COMPLEX_CONVERTERS[schema.type](self, schema, datum)\n\n raise avro.schema.AvroException(\"Unknown type: %s\" % schema.type)", "def _get_marshmallow_field_cls(self):\n return self.MARSHMALLOW_FIELD_CLS", "def _deserialize_data(self):\n try:\n self._func_name, self._instance, self._args, self._kwargs = self.serializer.loads(self.data)\n except Exception as e:\n raise DeserializationError() from e", "def _decode_field(self, field_type, field_data, subcontent=None):\n # check wire type\n wt_schema = self.__class__.FIELD_WIRE_TYPE[field_type]\n wt_data = field_data['wire_type']\n if wt_schema != wt_data:\n raise TypeError(\n 'Wire type mismatch (expect {0} but got {1})'\\\n .format(wt_schema, wt_data)\n )\n\n field_decoded = None\n\n # the actual decoding process\n # nested structure\n if field_type == 'a' and subcontent:\n self.logger.debug('_decode_field(): nested field begin')\n if self._kv_fmt:\n field_decoded = dict(self._decode_wire(\n io.BytesIO(field_data['data']),\n subcontent\n ))\n else:\n field_decoded = tuple(self._decode_wire(\n io.BytesIO(field_data['data']),\n subcontent\n ))\n self.logger.debug('_decode_field(): nested field end')\n\n # string, unsigned vint (2sc)\n elif field_type in 'aT':\n field_decoded = field_data['data']\n\n # unicode\n elif field_type in 'U':\n field_decoded = field_data['data'].decode('utf-8')\n\n # vint (zigzag)\n elif field_type == 'z':\n field_decoded = self._vint_dezigzagify(field_data['data'])\n\n # signed 2sc\n elif field_type == 't':\n field_decoded = self._vint_2sctosigned(field_data['data'])\n\n # fixed, float, double\n elif field_type in 'iIfdqQ':\n field_decoded = struct.unpack(\n '<{0}'.format(field_type), field_data['data']\n )[0]\n\n # boolean\n elif field_type == 'b':\n if field_data['data'] == 0:\n field_decoded = False\n else:\n field_decoded = True\n\n return field_decoded", "def _deserialize(self, handle):\n raise NotImplementedError", "def deserialize_model(data, klass):\n instance = klass()\n\n if not instance.swagger_types:\n return data\n\n for attr, attr_type in iteritems(instance.swagger_types):\n if data is not None \\\n and instance.attribute_map[attr] in data \\\n and isinstance(data, (list, dict)):\n value = data[instance.attribute_map[attr]]\n setattr(instance, attr, _deserialize(value, attr_type))\n\n return instance", "def deserialize_object(d):\n pass", "def deserialize(cls, data):\r\n dtype = data.get('_type')\r\n if dtype == 'vertex':\r\n vertex_type = data['element_type']\r\n if vertex_type not in vertex_types:\r\n raise ElementDefinitionException('Vertex \"{}\" not defined'.format(vertex_type))\r\n translated_data = vertex_types[vertex_type].translate_db_fields(data)\r\n return vertex_types[vertex_type](**translated_data)\r\n elif dtype == 'edge':\r\n edge_type = data['_label']\r\n if edge_type not in edge_types:\r\n raise ElementDefinitionException('Edge \"{}\" not defined'.format(edge_type))\r\n translated_data = edge_types[edge_type].translate_db_fields(data)\r\n return edge_types[edge_type](data['_outV'], data['_inV'], **translated_data)\r\n else:\r\n raise TypeError(\"Can't deserialize '{}'\".format(dtype))", "def deserialize(self, source):\n attrs = {}\n for k, v in 
source.iteritems():\n try:\n attrs[k] = self.deserialize_field(source, k)\n except (AttributeError, FieldDoesNotExist):\n # m2m, abstract\n pass\n\n return self.instanciate(attrs)\n # TODO: we can assign m2ms now", "def deserialize(self, data):\n if not data:\n return None\n vallist = data.split(self.SPLIT)\n return self.decode(vallist)", "def _decode_struct(\n data_type, obj, alias_validators, strict, old_style, for_msgpack):\n if obj is None and data_type.has_default():\n return data_type.get_default()\n elif not isinstance(obj, dict):\n raise bv.ValidationError('expected object, got %s' %\n bv.generic_type_name(obj))\n if strict:\n for key in obj:\n if (key not in data_type.definition._all_field_names_ and\n not key.startswith('.tag')):\n raise bv.ValidationError(\"unknown field '%s'\" % key)\n ins = data_type.definition()\n _decode_struct_fields(\n ins, data_type.definition._all_fields_, obj, alias_validators, strict,\n old_style, for_msgpack)\n # Check that all required fields have been set.\n data_type.validate_fields_only(ins)\n return ins", "def _deserialize(self, data):\n uri = data[1:-1]\n # We have to retrieve the type to rebuild the object\n attr = self.__dict__['field']\n # Be careful when orig = None !!!!!\n orig = getattr(attr.model, attr.name)\n if None == orig:\n return rdfSubject(rdflib.term.URIRef(uri))\n elif isinstance(orig, list):\n # rdfalchemy mapper gives me the solution\n rt = attr.model.__class__.__dict__[attr.name].range_type\n from rdfalchemy.orm import mapper\n alch_map = mapper()\n try:\n cls = alch_map[str(rt)]\n return cls(rdflib.term.URIRef(uri))\n except:\n rdfSubject(rdflib.term.URIRef(uri))\n else:\n return type(orig)(rdflib.term.URIRef(uri))", "def _deserialize(cls, *args):\n return cls(*args)", "def SchedulerDataclassField(default={'type': 'fifo'}, description='Hyperopt scheduler settings.'):\n\n\n class SchedulerMarshmallowField(fields.Field):\n \"\"\"Custom marshmallow field that deserializes a dict to a valid scheduler from\n `ludwig.schema.hyperopt.scheduler_registry` and creates a corresponding `oneOf` JSON schema for external\n usage.\"\"\"\n\n def _deserialize(self, value, attr, data, **kwargs):\n if value is None:\n return None\n if isinstance(value, dict):\n if 'type' in value and value['type'] in scheduler_config_registry:\n scheduler_config_cls = scheduler_config_registry[value['type'].lower()]\n try:\n return scheduler_config_cls.Schema().load(value)\n except (TypeError, ValidationError) as e:\n raise ValidationError(f'Invalid params for scheduler: {value}, see `{opt}` definition. 
Error: {e}')\n raise ValidationError(f'Invalid params for scheduler: {value}, expect dict with at least a valid `type` attribute.')\n raise ValidationError('Field should be None or dict')\n\n @staticmethod\n def _jsonschema_type_mapping():\n return {'type': 'object', 'properties': {'type': {'type': 'string', 'enum': list(scheduler_config_registry.keys()), 'default': default['type'], 'description': 'The type of scheduler to use during hyperopt'}}, 'title': 'scheduler_options', 'allOf': get_scheduler_conds(), 'required': ['type'], 'description': description}\n if not isinstance(default, dict) or 'type' not in default or default['type'] not in scheduler_config_registry:\n raise ValidationError(f'Invalid default: `{default}`')\n try:\n opt = scheduler_config_registry[default['type'].lower()]\n load_default = opt.Schema().load(default)\n dump_default = opt.Schema().dump(default)\n return field(metadata={'marshmallow_field': SchedulerMarshmallowField(allow_none=False, dump_default=dump_default, load_default=load_default, metadata={'description': description})}, default_factory=lambda : load_default)\n except Exception as e:\n raise ValidationError(f\"Unsupported scheduler type: {default['type']}. See scheduler_config_registry. Details: {e}\")", "def get_field_deserializers(self,) -> Dict[str, Callable[[ParseNode], None]]:\n from .app_list_item import AppListItem\n from .app_list_type import AppListType\n from .device_configuration import DeviceConfiguration\n from .required_password_type import RequiredPasswordType\n\n from .app_list_item import AppListItem\n from .app_list_type import AppListType\n from .device_configuration import DeviceConfiguration\n from .required_password_type import RequiredPasswordType\n\n fields: Dict[str, Callable[[Any], None]] = {\n \"compliantAppListType\": lambda n : setattr(self, 'compliant_app_list_type', n.get_enum_value(AppListType)),\n \"compliantAppsList\": lambda n : setattr(self, 'compliant_apps_list', n.get_collection_of_object_values(AppListItem)),\n \"emailInDomainSuffixes\": lambda n : setattr(self, 'email_in_domain_suffixes', n.get_collection_of_primitive_values(str)),\n \"passwordBlockSimple\": lambda n : setattr(self, 'password_block_simple', n.get_bool_value()),\n \"passwordExpirationDays\": lambda n : setattr(self, 'password_expiration_days', n.get_int_value()),\n \"passwordMinimumCharacterSetCount\": lambda n : setattr(self, 'password_minimum_character_set_count', n.get_int_value()),\n \"passwordMinimumLength\": lambda n : setattr(self, 'password_minimum_length', n.get_int_value()),\n \"passwordMinutesOfInactivityBeforeLock\": lambda n : setattr(self, 'password_minutes_of_inactivity_before_lock', n.get_int_value()),\n \"passwordMinutesOfInactivityBeforeScreenTimeout\": lambda n : setattr(self, 'password_minutes_of_inactivity_before_screen_timeout', n.get_int_value()),\n \"passwordPreviousPasswordBlockCount\": lambda n : setattr(self, 'password_previous_password_block_count', n.get_int_value()),\n \"passwordRequired\": lambda n : setattr(self, 'password_required', n.get_bool_value()),\n \"passwordRequiredType\": lambda n : setattr(self, 'password_required_type', n.get_enum_value(RequiredPasswordType)),\n }\n super_fields = super().get_field_deserializers()\n fields.update(super_fields)\n return fields", "def deserialize_bytes(self, serialized_bytes):\n # Do a poor-man's evolvable schema implementation. First try to\n # deserialize with the reader schema, and if that fails do it with\n # the writer schema. 
If deserialization fails with the writer schema,\n # let the exception propogate back to the caller.\n try:\n result = self._deserialize_bytes(serialized_bytes, self._quickavro_decoder)\n return result\n except SchemaResolutionException as e:\n result = self._deserialize_bytes(serialized_bytes, self._quickavro_writer_decoder)\n return result", "def _deserialize_object(value):\n return value", "def deserialize_request(self, serialized_request):\n raise NotImplementedError()", "def deserialize(cls, field_name, field_sig_dict, sig_version,\n database=DEFAULT_DB_ALIAS):\n validate_sig_version(sig_version)\n\n if sig_version == 2:\n field_sig_attrs = field_sig_dict.get('attrs', {})\n\n # Load the class for the referenced field type.\n field_type_module, field_type_name = \\\n field_sig_dict['type'].rsplit('.', 1)\n\n # If we have a field path in the signature that lives in\n # django.db.models.fields, update it to look in django.db.models\n # instead. This is for compatibility across all Django versions.\n if field_type_module.startswith('django.db.models.fields'):\n field_type_module = 'django.db.models'\n\n try:\n field_type = getattr(import_module(field_type_module),\n field_type_name)\n except (AttributeError, ImportError):\n raise ImportError('Unable to locate field type %s'\n % '%s.%s' % (field_type_module,\n field_type_name))\n elif sig_version == 1:\n field_sig_attrs = field_sig_dict\n field_type = field_sig_dict['field_type']\n\n field_attrs = {}\n\n for attr in cls._iter_attrs_for_field_type(field_type):\n if hasattr(cls, attr):\n # This is stored on the field signature class itself, so\n # it's not attribute data we want to load.\n continue\n\n alias = cls._ATTRIBUTE_ALIASES.get(attr)\n\n if alias and alias in field_sig_attrs:\n value = field_sig_attrs[alias]\n elif attr in field_sig_attrs:\n value = field_sig_attrs[attr]\n else:\n # The signature didn't contain a value for this attribute.\n continue\n\n field_attrs[attr] = value\n\n return cls(field_name=field_name,\n field_type=field_type,\n field_attrs=field_attrs,\n related_model=field_sig_dict.get('related_model'))", "def deserialize(self, value):\n if value == 'auto':\n return Recollection\n else:\n return self._klass.deserialize(value)", "def get_field_deserializers(self,) -> Dict[str, Callable[[ParseNode], None]]:\n from .app_list_item import AppListItem\n from .app_list_type import AppListType\n from .device_configuration import DeviceConfiguration\n from .ios_network_usage_rule import IosNetworkUsageRule\n from .media_content_rating_australia import MediaContentRatingAustralia\n from .media_content_rating_canada import MediaContentRatingCanada\n from .media_content_rating_france import MediaContentRatingFrance\n from .media_content_rating_germany import MediaContentRatingGermany\n from .media_content_rating_ireland import MediaContentRatingIreland\n from .media_content_rating_japan import MediaContentRatingJapan\n from .media_content_rating_new_zealand import MediaContentRatingNewZealand\n from .media_content_rating_united_kingdom import MediaContentRatingUnitedKingdom\n from .media_content_rating_united_states import MediaContentRatingUnitedStates\n from .rating_apps_type import RatingAppsType\n from .required_password_type import RequiredPasswordType\n from .web_browser_cookie_settings import WebBrowserCookieSettings\n\n from .app_list_item import AppListItem\n from .app_list_type import AppListType\n from .device_configuration import DeviceConfiguration\n from .ios_network_usage_rule import IosNetworkUsageRule\n from 
.media_content_rating_australia import MediaContentRatingAustralia\n from .media_content_rating_canada import MediaContentRatingCanada\n from .media_content_rating_france import MediaContentRatingFrance\n from .media_content_rating_germany import MediaContentRatingGermany\n from .media_content_rating_ireland import MediaContentRatingIreland\n from .media_content_rating_japan import MediaContentRatingJapan\n from .media_content_rating_new_zealand import MediaContentRatingNewZealand\n from .media_content_rating_united_kingdom import MediaContentRatingUnitedKingdom\n from .media_content_rating_united_states import MediaContentRatingUnitedStates\n from .rating_apps_type import RatingAppsType\n from .required_password_type import RequiredPasswordType\n from .web_browser_cookie_settings import WebBrowserCookieSettings\n\n fields: Dict[str, Callable[[Any], None]] = {\n \"accountBlockModification\": lambda n : setattr(self, 'account_block_modification', n.get_bool_value()),\n \"activationLockAllowWhenSupervised\": lambda n : setattr(self, 'activation_lock_allow_when_supervised', n.get_bool_value()),\n \"airDropBlocked\": lambda n : setattr(self, 'air_drop_blocked', n.get_bool_value()),\n \"airDropForceUnmanagedDropTarget\": lambda n : setattr(self, 'air_drop_force_unmanaged_drop_target', n.get_bool_value()),\n \"airPlayForcePairingPasswordForOutgoingRequests\": lambda n : setattr(self, 'air_play_force_pairing_password_for_outgoing_requests', n.get_bool_value()),\n \"appStoreBlockAutomaticDownloads\": lambda n : setattr(self, 'app_store_block_automatic_downloads', n.get_bool_value()),\n \"appStoreBlockInAppPurchases\": lambda n : setattr(self, 'app_store_block_in_app_purchases', n.get_bool_value()),\n \"appStoreBlockUIAppInstallation\": lambda n : setattr(self, 'app_store_block_u_i_app_installation', n.get_bool_value()),\n \"appStoreBlocked\": lambda n : setattr(self, 'app_store_blocked', n.get_bool_value()),\n \"appStoreRequirePassword\": lambda n : setattr(self, 'app_store_require_password', n.get_bool_value()),\n \"appleNewsBlocked\": lambda n : setattr(self, 'apple_news_blocked', n.get_bool_value()),\n \"appleWatchBlockPairing\": lambda n : setattr(self, 'apple_watch_block_pairing', n.get_bool_value()),\n \"appleWatchForceWristDetection\": lambda n : setattr(self, 'apple_watch_force_wrist_detection', n.get_bool_value()),\n \"appsSingleAppModeList\": lambda n : setattr(self, 'apps_single_app_mode_list', n.get_collection_of_object_values(AppListItem)),\n \"appsVisibilityList\": lambda n : setattr(self, 'apps_visibility_list', n.get_collection_of_object_values(AppListItem)),\n \"appsVisibilityListType\": lambda n : setattr(self, 'apps_visibility_list_type', n.get_enum_value(AppListType)),\n \"bluetoothBlockModification\": lambda n : setattr(self, 'bluetooth_block_modification', n.get_bool_value()),\n \"cameraBlocked\": lambda n : setattr(self, 'camera_blocked', n.get_bool_value()),\n \"cellularBlockDataRoaming\": lambda n : setattr(self, 'cellular_block_data_roaming', n.get_bool_value()),\n \"cellularBlockGlobalBackgroundFetchWhileRoaming\": lambda n : setattr(self, 'cellular_block_global_background_fetch_while_roaming', n.get_bool_value()),\n \"cellularBlockPerAppDataModification\": lambda n : setattr(self, 'cellular_block_per_app_data_modification', n.get_bool_value()),\n \"cellularBlockPersonalHotspot\": lambda n : setattr(self, 'cellular_block_personal_hotspot', n.get_bool_value()),\n \"cellularBlockVoiceRoaming\": lambda n : setattr(self, 'cellular_block_voice_roaming', n.get_bool_value()),\n 
\"certificatesBlockUntrustedTlsCertificates\": lambda n : setattr(self, 'certificates_block_untrusted_tls_certificates', n.get_bool_value()),\n \"classroomAppBlockRemoteScreenObservation\": lambda n : setattr(self, 'classroom_app_block_remote_screen_observation', n.get_bool_value()),\n \"classroomAppForceUnpromptedScreenObservation\": lambda n : setattr(self, 'classroom_app_force_unprompted_screen_observation', n.get_bool_value()),\n \"compliantAppListType\": lambda n : setattr(self, 'compliant_app_list_type', n.get_enum_value(AppListType)),\n \"compliantAppsList\": lambda n : setattr(self, 'compliant_apps_list', n.get_collection_of_object_values(AppListItem)),\n \"configurationProfileBlockChanges\": lambda n : setattr(self, 'configuration_profile_block_changes', n.get_bool_value()),\n \"definitionLookupBlocked\": lambda n : setattr(self, 'definition_lookup_blocked', n.get_bool_value()),\n \"deviceBlockEnableRestrictions\": lambda n : setattr(self, 'device_block_enable_restrictions', n.get_bool_value()),\n \"deviceBlockEraseContentAndSettings\": lambda n : setattr(self, 'device_block_erase_content_and_settings', n.get_bool_value()),\n \"deviceBlockNameModification\": lambda n : setattr(self, 'device_block_name_modification', n.get_bool_value()),\n \"diagnosticDataBlockSubmission\": lambda n : setattr(self, 'diagnostic_data_block_submission', n.get_bool_value()),\n \"diagnosticDataBlockSubmissionModification\": lambda n : setattr(self, 'diagnostic_data_block_submission_modification', n.get_bool_value()),\n \"documentsBlockManagedDocumentsInUnmanagedApps\": lambda n : setattr(self, 'documents_block_managed_documents_in_unmanaged_apps', n.get_bool_value()),\n \"documentsBlockUnmanagedDocumentsInManagedApps\": lambda n : setattr(self, 'documents_block_unmanaged_documents_in_managed_apps', n.get_bool_value()),\n \"emailInDomainSuffixes\": lambda n : setattr(self, 'email_in_domain_suffixes', n.get_collection_of_primitive_values(str)),\n \"enterpriseAppBlockTrust\": lambda n : setattr(self, 'enterprise_app_block_trust', n.get_bool_value()),\n \"enterpriseAppBlockTrustModification\": lambda n : setattr(self, 'enterprise_app_block_trust_modification', n.get_bool_value()),\n \"faceTimeBlocked\": lambda n : setattr(self, 'face_time_blocked', n.get_bool_value()),\n \"findMyFriendsBlocked\": lambda n : setattr(self, 'find_my_friends_blocked', n.get_bool_value()),\n \"gameCenterBlocked\": lambda n : setattr(self, 'game_center_blocked', n.get_bool_value()),\n \"gamingBlockGameCenterFriends\": lambda n : setattr(self, 'gaming_block_game_center_friends', n.get_bool_value()),\n \"gamingBlockMultiplayer\": lambda n : setattr(self, 'gaming_block_multiplayer', n.get_bool_value()),\n \"hostPairingBlocked\": lambda n : setattr(self, 'host_pairing_blocked', n.get_bool_value()),\n \"iBooksStoreBlockErotica\": lambda n : setattr(self, 'i_books_store_block_erotica', n.get_bool_value()),\n \"iBooksStoreBlocked\": lambda n : setattr(self, 'i_books_store_blocked', n.get_bool_value()),\n \"iCloudBlockActivityContinuation\": lambda n : setattr(self, 'i_cloud_block_activity_continuation', n.get_bool_value()),\n \"iCloudBlockBackup\": lambda n : setattr(self, 'i_cloud_block_backup', n.get_bool_value()),\n \"iCloudBlockDocumentSync\": lambda n : setattr(self, 'i_cloud_block_document_sync', n.get_bool_value()),\n \"iCloudBlockManagedAppsSync\": lambda n : setattr(self, 'i_cloud_block_managed_apps_sync', n.get_bool_value()),\n \"iCloudBlockPhotoLibrary\": lambda n : setattr(self, 'i_cloud_block_photo_library', 
n.get_bool_value()),\n \"iCloudBlockPhotoStreamSync\": lambda n : setattr(self, 'i_cloud_block_photo_stream_sync', n.get_bool_value()),\n \"iCloudBlockSharedPhotoStream\": lambda n : setattr(self, 'i_cloud_block_shared_photo_stream', n.get_bool_value()),\n \"iCloudRequireEncryptedBackup\": lambda n : setattr(self, 'i_cloud_require_encrypted_backup', n.get_bool_value()),\n \"iTunesBlockExplicitContent\": lambda n : setattr(self, 'i_tunes_block_explicit_content', n.get_bool_value()),\n \"iTunesBlockMusicService\": lambda n : setattr(self, 'i_tunes_block_music_service', n.get_bool_value()),\n \"iTunesBlockRadio\": lambda n : setattr(self, 'i_tunes_block_radio', n.get_bool_value()),\n \"keyboardBlockAutoCorrect\": lambda n : setattr(self, 'keyboard_block_auto_correct', n.get_bool_value()),\n \"keyboardBlockDictation\": lambda n : setattr(self, 'keyboard_block_dictation', n.get_bool_value()),\n \"keyboardBlockPredictive\": lambda n : setattr(self, 'keyboard_block_predictive', n.get_bool_value()),\n \"keyboardBlockShortcuts\": lambda n : setattr(self, 'keyboard_block_shortcuts', n.get_bool_value()),\n \"keyboardBlockSpellCheck\": lambda n : setattr(self, 'keyboard_block_spell_check', n.get_bool_value()),\n \"kioskModeAllowAssistiveSpeak\": lambda n : setattr(self, 'kiosk_mode_allow_assistive_speak', n.get_bool_value()),\n \"kioskModeAllowAssistiveTouchSettings\": lambda n : setattr(self, 'kiosk_mode_allow_assistive_touch_settings', n.get_bool_value()),\n \"kioskModeAllowAutoLock\": lambda n : setattr(self, 'kiosk_mode_allow_auto_lock', n.get_bool_value()),\n \"kioskModeAllowColorInversionSettings\": lambda n : setattr(self, 'kiosk_mode_allow_color_inversion_settings', n.get_bool_value()),\n \"kioskModeAllowRingerSwitch\": lambda n : setattr(self, 'kiosk_mode_allow_ringer_switch', n.get_bool_value()),\n \"kioskModeAllowScreenRotation\": lambda n : setattr(self, 'kiosk_mode_allow_screen_rotation', n.get_bool_value()),\n \"kioskModeAllowSleepButton\": lambda n : setattr(self, 'kiosk_mode_allow_sleep_button', n.get_bool_value()),\n \"kioskModeAllowTouchscreen\": lambda n : setattr(self, 'kiosk_mode_allow_touchscreen', n.get_bool_value()),\n \"kioskModeAllowVoiceOverSettings\": lambda n : setattr(self, 'kiosk_mode_allow_voice_over_settings', n.get_bool_value()),\n \"kioskModeAllowVolumeButtons\": lambda n : setattr(self, 'kiosk_mode_allow_volume_buttons', n.get_bool_value()),\n \"kioskModeAllowZoomSettings\": lambda n : setattr(self, 'kiosk_mode_allow_zoom_settings', n.get_bool_value()),\n \"kioskModeAppStoreUrl\": lambda n : setattr(self, 'kiosk_mode_app_store_url', n.get_str_value()),\n \"kioskModeBuiltInAppId\": lambda n : setattr(self, 'kiosk_mode_built_in_app_id', n.get_str_value()),\n \"kioskModeManagedAppId\": lambda n : setattr(self, 'kiosk_mode_managed_app_id', n.get_str_value()),\n \"kioskModeRequireAssistiveTouch\": lambda n : setattr(self, 'kiosk_mode_require_assistive_touch', n.get_bool_value()),\n \"kioskModeRequireColorInversion\": lambda n : setattr(self, 'kiosk_mode_require_color_inversion', n.get_bool_value()),\n \"kioskModeRequireMonoAudio\": lambda n : setattr(self, 'kiosk_mode_require_mono_audio', n.get_bool_value()),\n \"kioskModeRequireVoiceOver\": lambda n : setattr(self, 'kiosk_mode_require_voice_over', n.get_bool_value()),\n \"kioskModeRequireZoom\": lambda n : setattr(self, 'kiosk_mode_require_zoom', n.get_bool_value()),\n \"lockScreenBlockControlCenter\": lambda n : setattr(self, 'lock_screen_block_control_center', n.get_bool_value()),\n 
\"lockScreenBlockNotificationView\": lambda n : setattr(self, 'lock_screen_block_notification_view', n.get_bool_value()),\n \"lockScreenBlockPassbook\": lambda n : setattr(self, 'lock_screen_block_passbook', n.get_bool_value()),\n \"lockScreenBlockTodayView\": lambda n : setattr(self, 'lock_screen_block_today_view', n.get_bool_value()),\n \"mediaContentRatingApps\": lambda n : setattr(self, 'media_content_rating_apps', n.get_enum_value(RatingAppsType)),\n \"mediaContentRatingAustralia\": lambda n : setattr(self, 'media_content_rating_australia', n.get_object_value(MediaContentRatingAustralia)),\n \"mediaContentRatingCanada\": lambda n : setattr(self, 'media_content_rating_canada', n.get_object_value(MediaContentRatingCanada)),\n \"mediaContentRatingFrance\": lambda n : setattr(self, 'media_content_rating_france', n.get_object_value(MediaContentRatingFrance)),\n \"mediaContentRatingGermany\": lambda n : setattr(self, 'media_content_rating_germany', n.get_object_value(MediaContentRatingGermany)),\n \"mediaContentRatingIreland\": lambda n : setattr(self, 'media_content_rating_ireland', n.get_object_value(MediaContentRatingIreland)),\n \"mediaContentRatingJapan\": lambda n : setattr(self, 'media_content_rating_japan', n.get_object_value(MediaContentRatingJapan)),\n \"mediaContentRatingNewZealand\": lambda n : setattr(self, 'media_content_rating_new_zealand', n.get_object_value(MediaContentRatingNewZealand)),\n \"mediaContentRatingUnitedKingdom\": lambda n : setattr(self, 'media_content_rating_united_kingdom', n.get_object_value(MediaContentRatingUnitedKingdom)),\n \"mediaContentRatingUnitedStates\": lambda n : setattr(self, 'media_content_rating_united_states', n.get_object_value(MediaContentRatingUnitedStates)),\n \"messagesBlocked\": lambda n : setattr(self, 'messages_blocked', n.get_bool_value()),\n \"networkUsageRules\": lambda n : setattr(self, 'network_usage_rules', n.get_collection_of_object_values(IosNetworkUsageRule)),\n \"notificationsBlockSettingsModification\": lambda n : setattr(self, 'notifications_block_settings_modification', n.get_bool_value()),\n \"passcodeBlockFingerprintModification\": lambda n : setattr(self, 'passcode_block_fingerprint_modification', n.get_bool_value()),\n \"passcodeBlockFingerprintUnlock\": lambda n : setattr(self, 'passcode_block_fingerprint_unlock', n.get_bool_value()),\n \"passcodeBlockModification\": lambda n : setattr(self, 'passcode_block_modification', n.get_bool_value()),\n \"passcodeBlockSimple\": lambda n : setattr(self, 'passcode_block_simple', n.get_bool_value()),\n \"passcodeExpirationDays\": lambda n : setattr(self, 'passcode_expiration_days', n.get_int_value()),\n \"passcodeMinimumCharacterSetCount\": lambda n : setattr(self, 'passcode_minimum_character_set_count', n.get_int_value()),\n \"passcodeMinimumLength\": lambda n : setattr(self, 'passcode_minimum_length', n.get_int_value()),\n \"passcodeMinutesOfInactivityBeforeLock\": lambda n : setattr(self, 'passcode_minutes_of_inactivity_before_lock', n.get_int_value()),\n \"passcodeMinutesOfInactivityBeforeScreenTimeout\": lambda n : setattr(self, 'passcode_minutes_of_inactivity_before_screen_timeout', n.get_int_value()),\n \"passcodePreviousPasscodeBlockCount\": lambda n : setattr(self, 'passcode_previous_passcode_block_count', n.get_int_value()),\n \"passcodeRequired\": lambda n : setattr(self, 'passcode_required', n.get_bool_value()),\n \"passcodeRequiredType\": lambda n : setattr(self, 'passcode_required_type', n.get_enum_value(RequiredPasswordType)),\n 
\"passcodeSignInFailureCountBeforeWipe\": lambda n : setattr(self, 'passcode_sign_in_failure_count_before_wipe', n.get_int_value()),\n \"podcastsBlocked\": lambda n : setattr(self, 'podcasts_blocked', n.get_bool_value()),\n \"safariBlockAutofill\": lambda n : setattr(self, 'safari_block_autofill', n.get_bool_value()),\n \"safariBlockJavaScript\": lambda n : setattr(self, 'safari_block_java_script', n.get_bool_value()),\n \"safariBlockPopups\": lambda n : setattr(self, 'safari_block_popups', n.get_bool_value()),\n \"safariBlocked\": lambda n : setattr(self, 'safari_blocked', n.get_bool_value()),\n \"safariCookieSettings\": lambda n : setattr(self, 'safari_cookie_settings', n.get_enum_value(WebBrowserCookieSettings)),\n \"safariManagedDomains\": lambda n : setattr(self, 'safari_managed_domains', n.get_collection_of_primitive_values(str)),\n \"safariPasswordAutoFillDomains\": lambda n : setattr(self, 'safari_password_auto_fill_domains', n.get_collection_of_primitive_values(str)),\n \"safariRequireFraudWarning\": lambda n : setattr(self, 'safari_require_fraud_warning', n.get_bool_value()),\n \"screenCaptureBlocked\": lambda n : setattr(self, 'screen_capture_blocked', n.get_bool_value()),\n \"siriBlockUserGeneratedContent\": lambda n : setattr(self, 'siri_block_user_generated_content', n.get_bool_value()),\n \"siriBlocked\": lambda n : setattr(self, 'siri_blocked', n.get_bool_value()),\n \"siriBlockedWhenLocked\": lambda n : setattr(self, 'siri_blocked_when_locked', n.get_bool_value()),\n \"siriRequireProfanityFilter\": lambda n : setattr(self, 'siri_require_profanity_filter', n.get_bool_value()),\n \"spotlightBlockInternetResults\": lambda n : setattr(self, 'spotlight_block_internet_results', n.get_bool_value()),\n \"voiceDialingBlocked\": lambda n : setattr(self, 'voice_dialing_blocked', n.get_bool_value()),\n \"wallpaperBlockModification\": lambda n : setattr(self, 'wallpaper_block_modification', n.get_bool_value()),\n \"wiFiConnectOnlyToConfiguredNetworks\": lambda n : setattr(self, 'wi_fi_connect_only_to_configured_networks', n.get_bool_value()),\n }\n super_fields = super().get_field_deserializers()\n fields.update(super_fields)\n return fields", "def deserialize_from_deconstructed(cls, type_cls, args, kwargs):\n raise NotImplementedError", "def deserialize(self, str):\n try:\n end = 0\n start = end\n end += 1\n (self.type,) = _struct_B.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.model = str[start:end].decode('utf-8')\n else:\n self.model = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.head_version = str[start:end].decode('utf-8')\n else:\n self.head_version = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.body_version = str[start:end].decode('utf-8')\n else:\n self.body_version = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.arm_version = str[start:end].decode('utf-8')\n else:\n self.arm_version = str[start:end]\n _x = self\n start = end\n end += 14\n (_x.has_laser, _x.has_extended_arms, _x.number_of_legs, _x.number_of_arms, _x.number_of_hands,) = _struct_2B3i.unpack(str[start:end])\n self.has_laser = bool(self.has_laser)\n self.has_extended_arms = bool(self.has_extended_arms)\n return self\n except struct.error as 
e:\n raise genpy.DeserializationError(e) #most likely buffer underfill", "def PreprocessingDataclassField(feature_type: str):\n\n\n class PreprocessingMarshmallowField(fields.Field):\n \"\"\"Custom marshmallow field that deserializes a dict for a valid preprocessing config from the\n preprocessing_registry and creates a corresponding JSON schema for external usage.\"\"\"\n\n def _deserialize(self, value, attr, data, **kwargs):\n if value is None:\n return None\n if isinstance(value, dict):\n if feature_type in preprocessing_registry:\n pre = preprocessing_registry[feature_type]\n try:\n return pre.Schema().load(value)\n except (TypeError, ValidationError) as error:\n raise ValidationError(f'Invalid preprocessing params: {value}, see `{pre}` definition. Error: {error}')\n raise ValidationError(f'Invalid params for preprocessor: {value}, expect dict with at least a valid `type` attribute.')\n raise ValidationError('Field should be None or dict')\n\n @staticmethod\n def _jsonschema_type_mapping():\n preprocessor_cls = preprocessing_registry[feature_type]\n props = schema_utils.unload_jsonschema_from_marshmallow_class(preprocessor_cls)['properties']\n return {'type': 'object', 'properties': props, 'title': 'preprocessing_options', 'additionalProperties': True}\n try:\n preprocessor = preprocessing_registry[feature_type]\n load_default = preprocessor.Schema().load({'feature_type': feature_type})\n dump_default = preprocessor.Schema().dump({'feature_type': feature_type})\n return field(metadata={'marshmallow_field': PreprocessingMarshmallowField(allow_none=False, dump_default=dump_default, load_default=load_default)}, default_factory=lambda : load_default)\n except Exception as e:\n raise ValidationError(f'Unsupported preprocessing type: {feature_type}. See preprocessing_registry. 
Details: {e}')", "def deserialize(self, data):\n self.data_vals = iter(data.split())\n return self.decode()", "def deserialize(self, blob):\n pass", "def marshmallow(self, formencode_schema, in_='formData'):\n return self.schema(ext.dump_marshmallow(formencode_schema), in_=in_)", "def deserialize(self, instream):\n\n raise Exception(\"Not implemented!\"+self.__class__)", "def deserialize(self, data):\n self.vals = iter(data.split()) ### split() convert string to list's iterator\n return self.decode()", "def get_marshmallow_from_dataclass_field(dfield):\n return dfield.metadata[\"marshmallow_field\"]", "def deserialize(self, data):\n payload = self._unpack(data)\n return decode(payload['body'], content_type=payload['content_type'],\n content_encoding=payload['content_encoding'], force=True)", "def deserialize(self, data):\n root_data = data[\"data_root\"]\n\n self._data_root.deserialize(root_data)\n self.set_model_index(data[\"model_index\"])\n\n return True", "def restore_after_serialize(self):\n self.on_deserialize()", "def deserialize_response(self, serialized_response):\n raise NotImplementedError()", "def EncoderDataclassField(feature_type: str, default: str):\n\n\n class EncoderMarshmallowField(fields.Field):\n \"\"\"Custom marshmallow field that deserializes a dict for a valid encoder config from the encoder_registry\n and creates a corresponding `oneOf` JSON schema for external usage.\"\"\"\n\n def _deserialize(self, value, attr, data, **kwargs):\n if value is None:\n return None\n if isinstance(value, dict):\n if TYPE in value and value[TYPE] in get_encoder_classes(feature_type):\n enc = get_encoder_cls(feature_type, value[TYPE])\n try:\n return enc.Schema().load(value)\n except (TypeError, ValidationError) as error:\n raise ValidationError(f'Invalid encoder params: {value}, see `{enc}` definition. Error: {error}')\n raise ValidationError(f'Invalid params for encoder: {value}, expect dict with at least a valid `type` attribute.')\n raise ValidationError('Field should be None or dict')\n\n @staticmethod\n def _jsonschema_type_mapping():\n encoder_classes = list(get_encoder_classes(feature_type).keys())\n return {'type': 'object', 'properties': {'type': {'type': 'string', 'enum': encoder_classes, 'default': default}}, 'title': 'encoder_options', 'allOf': get_encoder_conds(feature_type)}\n try:\n encoder = get_encoder_cls(feature_type, default)\n load_default = encoder.Schema().load({'type': default})\n dump_default = encoder.Schema().dump({'type': default})\n return field(metadata={'marshmallow_field': EncoderMarshmallowField(allow_none=False, dump_default=dump_default, load_default=load_default)}, default_factory=lambda : load_default)\n except Exception as e:\n raise ValidationError(f'Unsupported encoder type: {default}. See encoder_registry. 
Details: {e}')", "def __init__(self, py_dict=None):\n super(TypeSchema, self).__init__()\n self.set_data_type('xml')\n\n self.typeName = None", "def deserialize(self, payload: str) -> object:\n raise NotImplementedError()", "def get_field_deserializers(self,) -> Dict[str, Callable[[ParseNode], None]]:\n from .managed_app_configuration import ManagedAppConfiguration\n from .managed_app_policy_deployment_summary import ManagedAppPolicyDeploymentSummary\n from .managed_mobile_app import ManagedMobileApp\n from .targeted_managed_app_policy_assignment import TargetedManagedAppPolicyAssignment\n\n from .managed_app_configuration import ManagedAppConfiguration\n from .managed_app_policy_deployment_summary import ManagedAppPolicyDeploymentSummary\n from .managed_mobile_app import ManagedMobileApp\n from .targeted_managed_app_policy_assignment import TargetedManagedAppPolicyAssignment\n\n fields: Dict[str, Callable[[Any], None]] = {\n \"apps\": lambda n : setattr(self, 'apps', n.get_collection_of_object_values(ManagedMobileApp)),\n \"assignments\": lambda n : setattr(self, 'assignments', n.get_collection_of_object_values(TargetedManagedAppPolicyAssignment)),\n \"deployedAppCount\": lambda n : setattr(self, 'deployed_app_count', n.get_int_value()),\n \"deploymentSummary\": lambda n : setattr(self, 'deployment_summary', n.get_object_value(ManagedAppPolicyDeploymentSummary)),\n \"isAssigned\": lambda n : setattr(self, 'is_assigned', n.get_bool_value()),\n }\n super_fields = super().get_field_deserializers()\n fields.update(super_fields)\n return fields", "def __post_init__(self):\n for field in dataclasses.fields(self):\n value = getattr(self, field.name)\n if not isinstance(value, field.type) and value:\n try:\n setattr(self, field.name, field.type(value))\n except ValueError:\n raise ValueError(f\"Expected {field.name} \"\n f\"to be {field.type}, \"\n f\"got {repr(value)}\")", "def test_jsonify_decode(self):\n\n Point = namedtuple('Point', ['x', 'y'], False)\n class Foo(Base):\n _table = ClassReadonlyProperty('foos')\n _primary_key = ClassReadonlyProperty('foo_id')\n\n foo_id = IDField('_id')\n str_field = StringField(default='this is default')\n date_field = DateField()\n int_field = IntField()\n bool_field = BoolField()\n list_field = ListField()\n tuple_field = TupleField(np=Point)\n\n json_str = '''{\n \"__class__\": \"Foo\",\n \"foo_id\": \"1234\",\n \"str_field\": \"anything\",\n \"int_field\": 123,\n \"date_field\": \"2014-12-13\",\n \"bool_field\": false,\n \"tuple_field\":{\n \"x\": 1,\n \"y\": 2\n }\n }'''\n foo = Foo.from_jsonify(json.loads(json_str))\n\n self.assertEqual(foo.foo_id, '1234')\n self.assertEqual(foo.int_field, 123)\n self.assertEqual(foo.bool_field, False)\n self.assertEqual(foo.date_field, datetime.date(2014, 12, 13))\n Point = namedtuple('Point', ['x', 'y'], False)\n self.assertEqual(foo.tuple_field, Point(x=1, y=2))", "def _deserialize(self, value, attr, data):\n if not isinstance(value, str):\n raise ValueError(\"Value must be a string\")\n return super(StringList, self)._deserialize(value.split(self.delimiter), attr, data)", "def _decode(self, msgCls, data):\r\n rosMsg = msgCls()\r\n\r\n for (slotName, slotType) in zip(rosMsg.__slots__, rosMsg._slot_types):\r\n if slotName not in data:\r\n continue\r\n\r\n if '[]' == slotType[-2:]:\r\n listBool = True\r\n slotType = slotType[:-2]\r\n else:\r\n listBool = False\r\n\r\n field = data[slotName]\r\n\r\n if listBool and not isinstance(field, (list, tuple)):\r\n raise TypeError('Given data does not match the definition of 
'\r\n 'the ROS message.')\r\n\r\n if slotType == 'string':\r\n convFunc = _stringify\r\n elif slotType in self._BASE_TYPES:\r\n convFunc = self._BASE_TYPES[slotType]\r\n elif slotType in self._SPECIAL_TYPES:\r\n convFunc = self._SPECIAL_TYPES[slotType]().decode\r\n elif slotType in self._customTypes and _checkIsStringIO(field):\r\n convFunc = self._customTypes[slotType][0]().decode\r\n else:\r\n convFunc = partial(self._decode,\r\n self._loader.loadMsg(*slotType.split('/')))\r\n\r\n if listBool:\r\n convFunc = partial(map, convFunc)\r\n\r\n setattr(rosMsg, slotName, convFunc(field))\r\n\r\n return rosMsg", "def _deserialize_record(self, schema, annotated_datum):\n if not isinstance(annotated_datum, dict):\n raise AvroTypeException(schema, annotated_datum)\n\n result = {}\n for field in schema.fields:\n val = self._deserialize_data(field.type, annotated_datum.get(field.name))\n if val:\n result[field.name] = val\n return result", "def to_python(self, value):\n # Composite types are serialized as JSON blobs. If BaseField.to_python\n # is called with a string, assume it was produced by value_to_string\n # and decode it\n if isinstance(value, str):\n try:\n value = json.loads(value)\n except ValueError as exc:\n raise ValidationError(\n self.error_messages[\"bad_json\"],\n code=\"bad_json\",\n ) from exc\n\n return self.Meta.model(\n **{\n name: field.to_python(value.get(name))\n for name, field in self.Meta.fields\n }\n )\n\n return super().to_python(value)", "def __init__(self, ignoreUnknownFields = False):\n super(Deserializer, self).__init__(ignore_unknown_fields = ignoreUnknownFields)", "def deserialize(self, value, **kwargs):\n kwargs.update({'trusted': kwargs.get('trusted', False)})\n if self.deserializer is not None:\n return self.deserializer(value, **kwargs)\n if value is None:\n return None\n if issubclass(self.instance_class, HasProperties):\n return self.instance_class.deserialize(value, **kwargs)\n return self.from_json(value, **kwargs)", "def get_schema(cls):\n mc_question = schema_fields.FieldRegistry(\n 'Multiple Choice Question',\n description='multiple choice question',\n extra_schema_dict_values={'className': 'mc-container'})\n\n mc_question.add_property(schema_fields.SchemaField(\n 'version', '', 'string', optional=True, hidden=True))\n mc_question.add_property(schema_fields.SchemaField(\n 'question', 'Question', 'html', optional=True,\n extra_schema_dict_values={'className': 'mc-question'}))\n mc_question.add_property(schema_fields.SchemaField(\n 'description', 'Description', 'string', optional=True,\n extra_schema_dict_values={'className': 'mc-description'},\n description=messages.QUESTION_DESCRIPTION))\n mc_question.add_property(schema_fields.SchemaField(\n 'multiple_selections', 'Selection', 'boolean',\n optional=True,\n select_data=[\n ('false', 'Allow only one selection'),\n ('true', 'Allow multiple selections')],\n extra_schema_dict_values={\n '_type': 'radio',\n 'className': 'mc-selection'}))\n\n choice_type = schema_fields.FieldRegistry(\n 'Choice',\n extra_schema_dict_values={'className': 'mc-choice'})\n choice_type.add_property(schema_fields.SchemaField(\n 'score', 'Score', 'string', optional=True,\n extra_schema_dict_values={\n 'className': 'mc-choice-score', 'value': '0'}))\n choice_type.add_property(schema_fields.SchemaField(\n 'text', 'Text', 'html', optional=True,\n extra_schema_dict_values={'className': 'mc-choice-text'}))\n choice_type.add_property(schema_fields.SchemaField(\n 'feedback', 'Feedback', 'html', optional=True,\n 
extra_schema_dict_values={'className': 'mc-choice-feedback'}))\n\n choices_array = schema_fields.FieldArray(\n 'choices', '', item_type=choice_type,\n extra_schema_dict_values={\n 'className': 'mc-choice-container',\n 'listAddLabel': 'Add a choice',\n 'listRemoveLabel': 'Delete choice'})\n\n mc_question.add_property(choices_array)\n\n return mc_question", "def deserializer():\n return bytes.decode", "def deserialize(self, datastring, content_type):\r\n return self.get_deserialize_handler(content_type).deserialize(\r\n datastring)", "def _spark_struct_field(self) -> StructField:", "def deserialize(cls, record):\n return cls(\n source=record.get(\"source\", \"\"),\n category=record.get(\"category\", \"\"),\n name=record.get(\"name\", \"\"),\n message=record.get(\"message\", \"\"),\n timestamp=record.get(\"timestamp\", \"\"),\n **record[\"data\"],\n )", "def test_is_deserialized(self, base):\n\n @add_schema\n class MyModel(base):\n fields = dict(field=Callback(\"find\", cache=True))\n\n def find(self):\n return 5\n\n mymodel = MyModel()\n\n # field is not deserialized\n assert not mymodel.is_deserialized(\"field\")\n\n # field gets cached\n assert mymodel.field == 5\n\n # field is now derserialized\n print(mymodel._get_deserialized_data())\n assert mymodel.is_deserialized(\"field\")\n\n # reset the field by setting to None\n mymodel.field = Callback.ACCESSOR.HOLDER\n assert not mymodel.is_deserialized(\"field\")\n print(mymodel._get_deserialized_data())\n mymodel.field\n assert mymodel.is_deserialized(\"field\")", "def deserialize(self, payload: bytes) -> object:\n raise NotImplementedError()", "def _deserialize(data, klass):\n if data is None:\n return None\n\n if klass in integer_types or klass in (float, str, bool):\n return _deserialize_primitive(data, klass)\n elif klass == object:\n return _deserialize_object(data)\n elif klass == date:\n return deserialize_date(data)\n elif klass == datetime:\n return deserialize_datetime(data)\n elif type(klass) == GenericMeta:\n if klass.__extra__ == list:\n return _deserialize_list(data, klass.__args__[0])\n if klass.__extra__ == dict:\n return _deserialize_dict(data, klass.__args__[1])\n else:\n return deserialize_model(data, klass)", "def __init__(self, raw_type: Dict):\n\n self.kind = raw_type.get(\"kind\")\n self.name = raw_type.get(\"name\")\n self.description = raw_type.get(\"description\")\n self.fields: List[SchemaTypeField] = [SchemaTypeField(f) for f in raw_type.get(\"fields\") or [] if f]\n self.input_fields = [SchemaTypeInputField(i) for i in raw_type.get(\"inputFields\") or [] if i]\n self.interfaces = [SchemaTypeInterface(i) for i in raw_type.get(\"interfaces\") or [] if i]\n self.enum_values = [SchemaTypeEnum(e) for e in raw_type.get(\"enumValues\") or [] if e]\n self.possible_types = raw_type.get(\"possibleTypes\")", "def schema(self):\n if not self._schema:\n # Inherit context from parent.\n context = getattr(self.parent, \"context\", {})\n if callable(self.nested) and not isinstance(self.nested, type):\n nested = self.nested()\n else:\n nested = self.nested\n if isinstance(nested, dict):\n # defer the import of `marshmallow.schema` to avoid circular imports\n from marshmallow.schema import Schema\n\n nested = Schema.from_dict(nested)\n\n if isinstance(nested, SchemaABC):\n self._schema = copy.copy(nested)\n self._schema.context.update(context)\n # Respect only and exclude passed from parent and re-initialize fields\n set_class = self._schema.set_class\n if self.only is not None:\n if self._schema.only is not None:\n original = 
self._schema.only\n else: # only=None -> all fields\n original = self._schema.fields.keys()\n self._schema.only = set_class(self.only) & set_class(original)\n if self.exclude:\n original = self._schema.exclude\n self._schema.exclude = set_class(self.exclude) | set_class(original)\n self._schema._init_fields()\n else:\n if isinstance(nested, type) and issubclass(nested, SchemaABC):\n schema_class = nested\n elif not isinstance(nested, (str, bytes)):\n raise ValueError(\n \"`Nested` fields must be passed a \"\n \"`Schema`, not {}.\".format(nested.__class__)\n )\n elif nested == \"self\":\n schema_class = self.root.__class__\n else:\n schema_class = class_registry.get_class(nested)\n self._schema = schema_class(\n many=self.many,\n only=self.only,\n exclude=self.exclude,\n context=context,\n load_only=self._nested_normalized_option(\"load_only\"),\n dump_only=self._nested_normalized_option(\"dump_only\"),\n )\n return self._schema", "def deserialize(self, str):\n try:\n end = 0\n _x = self\n start = end\n end += 24\n (_x.sysid, _x.compid, _x.limits_state, _x.last_trigger, _x.last_action, _x.last_recovery, _x.last_clear, _x.breach_count, _x.mods_enabled, _x.mods_required, _x.mods_triggered,) = _struct_3B4IH3B.unpack(str[start:end])\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) #most likely buffer underfill", "def deserialize(self, str):\n try:\n end = 0\n _x = self\n start = end\n end += 11\n (_x.partial_view, _x.resolution, _x.type, _x.use_simple_occlusion, _x.add_point_colors,) = _struct_B2i2B.unpack(str[start:end])\n self.partial_view = bool(self.partial_view)\n self.use_simple_occlusion = bool(self.use_simple_occlusion)\n self.add_point_colors = bool(self.add_point_colors)\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) #most likely buffer underfill", "def _deserialize_union(self, schema, annotated_datum):\n if not annotated_datum:\n return self._deserialize_null()\n\n if not isinstance(annotated_datum, dict):\n raise AvroTypeException(schema, annotated_datum)\n\n key = list(annotated_datum.keys())[0]\n for candidate_schema in schema.schemas:\n if isinstance(candidate_schema, avro.schema.NamedSchema):\n if candidate_schema.name == key:\n return self._deserialize_data(candidate_schema, annotated_datum[key])\n else:\n if candidate_schema.type == key:\n return self._deserialize_data(candidate_schema, annotated_datum[key])\n raise schema.AvroTypeException(schema, datum)", "def get_field_deserializers(self,) -> Dict[str, Callable[[ParseNode], None]]:\n from .win32_lob_app_rule import Win32LobAppRule\n from .win32_lob_app_rule_operator import Win32LobAppRuleOperator\n\n from .win32_lob_app_rule import Win32LobAppRule\n from .win32_lob_app_rule_operator import Win32LobAppRuleOperator\n\n fields: Dict[str, Callable[[Any], None]] = {\n \"productCode\": lambda n : setattr(self, 'product_code', n.get_str_value()),\n \"productVersion\": lambda n : setattr(self, 'product_version', n.get_str_value()),\n \"productVersionOperator\": lambda n : setattr(self, 'product_version_operator', n.get_enum_value(Win32LobAppRuleOperator)),\n }\n super_fields = super().get_field_deserializers()\n fields.update(super_fields)\n return fields", "def deserialize_numpy(self, str, numpy):\n try:\n end = 0\n start = end\n end += 1\n (self.type,) = _struct_B.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.model = str[start:end].decode('utf-8')\n else:\n self.model 
= str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.head_version = str[start:end].decode('utf-8')\n else:\n self.head_version = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.body_version = str[start:end].decode('utf-8')\n else:\n self.body_version = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.arm_version = str[start:end].decode('utf-8')\n else:\n self.arm_version = str[start:end]\n _x = self\n start = end\n end += 14\n (_x.has_laser, _x.has_extended_arms, _x.number_of_legs, _x.number_of_arms, _x.number_of_hands,) = _struct_2B3i.unpack(str[start:end])\n self.has_laser = bool(self.has_laser)\n self.has_extended_arms = bool(self.has_extended_arms)\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) #most likely buffer underfill", "def deserialize(cls, serd):\r\n return cls(*serd)", "def _deserialize_deconstructed(cls, payload):\n cls_path = payload['type']\n cls_module, cls_name = cls_path.rsplit('.', 1)\n\n try:\n cls_type = getattr(import_module(cls_module), cls_name)\n except (AttributeError, ImportError):\n raise ImportError('Unable to locate value type %s' % cls_path)\n\n args = tuple(\n deserialize_from_signature(_arg_value)\n for _arg_value in payload['args']\n )\n\n kwargs = {\n _key: deserialize_from_signature(_arg_value)\n for _key, _arg_value in six.iteritems(payload['kwargs'])\n }\n\n return cls_type, args, kwargs", "def OptimizerDataclassField(default={'type': 'adam'}, description='TODO'):\n\n\n class OptimizerMarshmallowField(fields.Field):\n \"\"\"Custom marshmallow field that deserializes a dict to a valid optimizer from\n `ludwig.modules.optimization_modules.optimizer_registry` and creates a corresponding `oneOf` JSON schema\n for external usage.\"\"\"\n\n def _deserialize(self, value, attr, data, **kwargs):\n if value is None:\n return None\n if isinstance(value, dict):\n if 'type' in value and value['type'] in optimizer_registry:\n opt = optimizer_registry[value['type'].lower()][1]\n try:\n return opt.Schema().load(value)\n except (TypeError, ValidationError) as e:\n raise ValidationError(f'Invalid params for optimizer: {value}, see `{opt}` definition. 
Error: {e}')\n raise ValidationError(f'Invalid params for optimizer: {value}, expect dict with at least a valid `type` attribute.')\n raise ValidationError('Field should be None or dict')\n\n @staticmethod\n def _jsonschema_type_mapping():\n return {'type': 'object', 'properties': {'type': {'type': 'string', 'enum': list(optimizer_registry.keys()), 'default': default['type'], 'description': 'The type of optimizer to use during the learning process'}}, 'title': 'optimizer_options', 'allOf': get_optimizer_conds(), 'required': ['type'], 'description': description}\n if not isinstance(default, dict) or 'type' not in default or default['type'] not in optimizer_registry:\n raise ValidationError(f'Invalid default: `{default}`')\n try:\n opt = optimizer_registry[default['type'].lower()][1]\n load_default = opt.Schema()\n load_default = load_default.load(default)\n dump_default = opt.Schema().dump(default)\n return field(metadata={'marshmallow_field': OptimizerMarshmallowField(allow_none=False, dump_default=dump_default, load_default=load_default, metadata={'description': description})}, default_factory=lambda : load_default)\n except Exception as e:\n raise ValidationError(f\"Unsupported optimizer type: {default['type']}. See optimizer_registry. Details: {e}\")", "def get_field_deserializers(self,) -> Dict[str, Callable[[ParseNode], None]]:\n from .entity import Entity\n from .identity_set import IdentitySet\n from .teams_app_authorization import TeamsAppAuthorization\n from .teams_app_publishing_state import TeamsAppPublishingState\n from .teamwork_bot import TeamworkBot\n\n from .entity import Entity\n from .identity_set import IdentitySet\n from .teams_app_authorization import TeamsAppAuthorization\n from .teams_app_publishing_state import TeamsAppPublishingState\n from .teamwork_bot import TeamworkBot\n\n fields: Dict[str, Callable[[Any], None]] = {\n \"authorization\": lambda n : setattr(self, 'authorization', n.get_object_value(TeamsAppAuthorization)),\n \"bot\": lambda n : setattr(self, 'bot', n.get_object_value(TeamworkBot)),\n \"createdBy\": lambda n : setattr(self, 'created_by', n.get_object_value(IdentitySet)),\n \"description\": lambda n : setattr(self, 'description', n.get_str_value()),\n \"displayName\": lambda n : setattr(self, 'display_name', n.get_str_value()),\n \"lastModifiedDateTime\": lambda n : setattr(self, 'last_modified_date_time', n.get_datetime_value()),\n \"publishingState\": lambda n : setattr(self, 'publishing_state', n.get_enum_value(TeamsAppPublishingState)),\n \"shortDescription\": lambda n : setattr(self, 'short_description', n.get_str_value()),\n \"teamsAppId\": lambda n : setattr(self, 'teams_app_id', n.get_str_value()),\n \"version\": lambda n : setattr(self, 'version', n.get_str_value()),\n }\n super_fields = super().get_field_deserializers()\n fields.update(super_fields)\n return fields", "def _json_compat_obj_decode_helper(\n data_type, obj, alias_validators, strict, old_style, for_msgpack):\n if isinstance(data_type, bv.StructTree):\n return _decode_struct_tree(\n data_type, obj, alias_validators, strict, for_msgpack)\n elif isinstance(data_type, bv.Struct):\n return _decode_struct(\n data_type, obj, alias_validators, strict, old_style, for_msgpack)\n elif isinstance(data_type, bv.Union):\n if old_style:\n return _decode_union_old(\n data_type, obj, alias_validators, strict, for_msgpack)\n else:\n return _decode_union(\n data_type, obj, alias_validators, strict, for_msgpack)\n elif isinstance(data_type, bv.List):\n return _decode_list(\n data_type, obj, 
alias_validators, strict, old_style, for_msgpack)\n elif isinstance(data_type, bv.Nullable):\n return _decode_nullable(\n data_type, obj, alias_validators, strict, old_style, for_msgpack)\n elif isinstance(data_type, bv.Primitive):\n # Set validate to false because validation will be done by the\n # containing struct or union when the field is assigned.\n return _make_babel_friendly(\n data_type, obj, alias_validators, strict, False, for_msgpack)\n else:\n raise AssertionError('Cannot handle type %r.' % data_type)", "def json_decode(\n data_type, serialized_obj, alias_validators=None, strict=True,\n old_style=False):\n try:\n deserialized_obj = json.loads(serialized_obj)\n except ValueError:\n raise bv.ValidationError('could not decode input as JSON')\n else:\n return json_compat_obj_decode(\n data_type, deserialized_obj, alias_validators, strict, old_style)", "def deserialize(serializer_class, data, **kwargs):\n\n serializer = serializer_class(data=data, **kwargs)\n serializer.is_valid(raise_exception=True)\n\n return serializer", "def deserialize(self, data):\n return self.insertLevelOrder(data, None, 0, len(data))", "def deserialize(self, jsonData):\n super(AnyPin, self).deserialize(jsonData)\n if \"currDataType\" in jsonData:\n self.setType(jsonData[\"currDataType\"])\n\n pinClass = findPinClassByType(self.activeDataType)\n try:\n self.setData(json.loads(\n jsonData['value'], cls=pinClass.jsonDecoderClass()))\n except:\n self.setData(self.defaultValue())\n\n self.updateError([])", "def _deserialize_attr_value(cls, sig_value):\n if (isinstance(sig_value, dict) and\n sig_value.get('_deconstructed') is True):\n attr_cls_path = sig_value['type']\n attr_cls_module, attr_cls_name = attr_cls_path.rsplit('.', 1)\n\n try:\n attr_cls = getattr(import_module(attr_cls_module),\n attr_cls_name)\n except (AttributeError, ImportError):\n raise ImportError('Unable to locate constraint attribute '\n 'value type %s'\n % attr_cls_path)\n\n args = tuple(\n cls._deserialize_attr_value(arg_value)\n for arg_value in sig_value['args']\n )\n\n kwargs = {\n key: cls._deserialize_attr_value(arg_value)\n for key, arg_value in six.iteritems(sig_value['kwargs'])\n }\n\n # Let any exception bubble up.\n value = attr_cls(*args, **kwargs)\n else:\n value = sig_value\n\n return value", "def deserialize(self, str):\n try:\n end = 0\n _x = self\n start = end\n end += 36\n (_x.mask, _x.dynModel, _x.fixMode, _x.fixedAlt, _x.fixedAltVar, _x.minElev, _x.drLimit, _x.pDop, _x.tDop, _x.pAcc, _x.tAcc, _x.staticHoldThresh, _x.dgpsTimeOut, _x.reserved2, _x.reserved3, _x.reserved4,) = _get_struct_H2BiIbB4H2B3I().unpack(str[start:end])\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) #most likely buffer underfill", "def from_dict(cls, dikt) -> 'LightSourceMaterialSchema':\n return util.deserialize_model(dikt, cls)", "def test_deserialize_a_recommendation(self):\n data = {\n \"product_id\": 1,\n \"recommendation_product_id\": 2,\n \"relationship\": Type.UP_SELL\n }\n recommendation = Recommendation()\n recommendation.deserialize(data)\n self.assertNotEqual(recommendation, None)\n self.assertEqual(recommendation.product_id, 1)\n self.assertEqual(recommendation.recommendation_product_id, 2)\n self.assertEqual(recommendation.relationship, Type.UP_SELL)", "def presavemodel(self):\n # one thing we need to do here is handle any lazy serialization helpers.\"\"\"\n self.presavemodel_serializationhelpers_updatefields()", "def __init__(self, reader_schema, writer_schema=None, input_file=None):\n\n if 
writer_schema is None:\n writer_schema = reader_schema\n self._reader_schema = reader_schema\n self._writer_schema = writer_schema\n self._reader_schema_json = json.loads(str(self._reader_schema))\n self._writer_schema_json = json.loads(str(self._writer_schema))\n self._input_file = input_file\n self._set_avro_readers()", "def deserialize_object(self, data: Dict[str, Any], many: bool, **kwargs) -> Job:\n return self.__model__(**data)", "def convert_type(self, value, schema_type, **kwargs):", "def deserialize(self, reader: serialization.BinaryReader) -> None:\n self.deserialize_unsigned(reader)\n witness_obj_count = reader.read_uint8()\n if witness_obj_count != 1:\n raise ValueError(f\"Deserialization error - Witness object count is {witness_obj_count} must be 1\")\n self.witness = reader.read_serializable(payloads.Witness)", "def serializer_class(self):", "def deserialize(self, data, hashmap={}):\n raise NotImplemented()", "def from_serializable(self, _):\n\n assert False, \"Not implemented\"", "def test_deserialize_incomplete(self):\n if (self._cls != 'MetaschemaType') and (len(self._valid_decoded) > 0):\n out = self.instance.serialize(self._valid_decoded[0])\n obj, metadata = self.instance.deserialize(out[:-1])\n self.assert_equal(metadata['incomplete'], True)", "def _deserialize(obj):\r\n from thunderdome.models import Element\r\n\r\n if isinstance(obj, dict) and '_id' in obj and '_type' in obj:\r\n return Element.deserialize(obj)\r\n elif isinstance(obj, dict):\r\n return {k:GremlinMethod._deserialize(v) for k,v in obj.items()}\r\n elif isinstance(obj, list):\r\n return [GremlinMethod._deserialize(v) for v in obj]\r\n else:\r\n return obj", "def deserialize(self, data):\n deserial_list = data.split(' ')\n for i in range(len(deserial_list)):\n if deserial_list[i] == 'None':\n deserial_list[i] = None\n else:\n deserial_list[i] = int(deserial_list[i])\n index = -1\n return self.__preorder_deserial(deserial_list, index)[0]", "def deserialize_from_signature(cls, payload):\n raise NotImplementedError" ]
[ "0.6336738", "0.62138134", "0.62059647", "0.5973148", "0.5872674", "0.5866745", "0.58309895", "0.5817072", "0.5784642", "0.5752867", "0.5675256", "0.56737214", "0.5669053", "0.5660375", "0.564848", "0.55696976", "0.55506283", "0.5532286", "0.5526995", "0.5522127", "0.5515528", "0.5504151", "0.54166436", "0.5416623", "0.541428", "0.5399027", "0.53949577", "0.5384951", "0.53748685", "0.53747857", "0.53734237", "0.5359449", "0.5351173", "0.53296864", "0.53042763", "0.52972704", "0.5293298", "0.52662873", "0.52230805", "0.52044815", "0.5150817", "0.5140733", "0.51344484", "0.5110033", "0.5103627", "0.510026", "0.50941586", "0.5076317", "0.5075238", "0.50681573", "0.50574684", "0.50571734", "0.505575", "0.50544316", "0.5042057", "0.5041936", "0.50386405", "0.50253695", "0.50243217", "0.50155944", "0.5010161", "0.49998027", "0.49982864", "0.49912167", "0.49898314", "0.4983348", "0.49819008", "0.49656054", "0.4962624", "0.4959338", "0.49232244", "0.49214548", "0.4920822", "0.49129558", "0.49039203", "0.4901403", "0.48833337", "0.48713923", "0.48626593", "0.48621675", "0.48553798", "0.4850995", "0.48483357", "0.484544", "0.48435083", "0.48399377", "0.4835757", "0.483532", "0.483423", "0.48338416", "0.48287976", "0.4825418", "0.48240057", "0.48229688", "0.48164243", "0.4814487", "0.481162", "0.4809684", "0.4802885", "0.48004737" ]
0.6168223
3
Receive a request from the worker

work_socket: receive a request on this socket
timeout: if request isn't received by the timeout, raise six.moves.queue.Empty; default = blocks forever

This polls on both the worker and up_queue sockets and will throw an exception if there is anything available on the upqueue as this indicates that nothing is running.
def recv(self, work_socket, timeout=None): poller = zmq.Poller() poller.register(self.up_queue_recv_socket, zmq.POLLIN) poller.register(work_socket, zmq.POLLIN) for socket, state in poller.poll(timeout): if socket == self.up_queue_recv_socket and state == zmq.POLLIN: result, e = self.up_queue.get() if e is not None: raise e else: raise cellprofiler_core.pipeline.event.CancelledException( "Unexpected exit during recv" ) if socket == work_socket and state == zmq.POLLIN: return cellprofiler_core.utilities.zmq.communicable.Communicable.recv( work_socket ) raise six.moves.queue.Empty
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def process_request_thread(self):\n while True:\n try:\n request, client_address = self._request_queue.get(\n timeout=self.timeout_on_get,\n )\n except Queue.Empty:\n # You wouldn't believe how much crap this can end up leaking,\n # so we clear the exception.\n sys.exc_clear()\n if self._shutdown_event.isSet():\n return\n continue\n try:\n self.finish_request(request, client_address)\n self.shutdown_request(request)\n except:\n self.handle_error(request, client_address)\n self.shutdown_request(request)\n self._request_queue.task_done()", "def poll(self, timeout=-1):\n future = self._Future()\n if timeout == 0:\n try:\n result = super(_AsyncPoller, self).poll(0)\n except Exception as e:\n future.set_exception(e)\n else:\n future.set_result(result)\n return future\n \n loop = self._default_loop()\n \n # register Future to be called as soon as any event is available on any socket\n watcher = self._Future()\n \n # watch raw sockets:\n raw_sockets = []\n def wake_raw(*args):\n if not watcher.done():\n watcher.set_result(None)\n\n watcher.add_done_callback(lambda f: self._unwatch_raw_sockets(loop, *raw_sockets))\n\n for socket, mask in self.sockets:\n if isinstance(socket, _zmq.Socket):\n if not isinstance(socket, self._socket_class):\n # it's a blocking zmq.Socket, wrap it in async\n socket = self._socket_class.from_socket(socket)\n if mask & _zmq.POLLIN:\n socket._add_recv_event('poll', future=watcher)\n if mask & _zmq.POLLOUT:\n socket._add_send_event('poll', future=watcher)\n else:\n raw_sockets.append(socket)\n evt = 0\n if mask & _zmq.POLLIN:\n evt |= self._READ\n if mask & _zmq.POLLOUT:\n evt |= self._WRITE\n self._watch_raw_socket(loop, socket, evt, wake_raw)\n\n def on_poll_ready(f):\n if future.done():\n return\n if watcher.cancelled():\n try:\n future.cancel()\n except RuntimeError:\n # RuntimeError may be called during teardown\n pass\n return\n if watcher.exception():\n future.set_exception(watcher.exception())\n else:\n try:\n result = super(_AsyncPoller, self).poll(0)\n except Exception as e:\n future.set_exception(e)\n else:\n future.set_result(result)\n watcher.add_done_callback(on_poll_ready)\n \n if timeout is not None and timeout > 0:\n # schedule cancel to fire on poll timeout, if any\n def trigger_timeout():\n if not watcher.done():\n watcher.set_result(None)\n \n timeout_handle = loop.call_later(\n 1e-3 * timeout,\n trigger_timeout\n )\n def cancel_timeout(f):\n if hasattr(timeout_handle, 'cancel'):\n timeout_handle.cancel()\n else:\n loop.remove_timeout(timeout_handle)\n future.add_done_callback(cancel_timeout)\n\n def cancel_watcher(f):\n if not watcher.done():\n watcher.cancel()\n future.add_done_callback(cancel_watcher)\n\n return future", "def ecute(self):\n msg = self.up_queue_recv_socket.recv()\n result, e = self.up_queue.get()\n if e is not None:\n raise e\n return result", "def poll(self):\n while self.running and reactor._started and not reactor._stopped:\n self.check_response_queue()\n sleep(0.5)", "def _recv(self) -> None:\n if not self.connected or now() < self.next_poll:\n return\n self.next_poll += self.poll_interval\n data = []\n while True:\n try:\n data.append(self.endpoint.recv(BUFFSIZE))\n except BlockingIOError:\n break\n if data:\n stream = io.BytesIO(b\"\".join(data))\n while True:\n try:\n info = pickle.load(stream)\n msg = Message(*info)\n self.inq.append(msg)\n except EOFError:\n break", "def WaitForRequest(self):\r\n selector = selectors.DefaultSelector()\r\n selector.register(self.listenSocket, selectors.EVENT_READ)\r\n while True:\r\n events 
= selector.select(timeout = 10)\r\n for __, __ in events:\r\n\r\n self.listenSocket.setblocking(True)\r\n sock, address = self.listenSocket.accept()\r\n self.listenSocket.setblocking(False) \r\n bgThread = threading.Thread(target=self.HandleRemoteCall, args=(sock, address))\r\n bgThread.start()", "def receive(self):\n events = self.poller.poll(self.timeout)\n\n # If there is control socket, he has the priority\n if len(events) == 2:\n return self._recv_serialized(self.control_socket)\n elif len(events) == 1:\n return self._recv_serialized(events[0][0])\n return None", "def _receive(self):\n # initialize sockets map\n r, w, x = [self.socket], [], []\n r, w, x = select.select(r, w, x, self.sessiondata.timeout)\n if r:\n return self.socket.recv(4096)\n # return nothing on timeout\n return None", "def _worker(self):\n while True:\n request = self.queue.get()\n self.worker(request)\n self.queue.task_done()", "def _run(self):\n\n while self._thread_alive_event.is_set():\n reported_events = self._poll.poll(self.POLL_TIMEOUT)\n\n for fd_event_pair in reported_events:\n fd, event = fd_event_pair\n\n if event & select.POLLIN or event & select.POLLPRI:\n self._recv(fd)\n\n elif event & select.POLLERR:\n self.logger.error(\"Error condition of some sort\")\n self._thread_alive_event.clear()\n break\n\n elif event & select.POLLNVAL:\n self.logger.error(\"Invalid request: descriptor not open\")\n self._thread_alive_event.clear()\n break", "def run(self):\n while True:\n # Check to see if we should stop\n if self._stop.isSet():\n logger.debug(\"Worker thread stopping.\")\n break\n\n # Try to pull from the queue\n try:\n func, args, kwargs = self.queue.get_nowait()\n func(*args, **kwargs)\n except Queue.Empty:\n time.sleep(5)\n continue\n except Exception as e:\n logger.exception(e)", "def work():\n with rq.Connection(create_connection()):\n worker = rq.Worker(list(map(rq.Queue, listen)))\n worker.work()", "def receive(self, request_id, timeout=None):\n res = None\n start_time = time.time()\n while res is None:\n with self.connlock:\n res = self.conn.do_receive(request_id)\n if res is None:\n time.sleep(0.1)\n if timeout and (time.time() - start_time > timeout):\n raise RequestTimeout(request_id)\n\n if 'Error' in res:\n raise ServerError(res['Error'], res)\n\n try:\n return res['Response']\n except:\n raise BadResponseError(\"Failed to parse response: {}\".format(res))", "def run(self) -> None:\n\n while not self.stop_event.is_set():\n if self.my_queue:\n # if heartbeat received at '/heartbeat' route from the monitored peer,\n # sleep until next\n self.my_queue.clear()\n time.sleep(7)\n\n else:\n # else drop peer data from database and inform central server appending '0'\n # to my queue\n self.db_access.drop_peer(self.peer_id)\n self.my_queue.append(0)\n break", "async def read(self) -> None:\n make_non_blocking(self.stream)\n\n while not self.stream.closed:\n message = None\n try:\n message = await self.read_one()\n\n if not message:\n await self.sleep()\n continue\n else:\n self.wake()\n\n IOLoop.current().add_callback(self.queue.put_nowait, message)\n except Exception as e: # pragma: no cover\n self.log.exception(\n \"%s couldn't enqueue message: %s (%s)\", self, message, e\n )\n await self.sleep()", "def run(self):\n self.poller = select.epoll()\n self.pollmask = select.EPOLLIN | select.EPOLLHUP | select.EPOLLERR\n self.poller.register(self.server,self.pollmask)\n self.timeout = float(self.webconfig.parameters[\"timeout\"])\n lastSweep = time.time()\n\n while True:\n # poll sockets\n\n if (time.time() 
- lastSweep) > .5: #sweet through every half second\n self.socketCheck()\n lastSweep = time.time()\n try:\n fds = self.poller.poll(timeout=1.0)\n except:\n return\n fd = 0\n for (fd,event) in fds:\n # handle errors\n if event & (select.POLLHUP | select.POLLERR):\n self.handleError(fd)\n continue\n # handle the server socket\n if fd == self.server.fileno():\n self.handleServer()\n continue\n # handle client socket\n result = self.handleClient(fd)", "def work(self, worker_id):\n\n try:\n while self.running:\n # blocking request - timeout 3 seconds\n messageSent = False\n try:\n # throws queue.Empty exception if it fails to get an item in 3 seconds\n priorityItem = self.message_queue.get(True, 3)\n topic = priorityItem.item.topic\n self.metric_handler.increment_observed()\n print(f\"sending message on topic {topic} approximate queue size: {self.message_queue.qsize()}\")\n\n if self.sampling == True:\n if self.worker_sample_counts[worker_id][topic] == self.topic_sample_rates[topic]:\n self.send_message(priorityItem.item, worker_id)\n self.worker_sample_counts[worker_id][topic] = 0\n else:\n self.worker_sample_counts[worker_id][topic] += 1\n else:\n self.send_message(priorityItem.item, worker_id)\n \n # might not have actually been sent if we are sampling, but dont attempt to send it in finally\n messageSent = True\n\n except (ConnectionResetError, BrokenPipeError, ConnectionResetError) as e:\n # should maybe record number of times connection breaks? Will get wordy\n self.get_logger().error(f\"Error sending socket message: {str(e)}\")\n self.init_socket_with_rety(worker_id)\n except queue.Empty:\n priorityItem = None\n pass\n finally:\n # give one more attempt at sending the message if we failed\n if not messageSent and priorityItem is not None:\n try:\n self.send_message(priorityItem.item, worker_id)\n except:\n pass\n except Exception as ex:\n self.get_logger().error(f\"Worker thread {worker_id} exitting unexpectedly with error: {str(ex)}\")\n self.get_logger().error(traceback.format_exc())\n finally:\n self.get_logger().info(f\"Worker thread {worker_id} finishing.\")", "def _poll(self):\n return self.zmq_core.poll(10)", "def queue_processor(self):\n\n while self.state != consts.SMPP_CLIENT_STATE_CLOSED:\n try:\n p = self.queue.get(timeout=1)\n self._request_handler(p)\n self.queue.task_done()\n except Empty:\n pass", "def run(self):\n self.channel.queue_declare(self._request_queue)\n self.channel.basic_consume(self._request_queue, self.on_message)\n try:\n msg = \"Waiting for message ...\"\n print(msg)\n logging.info(msg)\n self.channel.start_consuming()\n except KeyboardInterrupt:\n self.channel.stop_consuming()\n\n self.connection.close()", "def _client(self):\n while True:\n body = self.queue.get(True)\n print \"Sending %s bytes (%s/%s)\" % (len(body), self.queue.qsize(), self.queue.maxsize)\n\n try:\n req = urllib2.Request(self.endpoint, body)\n urllib2.urlopen(req).read()\n except:\n print \"Cannot send request. 
Retrying in 5 seconds\"\n print_exception(*sys.exc_info())\n print \"continuing...\"\n self.enqueue(body)\n sleep(5)", "def monitor_queue(self):\n\n while True:\n job = self.queue.next()\n if job:\n # print(\"found %s\" % (job.job_id))\n\n job_name = job.payload[\"job_name\"]\n\n if job_name in self.mul_func_map:\n\n t = self.mul_func_map[job_name]\n p = multiprocessing.Process(target=t, args=(job,))\n p.daemon = True\n p.start()\n\n elif job_name in self.th_func_map:\n\n t = self.th_func_map[job_name]\n # create a thread to process the job\n p = threading.Thread(target=t, args=(job,))\n p.daemon = True\n # start the thread, going into the worker function\n p.start()\n\n elif job_name in self.fk_func_map:\n t = self.fk_func_map[job_name]\n if not os.fork():\n os.setsid()\n t(job)\n exit()\n else:\n # jobs in this queue that are unknown are presently being skipped\n # however they could probably get moved to a 'dead letter' queue\n # for closer examination\n print(\"unknown job name %s, skipping\" % (job_name))\n\n # throttle so that other worker subscribers get a chance\n time.sleep(self.queue_delay)\n else:\n time.sleep(self.poll_delay)\n\n # prints the number of threads\n # print len(threading.enumerate())", "def dequeue(self, timeout=0):\n result = self.connection.dequeue_any([self], timeout)\n if result:\n job, queue = result\n return job\n else:\n return None", "def listen_for_messages(self, callback):\n # generate get requests for all input queues\n requests = [port.in_queue.get() for port in self.ports]\n while requests:\n # helper variable for the asserts\n queues_with_pending_requests = [req.resource for req in requests]\n # There is a request for each input queue.\n assert set(self.input_queues) == set(queues_with_pending_requests)\n # For each input queue there's exactly one request.\n assert (\n len(queues_with_pending_requests) ==\n len(set(queues_with_pending_requests)))\n\n log.debug(\"{} waiting for next reception\".format(self))\n completed_requests = (yield self.env.any_of(requests))\n received_messages = list(completed_requests.values())\n log.debug(\"{} received {}\".format(\n self, received_messages))\n\n callback(received_messages)\n\n # Only leave the requests which have not been completed yet\n remaining_requests = [\n req for req in requests if req not in completed_requests]\n # Input queues that have been emptied since the last wake up.\n emptied_queues = [req.resource for req in completed_requests]\n # Add new get requests for the input queues that have been emptied.\n new_requests = []\n for input_queue in emptied_queues:\n new_requests.append(input_queue.get())\n requests = remaining_requests + new_requests", "async def _process_queue(self, callback, socket_info,\n has_heartbeat_seq=True):\n pending_callback = False\n while True:\n unparsed_message = await socket_info.queue.get()\n #log.debug(\"Received: \" + unparsed_message)\n response = json.loads(unparsed_message)\n # Sometimes the response is a list sometimes not. Convert to list.\n message_list = response if type(response) == list else [response]\n if not message_list:\n log.warning(\"Received empty message from Gemini. 
This isn't a \"\n \"type of response documented in their API docs.\")\n continue\n if message_list[0]['type'] == 'heartbeat':\n if has_heartbeat_seq:\n self._process_heartbeat(message_list[0], socket_info)\n self._check_sequence(message_list[0], socket_info)\n continue\n # A non heartbeat message.\n for message in message_list:\n self._check_sequence(message, socket_info)\n state_update = callback(message)\n if state_update:\n pending_callback = True\n if not socket_info.queue.empty():\n continue\n if pending_callback and self.is_setup():\n self.exchange_state.update_publisher.notify()\n pending_callback = False", "def _read_from_socket(self):\n data = \"\"\n try:\n data = self.__socket.recv(SOCKET_BUFFER_SIZE)\n except socket.timeout:\n self.state[\"Errors\"] = True\n raise socket.timeout(\"Error! Socket did not get info, when expected\")\n if not data:\n s = \"Empty\"\n else:\n s = data.decode('utf-8')\n print(\"\\n === Read from socket === \\n%s\\n\" % s)\n self._load_to_queue(s)", "async def run(self):\n while True:\n await asyncio.sleep(0)\n # See if any sockets have anything\n try:\n socks, events = self.poller.poll(1000)\n for sock, event in zip(socks,events):\n if sock in self.subscriptions:\n states = sock.recv_json()\n await self.main_server.sync_states(states)\n\n # Nothing to report - Poller did not find any sockets with updates\n except ValueError:\n pass\n # Exiting\n except KeyboardInterrupt:\n break", "def receive_bytes(self, timeout: Optional[float] = None) -> Optional[bytes]:\n if timeout is None:\n return self._recv_socket.recv()\n\n # * 1000 as ms required here but seconds are used everywhere else\n if self._recv_poller.poll(timeout * 1000):\n return self._recv_socket.recv(flags=zmq.NOBLOCK)\n return None", "def wait(self):\n while self._worker is None:\n # wait() before self._run()\n time.sleep(0.1)\n self._worker.join()\n return self.poll()", "def _reply_remove_job(self):\n self.remove_job_socket.linger = 0\n self.remove_job_socket.setsockopt(zmq.RCVTIMEO, remote_constants.HEARTBEAT_RCVTIMEO_S * 1000)\n while self.worker_is_alive and self.master_is_alive:\n try:\n message = self.remove_job_socket.recv_multipart()\n tag = message[0]\n assert tag == remote_constants.KILLJOB_TAG\n to_remove_job_address = to_str(message[1])\n logger.info(\"[Worker] A job requests the worker to stop this job.\")\n self._remove_job(to_remove_job_address)\n self.remove_job_socket.send_multipart([remote_constants.NORMAL_TAG])\n except zmq.error.Again as e:\n #detect whether `self.worker_is_alive` is True periodically\n pass", "def receive(self):\n try:\n itr = 0\n while self.connected:\n \n r, w, e = select.select(\n (self.sock, ), (), (), 10.0)\n \n if r:\n message = json.loads(self.recv())\n \n if 'fetch' in message:\n self.fetch_id = message['fetch']\n \n if 'failure' in message:\n raise InvalidFetch(message['failure'])\n \n if 'records' in message:\n if 'added' in message['records']:\n num = len(message['records']['added'])\n else:\n num = len(message['records'])\n \n logger.debug('Query returned %s records.', num)\n if self.max_recv:\n itr += 1\n \n if 'end' in message:\n logger.debug('Received end message: %s' % message['end'])\n yield message\n break\n \n yield message\n if self.max_recv and self.max_recv <= itr:\n break \n\n except (Exception, KeyboardInterrupt, SystemExit, FetchAborted) as e:\n logger.info('Caught exception in receive: %s -> %s', type(e), str(e))\n if isinstance(e, (SystemExit, InvalidFetch)):\n # propagate SystemExit, InvalidFetch\n raise\n finally:\n if 
self.connected:\n if self.fetch_id:\n self.send(json.dumps({'abort': self.fetch_id}))\n self.close()\n \n if self.thread:\n self.event.set()\n while self.thread.isAlive():\n self.event.wait(1)\n \n logger.info('Closed web socket connection normally.')", "def receive_request(self):\n try:\n payload = self.SUB_COMMAND.recv_string(flags=zmq.NOBLOCK)\n topic, command = payload.split()\n if (topic == zmq_socket_config.TOPIC_REQUEST):\n if (command == zmq_socket_config.COMMAND_START):\n logger.debug(\"Noxalarm receive COMMAND_START\")\n self.start_alarm()\n elif (command == zmq_socket_config.COMMAND_STOP):\n logger.debug(\"Noxalarm receive COMMAND_STOP\")\n self.stop_alarm()\n elif (command == zmq_socket_config.STATUS_UPDATE):\n logger.debug(\"Noxalarm receive REQUEST_STATUS_UPDATE\")\n self.push_socket_state()\n \n # Else if no command received, do nothing\n except zmq.error.Again:\n pass", "def run(self):\n if self.init():\n while not self._stop.value:\n try:\n sockets = dict(self.poll.poll(100))\n if (self.sock_reply in sockets and\n sockets[self.sock_reply] == zmq.POLLIN):\n request = self.sock_reply.recv_multipart()\n # do some 'work', update status\n cmd = loads(request[0])\n self.running = 1\n self.coroutine.run(cmd)\n self.running = 0\n self.nb_job_done += 1\n # send reply back to router/queuer\n self.sock_reply.send_multipart(request)\n\n except Exception as e:\n self.log.error('CONSUMER exception {}'.format(e))\n break\n self.sock_reply.close()\n self.finish()\n self.done = True", "def poll(self):\n if self._worker is None:\n self.returncode = None\n return self.returncode\n elif self._worker.is_alive():\n self.returncode = None\n return self.returncode\n else:\n self.returncode = self._worker.state.return_value\n return self.returncode", "def recv(self, timeout=None):\n if self.recv_buffer is not None:\n result = self.recv_buffer\n self.recv_buffer = None\n return result\n if timeout is not None:\n tfinal = time.time() + timeout\n while not self.mail_flag():\n if time.time() > tfinal:\n raise Timeout()\n result = self.mpi_comm.recv(source=self.remote_rank, status=self.status)\n return self.process_incoming(result, self.status)", "def receive(self, timeout = TIMEOUT_CURRENT):\n if self.balance > 0: \n #we know there is a sender so no need for timeout logic\n return self._channel.receive()\n elif timeout == TIMEOUT_NEVER:\n #no timeout\n return self._channel.receive()\n else:\n #either tasklet defined or specific timeout\n current_task = Tasklet.current()\n if timeout == TIMEOUT_CURRENT:\n #tasklet defines the timeout\n timeout = current_task.timeout\n #\n if timeout == TIMEOUT_NEVER:\n #still no timeout\n return self._channel.receive()\n else:\n #with timeout\n def on_timeout():\n current_task.raise_exception(TimeoutError)\n event_timeout = TimeoutEvent(timeout, on_timeout)\n try:\n return self._channel.receive()\n finally:\n event_timeout.close()", "def non_blocking_get(self):\n try:\n return self.q.get(block=False)\n except queue.Empty:\n time.sleep(0)\n return None", "def _wait_queue(self):\n while True:\n time.sleep(0.1)\n if self.queue.unfinished_tasks == 0 or self.stop_signal.is_set():\n return", "def timeout_get(self, timeout):\n try:\n return self.q.get(timeout=timeout)\n except queue.Empty:\n time.sleep(0)\n return None", "def _receive(self, callback):\n selector = DefaultSelector()\n selector.register(self.socket, EVENT_READ)\n\n while self.socket is not None:\n try:\n val = selector.select(self.SELECT_TIMEOUT_INTERVAL)\n if val:\n with self.socket_lock:\n header = 
self.socket.recv(self.HEADER_SIZE)\n if header:\n data = self._read_data(header)\n callback(data)\n else: # connection closed from other end\n self.close()\n except Exception as err:\n getLogger(__name__).debug((\"Unexpected exception occurred,\"\n \" receive thread may be in a\"\n \" corrupted state\\n\"\n \"Error: {}\".format(err)))", "def test_stop_when_result_queue_is_full(self):\n SLEEP_DELTA = 0.01\n TIMEOUT = 20\n QUEUE_SIZE = 2\n\n pool = ThreadPool(10, results_queue_size=QUEUE_SIZE)\n pool.start(WorkerIdGeneratingWorker)\n\n for _ in range(100):\n pool.ventilate()\n\n cumulative_wait = 0\n while pool.results_qsize() != QUEUE_SIZE:\n time.sleep(SLEEP_DELTA)\n cumulative_wait += SLEEP_DELTA\n # Make sure we wait no longer than the timeout. Otherwise, something is very wrong\n self.assertLess(cumulative_wait, TIMEOUT, msg='Timeout while waiting for the results queue to fill')\n\n # No need to read from the queue. We are testing ability to exit when workers might be blocked on the\n # results queue\n\n pool.stop()\n pool.join()", "def consume(q):\n while not q.empty():\n name = threading.currentThread().getName()\n LOG.debug(f\"Thread: {name} getting host from queue[current size = {q.qsize()}] {time.strftime('%H:%M:%S')}\")\n host = q.get()\n LOG.debug(f\"Pinging host: {host}\")\n res = os.system(f'ping -c 1 {host} >/dev/null 2>&1')\n if res == 0:\n LOG.info(f\"Host {host} is alive.\")\n LOG.debug(f\"Thread: {name} finished queue[current size = {q.qsize()}] {time.strftime('%H:%M:%S')}\")\n q.task_done()", "def receive_message(socket, callback = None):\n while not exit_flag:\n try:\n message = socket.recv(1024)\n if message:\n if callback:\n callback(json.loads(message))\n except Exception as err:\n if err.message == \"timed out\":\n pass\n else:\n print err", "def _queue_thread(self):\n while self.running:\n try:\n msg = self.q.get(True, max(self.blocktime / 1000, 1))\n self.busy = True\n self.send(msg)\n self.update()\n except Empty:\n self.busy = False\n pass\n\n # Prune the events list of dead events\n self.events_lock.acquire()\n self.events = filter(lambda t: t.is_alive(), self.events)\n self.events_lock.release()", "def work(self, poll_timeout=60):\n \n continue_working = True\n worker_connections = []\n\n def continue_while_connections_alive(any_activity):\n return self.after_poll(any_activity)\n\n while continue_working and self.cont==True:\n worker_connections = self.establish_worker_connections()\n continue_working = self.poll_connections_until_stopped(worker_connections, continue_while_connections_alive, timeout=poll_timeout)\n\n for current_connection in worker_connections:\n current_connection.close()", "def fetch_job_from_queue(self):\n while 1:\n time.sleep(2)\n try:\n credentials = pika.PlainCredentials('USER', 'PASSWORD')\n connection = pika.BlockingConnection(pika.ConnectionParameters('rabbitmq', 5672, '/', credentials))\n channel = connection.channel()\n channel.queue_declare(queue='purserq')\n method_frame, header_frame, body = channel.basic_get(queue='purserq')\n if method_frame.NAME == 'Basic.GetEmpty':\n connection.close()\n else:\n channel.basic_ack(delivery_tag=method_frame.delivery_tag)\n print \"Received job:\", body, \"starting job to reply\"\n connection.close()\n self.reply_to_master(body)\n except AttributeError:\n print \"No content\"\n connection.close()\n except pika.exceptions.ConnectionClosed:\n print \"You get Connection Closed\"\n continue", "def run(self):\n global socket_connections\n\n self.start_server_socket(self.ticks_per_min)\n\n while 
True: \n\n try: \n # keep track of the time that the server started\n start_time = time.time() \n c, addr = self.server.accept()\n data, addr_2 = c.recvfrom(1024)\n\n self.server.shutdown(socket.SHUT_RDWR)\n self.server.close()\n\n # keep track of the time that the server finishes receiving\n # a request\n end_time = time.time() \n\n # set the timeout of the server to end_time - start_time to get\n # around the GIL\n self.start_server_socket(end_time - start_time)\n\n data = data.decode()\n\n # add the received message to the msg_queue\n if data: \n self.msg_queue.put(data)\n print str(self.id) + \" got some! \" + data\n\n # every time the socket timesout, callback to the clock's instruction\n except Exception, e:\n # shutdown the server first \n try: \n self.server.shutdown(socket.SHUT_RDWR)\n except:\n pass\n self.server.close()\n print \"exception: \" + str(e)\n print \"complete an instruction\"\n self.perform_clock_instruction()\n # restart server\n self.start_server_socket(self.ticks_per_min)", "async def queue_worker(async_q):\n while True:\n if async_q.empty() == True:\n await asyncio.sleep(5)\n else:\n action = async_q.get()\n await action.run()", "def _receive_request(self, sdu):\n if self._busy: raise RuntimeError(\"Channel busy\")\n self._busy = True\n self._propagate(sdu)", "def monitor(self):\r\n while True:\r\n for worker, start_time in self.workers.items():\r\n if (not worker.isAlive() or\r\n self.timeout\r\n and datetime.now() - start_time > self.timeout): \r\n\r\n self.work_count.get_nowait()\r\n self.jobs.task_done()\r\n del self.workers[worker]\r\n\r\n time.sleep(1)", "def q_get(self):\n while not self.stopped():\n try:\n return self.in_q.get(timeout=self.heart_beat)\n except queue.Empty:\n pass", "def blocking_poll(self, look_for_work, polling_seconds=600, coalesce_seconds=0.1):\n amqp_failed_at = None\n amqp_retry_count = 0\n last_notice_event = 0\n\n def next_poll_time():\n return max(\n 1,\n polling_seconds\n - (time.time() - last_notice_event)\n )\n\n def next_amqp_time():\n if amqp_failed_at is None:\n return 0\n return max(\n 0,\n 5**amqp_retry_count # exponential backoff 5s ... ~21h\n - (time.time() - amqp_failed_at)\n )\n\n while True:\n try:\n if (self.amqp_connection is None or not self.amqp_connection.is_open) \\\n and next_amqp_time() <= next_poll_time():\n # initialize AMQP (unless we're in a cool-down period)\n time.sleep(next_amqp_time())\n self._amqp_bind()\n polling_gen = self.notice_channel.consume(\n self.notice_queue_name,\n exclusive=True,\n inactivity_timeout=polling_seconds\n )\n coalesce_gen = self.notice_channel.consume(\n self.notice_queue_name,\n exclusive=True,\n inactivity_timeout=coalesce_seconds\n )\n amqp_failed_at = None\n amqp_retry_count = 0\n sys.stderr.write('Using AMQP hybrid polling.\\n')\n # drain any pre-existing work that won't fire an AMQP event for us\n self._run_notice_event(look_for_work)\n last_notice_event = time.time()\n\n if self.amqp_connection and self.amqp_connection.is_open:\n # wait for AMQP event or timeout to wake us\n for result in polling_gen:\n sys.stderr.write('Woke up on %s.\\n' % ('change-notice' if result else 'poll timeout'))\n # ... 
and delay for up to coalesce_seconds to combine multiple notices into one wakeup\n while next(coalesce_gen)[0] is not None:\n pass\n # run once per wakeup\n self._run_notice_event(look_for_work)\n last_notice_event = time.time()\n else:\n # wait for next poll deadline and run once\n time.sleep(next_poll_time())\n self._run_notice_event(look_for_work)\n last_notice_event = time.time()\n\n except pika.exceptions.AMQPConnectionError as e:\n if amqp_failed_at is None:\n sys.stderr.write('Using basic polling due to AMQP communication problems.\\n')\n self.amqp_connection = None\n amqp_failed_at = time.time()\n if amqp_retry_count < 6:\n # don't let retry exponent get bigger than 7...\n amqp_retry_count += 1\n\n except Exception as e:\n sys.stderr.write('Got error %s in main event loop.' % e)\n raise", "async def _monitor_recv(self):\n\n while True:\n await RisingEdge(self.clock)\n await ReadOnly()\n if self.bus.valid.value:\n self._recv(int(self.bus.data.value))", "def run(self):\n while True:\n socks = select.select(self.sockets.values(), [], [], 0.1)[0]\n for conn in socks:\n try:\n k = conn.recv(65535)\n except:\n # either died on a connection reset, or was SIGTERM's by parent\n return\n if k:\n for sock in self.sockets:\n if self.sockets[sock] == conn:\n srcif = sock\n msg = json.loads(k)\n if not self.handle_packet(srcif, msg):\n self.send_error(conn, msg, srcif)\n else:\n return", "def send_announcement_get_work_request(self):\n self.analysis_id = uuid.uuid4().hex\n while True:\n self.announce_socket.send_json(((self.analysis_id, self.work_addr),))\n try:\n return self.awthread.recv(self.work_socket, 250)\n except six.moves.queue.Empty:\n continue", "def Wait(self, request_id, timeout):\n end_time = time.time() + timeout\n while not self._IsComplete(request_id):\n if end_time < time.time():\n self.AbortRequest(request_id)\n raise WorkQueueTimeout(request_id, timeout)\n time.sleep(self._WAIT_POLL_INTERVAL)\n completion_file = self._GetRequestPathname(request_id, self._COMPLETE)\n with open(completion_file, 'r') as f:\n result = pickle.load(f)\n os.remove(completion_file)\n if isinstance(result, Exception):\n raise result\n assert not isinstance(result, BaseException)\n return result", "def get_server_queue(self, server_name, timeout=5):\n start_time = time()\n while time() < start_time + timeout:\n sq = self.server_queues[server_name]\n if sq.job_queue_size() > 0:\n returnable_sq = copy.deepcopy(sq)\n del sq.job_queue[0]\n return returnable_sq\n raise ValueError(\"Server queue has no jobs in it\")", "def poll(self):\n Monitor.poll(self)\n return deferToThread(self._poll)", "def receive():\n now = time.time()\n end = now + MAX_DURATION\n tmp = None\n # Heroku doesn't notify when clients disconnect so we have to impose a\n # maximum connection duration.\n while now < end:\n if not tmp:\n tmp = AsyncResult()\n BROADCAST_QUEUE.put(tmp)\n try:\n yield tmp.get(timeout=KEEP_ALIVE_DELAY)\n tmp = None\n except Timeout:\n yield ''\n now = time.time()", "def doRead(self):\n if self.read_scheduled is not None:\n if not self.read_scheduled.called:\n self.read_scheduled.cancel()\n self.read_scheduled = None\n\n while True:\n if self.factory is None: # disconnected\n return\n\n events = self.socket_get(constants.EVENTS)\n\n if (events & constants.POLLIN) != constants.POLLIN:\n return\n\n try:\n message = self._readMultipart()\n except error.ZMQError as e:\n if e.errno == constants.EAGAIN:\n continue\n\n raise e\n\n log.callWithLogger(self, self.messageReceived, message)", "def run(self):\r\n 
self.logger.info('NetworkReceiverThread thread started')\r\n global receiver_running\r\n\r\n self.server.bind(('', self.port))\r\n self.server.listen(5)\r\n\r\n while receiver_running:\r\n # test which socket sent data\r\n all_sockets = self.sockets.values() + [self.server]\r\n recv_from_sockets, _, _ = select.select(all_sockets, [], [], 0.2)\r\n\r\n for s in recv_from_sockets:\r\n # if new client trying to connect, accept it\r\n if s is self.server:\r\n new_socket, address = s.accept()\r\n ip = new_socket.getpeername()[0]\r\n self.sockets[ip] = new_socket\r\n\r\n message = protocol.thread.new_socket(\r\n socket=new_socket)\r\n self.network_queue.put(message)\r\n\r\n self.logger.debug('new connection to client %s' % ip)\r\n # if a connected client send a message, receive it\r\n else:\r\n # if this is a part of a message that alreay begun to send,\r\n # add it to the messages dict\r\n if s in self.messages:\r\n buff, missing_bytes = self.messages[s]\r\n received = self.receive(s, missing_bytes)\r\n\r\n buff.write(received)\r\n missing_bytes -= len(received)\r\n\r\n self.messages[s] = buff, missing_bytes\r\n # if no bytes are missing, the message is received\r\n # pass it to the logic thread, and remove from queue\r\n if missing_bytes == 0:\r\n\r\n content = buff.getvalue()\r\n buff.close()\r\n message = protocol.thread.received(\r\n message=content, client=s.getpeername()[0])\r\n self.logic_queue.put(message)\r\n del self.messages[s]\r\n\r\n self.logger.debug(\r\n 'received a message from %s: %s...'\r\n % (s.getpeername()[0], repr(content[:5])))\r\n else:\r\n self.logger.debug('received part of message. ' +\r\n 'total bytes: %s, left %s.' %\r\n (len(received), missing_bytes))\r\n\r\n # if this is the first part of a message,\r\n # add it to the message dict\r\n else:\r\n size_str = self.receive(s, 4)\r\n\r\n # if connection is closed, remove it\r\n if size_str != '':\r\n size = protocol.get_size(size_str)\r\n self.messages[s] = (StringIO.StringIO(), size)\r\n self.logger.debug('new message of size %s' % size)\r\n\r\n self.server.close()\r\n self.logger.info('NetworkReceiverThread thread ended')", "def check_queue():\n while True:\n logging.info( 'Awaiting task ' )\n yield from asyncio.sleep( 5 )\n loop.create_task( (start_background_tasks()) )", "def receive_message(self):\n try:\n self.clockCheckStop = datetime.now()\n data = self.listener.recvfrom(BUF_SZ)\n return fxp_bytes_subscriber.unmarshal_message(data[0])\n except ConnectionError as err:\n # a ConnectionError means it will never succeed\n print('closing: {}'.format(err))\n return\n except Exception as err:\n # other exceptions we assume are due to being non-blocking;\n # we expect them to succeed in future\n print('failed {}'.format(err))\n return", "def poll(self, timeout=None, flags=_zmq.POLLIN):\n\n if self.closed:\n raise _zmq.ZMQError(_zmq.ENOTSUP)\n\n p = self._poller_class()\n p.register(self, flags)\n f = p.poll(timeout)\n\n future = self._Future()\n def unwrap_result(f):\n if future.done():\n return\n if f.cancelled():\n try:\n future.cancel()\n except RuntimeError:\n # RuntimeError may be called during teardown\n pass\n return\n if f.exception():\n future.set_exception(f.exception())\n else:\n evts = dict(f.result())\n future.set_result(evts.get(self, 0))\n\n if f.done():\n # hook up result if\n unwrap_result(f)\n else:\n f.add_done_callback(unwrap_result)\n return future", "def get(self):\n try:\n return self.url_queue.get(timeout=self.timeout)\n except Exception as e:\n print(e)\n return None", "def 
WaitUntilNoFlowsToProcess(self, timeout=None):\n t = self.flow_handler_thread\n if not t:\n return\n\n start_time = time.time()\n while True:\n with self.lock:\n # If the thread is dead, or there are no requests\n # to be processed/being processed, we stop waiting\n # and return from the function.\n if (not t.is_alive() or\n (not self._GetFlowRequestsReadyForProcessing() and\n not self.flow_handler_num_being_processed)):\n return\n\n time.sleep(0.2)\n\n if timeout and time.time() - start_time > timeout:\n raise TimeOutWhileWaitingForFlowsToBeProcessedError(\n \"Flow processing didn't finish in time.\")", "def run(self):\n\n self.logger.debug(f\"Starting runner loop ...\")\n self.state = RunnerState.WORKING\n\n while self.state in (RunnerState.WORKING, RunnerState.WAITING):\n try:\n current_job = self.job_manager.get(self.queue_access_timeout)\n self.state = RunnerState.WORKING\n except queue.Empty:\n if self.state == RunnerState.WORKING:\n self.state = RunnerState.WAITING\n if self.quit_on_empty_queue:\n self.state = RunnerState.ENDING\n continue\n\n current_job.logger = self.logger\n\n try:\n worker = _processor.DownloadProcessor(current_job)\n if worker.run():\n self.logger.debug(f\"Worker processed {current_job} successfully.\")\n else:\n self.logger.warning(f\"Processing of {current_job} failed somehow.\")\n\n if len(worker.descendants) > 0:\n self.logger.warning(f\"Found {len(worker.descendants)} new derived jobs.\")\n for job in worker.descendants:\n self.job_manager.put(job)\n\n for reference in set(current_job.references):\n self.job_manager.put(current_job.copy(reference))\n\n except Exception as exc:\n self.exception = exc\n self.logger.error(f\"Error during handling of '{current_job}'!\", exc_info=True)\n if self.crash_on_error:\n self.state = RunnerState.CRASHED\n raise\n\n finally:\n self.job_manager.complete(current_job)\n\n self.state = RunnerState.EXITED", "def TODO_testTimeout(self):\n return \"\"\"TODO: Highly dependent on hardcoded downstream timeout val\"\"\"\n\n # Assuming proxy's downstream_max is 1,\n # and number of threads is 1.\n\n self.client_connect(0)\n\n self.client_send('get time0\\r\\n', 0)\n self.mock_recv('get time0\\r\\n', 0)\n\n # Mock server is 'busy' at this point, so\n # downstream timeout logic should kick in,\n # without our mock server having to send anything.\n\n self.wait(210)\n\n self.client_recv('END\\r\\n', 0)\n\n # TODO: The number of server sessions should be 0,\n # except the close might not have propagated.", "def ReceiveTimeout(self) -> int:", "def ReceiveTimeout(self) -> int:", "def _read_socket(self):\n data = ''\n while True:\n try:\n input_buffer = self._connection_socket.recv(self._buffer_size)\n if not input_buffer:\n raise ConnectionClosedException()\n else:\n data += input_buffer.strip()\n if re.search(self.REQUEST_END, data):\n break\n except socket.timeout:\n continue\n return data", "def WaitUntilServing(self, timeout=30.0):\n assert self._process, 'server was not started'\n finish_time = time.time() + timeout\n while time.time() < finish_time:\n if self._process.poll() is not None:\n raise Error('server has already exited with return: %r',\n self._process.returncode)\n if self._CanConnect():\n return\n time.sleep(0.2)\n raise Error('server did not start after %f seconds', timeout)", "def __listener(self, sock):\n self.__serving = True\n try:\n pr = [sock]\n pw = []\n pe = [sock]\n while self.__thread is not None:\n rd, rw, re = select.select(pr, pw, pe, 0.5)\n if len(re) != 0:\n raise Exception(\"Error on select was 
detected.\")\n if len(rd) == 0:\n continue\n while 1: # Slurp up waiting packets, return to select if EAGAIN\n try:\n data = sock.recv(8192, socket.MSG_DONTWAIT)\n self.checkMsg(data)\n except Exception:\n break # Go back to select so we don't busy-wait\n finally:\n sock.close()\n self.__serving = False", "def run(self):\n while True:\n socks = select.select(self.sockets.values(), [], [], 0.1)[0]\n for conn in socks:\n try:\n k = conn.recv(65535)\n except:\n # either died on a connection reset, or was SIGTERM's by parent\n return\n if k:\n for sock in self.sockets:\n if self.sockets[sock] == conn:\n srcip = sock\n msg = json.loads(k)\n # TODO commented out below code because handled by handle packet\n #self.update(srcip, msg)\n #print(msg[TYPE])\n #print(json.dumps(msg, sort_keys=True, indent=4))\n if not self.handle_packet(srcip, msg):\n self.send_error(conn, msg)\n else:\n return", "def collect(self):\n while True:\n if not self._queue.empty():\n message = self._queue.get()\n self.working_on = message['job_type']\n else:\n break\n logging.info(\"Popped off message: {}\\n\".format(str(message)))\n\n if message['job_type'] == 'STOP':\n break\n\n if message['job_type'] != 'MAINTAIN' and message['job_type'] != 'UPDATE':\n raise ValueError('{} is not a recognized task type'.format(message['job_type']))\n pass\n\n # Query all repos with repo url of given task\n repoUrlSQL = s.sql.text(\"\"\"\n SELECT min(repo_id) as repo_id FROM repo WHERE repo_git = '{}'\n \"\"\".format(message['given']['github_url']))\n repo_id = int(pd.read_sql(repoUrlSQL, self.db, params={}).iloc[0]['repo_id'])\n\n try:\n if message['models'][0] == 'pull_requests':\n self.pull_requests_model(message, repo_id)\n elif message['models'][0] == 'pull_request_commits':\n self.pull_request_commits_model(message, repo_id)\n elif message['models'][0] == 'pull_request_files':\n self.pull_requests_graphql(message, repo_id)\n except Exception as e:\n register_task_failure(self, message, repo_id, e)\n pass", "def get(self):\n with self.__lock:\n while True:\n try:\n job = self.__queue.get(False)\n self.__lock.notify_all()\n return job\n except Queue.Empty:\n self.__lock.wait()", "def recv(self, timeout=.0001):\n # First get data length\n self.settimeout(timeout)\n length = self._socket.recv(8)\n length = int.from_bytes(length, \"little\")\n if length == 0:\n raise ConnectionAbortedError\n # Then receive data\n self.settimeout(None)\n return self._socket.recv(length, socket.MSG_WAITALL)", "def _receive_thread(self):\r\n while True:\r\n try:\r\n self.response, ip = self.socket.recvfrom(3000)\r\n except socket.error as exc:\r\n print (f\"Caught exception socket.error: {exc}\")", "def _receive_thread(self):\r\n while True:\r\n try:\r\n self.response, ip = self.socket.recvfrom(2048)\r\n print(\"Response \", self.response)\r\n except socket.error as exc:\r\n print (\"Receive Thread caught exception socket.error : %s\" % exc)", "def receive_packet(self, time=0):\n if time == 0:\n try:\n return self.in_queue.get(False)\n except queue.Empty:\n return None\n elif time < 0:\n try:\n return self.in_queue.get(True)\n except queue.Empty:\n return None\n else:\n try:\n return self.in_queue.get(True, time)\n except queue.Empty:\n return None", "def run(self) -> None:\n while self.data_incoming or len(self._queue):\n if not self._queue:\n logging.info(\"Consumer %d is sleeping since queue is empty\", self._name)\n time.sleep(0.75)\n print(self._queue.get())\n time.sleep(0.5)", "def recvall(self):\n\n while True:\n try:\n data = self.sock.recv(4096)\n\n 
if data == '':\n self.status = 'closed'\n break\n\n except error as e:\n # Not all errors are bad. Most of the time, we're\n # waiting for EWOULDBLOCK as the loop end condition\n self.errno = e.errno\n if not self.errno in (errno.EWOULDBLOCK,errno.EAGAIN):\n self.status = 'closed'\n break\n\n self.inbox.feed(data)", "def poll(self):\n if self._server:\n self._server.poll()", "async def test_blocking_timeout(self):\n with await self.redis as r:\n result = await r.blpop('missing', timeout=1)\n assert result is None", "async def test_queue_timeout(self) -> None:\n\n class SlowDerivedHandler(Handler, DerivedTestingServiceInterface):\n async def getName(self) -> str:\n time.sleep(1)\n return \"SlowDerivedTesting\"\n\n async def derived_pick_a_color(self, color: Color) -> Color:\n return color\n\n testing = TestServer(handler=SlowDerivedHandler())\n testing.server.set_queue_timeout(0.01)\n loop = asyncio.get_event_loop()\n\n async def client_call(sa: SocketAddress) -> str:\n ip, port = sa.ip, sa.port\n assert ip and port\n async with get_client(DerivedTestingService, host=ip, port=port) as client:\n try:\n return await client.getName()\n except ApplicationError as err:\n if \"Queue Timeout\" in str(err):\n return \"Queue Timeout\"\n else:\n return \"\"\n\n async def clients_run(server: TestServer) -> None:\n async with server as sa:\n results = await asyncio.gather(\n client_call(sa),\n client_call(sa),\n client_call(sa),\n client_call(sa),\n client_call(sa),\n )\n self.assertIn(\"Queue Timeout\", results)\n\n await clients_run(testing)", "async def poll_endpoint(url):\n response = requests.get(url)\n logger.info(f\"PING Url: {url}\")\n if response.status_code not in [200, 201]:\n # Failures in the polling itself are being discarded here, due to a simple design\n # You could, however, have a second message queue filled with failed poll requests\n # and keep a dedicated consumer for that MQ.\n logger.critical(f\"Error during polling {url}: Got response code - {response.status_code}\")\n else:\n channel.basic_publish(exchange='', routing_key='poll', body=response.content, properties=pika.BasicProperties(delivery_mode=2))\n logger.info(\"Job Enqueued\")", "def accept(self):\n self._acquire() # prevent other calls to protect data structures\n try:\n pool, requests, ready = self._lists\n while not requests:\n # There are no requests in the queue. Wait until there are.\n\n # This thread is waiting, to remove a lock from the collection\n # of locks corresponding to threads not waiting for work\n l = pool.pop()\n\n # And add it to the collection of locks representing threads\n # ready and waiting for work.\n ready.append(l)\n self._release() # allow other calls\n\n # Now try to acquire the lock. 
We will block until\n # someone calls handle to queue a request and releases the lock\n # which handle finds in the ready queue\n l.acquire()\n\n # prevent calls so we can update not waiting pool\n self._acquire()\n pool.append(l)\n\n # return the *first* request\n return requests.pop(0)\n\n finally:\n self._release() # allow calls", "def listen(self):\n while not self.stop.is_set():\n rlist, _, _ = select([self.udpsock], [], [], LISTEN_TIMEOUT)\n if self.udpsock in rlist:\n try:\n data, addr = self.udpsock.recvfrom(4096)\n if should_respond(data.decode(\"ascii\")):\n response_str = self.response.format(\n strftime(\"%a, %d %b %Y %H:%M:%S GMT\", gmtime())\n )\n self.udpsock.sendto(\n bytearray(response_str, \"ascii\"),\n addr\n )\n except Exception as e:\n logging.error(e)\n self._shutdown()", "def run_forever(self):\n while True:\n if not self._mailbox:\n self._event.wait()\n self._event = _event.Event()\n else:\n # leave the message in the mailbox until after it's\n # been processed so the event doesn't get triggered\n # while in the received method\n self._pool.spawn_n(\n self.received, self._mailbox[0])\n self._mailbox.popleft()", "def receive_loop(self):\n msg_buffer = bytes() # The message input buffer\n while not self._shutdown.is_set():\n if msg_buffer:\n try:\n msg_length = self.determine_length_of_json_msg(msg_buffer)\n except InvalidLengthHeader:\n msg_length = float(\"inf\")\n if len(msg_buffer) >= msg_length:\n message = self.extract_msg(msg_buffer, msg_length)\n try:\n handler = getattr(self, \"handle_\" + message['type'])\n except AttributeError:\n print(\"Can't handle message of type: \" +\n str(message['type']))\n continue\n handler(message)\n msg_buffer = msg_buffer[msg_length:]\n else:\n try:\n msg_buffer += connection.recv(1024)\n except socket.timeout:\n pass\n else:\n try:\n msg_buffer += connection.recv(1024)\n except socket.timeout:\n pass", "async def worker(\n self, queue: asyncio.Queue, session: aiohttp.ClientSession\n ) -> None:\n while True:\n url = await queue.get()\n await self.fetch(url, session)\n queue.task_done()", "def _wait_for_event_in_queue(self):\n try:\n event = self._queue.get(timeout=SendTelemetryEventsHandler._MAX_TIMEOUT)\n self._queue.task_done()\n except Empty:\n # No elements in Queue, return None\n event = None\n\n return event", "async def _read(self):\n try:\n logger.debug('Enter Task._read for %s', self.url)\n while self.websocket:\n await self._read_once()\n except Exception:\n logger.exception('Unhandled exception in Task._read for %s', self.url)\n finally:\n logger.debug('Exit Task._read for %s', self.url)", "def receive(self):\n if self.sock is not None:\n return recv_msg(self.sock)\n return None", "def serve_requests(self):\n while True:\n self.server_socket.listen(self.request_queue_size)\n client_connection, client_address = self.server_socket.accept()\n self.request_handler(client_connection)", "def _start_receive_from_queue(self):\n while True:\n received_message = recv_msg(self.TCPSock)\n # received_message = self.TCPSock.recv(self.buf)\n if self.verbose: print \"Server sends: \" + received_message\n self.receive_message_queue.put(received_message)", "def rq_worker():\n setup_experiment(log)\n with Connection(db.redis_conn):\n # right now we care about low queue for bots\n worker = Worker(\"low\")\n worker.work()", "def __receive_next_update(self) -> telegram.Update:\n # Pop data from the queue\n try:\n data = self.queue.get(timeout=self.cfg.telegram[\"conversation_timeout\"])\n except queuem.Empty:\n # If the conversation times 
out, gracefully stop the thread\n self.__graceful_stop(StopSignal(\"timeout\"))\n # Check if the data is a stop signal instance\n if isinstance(data, StopSignal):\n # Gracefully stop the process\n log.debug(\"Waiting for a specific message...\")\n self.__graceful_stop(data)\n # Return the received update\n return data", "def wait(self, condition, timeout=False, msg=None):\n if timeout is False:\n timeout = self.timeout\n if timeout is None:\n while not condition() and not self.disconnected:\n self.container.process()\n else:\n container_timeout = self.container.timeout\n self.container.timeout = timeout\n try:\n deadline = time.time() + timeout\n while not condition() and not self.disconnected:\n self.container.process()\n if deadline < time.time():\n txt = \"Connection %s timed out\" % self.url\n if msg: txt += \": \" + msg\n raise Timeout(txt)\n finally:\n self.container.timeout = container_timeout\n if self.disconnected or self._is_closed():\n self.container.stop()\n self.conn.handler = None # break cyclical reference\n if self.disconnected and not self._is_closed():\n raise ConnectionException(\n \"Connection %s disconnected: %s\" % (self.url, self.disconnected))", "def _wait_for_queue_space(self, timeout=DEFAULT_WAIT_TIME):\n wait_time = 5\n\n (DEBUG and len(self.queue) > 1 * 1000 * 1000) and Log.warning(\"Queue {{name}} has over a million items\")\n\n now = time()\n if timeout != None:\n time_to_stop_waiting = now + timeout\n else:\n time_to_stop_waiting = now + DEFAULT_WAIT_TIME\n\n if self.next_warning < now:\n self.next_warning = now + wait_time\n\n while not self.please_stop and len(self.queue) >= self.max:\n if now > time_to_stop_waiting:\n Log.error(THREAD_TIMEOUT)\n\n if self.silent:\n self.lock.wait(Till(till=time_to_stop_waiting))\n else:\n self.lock.wait(Till(seconds=wait_time))\n if len(self.queue) >= self.max:\n now = time()\n if self.next_warning < now:\n self.next_warning = now + wait_time\n Log.alert(\n \"Queue by name of {{name|quote}} is full with ({{num}} items), thread(s) have been waiting {{wait_time}} sec\",\n name=self.name,\n num=len(self.queue),\n wait_time=wait_time\n )" ]
[ "0.6019085", "0.6004343", "0.58936167", "0.5832211", "0.5812502", "0.58101034", "0.5788062", "0.5670236", "0.5595741", "0.55511177", "0.55298686", "0.54770106", "0.54572827", "0.5441434", "0.5433049", "0.54328305", "0.5424303", "0.5412369", "0.5375704", "0.5372488", "0.5343022", "0.5339628", "0.5337792", "0.53236455", "0.5305691", "0.5303386", "0.52980363", "0.5281956", "0.5263355", "0.52613515", "0.5255476", "0.5253463", "0.52490073", "0.52289695", "0.5226632", "0.5211842", "0.5203398", "0.5178606", "0.51674813", "0.51448387", "0.5143869", "0.51341987", "0.5132209", "0.5129364", "0.5128741", "0.51260686", "0.5119592", "0.5113246", "0.5112868", "0.510658", "0.51041615", "0.5100542", "0.50926936", "0.508464", "0.50803834", "0.5076173", "0.50684273", "0.506485", "0.5062369", "0.5046485", "0.50383055", "0.50336343", "0.5032234", "0.50298816", "0.5028373", "0.50273997", "0.5025466", "0.50195", "0.50179166", "0.50179166", "0.50143397", "0.50044394", "0.50041425", "0.5002941", "0.49996558", "0.49991083", "0.4992687", "0.49920222", "0.49916893", "0.49910936", "0.49859998", "0.4981038", "0.49748293", "0.4965524", "0.49616435", "0.49597645", "0.49488282", "0.4941988", "0.49402508", "0.49380773", "0.49372754", "0.49250755", "0.4923915", "0.492235", "0.4919678", "0.49134108", "0.4910005", "0.49093103", "0.49020278", "0.48996198" ]
0.7843548
0
Execute a closure on the AnalysisWorker thread. fn - closure to execute. Returns the function's result or throws whatever exception was thrown by the function.
def execute(self, fn, *args, **kwargs):
        self.ex(fn, *args, **kwargs)
        return self.ecute()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def apply(self, external_callable, *args, **kwargs):\n self.work_request_queue.put((external_callable, args, kwargs))\n return self.result_queue.get()", "async def call(fn: Callable, *args, **kwargs) -> Any:\n async with websockets.connect(WS_SERVER_URI) as websocket:\n\n task = serialize((fn, args, kwargs))\n\n await websocket.send(task)\n message = await websocket.recv()\n\n results = deserialize(message)\n\n if isinstance(results, TaskExecutionError):\n raise results\n\n return results", "def _invoke_app_function(evt, **kwds):\n LOG.debug(\"Running _invoke_app_function in Thread: %s\", threading.currentThread().name)\n\n result_list = []\n\n # Validate the fn_inputs in the Message\n fn_inputs = validate_fields([], kwds)\n LOG.info(\"[%s] Validated function inputs\", evt.name)\n LOG.debug(\"[%s] fn_inputs: %s\", evt.name, fn_inputs)\n\n rp = ResultPayload(itself.PACKAGE_NAME, version=constants.APP_FUNCTION_PAYLOAD_VERSION, **fn_inputs)\n\n fn_inputs_tuple = namedtuple(\"fn_inputs\", fn_inputs.keys())(*fn_inputs.values())\n\n # Set evt.message in local thread storage\n itself.set_fn_msg(evt.message)\n\n # Invoke the actual Function\n fn_results = fn(itself, fn_inputs_tuple)\n\n for r in fn_results:\n if isinstance(r, StatusMessage):\n LOG.info(\"[%s] StatusMessage: %s\", evt.name, r)\n itself.fire(StatusMessageEvent(parent=evt, message=r.text))\n\n elif isinstance(r, FunctionResult):\n r.name = evt.name\n if not r.custom_results:\n r.value = rp.done(\n content=r.value,\n success=r.success,\n reason=r.reason)\n LOG.info(\"[%s] Returning results\", r.name)\n result_list.append(r)\n\n elif isinstance(r, Exception):\n raise r\n\n else:\n # Whatever this is, add it to the results\n LOG.debug(r)\n result_list.append(r)\n\n return result_list", "def safe_run(self, function: Callable) -> Callable:\n\n def wrapper(*args, **kwargs) -> Any:\n result = None\n try:\n result = function(*args, **kwargs)\n except BaseException:\n self._new_error(ExceptionInfo(*sys.exc_info()))\n\n self._show_info()\n return result\n\n return wrapper", "def __apply_func_with_worker_stream(args):\n\n # set up logging\n logger = multiprocessing.log_to_stderr()\n logger.setLevel(logging.WARNING)\n from cea import suppress_3rd_party_debug_loggers\n suppress_3rd_party_debug_loggers()\n\n # unpack the arguments\n func, queue, on_complete, i_queue, n, args = args[0], args[1], args[2], args[3], args[4], args[5:]\n\n # set up printing to stderr and stdout to go through the queue\n sys.stdout = QueueWorkerStream('stdout', queue)\n sys.stderr = QueueWorkerStream('stderr', queue)\n\n # CALL\n result = func(*args)\n\n if on_complete:\n on_complete(i_queue.get(), n, args, result)\n\n return result", "def submit( # type: ignore[override]\n self, fn: Callable[..., T], *args: Any, **kwargs: Any\n ) -> FutureType[T]:\n future = cast(FutureType[T], Future())\n try:\n result = fn(*args, **kwargs)\n except BaseException as exc_info:\n future.set_exception(exc_info)\n else:\n future.set_result(result)\n return future", "def progress_wrapper(user_defined_function: Callable, master_workers_queue: multiprocessing.Queue, index: int, chunk_size: int) -> Callable:\n ...", "def execute_deferred(fn):\n\n pass", "def inner(*args):\n # Setup.\n stats = args[-1]\n stats[desc] = -1\n start = time.time()\n\n # Execute the function.\n ret_val = func(*args)\n\n # No exception, so save the runtime and return ret_val.\n stats[desc] = time.time() - start\n return ret_val", "def submit_to_executor(self, func: Callable, *args, **kwargs) -> Future:\n\n callback 
= kwargs.pop(\"callback\", None)\n\n # get stuff we'll need to fake scheduler call\n sched_data = {\n \"id\": uuid.uuid4().hex,\n \"name\": self.name,\n \"objectid\": self.AD.app_management.objects[self.name][\"id\"],\n \"type\": \"scheduler\",\n \"function\": callback,\n \"pin_app\": self.get_app_pin(),\n \"pin_thread\": self.get_pin_thread(),\n }\n\n def callback_inner(f):\n try:\n # @todo : use our own callback type instead of borrowing\n # from scheduler\n rargs = {}\n rargs[\"result\"] = f.result()\n sched_data[\"kwargs\"] = rargs\n self.create_task(self.AD.threading.dispatch_worker(self.name, sched_data))\n\n # callback(f.result(), kwargs)\n except Exception as e:\n self.error(e, level=\"ERROR\")\n\n f = self.AD.executor.submit(func, *args, **kwargs)\n\n if callback is not None:\n self.logger.debug(\"Adding add_done_callback for future %s for %s\", f, self.name)\n f.add_done_callback(callback_inner)\n\n self.AD.futures.add_future(self.name, f)\n return f", "def run(self):\n\n # Retrieve args/kwargs here; and fire processing using them\n try:\n result = self.fn(*self.args, **self.kwargs)\n except:\n traceback.print_exc()\n exctype, value = sys.exc_info()[:2]\n self.signals.error.emit((exctype, value, traceback.format_exc()))\n else:\n self.signals.result.emit(result) # Return the result of the processing\n finally:\n self.signals.finished.emit() # Done", "def run(self):\n\n # Retrieve args/kwargs here; and fire processing using them\n try:\n result = self.fn(*self.args, **self.kwargs)\n except:\n traceback.print_exc()\n exctype, value = sys.exc_info()[:2]\n self.signals.error.emit((exctype, value, traceback.format_exc()))\n else:\n self.signals.result.emit(result) # Return the result of the processing\n finally:\n self.signals.finished.emit() # Done", "def run_in_background(self, function):\n return function()", "async def run_in_executor(self, func: Callable, *args, **kwargs) -> Callable:\n\n return await utils.run_in_executor(self, func, *args, **kwargs)", "def thread_func(*args, **kwargs):\n exception, res = None, None\n try:\n res = func(*args, **kwargs)\n except Exception as e:\n exception = e\n return callback(exception, res)", "def _run_func(\n func: Callable, *args: Any, return_value: Any | None = None, **kwargs: Any\n) -> None:\n try:\n if try_bind(func, *args, return_value=return_value, **kwargs):\n func(*args, return_value=return_value, **kwargs)\n elif try_bind(func, *args, **kwargs):\n func(*args, **kwargs)\n else:\n raise SignatureMismatch(func)\n except SignatureMismatch:\n # always re-raise SignatureMismatch as this means we have been unable\n # to run the side-effect function at all.\n raise\n except Exception: # noqa: B902\n logger.exception(\"Error running side_effect function '%s'\", fname(func))\n if settings.ABORT_ON_ERROR or settings.TEST_MODE_FAIL:\n raise", "def submit(fn, *args, **kwargs):\n try:\n return fn(*args, **kwargs)\n except Exception as e:\n return e", "def wrapper_execute_on_main_thread(prepped_function, return_object):\n try:\n return_object.result = prepped_function()\n except Exception as e:\n return_object.exception = e\n\n return 0", "def _handle_thread(self, func):\n @wraps(func)\n def handle_thread(*args, **kwargs):\n \"\"\"Wrapped function\"\"\"\n try:\n ident = threading.get_ident()\n # Execute the function\n result = func(*args, **kwargs)\n except:\n error = sys.exc_info()\n with self.lock:\n # Record the error\n if (self.thread.ident == ident):\n self.result = None\n self.error = error\n raise error[1].with_traceback(error[2])\n else:\n 
with self.lock:\n # Record the result\n if (self.thread.ident == ident):\n self.result = result\n self.error = None\n return handle_thread", "def callFromWorker(cls, func, args, on_success=None, on_failure=None, on_complete=None):\n worker = cls(func, args)\n if on_success is not None:\n worker.job_succeeded.connect(on_success)\n if on_failure is not None:\n worker.job_failed.connect(on_failure)\n if on_complete is not None:\n worker.finished.connect(on_complete)\n worker.start()\n\n return worker", "def _run_func(func, *args, **kwargs):\n try:\n if pass_return_value(func):\n func(*args, **kwargs)\n else:\n kwargs.pop(\"return_value\", None)\n func(*args, **kwargs)\n except Exception:\n logger.exception(\"Error running side_effect function '%s'\", fname(func))\n if settings.ABORT_ON_ERROR or settings.TEST_MODE_FAIL:\n raise", "def doctest_BackgroundWorkerThread_run_outer_exception_handling():", "def _thread_run_for_result(future, func, *args):\n result = func(future, *args)\n future._set_result(result)", "def run(self):\n try:\n self._execute_func(self._params)\n except Exception, e:\n print str(e)\n self._parallel_executer.release()", "def _process_data(f, work_queue, results_queue):\n for element in iter(work_queue.get, FINISHED):\n try:\n results_queue.put(f(element))\n except Exception, work_error:\n LOG.critical('parallel_pc Error: {0}\\n\\n\\tconfig settings {1}\\n'.format(work_error, element))\n results_queue.put(FINISHED)", "def doctest_BackgroundWorkerThread_run_exception_handling():", "def make_tasker(func):\n def anonFunc(*args, **kwdargs):\n class anonTask(Task):\n def execute(self):\n self.logger.debug(\"Executing fn %s\" % func)\n try:\n val = func(*args, **kwdargs)\n\n self.logger.debug(\"Done executing fn %s\" % func)\n return val\n\n except Exception as e:\n # Log error message and re-raise exception.\n self.logger.error(\"fn %s raised exception: %s\" % (\n func, str(e)))\n raise e\n\n return anonTask()\n return anonFunc", "def async_func(self, fun: types.FunctionType) -> asyncio.Future:\n future = self._event_loop.run_in_executor(self._executor, fun)\n return future", "def _run(data):\n try:\n func, args, kwds = cPickle.loads(data)\n except Exception, e:\n raise deferred.PermanentTaskFailure(e)\n \n try:\n func(*args, **kwds)\n except TypeError:\n logging.debug(\"Deferred function arguments: %s %s\", args, kwds)\n raise", "def __call__(self):\n return self.fn()", "def run_in_worker_thread(f: Callable[..., T], *args: Any, **kwargs: Any) -> Awaitable[T]:\n return _run_in_runner(sublime.set_timeout_async, f, *args, **kwargs)", "def fn():", "def run_async(function):\n @functools.wraps(function)\n def wrapped_fxn(*args, **kwargs):\n t = threading.Thread(target=function, args=args, kwargs=kwargs)\n t.daemon = True\n t.start()\n return wrapped_fxn", "def run_in_thread(self, fn, *args, **kwargs):\r\n thread = threading.Thread(target=fn, args=args, kwargs=kwargs)\r\n thread.start()\r\n \r\n return thread", "def _run_async(fn, **inputs):\n tp = concurrent.futures.ThreadPoolExecutor(1)\n future = tp.submit(fn=fn, **inputs)\n tp.shutdown(False)\n return future", "def apply_async(self, *args, **kwargs):\n print(f\"apply_async running, args:{args}, kwargs:{kwargs}\")\n if \"args\" not in kwargs:\n raise FalseCeleryAppError(\"'args' was not present?\")\n return self.an_function(*kwargs[\"args\"])", "def submit(self, fn, *args, **kwargs):\n fn = self._prepare_fn(fn)\n future = self._self.submit(fn, *args, **kwargs)\n for callback in self._default_done_callbacks:\n 
future.add_done_callback(callback)\n return FutureProxy(future, self)", "def do(self, fun):\n with self.mutex:\n self.value = fun(self.value)\n return self.value", "def doctest_BackgroundWorkerThread_run():", "def ex(self, fn, *args, **kwargs):\n if len(args) == 0 and len(kwargs) == 0:\n self.down_queue.put(fn)\n else:\n\n def closure():\n return fn(*args, **kwargs)\n\n self.down_queue.put(closure)", "def compute(args, fun, max_workers=6):\n print(\"\\nProcessing symbols in parallel\")\n ex = futures.ThreadPoolExecutor(max_workers=max_workers)\n ex.map(fun, args)", "def __call__(self, f):\r\n return self.apply(f, None)", "def task_6_insert_function_result_into_string(func: Callable):\n return f'start {func()} finish'", "def __call__(self, func):\n LOG.debug(\"@function %s\", func)\n\n func.handler = True\n func.function = True\n\n # Circuits properties\n func.names = self.names\n func.priority = self.kwargs.get(\"priority\", 0)\n func.channel = self.kwargs.get(\"channel\", \",\".join([\"functions.{}\".format(name) for name in self.names]))\n func.override = self.kwargs.get(\"override\", False)\n \n # If getfullargspec if available to us \n if hasattr(_inspect, 'getfullargspec'):\n args = _inspect.getfullargspec(func)[0]\n else: # fall back to deprecated getargspec\n args = _inspect.getargspec(func)[0]\n\n if args and args[0] == \"self\":\n del args[0]\n func.event = getattr(func, \"event\", bool(args and args[0] == \"event\"))\n\n @wraps(func)\n def decorated(itself, event, *args, **kwargs):\n \"\"\"the decorated function\"\"\"\n LOG.debug(\"decorated\")\n function_parameters = event.message.get(\"inputs\", {})\n\n def _the_task(event, *args, **kwargs):\n return func(itself, event, *args, **kwargs)\n\n def _call_the_task(evt, **kwds):\n # On the worker thread, call the function, and handle a single or generator result.\n LOG.debug(\"%s: _call_the_task\", threading.currentThread().name)\n result_list = []\n task_result_or_gen = _the_task(evt, *args, **kwds)\n if not isinstance(task_result_or_gen, GeneratorType):\n task_result_or_gen = [task_result_or_gen]\n for val in task_result_or_gen:\n if isinstance(val, StatusMessage):\n # Fire the wrapped status message event to notify resilient\n LOG.info(\"[%s] StatusMessage: %s\", evt.name, val)\n itself.fire(StatusMessageEvent(parent=evt, message=val.text))\n elif isinstance(val, FunctionResult):\n # Collect the result for return\n LOG.debug(\"[%s] FunctionResult: %s\", evt.name, val)\n val.name = evt.name\n result_list.append(val)\n elif isinstance(val, Event):\n # Some other event, just fire it\n LOG.debug(val)\n itself.fire(val)\n elif isinstance(val, FunctionError_):\n LOG.error(\"[%s] FunctionError: %s\", evt.name, val)\n itself.fire(FunctionErrorEvent(parent=evt, message=str(val)))\n evt.success = False\n return # Don't wait for more results!\n elif isinstance(val, Exception):\n raise val\n else:\n # Whatever this is, add it to the results\n LOG.debug(val)\n result_list.append(val)\n return result_list\n\n the_task = task(_call_the_task, event, **function_parameters)\n ret = yield itself.call(the_task, \"functionworker\")\n xxx = ret.value\n # Return value is the result_list that was yielded from the wrapped function\n yield xxx\n return decorated", "def async_worker(*args, **kwargs):\n logging.info('Context %s, function %s', *args)\n\n return args", "def doctest_BackgroundWorkerThread():", "def _wrap_executor(fn, args, tracer, ctx):\n # the AsyncioContextProvider knows that this is a new thread\n # so it is legit to pass the Context in the 
thread-local storage;\n # fn() will be executed outside the asyncio loop as a synchronous code\n tracer.context_provider.activate(ctx)\n return fn(*args)", "def run_in_thread(fn):\n def run(*k, **kw):\n t = threading.Thread(target=fn, args=k, kwargs=kw)\n t.start()\n return t\n return run", "def run_in_thread(fn):\n def run(*k, **kw):\n t = threading.Thread(target=fn, args=k, kwargs=kw)\n t.start()\n return t\n return run", "def call(self):\n current_thread = threading.current_thread() # get current thread·\n event = self.q.get() # get task from queue\n while event != self.StopEvent: # Determine whether task is a terminator\n\n func, arguments, callback = event # get funcname,params,callback name\n try:\n result = func(*arguments)\n func_excute_status = True # set func executed status success\n except Exception as e:\n func_excute_status = False # set func executed status failure\n result = None\n print('{} executed error:'.format(func.__name__), e)\n\n if func_excute_status: #\n if callback is not None: # determine whetherif callback is None\n try:\n callback(result)\n except Exception as e:\n print(callback.__name__, e)\n\n with self.worker_state(self.free_list, current_thread):\n if self.terminal:\n event = self.StopEvent\n else:\n event = self.q.get()\n\n else:\n self.created_list.remove(current_thread)", "def apply_only(self, function, worker, *args, **kwargs):\n pass", "def run(self, func: Callable, args: tuple) -> ExecReport:\n process = self._prepare(func, args)\n time_usage, stats, killed = self._measure(process)\n return self._report(args, time_usage, stats, killed)", "def _run_evaluator(self, func, stats):\n host_stats = stats['host_stats']\n host_caps = stats['host_caps']\n extra_specs = stats['extra_specs']\n share_stats = stats['share_stats']\n\n result = evaluator.evaluate(\n func,\n extra=extra_specs,\n stats=host_stats,\n capabilities=host_caps,\n share=share_stats)\n\n return result", "def lambda_function(f):\n @functools.wraps(f)\n def wrapper(event, context):\n global _CURRENT_LAMBDA_CONTEXT\n _CURRENT_LAMBDA_CONTEXT = context\n try:\n result = f(event, context)\n return wait(lambda: result)\n except:\n cls, exc, trace = sys.exc_info()\n report_exc_info((cls, exc, trace.tb_next))\n wait()\n raise\n return wrapper", "def __call__(self, fn):\n fn.handler = True\n fn.function = True\n\n # Circuits properties\n fn.names = self.names\n fn.priority = self.kwargs.get(\"priority\", 0)\n fn.channel = \"functions.{0}\".format(self.names[0])\n fn.override = self.kwargs.get(\"override\", False)\n fn.event = True\n\n @wraps(fn)\n def app_function_decorator(itself, event, *args, **kwargs):\n \"\"\"\n The decorated function\n\n :param itself: The function to decorate\n :type itself: resilient_circuits.ResilientComponent\n :param event: The Event with the StompFrame and the Message read off the Message Destination\n :type event: resilient_circuits.action_message.FunctionMessage\n \"\"\"\n function_inputs = event.message.get(\"inputs\", {})\n\n def _invoke_app_function(evt, **kwds):\n \"\"\"\n The code to call when a function with the decorator `@app_function(api_name)`\n is invoked.\n\n Returns result_list when function with the decorator `@app_function(api_name)` is\n finished processing.\n\n A method that has this handler should yield a StatusMessage or a FunctionResult\n - When a StatusMessage is yield'ed a StatusMessageEvent is fired with the text of the StatusMessage\n - When a FunctionResult is yield'ed it calls resilient-lib.ResultPayload.done() with the parameters of\n 
FunctionResult being passed to it and appends the result to result_list. E.g:\n `yield FunctionResult({\"key\":\"value\"})`\n `yield FunctionResult({\"key\": \"value\"}, success=False, reason=\"Bad call\")`\n\n :param evt: The Event with the StompFrame and the Message read off the Message Destination\n :type fn: resilient_circuits.action_message.FunctionMessage\n \"\"\"\n LOG.debug(\"Running _invoke_app_function in Thread: %s\", threading.currentThread().name)\n\n result_list = []\n\n # Validate the fn_inputs in the Message\n fn_inputs = validate_fields([], kwds)\n LOG.info(\"[%s] Validated function inputs\", evt.name)\n LOG.debug(\"[%s] fn_inputs: %s\", evt.name, fn_inputs)\n\n rp = ResultPayload(itself.PACKAGE_NAME, version=constants.APP_FUNCTION_PAYLOAD_VERSION, **fn_inputs)\n\n fn_inputs_tuple = namedtuple(\"fn_inputs\", fn_inputs.keys())(*fn_inputs.values())\n\n # Set evt.message in local thread storage\n itself.set_fn_msg(evt.message)\n\n # Invoke the actual Function\n fn_results = fn(itself, fn_inputs_tuple)\n\n for r in fn_results:\n if isinstance(r, StatusMessage):\n LOG.info(\"[%s] StatusMessage: %s\", evt.name, r)\n itself.fire(StatusMessageEvent(parent=evt, message=r.text))\n\n elif isinstance(r, FunctionResult):\n r.name = evt.name\n if not r.custom_results:\n r.value = rp.done(\n content=r.value,\n success=r.success,\n reason=r.reason)\n LOG.info(\"[%s] Returning results\", r.name)\n result_list.append(r)\n\n elif isinstance(r, Exception):\n raise r\n\n else:\n # Whatever this is, add it to the results\n LOG.debug(r)\n result_list.append(r)\n\n return result_list\n\n invoke_app_function = task(_invoke_app_function, event, **function_inputs)\n fn_result = yield itself.call(invoke_app_function, \"functionworker\")\n yield fn_result.value\n\n return app_function_decorator", "def _spawn_worker(self, func, *args, **kwargs):\n if self._worker_pool.free():\n return self._worker_pool.spawn(func, *args, **kwargs)\n else:\n raise exception.NoFreeConductorWorker()", "def __call__(self):\n return self._executor()", "def doctest_BackgroundWorkerThread_forSite():", "def async(fnc, *args, **kwargs):\n gen = fnc(*args, **kwargs)\n\n def perform(result):\n if (\n type(result) is tuple and len(result) and\n issubclass(result[0], Exception)\n ):\n gen.throw(result[0](result[1]))\n return\n\n try:\n actor, msg, data = gen.send(result)\n actor.send(msg, perform, **data)\n except StopIteration:\n return\n\n perform(None)", "def _workerFunc(self):\n while True:\n try:\n with self._callCondition:\n if None in (self._args, self._kwargs):\n # Idle, wait for a new call\n self._busy = False\n self.busyChanged.emit()\n self._callCondition.wait()\n args = self._args\n kwargs = self._kwargs\n # Reset to `None`. If still `None` in the next loop run\n # (i.e., they have not been set anew by `__call__`), the\n # worker will go to sleep.\n self._args = None\n self._kwargs = None\n try:\n with self._exceptionLock:\n # Check if thread should exit\n if self._stopRequested:\n # Don't forget to reset\n self._stopRequested = False\n return\n # Tell `enabled` setter and `abort` that they can\n # raise an exception to interrupt thread execution\n self._allowException = True\n # Actual function call\n result = self._func(*args, **kwargs)\n finally:\n with self._exceptionLock:\n # From here on, don't use exceptions to interrupt the\n # thread as they would lead to \"exception raised during\n # exception handling\" errors. 
It is sufficient to set\n # `_stopRequested = True`, which will be honored in the\n # next run of the `while` loop.\n self._allowException = False\n except _InterruptThread:\n # `self._func` call was interrupted\n continue\n except Exception as e:\n # An exception was raised in `self._func`\n self.error.emit(e)\n else:\n # No exception, also no `return` due to `_stopRequested`\n self.finished.emit(result)", "def threaded_call(self, thread_callable, result_callable, *args, **kwargs):\n thread_args = (thread_callable, result_callable, None, args, kwargs)\n t = threading.Thread(target=self._thread_caller, group=None, args=thread_args)\n t.start()", "def perform(func, data, func_args=None, asynch=False, workers=None , progress=False, desc='Loading...'):\n if not callable(func) :\n raise ValueError('func must be callable')\n #Setting the arguments on the function\n func = functools.partial(func, **(func_args if func_args is not None else {}))\n #The data returned by function\n returned=list() \n elements=data\n try: import tqdm\n except ImportError: progress=False\n tqdm_args=dict()\n #The message will appear on loading bar if progress is True\n if progress is True :\n tqdm_args=dict(desc=desc, total=len(elements))\n #Runs the callable on list on executor or by iterating\n if asynch == True :\n if isinstance(workers, int) :\n if progress==True :\n returned=list(tqdm.tqdm(concurrent.futures.ThreadPoolExecutor(\n max_workers=workers ).map(\n func, elements), **tqdm_args))\n else:\n returned=list(concurrent.futures.ThreadPoolExecutor(\n max_workers=workers ).map(\n func, elements))\n else:\n raise AttributeError('When asynch == True : You must specify a integer value for workers')\n else :\n if progress==True:\n elements=tqdm.tqdm(elements, **tqdm_args)\n for index_or_item in elements:\n returned.append(func(index_or_item))\n return(returned)", "def run_callback(func, plus, result):\n data = result.value\n error = None if result.successful() else \"%s\" % result.exception\n try:\n if plus is None:\n func(data, error=error)\n else:\n func(data, plus, error=error)\n except Exception as error:\n logger.error(\"RPC callback for %s.%s raised exception.\",\n self.remote_service_coord.name, method,\n exc_info=True)", "def run(self):\n self.func()", "def _future_work_():\n pass", "def _fmm(args):\n # an easier to debug traceback when multiprocessing\n # thanks to https://stackoverflow.com/a/16618842/1710603\n try:\n return apply_interp(*args)\n except:\n import traceback\n\n raise Exception(\"\".join(traceback.format_exception(*sys.exc_info())))", "def _queue_intermediate_table_job(self, executor, futures_to_cb, fn, description):\n self._logger.debug('Calculating intermediate table containing {0} [QUEUED]...'.format(description))\n futures_to_cb[executor.submit(fn, executor)] = partial(self._process_intermediate_table_job_result,\n description)", "def _ThreadCall(self, func, args, callback=None, thread=None):\n if thread is None:\n thread = self._infeed_pool\n\n def _ErrorCallback(e):\n tf.logging.exception(e)\n # Terminate the main thread.\n _thread.interrupt_main()\n\n return thread.apply_async(\n func, args, callback=callback, error_callback=_ErrorCallback)", "def call_execute_sync_and_get_result(\n prepped_function, execute_sync_flag=idaapi.MFF_WRITE\n):\n # if the caller has deliberately specified no execute_sync flag, don't call it - just call the prepped function directly and return the result\n if execute_sync_flag is None:\n return prepped_function()\n\n # where we get our result\n return_object 
= WrapperReturn()\n\n # bind the prepped function bound with args into the callable wrapper we'll pass to execute sync, along with the return object\n prepped_wrapper = functools.partial(\n wrapper_execute_on_main_thread, prepped_function, return_object\n )\n\n # run it on the main thread\n idaapi.execute_sync(prepped_wrapper, execute_sync_flag)\n\n if return_object.exception is not None:\n # something went wrong. reraise the exception on this thread, so it'll get passed back\n raise return_object.exception\n\n # and we're done!\n return return_object.result", "def PyHiew_ExecuteCallable(func_name, g, *args, **kwargs):\r\n PY_COMPILE_ERR = None\r\n try:\r\n g[func_name](*args, **kwargs)\r\n except Exception, e:\r\n PY_COMPILE_ERR = str(e) + \"\\n\" + traceback.format_exc()\r\n return PY_COMPILE_ERR", "def enqueue(self, f, *args, **kwargs):\n if not isinstance(f, basestring) and f.__module__ == '__main__':\n raise ValueError(\n 'Functions from the __main__ module cannot be processed '\n 'by workers.')\n\n # Detect explicit invocations, i.e. of the form:\n # q.enqueue(foo, args=(1, 2), kwargs={'a': 1}, timeout=30)\n timeout = None\n result_ttl = None\n if 'args' in kwargs or 'kwargs' in kwargs:\n assert args == (), 'Extra positional arguments cannot be used when using explicit args and kwargs.' # noqa\n timeout = kwargs.pop('timeout', None)\n args = kwargs.pop('args', None)\n result_ttl = kwargs.pop('result_ttl', None)\n kwargs = kwargs.pop('kwargs', None)\n\n job = yield self.enqueue_call(func=f, args=args, kwargs=kwargs,\n timeout=timeout, result_ttl=result_ttl)\n defer.returnValue(job)", "def exec_function(self, args):\n raise NotImplementedError()", "def RunSynchronous(self, run_func, *args, **kwargs):\n try:\n self.AcquireLock()\n return run_func(*args, **kwargs)\n finally:\n self.ReleaseLock()", "def submit(self, func, *args, **kwargs):\n errors = []\n arguments = []\n keyword_arguments = {}\n result = None\n try:\n for arg in args:\n if isinstance(arg, futures.Future) and arg.failed:\n exc = arg._exception\n if isinstance(exc, exceptions.MultipleExceptions):\n errors.extend(exc.exceptions)\n else:\n errors.append(exc)\n else:\n arguments.append(executor.get_actual_value(arg))\n\n for key, val in kwargs.iteritems():\n if isinstance(val, futures.Future) and val.failed:\n exc = val._exception\n if isinstance(exc, exceptions.MultipleExceptions):\n errors.extend(exc.exceptions)\n else:\n errors.append(val._exception)\n else:\n keyword_arguments[key] = executor.get_actual_value(val)\n\n except exceptions.ExecutionBlocked:\n result = futures.Future()\n finally:\n if errors:\n result = futures.Future()\n result._state = futures.FINISHED\n result._exception = exceptions.MultipleExceptions(\n 'futures failed',\n errors,\n )\n if result is not None:\n return result\n\n try:\n if isinstance(func, Activity):\n make_task = self.make_activity_task\n elif issubclass(func, Workflow):\n make_task = self.make_workflow_task\n else:\n raise TypeError\n task = make_task(func, *arguments, **keyword_arguments)\n except TypeError:\n raise TypeError('invalid type {} for {}'.format(\n type(func), func))\n\n return self.resume(task, *arguments, **keyword_arguments)", "def getCallable():", "def enqueue(self, f, *args, **kwargs):\n if not isinstance(f, string_types) and f.__module__ == '__main__':\n raise ValueError('Functions from the __main__ module cannot be processed '\n 'by workers')\n\n # Detect explicit invocations, i.e. 
of the form:\n # q.enqueue(foo, args=(1, 2), kwargs={'a': 1}, timeout=30)\n timeout = kwargs.pop('timeout', None)\n description = kwargs.pop('description', None)\n result_ttl = kwargs.pop('result_ttl', None)\n ttl = kwargs.pop('ttl', None)\n depends_on = kwargs.pop('depends_on', None)\n at_front = kwargs.pop('at_front', False)\n meta = kwargs.pop('meta', None)\n\n if 'args' in kwargs or 'kwargs' in kwargs:\n assert args == (), 'Extra positional arguments cannot be used when using explicit args and kwargs' # noqa\n args = kwargs.pop('args', None)\n kwargs = kwargs.pop('kwargs', None)\n\n return self.enqueue_call(func=f, args=args, kwargs=kwargs,\n timeout=timeout, result_ttl=result_ttl, ttl=ttl,\n description=description, depends_on=depends_on,\n at_front=at_front, meta=meta)", "def background(func):\n def func_wrapper(*args, **kwargs):\n if settings.TEST or settings.DEBUG:\n # In a test environment we just wanna run everything synchronously\n # so just run the function right away. We should get these tasks\n # running in a local environment, but for the time being, just run\n # those synchronously as well.\n func(*args, **kwargs)\n elif settings.BACKGROUND or settings.SHELL:\n # If we're in the background we wanna run the function, but we need\n # some kind of error reporting mechanism.\n try:\n func(*args, **kwargs)\n except Exception:\n log_error('{} error'.format(func.__name__), exc_info=sys.exc_info())\n else:\n # Otherwise we're on web or something and we wanna kick this function\n # off to a background thread.\n try:\n q.enqueue(func, *args, **kwargs)\n except Exception as ex:\n search_for = \"OOM command not allowed when used memory > 'maxmemory'\"\n is_response_error = isinstance(ex, ResponseError)\n if is_response_error and ex.args[0].find(search_for) >= 0:\n message = (\n 'I fixed this one time by running\\n'\n 'heroku addons:destroy redistogo -a stayd-prod\\n'\n 'heroku addons:create redistogo:nano -a stayd-prod\\n'\n 'heroku restart -a stayd-prod')\n log_error('Redis is out of memory', message, sys.exc_info())\n else:\n log_error(\n 'Unknown error enquing background task', func.__name__,\n sys.exc_info())\n\n return func_wrapper", "def Task(func, *args, **kwargs):\n future = Future()\n\n def handle_exception(typ, value, tb):\n if future.done():\n return False\n future.set_exc_info((typ, value, tb))\n return True\n\n def set_result(result):\n if future.done():\n return\n future.set_result(result)\n with stack_context.ExceptionStackContext(handle_exception):\n func(*args, callback=_argument_adapter(set_result), **kwargs)\n return future", "def _invoke_inbound_app(evt, **kwds):\n result_list = []\n LOG.debug(\"Running _invoke_inbound_app in Thread: %s\", threading.currentThread().name)\n\n # Invoke the actual Function\n ia_results = ia(itself, evt.message, evt.message.get(\"action\", \"Unknown\"))\n\n for r in ia_results:\n LOG.debug(r)\n result_list.append(r)\n\n return result_list", "def run_in_thread(fn):\r\n @staticmethod\r\n def run(*k):\r\n thread = threading.Thread(target=fn, args=(*k,), daemon = True)\r\n thread.start()\r\n return thread # <-- return the thread\r\n return run", "def lambda_fn(ds, fn):\n logger.info(\"Applying function '%s' on dataset.\", str(fn))\n return fn(ds)", "def _in_thread(func, *args, **kwargs):\r\n def _f():\r\n func(*args, **kwargs)\r\n t = threading.Thread(target=_f, name='/*/*')\r\n t.start()\r\n return t", "async def _executor(self, func):\n return await asyncio.coroutine(func)()", "def run(self, worker, evaluator=None):\n pass", "def 
threaded(fn):\n def wrapper(*args, **kwargs):\n Thread(target=fn, args=args, kwargs=kwargs).start()\n return wrapper", "def submit(self, fn, *args, **kwargs):\n\n with self._shutdown_lock:\n name = FadeBuilder.serialize(fn, args, kwargs)\n module = fn.__module__\n\n exe_name = None # Badisa.build(ser)\n\n exe_key = ftpclient.upload(self.ftp_server, exe_name) # Fabio.upload(exe)\n url = \"www.Nachi.com\"\n payload = {\"name\": \"exe name\",\"args\": name, \"module\": module, \"key\": exe_key}\n requests.post(url=url, data=json.dumps(payload)) # send exe to Nachi\n\n results_key = requests.get(url) # get results key from Nachi\n f = FadeFuture(results_key.text, self.ftp_server) # apply future to our needs? requires FadeFuture?\n\n return f", "def retfun():\r\n return fn(*args, **kwargs)", "def apply(self,function,args=(),keywords=None):\n package = self.workers[0].apply_pack(function,args,keywords)\n return self._send_recv(package)", "def __call__(self, *args, **kwargs):\n value = None\n for callback in self.callbacks:\n try:\n local_value = callback(*args, **kwargs)\n except Exception as e:\n ip = get_ipython()\n if ip is None:\n self.log.warning(\"Exception in callback %s: %s\", callback, e, exc_info=True)\n else:\n ip.showtraceback()\n else:\n value = local_value if local_value is not None else value\n return value", "def check_result(f):\n\n def g(self, *args, **kwargs):\n\n if self._results is None:\n raise exceptions.Error(\"Called before `execute`\")\n return f(self, *args, **kwargs)\n\n return g", "async def execute(self, fn, *args):\n infs = [self.get_inferrer_for(poss)\n for poss in await fn.get()]\n argrefs = [VirtualReference(a) for a in args]\n return await execute_inferrers(self, infs, None, argrefs)", "def _worker_fn(example, transform):\n feature = transform(example)\n return feature", "def execute(self) -> Any:\n return self.function(**self.kwargs)", "def execution_rule(f):\n return _ExecutionRuleFunction(f)", "def threaded_fn(func):\n def wrapper(*args, **kwargs):\n thread = threading.Thread(target=func, args=args, kwargs=kwargs)\n thread.start()\n return thread\n return wrapper", "async def invoke(\n fn: Callable,\n *args,\n **kwargs):\n\n # Add aliases for the kwargs, directly linked to the body, or to the assumed defaults.\n if 'event' in kwargs:\n event = kwargs.get('event')\n kwargs.update(\n type=event['type'],\n body=event['object'],\n spec=event['object'].setdefault('spec', {}),\n meta=event['object'].setdefault('metadata', {}),\n status=event['object'].setdefault('status', {}),\n uid=event['object'].get('metadata', {}).get('uid'),\n name=event['object'].get('metadata', {}).get('name'),\n namespace=event['object'].get('metadata', {}).get('namespace'),\n )\n if 'cause' in kwargs:\n cause = kwargs.get('cause')\n kwargs.update(\n event=cause.event,\n body=cause.body,\n diff=cause.diff,\n old=cause.old,\n new=cause.new,\n patch=cause.patch,\n logger=cause.logger,\n spec=cause.body.setdefault('spec', {}),\n meta=cause.body.setdefault('metadata', {}),\n status=cause.body.setdefault('status', {}),\n uid=cause.body.get('metadata', {}).get('uid'),\n name=cause.body.get('metadata', {}).get('name'),\n namespace=cause.body.get('metadata', {}).get('namespace'),\n )\n\n if is_async_fn(fn):\n result = await fn(*args, **kwargs)\n else:\n\n # Not that we want to use functools, but for executors kwargs, it is officially recommended:\n # https://docs.python.org/3/library/asyncio-eventloop.html#asyncio.loop.run_in_executor\n real_fn = functools.partial(fn, *args, **kwargs)\n\n # Copy 
the asyncio context from current thread to the handlr's thread.\n # It can be copied 2+ times if there are sub-sub-handlers (rare case).\n context = contextvars.copy_context()\n real_fn = functools.partial(context.run, real_fn)\n\n loop = asyncio.get_event_loop()\n task = loop.run_in_executor(executor, real_fn)\n await asyncio.wait([task])\n result = task.result() # re-raises\n return result", "def callback(self, fun: Callable[[], None] | None) -> None:", "def ReturnWrapper(queue, fn):\n queue.put(fn())", "def wrapper(func, retvals_queue, chunk):\n return_value = func(*chunk['args'], **chunk['kwargs'])\n retvals_queue.put(dict(indices=chunk['indices'],\n return_value=return_value))", "def run(self):\r\n self.fn(*self.args, **self.kwargs)\r\n self.schedule()" ]
[ "0.58918166", "0.5804437", "0.5800247", "0.57977927", "0.5780088", "0.5774668", "0.56417096", "0.56402177", "0.5606427", "0.5577625", "0.5576409", "0.5576409", "0.55548936", "0.5543454", "0.5538874", "0.55007344", "0.5496635", "0.5471326", "0.54556096", "0.54122365", "0.5410197", "0.54021627", "0.53652525", "0.5350851", "0.53414893", "0.532355", "0.5319874", "0.53134304", "0.52891487", "0.52852297", "0.5230068", "0.51903504", "0.51879805", "0.51831853", "0.5164188", "0.51597106", "0.5159624", "0.5140741", "0.5130644", "0.5116821", "0.50919044", "0.50887865", "0.50866336", "0.50816846", "0.50793195", "0.5071211", "0.50706875", "0.5049973", "0.5049973", "0.50390613", "0.50385565", "0.5017554", "0.501009", "0.5008394", "0.50041527", "0.5004081", "0.49963477", "0.49950343", "0.49922764", "0.49794453", "0.49723288", "0.49648327", "0.49616536", "0.49518305", "0.4950722", "0.4947853", "0.49434918", "0.491544", "0.49117893", "0.491041", "0.49094614", "0.48915362", "0.48912477", "0.48832574", "0.48815146", "0.48802358", "0.48655283", "0.48634762", "0.48581836", "0.4855124", "0.48483336", "0.48481682", "0.48441854", "0.48437813", "0.48403752", "0.48343542", "0.48314345", "0.48274222", "0.48220038", "0.48216912", "0.4820258", "0.4816988", "0.4815395", "0.48088557", "0.4805104", "0.4804304", "0.47997066", "0.4799102", "0.479588", "0.47918" ]
0.5575183
12
Do the first part of a functional execution
def ex(self, fn, *args, **kwargs):\n    if len(args) == 0 and len(kwargs) == 0:\n        self.down_queue.put(fn)\n    else:\n        def closure():\n            return fn(*args, **kwargs)\n        self.down_queue.put(closure)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def firstFunction(self):", "def run_one_step(self):\n pass", "def do_begin(begin):\n if begin:\n do_action(begin)", "def step(self):\n self.function()", "def do_twice(f):\n f()\n f()", "def do_twice(f):\n f()\n f()", "def complete_run():\n pass", "def return_first(fn):\n def wrapped(*args, **kwargs):\n res = fn(*args, **kwargs)\n return res if _HVD.rank() == 0 else None\n return wrapped", "def reduce_run():", "def run(self):\n temp_function_to_run = [f for (f, k) in self._function_to_run\n if k <= singletons.BLOCKCHAIN_INSTANCE.block_number]\n for f in temp_function_to_run:\n f()\n self._function_to_run = [(f, k) for (f, k) in self._function_to_run\n if f not in temp_function_to_run]", "def run(self):\n self.func()", "def pre_execute(self):", "def __call__(self):\n if grinder.runNumber == 0: self.initialSleep()\n (param1, param2) = self.getParam()\n self.request1(param1, param2)", "def caller():\n\n for func in funcs:\n func()", "def fn():", "def RUN(self):", "def one():\n return lambda f: lambda x: f(x)", "def __call__ ( self , *x ) :\n return partial ( self.__index ,\n self.func ,\n x ,\n self.step ,\n self.order ,\n self.err )", "def _run(self):\n\n self.is_running = False\n self.start()\n self.function(*self.args, **self.kwargs)", "def run(self):\n self.fn(*self.args, **self.kwargs)", "def run(_):\n pass", "def part_1(code: List):\n acc, _ = run_code(code)\n\n return acc", "def process(self, func=lambda test: test.run()):\n yield self, func(self.bind(context=None))", "def executor(self):", "def do_step(self) -> None:", "def first(self):", "def First():\n return CheckForError(lib.Generators_Get_First())", "def run_once(f):\n @wraps(f)\n def wrapper(*args, **kwargs):\n if not wrapper.has_run:\n result = f(*args, **kwargs)\n wrapper.has_run = True\n wrapper.result = result\n \n return wrapper.result\n \n wrapper.has_run = False\n return wrapper", "def next ( num = 1 ) :\n return run ( num )", "def symbol_execute_always_choice_first(self):\n rst = []\n start_s = self.p.factory.blank_state(addr=self.start + 1, option=[angr.sim_options.CALLLESS])\n sm: angr.SimulationManager = self.p.factory.simulation_manager(start_s)\n\n while True:\n one_active = sm.one_active\n rst.append(one_active)\n print(one_active)\n if len(sm.active) > 0:\n sm.active = [one_active]\n if self.is_state_return(one_active):\n break\n sm.step(selector_func=set_callless_to_state)\n return rst", "def execute_deferred(fn):\n\n pass", "def run(self, time_or_fn):\n raise NotImplementedError", "def process():", "def run_this(self, script):\n for line in script.strip().split(\"\\n\"):\n # TODO Interpret lines more than just calling functions\n if line.startswith(\"#\"):\n # Skip lines that start with #\n continue\n retval = self.call_function(line.strip())\n #print retval", "def run(self):\r\n self.fn(*self.args, **self.kwargs)\r\n self.schedule()", "def run_test_first(op_list_file):\n old_list = get_op_list(op_list_file)\n new_list = filter(lambda x: x not in black_list, old_list)\n eager_op_test = transform_list_to_str(new_list)\n os.system(\"ctest -R \\\"(\" + eager_op_test + \")\\\" >& test_op_log.txt\")", "def _run(self):\n self._algorithm(self._list, self)", "def preLoopFunctions(self):\n\t\treturn", "def run_single(self):\n self.run_sim_time(1)", "def getFirstFunction(self) -> ghidra.program.model.listing.Function:\n ...", "def __call__(self):\n return self._executor()", "def func_wrapper():\n set_interval_sequence(functions[1:] + functions[:1], sec)\n functions[0]()", "def run(self):\n if self.next_state == 
\"initialize_rexarm\":\n self.initialize_rexarm()\n\n if self.next_state == \"idle\":\n self.idle()\n\n if self.next_state == \"estop\":\n self.estop()\n\n if self.next_state == \"execute_tp\":\n self.execute_tp()\n\n if self.next_state == \"execute\":\n self.execute()\n\n if self.next_state == \"calibrate\":\n self.calibrate()\n\n if self.next_state == \"manual\":\n self.manual()\n\n if self.next_state == \"learn\":\n self.learn()\n\n if self.next_state == \"remember\":\n self.remember()\n\n if self.next_state == \"write\":\n self.write()\n\n if self.next_state == \"get_color\":\n self.get_color()\n\n if self.next_state == \"find_blocks\":\n self.find_blocks()\n\n # if self.next_state == \"dance\":\n # self.execute_dance()", "def process():\n pass", "def Run():\r\n pass", "def run_starter(self, expect_to_fail=False):", "def udcall_map_first(*args):\n return _ida_hexrays.udcall_map_first(*args)", "def prepost_hook_one(self) -> None:\n self.poutput(\"one\")", "def task():", "def start_func_default(self, activation):\n activation.prepare()\n activation.done()\n return activation", "def _execute(self, _):\r\n pass", "def compute_first(self):\n compute_first_sets(self, self.rules)", "def func():", "def parallelizer(func, arg=False):\n if arg:\n func(arg)\n else:\n func()", "def _get_first(details: CallableDetails) -> CallableArg:\n return details.args[0]", "def main_function():\n return 1", "def very_simple():\n yield 1", "def run(self):\n\t\tcurX = self.x0\n\t\tcurY = self.func.evalAt(curX)\n\t\tcurT = self.T0\n\t\tfor i in range(1, self.iters + 1):\n\t\t\tif curT == 0:\n\t\t\t\tbreak\n\t\t\tnX = self.newStateFunc(curX)\n\t\t\tnY = self.func.evalAt(nX)\n\t\t\tif nY <= curY or self.acceptanceFunc(nY - curY, curT) > random.random(): # accept if lower energy or probability check passes\n\t\t\t\tcurX = nX\n\t\t\t\tcurY = nY \n\t\t\tcurT = self.coolingFunc(self.T0, i)\t\n\t\treturn (curX, curY)", "def call(self):\n self.call() # Call a function", "def task1(self):\n \n pass", "def first_page_execution(self):\n self.errors_and_correct_input_values_helper(wrong_pattern_error=True)\n self.utility_page.click_next_button()\n self.utility_page.click_next_button()\n self.second_page.wait_for_page()", "def run_once(func):\n @wraps(func)\n def wrapper(*args, **kwargs):\n if not wrapper.has_run:\n result = func(*args, **kwargs)\n wrapper.has_run = True\n return result\n wrapper.has_run = False\n return wrapper", "def main():\n print(\"My test started\")\n\n ten = my_function_1(5, 5)\n twenty = my_function_2(5, 5, 10)\n\n print(\"My test finished\")", "def test_first(self):\n\n test_cases = [\n Case(\n description=\"lists of strings\",\n val=[\"a\", \"b\"],\n args=[],\n kwargs={},\n expect=\"a\",\n ),\n Case(\n description=\"lists of things\",\n val=[\"a\", \"b\", 1, [], {}],\n args=[],\n kwargs={},\n expect=\"a\",\n ),\n Case(\n description=\"empty list\",\n val=[],\n args=[],\n kwargs={},\n expect=None,\n ),\n Case(\n description=\"unexpected argument\",\n val=[\"a\", \"b\"],\n args=[\", \"],\n kwargs={},\n expect=FilterArgumentError,\n ),\n Case(\n description=\"value not an array\",\n val=12,\n args=[],\n kwargs={},\n expect=FilterValueError,\n ),\n Case(\n description=\"first of undefined\",\n val=self.env.undefined(\"test\"),\n args=[],\n kwargs={},\n expect=None,\n ),\n ]\n\n self._test(First, test_cases)", "def select_first(condition):\n return where(condition) | unless(StopIteration, next)", "def __call__():", "def __call__():", "def __call__():", "def __call__():", "def __call__():", "def 
__call__(self):\n return self.fn()", "def _run(self):\n logging.warning('-> perform EMPTY experiment...')", "def process(self):\n try:\n if not self._successor:\n return self.loading_strategy()\n else:\n return self._successor.process_next(self.loading_strategy())\n except Exception as e:\n Oprint.err(e, 'lmdo')", "def run():\n main()", "def next():", "def next():", "def main():\n lines = [line for line in sys.stdin]\n oneTest = TestOneRun()\n \n # We expect two types of statement: \"produced\" and \"exited\"\n for line in lines:\n if \"produced\" in line:\n if oneTest.produceTest(line):\n break\n elif \"exited\" in line:\n if oneTest.exitTest(line):\n break", "def __call__(self, x_init):\n x = x_init\n for f in self.blocks:\n x = f(x)\n return x + x_init", "def quick_run(self, *args):\n self.inputs(*args)\n self.run()", "def __call__(self):\n # apply(self.func, self.args)\n self.func(*self.args)", "def f():", "def f():", "def symbol_execute1(self):\n pending_s = []\n start_s = self._create_start_state()\n pending_s.append(start_s)\n\n while len(pending_s) > 0:\n new_s = []\n for s in pending_s:\n cur_node = util.nodes_get_node_at(self.keep_ex_nodes, s.addr)\n util.assert_msg(cur_node, f\"Why can't get node at: {hex(s.addr)}\")\n step_rst = s.step(selector_func=self._symbol_execute_selector)\n for next_s in step_rst.successors:\n # like the main_dispather. find is none\n exnode = util.nodes_get_node_at(self.keep_ex_nodes, next_s.addr)\n if exnode:\n cur_node.successors.add(exnode)\n if not self.is_ex_node_end(self, exnode):\n new_s.append(next_s)\n pending_s.clear()\n pending_s.extend(new_s)", "def __firstRun(self, t1, t2):\n # this is the first run - initialize the timestep manager of metafor\n tsm = self.metafor.getTimeStepManager()\n dt = t2-t1 # time-step size\n dt0 = dt # initial time step\n dtmax = dt # maximum size of the time step\n tsm.setInitialTime(t1, dt0)\n tsm.setNextTime(t2, 1, dtmax)\n # launches metafor from t1 to t2\n #meta() # use toolbox.utilities\n log = LogFile(\"resFile.txt\")\n self.runOK = self.metafor.getTimeIntegration().integration()\n # at this stage, 2 archive files have been created in the workspace", "def __call__(self, func, *args, **kwds):\r\n results = self.map(func, *args, **kwds)\r\n if results:\r\n return results[0]", "def execute():", "def test_runs_given_function(self):\n from furious.processors import _handle_results\n\n processor = Mock()\n\n _handle_results({'_process_results': processor})\n\n processor.assert_called_once_with()", "def insertFunctionRun(self):\n\t\treturn", "def _step(self) -> None:", "def take_until_first(predicate, iterable):\n for x in iterable:\n yield x\n if predicate(x):\n break", "def main_code():\n pass", "def run(self, initial_next_state='starting', *args, **kwargs):\n return super().run(initial_next_state=initial_next_state, *args, **kwargs)", "def run():\n # main(sys.argv[1:])\n main()", "def take_first(count):\n def _take_first(iterable):\n return islice(iterable, count)\n return pipe | set_name('take_first(%s)' % count, _take_first)", "def no_arg():\n run_no_arg()", "def does_it_run(func, args):\n \n if args is None:\n func()\n else:\n func(*args)", "def run_one_dsp(tb_data, dsp_config, db_dict=None, fom_function=None, verbosity=0):\n \n pc, _, tb_out = build_processing_chain(tb_data, dsp_config, db_dict=db_dict, verbosity=verbosity)\n pc.execute()\n if fom_function is not None: return fom_function(tb_out, verbosity)\n else: return tb_out", "def callee(calls):\n calls.append(1)", "def run_functions(self):\n for 
function in self.functions:\n try:\n function()\n except Exception as err:\n logger.exception(\n f\"[red]Failed running and collecting data for function: {function.__name__}[/red]\"\n )\n logger.error(traceback.format_exc())\n logger.error(f\"[red]{err}[/red]\")\n logger.error(\"Continuing..\")", "def test_no_requirements(self):\n def f():\n pass\n self._run_as_operator(f)", "def simple():" ]
[ "0.67399466", "0.6280568", "0.587187", "0.5847407", "0.5788252", "0.5788252", "0.57825094", "0.57793474", "0.573395", "0.57085407", "0.56923556", "0.5653058", "0.56507367", "0.56455547", "0.56219184", "0.5605289", "0.5575513", "0.55392635", "0.5532281", "0.5517023", "0.5505936", "0.54590654", "0.54485136", "0.5441897", "0.54402995", "0.5438968", "0.5418619", "0.5411898", "0.5405846", "0.54058355", "0.5404099", "0.5401114", "0.5395621", "0.53951937", "0.53871745", "0.53749985", "0.5364694", "0.53642416", "0.53447616", "0.5344265", "0.5342105", "0.5325496", "0.5317103", "0.5315021", "0.5313693", "0.53126407", "0.5295999", "0.5291891", "0.5280739", "0.5271572", "0.52690625", "0.5266765", "0.52644753", "0.5254129", "0.52517635", "0.52314496", "0.522874", "0.521823", "0.5216749", "0.52164936", "0.5207288", "0.5191047", "0.5188095", "0.51861525", "0.51844186", "0.5177725", "0.5177725", "0.5177725", "0.5177725", "0.5177725", "0.5170982", "0.51619935", "0.5160484", "0.5158007", "0.51565975", "0.51565975", "0.51506776", "0.51366216", "0.5134149", "0.51273847", "0.5126148", "0.5126148", "0.5124669", "0.5118229", "0.51167357", "0.5116518", "0.51129", "0.51116914", "0.5107601", "0.51050353", "0.50962454", "0.5094321", "0.5093553", "0.5093002", "0.5089106", "0.5087778", "0.5081711", "0.50805724", "0.5080199", "0.50788707", "0.5075701" ]
0.0
-1
Retrieve the results of self.ex()
def ecute(self):\n    msg = self.up_queue_recv_socket.recv()\n    result, e = self.up_queue.get()\n    if e is not None:\n        raise e\n    return result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def results(self):\r\n pass", "def get_results(self):\n return self.result", "def _get_result(self):\r\n \r\n return self._result", "def results(self):\n pass", "def getResults():", "def result(self):", "def result(self):", "def result(self):\n if self._result is not None:\n return self._result\n if self._exc_info is not None:\n raise self._exc_info[0], self._exc_info[1], self._exc_info[2]\n self._check_done()\n return self._result", "def result(self):\r\n raise NotImplementedError('method result() is not implemented')", "def get_results(self):\n self.report('Checking finished evaluations.')\n outputs = {}\n while self.indices_to_retrieve:\n idx = self.indices_to_retrieve.pop(0)\n key = self.eval_key(idx)\n self.report('Retrieving output for evaluation {}'.format(idx))\n eval_proc = self.ctx[key]\n if not eval_proc.is_finished_ok:\n return self.exit_codes.ERROR_EVALUATE_PROCESS_FAILED\n outputs[idx] = get_outputs_dict(eval_proc)\n\n with self.optimizer() as opt:\n opt.update(outputs)", "def get_result(self) -> Any:\n ...", "def result( self):\n return self._result", "def Results(self):\n return self.data", "def Results(self):\n return self.data", "def result(self):\n return self['result']", "def result(self):\n return self._result", "def result(self):\n return self._result", "def result(self):\n return self._result", "def get_result(self):\n print('''message: {}\nopen key: ({}, {})\nencoded message: {}'''.format(self.msg, self.n, self.e, self.__encoded_msg))", "def get_results(self):\n return self.results", "def get_results(self):\n return self.results", "def getResults(self):\n return self.Results", "def output(self):\r\n return self.result", "def ERR(self):", "def GetResults(self):\n return self._results", "def getResults(self, cleanup=True):\n self.wait_on_job()\n stdout_str = self.ofile_string()\n stderr_str = self.efile_string()\n if cleanup:\n self.erase_files()\n return (stdout_str, stderr_str)", "def results(self):\r\n return self._results", "def get_data(self):\n return self._result", "def result(self):\n raise NotImplementedError('Must be implemented in subclasses.')", "def result(self, *args, **kwargs):\n raise NotImplementedError()", "def _get_results(self, res):\n self.async_res = res\n self.full_res = res.wait() # pragma: no cover\n self.trained = True # pragma: no cover\n self.mod_id = self.full_res['model_id'] # pragma: no cover\n self.data_id = self.full_res['data_id'] # pragma: no cover\n self.params_dump = self.full_res['params_dump'] # pragma: no cover\n if self.verbose > 0: # pragma: no cover\n print(\"Result {} | {} ready\".format(\n self.mod_id, self.data_id)) # pragma: no cover", "def getResult(self, *args, **kwargs):\r\n return None", "def retrieve_results(self):\n # return the results of the last calculation\n last_calc = self.ctx.calculations[-1]\n\n # check exit status of a last calc\n if last_calc.exit_status not in (None, 0):\n self.report(f'The calculations failed with exit message: {last_calc.exit_message}')\n exit_status = last_calc.exit_status // 100\n if exit_status == 3:\n return self.exit_codes.ERROR_CRYSTAL\n else:\n return self.exit_codes.UNKNOWN_ERROR\n\n for name, port in self.spec().outputs.items():\n if port.required and name not in last_calc.outputs:\n self.report('The spec specifies the output {} as required '\n 'but was not an output of {}<{}>'.format(name, self._calculation.__name__,\n last_calc.pk))\n\n if name in last_calc.outputs:\n self.out(name, last_calc.outputs[name])\n return", "def returnData(self):\r\n return self.returnRes", "def 
result(self):\n assert(self.__complete)\n return self.__result", "def remote_getResult(i=None):", "def getResult(self):\n return self.ok", "def results(self):\n return self._results", "def results(self):\n return self._results", "def results(self):\n return self._results", "def getResults(self) -> Tuple[str, Results]:\n\n return self.moss_results", "def result(self): \n return self.body", "def return_results(self):\n\n message = 'INFO: entering return_results'\n self.report(message)\n\n # try/except to capture as mnuch as possible (everything that is there even when workflow exits unsuccessfully)\n # capture pk and uuids of last calc, params and remote\n try:\n last_calc_uuid = self.ctx.last_calc.uuid\n last_calc_pk = self.ctx.last_calc.pk\n last_params_uuid = self.ctx.last_params.uuid\n last_params_pk = self.ctx.last_params.pk\n last_remote_uuid = self.ctx.last_remote.uuid\n last_remote_pk = self.ctx.last_remote.pk\n except:\n last_calc_uuid = None\n last_calc_pk = None\n last_params_uuid = None\n last_params_pk = None\n last_remote_uuid = None\n last_remote_pk = None\n\n all_pks = []\n for calc in self.ctx.calcs:\n try:\n all_pks.append(calc.pk)\n except:\n self.ctx.warnings.append(f'cound not get pk of calc {calc}')\n\n # capture links to last parameter, calcualtion and output\n try:\n last_calc_out = self.ctx.kkr.out['output_parameters']\n last_calc_out_dict = last_calc_out.get_dict()\n last_RemoteData = self.ctx.last_remote\n last_InputParameters = self.ctx.last_params\n except:\n last_InputParameters = None\n last_RemoteData = None\n last_calc_out = None\n last_calc_out_dict = {}\n\n # capture convergence info\n try:\n last_rms = self.ctx.rms[-1]\n except:\n last_rms = None\n\n # now collect results saved in results node of workflow\n message = 'INFO: collect outputnode_dict'\n self.report(message)\n outputnode_dict = {}\n outputnode_dict['workflow_name'] = self.__class__.__name__\n outputnode_dict['workflow_version'] = self._workflowversion\n outputnode_dict['material'] = self.ctx.formula\n outputnode_dict['loop_count'] = self.ctx.loop_count\n outputnode_dict['warnings'] = self.ctx.warnings\n outputnode_dict['successful'] = self.ctx.successful\n outputnode_dict['last_params_nodeinfo'] = {'uuid': last_params_uuid, 'pk': last_params_pk}\n outputnode_dict['last_remote_nodeinfo'] = {'uuid': last_remote_uuid, 'pk': last_remote_pk}\n outputnode_dict['last_calc_nodeinfo'] = {'uuid': last_calc_uuid, 'pk': last_calc_pk}\n outputnode_dict['pks_all_calcs'] = all_pks\n outputnode_dict['convergence_value'] = last_rms\n outputnode_dict['convergence_values_all_steps'] = array(self.ctx.rms_all_steps)\n outputnode_dict['convergence_values_last_step'] = array(self.ctx.last_rms_all)\n outputnode_dict['convergence_reached'] = self.ctx.kkr_converged\n outputnode_dict['kkr_step_success'] = self.ctx.kkr_step_success\n outputnode_dict['used_higher_accuracy'] = self.ctx.kkr_higher_accuracy\n\n # report the status\n if self.ctx.successful:\n self.report(\n 'STATUS: Done, the convergence criteria are reached.\\n'\n 'INFO: The charge density of the KKR calculation pk= {} '\n 'converged after {} KKR runs and {} iterations to {} \\n'\n ''.format(\n last_calc_pk, self.ctx.loop_count - 1, sum(self.ctx.KKR_steps_stats.get('isteps', [])),\n self.ctx.last_rms_all[-1]\n )\n )\n else: # Termination ok, but not converged yet...\n self.report(\n 'STATUS/WARNING: Done, the maximum number of runs '\n 'was reached or something failed.\\n INFO: The '\n 'charge density of the KKR calculation pk= '\n 'after {} KKR runs and {} 
iterations is {} \"me/bohr^3\"\\n'\n ''.format(\n self.ctx.loop_count - 1, sum(self.ctx.KKR_steps_stats.get('isteps', [])), self.ctx.last_rms_all[-1]\n )\n )\n\n # create results node and link all calculations\n message = 'INFO: create results nodes'\n self.report(message)\n link_nodes = {}\n icalc = 0\n for calc in self.ctx.calcs:\n link_nodes[f'KkrimpCalc{icalc}'] = calc.outputs.remote_folder\n icalc += 1\n if not self.ctx.dos_run:\n link_nodes['final_imp_potential'] = self.ctx.last_pot\n outputnode_t = create_out_dict_node(Dict(dict=outputnode_dict), **link_nodes)\n outputnode_t.label = 'kkr_scf_wc_results'\n outputnode_t.description = 'Contains results of workflow (e.g. workflow version number, info about success of wf, lis tof warnings that occured during execution, ...)'\n\n self.out('workflow_info', outputnode_t)\n # store out_potential as SingleFileData only if this was no DOS run\n if not self.ctx.dos_run:\n self.out('host_imp_pot', self.ctx.last_pot)\n\n # print results table for overview\n # table layout:\n message = 'INFO: overview of the result:\\n\\n'\n message += '|------|---------|--------|------|--------|---------|-----------------|---------------------------------------------|\\n'\n message += '| irun | success | isteps | imix | mixfac | qbound | rms | pk and uuid |\\n'\n message += '| | | | | | | first | last | |\\n'\n message += '|------|---------|--------|------|--------|---------|--------|--------|---------------------------------------------|\\n'\n KKR_steps_stats = self.ctx.KKR_steps_stats\n for irun in range(len(KKR_steps_stats.get('success', []))):\n message += '|%6i|%9s|%8i|%6i|%.2e|%.3e|%.2e|%.2e|' % (\n irun + 1, KKR_steps_stats.get('success')[irun], KKR_steps_stats.get('isteps')[irun],\n KKR_steps_stats.get('imix')[irun], KKR_steps_stats.get('mixfac')[irun],\n KKR_steps_stats.get('qbound')[irun], KKR_steps_stats.get('first_rms')[irun],\n KKR_steps_stats.get('last_rms')[irun]\n )\n message += f\" {KKR_steps_stats.get('pk')[irun]} | {KKR_steps_stats.get('uuid')[irun]}|\\n\"\n message += '|------|---------|--------|------|--------|---------|-----------------|---------------------------------------------|\\n'\n \"\"\"\n message += \"#|{}|{}|{}|{}|{}|{}|{}|{}|{}|{}|{}|\\n\".format(irun+1,\n KKR_steps_stats.get('success')[irun], KKR_steps_stats.get('isteps')[irun],\n KKR_steps_stats.get('imix')[irun], KKR_steps_stats.get('mixfac')[irun],\n KKR_steps_stats.get('qbound')[irun],\n KKR_steps_stats.get('first_rms')[irun], KKR_steps_stats.get('last_rms')[irun])\n \"\"\"\n self.report(message)\n\n # cleanup of unnecessary files after convergence\n # WARNING: THIS DESTROYS CACHABILITY OF THE WORKFLOW!!!\n if self.ctx.do_final_cleanup:\n if self.ctx.successful:\n self.report('INFO: clean output of calcs')\n remove_out_pot_impcalcs(self.ctx.successful, all_pks)\n self.report('INFO: clean up raw_input folders')\n clean_raw_input(self.ctx.successful, all_pks)\n\n # clean intermediate single file data which are not needed after successful run or after DOS run\n if self.ctx.successful or self.ctx.dos_run:\n self.final_cleanup()\n\n self.report('INFO: done with kkr_scf workflow!\\n')", "def values(self):\n return self.out, self.err", "def get_results(self):\n\n return self.results_", "def get_data(self):\n return self._results", "def get_results(self):\n\n super().get_results()", "def result(self) -> ExpressionResult:\n return self._result", "def end(self):\n self.logger.info(self.result)\n return self.result", "def get_results(self):\n error_dict = {'error_code_test': 
self.error_code_test,\n 'error_text_test': self.error_text_test}\n\n return self.testresults, error_dict, self.checkstats", "def results(self):\n if not self._results:\n self.read_results()\n return self._results", "def results(self):\n return extract_results(self.model)", "def execute(self):", "def execute(self):", "def execute(self):", "def execute(self):", "def getResult(self):\n return (self.__output, self.__errors, self.__fileSeparators)", "def results(self):\n\n\t\tresults = {'answer':42}\n\n\t\treturn results", "def exc_info(self):\n return self._exc_info", "def on_failure(self, exc: BaseException) -> None:", "def value(self):\n self.event.wait()\n if not self.exc:\n return self.result\n else:\n exc = self.exc\n if not self.exc:\n exc = StandardError('no result')\n raise exc", "def get_details(self):\n raise Exception(\"bad details\")", "def __result__(self) -> Any:\n return self._self_result", "def evals_result(self):\n if self.evals_result_:\n evals_result = self.evals_result_\n else:\n raise XGBoostError('No results.')\n\n return evals_result", "def evals_result(self):\n if self.evals_result_:\n evals_result = self.evals_result_\n else:\n raise XGBoostError('No results.')\n\n return evals_result", "def get_eval_result(self):\n return self.content_eval", "def get_result(self, state):\n pass", "def run(self):\n results = self.fetch()\n return results", "def results(self):\n return self._result_list", "def query(self):", "def getTestResults():", "def data_raise(self):\n return self.get_nowait()", "def query(self):\n pass", "def result(self):\n with self._condition:\n self.fetch()\n return self.__get_result()", "def get_results(self):\n return self._do_action_under_lock(self._get_all_results)", "def get_results_unsafe(self):\n return self._get_results()", "def test_get_results(self):\n pass", "def result(self, timeout=None):\n # Attempt to get the exception if there is one.\n # If there is not one, then we know everything worked, and we can\n # return an appropriate value.\n err = self.exception(timeout=timeout)\n if err is None:\n return self._result\n raise err", "def result(self):\n return self.a", "def result(self):\n with self.__lock:\n assert(self.__complete)\n return self.__result", "def query_results(self):\n return self.details[KEY_QUERY_RESULTS]", "def exo2():", "def report(self, result):\n raise NotImplementedError", "def execute(self):\n\t\tpass", "def results(self):\n\n return self._results", "def result(self):\n return Result(self.messages[:])", "def get_model_results(self):\n \n if not self.trained:\n raise NotTrainedException('You must train the model first')\n return self.results", "def execute(self):\r\n pass", "def results(self) -> list:\n return self.__results", "def _fill_result(self):\n # To be overrided in child\n raise Exception(\"Must override in child.\")", "def result(self, result):\n print(result)", "def getIntervenciones():", "def execute(self):\n pass", "def execute(self):\n pass", "def execute(self):\n pass", "def execute(self):\n pass", "def execute(self):\n pass", "def execute(self):\n pass", "def __call__(self):\r\n raise self", "def __call__(self):\r\n raise self", "def get_error(self):\n return self.exc_info" ]
[ "0.6693895", "0.66713417", "0.66669464", "0.6630863", "0.66149294", "0.6539933", "0.6539933", "0.64237297", "0.6279126", "0.6253117", "0.6223495", "0.62201995", "0.6201204", "0.6201204", "0.607676", "0.6055973", "0.6055973", "0.6055973", "0.60555613", "0.598526", "0.598526", "0.59828746", "0.5971041", "0.5958551", "0.5948812", "0.59091926", "0.59068567", "0.58877546", "0.58531713", "0.58471733", "0.58386475", "0.58325064", "0.5814684", "0.58043355", "0.57973576", "0.573315", "0.57309765", "0.5723808", "0.5723808", "0.5723808", "0.5721865", "0.57033414", "0.56996614", "0.5668144", "0.5656411", "0.56464255", "0.56395364", "0.56279725", "0.5620568", "0.561814", "0.5590727", "0.5584577", "0.557676", "0.557676", "0.557676", "0.557676", "0.5574448", "0.55670106", "0.5564388", "0.5560968", "0.55601", "0.5550481", "0.554929", "0.5546172", "0.5546172", "0.55290306", "0.552339", "0.5518925", "0.5514685", "0.55107325", "0.55089295", "0.5507881", "0.5502289", "0.54980946", "0.54980785", "0.54943395", "0.54886746", "0.54543996", "0.5446645", "0.54456806", "0.544142", "0.5441001", "0.5437363", "0.54365546", "0.54348946", "0.5431487", "0.54283965", "0.5406268", "0.5403555", "0.5402944", "0.53928167", "0.5379595", "0.5378456", "0.5378456", "0.5378456", "0.5378456", "0.5378456", "0.5378456", "0.5373045", "0.5373045", "0.53519344" ]
0.0
-1
Artificially set up the worker's work socket. This sets self.aw.work_socket so that methods other than "run" can be tested in the worker.
def set_work_socket(self):\n    self.analysis_id = uuid.uuid4().hex\n\n    def do_set_work_socket(aw):\n        aw.work_socket = cellprofiler_core.constants.worker.the_zmq_context.socket(\n            zmq.REQ\n        )\n        aw.work_socket.connect(self.work_addr)\n        aw.work_request_address = self.work_addr\n        aw.current_analysis_id = self.analysis_id\n\n    self.awthread.execute(do_set_work_socket, self.awthread.aw)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def setup(self) -> None:\n self.running = True\n self.listen()\n self.start_workers()\n\n # Send server socket to workers.\n assert self.socket is not None\n for work_queue in self.work_queues:\n work_queue[0].send(self.family)\n send_handle(work_queue[0], self.socket.fileno(),\n self.workers[self.current_worker_id].pid)\n self.socket.close()", "def setWorker(self, worker):\n pass", "def setup_for_run(self):\n self.server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n self.server.bind((self.ip_address, self.port))\n self.server.listen(100)", "async def _setup(self):\n\n Reporter.info('Setting up workers...')\n self.workers = [asyncio.Task(self._work(), loop=self.loop)\n for _ in range(self.MAX_WORKERS)]\n Reporter.info('Starting scan...')\n await self.q.join()", "def setup(self):\n # create the pull socket (to communicate with this actor, others\n # process have to connect a push socket to this socket)\n self.pull_socket, pull_port = self._create_socket(zmq.PULL, -1)\n\n # create the control socket (to control this actor, a process have to\n # connect a pair socket to this socket with the `control` method)\n self.control_socket, ctrl_port = self._create_socket(zmq.PAIR, 0)\n\n self.pull_socket_address = LOCAL_ADDR + ':' + str(pull_port)\n self.control_socket_address = LOCAL_ADDR + ':' + str(ctrl_port)\n\n self._pull_port.value = pull_port\n self._ctrl_port.value = ctrl_port\n self._values_available.set()", "def worker(self, worker):\n\n self._worker = worker", "def prepare(self):\r\n self.socket.listen()\r\n for _ in xrange(self.threads):\r\n thread = Worker(self.tasks)\r\n thread.setDaemon(True)\r\n thread.start()\r\n self.prepared = True", "def setup(self):\n global log_th, conf_th, header_th, command_w_th\n self.conf_th_ic = conf_th\n self.header_th_ic = header_th\n self.command_w_th_inc = command_w_th\n self.hostname = conf_th.get_item(q_key='general').get('hostname')\n self.std_recv_size = int(conf_th.get_item(q_key='general').get('std_recv_size'))\n self.data_recv_size = int(conf_th.get_item(q_key='general').get('data_recv_size'))\n self.mail_save_enable = int(conf_th.get_item(q_key='general').get('mail_save_enable'))\n self.mail_save_path = conf_th.get_item(q_key='general').get('mail_save_path')\n self.no_answer = int(conf_th.get_item(q_key='general').get('no_answer'))\n self.sleep_between = int(conf_th.get_item(q_key='general').get('sleep_between'))\n self.message_id = library.q_id_generate(size=16)\n self.client_ip = tuple(self.client_address).__getitem__(0)\n self.client_port = int(tuple(self.client_address).__getitem__(1))\n # Running\n self.header_th_ic.write_header(ip=self.client_ip, qid=self.message_id)\n message = '220 ' + self.hostname\n self.func_sender(message)\n log_th.log_info('{} connected to {} thread'.format(self.client_ip, threading.current_thread().name))", "def setup(self):\n self.context = zmq.Context()\n self.sub_socket = self.context.socket(zmq.SUB)\n if self.filter:\n self.sub_socket.setsockopt(zmq.SUBSCRIBE, self.filter)\n self.sub_socket.connect('tcp://'+self.host+':'+str(self.com_port))\n return self", "def __init__(self, worker_id=0, base_port=5005):", "def connect_to_worker():\n socket = context.socket(zmq.REQ)\n socket.connect(\"tcp://localhost:5555\")\n return socket", "def setDefaultWorker(self, worker):\n pass", "def __init__(self, worker_id=0,\n base_port=5005):\n self.port = base_port + worker_id\n self.worker_id = worker_id\n self.server = None\n self.unity_to_external = 
None\n self.is_open = False", "def setUp(self) :\n self.longMessage = True\n logger = corAna.makeLogger(isTestMode=True,isMaster=True,isViewer=True,isServer=True,rank=0)\n isFirstWorker = True\n self.numTimes = 5\n numDataPointsThisWorker = 1\n\n self.workerData = corAna.WorkerData(logger, isFirstWorker, self.numTimes,\n numDataPointsThisWorker, addRemoveCallbackObject = None)", "def initialize(self,init):\n logger.info('*** initialize: worker id=%d',self._agent.wid)\n self.commands = {'initialize':None, 'before_do_work':None, 'after_do_work':None, 'finalize':None}\n self.commands.update(init.get(self._agent.wid,{}))\n exec_command(self.commands['initialize'])", "def init_socket_with_rety(self, worker_id):\n\n if self.mode == \"tcp\":\n # acquire lock for this socket in 100 ms or abandon, another thread is handling the socket reconnect\n with self.socket_locks[worker_id].acquire_timeout(0.1):\n connected = False\n while not connected:\n try:\n self._init_socket_tcp(worker_id)\n connected = True\n self.get_logger().info('Connection successful!')\n except Exception as e:\n self.get_logger().error(f\"Error initializing socket exception: {str(e)} worker id {worker_id}\")\n for i in range(1, 5):\n self.get_logger().info(f'Retrying in {5-i}')\n time.sleep(1)\n elif self.mode == \"udp\": \n self._init_socket_udp(worker_id)\n else:\n raise Exception(\"Mode must be one of 'udp' or 'tcp'\")", "def main() -> None:\n worker = Worker()\n worker.do_work()", "def setup_socket(self):\n self.server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n self.server_socket.bind((self.config['HOST_NAME'], self.config['BIND_PORT']))\n self.server_socket.listen(10)", "def _run(self):\n try:\n # Send a connect message\n self.socket.send_json({\"worker_id\": self.socket_id, \"message\": \"connect\"})\n while not self.stop_event.is_set():\n job = self.socket.recv_json()\n if self.debug:\n print(\"Received task %s\" % job)\n value = self._do_work(job)\n self.socket.send_json(\n {\n \"worker_id\": self.socket_id,\n \"message\": \"job_done\",\n \"job\": Job.get_result(job, value),\n }\n )\n except KeyboardInterrupt:\n pass\n except Exception as e:\n print(e)\n finally:\n self._disconnect()", "def init(self, job_start):\n self.server_addr = self.server.start(self)\n self.job_start = job_start\n self._start_worker()", "def _init_socket_tcp(self, worker_id):\n\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n sock.connect((self.host, self.port))\n if len(self.sockets) - 1 < worker_id:\n self.sockets.append(MessageSocket(sock))\n else:\n # socket was already initialized, MessageSocket implements a try:catch\n self.sockets[worker_id].close()\n self.sockets[worker_id] = MessageSocket(sock)", "def __init__(self, run, expname):\n logger.debug('Initializing worker {}.'.format(rank))\n self.run = int(run)\n self.expname = expname\n bcast_var = None\n dsname = comm.bcast(bcast_var, root=0)\n print(dsname)\n \n print('********** Start setup.')\n t0 = time.time()\n self.dsIdx = psana.DataSource(str(dsname))\n logger.info('********** Datasource on rank {}: {}s'.format(rank, time.time()-t0))\n self.dsIdxRun = next(self.dsIdx.runs())\n self.parse_detectors()\n logger.info('Rank {} has datasource and detectors.'.format(rank))\n print('********** Setup on rank {}: {}s'.format(rank, time.time()-t0))\n return", "def setup(self, runner):\n msg = None\n try:\n msg = \"Failed to start protocol connection\"\n self.connect()\n\n msg = None\n\n for cls in self.implements:\n getattr(self, cls.name).setup()\n\n msg = 
\"Post-connection steps failed\"\n self.after_connect()\n except Exception:\n if msg is not None:\n self.logger.warning(msg)\n self.logger.warning(traceback.format_exc())\n raise", "def __init__( self, app, nworkers, **kwds ):\n super( LwrJobRunner, self ).__init__( app, nworkers, runner_param_specs=LWR_PARAM_SPECS, **kwds )\n self._init_worker_threads()\n galaxy_url = self.runner_params.galaxy_url\n if galaxy_url:\n galaxy_url = galaxy_url.rstrip(\"/\")\n self.galaxy_url = galaxy_url\n self.__init_client_manager()\n if self.runner_params.url:\n # This is a message queue driven runner, don't monitor\n # just setup required callback.\n self.client_manager.ensure_has_status_update_callback(self.__async_update)\n else:\n self._init_monitor_thread()", "def setup(self):\n self.ip = self.client_address[0]\n self.port = self.client_address[1]\n self.connection = self.request #TCP socket object for the client\n self.server.clients[(self.ip, self.port)] = self\n self.server.peers.append((self.connection)) \n for client in self.server.clients:\n print(\"Connected client: \", client)\n\n #for peer in self.server.peers:\n # print(\"Peers: \", peer)", "def __init__(self, worker):\n self._worker = worker\n self._jobs = Queue()\n self._results, self._errors = [], []\n self._jobfinished = Condition()", "def init_worker (self):\n print(\"initializing map worker in directory: \", os.getcwd ())\n\n context = zmq.Context()\n\n # Socket to receive messages on. Worker uses PULL from the master\n # To that end, we connect to the server. The map worker pulls info\n # from the base port of the master\n self.receiver = context.socket (zmq.PULL)\n self.receiver.setsockopt (zmq.RCVHWM, 0)\n connect_addr = \"tcp://\"+ self.master_ip + \":\" + str (self.master_port)\n print(\"Using PULL, map worker connecting to \", connect_addr)\n self.receiver.connect (connect_addr)\n \n # As part of the initialization, we tell the master that we are up.\n # This information is to be pushed to the master at a port which is\n # 2 more than the base of the master.\n self.init_sender = context.socket (zmq.PUSH)\n self.init_sender.setsockopt (zmq.LINGER, -1)\n connect_addr = \"tcp://\" + self.master_ip + \":\" + str (self.master_port+2)\n print(\"Using PUSH, map worker connecting to worker up barrier at \", connect_addr)\n self.init_sender.connect (connect_addr)\n #bind_addr = \"tcp://\" + self.master_ip + \":\" + str (self.master_port+2)\n #print \"Using PUSH, map worker binding to worker up barrier at \", bind_addr\n #self.init_sender.bind (bind_addr)\n\n # now send an ACK to the barrier to let it know that we are up\n self.init_sender.send (b'0')\n\n # close the socket\n # self.init_sender.close ()\n\n # To send the results, we need to initialize the send address to point\n # to the map results barrier\n #\n # Note that the port number of the maps result barrier is 3 more than\n # the port of the master. 
Initialize it so we can send results \n self.results_sender = context.socket (zmq.PUSH)\n self.results_sender.setsockopt (zmq.LINGER, -1)\n self.results_sender.setsockopt (zmq.SNDHWM, 0)\n connect_addr = \"tcp://\" + self.master_ip + \":\" + str (self.master_port+3)\n print(\"Using PUSH, map worker connecting to map results barrier at \", connect_addr)\n self.results_sender.connect (connect_addr)\n #bind_addr = \"tcp://\" + self.master_ip + \":\" + str (self.master_port+3)\n #print \"Using PUSH, map worker binding to map results barrier at \", bind_addr\n #self.results_sender.bind (bind_addr)", "def run_worker(self):\n # TODO(xiejw): To allow execution framework to add train hooks.\n return self._start_distributed_training()", "def activate(self):\n self.socket.listen(self.request_queue_size)", "def _setup_communication(self):\n state = self.ui.checkBox_comm.checkState()\n if state:\n try:\n sys.path.append(\"..\")\n from zmq_interface.gui_interface import ZmqInterface\n except ImportError as e:\n self.write_text(\"ZMQ interface failed to import. No remote control for this session.\")\n self.disable_visualizer()\n return\n try:\n ##TODO: let user specify ports\n self.com = ZmqInterface(rep_port=REPLY_PORT,\n gui_handle=self)\n except Exception as e:\n #traceback.print_exc(file=sys.stdout)\n self.write_text(\"ZMQ interface failed to start. No remote control for this session. Reason: %s\" % e)\n self.disable_visualizer()\n return\n self.start = self._start_session\n self.stop = self._stop_session\n self.load_config = self._load_state\n self.save_config = self._save_state\n self.com_timer = QtCore.QTimer()\n self.com_timer.timeout.connect(self._check_coms)\n self.com_timer.start(200)\n self.write_text(\"ZMQ interface set up. Reply port on %s\" % self.com.rep_port)\n self.enable_visualizer()\n else:\n if self.com:\n self.com.close()\n if self.com_timer:\n self.com_timer.stop()\n self.com = None\n self.com_timer = None\n self.enable_visualizer()\n self.write_text(\"ZMQ interface closed.\")", "def initialize_threading(self, worker_env=None):\n if not (os.path.exists(core.config.paths.zmq_public_keys_path) and\n os.path.exists(core.config.paths.zmq_private_keys_path)):\n logging.error(\"Certificates are missing - run generate_certificates.py script first.\")\n sys.exit(0)\n\n for i in range(NUM_PROCESSES):\n args = (i,)\n if worker_env:\n args = (i, worker_env,)\n\n pid = multiprocessing.Process(target=loadbalancer.Worker, args=args)\n pid.start()\n self.pids.append(pid)\n\n self.ctx = zmq.Context.instance()\n self.auth = ThreadAuthenticator(self.ctx)\n self.auth.start()\n self.auth.allow('127.0.0.1')\n self.auth.configure_curve(domain='*', location=core.config.paths.zmq_public_keys_path)\n\n self.load_balancer = loadbalancer.LoadBalancer(self.ctx)\n self.receiver = loadbalancer.Receiver(self.ctx)\n\n self.receiver_thread = threading.Thread(target=self.receiver.receive_results)\n self.receiver_thread.start()\n\n self.manager_thread = threading.Thread(target=self.load_balancer.manage_workflows)\n self.manager_thread.start()\n\n self.threading_is_initialized = True\n logger.debug('Controller threading initialized')\n gevent.sleep(0)", "def setup(self) -> None:\n if not isinstance(self.request, SlipSocket):\n # noinspection PyTypeChecker\n self.request = SlipSocket(self.request)", "def setup(self):\n # sanity check for the configuration variable\n for required_param in (\"instance_type\", \"access_key_id\"):\n self._check_config_name(self.config, required_param)\n\n logger.info(\"Setting up 
AWSWorker for submission '{}'\".format(self.submission))\n _instances, status = aws.launch_ec2_instances(self.config)\n\n if not _instances:\n if status == \"retry\":\n # there was a timeout error, put this submission back in the\n # queue and try again later\n logger.warning(\n \"Unable to launch instance for submission \"\n f\"{self.submission}. Adding it back to the \"\n \"queue and will try again later\"\n )\n self.status = \"retry\"\n else:\n logger.error(\n \"Unable to launch instance for submission \"\n f\"{self.submission}. An error occured: {status}\"\n )\n self.status = \"error\"\n return\n else:\n logger.info(\"Instance launched for submission '{}'\".format(self.submission))\n (self.instance,) = _instances\n\n for _ in range(5):\n # try uploading the submission a few times, as this regularly fails\n exit_status = aws.upload_submission(\n self.config,\n self.instance.id,\n self.submission,\n self.submissions_path,\n )\n if exit_status == 0:\n break\n else:\n logger.info(\"Uploading submission failed, retrying ...\")\n if exit_status != 0:\n logger.error(\n 'Cannot upload submission \"{}\"'\n \", an error occured\".format(self.submission)\n )\n self.status = \"error\"\n else:\n logger.info(\"Uploaded submission '{}'\".format(self.submission))\n self.status = \"setup\"", "def on_run(self):\n wxMediator.on_run(self)\n listener_evt = InterThreadEventWX(self,\n wxEVT_NEW_LISTEN_CONN) \n talker_evt = InterThreadEventWX(self,\n wxEVT_NEW_TALK_CONN) \n server = self.server()\n sys.stderr.write('Starting server threads...\\n')\n sys.stderr.flush()\n server.start_other_threads(listener_evt, talker_evt)", "def setupTcp(self):\n \tself.tcpManager = QueuedConnectionManager()\n \tself.tcpReader = QueuedConnectionReader(self.tcpManager, 0)\n \tself.tcpWriter = ConnectionWriter(self.tcpManager, 0)", "def init_worker(self, worker_id) :\n\n # since this is called in a separate process,\n # we need to get a consistent view of the settings\n startup.main(self.mode, self.rank)\n\n # initialize the random seed for this process\n # we don't use just the worker_id but also the rank\n # so we truly get different random numbers in all workers,\n # not restricted to the current pool\n # note that we get some entropy from the time\n # so different epochs get different data augmentations\n np.random.seed((hash(time())\n + (settings.RANK * torch.utils.data.get_worker_info().num_workers\n + worker_id)) % 2**32)", "def _setupSocket(self):\n oldUmask = None\n if type(self._bindAddress) is str:\n # Unix socket\n sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)\n try:\n os.unlink(self._bindAddress)\n except OSError:\n pass\n if self._umask is not None:\n oldUmask = os.umask(self._umask)\n else:\n # INET socket\n assert type(self._bindAddress) is tuple\n assert len(self._bindAddress) == 2\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n\n sock.bind(self._bindAddress)\n sock.listen(socket.SOMAXCONN)\n\n if oldUmask is not None:\n os.umask(oldUmask)\n\n return sock", "def setUp(self):\n self.client_socket = open_client_socket()", "def setUp(self):\n self.client_socket = open_client_socket()", "def start(self):\n super().start()\n\n # Start the socket handler\n handler = copy.deepcopy(self.get_handler_stub('socket'))\n self.start_handler(handler)\n\n # Start a new thread to create a ZMQ socket and send messages to\n # the scheduler\n self._zmq_req_to_scheduler_thread = threading.Thread(\n target=self._handle_zmq_req_to_scheduler\n )\n 
t_name = 'ZMQ Request Messenger (-> Scheduler)'\n self._zmq_req_to_scheduler_thread.name = t_name\n self._zmq_req_to_scheduler_thread.daemon = True\n self._zmq_req_to_scheduler_thread.start()", "def register_worker(self):\n raise Exception('not implemented')", "def setup_backend(cls, application, io_loop, config, storage, tasks):\n\n server = cls(application, io_loop, config, storage, tasks)\n\n port = server.backend_settings.get('PORT', \"8897\")\n host = server.backend_settings.get('HOST', \"127.0.0.1\")\n\n server.listen(str(port), host)\n\n logger.info(\"GottWall TCP/IP transport listen {host}:{port}\".format(port=port, host=host))\n return server", "def setup(self):\n # Bind socket to local host and port\n self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.socket.settimeout(2)\n \n try:\n self.socket.bind((HOST, PORT))\n except socket.error:\n return False\n\n if self.running:\n # Start listening on socket\n self.socket.listen(2)\n print \"MapServer: Socket now listening.\"\n\n # Wait to accept a connection - blocking call\n try:\n self.connection, address = self.socket.accept()\n print \"MapServer: Socket connected with \" + address[0] + \":\" + str(address[1])\n self.connection.sendall(str(self.MAP_SIZE_PIXELS)+\"\\n\")\n return True\n except socket.error:\n return False", "def _run(self) -> None:\n asyncio.set_event_loop(self._server_loop)\n self._server_loop.run_until_complete(self._runner.setup())\n\n site = web.TCPSite(\n self._runner, self.host, self.port, ssl_context=self.ssl_context\n )\n self._server_loop.run_until_complete(site.start())\n\n # If the Server was initialized with port 0, determine what port the\n # underlying server ended up listening on\n if self.port == 0:\n site_server = cast(AsyncioServer, site._server)\n sockets = cast(List[Socket], site_server.sockets)\n socket = sockets[0]\n self.port = socket.getsockname()[1]\n\n self._startup_event.set()\n self._server_loop.run_forever()", "def run(self):\n self.connect()", "def getWorker(self):\n pass", "def run(self):\n\t\t\n\t\tself.connect(self.config[\"server\"])", "def setup_net(self):\n pass", "def worker(self, **options):\n pass", "def start(self):\n zmq_uri = (\n \"{protocol}://{address}:{port}\".format(\n protocol=self.protocol, address=self.address, port=self.port\n )\n if self.port\n else \"{protocol}://{address}\".format( # noqa\n protocol=self.protocol, address=self.address\n )\n )\n log.debug(\"ZMQ URI: %s\", zmq_uri)\n self.ctx = zmq.Context()\n if hasattr(zmq, self.type):\n skt_type = getattr(zmq, self.type)\n else:\n skt_type = zmq.PULL\n self.sub = self.ctx.socket(skt_type)\n self.sub.connect(zmq_uri)\n if self.hwm is not None:\n self.sub.setsockopt(zmq.RCVHWM, self.hwm)\n if self.recvtimeout is not None:\n log.debug(\"Setting RCVTIMEO to %d\", self.recvtimeout)\n self.sub.setsockopt(zmq.RCVTIMEO, self.recvtimeout)\n if self.keepalive is not None:\n log.debug(\"Setting TCP_KEEPALIVE to %d\", self.keepalive)\n self.sub.setsockopt(zmq.TCP_KEEPALIVE, self.keepalive)\n if self.keepalive_idle is not None:\n log.debug(\"Setting TCP_KEEPALIVE_IDLE to %d\", self.keepalive_idle)\n self.sub.setsockopt(zmq.TCP_KEEPALIVE_IDLE, self.keepalive_idle)\n if self.keepalive_interval is not None:\n log.debug(\"Setting TCP_KEEPALIVE_INTVL to %d\", self.keepalive_interval)\n self.sub.setsockopt(zmq.TCP_KEEPALIVE_INTVL, self.keepalive_interval)", "def run(self):\n self._connection = self.connect()\n self._connection.ioloop.start()", "def run(self):\n self._connection = self.connect()\n 
self._connection.ioloop.start()", "def work(self):\n self.config_file = self.args.config\n self.init_config()\n self.init_db()\n\n self.kickoff()", "def __init__(self, worker, event_loop):\n self.weakref_worker = weakref.ref(worker)\n self.event_loop = event_loop\n self.asyncio_task = None", "def __init__(self, config, queue_name):\n self.work_queue_client = WorkQueueClient(config, queue_name)", "def __init__(self, config, queue_name):\n self.work_queue_client = WorkQueueClient(config, queue_name)", "def _setup(self):\n self.log_object.write_log(\n \"MESH0001\", None, {\"mailbox\": self.mailbox, \"environment\": self.environment}\n )\n\n common_params = MeshCommon.get_ssm_params(f\"/{self.environment}/mesh\")\n mailbox_params = MeshCommon.get_ssm_params(\n f\"/{self.environment}/mesh/mailboxes/{self.mailbox}\"\n )\n self.params = {**common_params, **mailbox_params}\n # self._write_certs_to_files()\n\n # maybe_verify = bool(\n # self.mailbox_params.get(\"MESH_VERIFY_SSL\", \"True\") == \"True\"\n # )\n\n # if not maybe_verify:\n # requests.urllib3.disable_warnings(InsecureRequestWarning)\n\n # # rewrite MeshClient\n # self.mesh_client = ExtendedMeshClient(\n # common_params[\"MESH_URL\"],\n # self.mailbox,\n # mailbox_params[\"MAILBOX_PASSWORD\"],\n # shared_key=common_params[\"MESH_SHARED_KEY\"].encode(\"utf8\"),\n # cert=(self.client_cert_file.name, self.client_key_file.name),\n # verify=self.ca_cert_file.name if maybe_verify else None,\n # max_chunk_size=MeshCommon.DEFAULT_CHUNK_SIZE,\n # )", "def setup(self):\n # pylint: disable = E0633\n # Client* do deliver loop, client as result but\n # pylint does not accept that fact\n\n _LOGGER.debug(\"doing setup\")\n if self._config_type == \"serial\":\n _, self._client = ClientSerial(\n schedulers.ASYNC_IO,\n method=self._config_method,\n port=self._config_port,\n baudrate=self._config_baudrate,\n stopbits=self._config_stopbits,\n bytesize=self._config_bytesize,\n parity=self._config_parity,\n timeout=self._config_timeout,\n loop=self._loop,\n )\n elif self._config_type == \"rtuovertcp\":\n _, self._client = ClientTCP(\n schedulers.ASYNC_IO,\n host=self._config_host,\n port=self._config_port,\n framer=ModbusRtuFramer,\n timeout=self._config_timeout,\n loop=self._loop,\n )\n elif self._config_type == \"tcp\":\n _, self._client = ClientTCP(\n schedulers.ASYNC_IO,\n host=self._config_host,\n port=self._config_port,\n timeout=self._config_timeout,\n loop=self._loop,\n )\n elif self._config_type == \"udp\":\n _, self._client = ClientUDP(\n schedulers.ASYNC_IO,\n host=self._config_host,\n port=self._config_port,\n timeout=self._config_timeout,\n loop=self._loop,\n )\n else:\n assert False", "async def _hw_init(self):\n await self._write_async(b\":XR\\r\") # Broadcast: initialize + execute\n # Note: no need to consume reply here because there is none (since we are using broadcast)", "def setUp(self) -> None:\n local_sock, remote_sock = socketpair()\n local_sock.settimeout(1.0)\n remote_sock.settimeout(1.0)\n self.inverter = KeepAliveInverter(local_sock, None, keep_alive=0.01)\n self.sock = remote_sock", "def start(self):\n #url = '{}://{}:{}/'.format('http',\n # self.ip,\n # self.port)\n #self.service_info = ServiceInfo(\n # '_webthing._sub._http._tcp.local.',\n # '{}._http._tcp.local.'.format(self.name),\n # address=socket.inet_aton(self.ip),\n # port=self.port,\n # properties={\n # 'url': url,\n # },\n # server='{}.local.'.format(socket.gethostname()))\n #self.zeroconf = Zeroconf()\n #self.zeroconf.register_service(self.service_info)\n\n # If 
WebSocketS used and NOT running in thread, and WebServer IS\n # running in thread make shure WebServer has enough stack size to\n # handle also the WebSocket requests.\n log.info('Starting Web Server')\n self.server.Start(threaded=srv_run_in_thread, stackSize=8192)", "def setUp(self) :\n self.longMessage = True\n logger = corAna.makeLogger(isTestMode=True,isMaster=True,isViewer=True,isServer=True,rank=0)\n isFirstWorker = True\n self.numTimes = 5\n numDataPointsThisWorker = 1\n\n self.callbacks = {'remove':[],\n 'add':[],\n 'adjust':[]}\n\n class CallBack(object):\n def __init__(self, callbacks):\n self.callbacks = callbacks\n self.numpyArrayType = type(np.zeros(13))\n\n def workerBeforeDataRemove(self, tm, dataIdx, wd):\n assert isinstance(tm, int)\n assert isinstance(dataIdx, int)\n self.callbacks['remove'].append((tm,wd.X[dataIdx,0]))\n\n def workerAdjustData(self, data):\n assert isinstance(data, self.numpyArrayType)\n self.callbacks['adjust'].append(data[0])\n\n def workerAfterDataInsert(self, tm, dataIdx, wd):\n assert isinstance(tm, int)\n assert isinstance(dataIdx, int)\n self.callbacks['add'].append((tm,wd.X[dataIdx,0]))\n\n self.workerData = corAna.WorkerData(logger, isFirstWorker,\n self.numTimes,\n numDataPointsThisWorker,\n addRemoveCallbackObject = CallBack(self.callbacks))", "def setup_socket(self, sock):\n # Set SO_REUSEPORT option\n if self.has_reuseport and self.enable_reuseport():\n sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)\n # Set IP_FREEBIND option\n if self.has_frebind and self.enable_freebind():\n sock.setsockopt(socket.SOL_IP, self.get_ip_freebind(), 1)", "def __init__(self, creator_socket):\n self.__socket = creator_socket\n logger.info(BUNDY_SOCKCREATOR_INIT)", "def setUpZServerThread(self):\n\n from ZServer import zhttp_server, zhttp_handler, logger\n from cStringIO import StringIO\n\n zlog = logger.file_logger(StringIO())\n\n zserver = zhttp_server(ip=self.host,\n port=self.port, \n resolver=None,\n logger_object=zlog)\n zhandler = zhttp_handler(module=bobo_app_name, uri_base='')\n zserver.install_handler(zhandler)\n\n self.zserver = zserver\n name = self.__class__.__name__\n self.zthread = ZServerThread(name=\"%s server\" % name)\n self.zthread.start()", "def start(self):\n self._do_work.set()\n self._worker_thread.start()", "def start_work(self):\n self.worker_thread = WorkerThread(self.feedback_log, self.job_list) # only created when processing begins. 
May be recreated\n self.worker_thread.daemon = True\n self.worker_thread.start()", "def work():\n with rq.Connection(create_connection()):\n worker = rq.Worker(list(map(rq.Queue, listen)))\n worker.work()", "def initialize(self, myid, dispatcher, **model_params):\n self.lock_update = threading.Lock()\n self.jobsdone = 0 # how many jobs has this worker completed?\n # id of this worker in the dispatcher; just a convenience var for easy access/logging TODO remove?\n self.myid = myid\n self.dispatcher = dispatcher\n self.finished = False\n logger.info(\"initializing worker #%s\", myid)\n self.model = lsimodel.LsiModel(**model_params)", "def start(self):\n\n self.col2_print('Starting Listener threads', self.listeners)\n\n # Front facing socket to accept client connections.\n self.socket_front = self.zmq_context.socket(zmq.ROUTER)\n self.socket_front.router_raw = self.router_raw\n self.socket_front.setsockopt(zmq.LINGER, 1)\n self.socket_front.bind('%s://%s:%s' % (self.str_protocol,\n self.str_IP,\n self.str_port)\n )\n\n # Backend socket to distribute work.\n self.socket_back = self.zmq_context.socket(zmq.DEALER)\n self.socket_back.setsockopt(zmq.LINGER, 1)\n self.socket_back.bind('inproc://backend')\n\n # Start the 'fileIO' thread\n self.fileIO = FileIO( timeout = 60,\n within = self,\n debugFile = self.str_debugFile,\n debugToFile = self.b_debugToFile)\n self.fileIO.start()\n\n # Start the 'listener' workers... keep track of each\n # listener instance so that we can selectively stop\n # them later.\n for i in range(0, self.listeners):\n self.l_listener.append(Listener(\n id = i,\n context = self.zmq_context,\n DB = self._ptree,\n DBpath = self.str_DBpath,\n http = self.b_http,\n within = self,\n listenerSleep = self.listenerSleep,\n debugToFile = self.b_debugToFile,\n debugFile = self.str_debugFile))\n self.l_listener[i].start()\n\n # Use built in queue device to distribute requests among workers.\n # What queue device does internally is,\n # 1. Read a client's socket ID and request.\n # 2. Send socket ID and request to a worker.\n # 3. Read a client's socket ID and result from a worker.\n # 4. Route result back to the client using socket ID.\n self.dp.qprint(\"*******before zmq.device!!!\")\n try:\n zmq.device(zmq.QUEUE, self.socket_front, self.socket_back)\n except:\n self.dp.qprint('Hmmm... 
some error was caught on shutting down the zmq.device...')\n self.dp.qprint(\"*******after zmq.device!!!\")", "def do_work(self):\n raise NotImplementedError", "def __init__(self):\n self.raw_wires = PyWires.WireNetwork();\n self.__initialize_wires();", "def _setup_workers(self, num_workers):\n self.pool = []\n\n for _ in range(num_workers):\n self.pool.append(Thread(target=self.threadloop))\n\n for a_thread in self.pool:\n a_thread.setDaemon(True)\n a_thread.start()", "def startup(self,context):\n master_socket = int(12345)\n self.task_queue = context.InputQueue\n self.result_queue = context.OutputQueue\n manager = Manager()\n self.dict_position = manager.dict()\n self.dict_cycle = manager.dict()\n self.dict_worker_info = manager.dict()\n\n TaskManager.register('get_job_queue',\n callable = lambda:self.task_queue)\n TaskManager.register('get_result_queue',\n callable = lambda:self.result_queue)\n TaskManager.register('get_data',\n callable = lambda:self.dict_position)\n TaskManager.register('get_cycle',\n callable = lambda:self.dict_cycle)\n TaskManager.register('set_worker_info',\n callable = lambda:self.dict_worker_info)\n self.m = TaskManager(address = ('', master_socket),\n authkey = b'secret')\n\n\n thread = Thread(target=self.runServer)\n thread.start()", "def initialize(self, setting):\n\n # record type mappings \n for worker in setting[\"workers\"]:\n wid = worker[\"id\"]\n flavor = worker[\"flavor\"]\n self.worker_flavor[wid] = flavor\n self.workers[wid] = Worker(wid, self.mode)\n\n self.workload = [0 for _ in range(len(self.workers))]\n\n # record neighboring nodes \n for u, v in setting[\"neighbor_map\"]:\n self.neighbors[u].add(v) \n self.neighbors[v].add(u)\n\n self.initialized = True", "def do_work(self):", "def initzmq(self):\n\n if \"topics\" not in self.configData:\n raise Exception(\"Topics not found in %s\" % self.configPath)\n\n for topic in self.configData['topics']:\n addr = self.gen_address(topic['protocol'], topic['address'],\n topic['port'])\n socket = self.build_socket(topic['paradigm'], topic['topic'], addr)\n self.topics[topic['name']] = socket", "def __init__(self, usocket, starting_point, allow_design):\n self.queue = sundaytasks.utils.get_plugins()\n self.extensions = sundaytasks.utils.get_extensions()\n self.starting_point = starting_point\n self.instance = IOLoop.instance()\n self._allow_design = allow_design\n unix_socket = netutil.bind_unix_socket(usocket)\n netutil.add_accept_handler(unix_socket, self.accept)", "def test_workerConnectionPoolPerformWork(self):\n clock = Clock()\n peerPool = PeerConnectionPool(clock, None, 4322, schema)\n factory = peerPool.workerListenerFactory()\n\n def peer():\n p = factory.buildProtocol(None)\n t = StringTransport()\n p.makeConnection(t)\n return p, t\n\n worker1, _ignore_trans1 = peer()\n worker2, _ignore_trans2 = peer()\n\n # Ask the worker to do something.\n worker1.performWork(schema.DUMMY_WORK_ITEM, 1)\n self.assertEquals(worker1.currentLoad, 1)\n self.assertEquals(worker2.currentLoad, 0)\n\n # Now ask the pool to do something\n peerPool.workerPool.performWork(schema.DUMMY_WORK_ITEM, 2)\n self.assertEquals(worker1.currentLoad, 1)\n self.assertEquals(worker2.currentLoad, 1)", "def run(self):\n try:\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n sock.connect(self.address)\n\n #send dummy data\n sock.sendall(bytes(\"Give me\", \"utf-8\"))\n received = sock.recv(1024)\n while True:\n data = sock.recv(1024)\n if not data: break\n received += data\n lymphocytes = pickle.loads(received)\n 
self.lymphocytes_setter(lymphocytes)\n except ConnectionRefusedError:\n #Don't bother. May be it's better to add more logic to determine\n #permanent connection errors.\n pass\n finally:\n sock.close()", "def setupClass(cls):\n cls._tmp_dir = tempfile.mkdtemp()\n cls.test_filepath = os.path.join( cls._tmp_dir, \"test_data.h5\" )\n cls._generate_testdata_h5(cls.test_filepath)\n cls.server_proc, cls.shutdown_event = cls._start_mockserver( cls.test_filepath, same_process=True )\n cls.client_connection = httplib.HTTPConnection( \"localhost:8000\" )", "def start_workunit(self, workunit):\r\n pass", "def start_workunit(self, workunit):\r\n pass", "def prepare(self):\n if self.prepared:\n return\n self.socket.listen()\n for name in self.socket.getSocketNames():\n self.serverEventHandler.preServe(name)\n for _ in xrange(self.threads):\n thread = Worker(self.tasks)\n thread.setDaemon(True)\n thread.start()\n\n for fileno in self.socket.handles:\n self.poller.read(fileno)\n self.poller.read(self._read.fileno())\n\n self.prepared = True", "def start(self):\n if not self._worker:\n # the worker might be already created in case of deserialization\n self._worker = APIWorker(self.queue)\n self._worker.start()", "def _worker(self, args):\n pass", "def server_activate(self):\n\t\tself.socket.listen(self.request_queue_size)", "def __init__(self):\n self._shutdown_lock = threading.Lock()\n self._work_queue = queue.Queue()\n self.ftp_server = ftpclient.setup()", "def start(self):\n\n address = (socket.gethostbyname(self.hostname), self.port)\n logger.info(\"Connecting to %r\" % (address,))\n self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self._socket.connect(address)\n self._start_processors()\n return self", "def _init_io_state(self):\n self.io_loop.add_handler(self._shadow_sock, self._handle_events, self._READ)\n self._call_later(0, self._handle_events)", "def __init__(self, addr):\r\n asyncore.dispatcher.__init__(self)\r\n self.accept_channel = None\r\n self.addr = addr\r\n self.create_socket(socket.AF_INET, socket.SOCK_STREAM)\r\n self.bind(addr)\r\n self.listen(5)\r\n \r\n # Start the asyncore polling loop if it's not already running\r\n if not asyncore_loop.running:\r\n stackless.tasklet(asyncore_loop)()", "def test_setup_sync(self):\n worker_helper = WorkerHelper()\n self.assertEqual(worker_helper.setup(), None)", "async def setup_event(self):\n # We may need stricter conditions, however, it seems likely that the\n # following is sufficient as:\n # a) The first message that will cause an update for both the order\n # websocket and the market data websocket would be a full, atomic\n # update. In other words, it seems that a single message is used\n # to send the full initial state for both sockets. 
If it too\n # multiple, then a more complex setup lock would be needed.\n # b) As we don't notify subscribers on heartbeats or subscription\n # acknowledgements, we can be certain that marking the client as\n # setup on the first received message is not premature, even if it\n # is just a heartbeat.\n await self._market_data_sock_info.ready.wait()\n if self._authenticate:\n await self._orders_sock_info.ready.wait()", "def initialize_socket(self):\n try:\n self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n self.sock.bind((self._host, self._port))\n self.sock.listen(10)\n except socket.error, (value, message):\n if self.sock:\n self.sock.close()\n # TODO: LOG and provide means for graceful failure\n print \"Unable to open socket: \" + message\n print \"Error value: \" + str(value)", "def initialize_socket(self):\n try:\n self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n self.sock.bind((self.host, self.port))\n self.sock.listen(10)\n except socket.error, (value, message):\n if self.sock:\n self.sock.close()\n # TODO: LOG and provide means for graceful failure\n print \"Unable to open socket: \" + message\n print \"Error value: \" + str(value)", "def setUp(self) -> None:\n local_sock, remote_sock = socketpair() # We apparently can't use family=AF_INET on Linux\n local_sock.settimeout(1.0)\n remote_sock.settimeout(1.0)\n self.inverter = Inverter(local_sock, None)\n # This sock mimics the actual inverter, i.e. the remote side of the\n # connection. Send messages on it to mimic the actual inverter sending\n # messages to the Inverter class.\n self.sock = remote_sock", "def init(self):\n self.dispatcher.start()\n self.replyer.start()", "def __init__(self, poller=None):\n threading.Thread.__init__(self)\n self.name = \"WebSocketManager\"\n self.lock = threading.Lock()\n self.websockets = {}\n self.running = False\n\n if poller:\n self.poller = poller\n else:\n if hasattr(select, \"epoll\"):\n self.poller = EPollPoller()\n logger.info(\"Using epoll\")\n else:\n self.poller = SelectPoller()\n logger.info(\"Using select as epoll is not available\")", "def _set_socket(self, socket):\n socket.setblocking(False)\n self.socket = socket\n self.socket_lock = Lock()\n self.send_queue = Queue()", "def initialize(self):\n LOGGER.info('Set %d initializing...', self.port_set)\n # There is a race condition here with ovs assigning ports, so wait a bit.\n time.sleep(2)\n shutil.rmtree(self.tmpdir, ignore_errors=True)\n networking_name = 'gw%02d' % self.port_set\n networking_port = self.pri_base + self.NETWORKING_OFFSET\n LOGGER.debug(\"Adding networking host on port %d\", networking_port)\n cls = docker_host.make_docker_host('daq/networking', prefix='daq', network='bridge')\n try:\n self.networking = self.runner.add_host(networking_name, port=networking_port,\n cls=cls, tmpdir=self.tmpdir)\n self._create_config(self.networking.tmpdir)\n self.record_result('startup')\n except Exception as e:\n self._state_transition(_STATE.ERROR)\n self.record_result('startup', exception=e)" ]
[ "0.74050635", "0.6771996", "0.66032565", "0.64634144", "0.6445292", "0.62212235", "0.6142211", "0.6116784", "0.6084497", "0.6056881", "0.5957323", "0.59568125", "0.5929769", "0.5915095", "0.5910893", "0.5893372", "0.5853307", "0.5753932", "0.5749135", "0.57407844", "0.57199585", "0.5707923", "0.5697508", "0.5696799", "0.5664185", "0.56582713", "0.56572866", "0.565072", "0.5644031", "0.5639019", "0.5632996", "0.56273353", "0.56272715", "0.56196046", "0.5611425", "0.55987316", "0.55572695", "0.5555673", "0.5555673", "0.5555065", "0.5552544", "0.55449885", "0.55325353", "0.55315447", "0.55250096", "0.552217", "0.55196893", "0.5518331", "0.55052483", "0.5505097", "0.5501342", "0.5501342", "0.54933685", "0.54909337", "0.54896957", "0.54896957", "0.54797035", "0.54688066", "0.54650235", "0.54598457", "0.54555804", "0.545242", "0.5445339", "0.54385036", "0.5438478", "0.5436614", "0.54332477", "0.5428302", "0.54233545", "0.5416568", "0.54128724", "0.5410232", "0.5406877", "0.53854895", "0.538073", "0.5378499", "0.53713316", "0.5367953", "0.536416", "0.53622985", "0.5357195", "0.53546405", "0.53546405", "0.5351058", "0.534942", "0.5347764", "0.53474075", "0.53458583", "0.53267515", "0.5318564", "0.53068286", "0.5298039", "0.5291701", "0.52902794", "0.52663827", "0.525904", "0.5247709", "0.52350426", "0.522924", "0.52267015" ]
0.8267066
0
Announce the work address until we get some sort of a request
def send_announcement_get_work_request(self):
    self.analysis_id = uuid.uuid4().hex
    while True:
        self.announce_socket.send_json(((self.analysis_id, self.work_addr),))
        try:
            return self.awthread.recv(self.work_socket, 250)
        except six.moves.queue.Empty:
            continue
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def request_work(self):\n self.workRequested.emit()", "def make_work_request(self):\n request = StoreRequest()\n self.bb_client.read_wait(request, self.handle_request)", "def answer_waiting_call(self) -> None:", "def do_work(self):", "async def send_referral(self) -> None:\n # pause logic\n if not self.running.is_set():\n self.add_to_output(\"Paused...\")\n await self.running.wait()\n # check if the referral file exists\n if os.path.exists(REFERRAL_FILE_S9):\n try:\n # tell the user we are sending the referral\n self.add_to_output(\"Sending referral IPK...\")\n # create ssh connection to miner\n await self.send_file(REFERRAL_FILE_S9, '/tmp/referral.ipk')\n await self.send_file(CONFIG_FILE, '/etc/bosminer.toml')\n\n await self.run_command(f'opkg install /tmp/referral.ipk && /etc/init.d/bosminer restart')\n # tell the user the referral completed\n self.add_to_output(f\"Referral configuration completed...\")\n except OSError as e:\n print(e)\n self.add_to_output(f\"Unknown error...\")\n else:\n self.add_to_output(\"No referral file, skipping referral install\")", "def test_answer_background(network):\n work_item_id = bfq.ipOwners().answer(background=True)\n bf_get_work_status(work_item_id)", "def work(self, job):\n pass", "def receive_completion_notification(self, data, *args, **kwargs):\n self.log.debug('Received completion message from {ip}'.format(\n ip = data['ip_addr']\n ))\n self.completion_messages.add(data['ip_addr'])", "def _work(self):\n return \"Task Done\"", "def link_up_respond(self, neighbor):\n neighbor.is_killed = False\n neighbor.send_timer = time.time()\n neighbor.kill_timer = time.time()\n if self.update_dv():\n for name in self.neighbors:\n self.neighbors[name].update_ready = True\n self.neighbors[name].send_timer = time.time()", "def alert_for_pending_mails_1(request):\n\n\tprint(\">>>>>>>>>>>>>>>>>>>>>Beginning of alert_for_pending_mails_1<<<<<<<<<<<<<<<<<<<<\")\n\n\tThread(target=alert_for_pending_mails_1_worker).start()\n\n\tprint(\">>>>>>>>>>>>>>>>>>>>>End of alert_for_pending_mails_1<<<<<<<<<<<<<<<<<<<<\")\n\n\tresponse = {}\n\n\tresponse[\"info_to_contact\"] = \"Ok\"\n\n\treturn response", "def address(self):\n ...", "def director_address():\n while True:\n #addr = etcd.watch(\"director_publish_addr\")\n #director_address = addr.value\n break", "async def announce(self, ctx, *, msg):\n if self._announce_msg is not None:\n await self.bot.say(\"Already announcing, wait until complete to\"\n \" issue a new announcement.\")\n else:\n self._announce_msg = msg", "def test_solicitation_no_reply_resend(self):\n waittime = self.autoconflayer._solicitation_timeout * 4.0\n self.autoconflayer.start_process()\n interest = Interest(Name('/foo/bar'))\n self.queue_from_higher.put([None, interest])\n\n # Catch all data the autoconfig layer sends downwards for 3 seconds\n deadline = datetime.utcnow() + timedelta(seconds=waittime)\n tolower = []\n while datetime.utcnow() < deadline:\n try:\n data = self.queue_to_lower.get(timeout=waittime/10)\n tolower.append(data)\n except queue.Empty:\n pass\n # Make sure the broadcast face was actually created and get its face id\n bcfid = self.faceidtable.get_or_create_faceid(AddressInfo(('127.255.255.255', 4242), 0))\n self.assertIsNotNone(bcfid)\n # Make sure the forwarder solicitation was sent more than once\n solictiation = Interest(Name('/autoconfig/forwarders'))\n solictiation_count = len([1 for data in tolower if data == [bcfid, solictiation]])\n self.assertGreater(solictiation_count, 1)", "def send_completion_notification(self):\n 
msg_data={'ip_addr': self.this_drone[0], 'id': self.drone_id}\n drone_ip, drone_port = self.master_drone\n self.log.debug('Sending completion ({msg}) to summoner at {ip}:{port}'.format(\n msg=msg_data,\n ip=drone_ip,\n port=drone_port\n ))\n\n send(\n drone_ip=drone_ip,\n mission_id=self.mission_id,\n endpoint='/receive-completion',\n data=msg_data,\n skyserve_port=self.this_drone[1]\n #async=True\n )", "def is_ready(self, addr: int, /) -> bool:", "def startworking():\r\n #In the future have the manager program or from the website implement this arguments to a route\r\n #the program will download the file from the website\r\n global exe_name\r\n global Task_Conditional\r\n task_data = None\r\n while task_data is None:\r\n task_data = recieve_data_from_server(\"get_task\")\r\n if task_data is None:\r\n time.sleep(5)\r\n else:\r\n exe_name = task_data[\"exe_name\"]\r\n print('Working on the task \"{}\"'.format(exe_name))\r\n get_file(exe_name)\r\n Task_Conditional = task_data[\"Task_conditional\"]\r\n print(\"loading\")\r\n t1 = time.time()\r\n task_divider(task_data[\"first_num\"], task_data[\"last_num\"])\r\n t2 = time.time()\r\n print(\"ready {}\".format(t2-t1))", "async def request(self) -> str:\n self._comment = [None] * 3\n await self.addr_conn.conn.segment_scan_completed_event.wait()\n self.comment_known.clear()\n for trh in self.trhs:\n trh.activate()\n await self.comment_known.wait()\n return self.comment", "def _run_notice_event(look_for_work):\n while True:\n try:\n found = look_for_work()\n if not found:\n break\n except ConcurrentUpdate as e:\n # retry if we had a race-condition while claiming work\n sys.stderr.write('Handling ErmrestConcurrentUpdate exception...\\n')\n pass", "def work(self, request):\n raise NotImplementedError", "def notify(self) -> None:\n pass", "def notify(self) -> None:\n pass", "def hook_request_assistance(self, data):\n request_id = data[\"request_id\"]\n log.info(\"NEW request for assistance %s\", request_id)\n volunteers_to_contact = data[\"volunteers\"]\n\n needs = \"\"\n for item in data[\"needs\"]:\n needs += f\"- {item}\\n\"\n\n assistance_request = c.MSG_REQUEST_ANNOUNCEMENT % (data[\"address\"], needs)\n\n for chat_id in volunteers_to_contact:\n if chat_id not in self.updater.persistence.user_data:\n log.debug(\"User %s hasn't added the updater to their contacts, skipping.\", chat_id)\n continue\n\n current_state = self.updater.persistence.user_data[chat_id].get(\"state\", None)\n\n if current_state in [c.State.REQUEST_IN_PROGRESS, c.State.REQUEST_ASSIGNED]:\n log.debug(\"Vol%s is already working on a request, skippint\")\n continue\n\n self.updater.bot.send_message(\n chat_id=chat_id,\n text=assistance_request,\n parse_mode=ParseMode.MARKDOWN,\n reply_markup=ReplyKeyboardMarkup(k.initial_responses, one_time_keyboard=True),\n )\n\n # update this user's state and keep the request_id as well, so we can use it later\n updated_state = {\"state\": c.State.REQUEST_SENT, \"reviewed_request\": request_id}\n self.updater.dispatcher.user_data[chat_id].update(updated_state)\n\n self.updater.dispatcher.bot_data.update({request_id: data})\n self.updater.dispatcher.update_persistence()", "def _future_work_():\n pass", "def feed(self, instruction):\n assert self.future_inst is None, 'BranchUnit fed when full'\n self.future_inst = instruction\n self.future_timer = max(0, instruction.DELAY - 1)", "def run(self):\n while True:\n try:\n target_url = self.TO_PROCESS.get(block=True, timeout=4)\n if target_url[\"url\"].startswith(\"mailto:\"):\n email = 
target_url[\"url\"][len(\"mailto:\") :]\n self.mailto_links.append(email)\n\n elif target_url[\"url\"] not in self.visited:\n self.visited.add(target_url[\"url\"])\n job = self.pool.submit(\n self.load_url, target_url, self.config.timeout\n )\n job.add_done_callback(self.handle_future)\n except Empty:\n return\n except Exception as e:\n print(e)", "def find_address():\n while True:\n business_object = query_business_name()\n if business_object == \"back\":\n return\n elif business_object is None:\n continue\n\n print(f'{business_object[\"name\"]}\\'s address is:'\n f'{business_object[\"address\"]}, {business_object[\"city\"]} '\n f'{business_object[\"state\"]}')", "def test_lookup_some_pending_some_contacted(self):\n lookup = Lookup(FindValue, self.target, self.node, self.event_loop)\n # Reset in order to manually create the correct state.\n lookup.pending_requests = {}\n lookup.contacted = set()\n self.node.send_find.call_count = 0\n\n # Add a single pending request.\n pending_uuid = str(uuid.uuid4())\n pending_future = asyncio.Future()\n lookup.pending_requests[pending_uuid] = pending_future\n # Add a single contact to the contacted list.\n lookup.contacted.add(lookup.shortlist[0])\n # Sanity check.\n self.assertEqual(1, len(lookup.pending_requests))\n self.assertEqual(1, len(lookup.contacted))\n # Re-run _lookup and check state has been correctly updated.\n lookup._lookup()\n self.assertEqual(ALPHA - 1, self.node.send_find.call_count)\n self.assertEqual(ALPHA, len(lookup.pending_requests))\n self.assertEqual(ALPHA, len(lookup.contacted))", "def jmp(self, addr):\n\n self.reg.ip = addr", "def arp_announce(self):\n pass", "def track_job_to_completion(ip_address, headers, job_id, state):\n\tjob_status_map = {\n\t\t\"2020\": \"Scheduled\",\n\t\t\"2030\": \"Queued\",\n\t\t\"2040\": \"Starting\",\n\t\t\"2050\": \"Running\",\n\t\t\"2060\": \"Completed\",\n\t\t\"2070\": \"Failed\",\n\t\t\"2090\": \"Warning\",\n\t\t\"2080\": \"New\",\n\t\t\"2100\": \"Aborted\",\n\t\t\"2101\": \"Paused\",\n\t\t\"2102\": \"Stopped\",\n\t\t\"2103\": \"Canceled\"\n\t}\n\tstatus_mapping = {\n\t\t\"On\": \"Powered On\",\n\t\t\"Off\": \"Powered Off\",\n\t\t\"Cold Boot\": \"Power Cycle\",\n\t\t\"Warm Boot\": \"Reset\",\n\t\t\"ShutDown\": \"Shutdown\"\n\t}\n\n\tmax_retries = 20\n\tsleep_interval = 30\n\tfailed_job_status = [2070, 2090, 2100, 2101, 2102, 2103]\n\tjob_url = 'https://%s/api/JobService/Jobs(%s)' % (ip_address, job_id)\n\tloop_ctr = 0\n\tjob_incomplete = True\n\tprint(\"Polling %s to completion ...\" % job_id)\n\twhile loop_ctr < max_retries:\n\t\tloop_ctr += 1\n\t\ttime.sleep(sleep_interval)\n\t\tjob_resp = requests.get(job_url, headers=headers, verify=False)\n\t\tif job_resp.status_code == 200:\n\t\t\tjob_status = str((job_resp.json())['LastRunStatus']['Id'])\n\t\t\tjob_status_str = job_status_map[job_status]\n\t\t\tprint(\"Iteration %s: Status of %s is %s\" %\n\t\t\t (loop_ctr, job_id, job_status_str))\n\t\t\tif int(job_status) == 2060:\n\t\t\t\tjob_incomplete = False\n\t\t\t\tprint(\"%s operation successful\" %status_mapping[state])\n\t\t\t\tbreak\n\t\t\telif int(job_status) in failed_job_status:\n\t\t\t\tjob_incomplete = False\n\t\t\t\tif job_status_str == \"Warning\":\n\t\t\t\t\tprint(\"Completed with errors\")\n\t\t\t\telse:\n\t\t\t\t\tprint(\"%s operation failed\" %status_mapping[state])\n\t\t\t\tjob_hist_url = str(job_url) + \"/ExecutionHistories\"\n\t\t\t\tjob_hist_resp = requests.get(job_hist_url, headers=headers, verify=False)\n\t\t\t\tif job_hist_resp.status_code == 
200:\n\t\t\t\t\tget_execution_detail(job_hist_resp, headers, job_hist_url)\n\t\t\t\tbreak\n\t\telse:\n\t\t\tprint(\"Unable to poll status of %s - Iteration %s \" % (job_id, loop_ctr))\n\tif job_incomplete:\n\t\tprint(\"Job %s incomplete after polling %s times...Check status\" %\n\t\t (job_id, max_retries))", "def test_forwarder_solicitation_sent(self):\n waittime = 3.0\n self.autoconflayer.start_process()\n # Pass an interest to the autoconfig layer to trigger forwarder solicitation\n interest = Interest(Name('/foo/bar'))\n self.queue_from_higher.put([None, interest])\n\n # Catch all data the autoconfig layer sends downwards for 3 seconds\n deadline = datetime.utcnow() + timedelta(seconds=waittime)\n tolower = []\n while datetime.utcnow() < deadline:\n try:\n data = self.queue_to_lower.get(timeout=waittime/10)\n tolower.append(data)\n except queue.Empty:\n pass\n # Make sure the broadcast face was actually created and get its face id\n bcfid = self.faceidtable.get_or_create_faceid(AddressInfo(('127.255.255.255', 4242), 0))\n self.assertIsNotNone(bcfid)\n # Make sure a forwarder solicitation was sent downwards\n solictiation = Interest(Name('/autoconfig/forwarders'))\n self.assertIn([bcfid, solictiation], tolower)", "def on_iteration(self):\n self.send_pending_requests()\n super().on_iteration()", "def __call__(self):\n hub.sleep(random.randint(1, self.interval))\n while True:\n self.send_req()\n self.reply_pending = True\n hub.sleep(self.interval)\n if self.reply_pending:\n self.no_response()", "def console_request(self, evt, proto):\n if evt.kind == sugar.transport.ServerMsgFactory.TASK_RESPONSE:\n threads.deferToThread(self.on_broadcast_tasks, evt, proto)", "def request_server_address(self, connection):\n address_request = {'type':'sident_verify',\n 'timestamp':calendar.timegm(time.gmtime())}\n self._send_queue.put((address_request, connection))\n return True", "def add_work(self, identifier, work):\n self.works.append((identifier, work))", "def check_in(self):\n etree = self._encapsulate_request(self._generate_ping())\n self.zmq_scheduler_request_queue.put_nowait(etree)", "def jmp_to_addr(self):\n self.pc = self.opcode & 0x0FFF\n logger.info(\"Jumped to address at {}\".format(hex(self.pc)))\n # PC gets incremented after every instruction this counteracts that\n self.pc -= 2", "def announce(self):\n self.notify(self.newAgent)\n if not self.agent.is_someone_subscribed():\n self.fail(cause=\"Noone Interested\")", "def notify(self):\n\n def remind():\n \"\"\"\n this function shows a pop-up using windows notification\n \"\"\"\n ntftion.notify('reminder', f\"{self.notification}:\\n{self.work_name}\\n{self.work_datetime.hour}: \"\n f\"{self.work_datetime.minute} \", app_icon='reminder.ico', timeout=3)\n\n self.eisenhower_priority()\n if self.priority:\n while dt.now().day <= self.time_ntf.day and self.status != \"done\":\n if self.priority == 1 and dt.now().time() >= self.time_ntf.time():\n remind()\n time.sleep(5*60)\n\n elif (self.priority == 2) and ((dt.now().hour == self.time_ntf.hour)\n and (dt.now().time().minute == self.time_ntf.time().minute)):\n remind()\n break\n elif self.priority == 3 and dt.now().time().hour == 18:\n remind()\n time.sleep(24 * 3600)\n elif self.priority == 4 and dt.now().weekday() == 6:\n remind()\n time.sleep(7 * 24 * 3600)\n else:\n pass", "def request() -> None:\n\t_flag.set()", "def Add_to_waitlist(self, email):\n if email not in self.Waitlist:\n self.Waitlist.add(email)\n else:\n raise PreexistingAddressException(email)", "async def request(self):\n # TODO: 
validate the state\n message = Message(self.name_path)\n await self.issue_command(Command(message))", "def doctest_BackgroundWorkerThread_scheduleNextWork():", "def track_job_to_completion(ip_address, headers, job_id):\n job_status_map = {\n \"2020\": \"Scheduled\",\n \"2030\": \"Queued\",\n \"2040\": \"Starting\",\n \"2050\": \"Running\",\n \"2060\": \"Completed\",\n \"2070\": \"Failed\",\n \"2090\": \"Warning\",\n \"2080\": \"New\",\n \"2100\": \"Aborted\",\n \"2101\": \"Paused\",\n \"2102\": \"Stopped\",\n \"2103\": \"Canceled\"\n }\n\n max_retries = 20\n sleep_interval = 60\n failed_job_status = [2070, 2090, 2100, 2101, 2102, 2103]\n job_url = 'https://%s/api/JobService/Jobs(%s)' % (ip_address, job_id)\n loop_ctr = 0\n job_incomplete = True\n print(\"Polling %s to completion ...\" % job_id)\n while loop_ctr < max_retries:\n loop_ctr += 1\n time.sleep(sleep_interval)\n job_resp = requests.get(job_url, headers=headers, verify=False)\n if job_resp.status_code == 200:\n job_status = str((job_resp.json())['LastRunStatus']['Id'])\n print(\"Iteration %s: Status of %s is %s\" % (loop_ctr, job_id, job_status_map[job_status]))\n if int(job_status) == 2060:\n job_incomplete = False\n print(\"Completed updating firmware successfully ... Exiting\")\n break\n elif int(job_status) in failed_job_status:\n job_incomplete = False\n print(\"Update job failed ... \")\n job_hist_url = str(job_url) + \"/ExecutionHistories\"\n job_hist_resp = requests.get(job_hist_url, headers=headers, verify=False)\n if job_hist_resp.status_code == 200:\n job_history_id = str((job_hist_resp.json())['value'][0]['Id'])\n job_hist_det_url = str(job_hist_url) + \"(\" + job_history_id + \")/ExecutionHistoryDetails\"\n job_hist_det_resp = requests.get(job_hist_det_url,\n headers=headers,\n verify=False)\n if job_hist_det_resp.status_code == 200:\n print(job_hist_det_resp.text)\n else:\n print(\"Unable to parse job execution history .. Exiting\")\n break\n else:\n print(\"Unable to poll status of %s - Iteration %s \" % (job_id, loop_ctr))\n if job_incomplete:\n print(\"Job %s incomplete after polling %s times...Check status\" % (job_id, max_retries))", "async def handle_tr_inform(self, msg, recv):\n\n assert msg is not None\n self.add_behaviour(self.WorkBehaviour())", "def do_work(self):\n raise NotImplementedError", "def doWork(self):\n\n if self.sendNextMessage():\n return\n\n # Load data from the URL. If the primary key is new, add it to the\n # new_data list. We'll sort this list by time afterwards and post\n # the oldest message.\n data = loads(urlopen(self.url).read())\n for donation in data:\n timestamp = strict_rfc3339.rfc3339_to_timestamp(donation['time'] \\\n + \"-07:00\")\n donation['timestamp'] = timestamp\n if donation['pk'] not in self.seen_keys:\n self.new_data.append(donation)\n\n # Re-sort the data by timestamp\n self.new_data = sorted(self.new_data,\n key=lambda donation: donation['timestamp'])\n\n # Special startup code. 
If seen_keys is empty and we have new data,\n # stuff all but the last /reportlast/ variables into the \"seen_keys\"\n # set, to prevent spamming the channel.\n if len(self.seen_keys) == 0:\n trimmed_list = []\n\n # Only allow recent donations to be posted, and limit the list\n # to /reportlast/ items.\n # If an object is ignored, put its pk in the seen_keys set.\n now = time.time()\n for obj in self.new_data:\n newobj = {}\n newobj['timestamp'] = obj['timestamp']\n newobj['name'] = obj['name'].encode('ascii', 'ignore')\n newobj['amount'] = obj['amount'].encode('ascii', 'ignore')\n newobj['game'] = obj['game'].encode('ascii', 'ignore')\n newobj['total'] = obj['total'].encode('ascii', 'ignore')\n newobj['pk'] = obj['pk']\n if (now - float(obj['timestamp'])) >= self.ignoreolderthan:\n self.seen_keys.add(obj['pk'])\n elif len(trimmed_list) >= self.reportlast:\n self.seen_keys.add(obj['pk'])\n else:\n trimmed_list.append(newobj)\n\n self.new_data = trimmed_list\n print \"We're starting up. Going to announce, in order:\"\n for don in self.new_data:\n print \"Total: $%s Name: %s\" % (don['total'], don['name'])\n \n self.sendNextMessage()", "def born(self, data):\n lc = LoopingCall(get_metrics, None)\n lc.start(2)\n reactor.listenUDP(self.UDP_PORT, NotificationUDPProcessor())\n reactor.listenMulticast(self.MULTICAST_PORT,\n MunticastNotificationProcessor(self.MULTICAST_IP), # add multicast 'born' processing\n listenMultiple=True)\n endpoints.serverFromString(reactor, \"tcp:21999\").listen(EchoFactory())", "def on_nicknameinuse(self, raw_msg, busy_nickname, **kwargs):", "def doctest_BackgroundWorkerThread_getTransactionNote():", "def fetch(self):\r\n if self.wp_op is None: # If we were already doing a list or save, just restart the fetch without changing the operation\r\n self.wp_op = \"fetch\"\r\n self.master.waypoint_request_list_send()", "def notify_solution(self, sol):\n pass # pragma: no cover", "def notify_solution(self, s):\n pass # pragma: no cover", "def request_uplink_info(self, payload):\n\n # This request is received from an agent when it run for the first\n # Send the uplink name (physical port name that connectes compute\n # node and switch fabric),\n agent = payload.get('agent')\n config_res = self.get_agent_configurations(agent)\n LOG.debug('configurations on %(agent)s is %(cfg)s', (\n {'agent': agent, 'cfg': config_res}))\n try:\n self.neutron_event.send_msg_to_agent(agent,\n constants.UPLINK_NAME,\n config_res)\n except (rpc.MessagingTimeout, rpc.RPCException, rpc.RemoteError):\n LOG.error(_LE(\"RPC error: Failed to send uplink name to agent.\"))", "def link(address):", "def worker(self):\n\t\[email protected](\"MISSION_PROPERTY_CHANGED\")\n\t\[email protected](\"DOWNLOAD_EP_COMPLETE\")\n\t\tdef dummy():\n\t\t\t\"\"\"Set the edit flag after mission changed.\"\"\"\n\t\t\tself.edit = True\n\n\t\[email protected](\"WORKER_DONE\")\n\t\tdef dummy():\n\t\t\t\"\"\"Save missions after the thread terminate.\"\"\"\n\t\t\tself.save()\n\n\t\tself.load()\n\t\twhile True:\n\t\t\tself.wait(setting.getint(\"autosave\", 5) * 60)\n\t\t\tself.save()", "def fetch_pending(self):\n pending = self.open(self.urls['pending'])\n soup = BeautifulSoup(pending.read())", "def startUpdateAddressTool(self):\n\n self.iface.mapCanvas().setMapTool(self._updateaddtool)\n self._updateaddtool.setEnabled(True)", "def notify(self, ref_output=None, moves_made=None):\n pass", "def fire_pending_jobs(self, mid: str) -> None:\n self.log.debug(\"Checking for pending jobs on {}\", mid)\n target = PDataContainer(id=mid, 
host=\"\") # TODO: get a proper target with the hostname\n if self.get_client_protocol(mid) is not None:\n for job in self.jobstore.get_scheduled(target):\n event = type(\"event\", (), {})\n event.jid = job.jid\n event.fun = job.uri\n event.arg = json.loads(job.args)\n threads.deferToThread(self.fire_event, event=event, target=target)", "def waiting_confirmation(self):", "def place_call_offhold(self) -> None:", "def afterWork(self):\n pass", "def connection_responded(self):\n complete = False\n self.workersResponded += 1\n if self.workersResponded == len(self.equipment_model.get_addr_list()):\n complete = True\n self.parent.connection_responded(complete)", "async def view_receive_addresses(w):\n title = \"Proof Wallet: View Receive Addresses\"\n start = 0\n N = 10\n while True:\n external = w.deriveaddresses(start, start + N - 1, 0)\n internal = w.deriveaddresses(start, start + N - 1, 1)\n\n # display receive addreses\n addr_str = \"Derivation | Receive Address\\n\"\n for i, addr in enumerate(external):\n addr_str += f\"m/0/{str(i + start)} | \"\n addr_str += f\"{color_text(addr, GREEN_COLOR, fg)}\\n\"\n\n # display change addreses\n addr_str += f\"\\nDerivation | Change Address\\n\"\n for i, addr in enumerate(internal):\n addr_str += f\"m/1/{str(i + start)} | \"\n addr_str += f\"{color_text(addr, YELLOW_COLOR, fg)}\\n\"\n\n msg = f\"\"\"{title}\n\nAddresses {start} to {start + N - 1}\n\n{addr_str}\n\nControls\n'n' -- Next {N} addresses\n'p' -- Previous {N} addresses\n'x' -- Go back to wallet menu\n\"\"\"\n ch = await ux_show_story(msg, ['n', 'p', 'x'])\n if ch == 'n':\n start = start + N\n elif ch == 'p' and start > 0:\n start = start - N\n elif ch == 'x':\n return", "def _send_and_response(self, addr, msg):\n self._namefixer(msg)\n return send_and_receive(addr, msg, 30) # manual timeout !!!!! 
fix it!", "def doWork():\n #rVal = True\n rc = 0\n printInfo()\n \n filler = getFillerData()\n #debug( \"doWork(): filler = \" + filler )\n requestServerFile( filler )\n\n return rc", "def on_notify(self, name):\r\n pass", "def _wait_what(self, expected):\r\n \r\n self._msg_server(cb.WAITWHATSERVER % (expected))", "def get_worker_addresses(self) -> List[str]:", "def resolve(self, address):", "def send_req(self):\n self.n_send_req += 1", "def run(self):\n self.sync_state()\n self.periodic_resync()\n self.lease_relay.start()\n self.notifications.run_dispatch(self)", "def ping(self):\n pass", "def notify_of_availability(\n section, src_email: str, src_pass: str, dest_email: str\n) -> None:\n send_email(\n src_email,\n src_pass,\n dest_email,\n f\"{section['courseReferenceNumber']} OPENED.\",\n f\"{section['subjectCourse']} {section['sequenceNumber']} - CRN: {section['courseReferenceNumber']}\",\n )\n\n print(section[\"subjectCourse\"], section[\"sequenceNumber\"])", "def doctest_BackgroundWorkerThread_forSite():", "async def request(self, ctx: commands.Context, *, feature):\n msg = \"Request from {}:\\n\".format(ctx.author.mention) + feature\n await ctx.bot.pm_owner(content=msg)\n await ctx.message.add_reaction(\"✅\")", "async def request(self) -> str:\n self._name = [None] * 2\n await self.addr_conn.conn.segment_scan_completed_event.wait()\n self.name_known.clear()\n for trh in self.trhs:\n trh.activate()\n await self.name_known.wait()\n return self.name", "def pending(self, pending):\n\n self._pending = pending", "def __work__(self):\n while not self.is_done:\n self.refreshSignal.emit()\n time.sleep(0.05)", "def notify(self, almost):\n self.message += \\\n '------------------ ALMOST EXPIRED ------------------\\n'\n for lo in almost:\n self.message += 'NOTIFIED :: ' + lo.borrower.user.username\n self.message += '\\n'\n notif = Notification(wallet=lo.borrower,\n message_short=\"You have a pending\"\n \" loan which dues tomorrow\",\n message_large=\"You have borrowed \" +\n str(lo.loaned) + \" from \" +\n lo.offer.lender.user.username + \", if you \" +\n \"don't pay by this time tomorrow you will \" +\n \"be banned\")\n notif.save()", "async def pin(ctx):\n if ctx.guild.id == 856613891227910194:\n if ctx.message.reference:\n message = await ctx.channel.fetch_message(ctx.message.reference.resolved.id)\n await message.pin(reason=f'Requested by {ctx.author}')\n else:\n await r(ctx, \"Please reply to a message to do that. Example: https://acatia.needs.rest/kqh6kwe1h9a\")\n else:\n await r(ctx, \"Cannot do that in this server. 
Sorry.\")", "def connectionMade(self):\n print \"connection received from\", self.addr", "def event_loop(self):\n while self.ack is False:\n gevent.sleep(self.loop_interval)\n output_service = self.get_directory_service_proxy().get_service(\"mock-output-service\")\n output_service.put(\"test-worker-work-result\")\n self.ack = True", "def this_needs_work_test_ensure_our_presence(self):\n self.do_test_ensure_our_presence()", "def run(self):\r\n print \"*Ping* We've got a message!\"\r\n # Handle DNS request\r\n resolver = Resolver(self.caching, self.ttl)\r\n aliasRecords = []\r\n addressRecords = []\r\n # Read and resolve the questions one-by-one\r\n questions = self.request.questions\r\n for question in questions:\r\n hostname = question.qname\r\n (hostname, aliases, addresses) = resolver.gethostbyname(hostname)\r\n \r\n for alias in aliases:\r\n aliasData = dns.resource.RecordData.create(Type.CNAME, alias)\r\n aliasRecord = dns.resource.ResourceRecord(hostname, Type.CNAME, Class.IN, 9001, aliasData) # TODO fix ttl\r\n aliasRecords.append(aliasRecord)\r\n for address in addresses:\r\n addressData = dns.resource.RecordData.create(Type.A, address)\r\n addressRecord = dns.resource.ResourceRecord(hostname, Type.A, Class.IN, 9001, addressData)\r\n addressRecords.append(addressRecord)\r\n \r\n # Crafting of the response\r\n respHeader = self.request.header\r\n respHeader.qr = 1\r\n respHeader.qd_count = 0\r\n respHeader.an_count = 1\r\n \r\n respMessage = dns.message.Message(respHeader, [], addressRecords + aliasRecords, [], [])\r\n sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\r\n respMessageByte = respMessage.to_bytes()\r\n sock.sendto(respMessageByte, self.clientAddr)\r\n print \"Ended request: \" + hostname\r\n sock.close()", "def busy(self, flag, message=\"\"): \n return None", "async def pending(self, ctx):\r\n if ctx.guild.id == 445092370006933505:\r\n data = self.config.guild(ctx.guild)\r\n lst = await data.get_raw('neededlist')\r\n description = \"\"\r\n coach = await data.coachid()\r\n coach_role = ctx.guild.get_role(coach)\r\n x = ctx.author.top_role\r\n if x >= coach_role:\r\n for member in lst:\r\n userobj = ctx.guild.get_member(int(member))\r\n description += (str(userobj.mention) + '\\n')\r\n embed = discord.Embed(color=0xFFFF00, title='Coaching Needed by following people', description=description)\r\n embed.set_footer(text=credit)\r\n await ctx.send(embed=embed)\r\n await ctx.send('Type \"{0}coaching done @<player name>\" if the player has been coached or type \"{0}coaching info <@playername>\" to view the details submitted by the user'.format(ctx.prefix))\r\n \r\n else:\r\n await ctx.send(\"You are not allowed to do that\")\r\n\r\n else:\r\n await ctx.send(\"This command only works in the Legend eSports server, join us at: https://discord.gg/GGuCXDn\")", "def send_email(self, new_address):\n s = smtplib.SMTP('smtp.gmail.com:587')\n s.starttls()\n s.login(from_address, password)\n email = MIMEText(\"Received a request for ION-X information from:\\n{}\"\n .format(new_address))\n email['To'] = to_address\n email['From'] = from_address\n email['Subject'] = \"Website Request Received\"\n s.sendmail(from_address, to_address, email.as_string())\n s.quit()", "def send_notification(self):\n s1 = System()\n b1 = Books(\"1984\", \"George Orwell\", \"Harvill Secker\", \"1949\", \"0123456789123\")\n m1 = Members(\"Richard\", \"Blackmore\", \"14-04-1945\", \"Weston\")\n s1.send_notification(\"Please return book\")\n self.assertEqual(m1.get_notifications(), None)\n 
s1.add_resource(b1)\n s1.lending_process(b1, m1)\n s1.send_notification(\"Please return book\")\n self.assertEqual(m1.get_notifications(), \"-Please return boo- \")", "def set_work_socket(self):\n self.analysis_id = uuid.uuid4().hex\n\n def do_set_work_socket(aw):\n aw.work_socket = cellprofiler_core.constants.worker.the_zmq_context.socket(\n zmq.REQ\n )\n aw.work_socket.connect(self.work_addr)\n aw.work_request_address = self.work_addr\n aw.current_analysis_id = self.analysis_id\n\n self.awthread.execute(do_set_work_socket, self.awthread.aw)", "def ack(self, other, city):\n pass", "def test_lookup_none_pending_all_contacted(self):\n lookup = Lookup(FindValue, self.target, self.node, self.event_loop)\n # Put the lookup object in the state to test.\n lookup.pending_requests = {}\n for contact in lookup.shortlist:\n lookup.contacted.add(contact)\n self.node.send_find.call_count = 0\n # Re-run _lookup and test\n lookup._lookup()\n self.assertEqual(self.node.send_find.call_count, 0)", "def pending_address_changes(self, pending_address_changes):\n\n self._pending_address_changes = pending_address_changes", "def test_answer_foreground(network):\n bfq.ipOwners().answer()", "def _wait_for_ip(name, session):\n start_time = datetime.now()\n status = None\n while status is None:\n status = get_vm_ip(name, session)\n if status is not None:\n # ignore APIPA address\n if status.startswith(\"169\"):\n status = None\n check_time = datetime.now()\n delta = check_time - start_time\n log.debug(\n \"Waited %s seconds for %s to report ip address...\", delta.seconds, name\n )\n if delta.seconds > 180:\n log.warning(\"Timeout getting IP address\")\n break\n time.sleep(5)", "def acqstart(self):\n return 0" ]
[ "0.5951986", "0.5589441", "0.5423254", "0.53269106", "0.52976644", "0.52254903", "0.52201986", "0.520611", "0.52030164", "0.5180417", "0.51489556", "0.5138724", "0.51302093", "0.5128945", "0.510327", "0.50935763", "0.50749195", "0.5074746", "0.5072681", "0.505735", "0.50519097", "0.5027512", "0.5027512", "0.501978", "0.5019467", "0.50015664", "0.4992609", "0.49729005", "0.4969123", "0.4968418", "0.4959904", "0.49581507", "0.4947928", "0.49453285", "0.4936511", "0.493598", "0.49311548", "0.4909373", "0.49065053", "0.4904727", "0.49038684", "0.49016428", "0.48988852", "0.48904854", "0.48818907", "0.487352", "0.48731318", "0.48608157", "0.48497558", "0.48281807", "0.4827905", "0.48151377", "0.48141044", "0.48115134", "0.48107803", "0.48105854", "0.48069933", "0.4806037", "0.4803094", "0.47853416", "0.47846377", "0.47840396", "0.47791132", "0.47785813", "0.4776523", "0.47736192", "0.47697192", "0.47676826", "0.47664395", "0.47650436", "0.47641018", "0.4750793", "0.47487336", "0.47475642", "0.4742245", "0.47401986", "0.47361302", "0.473353", "0.472175", "0.47152793", "0.4712901", "0.4710034", "0.47018698", "0.46960327", "0.46879256", "0.46725935", "0.46711788", "0.46708286", "0.46494818", "0.46483594", "0.46385798", "0.463642", "0.46357864", "0.46338573", "0.4633265", "0.46262184", "0.46103296", "0.46087894", "0.4607908", "0.46053365" ]
0.6637995
0
Get an appropriately initialized measurements structure for the good pipeline
def get_measurements_for_good_pipeline(nimages=1, group_numbers=None):
    import cellprofiler_core

    path = os.path.abspath(
        os.path.join(
            os.path.dirname(cellprofiler_core.__file__),
            "..",
            "tests/data/ExampleSBSImages",
        )
    )
    # path = os.path.join(tests.modules.example_images_directory(), "ExampleSBSImages")
    m = cellprofiler_core.measurement.Measurements()
    if group_numbers is None:
        group_numbers = [1] * nimages
    group_indexes = [1]
    last_group_number = group_numbers[0]
    group_index = 1
    for group_number in group_numbers:
        if group_number == last_group_number:
            group_index += 1
        else:
            group_index = 1
        group_indexes.append(group_index)
    for i in range(1, nimages + 1):
        filename = "Channel2-%02d-%s-%02d.tif" % (
            i,
            "ABCDEFGH"[int((i - 1) / 12)],
            ((i - 1) % 12) + 1,
        )
        url = cellprofiler_core.utilities.pathname.pathname2url(
            os.path.join(path, filename)
        )
        m[
            cellprofiler_core.constants.measurement.IMAGE,
            cellprofiler_core.constants.measurement.C_FILE_NAME + "_DNA",
            i,
        ] = filename
        m[
            cellprofiler_core.constants.measurement.IMAGE,
            cellprofiler_core.constants.measurement.C_PATH_NAME + "_DNA",
            i,
        ] = path
        m[
            cellprofiler_core.constants.measurement.IMAGE,
            cellprofiler_core.constants.measurement.C_URL + "_DNA",
            i,
        ] = url
        m[
            cellprofiler_core.constants.measurement.IMAGE,
            cellprofiler_core.constants.measurement.GROUP_NUMBER,
            i,
        ] = group_numbers[i - 1]
        m[
            cellprofiler_core.constants.measurement.IMAGE,
            cellprofiler_core.constants.measurement.GROUP_INDEX,
            i,
        ] = group_indexes[i - 1]
        jblob = javabridge.run_script(
            """
            importPackage(Packages.org.cellprofiler.imageset);
            importPackage(Packages.org.cellprofiler.imageset.filter);
            var imageFile=new ImageFile(new java.net.URI(url));
            var imageFileDetails = new ImageFileDetails(imageFile);
            var imageSeries=new ImageSeries(imageFile, 0);
            var imageSeriesDetails = new ImageSeriesDetails(imageSeries, imageFileDetails);
            var imagePlane=new ImagePlane(imageSeries, 0, ImagePlane.ALWAYS_MONOCHROME);
            var ipd = new ImagePlaneDetails(imagePlane, imageSeriesDetails);
            var stack = ImagePlaneDetailsStack.makeMonochromeStack(ipd);
            var stacks = java.util.Collections.singletonList(stack);
            var keys = java.util.Collections.singletonList(imageNumber);
            var imageSet = new ImageSet(stacks, keys);
            imageSet.compress(java.util.Collections.singletonList("DNA"), null);
            """,
            dict(url=url, imageNumber=str(i)),
        )
        blob = javabridge.get_env().get_byte_array_elements(jblob)
        m[
            cellprofiler_core.constants.measurement.IMAGE,
            cellprofiler_core.modules.namesandtypes.M_IMAGE_SET,
            i,
            blob.dtype,
        ] = blob
    pipeline = cellprofiler_core.pipeline.Pipeline()
    pipeline.loadtxt(six.moves.StringIO(GOOD_PIPELINE))
    pipeline.write_pipeline_measurement(m)
    return m
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def measurements(self) -> NONEARRAY:\n pass", "def getMeasures():", "def extract_specs(self):\n vDeflection_unit = \"lcd-info.{}.conversion-set.conversion.force.scaling.unit.unit\".format(\n self.channel_numbers[\"vDeflection\"])\n self.units[\"vDeflection\"] = self.general[vDeflection_unit]\n\n height_unit = \"lcd-info.{}.conversion-set.conversion.nominal.scaling.unit.unit\".format(\n self.channel_numbers[\"height\"])\n self.units[\"height\"] = self.general[height_unit]", "def get_measurement_data(self) -> MeasurementData:\n result = MeasurementDataStructure()\n Utils.check(VL53L1X_C_LIBRARY.VL53L1_GetRangingMeasurementData(self.dev, byref(result)))\n return MeasurementData(result)", "def test_get_measure_parameters(self):\n pass", "def __init__(self,\n samples,\n names = None,\n percentgel = Q_(1.0,'(g/(100 mL))*100'), # grams agarose/100 mL buffer * 100\n electrfield = Q_(5.0, 'V/cm'),\n temperature = Q_(295.15,'K'),\n gel_len = Q_(8,'cm'),\n wellx = Q_(7,'mm'),\n welly = Q_(2,'mm'),\n wellz = Q_(1,'mm'), # mm ######################### ??? ###\n wellsep = Q_(2,'mm') # mm\n ):\n \n self.samples = samples # assumes len(DNA) in bp #\n self.names = names if names else ['lane'+str(i) for i in #\n xrange(1, len(samples)+1)] #\n self.percent = to_units(percentgel, '(g/(100 mL))*100', 'percentgel') # agarose percentage\n self.field = to_units(electrfield, 'V/cm', 'electrfield') # electric field intensity\n self.temperature = to_units(temperature, 'K', 'temperature') # absolute temperature\n self.gel_len = to_units(gel_len, 'cm', 'gel_len') # lane length\n self.wellx = to_units(wellx, 'mm', 'wellx') # well width\n self.welly = to_units(welly, 'mm', 'welly') # well height\n self.wellz = (to_units(wellz, 'mm', 'wellz') if wellz is not None\n else wellz) # well depth\n self.wellsep = to_units(wellsep, 'mm', 'wellsep') # separation between wells\n # Volumes\n wellVol = self.wellx * self.welly * self.wellz\n wellVol.ito('ul')\n defaulVol = 0.85 * wellVol\n volumes = []\n for sample in self.samples:\n vol = sample.volume\n if not np.isnan(vol) and vol is not None:\n volumes.append(vol)\n else:\n volumes.append(defaulVol)\n self.volumes = to_units(volumes, 'uL', 'volumes')\n # Quantities\n defaulQty = Q_(150,'ng')\n self.quantities = assign_quantitiesB(self.samples, defaulQty)\n #self.quantities = assign_quantities(self.samples, quantities, defaulQty)\n self.runtime = np.nan ##########\n self.freesol_mob = None\n self.mobilities = []\n self.distances = []\n self.bandwidths0 = []\n self.bandwidthsI = []\n self.bandwidths = []\n self.intensities = []\n self.DNAspace_for_mu0 = logspace_int(100, 3000, 10)*ureg.bp # exponential space of DNA sizes\n self.DNAspace_for_vWBRfit = np.linspace(100, 50000, 100)*ureg.bp\n self.Tvals_for_mu0 = []\n self.H2Oviscosity = None\n self.accel_to_plateau = None\n self.equil_to_accel = None\n self.Zimm_to_Rouse = None\n self.poresize = None\n self.poresize_fit = None\n self.vWBR_muS = None\n self.vWBR_muL = None\n self.vWBR_gamma = None", "def __init__(self, parent):\n \n #60 32 bit integers are recorded for the amplifier sample time index \n self.sample_time_index = []\n for i in range(60):\n sample_time = np.int32(struct.unpack('i', parent.rhd.read(4)))[0]\n self.sample_time_index.append(sample_time)\n\n #Amplifier voltages for each channel\n self.electrode_traces = {}#key: channel name value: voltage trce\n for amp in parent._AMPLIFIER_CHANNELS:\n electrode_voltage_trace = []\n #60 samples per channel, int16\n for i in range(60):\n electrode_voltage = 
np.uint16(struct.unpack('H', parent.rhd.read(2)))[0]\n electrode_voltage_trace.append(electrode_voltage)\n self.electrode_traces[amp] = electrode_voltage_trace \n\n #Get voltage from Aux input channels\n self.auxilary_traces = {}\n for aux in parent._AUX_CHANNELS:\n aux_voltage_trace = []\n #15 samples per channel, int16\n for i in range(15):\n aux_voltage = np.uint16(struct.unpack('H', parent.rhd.read(2)))[0]\n aux_voltage_trace.append(aux_voltage)\n self.auxilary_traces[aux] = aux_voltage_trace \n\n #get voltage from supply voltage channels\n self.supply_voltages = {}\n for sup in parent._SUPPLY_VOLTAGE_CHANNELS:\n sup_voltage_list = []\n for i in range(1):\n sup_voltage = np.uint16(struct.unpack('H', parent.rhd.read(2)))[0]\n sup_voltage_list.append(sup_voltage)\n self.supply_voltages[sup] = sup_voltage_list \n\n #get voltage from temerature sensor channels\n self.temerature_sensor_readings = {}\n for n in range(parent._TEMP_SENSORS):\n temp_list = []\n for i in range(1):\n temperature = np.int16(struct.unpack('h', parent.rhd.read(2)))[0]\n temp_list.append(temperature)\n self.temerature_sensor_readings[n] = temp_list \n\n #Get voltage ADC inputs\n self.board_adc_input_voltages = {}\n for adc in parent._ADC_INPUT_CHANNELS:\n adc_input_list = []\n for i in range(60):\n adc_input = np.uint16(struct.unpack('H', parent.rhd.read(2)))[0]\n adc_input_list.append(adc_input)\n self.board_adc_input_voltages[adc] = adc_input_list \n\n #Get digital input values\n self.board_digital_inputs = {}\n for dig in parent._DIGITAL_INPUT_CHANNELS :\n digital_input_list = []\n for i in range(60):\n digital_input = np.uint16(struct.unpack('H', parent.rhd.read(2)))[0]\n digital_input_list.append(digital_input)\n self.board_digital_inputs[dig.native_channel_name] = digital_input_list", "def get_data():\n \n data = {\n 'loadAvg1Min': 0, #load average 1 min\n 'loadAvg5Min': 0, #load average 5 min\n 'loadAvg15Min': 0, #load average 15 min\n 'cpuUsage': [], #usage distribution for each cpu\n 'memUsage': {}, #memory usage \n 'networkReads': [], #network reads per second for each interface\n 'networkWrites': [], #network writes per second for each interface\n 'diskReads': [], #disk reads per second for each disk\n 'diskWrites': [] #disk writes per second for each disk\n }\n \n #metrics that doesnt need sampling\n data['loadAvg1Min'], data['loadAvg5Min'], data['loadAvg15Min'] = get_load_avg() #get load avg\n data['memUsage'].update(get_mem_usage()) #memory usage\n \n #metrics that needs sampling\n #they are written as a generator so that we can sleep before collection again\n sampling_duration = 1\n cpu_usage_gen = get_cpu_usage(sampling_duration) #generator for cpu usage\n net_rw_gen = get_net_rw(sampling_duration) #generator for network read write\n disk_rw_gen = get_disk_rw(sampling_duration) #generator for disk read write\n \n while 1: #now start sampling, whenever we have walid data, we can exit the loop\n cpu_usage = next(cpu_usage_gen)\n net_rw = next(net_rw_gen)\n disk_rw = next(disk_rw_gen)\n \n if cpu_usage or net_rw or disk_rw: #we have valid data\n break\n \n time.sleep(sampling_duration)\n \n #append cpu usage for each cpu core\n for cpu, usage in cpu_usage.items():\n data['cpuUsage'].append({'name': cpu, 'value': usage})\n \n #append network read and write for each interface\n for interface, rw in net_rw.items():\n data['networkReads'].append({'name': interface, 'value': rw['reads']})\n data['networkWrites'].append({'name': interface, 'value': rw['writes']}) \n \n #append disk read and write for each 
logical disk\n for device, rw in disk_rw.items():\n data['diskReads'].append({'name': device, 'value': rw['reads']})\n data['diskWrites'].append({'name': device, 'value': rw['writes']})\n \n return data", "def get_measure_par(input_object):\r\n input_object.measurement_strategy = ui.measurement_strategy.currentIndex()\r\n input_object.len_total = ui.total_length.text()\r\n input_object.frequency = ui.frequency.text()\r\n input_object.num_of_mea = ui.num_of_mea.text()\r\n input_object.len_step = ui.length_step.text()\r\n input_object.time_step = ui.time_step.text()\r\n input_object.temperature = ui.temperature.text()\r\n input_object.humidity = ui.humidity.text()\r\n input_object.na_average_factor = ui.na_average_facotr.value()\r\n input_object.multi_measure = ui.multi_measure.value()\r\n if ui.NA_state.text().strip() != '':\r\n input_object.na_state = ui.NA_state.text().strip()\r\n else:\r\n input_object.na_state = None", "def __init__(self):\n self.state_dim = 12\n self.measurement_dim = 6", "def __init__(self, measure):\n self.measure = measure # Dictionary of the measurement steps\n self.devices = {} # Dictionary holding all the devices\n self.output_devices = [] # List of devices with output capabilities\n self.daqs = {} # Dictionary that holds for each daq the inputs and outputs.\n self.rotation_stages = [] # If there are rotation stages present, they will show up in this list.\n # This short block is going to become useful in the future, when interfacing with a GUI\n for d in self.measure:\n setattr(self, d, self.measure[d])", "def init(self):\n imageDim = u.getDimImage(self.length, 0, 0, 78) # 54.5, 42.3, 66.17\n self.imageInfo['ratio'] = u.getRatio(self.imageInfo['shape'],\n imageDim)\n\n self.measuring = pymeasuring.Measuring(self.imageInfo, self.length)\n\n # rospy.loginfo(\"dims of image [mm]: \" + str(imageDim))\n # rospy.loginfo(\"ratios [mm/px]: \" + str(self.imageInfo['ratio']))\n # rospy.loginfo(\"shape [px]: \" + str(self.imageInfo['shape']))\n rospy.loginfo('init of measuring object is complete.')", "def create_data_structures(self):\n # Data storage arrays for time and measurement\n # Create the array of zeros and preallocating\n start_time = time.time()\n # The number of data points has to be optimized\n self.data_points = 5000\n # prs_data has three rows, 0 = time, 1 = pressure - tare, 2 = raw_pressure\n self.prs_data = np.zeros([3, self.data_points])\n self.prs_data[0, :] = start_time\n # This queue receives data from the sensors and puts it in the graphs and sends to the \n # LifoQueue\n self.prs_q = Queue()\n # The lifo queue is created to send the data to the piston control thread. 
The piston\n # control will only read and use the last value, since only the most recent information\n # matters\n self.prs_lifo_q = LifoQueue()\n self.prs_tare = 0\n \n self.flw_data = np.zeros([3, self.data_points])\n self.flw_data[0, :] = start_time\n self.flw_q = Queue()\n self.flw_lifo_q = LifoQueue() # Read comment on the lifoqueue above\n self.flw_tare = 0\n\n self.vol_lifo_q = LifoQueue() # Read comment on the lifoqueue above\n self.vol_data = np.zeros([2, self.data_points])\n self.vol_data[0, :] = start_time", "async def prepare_measuring(self):\n # close MDV,\n # fill the constant tank, start the measurement pumps.\n self.valves[MEASURING_DRAIN_VALVE].close()\n await self.tanks[CONSTANT_TANK].fill(keep_source_active=True)\n self.pumps[PUMP_MEASURING_TANK].stop()\n self.scale.start()\n\n self._state = self.PREPARING_MEASUREMENT", "def get_measurements(self):\n metrics = {}\n for key in self.fields.keys():\n metrics[key] = []\n # What's in output:\n # proc_pid date virt res shrd cpu mem power gpus_power\n while not self.queue.empty():\n data = self.queue.get().strip().split()\n for field in self.fields:\n tp = self.fields[field]['type']\n idx = self.fields[field]['index']\n count = self.fields[field]['count']\n if count == -1:\n metrics[field].append(ResourceMonitor.str_to_type(data[idx], tp))\n elif count == 0:\n metrics[field].append([ResourceMonitor.str_to_type(data[idx], tp)])\n else:\n metrics[field].append([\n ResourceMonitor.str_to_type(data[index], tp) for index in xrange(idx, idx+count)\n ])\n return metrics", "def _make_meta(self):\n available_meas_times = list()\n available_intervals = list()\n drill_by = list()\n related = list()\n last_data_set_instance = dict()\n\n if self._data['report_save_historical_instances_ind'] == 'Y':\n # last measurement instance\n res = self._db.Query(\"\"\"SELECT *\n FROM report_data_set_instance\n WHERE\n `element_id`=%s\n AND `segment_value_id` = %s\n ORDER BY measurement_time DESC\n LIMIT 0, 1\"\"\",(self._id, self._segment_value_id))\n if res:\n last_data_set_instance = self._db.record[0]\n last_data_set_instance['measurement_time'] = self._formatter.format_date(last_data_set_instance['measurement_time'])\n\n # available measurement instances\n res = self._db.Query(\"\"\"SELECT *\n FROM report_data_set_instance\n WHERE\n `element_id`=%s\n AND `segment_value_id` = %s\n ORDER BY measurement_time DESC\"\"\",(self._id, self._segment_value_id))\n if res:\n for data_set_instance in self._db.record:\n data_set_instance['measurement_time'] = self._formatter.format_date(data_set_instance['measurement_time'])\n available_meas_times.append(data_set_instance)\n \n\n # get drill by. 
not for this version\n\n # available measurement intervals\n if self._data['report_primary_shared_dimension_id'] is None:\n self._data['report_primary_shared_dimension_id'] = 0\n\n self._db.Query(\"\"\"\n SELECT measurement_interval.*,\n dashboard_element.element_id\n FROM dashboard_element\n LEFT JOIN measurement_interval\n ON measurement_interval.measurement_interval_id = dashboard_element.measurement_interval_id\n WHERE\n (dashboard_element.`element_id`<>%s\n AND dashboard_element.measurement_interval_id <> %s\n AND dashboard_element.shared_measure_id = %s\n AND dashboard_element.`type` = 'internal report'\n AND ifnull(dashboard_element.report_used_for_drill_to_ind,'N') = %s\n AND ifnull(dashboard_element.report_primary_shared_dimension_id,0) = %s\n AND ifnull(dashboard_element.segment_id,0) = %s)\n OR\n dashboard_element.`element_id`=%s\n AND 3=4\n \n GROUP BY measurement_interval.measurement_interval_id\n ORDER BY\n measurement_interval.display_sequence,\n dashboard_element.name ASC\n \"\"\",\n (self._id,\n self._data['measurement_interval_id'],\n self._data['shared_measure_id'],\n self._data['report_used_for_drill_to_ind'],\n self._data['report_primary_shared_dimension_id'],\n self._data['segment_id'],\n self._id))\n\n\n for interval in self._db.record:\n interval['report_data_set_instance_id'] = 0\n available_intervals.append(interval)\n\n # see related\n self._db.Query(\"\"\"SELECT e.*\n FROM dashboard_element_topic det, dashboard_element e\n WHERE e.element_id = det.dashboard_element_id\n AND dashboard_element_id <> %s\n AND e.enabled_ind = 'Y'\n AND topic_id IN (select topic_id from dashboard_element_topic where dashboard_element_id = %s)\n UNION SELECT e.*\n FROM dashboard_element e, metric_drill_to_report m\n WHERE m.metric_element_id = e.element_id\n AND m.report_element_id = %s\n AND e.enabled_ind = 'Y'\n AND ifnull(e.segment_id,0) = %s\n \"\"\", (self._id, self._id, self._id, self._data['segment_id']))\n \n\n for related_element in self._db.record:\n if not related_element['segment_id']:\n related_element['segment_id'] = 0\n if related_element['segment_id'] == self._data['segment_id']:\n related_element['segment_value_id'] = self._segment_value_id\n else:\n related_element['segment_value_id'] = 0\n related.append(related_element)\n\n # elements displayed on the page\n before_dataset = list()\n after_dataset = list()\n \n charts_before_dataset = list()\n charts_after_dataset = list()\n \n \n # dataset table\n dataset_el = OrderedDict()\n dataset_el['element_id'] = ''\n dataset_el['element_type'] = 'dataset'\n dataset_el['element_name'] = ''\n dataset_el['element_desc'] = ''\n dataset_el['placement'] = ''\n dataset_el['sequence'] = 0\n dataset_el['show_ind'] = self._data['show_data_set_table_in_report_ind']\n \n \n # charts\n self._db.Query(\"\"\"SELECT *\n FROM report_data_set_chart \n WHERE \n `element_id`= %s\n AND \n (ISNULL(report_data_set_pivot_id)\n OR report_data_set_pivot_id = 0) \n ORDER BY display_sequence ASC\"\"\", (self._id, ))\n for chart in self._db.record:\n chart_el = OrderedDict()\n chart_el['element_id'] = chart['report_data_set_chart_id']\n chart_el['element_type'] = 'chart'\n chart_el['pivot_id'] = 0\n if chart['report_data_set_pivot_id']:\n chart_el['pivot_id'] = chart['report_data_set_pivot_id']\n chart_el['element_name'] = chart['name']\n chart_el['element_desc'] = chart['description']\n chart_el['placement'] = chart['chart_placement']\n chart_el['sequence'] = chart['display_sequence']\n chart_el['show_ind'] = chart['enabled_ind']\n if 
chart_el['placement'] == 'before table': \n charts_before_dataset.append(chart_el)\n else:\n charts_after_dataset.append(chart_el)\n \n # pivots\n self._db.Query(\"\"\"SELECT *\n FROM report_data_set_pivot\n WHERE\n `element_id`= %s\n ORDER BY display_sequence ASC\"\"\", (self._id, ))\n for pivot in self._db.record:\n before_pivot = list()\n after_pivot = list()\n #pivot_element = list()\n \n pivot_el = OrderedDict()\n pivot_el['element_id'] = pivot['report_data_set_pivot_id']\n pivot_el['element_type'] = 'pivot'\n pivot_el['element_name'] = pivot['name']\n pivot_el['element_desc'] = ''\n pivot_el['placement'] = pivot['pivot_table_report_placement']\n pivot_el['sequence'] = pivot['display_sequence']\n pivot_el['show_ind'] = pivot['enabled_ind']\n \n # charts\n self._db.Query(\"\"\"SELECT *\n FROM report_data_set_chart \n WHERE \n `element_id`= %s\n AND report_data_set_pivot_id = %s \n ORDER BY display_sequence ASC\"\"\",\n (self._id, pivot_el['element_id']))\n for chart in self._db.record:\n chart_el = OrderedDict()\n chart_el['element_id'] = chart['report_data_set_chart_id']\n chart_el['element_type'] = 'chart'\n chart_el['pivot_id'] = 0\n if chart['report_data_set_pivot_id']:\n chart_el['pivot_id'] = chart['report_data_set_pivot_id']\n chart_el['element_name'] = chart['name']\n chart_el['element_desc'] = chart['description']\n chart_el['placement'] = chart['chart_placement']\n chart_el['sequence'] = chart['display_sequence']\n chart_el['show_ind'] = chart['enabled_ind']\n if chart_el['placement'] == 'before table': \n before_pivot.append(chart_el)\n else:\n after_pivot.append(chart_el)\n pivot_element = before_pivot + [pivot_el] + after_pivot \n \n if pivot_el['placement'] == 'before data set':\n before_dataset += pivot_element\n else:\n after_dataset += pivot_element\n elements = charts_before_dataset + before_dataset + [dataset_el] + after_dataset + charts_after_dataset\n \n \n self._jfile.make_current_meta(last_data_set_instance,\n available_meas_times,\n available_intervals,\n drill_by,\n related,\n elements,\n self._segment_values)", "def _build_parsed_values(self):\n\n SENSOR = \"Sensor\"\n TYPE = \"type\"\n ID = \"id\"\n PCB_SERIAL_NUMBER = \"PCBSerialNum\"\n ASSEMBLY_NUMBER = \"AssemblyNum\"\n SERIAL_NUMBER = \"SerialNumber\"\n FIRMWARE_VERSION = \"FirmwareVersion\"\n FIRMWARE_DATE = \"FirmwareDate\"\n COMMAND_SET_VERSION = \"CommandSetVersion\"\n PCB_ASSEMBLY = \"PCBAssembly\"\n MANUFACTURE_DATE = \"MfgDate\"\n INTERNAL_SENSORS = \"InternalSensors\"\n TEMPERATURE_SENSOR_ID = \"Main Temperature\"\n CONDUCTIVITY_SENSOR_ID = \"Main Conductivity\"\n PRESSURE_SENSOR_ID = \"Main Pressure\"\n EXTERNAL_SENSORS = \"ExternalSensors\"\n VOLT0 = \"volt 0\"\n VOLT1 = \"volt 1\"\n\n # check to make sure there is a correct match before continuing\n match = SBE19HardwareParticle.regex_compiled().match(self.raw_data)\n if not match:\n raise SampleException(\"No regex match of parsed hardware data: [%s]\" %\n self.raw_data)\n\n dom = parseString(self.raw_data)\n root = dom.documentElement\n log.debug(\"root.tagName = %s\", root.tagName)\n serial_number = root.getAttribute(SERIAL_NUMBER)\n\n firmware_version = self._extract_xml_element_value(root, FIRMWARE_VERSION)\n firmware_date = self._extract_xml_element_value(root, FIRMWARE_DATE)\n command_set_version = self._extract_xml_element_value(root, COMMAND_SET_VERSION)\n manufacture_date = self._extract_xml_element_value(root, MANUFACTURE_DATE)\n\n pcb_assembly_elements = self._extract_xml_elements(root, PCB_ASSEMBLY)\n pcb_serial_number = []\n 
pcb_assembly = []\n for assembly in pcb_assembly_elements:\n pcb_serial_number.append(assembly.getAttribute(PCB_SERIAL_NUMBER))\n pcb_assembly.append(assembly.getAttribute(ASSEMBLY_NUMBER))\n\n temperature_sensor_serial_number = \"\"\n conductivity_sensor_serial_number = \"\"\n pressure_sensor_serial_number = \"\"\n pressure_sensor_type = \"\"\n volt0_serial_number = 0\n volt0_type = \"\"\n volt1_serial_number = 0\n volt1_type = \"\"\n\n internal_sensors_element = self._extract_xml_elements(root, INTERNAL_SENSORS)[0]\n sensors = self._extract_xml_elements(internal_sensors_element, SENSOR)\n\n for sensor in sensors:\n sensor_id = sensor.getAttribute(ID)\n if sensor_id == TEMPERATURE_SENSOR_ID:\n temperature_sensor_serial_number = self._extract_xml_element_value(sensor, SERIAL_NUMBER)\n elif sensor_id == CONDUCTIVITY_SENSOR_ID:\n conductivity_sensor_serial_number = self._extract_xml_element_value(sensor, SERIAL_NUMBER)\n elif sensor_id == PRESSURE_SENSOR_ID:\n pressure_sensor_serial_number = self._extract_xml_element_value(sensor, SERIAL_NUMBER)\n pressure_sensor_type = self._extract_xml_element_value(sensor, TYPE)\n\n external_sensors_element = self._extract_xml_elements(root, EXTERNAL_SENSORS)[0]\n sensors = self._extract_xml_elements(external_sensors_element, SENSOR)\n\n for sensor in sensors:\n sensor_id = sensor.getAttribute(ID)\n if sensor_id == VOLT0:\n volt0_serial_number = self._extract_xml_element_value(sensor, SERIAL_NUMBER)\n volt0_type = self._extract_xml_element_value(sensor, TYPE)\n elif sensor_id == VOLT1:\n volt1_serial_number = self._extract_xml_element_value(sensor, SERIAL_NUMBER)\n volt1_type = self._extract_xml_element_value(sensor, TYPE)\n\n result = [{DataParticleKey.VALUE_ID: SBE19HardwareParticleKey.SERIAL_NUMBER,\n DataParticleKey.VALUE: serial_number},\n {DataParticleKey.VALUE_ID: SBE19HardwareParticleKey.FIRMWARE_VERSION,\n DataParticleKey.VALUE: firmware_version},\n {DataParticleKey.VALUE_ID: SBE19HardwareParticleKey.FIRMWARE_DATE,\n DataParticleKey.VALUE: firmware_date},\n {DataParticleKey.VALUE_ID: SBE19HardwareParticleKey.COMMAND_SET_VERSION,\n DataParticleKey.VALUE: command_set_version},\n {DataParticleKey.VALUE_ID: SBE19HardwareParticleKey.MANUFACTURE_DATE,\n DataParticleKey.VALUE: manufacture_date},\n {DataParticleKey.VALUE_ID: SBE19HardwareParticleKey.PCB_SERIAL_NUMBER,\n DataParticleKey.VALUE: pcb_serial_number},\n {DataParticleKey.VALUE_ID: SBE19HardwareParticleKey.ASSEMBLY_NUMBER,\n DataParticleKey.VALUE: pcb_assembly},\n {DataParticleKey.VALUE_ID: SBE19HardwareParticleKey.TEMPERATURE_SENSOR_SERIAL_NUMBER,\n DataParticleKey.VALUE: temperature_sensor_serial_number},\n {DataParticleKey.VALUE_ID: SBE19HardwareParticleKey.CONDUCTIVITY_SENSOR_SERIAL_NUMBER,\n DataParticleKey.VALUE: conductivity_sensor_serial_number},\n {DataParticleKey.VALUE_ID: SBE19HardwareParticleKey.PRESSURE_SENSOR_SERIAL_NUMBER,\n DataParticleKey.VALUE: pressure_sensor_serial_number},\n {DataParticleKey.VALUE_ID: SBE19HardwareParticleKey.PRESSURE_SENSOR_TYPE,\n DataParticleKey.VALUE: pressure_sensor_type},\n {DataParticleKey.VALUE_ID: SBE19HardwareParticleKey.VOLT0_SERIAL_NUMBER,\n DataParticleKey.VALUE: volt0_serial_number},\n {DataParticleKey.VALUE_ID: SBE19HardwareParticleKey.VOLT0_TYPE,\n DataParticleKey.VALUE: volt0_type},\n {DataParticleKey.VALUE_ID: SBE19HardwareParticleKey.VOLT1_SERIAL_NUMBER,\n DataParticleKey.VALUE: volt1_serial_number},\n {DataParticleKey.VALUE_ID: SBE19HardwareParticleKey.VOLT1_TYPE,\n DataParticleKey.VALUE: volt1_type}]\n\n return result", "def 
_config_measurements(self, spec, period):\r\n logging.info(\"Config measurement for spec {0}\".format(spec))\r\n \r\n eq = self._get_equipment()\r\n\r\n measurements=[[],[],[]]\r\n \r\n mplane_param2value={}\r\n for k in spec.parameter_names():\r\n v = spec.get_parameter_value(k)\r\n if isinstance(v,float):\r\n v = \"{:.0f}\".format(v)\r\n else:\r\n v = str(v)\r\n mplane_param2value[k] = v\r\n \r\n for meas_type in sorted(self._meas[\"types\"].keys()):\r\n (meas,add2)=self._add_or_update_measurement(eq,meas_type,mplane_param2value,period)\r\n measurements[add2].append(meas)\r\n \r\n return measurements", "def get_measurements(self, pipeline, object_name, category):\n result = self.get_object_measurements(pipeline, object_name, category,\n {self.object_name.value: [] })\n return result", "def pipeline_test_data(self):\n if self.linearity:\n Detector1Pipeline.call(self.ramp_file, save_results=True, output_dir=self.output_dir, output_use_model=True,\n steps={'ipc': {'skip': True},\n 'rscd': {'skip': True},\n 'lastframe': {'save_results': True,\n 'output_dir': self.output_dir},\n 'dark_current': {'save_results': True,\n 'output_dir': self.output_dir},\n #'linearity': {'skip': True},\n 'jump': {'save_results': True,\n 'output_dir': self.output_dir}})\n else:\n Detector1Pipeline.call(self.ramp_file, save_results=True, output_dir=self.output_dir, output_use_model=True,\n steps={'ipc': {'skip': True},\n 'rscd': {'skip': True},\n 'lastframe': {'save_results': True,\n 'output_dir': self.output_dir},\n 'dark_current': {'save_results': True,\n 'output_dir': self.output_dir},\n 'linearity': {'skip': True},\n 'jump': {'save_results': True,\n 'output_dir': self.output_dir}})\n\n self.pre_dark_file = os.path.join(self.output_dir, 'step_lastframe.fits')\n self.post_dark_file = os.path.join(self.output_dir, 'step_dark_current.fits')\n self.jump_file = os.path.join(self.output_dir, 'step_jump.fits')\n self.rate_file = os.path.join(self.output_dir, 'step_rate.fits')", "def measure_program(self, channels: Iterable[str]) -> Dict[str, numpy.ndarray]:", "def measure_dict():\n out = base_dict()\n out['mro']['current'] = ['Measure']\n out['name']['current'] = 'Measure'\n ao(out, 'nSamples', 'Integer', 1, readLevel=3)\n ao(out, 'id', 'String', 'Conversion source ID', readLevel=3)\n ao(out, 'uid', 'String', 'Unique ID', readLevel=5)\n ao(out, 'date', 'Date', '00:00:00 01/01/2000', name='Test date')\n ao(out, 'zerotime', 'Float', name='Acquisition starting time', readLevel=4)\n ao(out, 'elapsed', 'Float', name='Test duration', unit='second')\n ao(out, 'operator', 'String', name='Operator')\n return out", "def __init__(self):\n self.hmd = None\n self.vr_render_models = None\n self.render_width = 0\n self.render_height = 0", "def __init__(self):\n super().__init__()\n self.dmdParams = {} # dmd settings container\n self.printTag = 'DMD' # print tag\n self._dynamicHandling = True # This ROM is able to manage the time-series on its own. 
No need for special treatment outside\n self.pivotParameterID = None # pivot parameter\n # variables filled up in the training stages\n self._amplitudes = {} # {'target1': vector of amplitudes,'target2':vector of amplitudes, etc.}\n self._eigs = {} # {'target1': vector of eigenvalues,'target2':vector of eigenvalues, etc.}\n self._modes = {} # {'target1': matrix of dynamic modes,'target2':matrix of dynamic modes, etc.}\n self.__Atilde = {} # {'target1': matrix of lowrank operator from the SVD,'target2':matrix of lowrank operator from the SVD, etc.}\n self.pivotValues = None # pivot values (e.g. time)\n self.KDTreeFinder = None # kdtree weighting model\n self.timeScales = {} # time-scales (training and dmd). {'training' and 'dmd':{t0:float,'dt':float,'intervals':int}}\n self.featureVals = None # feature values", "def test_dimensions(self):\n\t\t\n\t\t# default\t\n\t\tdetails = self.watcher.describe()\n\t\tprint(details)\n\t\t\n\t\t# default\t\n\t\tdetails = self.watcher.describe(layers=[self.first_layer])\n\t\tprint(details)\n\t\tN = details.N.to_numpy()[0]\n\t\tM = details.M.to_numpy()[0]\n\t\trf = details.rf.to_numpy()[0]\n\t\tnum_evals = details.num_evals.to_numpy()[0]\n\t\tprint(N,M,rf,num_evals)\n\t\t\n\t\tself.assertEqual(N,64)\n\t\tself.assertEqual(M,3)\n\t\tself.assertEqual(rf,9)\n\t\tself.assertEqual(num_evals,M*rf)", "def __init__(self, data_path):\n self.perf_data = dill.load(open(data_path, 'rb'))\n #print(self.perf_data[0])\n print(len(self.perf_data))\n self.length = len(self.perf_data)\n\n # perform a few pre-processing steps\n for i in range(self.length):\n # store the length of the pitch contours for use later\n self.perf_data[i]['length'] = len(\n self.perf_data[i]['pitch_contour'])\n # store the length of the pitch contours for use later\n self.perf_data[i]['pitch_contour'] = self.normalize_pitch_contour(\n self.perf_data[i]['pitch_contour'])\n print(self.perf_data[0])", "def __init__(self, name=\"\", description=\"\", time_units=\"s\", len_units=\"m\",\n pump_units=\"m3/s\"):\n\n # Set general info\n self._type = 1 # pumping well id\n self.parameters = {'full': True,\n 'rw': 1.,\n 'd': 0.,\n 'l': 1.}\n self.time_units = time_units\n self.len_units = len_units\n self.pump_units = pump_units\n\n # Create pumping well data\n self.pumprate = _Data(dtype=0, name=name, description=description)\n self.pumprate.set_units(self.time_units, self.pump_units)\n\n # Set observation wells and piezometers\n self.wells = []", "def getMeasure(unique_name):", "def getMeasure(unique_name):", "def __init__(self):\r\n\r\n super(Metallized, self).__init__()\r\n\r\n # Initialize public scalar attributes.\r\n self.spec_sheet = 0\r\n if self.hazard_rate_type < 3: # MIL-HDBK-217\r\n self.reference_temperature = 358.0", "def _initialise_sufficient_statistics(self):\n stats = super()._initialise_sufficient_statistics()\n\n stats['B'] = {\n 'numer': [\n np.zeros((self.n_states, self.n_features[i]))\n for i in range(self.n_emissions)\n ],\n 'denom': [\n np.zeros((self.n_states, self.n_features[i]))\n for i in range(self.n_emissions)\n ],\n }\n\n return stats", "def extract_data(self):\n values = {}\n for injkey in self.data_sets.keys():\n values[injkey] = {}\n alldata = self.data_sets[injkey]\n paramkeys = alldata['params'].keys()\n for datakey in alldata.keys():\n if not datakey == 'params':\n values[injkey][datakey] = {}\n values[injkey][datakey]['metric_val'] = {}\n values[injkey][datakey]['metric_val']['vals'] = []\n for paramkey in paramkeys:\n values[injkey][datakey][paramkey] = {}\n 
values[injkey][datakey][paramkey]['vals'] = []\n trials = alldata[datakey]\n for trial_num in trials.keys():\n trial = trials[trial_num]\n values[injkey][datakey]['metric_val']['vals'] \\\n .append(trial['metric_val'])\n values[injkey][datakey]['metric_val']['type'] \\\n = trial['metric']\n values[injkey][datakey]['metric_val']['units'] \\\n = 'dimensionless'\n param_vals = trial['params']\n for param_name in param_vals.keys():\n val, units = self.parse_pint_string(\n pint_string=param_vals[param_name]\n )\n values[injkey][datakey][param_name]['vals'] \\\n .append(float(val))\n values[injkey][datakey][param_name]['units'] \\\n = units\n self.values = values", "def __init__(self):\n self.eps = 1e-5\n self.use_global_stats = True\n self.workspace = 512\n self.units = (3, 4, 23, 3) # use for 101\n self.filter_list = [256, 512, 1024, 2048]", "def get_measurement_data(self, measurement_path: pathlib.Path) -> Data:\n # Load settings\n with open(measurement_path / self.name_meta, 'r') as f:\n meta = yaml.load(f, Loader=yaml.FullLoader)\n # Load frog image\n frog_image = imageio.imread(measurement_path / self.name_frog)\n data = Data(frog_image, meta)\n # Load configuration\n #with open(measurement_path / self.name_config, 'r') as f:\n # config = yaml.load(f, Loader=yaml.FullLoader)\n return data", "def _build_parsed_values(self):\n\n SERIAL_NUMBER = \"SerialNumber\"\n CALIBRATION = \"Calibration\"\n ID = \"id\"\n TEMPERATURE_SENSOR_ID = \"Main Temperature\"\n CONDUCTIVITY_SENSOR_ID = \"Main Conductivity\"\n PRESSURE_SENSOR_ID = \"Main Pressure\"\n VOLT0 = \"Volt 0\"\n VOLT1 = \"Volt 1\"\n VOLT2 = \"Volt 2\"\n VOLT3 = \"Volt 3\"\n VOLT4 = \"Volt 4\"\n VOLT5 = \"Volt 5\"\n EXTERNAL_FREQUENCY_CHANNEL = \"external frequency channel\"\n\n # check to make sure there is a correct match before continuing\n match = SBE19CalibrationParticle.regex_compiled().match(self.raw_data)\n if not match:\n raise SampleException(\"No regex match of parsed calibration data: [%s]\" %\n self.raw_data)\n\n dom = parseString(self.raw_data)\n root = dom.documentElement\n log.debug(\"root.tagName = %s\", root.tagName)\n serial_number = root.getAttribute(SERIAL_NUMBER)\n result = [{DataParticleKey.VALUE_ID: SBE19CalibrationParticleKey.SERIAL_NUMBER,\n DataParticleKey.VALUE: serial_number}]\n\n calibration_elements = self._extract_xml_elements(root, CALIBRATION)\n for calibration in calibration_elements:\n id_attr = calibration.getAttribute(ID)\n if id_attr == TEMPERATURE_SENSOR_ID:\n result.append(\n self._get_xml_parameter(calibration, SBE19CalibrationParticleKey.TEMP_SENSOR_SERIAL_NUMBER, str))\n result.append(self._get_xml_parameter(calibration, SBE19CalibrationParticleKey.TEMP_CAL_DATE, str))\n result.append(self._get_xml_parameter(calibration, SBE19CalibrationParticleKey.TA0))\n result.append(self._get_xml_parameter(calibration, SBE19CalibrationParticleKey.TA1))\n result.append(self._get_xml_parameter(calibration, SBE19CalibrationParticleKey.TA2))\n result.append(self._get_xml_parameter(calibration, SBE19CalibrationParticleKey.TA3))\n result.append(self._get_xml_parameter(calibration, SBE19CalibrationParticleKey.TOFFSET))\n elif id_attr == CONDUCTIVITY_SENSOR_ID:\n result.append(\n self._get_xml_parameter(calibration, SBE19CalibrationParticleKey.COND_SENSOR_SERIAL_NUMBER, str))\n result.append(self._get_xml_parameter(calibration, SBE19CalibrationParticleKey.COND_CAL_DATE, str))\n result.append(self._get_xml_parameter(calibration, SBE19CalibrationParticleKey.CONDG))\n result.append(self._get_xml_parameter(calibration, 
SBE19CalibrationParticleKey.CONDH))\n result.append(self._get_xml_parameter(calibration, SBE19CalibrationParticleKey.CONDI))\n result.append(self._get_xml_parameter(calibration, SBE19CalibrationParticleKey.CONDJ))\n result.append(self._get_xml_parameter(calibration, SBE19CalibrationParticleKey.CPCOR))\n result.append(self._get_xml_parameter(calibration, SBE19CalibrationParticleKey.CTCOR))\n result.append(self._get_xml_parameter(calibration, SBE19CalibrationParticleKey.CSLOPE))\n elif id_attr == PRESSURE_SENSOR_ID:\n result.append(self._get_xml_parameter(calibration, SBE19CalibrationParticleKey.PRES_SERIAL_NUMBER, str))\n result.append(self._get_xml_parameter(calibration, SBE19CalibrationParticleKey.PRES_CAL_DATE, str))\n result.append(self._get_xml_parameter(calibration, SBE19CalibrationParticleKey.PA0))\n result.append(self._get_xml_parameter(calibration, SBE19CalibrationParticleKey.PA1))\n result.append(self._get_xml_parameter(calibration, SBE19CalibrationParticleKey.PA2))\n result.append(self._get_xml_parameter(calibration, SBE19CalibrationParticleKey.PTCA0))\n result.append(self._get_xml_parameter(calibration, SBE19CalibrationParticleKey.PTCA1))\n result.append(self._get_xml_parameter(calibration, SBE19CalibrationParticleKey.PTCA2))\n result.append(self._get_xml_parameter(calibration, SBE19CalibrationParticleKey.PTCB0))\n result.append(self._get_xml_parameter(calibration, SBE19CalibrationParticleKey.PTCB1))\n result.append(self._get_xml_parameter(calibration, SBE19CalibrationParticleKey.PTCB2))\n result.append(self._get_xml_parameter(calibration, SBE19CalibrationParticleKey.PTEMPA0))\n result.append(self._get_xml_parameter(calibration, SBE19CalibrationParticleKey.PTEMPA1))\n result.append(self._get_xml_parameter(calibration, SBE19CalibrationParticleKey.PTEMPA2))\n result.append(self._get_xml_parameter(calibration, SBE19CalibrationParticleKey.POFFSET))\n result.append(\n self._get_xml_parameter(calibration, SBE19CalibrationParticleKey.PRES_RANGE, self.float_to_int))\n elif id_attr == VOLT0:\n result.append(self._get_xml_parameter(calibration, SBE19CalibrationParticleKey.EXT_VOLT0_OFFSET))\n result.append(self._get_xml_parameter(calibration, SBE19CalibrationParticleKey.EXT_VOLT0_SLOPE))\n elif id_attr == VOLT1:\n result.append(self._get_xml_parameter(calibration, SBE19CalibrationParticleKey.EXT_VOLT1_OFFSET))\n result.append(self._get_xml_parameter(calibration, SBE19CalibrationParticleKey.EXT_VOLT1_SLOPE))\n elif id_attr == VOLT2:\n result.append(self._get_xml_parameter(calibration, SBE19CalibrationParticleKey.EXT_VOLT2_OFFSET))\n result.append(self._get_xml_parameter(calibration, SBE19CalibrationParticleKey.EXT_VOLT2_SLOPE))\n elif id_attr == VOLT3:\n result.append(self._get_xml_parameter(calibration, SBE19CalibrationParticleKey.EXT_VOLT3_OFFSET))\n result.append(self._get_xml_parameter(calibration, SBE19CalibrationParticleKey.EXT_VOLT3_SLOPE))\n elif id_attr == VOLT4:\n result.append(self._get_xml_parameter(calibration, SBE19CalibrationParticleKey.EXT_VOLT4_OFFSET))\n result.append(self._get_xml_parameter(calibration, SBE19CalibrationParticleKey.EXT_VOLT4_SLOPE))\n elif id_attr == VOLT5:\n result.append(self._get_xml_parameter(calibration, SBE19CalibrationParticleKey.EXT_VOLT5_OFFSET))\n result.append(self._get_xml_parameter(calibration, SBE19CalibrationParticleKey.EXT_VOLT5_SLOPE))\n elif id_attr == EXTERNAL_FREQUENCY_CHANNEL:\n result.append(self._get_xml_parameter(calibration, SBE19CalibrationParticleKey.EXT_FREQ))\n\n return result", "def test_initialise(self):\n # Make sure the 
variables are all updated\n assert isinstance(gcmc_system_sampler.context, Context)\n assert isinstance(gcmc_system_sampler.positions, Quantity)\n assert isinstance(gcmc_system_sampler.simulation_box, Quantity)\n\n return None", "def readWaveform(self):\n # prepare data holder\n y = [ 0 for j in range(4) ]\n # in case of previous errors\n self.flushInput()\n for ch in self.chs:\n # mostly for TDS\n self.setCh(ch)\n # calibration factor we will need soon\n (vmult, voff) = self.calibV()\n # read and calibrate data\n data = (numpy.array(self.readData()) - voff) * vmult\n # This is from the formula in TDS manual, without the\n # \"vzero\" in it---I couldn't figure out when that wouldn't\n # be exactly zero.\n y[ch-1]=data[:]\n\n (hstep, hoff) = self.calibH()\n # initialize time array\n t = numpy.array(range(len(y[0])))\n t = (t * hstep) + hoff\n\n # update the sequence number (... for isUpdated())\n self.seq = self.readSeq()\n\n return (t, y)", "def measurements(self):\n return self.config['measurements']", "def prep(self):\n print\n print 'Filtering rawdata to data as masked array...'\n# using 0 as flag\n# self.data = n.ma.masked_array(self.rawdata[:self.nints,:, self.chans,:], self.rawdata[:self.nints,:, self.chans,:] == 0j)\n# using standard flags\n self.data = n.ma.masked_array(self.rawdata[:self.nints,:, self.chans,:], self.flags[:self.nints,:, self.chans,:] == 0) # mask of True for flagged data (flags=0 in tpipe, which is flags=False in Miriad and flags=True in MS)\n self.dataph = (self.data.mean(axis=3).mean(axis=1)).real #dataph is summed and detected to form TP beam at phase center, multi-pol\n self.min = self.dataph.min()\n self.max = self.dataph.max()\n print 'Shape of data:'\n print self.data.shape\n print 'Dataph min, max:'\n print self.min, self.max\n\n self.freq = self.freq_orig[self.chans]\n\n # set up ur tracks (lol)\n self.dmtrack0 = {}\n self.twidths = {}\n for dmbin in xrange(len(self.dmarr)):\n self.dmtrack0[dmbin] = self.dmtrack(self.dmarr[dmbin],0) # track crosses high-freq channel in first integration\n self.twidths[dmbin] = 0\n for k in self.dmtrack0[dmbin][1]:\n self.twidths[dmbin] = max(self.twidths[dmbin], len(n.where(n.array(self.dmtrack0[dmbin][1]) == k)[0]))\n\n print 'Track width in time: '\n for dmbin in self.twidths:\n print 'DM=%.1f, twidth=%d. Iteration could step by %d/2.' 
% (self.dmarr[dmbin], self.twidths[dmbin], self.twidths[dmbin])", "def __init__(self):\r\n # sample ID -> (ref individual count,\r\n # {size -> (estimate, std err, ci_low, ci_high)})\r\n self._data = {}", "def make_test_data(self):\r\n\r\n \r\n\r\n print (\"Creating Test Sample:\")\r\n\r\n print (' Period, rate, reps, phases: ', self.period, self.framerate, self.nrepetitions, self.nPhases)\r\n\r\n nframes = int(self.period * self.framerate * self.nrepetitions)\r\n\r\n print (' nframes: ', nframes)\r\n\r\n if self.bkgdNoise > 0.:\r\n\r\n d = np.random.normal(size=(nframes,self.imageSize[0],self.imageSize[1]),\r\n\r\n loc=self.bkgdIntensity, scale=self.bkgdNoise).astype('float32')\r\n\r\n else:\r\n\r\n d = self.bkgdIntensity*np.ones((nframes,self.imageSize[0],self.imageSize[1])).astype('float32')\r\n\r\n \r\n\r\n ds = d.shape\r\n\r\n print (' data shape: ', ds)\r\n\r\n dx = int(ds[2]/4)\r\n\r\n xc = int(ds[2]/2)\r\n\r\n xo = [xc-dx, xc+dx]\r\n\r\n ywidth = int(ds[2]/(self.nPhases+2))\r\n\r\n framedelay = 4\r\n\r\n\r\n\r\n if not self.mode:\r\n\r\n self.phasex = []\r\n\r\n self.phasey = []\r\n\r\n for i in range(0,self.nPhases):\r\n\r\n dy = int((i+1)*ds[2]/(self.nPhases+2)) # each phase is assigned to a region\r\n\r\n self.resp = np.zeros((nframes,))\r\n\r\n self.resp = np.cos(\r\n\r\n np.linspace(0, 2.0*np.pi*nframes/(self.period*self.framerate), nframes-framedelay)+i*np.pi/8 - np.pi/2.0)\r\n\r\n self.resp = np.concatenate((np.zeros(framedelay), self.resp))\r\n\r\n d[:, xo[0]:xo[1], dy:dy+ywidth ] += self.resp[:, np.newaxis, np.newaxis]\r\n\r\n self.phasey.append( (2+(dy+int(ds[2]/self.nPhases))/2))\r\n\r\n self.phasex.append((6+int(ds[1]/2)/2)) # make the signal equivalent of digitized one (baseline 3000, signal at 1e-4 of baseline)\r\n\r\n else:\r\n\r\n self.nPhases = 4\r\n\r\n self.spotsize = 16\r\n\r\n nrpts = 20\r\n\r\n nsites = 4\r\n\r\n one_rep = int(self.period*self.framerate)\r\n\r\n isi = int(self.period*self.framerate/self.nPhases)\r\n\r\n print('period, isi: ', self.period, isi)\r\n\r\n r = np.arange(0, nrpts, 1.)\r\n\r\n alpha = 4.\r\n\r\n A = r/alpha *np.exp(-(r-alpha)/alpha) # scaled alpha function\r\n\r\n self.spot= self.gauss_spot(self.spotsize, 3.) 
# the 2d spot\r\n\r\n sigsize = np.random.normal(size=self.nPhases, loc=self.signal_size, scale=self.signal_size*2)\r\n\r\n sigsize = [np.abs(s) for s in sigsize] # restrict to positive amplitudes\r\n\r\n print ('sigsize: ', sigsize)\r\n\r\n for j in range(self.nrepetitions):\r\n\r\n for i in range(self.nPhases):\r\n\r\n self.resp = np.zeros((nrpts, self.spot.shape[0], self.spot.shape[1]))\r\n\r\n for k in range(nrpts):\r\n\r\n self.resp[k,:,:] += sigsize[i]*A[k] * self.spot # make response an alpha time course of gaussian spot\r\n\r\n start = j*one_rep + i*isi + framedelay\r\n\r\n stop = start + nrpts\r\n\r\n dy = int((i+1)*ds[2]/(self.nPhases+2)) # location for phase\r\n\r\n #dy = dy + 2*z\r\n\r\n# print ('start, stop: ', start, stop)\r\n\r\n for z in range(nsites):\r\n\r\n #self.resp = np.concatenate((np.zeros(framedelay), self.resp))\r\n\r\n xp = xo[0] + i*10 - 10*z\r\n\r\n yp = dy - i*10 + 10*z\r\n\r\n d[start:stop, xp:xp+self.spotsize, yp:yp+self.spotsize ] += self.resp\r\n\r\n self.imageData = d # reduce to a 16-bit map to match camera data type\r\n\r\n self.nFrames = self.imageData.shape[0]\r\n\r\n self.times = np.arange(0, nframes/self.framerate, 1.0/self.framerate)\r\n\r\n print( \" Test Image Created\")\r\n\r\n # imv = pg.ImageView()\r\n\r\n # imv.show()\r\n\r\n # imv.setImage(self.imageData)\r\n\r\n\r\n\r\n if self.layout is not None:\r\n\r\n self.layout.addWidget(imv, 0, 0)\r\n\r\n\r\n\r\n avgImage = np.mean(self.imageData, axis=0)\r\n\r\n ima = pg.ImageView()\r\n\r\n ima.setImage(avgImage)\r\n\r\n self.layout.addWidget(ima, 0, 1)\r\n\r\n self.adjust_image_data()\r\n\r\n self.avgimg = np.mean(self.imageData, axis=0) # get mean image for reference later: average across all time\r\n\r\n print (' Test file, original Image Info: ')\r\n\r\n self.print_image_info()\r\n\r\n self.rebin_image()\r\n\r\n #self.clean_windowerrors()\r\n\r\n # pg.image(self.imageData)\r\n\r\n # pg.show()\r\n\r\n # mpl.figure(1)\r\n\r\n # mpl.show()\r\n\r\n if not self.mode: # FFT analysis\r\n\r\n self.analysis_fourier_map(target=1, mode=0)\r\n\r\n self.plot_maps(mode=2, gfilter=self.gfilter)\r\n\r\n else:\r\n\r\n self.analysis_dFF_map()\r\n\r\n mpl.show()", "def _init_sample(self):\n self.timestamps = np.zeros(5)\n self.data = np.zeros((5, 12))", "def get_space(): \n space = {\n 'timesteps_per_batch': hp.choice('timesteps_per_batch', [512, 1024, 2048, 4096, 8192]),\n 'vf_stepsize': hp.loguniform('vf_stepsize', -5, -2),\n 'max_kl' : hp.loguniform('max_kl', -2.5, -0.5),\n 'gamma': hp.uniform('gamma', (1-(1/((10**(-1))*4))), (1-(1/((10**(1.5))*4)))), #4:T. Remember to change this if code is altered. -1:T/tau. tau=0.04=dt\n 'lam': hp.uniform('lam', (1-(1/((10**(-1))*4))), (1-(1/((10**(1.5))*4)))) #4:T. Remember to change this if code is altered. -1:T/tau. 
tau=0.04=dt\n }\n return space", "def __init__(self, name=\"\", description=\"\", time_units=\"s\", len_units=\"m\"):\n\n # Set general info\n self._type = 2 # observation well id\n self.time_units = time_units\n self.len_units = len_units\n\n self.parameters = {'full': True, # is full penetrating?\n 'r': 1., # radius, distance until pumping well in length units\n 'd': 0., # depth of well screen (from top) in length units\n 'l': 1.} # depth of well bottom in length units\n\n # Create drawdown data\n self.drawdown = _Data(dtype=1, name=name, description=description)\n self.drawdown.set_units(self.time_units, self.len_units)\n\n # Set results from models\n self.data = []", "def _measure():\n return {\n 'type' : 'class',\n 'name' : 'measure',\n 'base' : None,\n 'is_abstract' : False,\n 'doc' : None,\n 'properties' : [\n ('description', 'str', '0.1', None),\n ('identification', 'str', '0.1', None),\n ('name', 'str', '0.1', None),\n ],\n 'decodings' : [\n ('description', 'child::cim:measureDescription'),\n ('identification', 'child::cim:measureIdentification/gmd:code/gco:CharacterString'),\n ('name', 'child::cim:nameOfMeasure'),\n\n # Hacks due to DKRZ misimplementation.\n ('description', 'parent::cim:report/child::gmd:measureDescription/gco:CharacterString'),\n ('name', 'parent::cim:report/child::gmd:nameOfMeasure/gco:CharacterString'),\n ]\n }", "def __init__(self, num_cycles_index1=None, num_cycles_index2=None, num_cycles_read1=None, num_cycles_read2=None, num_lanes=None, num_reads=None, num_surfaces=None, num_swaths_per_lane=None, num_tiles_per_swath=None, error_rate=None, error_rate_r1=None, error_rate_r2=None, intensity_cycle1=None, is_indexed=None, max_cycle_called=None, max_cycle_extracted=None, max_cycle_scored=None, min_cycle_called=None, min_cycle_extracted=None, min_cycle_scored=None, non_indexed_error_rate=None, non_indexed_intensity_cycle1=None, non_indexed_percent_aligned=None, non_indexed_percent_gt_q30=None, non_indexed_projected_total_yield=None, non_indexed_yield_total=None, percent_aligned=None, percent_gt_q30=None, percent_gt_q30_last10_cycles=None, percent_gt_q30_r1=None, percent_gt_q30_r2=None, percent_pf=None, percent_resynthesis=None, phasing_r1=None, phasing_r2=None, pre_phasing_r1=None, pre_phasing_r2=None, projected_total_yield=None, reads_pf_total=None, reads_total=None, yield_total=None, clusters=None, clusters_pf=None, cluster_density=None, occupancy=None, percent_loading_concentration=None, local_vars_configuration=None): # noqa: E501 # noqa: E501\n if local_vars_configuration is None:\n local_vars_configuration = Configuration()\n self.local_vars_configuration = local_vars_configuration\n\n self._num_cycles_index1 = None\n self._num_cycles_index2 = None\n self._num_cycles_read1 = None\n self._num_cycles_read2 = None\n self._num_lanes = None\n self._num_reads = None\n self._num_surfaces = None\n self._num_swaths_per_lane = None\n self._num_tiles_per_swath = None\n self._error_rate = None\n self._error_rate_r1 = None\n self._error_rate_r2 = None\n self._intensity_cycle1 = None\n self._is_indexed = None\n self._max_cycle_called = None\n self._max_cycle_extracted = None\n self._max_cycle_scored = None\n self._min_cycle_called = None\n self._min_cycle_extracted = None\n self._min_cycle_scored = None\n self._non_indexed_error_rate = None\n self._non_indexed_intensity_cycle1 = None\n self._non_indexed_percent_aligned = None\n self._non_indexed_percent_gt_q30 = None\n self._non_indexed_projected_total_yield = None\n self._non_indexed_yield_total = None\n 
self._percent_aligned = None\n self._percent_gt_q30 = None\n self._percent_gt_q30_last10_cycles = None\n self._percent_gt_q30_r1 = None\n self._percent_gt_q30_r2 = None\n self._percent_pf = None\n self._percent_resynthesis = None\n self._phasing_r1 = None\n self._phasing_r2 = None\n self._pre_phasing_r1 = None\n self._pre_phasing_r2 = None\n self._projected_total_yield = None\n self._reads_pf_total = None\n self._reads_total = None\n self._yield_total = None\n self._clusters = None\n self._clusters_pf = None\n self._cluster_density = None\n self._occupancy = None\n self._percent_loading_concentration = None\n self.discriminator = None\n\n self.num_cycles_index1 = num_cycles_index1\n self.num_cycles_index2 = num_cycles_index2\n self.num_cycles_read1 = num_cycles_read1\n self.num_cycles_read2 = num_cycles_read2\n self.num_lanes = num_lanes\n self.num_reads = num_reads\n self.num_surfaces = num_surfaces\n self.num_swaths_per_lane = num_swaths_per_lane\n self.num_tiles_per_swath = num_tiles_per_swath\n if error_rate is not None:\n self.error_rate = error_rate\n if error_rate_r1 is not None:\n self.error_rate_r1 = error_rate_r1\n if error_rate_r2 is not None:\n self.error_rate_r2 = error_rate_r2\n if intensity_cycle1 is not None:\n self.intensity_cycle1 = intensity_cycle1\n if is_indexed is not None:\n self.is_indexed = is_indexed\n if max_cycle_called is not None:\n self.max_cycle_called = max_cycle_called\n if max_cycle_extracted is not None:\n self.max_cycle_extracted = max_cycle_extracted\n if max_cycle_scored is not None:\n self.max_cycle_scored = max_cycle_scored\n if min_cycle_called is not None:\n self.min_cycle_called = min_cycle_called\n if min_cycle_extracted is not None:\n self.min_cycle_extracted = min_cycle_extracted\n if min_cycle_scored is not None:\n self.min_cycle_scored = min_cycle_scored\n if non_indexed_error_rate is not None:\n self.non_indexed_error_rate = non_indexed_error_rate\n if non_indexed_intensity_cycle1 is not None:\n self.non_indexed_intensity_cycle1 = non_indexed_intensity_cycle1\n if non_indexed_percent_aligned is not None:\n self.non_indexed_percent_aligned = non_indexed_percent_aligned\n if non_indexed_percent_gt_q30 is not None:\n self.non_indexed_percent_gt_q30 = non_indexed_percent_gt_q30\n if non_indexed_projected_total_yield is not None:\n self.non_indexed_projected_total_yield = non_indexed_projected_total_yield\n if non_indexed_yield_total is not None:\n self.non_indexed_yield_total = non_indexed_yield_total\n if percent_aligned is not None:\n self.percent_aligned = percent_aligned\n if percent_gt_q30 is not None:\n self.percent_gt_q30 = percent_gt_q30\n if percent_gt_q30_last10_cycles is not None:\n self.percent_gt_q30_last10_cycles = percent_gt_q30_last10_cycles\n if percent_gt_q30_r1 is not None:\n self.percent_gt_q30_r1 = percent_gt_q30_r1\n if percent_gt_q30_r2 is not None:\n self.percent_gt_q30_r2 = percent_gt_q30_r2\n if percent_pf is not None:\n self.percent_pf = percent_pf\n if percent_resynthesis is not None:\n self.percent_resynthesis = percent_resynthesis\n if phasing_r1 is not None:\n self.phasing_r1 = phasing_r1\n if phasing_r2 is not None:\n self.phasing_r2 = phasing_r2\n if pre_phasing_r1 is not None:\n self.pre_phasing_r1 = pre_phasing_r1\n if pre_phasing_r2 is not None:\n self.pre_phasing_r2 = pre_phasing_r2\n if projected_total_yield is not None:\n self.projected_total_yield = projected_total_yield\n if reads_pf_total is not None:\n self.reads_pf_total = reads_pf_total\n if reads_total is not None:\n self.reads_total = reads_total\n 
if yield_total is not None:\n self.yield_total = yield_total\n if clusters is not None:\n self.clusters = clusters\n if clusters_pf is not None:\n self.clusters_pf = clusters_pf\n if cluster_density is not None:\n self.cluster_density = cluster_density\n if occupancy is not None:\n self.occupancy = occupancy\n if percent_loading_concentration is not None:\n self.percent_loading_concentration = percent_loading_concentration", "def _get_data(self) -> dict:\n LOGGER.debug(f\"Setting data property for {self.dirname}\")\n data = {}\n for axis in range(1, 4):\n # Subsample by 8 since this does not vary quickly\n data[f\"aoatter{axis}\"] = (\n self.tlm[f\"aoatter{axis}\"].vals[::ATT_ERR_SUBSAMP].astype(np.float32)\n )\n data[\"aokalstr\"] = self.tlm[\"aokalstr\"].vals\n # fmt: off\n data[\"npnt_kalm\"] = (\n (self.tlm[\"aopcadmd\"].vals == \"NPNT\")\n & (self.tlm[\"aoacaseq\"].vals == \"KALM\")\n )\n # fmt: on\n for slot in range(8):\n data[f\"aca_track{slot}\"] = self.tlm[f\"aoacfct{slot}\"].vals == \"TRAK\"\n data[f\"aca_ir{slot}\"] = self.tlm[f\"aoaciir{slot}\"].vals == \"ERR\"\n data[\"times\"] = self.tlm[\"aokalstr\"].times\n data[\"perigee_times\"] = self.tlm.perigee_times.astype(np.float32)\n data[\"perigee\"] = self.perigee.date\n data[\"rad_entry\"] = self.rad_entry.date\n data[\"rad_exit\"] = self.rad_exit.date\n data[\"obss\"] = self.obss.as_array()\n\n return data", "def build_data(self):\n return self.mean, self.sigma", "def _collect_params(self) -> np.ndarray:\n res = np.array([0.]*(self.dimensions))\n res[0] = self.model.rbf.variance\n res[1:-1] = self.model.rbf.lengthscale\n res[-1] = self.model.Gaussian_noise.variance\n return res", "def getMeasures(unique_name=None):", "def measure(self, lastMeasure=None, m=None):\n if m is None:\n m = {}\n m['_time'] = time.time()\n if lastMeasure is not None:\n m['_stepDuration'] = time.time() - lastMeasure['_time']\n else:\n m['_stepDuration'] = time.time() - self._start_t\n self._msr(m)\n return m", "def _get_metadata(self): \n metadata = {'DATA_TYPE':'Fourier Climatology'} \n \n area_bounds = self._area_inst.get_cube_area_bounds(self.cube, \n self.xy_coords)\n x_bounds = [area_bounds[self._area_inst.x_min], \n area_bounds[self._area_inst.x_max]]\n y_bounds = [area_bounds[self._area_inst.y_min], \n area_bounds[self._area_inst.y_max]]\n \n metadata['VARIABLE'] = self.cube.name()\n metadata['UNITS'] = str(self.cube.units)\n metadata['INITIALISATION_DATES'] = self.cube_init_dates\n metadata['DATES'] = self.cube_dates\n metadata[self.xy_coords[0].upper()+'_BOUNDS'] = x_bounds\n metadata[self.xy_coords[-1].upper()+'_BOUNDS'] = y_bounds\n \n # Find additional coordinates in cube and add them to metadata.\n for coord in self.cube.coords():\n if coord.name() not in self.unwanted_coords and \\\n coord.name() not in self._required_coords and \\\n coord.name() not in self.xy_coords:\n metadata[coord.name().upper()] = coord.points\n \n bound_names = [self.xy_coords[0].upper()+'_BOUNDS',\n self.xy_coords[-1].upper()+'_BOUNDS']\n \n return self.MetaData(metadata, bound_names)", "def __init__(self):\n\n # self.threshold = 3.\n self.gamma_min = 3\n self.gamma_max = 12\n self.n_samples = 40\n # self.do_plots = False\n # self.do_albedo = True\n # self.verbose = True\n\n self.nbands = 7\n self.bu = np.array([0.004, 0.015, 0.003, 0.004, 0.013, 0.010, 0.006])\n\n # Determine 250 or 500 meters product\n # self.resolution = 500\n\n # self.pixelWidth = 500\n # self.pixelHeight = 500", "def smp_dict():\n out = base_dict()\n out['mro']['current'] = ['Sample']\n 
out['name']['current'] = 'Sample'\n ao(out, 'idx', 'Integer', attr=['Hidden'])\n ao(out, 'ii', 'Integer', attr=['Hidden'])\n ao(out, 'initialDimension', 'Float', 0., name='Initial Dimension')\n return out", "def _primary_beam(self, hdr):\n # Called ApPrimaryNano in OpenMIMS\n d = {}\n start_position = hdr.tell()\n d['source'], d['current start'], d['current end'], d['Lduo'], d['L1'] = \\\n unpack(self._bo + '8s 4i', hdr.read(24))\n\n # Each widths list is 10 ints long\n d['Dduo'] = unpack(self._bo + 'i', hdr.read(4))[0]\n d['Dduo widths'] = tuple(unpack(self._bo + '10i', hdr.read(40)))\n d['D0'] = unpack(self._bo + 'i', hdr.read(4))[0]\n d['D0 widths'] = tuple(unpack(self._bo + '10i', hdr.read(40)))\n d['D1'] = unpack(self._bo + 'i', hdr.read(4))[0]\n d['D1 widths'] = tuple(unpack(self._bo + '10i', hdr.read(40)))\n\n # 4 bytes unused\n hdr.seek(4, 1)\n d['raster'], d['oct45'], d['oct90'], d['E0P'], \\\n d['pressure analysis chamber'] = \\\n unpack(self._bo + '4d 32s', hdr.read(64))\n\n d['source'] = self._cleanup_string(d['source'])\n d['pressure analysis chamber'] = self._cleanup_string(d['pressure analysis chamber'])\n\n if self.header['analysis version'] >= 3:\n d['L0'] = unpack(self._bo + 'i', hdr.read(4))[0]\n if self.header['analysis version'] >= 4:\n d['hv cesium'], d['hv duo'] = unpack(self._bo + '2i', hdr.read(8))\n # DCs not in OpenMIMS; only in certain release/version?\n d['Dcs'] = unpack(self._bo + 'i', hdr.read(4))[0]\n d['Dcs widths'] = tuple(unpack(self._bo + '10i', hdr.read(40)))\n\n # skip bytes until total read in this function is 552\n # OpenMIMS: size_Ap_primary_nano = 552\n # Newer versions have rest filled with \\xCC continuation bytes, but\n # older versions have null-bytes, but not all bytes are null!!\n # The numbers do not seem to represent anything, though, so can be skipped.\n hdr.seek(start_position + 552)\n return d", "def __init__(self):\n self.index = 'r11_07_06c'\n self.parameters = {'run_index': 'r11_07_06c',\n 'h_1': 0.25,\n 'rho_0': 1.150,\n 'rho_1': 1.100,\n 'rho_2': 1.000,\n 'alpha': 0.5,\n 'D': 0.4,\n 'H': 0.25,\n 'sample': 1.0,\n 'perspective': 'old'}\n self.run_data = {'run_index': 'r11_07_06c',\n 'l0x': 2796,\n 'l0y': 1151,\n 'lsx': 2793,\n 'lsy': 716,\n 'j10x': 210,\n 'j10y': 1165,\n 'j1sx': 208,\n 'j1sy': 727,\n 'leakage': -76,\n 'odd_1': 'n',\n 'j20x': 2728,\n 'j20y': 1086,\n 'j2sx': 2730,\n 'j2sy': 670,\n 'r0x': 1097,\n 'r0y': 1095,\n 'rsx': 1093,\n 'rsy': 683,\n 'odd_2': 'n'}\n self.raw_image = 'tests/data/synced/r11_07_06c/cam1/img_0001.jpg'\n self.bc_image = 'tests/data/bc/r11_07_06c/cam1/img_0001.jpg'\n self.processed_path = 'tests/data/processed_ref/r11_07_06c/cam1/img_0001.jpg'", "def _magsamples(self):\n if self._derived_properties[\"magsamples\"] is None:\n if self.lbda is None:\n raise AttributeError(\"lbda not set.\")\n self.derive_magsamples()\n \n return self._derived_properties[\"magsamples\"]", "def prep(self):\n print\n print 'Filtering rawdata to data as masked array...'\n# using 0 as flag\n# self.data = n.ma.masked_array(self.rawdata[:self.nints,:, self.chans,:], self.rawdata[:self.nints,:, self.chans,:] == 0j)\n# using standard flags\n self.data = n.ma.masked_array(self.rawdata[:self.nints,:, self.chans,:], self.flags[:self.nints,:, self.chans,:] == 0) # mask of True for flagged data (flags=0 in tpipe, which is flags=False in Miriad and flags=True in MS)\n self.dataph = (self.data.mean(axis=3).mean(axis=1)).real #dataph is summed and detected to form TP beam at phase center, multi-pol\n self.min = self.dataph.min()\n self.max 
= self.dataph.max()\n print 'Shape of data:'\n print self.data.shape\n print 'Dataph min, max:'\n print self.min, self.max\n\n self.freq = self.freq_orig[self.chans]\n\n self.track0 = self.track(0.)\n self.twidth = 0\n for k in self.track0[1]:\n self.twidth = max(self.twidth, len(n.where(n.array(self.track0[1]) == k)[0]))\n\n print 'Track width in time: %d. Iteration could step by %d/2.' % (self.twidth, self.twidth)", "def prepare_raw_data(self, idx: int):\n info = super().prepare_raw_data(idx)\n if self.cache_reader is not None:\n self.human_data = self.cache_reader.get_item(idx)\n idx = idx % self.cache_reader.slice_size\n\n if 'smplx' in self.human_data:\n smplx_dict = self.human_data['smplx']\n info['has_smplx'] = 1\n else:\n smplx_dict = {}\n info['has_smplx'] = 0\n if 'global_orient' in smplx_dict:\n info['smplx_global_orient'] = smplx_dict['global_orient'][idx]\n info['has_smplx_global_orient'] = 1\n else:\n info['smplx_global_orient'] = np.zeros((3), dtype=np.float32)\n info['has_smplx_global_orient'] = 0\n\n if 'body_pose' in smplx_dict:\n info['smplx_body_pose'] = smplx_dict['body_pose'][idx]\n info['has_smplx_body_pose'] = 1\n else:\n info['smplx_body_pose'] = np.zeros((21, 3), dtype=np.float32)\n info['has_smplx_body_pose'] = 0\n\n if 'right_hand_pose' in smplx_dict:\n info['smplx_right_hand_pose'] = smplx_dict['right_hand_pose'][idx]\n info['has_smplx_right_hand_pose'] = 1\n else:\n info['smplx_right_hand_pose'] = np.zeros((15, 3), dtype=np.float32)\n info['has_smplx_right_hand_pose'] = 0\n\n if 'left_hand_pose' in smplx_dict:\n info['smplx_left_hand_pose'] = smplx_dict['left_hand_pose'][idx]\n info['has_smplx_left_hand_pose'] = 1\n else:\n info['smplx_left_hand_pose'] = np.zeros((15, 3), dtype=np.float32)\n info['has_smplx_left_hand_pose'] = 0\n\n if 'jaw_pose' in smplx_dict:\n info['smplx_jaw_pose'] = smplx_dict['jaw_pose'][idx]\n info['has_smplx_jaw_pose'] = 1\n else:\n info['smplx_jaw_pose'] = np.zeros((3), dtype=np.float32)\n info['has_smplx_jaw_pose'] = 0\n\n if 'betas' in smplx_dict:\n info['smplx_betas'] = smplx_dict['betas'][idx]\n info['has_smplx_betas'] = 1\n else:\n info['smplx_betas'] = np.zeros((self.num_betas), dtype=np.float32)\n info['has_smplx_betas'] = 0\n\n if 'expression' in smplx_dict:\n info['smplx_expression'] = smplx_dict['expression'][idx]\n info['has_smplx_expression'] = 1\n else:\n info['smplx_expression'] = np.zeros((self.num_expression),\n dtype=np.float32)\n info['has_smplx_expression'] = 0\n\n return info", "def measurements(self) -> NONEARRAY:\n\n return self._measurements", "def get_scan_data(self):\n self.metric_name = self.scan_file_dict['results'][0]['metric']\n data = {}\n data['metric_vals'] = []\n for result in self.scan_file_dict['results']:\n data['metric_vals'].append(result['metric_val'])\n for param_key in result['params'].keys():\n if not result['params'][param_key]['is_fixed']:\n if param_key not in data.keys():\n data[param_key] = {}\n data[param_key]['vals'] = []\n data[param_key]['units'] = \\\n result['params'][param_key]['prior']['units']\n data[param_key]['prior'] = \\\n result['params'][param_key]['prior']\n data[param_key]['vals'].append(\n result['params'][param_key]['value'][0]\n )\n\n if self.best_fit_dict is not None:\n best_fit_data = {}\n best_fit_data['metric_val'] = self.best_fit_dict['metric_val']\n for param_key in self.best_fit_dict['params'].keys():\n if not self.best_fit_dict['params'][param_key]['is_fixed']:\n best_fit_data[param_key] = {}\n best_fit_data[param_key]['val'] = \\\n 
self.best_fit_dict['params'][param_key]['value'][0]\n best_fit_data[param_key]['units'] = \\\n self.best_fit_dict['params'][param_key]['value'][1]\n # Make a list of shifted metrics based on this best fit point\n data['shifted_metric_vals'] = []\n for val in data['metric_vals']:\n data['shifted_metric_vals'].append(\n val-best_fit_data['metric_val']\n )\n else:\n best_fit_data = None\n\n if self.projection_dicts is not None:\n self.proj_bin_names = []\n self.proj_bin_edges = []\n self.proj_bin_cens = []\n self.proj_bin_units = []\n self.projection_data = []\n for projection_dict in self.projection_dicts:\n projection_data = {}\n proj_bin_cens, proj_bin_edges, \\\n proj_bin_names, proj_bin_units = \\\n self.get_scan_steps(scandict=projection_dict)\n if len(proj_bin_names) != 1:\n raise ValueError(\n \"Projection files should be 1D scans. \"\n \"Got %i.\"%len(proj_bin_names)\n )\n if proj_bin_names[0] not in self.all_bin_names:\n raise ValueError(\n \"Projection file was over %s which is \"\n \"not in the 2D scan over %s.\"%(\n proj_bin_names[0], self.all_bin_names)\n )\n else:\n self.proj_bin_names.append(proj_bin_names[0])\n self.proj_bin_edges.append(proj_bin_edges[0])\n self.proj_bin_cens.append(proj_bin_cens[0])\n self.proj_bin_units.append(proj_bin_units[0])\n projection_data['metric_vals'] = []\n for result in projection_dict['results']:\n projection_data['metric_vals'].append(result['metric_val'])\n for param_key in result['params'].keys():\n if not result['params'][param_key]['is_fixed']:\n if param_key not in projection_data.keys():\n projection_data[param_key] = {}\n projection_data[param_key]['vals'] = []\n projection_data[param_key]['units'] = \\\n result['params'][\n param_key]['prior']['units']\n projection_data[param_key]['prior'] = \\\n result['params'][param_key]['prior']\n projection_data[param_key]['vals'].append(\n result['params'][param_key]['value'][0]\n )\n if best_fit_data is not None:\n projection_data['shifted_metric_vals'] = []\n for val in projection_data['metric_vals']:\n projection_data['shifted_metric_vals'].append(\n val-best_fit_data['metric_val']\n )\n self.projection_data.append(projection_data)\n else:\n self.projection_data = None\n\n if self.contour_dicts is not None:\n for contour_dict in self.contour_dicts:\n if not sorted(self.all_bin_names) == \\\n sorted(contour_dict['vars']):\n special_vars = sorted(['sin2theta23', 'deltam32'])\n special_bins = sorted(['theta23', 'deltam31'])\n good_contour = \\\n (sorted(self.all_bin_names) == special_bins) and \\\n (sorted(contour_dict['vars']) == special_vars)\n else:\n good_contour = True\n if not good_contour:\n raise ValueError(\n \"Contour variables - %s - do not match \"\n \"the scan variables - %s.\"%(\n contour_dict['vars'], self.all_bin_names\n )\n )\n\n self.data = data\n self.best_fit_data = best_fit_data", "def __init__(self, height = None, width = None, ratio=None, type=None):\n \n self.dF = []\n self.feature = []\n self.Class = []\n self.featureNumpy = []\n self.ClassNumpy = []\n \n self.model = []\n \n self.fTrain = []\n self.fTest = []\n self.cTrain = []\n self.cTest = []", "def __init__(self):\n self.bpf_lcut = 10\n self.bpf_hcut = 425\n self.lpf_lcut = 5\n self.lp_butter_order = 4\n self.bp_butter_order = 2\n self.data_rate = None\n self.process_time = []", "def profile(self):\n\n return dict(width=self.width, height=self.height, crs=self.crs, \n interleave=self.interleave, resampling=self.resampling)", "def init_metrics(self):\n\n self.metrics = {}\n\n self.metrics['train_loss'] = 
np.zeros(0)\n self.metrics['test_loss'] = np.zeros(0)\n\n # self.orth_clf = LinearDecoder(self, self.q_, MeanClassifier)\n # self.metrics['train_orthogonality'] = np.zeros(0)\n # self.metrics['test_orthogonality'] = np.zeros(0)\n\n self.metrics['train_parallelism'] = np.zeros((0,self.q_)) \n self.metrics['test_parallelism'] = np.zeros((0,self.q_))", "def __init__(self):\r\n super().__init__()\r\n self._name = \"PICOSCOPE2408b\"\r\n self._lib = None\r\n self._handle = None\r\n self._run_lock = Lock()\r\n self._driver_lock = Lock()\r\n\r\n self._sampling_time = 4E-9\r\n self._sampling_duration = 50E-6\r\n self._pulse_time = 100E-9\r\n self._samples = int(self._sampling_duration / self._sampling_time)\r\n self._idx = 0\r\n\r\n w_len = self._samples\r\n location = 0.1\r\n idx1 = int(w_len*(location - self._pulse_time/(2*self._sampling_duration)))\r\n idx2 = int(w_len*(location + self._pulse_time/(2*self._sampling_duration))) - 1\r\n self._waveform = np.array([-1*MAX_EXT if (i < idx1 or i >= idx2) else MAX_EXT for i in range(w_len)],dtype=c_int16)\r\n\r\n self._A_data = np.ones(self._samples)*2\r\n self._B_data = np.ones(self._samples)*-2\r\n self._C_data = np.ones(self._samples)*0\r\n self._window_est = np.ones(self._samples)*0\r\n self._t = np.linspace(0,self._sampling_duration,self._samples)\r\n self._range_A = None\r\n self._range_B = None\r\n self._depol_ratio = None\r\n\r\n self._process_queue = Queue()\r\n self._save_queue = Queue()", "def __init__(self):\n am.AbstractMeasurement.__init__(self)\n self.face_mesh = mp_face_mesh.FaceMesh(\n min_detection_confidence=0.5, min_tracking_confidence=0.5)\n self.drawing_spec = mp_drawing.DrawingSpec(thickness=1, circle_radius=1)", "def __init__(self, meas, verb,pvsr, default_site,delete_created_measurements,pvsr_default_conf_check_cycle,pvsr_meas_types):\r\n \r\n logging.info(\"adding capability: {0}\".format(meas[\"name\"]))\r\n \r\n self._verb=verb\r\n if verb==mplane.model.VERB_QUERY:\r\n cap = mplane.model.Capability(label=meas[\"name\"]+\"-query\", when = \"past ... now / 15s\", verb=mplane.model.VERB_QUERY)\r\n elif verb==mplane.model.VERB_MEASURE:\r\n cap = mplane.model.Capability(label=meas[\"name\"]+\"-measure\", when = \"now ... 
future / 15s\", verb=mplane.model.VERB_MEASURE)\r\n else:\r\n raise ValueError(\"Verb is not supported: {0}\".format(verb))\r\n cap.add_result_column(\"time\")\r\n \r\n self._mplane2uda={}\r\n self._uda_name2uda = {}\r\n \r\n self._pvsr_default_conf_check_cycle=pvsr_default_conf_check_cycle\r\n \r\n try:\r\n for k in sorted(meas[\"types\"].keys()):\r\n if \"first\" in meas[\"types\"][k]:\r\n logging.debug(\" result colum: {0}\".format(meas[\"types\"][k][\"first\"]))\r\n cap.add_result_column(meas[\"types\"][k][\"first\"])\r\n if \"second\" in meas[\"types\"][k]:\r\n logging.debug(\" result colum: {0}\".format(meas[\"types\"][k][\"second\"]))\r\n cap.add_result_column(meas[\"types\"][k][\"second\"])\r\n \r\n if \"PropertyType\" in pvsr_meas_types[k]:\r\n for i in range(len(pvsr_meas_types[k][\"PropertyType\"])):\r\n self._uda_name2uda[pvsr_meas_types[k][\"PropertyType\"][i][\"Name\"]]=pvsr_meas_types[k][\"PropertyType\"][i]\r\n \r\n if \"index_mplane_name\" in meas:\r\n logging.debug(\" parameter: {0}\".format(meas[\"index_mplane_name\"]))\r\n cap.add_parameter(meas[\"index_mplane_name\"])\r\n \r\n if \"mplane_constants\" in meas:\r\n for k,v in sorted(meas[\"mplane_constants\"].items()):\r\n logging.debug(\" parameter: {0} with value {1}\".format(k,v))\r\n cap.add_parameter(k,v)\r\n \r\n if \"uda_name2mplane_name\" in meas:\r\n for k,v in sorted(meas[\"uda_name2mplane_name\"].items()):\r\n if k in self._uda_name2uda:\r\n logging.debug(\" parameter: {0}\".format(v))\r\n cap.add_parameter(v)\r\n self._mplane2uda[v]=k\r\n else:\r\n logging.error(\" unknown UDA: {0}\".format(v))\r\n except Exception as e:\r\n logging.critical(\"Error during capability creation: {0}\".format(e))\r\n raise e\r\n\r\n super(PvsrService, self).__init__(cap)\r\n \r\n self._pvsr = pvsr\r\n self._meas = meas\r\n self._default_site = default_site\r\n self._delete_created_measurements = delete_created_measurements\r\n self._pvsr_meas_types = pvsr_meas_types", "def _build_parsed_values(self):\n match = SAMPLE_REGEX.match(self.raw_data)\n \n if not match:\n raise SampleException(\"No regex match of parsed sample data: [%s]\" %\n self.decoded_raw)\n \n log.trace(\"Matching sample [%s], [%s], [%s], [%s], [%s], [%s], [%s], [%s], [%s], [%s], [%s], [%s]\",\n match.group(1),match.group(2),match.group(3),match.group(4),match.group(5),\n match.group(6),match.group(7),match.group(8),match.group(9),match.group(10),\n match.group(11),match.group(12))\n res_5 = float(match.group(1))\n res_x1 = float(match.group(2))\n res_x5 = float(match.group(3))\n h_5 = float(match.group(4))\n h_x1 = float(match.group(5))\n h_x5 = float(match.group(6))\n eh = float(match.group(7))\n ref_temp_v = float(match.group(8))\n ref_temp_c = float(match.group(9))\n res_temp_v = float(match.group(10))\n res_temp_c = float(match.group(11))\n batt_v = float(match.group(12))\n \n \n result = [{DataParticleKey.VALUE_ID: BarsDataParticleKey.RESISTIVITY_5,\n DataParticleKey.VALUE: res_5},\n {DataParticleKey.VALUE_ID: BarsDataParticleKey.RESISTIVITY_X1,\n DataParticleKey.VALUE: res_x1},\n {DataParticleKey.VALUE_ID: BarsDataParticleKey.RESISTIVITY_X5,\n DataParticleKey.VALUE: res_x5},\n {DataParticleKey.VALUE_ID: BarsDataParticleKey.HYDROGEN_5,\n DataParticleKey.VALUE: h_5},\n {DataParticleKey.VALUE_ID: BarsDataParticleKey.HYDROGEN_X1,\n DataParticleKey.VALUE: h_x1},\n {DataParticleKey.VALUE_ID: BarsDataParticleKey.HYDROGEN_X5,\n DataParticleKey.VALUE: h_x5},\n {DataParticleKey.VALUE_ID: BarsDataParticleKey.EH_SENSOR,\n DataParticleKey.VALUE: eh},\n 
{DataParticleKey.VALUE_ID: BarsDataParticleKey.REFERENCE_TEMP_VOLTS,\n DataParticleKey.VALUE: ref_temp_v},\n {DataParticleKey.VALUE_ID: BarsDataParticleKey.REFERENCE_TEMP_DEG_C,\n DataParticleKey.VALUE: ref_temp_c},\n {DataParticleKey.VALUE_ID: BarsDataParticleKey.RESISTIVITY_TEMP_VOLTS,\n DataParticleKey.VALUE: res_temp_v},\n {DataParticleKey.VALUE_ID: BarsDataParticleKey.RESISTIVITY_TEMP_DEG_C,\n DataParticleKey.VALUE: res_temp_c},\n {DataParticleKey.VALUE_ID: BarsDataParticleKey.BATTERY_VOLTAGE,\n DataParticleKey.VALUE: batt_v}\n ]\n \n return result", "def build(self):\n if not hasattr(self, 'subtitle'):\n self.subtitle = self.data_code['subtitle']\n #print('ntimes=%s nelements=%s ntotal=%s subtitle=%s' % (\n #self.ntimes, self.nelements, self.ntotal, self.subtitle))\n if self.is_built:\n return\n nnodes = 1\n\n #self.names = []\n #self.nelements //= nnodes\n self.nelements //= self.ntimes\n self.ntotal = self.nelements * nnodes * 2\n #self.ntotal\n self.itime = 0\n self.ielement = 0\n self.itotal = 0\n #print('ntotal=%s ntimes=%s nelements=%s' % (self.ntotal, self.ntimes, self.nelements))\n\n #print(\"ntimes=%s nelements=%s ntotal=%s\" % (self.ntimes, self.nelements, self.ntotal))\n self._times = np.zeros(self.ntimes, 'float32')\n #self.ntotal = self.nelements * nnodes\n\n self.element_node = np.zeros((self.ntotal, 2), 'int32')\n\n # the number is messed up because of the offset for the element's properties\n if not self.nelements * nnodes * 2 == self.ntotal:\n msg = 'ntimes=%s nelements=%s nnodes=%s ne*nn=%s ntotal=%s' % (\n self.ntimes, self.nelements, nnodes, self.nelements * nnodes,\n self.ntotal)\n raise RuntimeError(msg)\n\n # [angle, sc, sd, se, sf]\n self.data = np.zeros((self.ntimes, self.ntotal, 5), 'complex64')", "def init_data_array(self, mess = None): \n if self.verbose > 1:\n print(\"MultiLinearSpectra.init_data_array()\") \n \n if mess is None:\n if self.mess is None:\n warnings.warn(\"MultiLinearSpectra.init_data_array(): no data to initialize\")\n return None\n else:\n self.mess = mess\n \n\n \n \n for m in range(len(self.mess)):\n \n self.mess[m][\"index\"] = m\n \n kwargs = {}\n for k, v in self.mess[m].items():\n kwargs[k] = v\n \n if self.mess[m][\"class\"] == \"PASGas\" and flag_ST:\n self.mess[m][\"object\"] = PASG.PASGas(verbose = self.verbose, **kwargs)\n\n elif self.mess[m][\"class\"] == \"PASLiquid\" and flag_ST:\n self.mess[m][\"object\"] = PASL.PASLiquid(verbose = self.verbose, **kwargs)\n\n\n # x_unit = self.mess[0].x_unit\n # y_unit = self.mess[0].y_unit\n\n # for m in range(1, len(self.mess)):\n # if x_unit != self.mess[m].x_unit:\n # self.mess.x_unit", "def _fill_meas_result(self,meas,from_time,to_time,meas_data):\r\n input=self._pvsr.create_pvsr_object(\"GetMeasuredValuesInput\")\r\n input.ObjType = \"Measurement\"\r\n input.ObjId = meas.Id\r\n input.From = datetime.datetime.fromtimestamp(from_time)\r\n input.To = datetime.datetime.fromtimestamp(to_time)\r\n logging.info(\"Get values, eq: {0}, type: {1}, index: {2}, name: {3}, {4} -> {5}\".format(self._meas[\"equipment\"],meas.Type,meas.Index,meas.DescriptionToShow,input.From,input.To))\r\n meas_res=self._pvsr.getMeasuredValues(input)\r\n \r\n index2mplane_name={}\r\n multiply = None\r\n if \"first\" in self._meas[\"types\"][meas.Type]:\r\n index2mplane_name[0]=self._meas[\"types\"][meas.Type][\"first\"]\r\n if \"second\" in self._meas[\"types\"][meas.Type]:\r\n index2mplane_name[1]=self._meas[\"types\"][meas.Type][\"second\"]\r\n if \"multiply\" in self._meas[\"types\"][meas.Type]:\r\n 
multiply=int(self._meas[\"types\"][meas.Type][\"multiply\"])\r\n\r\n if hasattr(meas_res,\"D\"):\r\n for d in meas_res.D:\r\n if d.T not in meas_data:\r\n meas_data[d.T]={}\r\n for index,mplane_name in index2mplane_name.items():\r\n if index < len(d.V):\r\n if multiply is not None:\r\n d.V[index]*=multiply\r\n meas_data[d.T][mplane_name]=d.V[index]\r\n else:\r\n meas_data[d.T][mplane_name]=None", "def test_get_voltage_maps(self):\n pass", "def setup(self):\n cls = type(\"Timings\", (Structure,),\n {\"_fields_\": [(n, c_double) for n in self._timers]})\n self._C_timings = cls()\n return byref(self._C_timings)", "def __init__(self, hwPlatform, mode = \"class\", cps = False, flags = False, debug = False, quiet = False, time = 0, stg = None):\n \n # Constants and tunable parameters\n self.__c_ct_slow = 22 # Number of samples in slow mode.\n self.__c_ct_fast = 4 # Number of samples in fast mode.\n \n # Flags\n self.f_accum = 0x03 # Bit position of accumulator flags. \n self.f_accum_unk = 0x00 # This flag means we don't have any accumulator info.\n self.f_accum_accum = 0x01 # This flag means we're still accumulating data.\n self.f_accum_complete = 0x02 # This flag means we have a full sample to average from.\n \n self.f_trend = 0x1c # Bit position of the trend flag.\n self.f_trend_unk = 0x00 # Unknown trend.\n self.f_trend_up = 0x04 # Readings increasing.\n self.f_trend_dn = 0x08 # Readings decreasing.\n self.f_trend_stable = 0x10 # Readings stable\n \n self.f_mode = 0xe0 # Bit position of the mode flag. This is large because we want to support more modes in the future.\n self.f_mode_fast = 0x00 # Fast averaging mode.\n self.f_mode_slow = 0x20 # Slow averaging mode.\n self.f_mode_counter = 0x40 # Counter mode.\n self.f_mode_scaler = 0x80 # Scaler mode.\n \n # Set up storage:\n self.__stg = stg\n \n # Set up hardware.\n self.__hw = hwPlatform\n \n # Set default flags. By default we're in stream mode unless the CLI tells us differently.\n self.__flags = self.f_accum_unk | self.f_trend_unk | self.f_mode_counter\n \n # Hold samples.\n self.__samples = []\n \n # Set mode.\n self.__mode = mode\n \n self.__cpsOn = False # Show CPS?\n self.__flagsOn = False # Show flags?\n self.__debugOn = False # Debug?\n self.__liveOutput = True # Do we dump data in real time?\n self.__runtime = 0 # How long have we been running?\n self.__accumCts = 0 # How many counts do we have? 
\n self.__timed = False # Are we running in timed mode?\n self.__timeLimit = 0 # What is our time limit\n self.__textOut = True # By default we are quiet.\n \n # Printed timestamp format.\n self.__tsFormat = '%Y-%m-%d %H:%M:%S.%f UTC'\n \n # Set mdoe.\n self.__mode = mode\n \n # If we have activated counts per second set the flag.\n if cps == True:\n self.__cpsOn = True\n \n # Show flags?\n if flags == True:\n self.__flagsOn = True\n \n # Should we debug?\n if debug == True:\n self.__hw.setDebug(True)\n self.__debugOn = True\n \n # Live output?\n if quiet == True:\n self.__textOut = False\n \n # Timer?\n if time > 0:\n self.__timed = True\n self.__timeLimit = time\n \n # Turn hardware text on/off.\n self.__hw.setTextOut(self.__textOut)\n \n # Placeholder\n self.__dtsStart = datetime.datetime.utcnow()\n self.__dtsEnd = datetime.datetime.utcnow()", "def setup_units():\n # The number of tetrodes, probes, etc - any kind of grouping\n num_groups = 8\n\n # The region that each group belongs to\n regions = [\"SUB\"] * num_groups\n\n # A group number for each group, for example the tetrode number\n groups = [1, 2, 3, 4, 9, 10, 11, 12]\n\n output_dict = {\n \"num_groups\": num_groups,\n \"region\": regions,\n \"group\": groups,\n }\n\n return output_dict", "def __init__( self, filename, swathgroup, component, verbose=False ) :\n \n # modules:\n import logging\n import os\n import h5py\n import numpy\n \n # setup logger:\n self.logger = logging.getLogger( 'OMI' )\n \n # store:\n self.filename = filename\n self.component = component\n\n # check ...\n if not os.path.isfile(filename) :\n self.logger.error( 'file not found : %s' % filename )\n raise Exception\n #endif\n \n # open:\n try :\n hid = h5py.File( filename, 'r' )\n except IOError :\n self.logger.error( 'IOError from opening file : %s' % filename )\n raise\n except :\n self.logger.error( 'could not open file : %s' % filename )\n raise\n #endtry\n \n # init result:\n self.swaths = {}\n self.swaths[component] = {}\n \n # form group name:\n compgrpname = '/HDFEOS/SWATHS/%s' % swathgroup\n \n # get group:\n compgrp = hid.get( compgrpname )\n if compgrp == None :\n self.logger.error( 'group \"%s\" not found in %s' % (compgrpname,filename) )\n raise Exception\n #endif\n \n # copy attributes:\n self.swaths[component]['attrs'] = {}\n for key in compgrp.attrs.keys() :\n self.swaths[component]['attrs'][key] = compgrp.attrs[key]\n #endfor\n \n # loop over groups:\n for field in compgrp.keys() :\n # info ...\n if verbose : print( ' %s' % field )\n # init result:\n flds = {}\n # get field group:\n fieldgrp = compgrp.get( field )\n # loop over values:\n for varname in fieldgrp.keys() :\n # testing ...\n #if varname != 'Pressure' : continue\n # info ...\n if verbose : print( ' %s' % varname )\n # init result:\n fld = {}\n # handle to variable:\n varid = fieldgrp.get(varname)\n # info ...\n if verbose :\n print( ' data type : ', varid.dtype )\n print( ' shape : ', varid.shape )\n print( ' attributes:' )\n for key in varid.attrs.keys() :\n print( ' %s = %s' % (key,varid.attrs[key]) )\n #endfor # attributes\n #endfor\n # data; geo-stationair files have (sometimes?) 
only a single time value:\n if len(varid.shape) == 0 :\n values = numpy.array( varid.value )\n else :\n values = varid[:]\n #endif\n # mask for the unfilled values:\n _FillValue = varid.attrs['_FillValue']\n mask = values == _FillValue\n # adhoc fix: also mask the 'nan' values:\n if numpy.any(numpy.isnan(values)) :\n # info ...\n self.logger.warning( ' found NaN values in \"%s\" in \"%s\"' % (varname,filename) )\n # mask also these values:\n mask = mask | numpy.isnan(values)\n #endif\n # created masked array:\n values = numpy.ma.array( data=values, mask=mask )\n # unpack:\n add_offset = varid.attrs['Offset']\n scale_factor = varid.attrs['ScaleFactor']\n values = add_offset + scale_factor * values\n # store:\n fld['data' ] = values\n # extract attribute:\n avalue = varid.attrs['Title']\n if type(avalue) == numpy.ndarray : avalue = avalue[0]\n fld['long_name' ] = avalue\n # extract attribute:\n avalue = varid.attrs['Units']\n if type(avalue) == numpy.ndarray : avalue = avalue[0]\n fld['units'] = avalue.decode('latin1')\n # extract attribute:\n avalue = varid.attrs['_FillValue']\n if type(avalue) == numpy.ndarray : avalue = avalue[0]\n fld['_FillValue'] = avalue\n # fill units from long_name if possible:\n if fld['units'] in ['NoUnits'] :\n # replace if possible:\n if varname == 'CloudRadianceFraction' :\n # problem: longname says '%', but sometimes\n # files are found with with 0-1 (ISOTROP!)\n if numpy.nanmax(fld['data']) <= 1.0 :\n fld['units'] = '1'\n else : \n fld['units'] = '%'\n #endif\n elif ('AirMassFactor' in varname) or \\\n ('AveragingKernel' in varname) or \\\n ('CloudFraction' in varname) or \\\n ('Id' in varname) or \\\n ('Flag' in varname) or \\\n ('Albedo' in varname) or \\\n ('SurfaceAlbedo' in varname) or \\\n ('PressurelevelB' in varname) or \\\n ('TropoPauseLevel' in varname) or \\\n ('volumetric mixing ratio' in fld['long_name']) :\n fld['units'] = '1'\n else :\n self.logger.error( 'could not replace units \"%s\" for variable \"%s\" based on on long_name \"%s\"' \\\n % (fld['units'],varname,fld['long_name']) )\n self.logger.error( 'data range : %f - %f' % (numpy.nanmin(fld['data']),numpy.nanmax(fld['data'])) )\n raise Exception\n #endif\n #endif\n # store:\n flds[varname] = fld\n #endfor # variables\n # store:\n self.swaths[component][field] = flds\n #endfor # field groups\n \n # done:\n hid.close()\n \n # extract time dimension:\n shp = self.swaths[self.component]['Geolocation Fields']['Time']['data'].shape\n # single value for geo, time line for leo:\n if len(shp) == 0 :\n # get shape of longitude field:\n lon_shp = self.swaths[self.component]['Geolocation Fields']['Longitude']['data'].shape\n # assume leading is time:\n self.ntime = lon_shp[0]\n # extend time array:\n time_value = self.swaths[self.component]['Geolocation Fields']['Time']['data']\n self.swaths[self.component]['Geolocation Fields']['Time']['data'] = numpy.ones((self.ntime),time_value.dtype) + time_value\n elif len(shp) == 1 :\n self.ntime = shp[0]\n else :\n self.logger.error( 'expected single value time (GEO) or 1D (LE), found shape : %s' % str(shp) )\n raise Exception\n #endif\n\n # extract scan dimension from longitude array:\n shp = self.swaths[self.component]['Geolocation Fields']['Longitude']['data'].shape\n if len(shp) != 2 :\n self.logger.error( 'expected 2D longitude array, found shape : %s' % str(shp) )\n raise Exception\n #endif\n if shp[0] == self.ntime :\n self.np = shp[1]\n elif shp[1] == self.ntime :\n self.np = shp[0]\n else :\n self.logger.error( 'expected (time,np) or (np,time) shape 
for longitudes;' )\n self.logger.erorr( 'found shape %s while time is %i' % (str(shp),self.ntime) )\n raise Exception\n #endif\n \n # extract level dimension from kernel array:\n shp = self.swaths[self.component]['Data Fields']['AveragingKernel']['data'].shape\n if len(shp) != 3 :\n self.logger.error( 'expected 3D kernel array, found shape : %s' % str(shp) )\n raise Exception\n #endif\n if shp[0:2] == (self.ntime,self.np) :\n self.nlayer = shp[2]\n elif shp[1:3] == (self.ntime,self.np) :\n self.nlayer = shp[0]\n else :\n self.logger.error( 'expected (time,np,nlayer) or (nlayer,time,np) shape for kernel;' )\n self.logger.erorr( 'found shape %s while (time,np) is (%i,%i)' % (str(shp),self.ntime,self.np) )\n raise Exception\n #endif\n \n # loop over all data fields to set dimension names:\n for field in self.swaths[component].keys() :\n # skip some ...\n if field == 'attrs' : continue\n # variables in field group:\n for varname in self.swaths[component][field].keys() :\n # get shape:\n shp = self.swaths[component][field][varname]['data'].shape\n # search known combinations:\n if shp == (self.ntime,) :\n dimnames = ('time',)\n elif shp == (self.ntime,self.np) :\n dimnames = ('time','pixel')\n elif shp == (4,self.ntime,self.np) :\n dimnames = ('corner','time','pixel')\n elif shp == (self.ntime,self.np,self.nlayer) :\n dimnames = ('time','pixel','layer')\n elif shp == (self.ntime,self.np,self.nlayer+1) :\n dimnames = ('time','pixel','layer_interface')\n elif shp == (self.nlayer,self.ntime,self.np) :\n dimnames = ('layer','time','pixel')\n elif shp == (self.nlayer,) :\n dimnames = ('layer',)\n else :\n self.logger.error( 'unknown shape %s for variable \"%s\";' % (str(shp),varname) )\n self.logger.error( 'dimensions time %i, pixel %i, layer %i' % (self.ntime,self.np,self.nlayer) )\n raise OmiLevelException\n #endif\n # store:\n self.swaths[component][field][varname]['dimnames'] = dimnames\n #endfor # variables\n #endfor # field groups\n \n #\n # Adhox fix: corners have strange order, convert to mathematical default of counter-clock wise:\n # \n # _ - o 0 _ - o 1\n # 2 o - \\ ---> 2 o - \\ \n # \\ _ - o 1 \\ _ - o 0\n # 3 o - 3 o -\n #\n # loop over corner variables:\n for vname in ['LongitudeCornerpoints','LatitudeCornerpoints'] :\n # copy original:\n values = self.swaths[self.component]['Geolocation Fields'][vname]['data'].copy()\n # swap, dimension order is (corner,scan,pix)\n self.swaths[self.component]['Geolocation Fields'][vname]['data'][0,:,:] = values[1,:,:]\n self.swaths[self.component]['Geolocation Fields'][vname]['data'][1,:,:] = values[0,:,:]\n #endfor", "def measure(self):\n pass", "def fillDetInfo():\n print('here i am')\n # 1. maps of analysis channel to cpd, and pulser monitor channels\n detCH, pMons = {}, {}\n for ds in [0,1,2,3,4,5,6]:\n f = np.load(\"%s/data/ds%d_detChans.npz\" % (os.environ['LATDIR'], ds))\n detCH[ds] = f['arr_0'].item()\n pMons[ds] = f['arr_1'].item()\n\n # 2. 
maps of HV and TRAP threshold settings are stored in the DB.\n # make them global, and move them to the runSettings file.\n # FORMAT: {ds : {'det' : [(run1,val1),(run2,val2)...]} }\n detHV, detTH = {}, {}\n\n # load all possible values, as in settingsMgr\n detDB = db.TinyDB(\"%s/calDB-v2.json\" % dsi.latSWDir)\n detPars = db.Query()\n cal = dsi.CalInfo()\n for ds in [0,1,2,3,4,5,6]:\n # for ds in [0]:\n print(\"scanning ds\",ds)\n detTH[ds] = {}\n detHV[ds] = {}\n for key in cal.GetKeys(ds):\n mod = -1\n if \"m1\" in key: mod = 1\n if \"m2\" in key: mod = 2\n for cIdx in range(cal.GetIdxs(key)):\n\n # load the DB records\n dbKeyTH = \"trapThr_%s_c%d\" % (key, cIdx)\n dbValTH = dsi.getDBRecord(dbKeyTH,calDB=detDB,pars=detPars)\n\n dbKeyHV = \"hvBias_%s_c%d\" % (key, cIdx)\n dbValHV = dsi.getDBRecord(dbKeyHV,calDB=detDB,pars=detPars)\n\n # debug: print the record\n # for val in sorted(dbValTH):\n # if len(dbValTH[val])>0:\n # print(val, dbValTH[val])\n # return\n\n # fill the first value\n if len(detTH[ds])==0:\n detTH[ds] = dbValTH\n detHV[ds] = dbValHV\n continue\n\n # check for new threshold values.\n for cpd in detTH[ds]:\n nOld, nNew = len(detTH[ds][cpd]), len(dbValTH[cpd])\n\n # detector just came online\n if nOld==0 and nNew>0:\n detTH[ds][cpd] = dbValTH[cpd]\n continue\n # detector still offline\n if nOld==0 and nNew==0:\n continue\n # detector just went offline\n if nOld>0 and nNew==0:\n continue\n\n # check last run/trap pair against each new one\n prevRun, prevTH = detTH[ds][cpd][-1][0], detTH[ds][cpd][-1][1]\n for val in dbValTH[cpd]:\n thisRun, thisTH = val[0], val[1]\n if thisTH != prevTH:\n detTH[ds][cpd].append([thisRun,thisTH])\n prevTH = thisTH\n\n # check for new HV values.\n for cpd in detHV[ds]:\n\n nOld, nNew = len(detHV[ds][cpd]), len(dbValHV[cpd])\n\n # detector just came online\n if nOld==0 and nNew>0:\n detHV[ds][cpd] = dbValHV[cpd]\n continue\n # detector still offline\n if nOld==0 and nNew==0:\n continue\n # detector just went offline\n if nOld>0 and nNew==0:\n continue\n\n # check last run/trap pair against each new one\n prevRun, prevHV = detHV[ds][cpd][-1][0], detHV[ds][cpd][-1][1]\n for val in dbValHV[cpd]:\n thisRun, thisHV = val[0], val[1]\n if thisHV != prevHV:\n print(\"found HV diff. 
cpd %d prev %dV (run %d) new %dV (run %d)\" % (cpd, prevHV, prevRun, thisHV, thisRun))\n detHV[ds][cpd].append([thisRun,thisHV])\n prevHV = thisHV\n\n # return\n\n # # load the old file and compare\n # # GOAL: improve on this file.\n # # f = np.load(\"%s/data/runSettings.npz\" % dsi.latSWDir)\n # # detHVOld = f['arr_0'].item()\n # # detTHOld = f['arr_1'].item()\n # # detCHOld = f['arr_2'].item()\n # # pMonsOld = f['arr_3'].item()\n #\n # ds = 3\n # print(\"old results, ds\",ds)\n # for cpd in sorted(detTHOld[ds]):\n # if cpd!=\"122\":continue\n # if len(detTHOld[ds][cpd]) > 0:\n # print(cpd, detTHOld[ds][cpd])\n #\n # # for ds in [0,1,2,3,4,5,6]:\n # print(\"thresh results, ds:\",ds)\n # for cpd in sorted(detTH[ds]):\n # # if cpd!=122:continue\n # if len(detTH[ds][cpd]) > 0:\n # print(cpd, detTH[ds][cpd])\n\n\n np.savez(\"%s/data/runSettings-v2.npz\" % dsi.latSWDir,detHV,detTH,detCH,pMons)", "def __getstate__(self) -> typing.Dict:\n # print(\"[INFO] Get state called\")\n\n state = self.__dict__ # get attribute dictionary\n\n # add the fitted_primitives\n state['fitted_pipe'] = self.runtime.pipeline\n state['pipeline'] = self.pipeline.to_json_structure()\n state['log_dir'] = self.log_dir\n state['id'] = self.id\n del state['runtime'] # remove runtime entry\n\n return state", "def getActualConfig(self) -> object:\n if not self.debug:\n ntraces = int(self.getNumberOfTraces())\n traces = []\n for i in range(1,ntraces+1):\n self.selectTrace(i)\n data = self.getData()\n if i == 1:\n title = \"S11\"\n elif i == 2:\n title = \"S21\"\n elif i == 3:\n title = \"S12\"\n else:\n title = \"S22\"\n trace={\n 'number': i,\n 'xMin': self.getStartFrequency(),\n 'xMax': self.getStopFrequency(),\n 'yMin': self.getmindbm(i),#min([x['y'] for x in data]), #getmindbm(),\n 'yMax': self.getmaxdbm(i),#max([x['y'] for x in data]), #getmaxdbm(),\n 'xScale': \"linear\",#self.getxscale()\n 'yScale': \"linear\",#self.getyscale(),\n 'type': \"bode\",#self.getTypeFormat(),\n 'title': title,#self.getTraceTitle(i),\n 'xLabel': \"Freq [Hz]\",#getxLabel(),\n 'yLabel': \"dBm\", #getyLabel()\n 'data': data,\n 'yPDiv': self.getYPDiv(i)\n }\n traces.append(trace) \n ret = {\n 'traces': traces, \n 'sweepResolution': self.getSweepResolution(),\n 'IFBW': self.getIFBW() \n }\n else:\n trace1 = {\n 'number': 1,\n 'xMin': 100,\n 'xMax': 1000,\n 'yMin': 100,\n 'yMax': 1000,\n 'xScale': 'logarithmic',\n 'yScale': 'logarithmic',\n 'type': 'bode',\n 'title': 'S11',\n 'xLabel': 'Freq',\n 'yLabel': 'dBm',\n 'yPDiv': 10,\n 'data': [\n {'x': 100,'y': 100},\n {'x': 200,'y': 150},\n {'x': 500,'y': 300},\n {'x': 1000,'y': 800}\n ]\n }\n trace2 = {\n 'number': 2,\n 'xMin': 1,\n 'xMax': 100,\n 'yMin': 1,\n 'yMax': 1000,\n 'xScale': 'linear',\n 'yScale': 'linear',\n 'type': 'bode',\n 'title': 'S21',\n 'xLabel': 'Freq',\n 'yLabel': 'dBm',\n 'yPDiv': 10,\n 'data': [\n {'x': 1,'y': 100},\n {'x': 20,'y': 250},\n {'x': 50,'y': 200},\n {'x': 100,'y': 600}\n ]\n }\n trace3 = {\n 'number': 3,\n 'xMin': 500,\n 'xMax': 10000,\n 'yMin': 100,\n 'yMax': 10000,\n 'xScale': 'linear',\n 'yScale': 'logarithmic',\n 'type': 'bode',\n 'title': 'S12',\n 'xLabel': 'Freq',\n 'yLabel': 'dBm',\n 'yPDiv': 10,\n 'data': [\n {'x': 500,'y': 100},\n {'x': 2000,'y': 1000},\n {'x': 5000,'y': 3000},\n {'x': 10000,'y': 8000}\n ]\n }\n trace4 = {\n 'number': 4,\n 'xMin': 100,\n 'xMax': 10000,\n 'yMin': 500,\n 'yMax': 10000,\n 'xScale': 'logarithmic',\n 'yScale': 'linear',\n 'type': 'bode',\n 'title': 'S22',\n 'xLabel': 'Freq',\n 'yLabel': 'dBm',\n 'yPDiv': 10,\n 'data': [\n {'x': 
100,'y': 500},\n {'x': 2000,'y': 5000},\n {'x': 5000,'y': 2000},\n {'x': 10000,'y': 4000}\n ]\n }\n ret = {\n 'traces': [ trace1, trace2, trace3, trace4 ], \n 'sweepResolution': 401,\n 'IFBW': 10000 \n }\n return ret", "def get_suite(robot, number):\n suite = Suite(robot, number)\n measure = {}\n actuate = {}\n import PiBot\n if type(robot) == PiBot.PiBot:\n # Wrapped\n measure['FLL'] = [[robot.get_front_left_laser]]\n measure['FML'] = [[robot.get_front_middle_laser]]\n measure['FRL'] = [[robot.get_front_right_laser]]\n measure['RLS'] = [[robot.get_rear_left_side_ir]]\n measure['RLD'] = [[robot.get_rear_left_diagonal_ir]]\n measure['RLF'] = [[robot.get_rear_left_straight_ir]]\n measure['RRS'] = [[robot.get_rear_right_side_ir]]\n measure['RRD'] = [[robot.get_rear_right_diagonal_ir]]\n measure['RRF'] = [[robot.get_rear_right_straight_ir]]\n measure['LSL'] = [[robot.get_leftmost_line_sensor]]\n measure['LSSL'] = [[robot.get_second_line_sensor_from_left]]\n measure['LSTL'] = [[robot.get_third_line_sensor_from_left]]\n measure['LSR'] = [[robot.get_rightmost_line_sensor]]\n measure['LSSR'] = [[robot.get_second_line_sensor_from_right]]\n measure['LSTR'] = [[robot.get_third_line_sensor_from_right]]\n measure['COMPASS'] = [[robot.get_rotation]]\n measure['LE'] = [[robot.get_left_wheel_encoder]]\n measure['RE'] = [[robot.get_right_wheel_encoder]]\n actuate['LEFT'] = robot.set_left_wheel_speed\n actuate['RIGHT'] = robot.set_right_wheel_speed\n else:\n # Raw\n measure['FLL'] = [[robot._tof_read], [robot.tof_values, 0]]\n measure['FML'] = [[robot._tof_read], [robot.tof_values, 1]]\n measure['FRL'] = [[robot._tof_read], [robot.tof_values, 2]]\n measure['RLS'] = [[robot._adc_read], [robot.sensor, 0]]\n measure['RLD'] = [[robot._adc_read], [robot.sensor, 1]]\n measure['RLF'] = [[robot._adc_read], [robot.sensor, 2]]\n measure['RRS'] = [[robot._adc_read], [robot.sensor, 5]]\n measure['RRD'] = [[robot._adc_read], [robot.sensor, 4]]\n measure['RRF'] = [[robot._adc_read], [robot.sensor, 3]]\n measure['LSL'] = [[robot._adc_read], [robot.sensor, 13]]\n measure['LSSL'] = [[robot._adc_read], [robot.sensor, 12]]\n measure['LSTL'] = [[robot._adc_read], [robot.sensor, 11]]\n measure['LSR'] = [[robot._adc_read], [robot.sensor, 8]]\n measure['LSSR'] = [[robot._adc_read], [robot.sensor, 9]]\n measure['LSTR'] = [[robot._adc_read], [robot.sensor, 10]]\n measure['COMPASS'] = [[lambda: robot._rotation_z]]\n measure['LE'] = [[robot._encoders_get], [robot.encoder, 1]]\n measure['RE'] = [[robot._encoders_get], [robot.encoder, 0]]\n actuate['LEFT'] = robot._motorL_set\n actuate['RIGHT'] = robot._motorR_set\n\n for distance in [10, 20, 30, 40, 50, 60]:\n suite.add(\"Place robot with ToF left laser {} cm from wall\"\n .format(distance),\n \"FLL@{}\".format(distance),\n None,\n [], [], measure['FLL'], [], 5)\n suite.add(\"Place robot with ToF middle laser {} cm from wall\"\n .format(distance),\n \"FML@{}\".format(distance),\n None,\n [], [], measure['FML'], [], 5)\n suite.add(\"Place robot with ToF right laser {} cm from wall\"\n .format(distance),\n \"FRL@{}\".format(distance),\n None,\n [], [], measure['FRL'], [], 5)\n for distance in [4, 8, 12]:\n suite.add(\"Place robot with IR left side {} cm from wall\"\n .format(distance),\n \"RLS@{}\".format(distance),\n None,\n [], [], measure['RLS'], [], 5)\n suite.add(\"Place robot with IR left diagonal {} cm from wall\"\n .format(distance),\n \"RLD@{}\".format(distance),\n None,\n [], [], measure['RLD'], [], 5)\n suite.add(\"Place robot with IR left straight {} cm from wall\"\n 
.format(distance),\n \"RLF@{}\".format(distance),\n None,\n [], [], measure['RLF'], [], 5)\n suite.add(\"Place robot with IR right side {} cm from wall\"\n .format(distance),\n \"RRS@{}\".format(distance),\n None,\n [], [], measure['RRS'], [], 5)\n suite.add(\"Place robot with IR right diagonal {} cm from wall\"\n .format(distance),\n \"RRD@{}\".format(distance),\n None,\n [], [], measure['RRD'], [], 5)\n suite.add(\"Place robot with IR right straight {} cm from wall\"\n .format(distance),\n \"RRF@{}\".format(distance),\n None,\n [], [], measure['RRF'], [], 5)\n for color in [\"white\", \"black\"]:\n suite.add(\"Place robot with leftmost line sensor on {}\"\n .format(color),\n \"LS leftmost@{}\".format(color),\n None,\n [], [], measure['LSL'], [], 5)\n suite.add(\"Place robot with second line sensor from left on {}\"\n .format(color),\n \"LS second from left@{}\".format(color),\n None,\n [], [], measure['LSSL'], [], 5)\n suite.add(\"Place robot with third line sensor from left on {}\"\n .format(color),\n \"LS third from left@{}\".format(color),\n None,\n [], [], measure['LSTL'], [], 5)\n suite.add(\"Place robot with rightmost line sensor on {}\"\n .format(color),\n \"LS rightmost@{}\".format(color),\n None,\n [], [], measure['LSR'], [], 5)\n suite.add(\"Place robot with second line sensor from right on {}\"\n .format(color),\n \"LS second from right@{}\".format(color),\n None,\n [], [], measure['LSSR'], [], 5)\n suite.add(\"Place robot with third line sensor from right on {}\"\n .format(color),\n \"LS third from right@{}\".format(color),\n None,\n [], [], measure['LSTR'], [], 5)\n # Compass test\n suite.add(\"Clear rotation space for compass test\",\n \"compass\",\n actuate['LEFT'],\n [20, 0], [5, 0], measure['COMPASS'], measure['COMPASS'], 3)\n # Motor tests\n speed_list = [8, 10, 12, 15, 18, 20, 24]\n speed_list = speed_list + list(map(lambda x: -x, speed_list))\n for speed in speed_list:\n suite.add(\"Clear space for LEFT motor test at speed {}\".format(speed),\n \"left motor@{}\".format(speed),\n actuate['LEFT'],\n [speed, 0], [4, 0], measure['LE'], measure['LE'], 1)\n for speed in speed_list:\n suite.add(\"Clear space for RIGHT motor test\",\n \"right motor@{}\".format(speed),\n actuate['RIGHT'],\n [speed, 0], [4, 0], measure['RE'], measure['RE'], 1)\n\n return suite", "def __init__(self):\n self.sensor_value = dict()", "def measurements(self):\n return self._measurements", "def _data():\n data = {s: {} for s in systems}\n\n # PbPb2760 and PbPb5020 dNch/deta\n for system, args, name in [\n ('PbPb2760', (880049, 1), 'D(N)/DETARAP'),\n ('PbPb5020', (1410589, 2),\n r'$\\mathrm{d}N_\\mathrm{ch}/\\mathrm{d}\\eta$'),\n ]:\n data[system]['dNch_deta'] = {None: HEPData(*args).dataset(name)}\n\n # PbPb2760 transverse energy\n # ignore bin 0-5 since it's redundant with 0-2.5 and 2.5-5\n dset = HEPData(1427723, 1).dataset('$E_{T}$', ignore_bins=[(0, 5)])\n dset['yerr']['sys'] = dset['yerr'].pop('sys,total')\n data['PbPb2760']['dET_deta'] = {None: dset}\n\n # PbPb2760 identified dN/dy and mean pT\n system = 'PbPb2760'\n\n for obs, table, combine_func in [\n ('dN_dy', 31, np.sum),\n ('mean_pT', 32, np.mean),\n ]:\n data[system][obs] = {}\n d = HEPData(1222333, table)\n for key, re_products in [\n ('pion', ['PI+', 'PI-']),\n ('kaon', ['K+', 'K-']),\n ('proton', ['P', 'PBAR']),\n ]:\n dsets = [\n d.dataset(RE='PB PB --> {} X'.format(i))\n for i in re_products\n ]\n\n data[system][obs][key] = dict(\n dsets[0],\n y=combine_func([d['y'] for d in dsets], axis=0),\n yerr={\n e: combine_func([d['yerr'][e] 
for d in dsets], axis=0)\n for e in dsets[0]['yerr']\n }\n )\n\n # PbPb2760 strange baryon yields\n data['PbPb2760']['dN_dy']['Lambda'] = HEPData(1243863, 23).dataset(\n RE='PB PB --> LAMBDA X'\n )\n\n d = HEPData(1243865, 11)\n for s in ['Xi', 'Omega']:\n data[system]['dN_dy'][s] = d.dataset(\n RE='PB PB --> ({0}- + {0}BAR+) X'.format(s.upper())\n )\n\n # PbPb2760 mean pT fluctuations\n d = HEPData(1307102, 6, reverse=True)\n name = r'$\\sqrt{C_m}/M(p_{\\rm T})_m$'\n # the table only has Npart, but they are actually 5% centrality bins\n width = 5.\n d.cent = [(n*width, (n+1)*width) for n, _ in enumerate(d.y(name))]\n data['PbPb2760']['pT_fluct'] = {None: d.dataset(name, maxcent=60)}\n\n # PbPb2760 and PbPb5020 flows\n for system, tables_nk in [\n ('PbPb5020', [\n (1, [(2, 2), (2, 4)]),\n (2, [(3, 2), (4, 2)]),\n ]),\n ('PbPb2760', [\n (3, [(2, 2), (2, 4)]),\n (4, [(3, 2), (4, 2)]),\n ]),\n ]:\n data[system]['vnk'] = {}\n\n for table, nk in tables_nk:\n d = HEPData(1419244, table)\n for n, k in nk:\n data[system]['vnk'][n, k] = d.dataset(\n 'V{}{{{}{}}}'.format(\n n, k, ', |DELTAETA|>1' if k == 2 else ''\n ),\n maxcent=(70 if n == 2 else 50)\n )\n\n # PbPb2760 central flows vn{2}\n system, obs = 'PbPb2760', 'vnk_central'\n data[system][obs] = {}\n\n for n, table, sys_err_frac in [(2, 11, .025), (3, 12, .040)]:\n dset = HEPData(900651, table).dataset()\n # the (unlabeled) errors in the dataset are actually stat\n dset['yerr']['stat'] = dset['yerr'].pop('sum')\n # sys error is not provided -- use estimated fractions\n dset['yerr']['sys'] = sys_err_frac * dset['y']\n data[system][obs][n, 2] = dset\n\n # PbPb2760 flow correlations\n for obs, table in [\n ('sc', 1),\n ('sc_normed', 2),\n ('sc_central', 3),\n ('sc_normed_central', 4)\n ]:\n d = HEPData(1452590, table)\n data['PbPb2760'][obs] = {\n mn: d.dataset('SC({},{})'.format(*mn))\n for mn in [(3, 2), (4, 2)]\n }\n\n return data", "def _build_parsed_values(self):\n\n SERIAL_NUMBER = \"SerialNumber\"\n DATE_TIME = \"DateTime\"\n LOGGING_STATE = \"LoggingState\"\n EVENT_SUMMARY = \"EventSummary\"\n NUMBER_OF_EVENTS = \"numEvents\"\n POWER = \"Power\"\n MEMORY_SUMMARY = \"MemorySummary\"\n\n # check to make sure there is a correct match before continuing\n match = SBE19StatusParticle.regex_compiled().match(self.raw_data)\n if not match:\n raise SampleException(\"No regex match of parsed status data: [%s]\" %\n self.raw_data)\n\n dom = parseString(self.raw_data)\n root = dom.documentElement\n log.debug(\"root.tagName = %s\", root.tagName)\n serial_number = root.getAttribute(SERIAL_NUMBER)\n date_time = self._extract_xml_element_value(root, DATE_TIME)\n logging_status = self._extract_xml_element_value(root, LOGGING_STATE)\n event_summary = self._extract_xml_elements(root, EVENT_SUMMARY)[0]\n number_of_events = int(event_summary.getAttribute(NUMBER_OF_EVENTS))\n result = [{DataParticleKey.VALUE_ID: SBE19StatusParticleKey.SERIAL_NUMBER,\n DataParticleKey.VALUE: serial_number},\n {DataParticleKey.VALUE_ID: SBE19StatusParticleKey.DATE_TIME,\n DataParticleKey.VALUE: date_time},\n {DataParticleKey.VALUE_ID: SBE19StatusParticleKey.LOGGING_STATE,\n DataParticleKey.VALUE: logging_status},\n {DataParticleKey.VALUE_ID: SBE19StatusParticleKey.NUMBER_OF_EVENTS,\n DataParticleKey.VALUE: number_of_events}]\n\n element = self._extract_xml_elements(root, POWER)[0]\n result.append(self._get_xml_parameter(element, SBE19StatusParticleKey.BATTERY_VOLTAGE_MAIN))\n result.append(self._get_xml_parameter(element, SBE19StatusParticleKey.BATTERY_VOLTAGE_LITHIUM))\n 
result.append(self._get_xml_parameter(element, SBE19StatusParticleKey.OPERATIONAL_CURRENT))\n result.append(self._get_xml_parameter(element, SBE19StatusParticleKey.PUMP_CURRENT))\n result.append(self._get_xml_parameter(element, SBE19StatusParticleKey.EXT_V01_CURRENT))\n result.append(self._get_xml_parameter(element, SBE19StatusParticleKey.SERIAL_CURRENT))\n\n element = self._extract_xml_elements(root, MEMORY_SUMMARY)[0]\n result.append(self._get_xml_parameter(element, SBE19StatusParticleKey.MEMORY_FREE, int))\n result.append(self._get_xml_parameter(element, SBE19StatusParticleKey.NUMBER_OF_SAMPLES, int))\n result.append(self._get_xml_parameter(element, SBE19StatusParticleKey.SAMPLES_FREE, int))\n result.append(self._get_xml_parameter(element, SBE19StatusParticleKey.SAMPLE_LENGTH, int))\n result.append(self._get_xml_parameter(element, SBE19StatusParticleKey.PROFILES, int))\n\n return result", "def __init__(self, fluorescenceSeries, conditionName, conditionSalt, conditionPh, conditiondpHdT, conditionIsControl):\n #name, temperatures, and curve from data\n self.name = fluorescenceSeries.name\n self.temperatures = fluorescenceSeries.index\n self.fluorescence = [x for x in fluorescenceSeries]\n\n stepSize = self.temperatures[1]-self.temperatures[0]\n \n #from the non normalised curve we get the max for each individual curve\n #the overall max on the plate will decide what the monotenicity threshold for the experiment will be\n self.maxNonNormalised = 0\n for x in self.fluorescence:\n if x > self.maxNonNormalised:\n self.maxNonNormalised = x\n \n #================= normalisation happens here ================#\n #the curve is then normalised to have an area below the curve of 1\n count = 0\n for height in self.fluorescence:\n count += height*stepSize\n self.fluorescence = [x / count for x in self.fluorescence]\n #used to calculate the monotenicity threshold\n self.normalisationFactor = count\n \n #from the now normalised curve we get the max and min for each individual curve\n #this is used in complex detection and plotting\n self.maxNormalised = self.maxNonNormalised / count\n self.minNormalised = 1\n for x in self.fluorescence:\n if x < self.minNormalised:\n self.minNormalised = x\n \n #other attributes of the curve are set to false/none until later analysis of the curve\n self.complex = False\n self.mono = False\n #tm and tm error are calulated upon calling the computeTm() method\n self.Tm = None \n self.TmError = None\n \n #the contents of the well is contained in an object of Contents inside well\n self.contents = Contents(conditionName, conditionSalt, conditionPh, conditiondpHdT, conditionIsControl)\n return", "def get_param_scenario3():\n nb_carre_x = 42\n nb_carre_y = 45\n largeur_x = 0.825\n largeur_y = 0.645\n z1 = np.array([[11], [32], [17.26]])\n z2 = np.array([[12], [32], [18.43]])\n z3 = np.array([[13], [32], [23.2]])\n z4 = np.array([[15], [31], [23.4]])\n z5 = np.array([[16], [30], [23.29]])\n z6 = np.array([[18], [29], [22.28]])\n z7 = np.array([[19], [28], [35.79]])\n z8 = np.array([[20], [27], [36.87]])\n z9 = np.array([[21], [26], [33.92]])\n z10 = np.array([[23], [24], [38.11]])\n z11 = np.array([[24], [23], [37.76]])\n z12 = np.array([[25], [22], [45.6]])\n z13 = np.array([[26], [20], [56.4]])\n z14 = np.array([[27], [18], [55.2]])\n z15 = np.array([[28], [16], [57.53]])\n z16 = np.array([[29], [14], [58.64]])\n z17 = np.array([[30], [13], [66.04]])\n z18 = np.array([[30], [11], [70.02]])\n z19 = np.array([[31], [9], [69.64]])\n z20 = np.array([[31], [6], [68.82]])\n\n meas 
= [z1, z2, z3, z4, z5, z6, z7, z8, z9, z10, z11, z12, z13, z14, z15, z16, z17, z18, z19, z20]\n\n z = [np.array([[x[0][0] * largeur_x / nb_carre_x], [x[1][0] * largeur_y / nb_carre_y], [-x[2][0] * np.pi / 180]])\n for x in meas]\n\n # We take a measure every second\n thymio_speed_to_mms = 0.4753\n Ts = 1\n\n Thymio_speed_left = [73, 92, 94, 102, 100, 89, 92, 103, 97, 100, 100, 104, 95, 97, 103, 101, 100, 101, 92, 98]\n Thymio_speed_right = [67, 96, 102, 89, 105, -45, 102, 101, 104, 71, 100, 93, 94, 105, 96, -45, 95, 103, 88, 94]\n delta_sr_test = [x * Ts / thymio_speed_to_mms / 1000 for x in Thymio_speed_right]\n delta_sl_test = [x * Ts / thymio_speed_to_mms / 1000 for x in Thymio_speed_left]\n\n return delta_sr_test, delta_sl_test, z", "def _get_measurements_space(self) -> spaces.Dict:\n # Define some proxies for convenience\n sensors_data = self.robot.sensors_data\n command_limit = self.robot.command_limit\n position_space, velocity_space = self._get_agent_state_space(\n use_theoretical_model=False).values()\n assert isinstance(position_space, spaces.Box)\n assert isinstance(velocity_space, spaces.Box)\n\n # Replace inf bounds of the action space\n for motor_name in self.robot.motors_names:\n motor = self.robot.get_motor(motor_name)\n motor_options = motor.get_options()\n if not motor_options[\"enableCommandLimit\"]:\n command_limit[motor.joint_velocity_idx] = MOTOR_EFFORT_MAX\n\n # Initialize the bounds of the sensor space\n sensor_space_lower = OrderedDict(\n (key, np.full(value.shape, -np.inf))\n for key, value in sensors_data.items())\n sensor_space_upper = OrderedDict(\n (key, np.full(value.shape, np.inf))\n for key, value in sensors_data.items())\n\n # Replace inf bounds of the encoder sensor space\n if encoder.type in sensors_data.keys():\n sensor_list = self.robot.sensors_names[encoder.type]\n for sensor_name in sensor_list:\n # Get the position and velocity bounds of the sensor.\n # Note that for rotary unbounded encoders, the sensor bounds\n # cannot be extracted from the configuration vector limits\n # since the representation is different: cos/sin for the\n # configuration, and principal value of the angle for the\n # sensor.\n sensor = self.robot.get_sensor(encoder.type, sensor_name)\n assert isinstance(sensor, encoder)\n sensor_idx = sensor.idx\n joint = self.robot.pinocchio_model.joints[sensor.joint_idx]\n if sensor.joint_type == jiminy.joint_t.ROTARY_UNBOUNDED:\n sensor_position_lower = -np.pi\n sensor_position_upper = np.pi\n else:\n sensor_position_lower = position_space.low[joint.idx_q]\n sensor_position_upper = position_space.high[joint.idx_q]\n sensor_velocity_limit = velocity_space.high[joint.idx_v]\n\n # Update the bounds accordingly\n sensor_space_lower[encoder.type][0, sensor_idx] = \\\n sensor_position_lower\n sensor_space_upper[encoder.type][0, sensor_idx] = \\\n sensor_position_upper\n sensor_space_lower[encoder.type][1, sensor_idx] = \\\n - sensor_velocity_limit\n sensor_space_upper[encoder.type][1, sensor_idx] = \\\n sensor_velocity_limit\n\n # Replace inf bounds of the effort sensor space\n if effort.type in sensors_data.keys():\n sensor_list = self.robot.sensors_names[effort.type]\n for sensor_name in sensor_list:\n sensor = self.robot.get_sensor(effort.type, sensor_name)\n assert isinstance(sensor, effort)\n sensor_idx = sensor.idx\n motor_idx = self.robot.motors_velocity_idx[sensor.motor_idx]\n sensor_space_lower[effort.type][0, sensor_idx] = \\\n -command_limit[motor_idx]\n sensor_space_upper[effort.type][0, sensor_idx] = \\\n 
+command_limit[motor_idx]\n\n # Replace inf bounds of the imu sensor space\n if self.enforce_bounded_spaces:\n # Replace inf bounds of the contact sensor space\n if contact.type in sensors_data.keys():\n sensor_space_lower[contact.type][:, :] = -SENSOR_FORCE_MAX\n sensor_space_upper[contact.type][:, :] = SENSOR_FORCE_MAX\n\n # Replace inf bounds of the force sensor space\n if force.type in sensors_data.keys():\n sensor_space_lower[force.type][:3, :] = -SENSOR_FORCE_MAX\n sensor_space_upper[force.type][:3, :] = SENSOR_FORCE_MAX\n sensor_space_lower[force.type][3:, :] = -SENSOR_MOMENT_MAX\n sensor_space_upper[force.type][3:, :] = SENSOR_MOMENT_MAX\n\n # Replace inf bounds of the imu sensor space\n if imu.type in sensors_data.keys():\n gyro_imu_idx = [\n field.startswith('Gyro') for field in imu.fieldnames]\n sensor_space_lower[imu.type][gyro_imu_idx, :] = \\\n -SENSOR_GYRO_MAX\n sensor_space_upper[imu.type][gyro_imu_idx, :] = \\\n SENSOR_GYRO_MAX\n\n accel_imu_idx = [\n field.startswith('Accel') for field in imu.fieldnames]\n sensor_space_lower[imu.type][accel_imu_idx, :] = \\\n -SENSOR_ACCEL_MAX\n sensor_space_upper[imu.type][accel_imu_idx, :] = \\\n SENSOR_ACCEL_MAX\n\n return spaces.Dict(OrderedDict(\n (key, spaces.Box(low=min_val, high=max_val, dtype=np.float64))\n for (key, min_val), max_val in zip(\n sensor_space_lower.items(), sensor_space_upper.values())))", "def getMetadata(self):\n\n # keep variables local so they are not stored in memory\n meta, units = self.getDefaultMeta()\n\n # check each available file for header information\n # sequence is important since later calls overwrite earlier ones so if a header is present in \"psd\" and\n # \"data\", the value from \"data\" will be returned\n if self.ts:\n # get header data from file\n metaTmp, unitsTmp = self.ts.getMetadata()\n\n # make sure we don't override important stuff that by accident has the same name\n self.renameKey('nSamples', 'psdNSamples', meta=metaTmp, units=unitsTmp)\n self.renameKey('dt', 'psdDt', meta=metaTmp, units=unitsTmp)\n\n # set time series unit\n unitsTmp['timeseries'] = 'V'\n\n # update the dictionaries with newly found values\n meta.update(metaTmp)\n units.update(unitsTmp)\n\n if self.psd:\n metaTmp, unitsTmp = self.psd.getMetadata()\n\n # make sure we don't override important stuff that by accident has the same name\n # also, 'nSamples' and 'samplingRate' in reality refer to the underlying timeseries data\n self.renameKey('nSamples', 'psdNSamples', meta=metaTmp, units=unitsTmp)\n self.renameKey('dt', 'psdDt', meta=metaTmp, units=unitsTmp)\n\n # set psd unit\n unitsTmp['psd'] = 'V^2 / Hz'\n\n meta.update(metaTmp)\n units.update(unitsTmp)\n\n if self.data:\n metaTmp, unitsTmp = self.data.getMetadata()\n\n # rename variables for the sake of consistency and compatibility with Matlab and because the naming is\n # confusing: samplingRate is actually the acquisition rate since the DAQ card averages the data already\n # the sampling rate should describe the actual time step between data points not something else\n if 'recordingRate' in metaTmp:\n self.renameKey('samplingRate', 'acquisitionRate', meta=metaTmp, units=unitsTmp)\n self.renameKey('recordingRate', 'samplingRate', meta=metaTmp, units=unitsTmp)\n self.renameKey('nSamples', 'nAcquisitionsPerSample', meta=metaTmp)\n\n # add trial number\n metaTmp['trial'] = self.data.getTrialNumber()\n\n # update dictionaries\n meta.update(metaTmp)\n units.update(unitsTmp)\n\n # add title string to metadata, used for plots\n self.setTitle(meta)\n\n # make sure all axes 
have the beadDiameter\n meta['pmY']['beadDiameter'] = meta['pmX']['beadDiameter']\n units['pmY']['beadDiameter'] = units['pmX']['beadDiameter']\n meta['aodY']['beadDiameter'] = meta['aodX']['beadDiameter']\n units['aodY']['beadDiameter'] = units['aodX']['beadDiameter']\n\n # add trap names\n meta['traps'] = meta.subDictKeys()\n\n return meta, units", "def data(self):\n return {\"estimate\":self.get_estimate(),\n \"samplers\":self.samplers}", "def __init__(self):\n\t\tsuper(PhaseMeter, self).__init__()\n\t\tself._register_accessors(_pm_reg_handlers)\n\t\t\n\t\tself.id = 3\n\t\tself.type = \"phasemeter\"\n\t\tself.logname = \"MokuPhaseMeterData\"\n\n\t\tself.binstr = \"<p32,0xAAAAAAAA:u48:u48:s15:p1,0:s48:s32:s32\"\n\t\tself.procstr = [\"*{:.16e} : *{:.16e} : : *{:.16e} : *C*{:.16e} : *C*{:.16e} \".format(self._intToHertz(1.0), self._intToHertz(1.0), self._intToCycles(1.0), self._intToVolts(1.0,1.0), self._intToVolts(1.0,1.0)),\n\t\t\t\t\t\t\"*{:.16e} : *{:.16e} : : *{:.16e} : *C*{:.16e} : *C*{:.16e} \".format(self._intToHertz(1.0), self._intToHertz(1.0), self._intToCycles(1.0), self._intToVolts(1.0,1.0), self._intToVolts(1.0,1.0))]\n\t\tprint self.procstr", "def GetSamples(self):\n samples = super().GetSamples()\n for container in itertools.chain(*list(self.containers.values())):\n metadata = {'image': container.image.split('/')[-1]}\n if container.resource_ready_time and container.create_start_time:\n samples.append(\n sample.Sample(\n 'Container Deployment Time',\n container.resource_ready_time - container.create_start_time,\n 'seconds', metadata))\n if container.delete_end_time and container.delete_start_time:\n samples.append(\n sample.Sample(\n 'Container Delete Time',\n container.delete_end_time - container.delete_start_time,\n 'seconds', metadata))\n for service in self.services.values():\n metadata = {'image': service.image.split('/')[-1]}\n if service.resource_ready_time and service.create_start_time:\n samples.append(\n sample.Sample(\n 'Service Deployment Time',\n service.resource_ready_time - service.create_start_time,\n 'seconds', metadata))\n if service.delete_end_time and service.delete_start_time:\n samples.append(\n sample.Sample('Service Delete Time',\n service.delete_end_time - service.delete_start_time,\n 'seconds', metadata))\n\n return samples", "def _default_specs(self):\n # Spectrometer specs\n self.model = \"Flame-S\" # Spectrometer model\n self.fov = 1 # Field of view fo spectrometer (radius of FOV)\n self.ILS = None # Number array holding instrument line shape (possibly don't hold this here?)\n self.fiber_diameter = 1e-3 # Diameter of optical fiber\n self.pix_num = 2048 # Number of pixels\n self.bit_depth = 16 # Bit depth of spectrometer detector\n\n # File information\n self.file_ext = '.npy' # Spectra saved as numpy array\n self.file_ss = '{}ss' # Shutter speed format spec\n self.file_ss_loc = 1 # Shutter speed location in filename\n self.file_spec_type = {'meas': 'Plume', 'dark': 'Dark', 'cal': 'ppmm', 'clear': 'Clear'}\n self.file_datestr = \"%Y-%m-%dT%H%M%S\" # Date/time format spec in filename\n self.file_datestr_loc = 0\n self.plume_params_file = 'plume_params.txt'\n self.plume_speed_id = 'plume_speed='\n self.plume_dist_id = 'plume_distance='\n\n # File which flags that a scan is complete. 
The file will be empty, just its presence is required\n self.scan_complete = 'complete.txt'\n\n # Acquisition settings\n self.start_int_time = 100 # Starting integration time\n self.start_coadd = 1 # Number of spectra to coadd\n self.framerate = 1 # Framerate of acquisitions (Hz)\n self.wavelengths = None # Wavelengths (nm)\n self.spectrum = None # Spectrum\n self.spectrum_filename = None # Filename for spectrum\n\n self.auto_int = True # Bool for requesting automated integration time adjustment\n self.min_saturation = 0.6 # Minimum saturation accepted before adjusting shutter speed (if auto_ss is True)\n self.max_saturation = 0.9 # Maximum saturation accepted before adjusting shutter speed (if auto_ss is True)\n self.saturation_range = [300, 335] # Range of wavelengths used in checking integration time\n self.saturation_pixels = 2 # Number of pixels to check\n\n # Predefined list of integration times for automatic exposure adjustment\n self.int_list = np.concatenate((np.arange(0.1, 0.5, 0.05),\n np.arange(0.5, 1, 0.1),\n np.arange(1, 5, 0.5),\n np.arange(5, 10, 1),\n np.arange(10, 50, 5),\n np.arange(50, 100, 10),\n np.arange(100, 500, 50),\n np.arange(500, 1000, 100),\n np.arange(10 ** 3, 10 ** 4, 500),\n np.array([10 ** 4])))", "def __init__(self) -> None:\n self.metrics = {}\n self.current = None\n self.run = None", "def setup(self):\n\n self.testInst = pysat.Instrument('pysat', 'testing',\n clean_level='clean',\n update_files=True)\n self.testInst.bounds = (dt.datetime(2008, 1, 1),\n dt.datetime(2008, 1, 31))\n self.test_bins = [0, 24, 24]\n self.test_label = 'slt'\n self.test_data = ['dummy1', 'dummy2']\n self.out_keys = ['count', 'avg_abs_dev', 'median', 'bin_x']\n self.out_data = {'dummy1':\n {'count': [111780., 111320., 111780., 111320.,\n 111780., 111320., 111780., 111320.,\n 111780., 111320., 111780., 111320.,\n 111780., 111320., 111918., 111562.,\n 112023., 111562., 112023., 111412.,\n 111780., 111320., 111780., 111320.],\n 'avg_abs_dev': np.zeros(shape=24),\n 'median': np.linspace(0.0, 23.0, 24)},\n 'dummy2':\n {'count': [111780., 111320., 111780., 111320.,\n 111780., 111320., 111780., 111320.,\n 111780., 111320., 111780., 111320.,\n 111780., 111320., 111918., 111562.,\n 112023., 111562., 112023., 111412.,\n 111780., 111320., 111780., 111320.],\n 'avg_abs_dev': np.zeros(shape=24) + 6.0,\n 'median': [11., 12., 11., 11., 12., 11., 12., 11.,\n 12., 12., 11., 12., 11., 12., 11., 11.,\n 12., 11., 12., 11., 11., 11., 11., 12.]}}\n return", "def _setInfo(self):\n\n if len(self.data.shape)==1:\n self.numChannels = 1\n self.totalSamples = len(self.data)\n else:\n self.numChannels = self.data.shape[1]\n self.totalSamples = self.data.shape[0]\n \n self.duration = float(self.totalSamples)/self.rate # [sec]\n self.dataType = str(self.data.dtype)", "def _initialize_data(self):\n self.unique_id = 123\n\n self.gas_valve_open = False\n self.buffer_valve_open = False\n self.pump_valve_open = False\n\n self.operatingmode = 0\n\n self.sample_pressure_high_limit = 100\n self.sample_pressure_low_limit = 10\n self.sample_pressure = 0\n\n self.error = 0\n\n self.buffer_pressure_high = True", "def __init__(self, data):\n # check if dataset contains time information\n # (fetched from bootloader storage)\n if len(data) == 61:\n (_, seconds, minutes, hours, days, months, years) = struct.unpack(\n '<55sBBBBBB', data)\n self.date = datetime(2000 + years, months, days, hours, minutes,\n seconds)\n\n # Only parse preceding data\n data = data[:55]\n power = [0, 0]\n kWh = [0, 0]\n MWh = [0, 0]\n (_, digital, 
speed, active, power[0], kWh[0], MWh[0], power[1], kWh[1],\n MWh[1]) = struct.unpack('<32sH4sBLHHLHH', data)\n\n analog = struct.unpack(\n '<{}{}'.format('H' * 16, 'x' * (len(data) - 32)), data)\n\n self.analog = {}\n for channel in range(0, 16):\n self.analog[channel + 1] = round(\n self._convert_analog(analog[channel]), 3)\n\n self.digital = {}\n for channel in range(0, 16):\n self.digital[channel + 1] = self._convert_digital(digital, channel)\n\n '''\n self.speed = {}\n for channel in range(0, 4):\n self.speed[channel + 1] = round(\n self._convert_speed(speed[channel]), 3)\n \n\n self.energy = {}\n for channel in range(0, 2):\n self.energy[channel + 1] = round(\n self._convert_energy(MWh[channel], kWh[channel], active,\n channel), 3)\n \n\n self.power = {}\n for channel in range(0, 2):\n self.power[channel + 1] = round(\n self._convert_power(power[channel], active, channel), 3)\n '''", "def test_detector1pipeline1(_bigdata):\n\n step = Detector1Pipeline()\n step.group_scale.skip = True\n step.dq_init.skip = True\n step.saturation.skip = True\n step.ipc.skip = True\n step.superbias.skip = True\n step.refpix.skip = True\n step.rscd.skip = True\n step.firstframe.skip = True\n step.lastframe.skip = True\n step.linearity.skip = True\n step.dark_current.skip = True\n step.persistence.skip = True\n step.jump.skip = True\n step.ramp_fit.skip = False\n\n step.gain_scale.skip = False\n step.gain_scale.save_results = True\n\n expfile = 'jw00001001001_01101_00001_MIRIMAGE'\n step.run(_bigdata+'/miri/test_sloperpipeline/' + expfile + '_uncal.fits')\n\n\n files = glob('*.fits')\n\n output_file = expfile + '_gain_scale.fits'\n assert output_file in files\n files.remove(output_file)\n\n output_file = expfile + '_gain_scaleints.fits'\n assert output_file in files\n files.remove(output_file)\n\n assert not len(files)", "def amet_memoryWise(self):\r\n # set up logging files to monitor the calculation\r\n logging.basicConfig(filename = os.path.join(self.path,'history_amet_python.log'),\r\n filemode = 'w+', level = logging.DEBUG,\r\n format = '%(asctime)s - %(name)s - %(levelname)s - %(message)s')\r\n # initialize the time span\r\n # define sigma level\r\n A, B = self.defineSigmaLevels()\r\n # use example input file to load the basic dimensions information\r\n datapath_var = os.path.join(self.path, 'MERRA2_400.inst3_3d_asm_Nv.20160101.nc4.nc')\r\n var_key = Dataset(datapath_var)\r\n lat = var_key.variables['lat'][:]\r\n lon = var_key.variables['lon'][:]\r\n # calculate the reference levels based on A & B and standard surface pressure\r\n half_level = A + B * 101325\r\n level = (half_level[1:] + half_level[:-1]) / 2\r\n # create space for the output\r\n # AMET in the entire column\r\n E = np.zeros((len(lat),len(lon)), dtype=float)\r\n cpT = np.zeros((len(lat),len(lon)), dtype=float)\r\n Lvq = np.zeros((len(lat),len(lon)), dtype=float)\r\n gz = np.zeros((len(lat),len(lon)), dtype=float)\r\n uv2 = np.zeros((len(lat),len(lon)), dtype=float)\r\n logging.info(\"Start retrieving variables T,q,u,v,sp\")\r\n # The shape of each variable is (8,72,361,576)\r\n T = var_key.variables['T'][:]\r\n q = var_key.variables['QV'][:]\r\n sp = var_key.variables['PS'][:] #(8,361,576)\r\n u = var_key.variables['U'][:]\r\n v = var_key.variables['V'][:]\r\n logging.info(\"Extracting variables successfully!\") \r\n # compute gz\r\n z_model = self.calc_gz(var_key)\r\n # get the basic shape\r\n tt, hh, yy, xx = q.shape\r\n AMET = amet.met()\r\n E, cpT, Lvq, gz, uv2 = AMET.calc_met(T, q, sp, u, v, z_model, A, B,\r\n tt, hh, len(lat), 
len(lon), lat, self.lat_unit)\r\n\r\n return np.mean(E)" ]
[ "0.63832515", "0.6135345", "0.6120204", "0.60634154", "0.605351", "0.5972833", "0.5960781", "0.5844192", "0.5828747", "0.5825251", "0.582229", "0.5809576", "0.5800582", "0.5798093", "0.5785926", "0.57514644", "0.5689381", "0.56682414", "0.5606564", "0.5601516", "0.5559532", "0.5557253", "0.5540037", "0.5523215", "0.5517051", "0.54715306", "0.54670507", "0.54653776", "0.54653776", "0.5463243", "0.5461262", "0.5456288", "0.5441408", "0.54336137", "0.5433099", "0.5421969", "0.5421066", "0.54178935", "0.5415365", "0.54141676", "0.54132193", "0.5412624", "0.54105437", "0.54034877", "0.5400044", "0.53954387", "0.5395437", "0.53886724", "0.53814167", "0.5380188", "0.5375139", "0.5366837", "0.536601", "0.5335258", "0.5330095", "0.53246766", "0.53234327", "0.5321535", "0.53207767", "0.5318668", "0.530977", "0.53014654", "0.52833563", "0.5275863", "0.52631044", "0.5259209", "0.5257103", "0.5256854", "0.5256624", "0.52564144", "0.5254059", "0.5253716", "0.52495086", "0.5248902", "0.52449423", "0.52434397", "0.52372265", "0.52369106", "0.5236074", "0.5233763", "0.52312267", "0.5229425", "0.5228933", "0.52272725", "0.52233297", "0.5222398", "0.5221875", "0.5217671", "0.52136594", "0.52100664", "0.520894", "0.5207612", "0.5205241", "0.5203767", "0.52009344", "0.5197934", "0.51956755", "0.51955646", "0.51946175", "0.5190508", "0.5189802" ]
0.0
-1
Returns the X window id of the window whose title matches regex `title_regex`
def get_window_id(title_regex): cmd = "wmctrl -l" logit(cmd) output = subprocess.check_output(cmd.split()).decode("utf-8").splitlines() logit(output) for line in output: w_id = line.split()[0] title = line.split(" ", 3)[3] if re.match(title_regex, title): return w_id raise Exception(f"Could not find window with title matching regex: {title_regex}")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def GetProcessIdByWindowTitle(window_title: str) -> int:\n result = ctypes.c_uint32(0)\n\n string_buffer_size = len(window_title) + 2 # (+2) for the next possible character of a title and the NULL char.\n string_buffer = ctypes.create_unicode_buffer(string_buffer_size)\n\n def callback(hwnd, size):\n \"\"\"\n This callback is used to get a window handle and compare\n its title with the target window title.\n\n To continue enumeration, the callback function must return TRUE;\n to stop enumeration, it must return FALSE.\n \"\"\"\n nonlocal result, string_buffer\n\n user32.GetWindowTextW(hwnd, string_buffer, size)\n\n # Compare the window titles and get the process ID.\n if window_title == string_buffer.value:\n user32.GetWindowThreadProcessId(hwnd, ctypes.byref(result))\n return False\n\n # Indicate it must continue enumeration.\n return True\n\n # Enumerates all top-level windows on the screen by passing the handle to each window,\n # in turn, to an application-defined callback function.\n user32.EnumWindows(WNDENUMPROC(callback), string_buffer_size)\n\n return result.value", "def getCurrentWindowId(*args):", "def get_window_title(self): # real signature unknown; restored from __doc__\n return \"\"", "def _get_title_id(cursor, title):\n # run query to find title id for given title\n title_id_query = cursor.execute(dbq.SELECT_TITLE_ID, [title])\n\n if title_id_query:\n return _fetch_value(cursor)\n else:\n return None", "def get_window_title(self):\n\n return self.window_title", "def getCurrentWindowDialogId(*args):", "def getApplicationwindowId(ReferenceID):\n try:\n ldtp.wait(5)\n window = ReferenceID.windows()[0]\n logging.info(\"Application id of the window : %s\" % window)\n except Exception as er:\n logging.info('Not able to get window name of Application')\n return False\n return window", "def id_by_title(self, title):\n logging.debug('id_by_title(%s)', title)\n if not self.list_loaded_:\n self.load_shows()\n\n for show_id in self.shows_data:\n next_show = self.shows_data[show_id]\n logging.debug('id_by_title(%s) = %s', next_show['title'], show_id)\n if next_show['title'] == title:\n logging.debug('Found id_by_title(%s) = %s', title, show_id)\n return show_id\n\n print('Unknown title - {0}'.format(title))\n sys.exit(1)", "def get_title_id(title=_(\"Name the element\"),\n text=_(\"Choose a name for the element\"),\n element_title=None,\n element_id=None):\n d = title_id_dialog(title=title,\n element_title=element_title,\n element_id=element_id,\n text=text)\n d.show_all()\n center_on_mouse(d)\n\n res=d.run()\n if res == gtk.RESPONSE_OK:\n try:\n t=unicode(d.title_entry.get_text())\n i=unicode(d.id_entry.get_text())\n except ValueError:\n t=None\n i=None\n else:\n t=None\n i=None\n\n d.destroy()\n\n return t, i", "def extract_channel_number(title):\n # Generate re\n p = _re.compile(\"(lower)|(upper)\")\n result = _re.search(p, title)\n idx = result.lastindex\n return idx", "def title(self):\n return win32gui.GetWindowText(self.hwnd)", "def draw_title_window(self, screen: curses.window, height: int, width: int, y: int, x: int) -> None:\n title_win = screen.subwin(height, width, y, x)\n title_win.border()\n\n title = \"XKCD Extractor\"\n centered_x = width // 2 - len(title) // 2\n title_win.addstr(1, centered_x, title)", "def winTitle(self, title):\n winTitle = title\n window = self.window\n window.setWindowTitle(winTitle)", "def getTitleAndPos(self, pos, windowname = \"\"):\n \n wnd = WindowFromPoint(pos)\n while True:\n if not GetParent(wnd): break\n if windowname:\n if windowname in 
GetWindowText(wnd):\n break\n wnd = GetParent(wnd)\n\n # if the user-specified window is a valid top-level window, use it\n # except that the click took place on the genius application window or\n # on a child window that has the user-specified name\n if GetWindowText(wnd) != \"Operation Genius\" and windowname:\n try:\n w = winutil.getWindowHandle(windowname)\n except WindowNotFound:\n pass\n else:\n if windowname not in GetWindowText(wnd):\n wnd = w\n \n title = GetWindowText(wnd)\n wPos = winutil.ScreenToWindow(wnd, pos)\n return (title, wPos)", "def callback(hwnd, size):\n nonlocal result, string_buffer\n\n user32.GetWindowTextW(hwnd, string_buffer, size)\n\n # Compare the window titles and get the process ID.\n if window_title == string_buffer.value:\n user32.GetWindowThreadProcessId(hwnd, ctypes.byref(result))\n return False\n\n # Indicate it must continue enumeration.\n return True", "def find_window(**kwargs):\r\n try:\r\n kwargs['backend'] = 'win32'\r\n element = find_element(**kwargs)\r\n return element.handle\r\n except ElementNotFoundError:\r\n raise WindowNotFoundError\r\n except ElementAmbiguousError:\r\n raise WindowAmbiguousError", "def getActiveWindowName(display):\n\n # we can't get it via powershell as a system process, so we need to get it from a file \n # that gets written to through a scheduled task (hopefully)\n\n # get user's TEMP path\n tempPath = getCurrentUserTempPath()\n\n if tempPath:\n windowOutput = ''\n try:\n with open(tempPath+\"mqttNanny-activeWindow.txt\", encoding=\"utf-16\") as file:\n windowOutput = file.read()\n except IOError as e:\n logger.error(\"Error while reading active window name: {}\".format(str(e)))\n return ''\n\n # File contents looks like this:\n #\n #ProcessName AppTitle \n #----------- -------- \n #WindowsTerminal Windows PowerShell \n \n processNameLength = 0\n dashesMatched = False\n activeWindows = []\n\n for line in iter(windowOutput.splitlines()):\n #ignore blank lines\n if re.match('^\\s*$', line):\n continue\n logger.debug(line)\n # look for ----------- --------\n matchDashes = re.match(r'^([-]+\\s+)([-]+\\s*)', line, re.UNICODE)\n if matchDashes:\n # we need to count the length of the columns so that we can more easily parse it\n processNameLength = len(matchDashes.group(1))\n logger.debug(\"processNameLength = {}\".format(processNameLength))\n dashesMatched = True\n continue\n \n if dashesMatched:\n # we'll split the line based on length\n # some lines may not have all the data, skip them\n if len(line) >= processNameLength:\n processName = line[0:processNameLength].rstrip(\" \")\n title = line[processNameLength:].rstrip(\" \")\n \n activeWindows.append(processName + \": \" + title)\n \n if len(activeWindows) == 1:\n #this is normal, one active window\n return activeWindows[0]\n elif len(activeWindows) == 0:\n return \"No window\"\n else:\n # more than one active window is a problem - couldn't get active windows...\n logger.warning(\"Found \"+str(len(activeWindows))+\" active windows. 
This is not ok.\")\n return \"Error - couldn't get active window\"", "def getTitle(self):\n cmdId = self.executeCommand(Command.GET_TITLE)\n return cmdId", "def strip_winids(string):\n return re.sub(r'0x([0-9a-f]+)', '<windowid>', string)", "def title_contains(title_substring):\n title_substring = title_substring.encode('ascii')\n def f(win):\n t = conv(win.title)\n return title_substring in t\n return f", "def get_window_x_y(windowid):\n return commands.getoutput(\"xwininfo -id \"+windowid+\" | grep 'Corners' | cut -d' ' -f5 | cut -d'+' -f2,3\").split(\"+\")", "def the_tvdb_dot_com_id(title):\n pass", "def doGetPageTitle(self, timeout=10.0):\n TestAdapterLib.check_timeout(caller=TestAdapterLib.caller(), timeout=timeout)\n \n ret = True\n cmdId = self.getTitle()\n rsp = self.hasWindowTitle(timeout=timeout, commandId=cmdId)\n if rsp is None:\n ret = False\n else:\n elementVall = rsp.get('GUI', 'value')\n ret = elementVall.get('value') # title of the window\n return ret", "def title(self):\n with switch_window(self._browser, self.name):\n return self._browser.title", "def get_window_id_by_pid(pid):\n from subprocess import check_output\n # Looks like:\n # 0x03c00041 0 3498 skipper Mozilla Firefox\n # WindowID ? PID USER Window Name\n # Needs sudo apt-get install wmctrl -lp\n\n output = check_output('wmctrl -lp', shell=True)\n # Find the line with the PID we are looking for\n for line in output.splitlines():\n fields = line.split()\n if len(fields) >= 3:\n this_pid = int(fields[2])\n if this_pid == pid:\n return int(fields[0], 16)\n return None", "def set_title( self , winTitle ):\r\n self.rootWin.wm_title( str( winTitle ) )", "def get_valid_title(title):\n if len(title) >= 254:\n title = title[:254]\n return title", "def tv_tropes_id(title):\n pass", "def get_title(self):\n return self.run_command('get_title')[0]", "def _set_window(video_path, window_name, title):\n\n assert os.path.isfile(video_path), \"Path error\"\n vc = cv2.VideoCapture()\n vc.open(video_path)\n im_width = int(vc.get(cv2.CAP_PROP_FRAME_WIDTH))\n im_height = int(vc.get(cv2.CAP_PROP_FRAME_HEIGHT))\n\n return (im_width, im_height)", "def find_window_wildcard(self, wildcard):\n self._handle = None\n win32gui.EnumWindows(self._window_enum_callback, wildcard)", "def find_window_wildcard(self, wildcard):\n self._handle = None\n win32gui.EnumWindows(self._window_enum_callback, wildcard)", "def title(self):\n return self.run_command('title')[0]", "def GetIdxFromWindow(self, wnd):\r\n \r\n for indx, page in enumerate(self._pages):\r\n if page.window == wnd:\r\n return indx\r\n\r\n return wx.NOT_FOUND", "def board_game_geek_id(title):\n pass", "def get_title(reg_doc):\n parent = reg_doc.xpath('//PART/HD')[0]\n title = parent.text\n return title", "def _insert_title(cursor, title):\n cursor.execute(dbq.INSERT_TITLE, [title])\n title_id = cursor.lastrowid\n logger.debug(\"Title '{}' inserted at title_id '{}'\".format(title, title_id))\n\n return title_id", "def get_title(self):\n return self._select_interface(self._rc_get_title, self._http_get_title)", "def get_current_window_name():\n\n hwnd = get_current_window_hwnd()\n length = GetWindowTextLength(hwnd)\n buff = ctypes.create_unicode_buffer(length + 1)\n GetWindowText(hwnd, buff, length + 1)\n\n return buff.value", "def imdb_id(title):\n pass", "def GetItemWindow(self, item):\r\n\r\n return item.GetWindow()", "def get_title():", "def get_window_by_name(self, name):\n for window in self.windows:\n if window.name == name:\n return window\n else:\n raise ValueError(\"No source with 
that name.\")", "def get_title(mods):\n title = mods.find(\"{{{0}}}titleInfo/{{{0}}}title\".format(common.MODS_NS))\n return title.text", "def getkeyname(cls, title):\n return re.sub(r'[^a-zA-Z0-9-]', '_', title.strip().lower())", "def current_app(x):\n try:\n display = Xlib.display.Display()\n window = display.get_input_focus().focus\n wmclass = window.get_wm_class()\n\n if wmclass is None:\n window = window.query_tree().parent\n wmclass = window.get_wm_class()\n\n display.close()\n del display\n\n if wmclass:\n return(wmclass[1])\n else:\n return('UNKNOWN')\n except:\n return('ERROR!')", "def _current_window_for_event(event):\n return find_window_for_buffer_name(event.cli, event.cli.current_buffer_name)", "def get_window_name(cls, quad):\t\t\n\t\treturn ast.literal_eval(str(cls.get_address_value(quad.result)))", "def _validate_title(self, attribute: attr.Attribute, value: str):\n\n if not isinstance(value, str) or len(value) <= 0:\n raise ValueError(\n f\"Window title must be a non-empty string, received {value!r}\"\n )", "def change_tmux_window_title(text):\n # The idea here is to show the time through the window title\n # And other messages when needed.\n command = \"tmux rename-window \" + text\n subprocess.call(command.split())", "def GetHeaderWindow(self):\r\n \r\n return self._header_win", "def _parse_title(self, item):\n title_str = \" \".join(item.css(\"td:first-child *::text\").extract()).strip()\n content_match = re.search(r\"(?<=\\().*(?=\\))\", title_str)\n if not content_match:\n return \"Advisory Board\"\n return content_match.group().title()", "def _window_enum_callback(self, hwnd, wildcard):\n if re.match(wildcard, str(win32gui.GetWindowText(hwnd))) is not None:\n self._handle = hwnd", "def _window_enum_callback(self, hwnd, wildcard):\n if re.match(wildcard, str(win32gui.GetWindowText(hwnd))) is not None:\n self._handle = hwnd", "def check_title(title_list):\n for w_index in range(len(title_list)):\n title_list[w_index] = title_list[w_index].replace('_', ' ')\n return [word for word in title_list if word.istitle()]", "def get_title(content):\n content = content[:100000]\n pa = re.compile(\"<title.*?>(.*?)<\\/title>\", re.DOTALL | re.IGNORECASE)\n match = re.search(pa, content)\n title = \"\"\n if match != None:\n title_found = match.group(1)\n title = title_found.replace(\"\\r\", \"\").replace(\"\\n\", \"\").replace(\"\\t\", \" \")\n return title", "def create_popup_window(title, body):\n assert isinstance(title, six.text_type)\n assert isinstance(body, Container)\n\n return HSplit([\n VSplit([\n Window(width=D.exact(1), height=D.exact(1),\n content=FillControl(BORDER.TOP_LEFT, token=Token.Window.Border)),\n TokenListToolbar(\n get_tokens=lambda cli: [(Token.Window.Title, ' %s ' % title)],\n align_center=True,\n default_char=Char(BORDER.HORIZONTAL, Token.Window.Border)),\n Window(width=D.exact(1), height=D.exact(1),\n content=FillControl(BORDER.TOP_RIGHT, token=Token.Window.Border)),\n ]),\n VSplit([\n Window(width=D.exact(1),\n content=FillControl(BORDER.VERTICAL, token=Token.Window.Border)),\n body,\n Window(width=D.exact(1),\n content=FillControl(BORDER.VERTICAL, token=Token.Window.Border)),\n ]),\n VSplit([\n Window(width=D.exact(1), height=D.exact(1),\n content=FillControl(BORDER.BOTTOM_LEFT, token=Token.Window.Border)),\n Window(height=D.exact(1),\n content=FillControl(BORDER.HORIZONTAL, token=Token.Window.Border)),\n Window(width=D.exact(1), height=D.exact(1),\n content=FillControl(BORDER.BOTTOM_RIGHT, token=Token.Window.Border)),\n ]),\n ])", "def 
eidr_identifier(title):\n pass", "def cli_get_process_title():\n raise NotImplementedError()", "def title_id_widget(element_title=None,\n element_id=None):\n v=gtk.Table(rows=2, columns=2)\n\n l=gtk.Label(_(\"Title\"))\n v.attach(l, 0, 1, 0, 1)\n\n title_entry=gtk.Entry()\n title_entry.show()\n if element_title:\n title_entry.set_text(element_title)\n v.attach(title_entry, 1, 2, 0, 1)\n\n l=gtk.Label(_(\"Id\"))\n v.attach(l, 0, 1, 1, 2)\n\n id_entry=gtk.Entry()\n id_entry.show()\n if element_id:\n id_entry.set_text(element_id)\n v.attach(id_entry, 1, 2, 1, 2)\n\n def update_id(entry):\n id_entry.set_text(helper.title2id(unicode(entry.get_text())))\n return True\n\n title_entry.connect('changed', update_id)\n\n v.id_entry=id_entry\n v.title_entry=title_entry\n return v", "def _get_certificate_error_window():\n all_windows = []\n win32gui.EnumWindows(_enumWindowsCallback, all_windows)\n for win in all_windows:\n class_name = win[1]\n title_bar_text = win[2]\n if class_name == 'IEFrame' and \\\n 'Certificate Error: Navigation Blocked' in title_bar_text:\n return win", "def getWindowName(self):\n return self.__windowName", "def get_title(self):\n title = self.driver.title\n return title", "def getFocusedWindowName(self):\n window = self.getFocusedWindow()\n if window:\n return window.activity\n return None", "def get_current_window():\n\n try:\n return vim.current.window.number - 1\n except AttributeError:\n return int(vim.eval('winnr()')) - 1", "def check_valid_title(title):\n title_issues = TitleIssues(title_contains_nsfw=title_contains_nsfw(title))\n return title_issues", "def title(self):\n return self.browser.get_attribute(\"title\", self)", "def GetXTitle(self):\n return self.GetXaxis().GetTitle()", "def get_title(line):\n title = line.split(' (')[0]\n return title", "def get_title_by_id(id):\n\n # your code", "def getFocusId(*args):", "def getFocusId(*args):", "def getFocusId(*args):", "def getFocusId(*args):", "def title(self):\n return self._frame._title", "def check_title(self):\n currenttitle = self.driver.title\n assert self.TITLE in currenttitle, 'Title not expected. 
Actual: ' + currenttitle + ', Expected: ' + self.TITLE", "def find_window(self, class_name, window_name=None):\n self._handle = win32gui.FindWindow(class_name, window_name)", "def find_window(self, class_name, window_name=None):\n self._handle = win32gui.FindWindow(class_name, window_name)", "def get_title(doc):\r\n title_nodes = doc.getElementsByTagName('title')\r\n if len(title_nodes) > 0:\r\n return title_nodes[0].firstChild.nodeValue", "def title_id_dialog(title=_(\"Name the element\"),\n element_title=None,\n element_id=None,\n text=_(\"Choose a name for the element\"),\n flags=None):\n if flags is None:\n flags=gtk.DIALOG_DESTROY_WITH_PARENT\n d = gtk.Dialog(title=title,\n parent=None,\n flags=flags,\n buttons=( gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL,\n gtk.STOCK_OK, gtk.RESPONSE_OK,\n ))\n if text:\n l=gtk.Label(text)\n l.show()\n d.vbox.add(l)\n\n v=title_id_widget(element_title, element_id)\n d.vbox.pack_start(v, expand=False)\n d.connect('key-press-event', dialog_keypressed_cb)\n d.id_entry=v.id_entry\n d.title_entry=v.title_entry\n return d", "def enumHandler(self, hwnd, lParam):\n if win32gui.IsWindowVisible(hwnd):\n title = win32gui.GetWindowText(hwnd)\n if self._search_str in title:\n self._hwnd = hwnd\n print(self._search_str + ' found in hwnd ' + str(hwnd))\n print(title)", "def title(self):\n for shape in self.__shapes:\n if shape._is_title:\n return shape\n return None", "def configured_title(self):\n return self.get('title', self.DEFAULT_SPACE_TITLE)", "def _get_window_width(self):", "def get_window_pos(self, win_id, dX=0, dY=-17, dw=2, dh=16):\n p = Popen([\"xwininfo\", \"-id\", win_id], stdout=PIPE)\n out = p.communicate()[0]\n if p.returncode != 0:\n raise Exception(\"xwininfo failed\")\n X = int(re.search(\"Absolute upper-left X:.*?(\\d+)\", out).groups()[0])\n Y = int(re.search(\"Absolute upper-left Y:.*?(\\d+)\", out).groups()[0])\n width = int(re.search(\"Width:.*?(\\d+)\", out).groups()[0])\n height = int(re.search(\"Height:.*?(\\d+)\", out).groups()[0])\n X += dX\n Y += dY\n width += dw\n height += dh\n return X, Y, width, height", "def label_rule_for_title(text: str):\n return re.findall(LABEL_SPECIFICATION[\"RE_TITLE\"], text)", "def GetItemWindow(self, item, column=None):\r\n \r\n return item.GetWindow(column)", "def _get_ID(self):\n raw_data = imdb.search_for_title(self.title)\n if len(raw_data) > 1:\n raw_data = raw_data[0] # Pulls the first value of the title (the closest match)\n # if there is more than one\n self.ID = raw_data['imdb_id']", "def index(self):\n return self._browser.driver.window_handles.index(self.name)", "def title_n(self):\n self.run_command('title_n')", "def get_year_from_movielist_title(title):\n match = re.match(r'.*\\s+\\((\\d+)\\)', title)\n year = int(match.groups()[0])\n return year", "def find_tracknumber(title):\n\n tracknumber = ''\n # If we found a digit, it's probably the tracknumber\n j = 0\n # So we save it into tracknumber\n while title[j].isdigit() and j < len(title):\n tracknumber += title[j]\n j += 1\n return tracknumber", "def get_maya_window():\n ptr = apiUI.MQtUtil.mainWindow()\n if ptr is not None:\n return shiboken.wrapInstance(long(ptr), QtGui.QWidget)", "def _is_title(self):\n ph = _child(self.__nvXxPr.nvPr, 'p:ph')\n if ph is None:\n return False\n # idx defaults to 0 when idx attr is absent\n ph_idx = ph.get('idx', '0')\n # title placeholder is identified by idx of 0\n return ph_idx == '0'", "def GetWindow(self, idx):\n assert idx < len(self._windows)\n return self._windows[idx]", "def create_window(session):\n 
def create_window():\n windows_before = session.handles\n name = session.execute_script(\"window.open()\")\n assert len(session.handles) == len(windows_before) + 1\n new_windows = list(set(session.handles) - set(windows_before))\n return new_windows.pop()\n return create_window", "def get_active_window():\n import sys\n active_window_name = None\n if sys.platform in ['Windows', 'win32', 'cygwin']:\n # http://stackoverflow.com/a/608814/562769\n import win32gui\n window = win32gui.GetForegroundWindow()\n active_window_name = win32gui.GetWindowText(window)\n else:\n print(\"Must be a Windows platform.\"\n .format(platform=sys.platform))\n print(sys.version)\n return active_window_name", "def windowTitle(self):\n return self.__windowTitle", "def get_window(self): # real signature unknown; restored from __doc__\n pass", "def enter_title():\n valid_data = False\n # used to keep track of the values and change them in other scopes\n input_data = {'title': ''}\n\n while not valid_data:\n input_data['title'] = get_input(\"Title of the task: \")\n if re.match('[\\w]+', input_data['title']):\n valid_data = True\n clean_scr()\n\n return input_data['title']" ]
[ "0.67164993", "0.62230086", "0.607506", "0.6065819", "0.60467404", "0.58529276", "0.5789825", "0.57747257", "0.5757205", "0.57141364", "0.5661283", "0.56199354", "0.56173295", "0.5543509", "0.55334383", "0.548825", "0.5437465", "0.5373055", "0.5367433", "0.5316267", "0.53121114", "0.5308796", "0.52915806", "0.5228942", "0.51771426", "0.51694065", "0.51619935", "0.5161497", "0.51402414", "0.5100269", "0.50828993", "0.50828993", "0.5078962", "0.5077186", "0.5074635", "0.50547975", "0.50346535", "0.50184256", "0.49989855", "0.49805897", "0.49717367", "0.49661717", "0.49616176", "0.49344394", "0.49327728", "0.49292138", "0.49203607", "0.49165773", "0.4891058", "0.48808888", "0.48714858", "0.48619357", "0.485864", "0.485864", "0.48564696", "0.48538697", "0.4843513", "0.4842442", "0.484045", "0.48299518", "0.48202866", "0.47971013", "0.47927046", "0.47792393", "0.4773991", "0.47630534", "0.47523534", "0.47472486", "0.4725741", "0.47189543", "0.47089568", "0.47089568", "0.47089568", "0.47089568", "0.47087088", "0.46980372", "0.46919432", "0.46919432", "0.46860173", "0.4685564", "0.46840534", "0.46698985", "0.46650758", "0.4663385", "0.46633178", "0.46602657", "0.46441856", "0.46433887", "0.46399984", "0.46397528", "0.46287578", "0.46085584", "0.46040252", "0.45968682", "0.45956028", "0.45886043", "0.45866323", "0.45862854", "0.45832983", "0.45753342" ]
0.8586612
0
Ensure we can create a new user if we have the permission.
def test_create_new_student_user(self): data = { 'username': 'John', 'email': '[email protected]', 'password': 'test123!', 'phone': '1234567890', 'first_name': 'Chuck', 'last_name': 'Norris', 'university': { 'name': "random_university" }, 'academic_field': {'name': "random_field"}, 'academic_level': {'name': "random_level"}, 'gender': "M", 'birthdate': "1999-11-11", } response = self.client.post( reverse('user-list'), data, format='json', ) self.assertEqual(response.status_code, status.HTTP_201_CREATED) self.assertEqual(json.loads(response.content)['phone'], '1234567890') user = User.objects.get(email="[email protected]") activation_token = ActionToken.objects.filter( user=user, type='account_activation', ) self.assertEqual(1, len(activation_token))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create(self, request, *args, **kwargs):\n user = request.user\n if user.is_authenticated and not user.has_perm(\"users.add_user\"):\n self.permission_denied(request, message=_(\"You cannot create users.\"))\n return super().create(request, *args, **kwargs)", "def test_if_not_created_authenticated_permissions(self):\r\n payload = {\r\n \"email\": \"[email protected]\",\r\n \"password\": \"password\",\r\n \"name\": \"asdasd\",\r\n \"is_superuser\": False,\r\n }\r\n\r\n res = self.client_authenticated.post(reverse(CREATE_USER_URL),data=payload)\r\n self.assertEqual(res.status_code, status.HTTP_403_FORBIDDEN)", "def can_create(cls, user, **data):\n raise Return(True)", "def test_if_created_superusers_permissions(self):\r\n payload = {\r\n \"email\": \"[email protected]\",\r\n \"password\": \"password\",\r\n \"name\": \"asdasd\",\r\n \"is_superuser\": False,\r\n }\r\n\r\n res = self.client_superuser.post(reverse(CREATE_USER_URL),data=payload)\r\n self.assertEqual(res.status_code, status.HTTP_201_CREATED)", "def test_user_creation(self):\n self.assertTrue(User.objects.exists())", "def test_create_user(self):\n pass", "def test_create_user(self):\n pass", "def test_create_user(self):\n pass", "def can_create(self):\n return True", "def create(self):\n if User.load(username) is None:\n \"\"\"This username is not in use\"\"\"\n if self.validateEmail(self.email):\n \"\"\"This email is valid\"\"\"\n if len(self.username) > 2:\n \"\"\"This is long enough\"\"\"\n self.__store()", "def test_createUser_single(self):\n #TODO: this and other tests", "def test_08_create_user_not_exists(self):\n\n _, user = self.get_random_item(models.User)\n utils.create_user(user, session=self.session)\n success, error = utils.create_user(user, session=self.session)\n self.assertFalse(success)\n self.assertTrue(error)", "def testCreateIsAllowed(self):\n self.users.create([(u'user', u'secret', u'User', u'[email protected]')])\n user = getUser(u'user')\n self.assertEqual(u'user', user.username)", "def create_user(self):\n return User.objects.create_user(**self.user_data)", "def new_user():\n pass", "def users_create():", "def create_user(self):\n User.objects.create_user('test', '[email protected]', 'testing')", "def test_dont_create_user(self):\n self.assertFalse(User.objects.exists())", "def create_user():\r\n if not request.is_json or 'name' not in request.get_json() or 'phone_number' not in request.get_json() or 'password' not in request.get_json():\r\n return bad_request('Missing required data.')\r\n try:\r\n return add_user(request)\r\n except:\r\n return bad_request(error_messages['user_exist'])", "def test_create_with_permissions(self):\n permissions = Permission.objects.filter(name__in=('Can add course mode', 'Can change course mode'))\n for permission in permissions:\n self.user.user_permissions.add(permission)\n\n self.assert_can_create_course()", "def has_object_create_permission(self, request):\n user = request.user\n if user.is_superuser:\n return user.is_superuser\n\n return self.user == user", "def post(self):\r\n return create_user(request)", "def can_create_users(self, verify_key: VerifyKey) -> bool:\n try:\n user = self.get_user(verify_key)\n return user.role.get(\"can_create_users\", False)\n except UserNotFoundError:\n return False", "def test_create_user_user_exists(self):\n create_mock_user(**self.mock_user)\n\n res = self.client.post(CREATE_USER_URL, self.mock_user)\n\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)", "def test_able_to_create_a_user():\n response = 
api_helper.create_user(pytest.test_user)\n assert response.status_code == 201\n check_user_data_in_response(response.json()[\"data\"])", "def new_user(cls, user):\r\n pass", "def test_staff_permission_required(self):\r\n with self.assertRaises(PermissionDenied):\r\n add_user_with_status_granted(self.user, self.user)\r\n\r\n with self.assertRaises(PermissionDenied):\r\n update_course_creator_group(self.user, self.user, True)", "def test_user_creation(self):\n self.assertEqual(CustomUser.objects.count() - self.user_count, 2)\n self.assertEqual(self.new_student_user.username, 'Student')\n self.assertTrue(self.new_student_user.password, 'student12345')\n self.assertEqual(self.new_instructor_user.username, 'Instructor')\n self.assertTrue(self.new_instructor_user.password, 'instructor12345')", "def test_07_create_user_exists(self):\n\n _, user = self.get_random_item(models.User)\n success, error = utils.create_user(user, session=self.session)\n db_user = db_utils.get_item(\n models.User, filters={\"id\": user[\"id\"]}, session=self.session\n )\n user[\"password\"] = db_user.password\n self.assertTrue(db_user)\n db_user = db_user.as_dict()\n items_equal = utils.is_equal(user, db_user)\n self.assertTrue(items_equal)\n self.assertTrue(success)\n self.assertFalse(error)", "def test_user_existence(self):\n\n credentials = {\n 'email': '[email protected]',\n 'password': 'Testpass12',\n 'name': 'Test Name'\n }\n get_user_model().objects.create_user(**credentials)\n\n # Check that this is a bad request since the user does already exists.\n response = self.client.post(URL_CREATE_USER, credentials)\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)", "def test_create_user(self):\n self.assertEqual(self.new_user.first_name, 'John')\n self.assertEqual(self.new_user.last_name, 'Doe')\n self.assertEqual(self.new_user.username, 'john_doe')\n self.assertEqual(self.new_user.email, '[email protected]')\n self.assertTrue(self.new_user.check_password('test_password'))\n self.assertFalse(self.new_user.is_staff)\n self.assertFalse(self.new_user.is_superuser)\n self.assertFalse(self.new_user.is_active)\n\n with self.assertRaises(ValueError):\n User.objects.create_user(\n first_name='', last_name='', username='', email='', bio='', password=''\n )", "def test_create_user_exists(self):\n payload = {\n \"user\": {\n \"email\": \"[email protected]\",\n \"password\": \"useruser111\",\n \"is_doctor\": False,\n \"is_hospital_admin\": True\n },\n 'first_name': 'Test',\n 'last_name': 'JustUser'\n }\n sample_user(\n payload['user']['email'], payload['user']['password'],\n payload['user']['is_doctor'], payload['user']['is_hospital_admin']\n ),\n response = self.client.post(HOSPITAL_ADMIN_REGISTER_URL, payload, format='json')\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)", "def test_user_creation(self):\r\n \r\n self.assertIsInstance(self.user, User)", "def test_create_new_superuser(self):\n user = get_user_model().objects.create_superuser(\n '[email protected]',\n 'test123'\n )\n\n self.assertTrue(user.is_superuser)\n self.assertTrue(user.is_staff)", "def test_create_new_superuser(self):\n user = get_user_model().objects.create_superuser(\n '[email protected]',\n 'admin123456'\n )\n\n self.assertTrue(user.is_superuser)\n self.assertTrue(user.is_staff)", "def new_user(cls, user):\n pass", "def test_create_new_superuser(self):\n user = get_user_model().objects.create_superuser(\n '[email protected]',\n 'test123',\n )\n\n self.assertTrue(user.is_superuser)\n self.assertTrue(user.is_staff)", 
"def test_create_new_superuser(self):\n user = get_user_model().objects.create_superuser(\n '[email protected]',\n 'test123'\n )\n\n self.assertTrue(user.is_superuser)\n self.assertTrue(user.is_staff)", "def test_create_new_superuser(self):\n user = get_user_model().objects.create_superuser(\n '[email protected]',\n 'test123'\n )\n\n self.assertTrue(user.is_superuser)\n self.assertTrue(user.is_staff)", "def test_creating_a_new_user_without_email(self):\n\n with self.assertRaises(ValueError):\n get_user_model().objects.create_user(None, \"Test1234\")", "def test_create_new_superuser(self):\n user = get_user_model().objects.create_superuser(\n \"[email protected]\",\n \"test123\"\n )\n\n self.assertTrue(user.is_superuser)\n self.assertTrue(user.is_staff)", "def test_user_recreate(self):\n self.test_user_creation()\n\n with self.assertRaisesMessage(\n IntegrityError, \"UNIQUE constraint failed: auths_user.username\"\n ):\n UserModel.objects.create_user(\n username=\"saimer\"\n )", "def test_create_new_super_user(self):\n user = get_user_model().objects.create_superuser(\n '[email protected]', 'test123'\n )\n self.assertTrue(user.is_superuser)\n self.assertTrue(user.is_staff)\n self.assertEqual(user.role, Role.ADMIN)", "def test_create_new_superuser(self):\n\n user = get_user_model().objects.create_superuser(\n '[email protected]',\n 'password123'\n )\n\n # is_superuser is part of PermissionsMixin\n self.assertTrue(user.is_superuser) # checks if user is superuser\n self.assertTrue(user.is_staff) # checks if user is staff", "def test_create_user(self):\r\n self._auto_auth()\r\n self.assertEqual(User.objects.count(), 1)\r\n self.assertTrue(User.objects.all()[0].is_active)", "def test_not_creator_cannot_update(self):\n\n logged_user = utils.create_user_and_authenticate(self)\n self.group.users.add(logged_user)\n expected_url = reverse('my_groups_view')\n\n utils.test_cannot_access(self, self.url,\n expected_url=expected_url,\n data=self.data)", "def test_add_permission(self):\r\n self.assertFalse(self.creator_admin.has_add_permission(self.request))", "def test_create_new_superuser(self):\n user = get_user_model().objects.create_superuser(\n '[email protected]',\n 'fghdjdkricc'\n )\n self.assertTrue(user.is_superuser, True)\n self.assertTrue(user.is_staff, True)", "def test_existing_user(self):\n user = User.objects.create(username=self.username)\n actual = get_user_if_exists(None, self.details)\n self.assertDictEqual(actual, {'is_new': False, 'user': user})", "def test_create_new_superuser(self):\n user = get_user_model().objects.create_superuser(\n '[email protected]',\n 'test123'\n )\n\n self.assertTrue(user.is_superuser)\n self.assertTrue(user.is_staff)", "def test_creating_a_new_super_user(self):\n\n user = get_user_model().objects.create_superuser(\n email=\"[email protected]\", password=\"Test12345\"\n )\n\n self.assertTrue(user.is_superuser)\n self.assertTrue(user.is_staff)", "def test_unauthorized_create_user(self):\n res = self.submit()\n\n assert res.status_code == 401", "def test_create_user_valid(self):\n res = self.client.post(CREATE_USER_URL, self.mock_user)\n\n self.assertEqual(res.status_code, status.HTTP_201_CREATED)\n\n user = get_user_model().objects.get(**res.data)\n\n self.assertTrue(user.check_password(self.mock_user['password']))\n self.assertNotIn('password', res.data)", "def test_user_create(self):\n user_count = User.objects.count()\n user = User.objects.create_user(email='[email protected]', password='test')\n self.assertTrue(User.objects.count() == user_count + 1)\n 
self.assertFalse(user.is_staff)\n self.assertFalse(user.is_superuser)\n with self.assertRaises(ValueError, msg='The email must be provided'):\n User.objects.create_user(email='', password='test')", "def test_create_defined_user(self):\r\n self._auto_auth(\r\n username='robot', password='test',\r\n email='[email protected]', full_name=\"Robot Name\"\r\n )\r\n\r\n # Check that the user has the correct info\r\n user = User.objects.get(username='robot')\r\n self.assertEqual(user.username, 'robot')\r\n self.assertTrue(user.check_password('test'))\r\n self.assertEqual(user.email, '[email protected]')\r\n\r\n # Check that the user has a profile\r\n user_profile = UserProfile.objects.get(user=user)\r\n self.assertEqual(user_profile.name, \"Robot Name\")\r\n\r\n # By default, the user should not be global staff\r\n self.assertFalse(user.is_staff)", "def test_create_super_user(self):\n user = get_user_model().objects.create_superuser(\"[email protected]\", \"test123\")\n\n self.assertTrue(user.is_superuser)\n self.assertTrue(user.is_staff)", "def test_create_new_superuser(self):\n user = get_user_model().objects.create_superuser(\n email=self.test_user_email,\n password=self.test_user_pass,\n name=self.test_user_name\n )\n\n self.assertTrue(user.is_superuser)", "def test_create__forbidden(self):\n testing_config.sign_in('[email protected]', 123567890)\n\n with test_app.test_request_context(self.request_path):\n with self.assertRaises(werkzeug.exceptions.Forbidden):\n self.handler.do_post(self.appuser_id)\n\n new_appuser = user_models.AppUser.query(\n user_models.AppUser.email == '[email protected]').get()\n self.assertIsNone(new_appuser)", "def test_create_user(self) -> None:\n\n u1 = self.register_user(\"u1\", \"pass\")\n\n u1stats = self._get_current_stats(\"user\", u1)\n\n assert u1stats is not None\n\n # not in any rooms by default\n self.assertEqual(u1stats[\"joined_rooms\"], 0)", "def test_user_creation(self):\n user = UserModel.objects.create_user(\n username=\"saimer\"\n )\n self.assertEqual(user.email, \"\")\n self.assertEqual(user.username, \"saimer\")\n self.assertFalse(user.has_usable_password())", "def handle(self, *args, **options):\r\n username = 'populate_creators_command'\r\n email = '[email protected]'\r\n try:\r\n admin = User.objects.create_user(username, email, 'foo')\r\n admin.is_staff = True\r\n admin.save()\r\n except IntegrityError:\r\n # If the script did not complete the last time it was run,\r\n # the admin user will already exist.\r\n admin = User.objects.get(username=username, email=email)\r\n\r\n for user in get_users_with_role(CourseInstructorRole.ROLE):\r\n add_user_with_status_granted(admin, user)\r\n\r\n # Some users will be both staff and instructors. Those folks have been\r\n # added with status granted above, and add_user_with_status_unrequested\r\n # will not try to add them again if they already exist in the course creator database.\r\n for user in get_users_with_role(CourseStaffRole.ROLE):\r\n add_user_with_status_unrequested(user)\r\n\r\n # There could be users who are not in either staff or instructor (they've\r\n # never actually done anything in Studio). 
I plan to add those as unrequested\r\n # when they first go to their dashboard.\r\n\r\n admin.delete()", "def create_user(change):\n return change()", "def allowed_user_access_create(user, pos=None, org=None):\n\n if not user.has_perm(\"auth.add_user\"):\n return False\n\n if pos is None and org is not None:\n raise ValueError(\"Either pos and org must both be None, or neither must be None\")\n if pos is not None and org is None:\n raise ValueError(\"Either pos and org must both be None, or neither must be None\")\n\n if pos is None and org is None:\n # We already know we're allowed to create users in principle\n return True\n else:\n\n # Try to get the user's profile - this fails if they're anonymous, so return False\n try:\n up = user.get_profile()\n except AttributeError:\n return False\n\n # If we're trying to create a user in a different organization and we're\n # not allowed to\n if org != up.org:\n if not user.has_perm(\"vnswww.userprofile_create_different_org\"):\n return False\n try:\n return user.has_perm(db.UserProfile.PERMISSIONS[pos])\n except KeyError:\n # If that type of user doesn't exist, they're not allowed to\n # create one\n return False", "def test_create_super_user(self):\n user = get_user_model().objects.create_superuser(\n email='admin@1234',\n password='adminpassword'\n )\n\n self.assertTrue(user.is_superuser)\n self.assertTrue(user.is_staff)", "def test_user_is_really_create():\n response = api_helper.get_user(user_name=pytest.test_user.name)\n assert response.status_code == 200\n response_json_data = response.json()[\"data\"]\n assert len(response_json_data) == 1\n check_user_data_in_response(response_json_data[0])\n pytest.test_user.id = response_json_data[0]['id']", "def test_new_superuser(self):\n user = get_user_model().objects.create_superuser(\n '[email protected]',\n 'test123'\n )\n\n self.assertTrue(user.is_superuser)\n self.assertTrue(user.is_staff)", "def test_handle_create_not_admin(self):\n test_user = User(\"userid\")\n test_user.github_username = \"githubuser\"\n self.db.retrieve.return_value = test_user\n self.gh.org_create_team.return_value = \"team_id\"\n inputstring = \"team create b-s --name 'B S'\"\n self.assertTupleEqual(self.testcommand.handle(inputstring, user),\n (self.testcommand.permission_error, 200))\n self.db.store.assert_not_called()", "def _add_user(user, state):\r\n if not user.is_staff and CourseCreator.objects.filter(user=user).count() == 0:\r\n entry = CourseCreator(user=user, state=state)\r\n entry.save()\r\n return True\r\n\r\n return False", "def test_username_is_writable_for_user_creation(self):\n request = Mock()\n assert 'username' not in self.admin.get_readonly_fields(request)", "def test_create_new_super_user(self):\n user = get_user_model().objects.create_superuser(\n email=\"[email protected]\",\n password=\"testblabla123\"\n )\n self.assertTrue(user.is_superuser)\n self.assertTrue(user.is_staff)", "def test_create_successful(self):\n self.webkom.add_user(self.disallowed_user)\n request = self.factory.post(\"/permissiontest/\", self.test_update_object)\n force_authenticate(request, self.disallowed_user)\n view = TestViewSet.as_view({\"post\": \"create\"})\n\n response = view(request)\n created = response.data\n\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n self.assertEqual(created[\"name\"], self.test_update_object[\"name\"])", "def create_new_user(self):\n username = 'pseudo'\n email = '[email protected]'\n password = '00000000'\n user_created = self.user.objects.create_user(id=1, 
username=username,\n email=email, password=password)\n HistoryUser.objects.create(user=user_created)\n StatusUser.objects.create(user=user_created)\n\n return user_created", "def test_new_superuser(self):\n user = get_user_model().objects.create_superuser(\n '[email protected]',\n 'ezea'\n ) \n self.assertTrue(user.is_staff)\n self.assertTrue(user.is_superuser)", "def signup():\n req = request.get_json()\n user = req['user']\n is_created = views.UserManagement().create(user)\n if not is_created:\n return jsonify(msg.ALREADY_USE), 400\n\n return jsonify(msg.SUCCESS), 200", "def test_create_profile_on_access(self):\n user = User.objects.create_user(\n 'auto_tester', '[email protected]', 'auto_tester')\n profile = user.get_profile()\n profile.delete()\n profile = user.get_profile()\n ok_(profile is not None)\n eq_(False, profile.username_changes)", "def user_created(self, user, password):\n\n if not self.check_prereqs():\n return False\n\n if self.has_user(user):\n return False\n\n hash = self.hash_method.generate_hash(user,password)\n db = self.env.get_db_cnx()\n cursor = db.cursor()\n res=self.set_password(user,password,create_user=True)\n self.log.debug(\"sqlflexibleauthstore: user_created: %s, %s\" % (user,res))\n return res", "def createUser(self):\n if self.user:\n return self.user\n from soc.models.user import User\n from soc.modules.seeder.logic.providers.user import CurrentUserProvider\n properties = {'account': CurrentUserProvider(),\n 'status': 'valid', 'is_developer': self.dev_test}\n self.user = seeder_logic.seed(User, properties=properties)\n return self.user", "def test_user_creation(self):\n new_user = self.registration_profile.objects.create_inactive_user(\n site=Site.objects.get_current(), **self.user_info)\n self.assertEqual(new_user.get_username(), 'alice')\n self.assertEqual(new_user.email, '[email protected]')\n self.assertTrue(new_user.check_password('swordfish'))\n self.assertFalse(new_user.is_active)\n\n expiration_date = datetime_now() - timedelta(\n settings.ACCOUNT_ACTIVATION_DAYS\n )\n self.assertGreater(new_user.date_joined, expiration_date)", "def test_authenticated_user_create(self):\r\n with self.flask_app.test_request_context('/'):\r\n for token in self.auth_providers:\r\n assert_raises(Forbidden,\r\n getattr(require, 'token').create,\r\n token)", "def test_creating_new_superuser(self):\n user = get_user_model().objects.create_superuser(\n email=\"[email protected]\",\n password=\"Lorem12345\"\n )\n\n self.assertTrue(user.is_staff)\n self.assertTrue(user.is_superuser)", "def test_create_user(self):\n \n new_user = {\"username\": \"beny1976\", \"vocab_count\": 0, \"name\": \"beny rood\", \"sex\": \"male\", \"dob\": \"18/10/1979\"}\n msg = app.create_user(predefined_user=new_user)\n self.assertTrue(msg != \"\")", "def create(self, data):\n # ensure 'create()' calls the specific 'create_user()' method\n # note that the 'data' gets validated\n user = get_user_model().objects.create_user(**data)\n return user", "def test_create_user_invalid_id(self):\r\n print(\"Create user invalid id (already taken)\")\r\n u_id = 100\r\n username = \"newtestuser\"\r\n password = \"test9999\"\r\n u_type = 1\r\n\r\n prev_noUsers = len(User.query.all())\r\n self.assertEqual(self.system.create_user(u_id, username, password, u_type), 0)\r\n curr_noUsers = len(User.query.all())\r\n self.assertEqual(prev_noUsers, curr_noUsers)", "def test_create_user_exists(self):\n payload = {\n \"email\": \"[email protected]\",\n \"name\": \"Test\",\n 'password': 'test123'\n }\n 
create_user(**payload)\n\n res = self.client.post(CREATE_USER_URL, payload)\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)", "def test_create_user_invalid_type(self):\r\n print(\"Create user invalid type\")\r\n u_id = 3\r\n username = \"newtestuser\"\r\n password = \"password\"\r\n u_type = 5\r\n\r\n prev_noUsers = len(User.query.all())\r\n self.assertEqual(self.system.create_user(u_id, username, password, u_type), 0)\r\n curr_noUsers = len(User.query.all())\r\n self.assertEqual(prev_noUsers, curr_noUsers)", "def create_new_user():\n return get_user_model().objects.create_user(\n email='[email protected]',\n password='test@londodnjisdjfois',\n username='tempusername'\n )", "def test_create_new_superuser(self):\n user = get_user_model().objects.create_superuser(\n email='[email protected]',\n password='userpass123'\n )\n\n self.assertTrue(user.is_superuser)\n self.assertTrue(user.is_staff)", "def post(self):\n return self.get_request_handler(request.headers).create_new_user(request)", "def create_users(self):\n from django.contrib.auth.models import User\n user = User.objects.create_user('red', '', 'red')\n user = User.objects.create_user('green', '', 'green')\n user = User.objects.create_user('blue', '', 'blue')", "def on_model_change(self, form, model, is_created):\n if not current_user.is_active or not current_user.is_authenticated:\n abort(403)\n elif is_created:\n if not user_has_permission(current_user, 'can_create','admins'):\n abort(403)\n else:\n if not user_has_permission(current_user, 'can_edit','admins'):\n abort(403)", "def test_append_existing_user(self):\n print('(' + self.test_append_existing_user.__name__+')',\n self.test_append_existing_user.__doc__)\n self.assertIsNone(self.connection.append_user(\n PATIENT_USERNAME, NEW_PATIENT))", "def test_create_no_username(self):\n\n # If there is no username, email will be used instead\n properties = self.portal.portal_properties.site_properties\n properties.manage_changeProperties(use_email_as_login=True)\n\n user = api.user.create(\n email='[email protected]',\n password='secret'\n )\n\n self.assertEquals(user.getUserName(), '[email protected]')\n\n # But if using emails as a username is disabled, we should get\n # an error\n properties.manage_changeProperties(use_email_as_login=False)\n\n self.assertRaises(\n ValueError,\n api.user.create,\n email='[email protected]', password='secret'\n )", "def new_user(global_config, timestamped_email, id_api):\n yield id_api.create_user_if_not_exists(timestamped_email, global_config.users.default.password)", "def test_resource_user_resource_add_user_post(self):\n pass", "def create_user(self, **kwargs):\n kwargs = self._prepare_create_user_args(**kwargs)\n user = self.user_model(**kwargs)\n # noinspection PyUnresolvedReferences\n return self.save(user)", "def test_new_superuser(self):\n\n user = \\\n get_user_model().objects.create_superuser('[email protected]'\n , 'test123')\n\n self.assertTrue(user.is_superuser)\n self.assertTrue(user.is_staff)", "def test_api_can_create_users(self):\n res = self.client().post('/api/v1/user/', data = self.req)\n self.assertEquals(res.status_code, 200)\n self.assertIn('mary', str(res.data))", "def test_create(self):\n urls = [reverse('api:user-list')]\n data = {\n \"username\": \"newuser\",\n \"email\": \"[email protected]\",\n \"password\": \"password\"\n }\n access = {\n \"forbidden\": [self.anonymous_client, self.readonly_client, self.custodian_1_client],\n \"allowed\": [self.admin_client]\n }\n for client in access['forbidden']:\n for 
url in urls:\n self.assertIn(\n client.post(url, data, format='json').status_code,\n [status.HTTP_401_UNAUTHORIZED, status.HTTP_403_FORBIDDEN]\n )\n\n for client in access['allowed']:\n for url in urls:\n self.assertEqual(\n client.post(url, data, format='json').status_code,\n status.HTTP_201_CREATED\n )", "def allowed_organization_access_create(user):\n return user.has_perm(\"vnswww.add_organization\")", "def test_user_exists(self):\r\n payload = {\r\n 'email': '[email protected]',\r\n 'password': 'testpass',\r\n 'name': 'Maks'\r\n }\r\n create_user(**payload)\r\n\r\n res = self.client.post(CREATE_USER_URL, payload)\r\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)", "def test_user_exists(self):\n payload = {'email': '[email protected]', 'password': 'password'}\n create_user(**payload)\n\n res = self.client.post(CREATE_USER_URL, payload)\n\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)" ]
[ "0.7776861", "0.72906166", "0.7237606", "0.7017382", "0.69414604", "0.6917588", "0.6917588", "0.6917588", "0.67925555", "0.6755567", "0.6740731", "0.6710452", "0.67034537", "0.6681693", "0.6673598", "0.6666447", "0.6623244", "0.66229874", "0.66059834", "0.65804935", "0.6564211", "0.6555534", "0.65370995", "0.65349036", "0.6529641", "0.65261155", "0.65234804", "0.6516463", "0.65067935", "0.6501405", "0.65009636", "0.6498945", "0.6495174", "0.6483011", "0.64779925", "0.6477804", "0.6468004", "0.6465473", "0.64648265", "0.6463669", "0.64534986", "0.6449846", "0.6446783", "0.6444287", "0.6439542", "0.6429613", "0.6429283", "0.642774", "0.64262277", "0.64248866", "0.64118415", "0.6411472", "0.64081085", "0.6405731", "0.6401217", "0.64010346", "0.6399396", "0.6396729", "0.6394843", "0.63867193", "0.635489", "0.63445485", "0.63410574", "0.631966", "0.63195276", "0.630504", "0.6298882", "0.62960905", "0.62925994", "0.62902427", "0.6286614", "0.6285109", "0.62833893", "0.6282842", "0.6281287", "0.62779593", "0.6277488", "0.62774706", "0.627337", "0.62654316", "0.62632895", "0.62631667", "0.62622267", "0.62528837", "0.6238875", "0.6236931", "0.62296945", "0.6229152", "0.62222165", "0.622089", "0.62114537", "0.6208902", "0.6208235", "0.6206924", "0.620559", "0.62051076", "0.6204693", "0.62039745", "0.62011397", "0.6199499", "0.6199256" ]
0.0
-1
Ensure we can create a new user if we have the permission.
def test_create_new_user(self):
        data = {
            'username': 'John',
            'email': '[email protected]',
            'password': 'test123!',
            'phone': '1234567890',
            'first_name': 'Chuck',
            'last_name': 'Norris',
            'gender': "M",
            'birthdate': "1999-11-11",
        }

        response = self.client.post(
            reverse('user-list'),
            data,
            format='json',
        )

        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        self.assertEqual(json.loads(response.content)['phone'], '1234567890')

        user = User.objects.get(email="[email protected]")
        activation_token = ActionToken.objects.filter(
            user=user,
            type='account_activation',
        )

        self.assertEqual(1, len(activation_token))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create(self, request, *args, **kwargs):\n user = request.user\n if user.is_authenticated and not user.has_perm(\"users.add_user\"):\n self.permission_denied(request, message=_(\"You cannot create users.\"))\n return super().create(request, *args, **kwargs)", "def test_if_not_created_authenticated_permissions(self):\r\n payload = {\r\n \"email\": \"[email protected]\",\r\n \"password\": \"password\",\r\n \"name\": \"asdasd\",\r\n \"is_superuser\": False,\r\n }\r\n\r\n res = self.client_authenticated.post(reverse(CREATE_USER_URL),data=payload)\r\n self.assertEqual(res.status_code, status.HTTP_403_FORBIDDEN)", "def can_create(cls, user, **data):\n raise Return(True)", "def test_if_created_superusers_permissions(self):\r\n payload = {\r\n \"email\": \"[email protected]\",\r\n \"password\": \"password\",\r\n \"name\": \"asdasd\",\r\n \"is_superuser\": False,\r\n }\r\n\r\n res = self.client_superuser.post(reverse(CREATE_USER_URL),data=payload)\r\n self.assertEqual(res.status_code, status.HTTP_201_CREATED)", "def test_user_creation(self):\n self.assertTrue(User.objects.exists())", "def test_create_user(self):\n pass", "def test_create_user(self):\n pass", "def test_create_user(self):\n pass", "def can_create(self):\n return True", "def create(self):\n if User.load(username) is None:\n \"\"\"This username is not in use\"\"\"\n if self.validateEmail(self.email):\n \"\"\"This email is valid\"\"\"\n if len(self.username) > 2:\n \"\"\"This is long enough\"\"\"\n self.__store()", "def test_createUser_single(self):\n #TODO: this and other tests", "def test_08_create_user_not_exists(self):\n\n _, user = self.get_random_item(models.User)\n utils.create_user(user, session=self.session)\n success, error = utils.create_user(user, session=self.session)\n self.assertFalse(success)\n self.assertTrue(error)", "def testCreateIsAllowed(self):\n self.users.create([(u'user', u'secret', u'User', u'[email protected]')])\n user = getUser(u'user')\n self.assertEqual(u'user', user.username)", "def create_user(self):\n return User.objects.create_user(**self.user_data)", "def new_user():\n pass", "def users_create():", "def create_user(self):\n User.objects.create_user('test', '[email protected]', 'testing')", "def test_dont_create_user(self):\n self.assertFalse(User.objects.exists())", "def create_user():\r\n if not request.is_json or 'name' not in request.get_json() or 'phone_number' not in request.get_json() or 'password' not in request.get_json():\r\n return bad_request('Missing required data.')\r\n try:\r\n return add_user(request)\r\n except:\r\n return bad_request(error_messages['user_exist'])", "def test_create_with_permissions(self):\n permissions = Permission.objects.filter(name__in=('Can add course mode', 'Can change course mode'))\n for permission in permissions:\n self.user.user_permissions.add(permission)\n\n self.assert_can_create_course()", "def has_object_create_permission(self, request):\n user = request.user\n if user.is_superuser:\n return user.is_superuser\n\n return self.user == user", "def post(self):\r\n return create_user(request)", "def can_create_users(self, verify_key: VerifyKey) -> bool:\n try:\n user = self.get_user(verify_key)\n return user.role.get(\"can_create_users\", False)\n except UserNotFoundError:\n return False", "def test_create_user_user_exists(self):\n create_mock_user(**self.mock_user)\n\n res = self.client.post(CREATE_USER_URL, self.mock_user)\n\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)", "def test_able_to_create_a_user():\n response = 
api_helper.create_user(pytest.test_user)\n assert response.status_code == 201\n check_user_data_in_response(response.json()[\"data\"])", "def new_user(cls, user):\r\n pass", "def test_staff_permission_required(self):\r\n with self.assertRaises(PermissionDenied):\r\n add_user_with_status_granted(self.user, self.user)\r\n\r\n with self.assertRaises(PermissionDenied):\r\n update_course_creator_group(self.user, self.user, True)", "def test_user_creation(self):\n self.assertEqual(CustomUser.objects.count() - self.user_count, 2)\n self.assertEqual(self.new_student_user.username, 'Student')\n self.assertTrue(self.new_student_user.password, 'student12345')\n self.assertEqual(self.new_instructor_user.username, 'Instructor')\n self.assertTrue(self.new_instructor_user.password, 'instructor12345')", "def test_07_create_user_exists(self):\n\n _, user = self.get_random_item(models.User)\n success, error = utils.create_user(user, session=self.session)\n db_user = db_utils.get_item(\n models.User, filters={\"id\": user[\"id\"]}, session=self.session\n )\n user[\"password\"] = db_user.password\n self.assertTrue(db_user)\n db_user = db_user.as_dict()\n items_equal = utils.is_equal(user, db_user)\n self.assertTrue(items_equal)\n self.assertTrue(success)\n self.assertFalse(error)", "def test_user_existence(self):\n\n credentials = {\n 'email': '[email protected]',\n 'password': 'Testpass12',\n 'name': 'Test Name'\n }\n get_user_model().objects.create_user(**credentials)\n\n # Check that this is a bad request since the user does already exists.\n response = self.client.post(URL_CREATE_USER, credentials)\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)", "def test_create_user(self):\n self.assertEqual(self.new_user.first_name, 'John')\n self.assertEqual(self.new_user.last_name, 'Doe')\n self.assertEqual(self.new_user.username, 'john_doe')\n self.assertEqual(self.new_user.email, '[email protected]')\n self.assertTrue(self.new_user.check_password('test_password'))\n self.assertFalse(self.new_user.is_staff)\n self.assertFalse(self.new_user.is_superuser)\n self.assertFalse(self.new_user.is_active)\n\n with self.assertRaises(ValueError):\n User.objects.create_user(\n first_name='', last_name='', username='', email='', bio='', password=''\n )", "def test_create_user_exists(self):\n payload = {\n \"user\": {\n \"email\": \"[email protected]\",\n \"password\": \"useruser111\",\n \"is_doctor\": False,\n \"is_hospital_admin\": True\n },\n 'first_name': 'Test',\n 'last_name': 'JustUser'\n }\n sample_user(\n payload['user']['email'], payload['user']['password'],\n payload['user']['is_doctor'], payload['user']['is_hospital_admin']\n ),\n response = self.client.post(HOSPITAL_ADMIN_REGISTER_URL, payload, format='json')\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)", "def test_user_creation(self):\r\n \r\n self.assertIsInstance(self.user, User)", "def test_create_new_superuser(self):\n user = get_user_model().objects.create_superuser(\n '[email protected]',\n 'test123'\n )\n\n self.assertTrue(user.is_superuser)\n self.assertTrue(user.is_staff)", "def test_create_new_superuser(self):\n user = get_user_model().objects.create_superuser(\n '[email protected]',\n 'admin123456'\n )\n\n self.assertTrue(user.is_superuser)\n self.assertTrue(user.is_staff)", "def new_user(cls, user):\n pass", "def test_create_new_superuser(self):\n user = get_user_model().objects.create_superuser(\n '[email protected]',\n 'test123',\n )\n\n self.assertTrue(user.is_superuser)\n self.assertTrue(user.is_staff)", 
"def test_create_new_superuser(self):\n user = get_user_model().objects.create_superuser(\n '[email protected]',\n 'test123'\n )\n\n self.assertTrue(user.is_superuser)\n self.assertTrue(user.is_staff)", "def test_create_new_superuser(self):\n user = get_user_model().objects.create_superuser(\n '[email protected]',\n 'test123'\n )\n\n self.assertTrue(user.is_superuser)\n self.assertTrue(user.is_staff)", "def test_creating_a_new_user_without_email(self):\n\n with self.assertRaises(ValueError):\n get_user_model().objects.create_user(None, \"Test1234\")", "def test_create_new_superuser(self):\n user = get_user_model().objects.create_superuser(\n \"[email protected]\",\n \"test123\"\n )\n\n self.assertTrue(user.is_superuser)\n self.assertTrue(user.is_staff)", "def test_user_recreate(self):\n self.test_user_creation()\n\n with self.assertRaisesMessage(\n IntegrityError, \"UNIQUE constraint failed: auths_user.username\"\n ):\n UserModel.objects.create_user(\n username=\"saimer\"\n )", "def test_create_new_super_user(self):\n user = get_user_model().objects.create_superuser(\n '[email protected]', 'test123'\n )\n self.assertTrue(user.is_superuser)\n self.assertTrue(user.is_staff)\n self.assertEqual(user.role, Role.ADMIN)", "def test_create_new_superuser(self):\n\n user = get_user_model().objects.create_superuser(\n '[email protected]',\n 'password123'\n )\n\n # is_superuser is part of PermissionsMixin\n self.assertTrue(user.is_superuser) # checks if user is superuser\n self.assertTrue(user.is_staff) # checks if user is staff", "def test_create_user(self):\r\n self._auto_auth()\r\n self.assertEqual(User.objects.count(), 1)\r\n self.assertTrue(User.objects.all()[0].is_active)", "def test_not_creator_cannot_update(self):\n\n logged_user = utils.create_user_and_authenticate(self)\n self.group.users.add(logged_user)\n expected_url = reverse('my_groups_view')\n\n utils.test_cannot_access(self, self.url,\n expected_url=expected_url,\n data=self.data)", "def test_add_permission(self):\r\n self.assertFalse(self.creator_admin.has_add_permission(self.request))", "def test_create_new_superuser(self):\n user = get_user_model().objects.create_superuser(\n '[email protected]',\n 'fghdjdkricc'\n )\n self.assertTrue(user.is_superuser, True)\n self.assertTrue(user.is_staff, True)", "def test_existing_user(self):\n user = User.objects.create(username=self.username)\n actual = get_user_if_exists(None, self.details)\n self.assertDictEqual(actual, {'is_new': False, 'user': user})", "def test_create_new_superuser(self):\n user = get_user_model().objects.create_superuser(\n '[email protected]',\n 'test123'\n )\n\n self.assertTrue(user.is_superuser)\n self.assertTrue(user.is_staff)", "def test_creating_a_new_super_user(self):\n\n user = get_user_model().objects.create_superuser(\n email=\"[email protected]\", password=\"Test12345\"\n )\n\n self.assertTrue(user.is_superuser)\n self.assertTrue(user.is_staff)", "def test_unauthorized_create_user(self):\n res = self.submit()\n\n assert res.status_code == 401", "def test_create_user_valid(self):\n res = self.client.post(CREATE_USER_URL, self.mock_user)\n\n self.assertEqual(res.status_code, status.HTTP_201_CREATED)\n\n user = get_user_model().objects.get(**res.data)\n\n self.assertTrue(user.check_password(self.mock_user['password']))\n self.assertNotIn('password', res.data)", "def test_user_create(self):\n user_count = User.objects.count()\n user = User.objects.create_user(email='[email protected]', password='test')\n self.assertTrue(User.objects.count() == user_count + 1)\n 
self.assertFalse(user.is_staff)\n self.assertFalse(user.is_superuser)\n with self.assertRaises(ValueError, msg='The email must be provided'):\n User.objects.create_user(email='', password='test')", "def test_create_defined_user(self):\r\n self._auto_auth(\r\n username='robot', password='test',\r\n email='[email protected]', full_name=\"Robot Name\"\r\n )\r\n\r\n # Check that the user has the correct info\r\n user = User.objects.get(username='robot')\r\n self.assertEqual(user.username, 'robot')\r\n self.assertTrue(user.check_password('test'))\r\n self.assertEqual(user.email, '[email protected]')\r\n\r\n # Check that the user has a profile\r\n user_profile = UserProfile.objects.get(user=user)\r\n self.assertEqual(user_profile.name, \"Robot Name\")\r\n\r\n # By default, the user should not be global staff\r\n self.assertFalse(user.is_staff)", "def test_create_super_user(self):\n user = get_user_model().objects.create_superuser(\"[email protected]\", \"test123\")\n\n self.assertTrue(user.is_superuser)\n self.assertTrue(user.is_staff)", "def test_create_new_superuser(self):\n user = get_user_model().objects.create_superuser(\n email=self.test_user_email,\n password=self.test_user_pass,\n name=self.test_user_name\n )\n\n self.assertTrue(user.is_superuser)", "def test_create__forbidden(self):\n testing_config.sign_in('[email protected]', 123567890)\n\n with test_app.test_request_context(self.request_path):\n with self.assertRaises(werkzeug.exceptions.Forbidden):\n self.handler.do_post(self.appuser_id)\n\n new_appuser = user_models.AppUser.query(\n user_models.AppUser.email == '[email protected]').get()\n self.assertIsNone(new_appuser)", "def test_create_user(self) -> None:\n\n u1 = self.register_user(\"u1\", \"pass\")\n\n u1stats = self._get_current_stats(\"user\", u1)\n\n assert u1stats is not None\n\n # not in any rooms by default\n self.assertEqual(u1stats[\"joined_rooms\"], 0)", "def test_user_creation(self):\n user = UserModel.objects.create_user(\n username=\"saimer\"\n )\n self.assertEqual(user.email, \"\")\n self.assertEqual(user.username, \"saimer\")\n self.assertFalse(user.has_usable_password())", "def handle(self, *args, **options):\r\n username = 'populate_creators_command'\r\n email = '[email protected]'\r\n try:\r\n admin = User.objects.create_user(username, email, 'foo')\r\n admin.is_staff = True\r\n admin.save()\r\n except IntegrityError:\r\n # If the script did not complete the last time it was run,\r\n # the admin user will already exist.\r\n admin = User.objects.get(username=username, email=email)\r\n\r\n for user in get_users_with_role(CourseInstructorRole.ROLE):\r\n add_user_with_status_granted(admin, user)\r\n\r\n # Some users will be both staff and instructors. Those folks have been\r\n # added with status granted above, and add_user_with_status_unrequested\r\n # will not try to add them again if they already exist in the course creator database.\r\n for user in get_users_with_role(CourseStaffRole.ROLE):\r\n add_user_with_status_unrequested(user)\r\n\r\n # There could be users who are not in either staff or instructor (they've\r\n # never actually done anything in Studio). 
I plan to add those as unrequested\r\n # when they first go to their dashboard.\r\n\r\n admin.delete()", "def create_user(change):\n return change()", "def allowed_user_access_create(user, pos=None, org=None):\n\n if not user.has_perm(\"auth.add_user\"):\n return False\n\n if pos is None and org is not None:\n raise ValueError(\"Either pos and org must both be None, or neither must be None\")\n if pos is not None and org is None:\n raise ValueError(\"Either pos and org must both be None, or neither must be None\")\n\n if pos is None and org is None:\n # We already know we're allowed to create users in principle\n return True\n else:\n\n # Try to get the user's profile - this fails if they're anonymous, so return False\n try:\n up = user.get_profile()\n except AttributeError:\n return False\n\n # If we're trying to create a user in a different organization and we're\n # not allowed to\n if org != up.org:\n if not user.has_perm(\"vnswww.userprofile_create_different_org\"):\n return False\n try:\n return user.has_perm(db.UserProfile.PERMISSIONS[pos])\n except KeyError:\n # If that type of user doesn't exist, they're not allowed to\n # create one\n return False", "def test_create_super_user(self):\n user = get_user_model().objects.create_superuser(\n email='admin@1234',\n password='adminpassword'\n )\n\n self.assertTrue(user.is_superuser)\n self.assertTrue(user.is_staff)", "def test_user_is_really_create():\n response = api_helper.get_user(user_name=pytest.test_user.name)\n assert response.status_code == 200\n response_json_data = response.json()[\"data\"]\n assert len(response_json_data) == 1\n check_user_data_in_response(response_json_data[0])\n pytest.test_user.id = response_json_data[0]['id']", "def test_new_superuser(self):\n user = get_user_model().objects.create_superuser(\n '[email protected]',\n 'test123'\n )\n\n self.assertTrue(user.is_superuser)\n self.assertTrue(user.is_staff)", "def test_handle_create_not_admin(self):\n test_user = User(\"userid\")\n test_user.github_username = \"githubuser\"\n self.db.retrieve.return_value = test_user\n self.gh.org_create_team.return_value = \"team_id\"\n inputstring = \"team create b-s --name 'B S'\"\n self.assertTupleEqual(self.testcommand.handle(inputstring, user),\n (self.testcommand.permission_error, 200))\n self.db.store.assert_not_called()", "def _add_user(user, state):\r\n if not user.is_staff and CourseCreator.objects.filter(user=user).count() == 0:\r\n entry = CourseCreator(user=user, state=state)\r\n entry.save()\r\n return True\r\n\r\n return False", "def test_username_is_writable_for_user_creation(self):\n request = Mock()\n assert 'username' not in self.admin.get_readonly_fields(request)", "def test_create_new_super_user(self):\n user = get_user_model().objects.create_superuser(\n email=\"[email protected]\",\n password=\"testblabla123\"\n )\n self.assertTrue(user.is_superuser)\n self.assertTrue(user.is_staff)", "def test_create_successful(self):\n self.webkom.add_user(self.disallowed_user)\n request = self.factory.post(\"/permissiontest/\", self.test_update_object)\n force_authenticate(request, self.disallowed_user)\n view = TestViewSet.as_view({\"post\": \"create\"})\n\n response = view(request)\n created = response.data\n\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n self.assertEqual(created[\"name\"], self.test_update_object[\"name\"])", "def create_new_user(self):\n username = 'pseudo'\n email = '[email protected]'\n password = '00000000'\n user_created = self.user.objects.create_user(id=1, 
username=username,\n email=email, password=password)\n HistoryUser.objects.create(user=user_created)\n StatusUser.objects.create(user=user_created)\n\n return user_created", "def test_new_superuser(self):\n user = get_user_model().objects.create_superuser(\n '[email protected]',\n 'ezea'\n ) \n self.assertTrue(user.is_staff)\n self.assertTrue(user.is_superuser)", "def signup():\n req = request.get_json()\n user = req['user']\n is_created = views.UserManagement().create(user)\n if not is_created:\n return jsonify(msg.ALREADY_USE), 400\n\n return jsonify(msg.SUCCESS), 200", "def test_create_profile_on_access(self):\n user = User.objects.create_user(\n 'auto_tester', '[email protected]', 'auto_tester')\n profile = user.get_profile()\n profile.delete()\n profile = user.get_profile()\n ok_(profile is not None)\n eq_(False, profile.username_changes)", "def user_created(self, user, password):\n\n if not self.check_prereqs():\n return False\n\n if self.has_user(user):\n return False\n\n hash = self.hash_method.generate_hash(user,password)\n db = self.env.get_db_cnx()\n cursor = db.cursor()\n res=self.set_password(user,password,create_user=True)\n self.log.debug(\"sqlflexibleauthstore: user_created: %s, %s\" % (user,res))\n return res", "def createUser(self):\n if self.user:\n return self.user\n from soc.models.user import User\n from soc.modules.seeder.logic.providers.user import CurrentUserProvider\n properties = {'account': CurrentUserProvider(),\n 'status': 'valid', 'is_developer': self.dev_test}\n self.user = seeder_logic.seed(User, properties=properties)\n return self.user", "def test_user_creation(self):\n new_user = self.registration_profile.objects.create_inactive_user(\n site=Site.objects.get_current(), **self.user_info)\n self.assertEqual(new_user.get_username(), 'alice')\n self.assertEqual(new_user.email, '[email protected]')\n self.assertTrue(new_user.check_password('swordfish'))\n self.assertFalse(new_user.is_active)\n\n expiration_date = datetime_now() - timedelta(\n settings.ACCOUNT_ACTIVATION_DAYS\n )\n self.assertGreater(new_user.date_joined, expiration_date)", "def test_authenticated_user_create(self):\r\n with self.flask_app.test_request_context('/'):\r\n for token in self.auth_providers:\r\n assert_raises(Forbidden,\r\n getattr(require, 'token').create,\r\n token)", "def test_creating_new_superuser(self):\n user = get_user_model().objects.create_superuser(\n email=\"[email protected]\",\n password=\"Lorem12345\"\n )\n\n self.assertTrue(user.is_staff)\n self.assertTrue(user.is_superuser)", "def test_create_user(self):\n \n new_user = {\"username\": \"beny1976\", \"vocab_count\": 0, \"name\": \"beny rood\", \"sex\": \"male\", \"dob\": \"18/10/1979\"}\n msg = app.create_user(predefined_user=new_user)\n self.assertTrue(msg != \"\")", "def create(self, data):\n # ensure 'create()' calls the specific 'create_user()' method\n # note that the 'data' gets validated\n user = get_user_model().objects.create_user(**data)\n return user", "def test_create_user_invalid_id(self):\r\n print(\"Create user invalid id (already taken)\")\r\n u_id = 100\r\n username = \"newtestuser\"\r\n password = \"test9999\"\r\n u_type = 1\r\n\r\n prev_noUsers = len(User.query.all())\r\n self.assertEqual(self.system.create_user(u_id, username, password, u_type), 0)\r\n curr_noUsers = len(User.query.all())\r\n self.assertEqual(prev_noUsers, curr_noUsers)", "def test_create_user_exists(self):\n payload = {\n \"email\": \"[email protected]\",\n \"name\": \"Test\",\n 'password': 'test123'\n }\n 
create_user(**payload)\n\n res = self.client.post(CREATE_USER_URL, payload)\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)", "def test_create_user_invalid_type(self):\r\n print(\"Create user invalid type\")\r\n u_id = 3\r\n username = \"newtestuser\"\r\n password = \"password\"\r\n u_type = 5\r\n\r\n prev_noUsers = len(User.query.all())\r\n self.assertEqual(self.system.create_user(u_id, username, password, u_type), 0)\r\n curr_noUsers = len(User.query.all())\r\n self.assertEqual(prev_noUsers, curr_noUsers)", "def create_new_user():\n return get_user_model().objects.create_user(\n email='[email protected]',\n password='test@londodnjisdjfois',\n username='tempusername'\n )", "def test_create_new_superuser(self):\n user = get_user_model().objects.create_superuser(\n email='[email protected]',\n password='userpass123'\n )\n\n self.assertTrue(user.is_superuser)\n self.assertTrue(user.is_staff)", "def post(self):\n return self.get_request_handler(request.headers).create_new_user(request)", "def create_users(self):\n from django.contrib.auth.models import User\n user = User.objects.create_user('red', '', 'red')\n user = User.objects.create_user('green', '', 'green')\n user = User.objects.create_user('blue', '', 'blue')", "def on_model_change(self, form, model, is_created):\n if not current_user.is_active or not current_user.is_authenticated:\n abort(403)\n elif is_created:\n if not user_has_permission(current_user, 'can_create','admins'):\n abort(403)\n else:\n if not user_has_permission(current_user, 'can_edit','admins'):\n abort(403)", "def test_append_existing_user(self):\n print('(' + self.test_append_existing_user.__name__+')',\n self.test_append_existing_user.__doc__)\n self.assertIsNone(self.connection.append_user(\n PATIENT_USERNAME, NEW_PATIENT))", "def test_create_no_username(self):\n\n # If there is no username, email will be used instead\n properties = self.portal.portal_properties.site_properties\n properties.manage_changeProperties(use_email_as_login=True)\n\n user = api.user.create(\n email='[email protected]',\n password='secret'\n )\n\n self.assertEquals(user.getUserName(), '[email protected]')\n\n # But if using emails as a username is disabled, we should get\n # an error\n properties.manage_changeProperties(use_email_as_login=False)\n\n self.assertRaises(\n ValueError,\n api.user.create,\n email='[email protected]', password='secret'\n )", "def new_user(global_config, timestamped_email, id_api):\n yield id_api.create_user_if_not_exists(timestamped_email, global_config.users.default.password)", "def test_resource_user_resource_add_user_post(self):\n pass", "def create_user(self, **kwargs):\n kwargs = self._prepare_create_user_args(**kwargs)\n user = self.user_model(**kwargs)\n # noinspection PyUnresolvedReferences\n return self.save(user)", "def test_new_superuser(self):\n\n user = \\\n get_user_model().objects.create_superuser('[email protected]'\n , 'test123')\n\n self.assertTrue(user.is_superuser)\n self.assertTrue(user.is_staff)", "def test_api_can_create_users(self):\n res = self.client().post('/api/v1/user/', data = self.req)\n self.assertEquals(res.status_code, 200)\n self.assertIn('mary', str(res.data))", "def test_create(self):\n urls = [reverse('api:user-list')]\n data = {\n \"username\": \"newuser\",\n \"email\": \"[email protected]\",\n \"password\": \"password\"\n }\n access = {\n \"forbidden\": [self.anonymous_client, self.readonly_client, self.custodian_1_client],\n \"allowed\": [self.admin_client]\n }\n for client in access['forbidden']:\n for 
url in urls:\n self.assertIn(\n client.post(url, data, format='json').status_code,\n [status.HTTP_401_UNAUTHORIZED, status.HTTP_403_FORBIDDEN]\n )\n\n for client in access['allowed']:\n for url in urls:\n self.assertEqual(\n client.post(url, data, format='json').status_code,\n status.HTTP_201_CREATED\n )", "def allowed_organization_access_create(user):\n return user.has_perm(\"vnswww.add_organization\")", "def test_user_exists(self):\r\n payload = {\r\n 'email': '[email protected]',\r\n 'password': 'testpass',\r\n 'name': 'Maks'\r\n }\r\n create_user(**payload)\r\n\r\n res = self.client.post(CREATE_USER_URL, payload)\r\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)", "def test_user_exists(self):\n payload = {'email': '[email protected]', 'password': 'password'}\n create_user(**payload)\n\n res = self.client.post(CREATE_USER_URL, payload)\n\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)" ]
[ "0.7776861", "0.72906166", "0.7237606", "0.7017382", "0.69414604", "0.6917588", "0.6917588", "0.6917588", "0.67925555", "0.6755567", "0.6740731", "0.6710452", "0.67034537", "0.6681693", "0.6673598", "0.6666447", "0.6623244", "0.66229874", "0.66059834", "0.65804935", "0.6564211", "0.6555534", "0.65370995", "0.65349036", "0.6529641", "0.65261155", "0.65234804", "0.6516463", "0.65067935", "0.6501405", "0.65009636", "0.6498945", "0.6495174", "0.6483011", "0.64779925", "0.6477804", "0.6468004", "0.6465473", "0.64648265", "0.6463669", "0.64534986", "0.6449846", "0.6446783", "0.6444287", "0.6439542", "0.6429613", "0.6429283", "0.642774", "0.64262277", "0.64248866", "0.64118415", "0.6411472", "0.64081085", "0.6405731", "0.6401217", "0.64010346", "0.6399396", "0.6396729", "0.6394843", "0.63867193", "0.635489", "0.63445485", "0.63410574", "0.631966", "0.63195276", "0.630504", "0.6298882", "0.62960905", "0.62925994", "0.62902427", "0.6286614", "0.6285109", "0.62833893", "0.6282842", "0.6281287", "0.62779593", "0.6277488", "0.62774706", "0.627337", "0.62654316", "0.62632895", "0.62631667", "0.62622267", "0.62528837", "0.6238875", "0.6236931", "0.62296945", "0.6229152", "0.62222165", "0.622089", "0.62114537", "0.6208902", "0.6208235", "0.6206924", "0.620559", "0.62051076", "0.6204693", "0.62039745", "0.62011397", "0.6199499", "0.6199256" ]
0.0
-1
Ensure we can't create a student user without academic_ fields.
def test_create_new_student_user_missing_field(self):
        data = {
            'email': '[email protected]',
            'password': 'test123!',
        }

        response = self.client.post(
            reverse('user-list'),
            data,
            format='json',
        )

        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_can_not_create_education_instance_without_user(self):\n\t\twith self.assertRaises(\n\t\t\tIntegrityError,\n\t\t\tmsg = 'Should raise IntegrityError if user not provided.'\n\t\t\t):\n\n\t\t\tEducation.objects.create(\n\t\t\t\tschool_name=self.school_name,\n\t\t\t\tcourse_name=self.course_name,\n\t\t\t\tstart_date=self.start_date,\n\t\t\t)", "def test_careers_invalid_student(self):\n student_id = '1234567890'\n result = self.ucuenca.careers(student_id)\n self.assertFalse(result)", "def test_invalid_student(self):\n # request\n request_body = {\n 'wwuid': '123456789', # too long\n 'labgroup': self.labgroup.id,\n 'enroll_key': self.labgroup.enroll_key\n }\n response = self.client.post(reverse(self.view_name), request_body)\n # test response\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n # test database\n self.assertEqual(len(Student.objects.all()), 0)", "def test_no_user(self):\n form = self._get_form()\n self.assertTrue(self._validate_form(form), form.errors)\n self.assertRaises(IntegrityError, form.save)", "def test_create_student_missing_role(self):\n response = self.client.post(self.url, data=json.dumps(self.payload_missing_role),\n content_type='application/json')\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, msg=response.content.decode())\n self.assertEqual(Student.objects.count(), self.qty)", "def test_staff_permission_required(self):\r\n with self.assertRaises(PermissionDenied):\r\n add_user_with_status_granted(self.user, self.user)\r\n\r\n with self.assertRaises(PermissionDenied):\r\n update_course_creator_group(self.user, self.user, True)", "def test_can_not_create_education_instance_without_start_date(self):\n\t\twith self.assertRaises(\n\t\t\tIntegrityError,\n\t\t\tmsg = 'Should raise IntegrityError if start_date not provided.'\n\t\t\t):\n\n\t\t\tEducation.objects.create(\n\t\t\t\tuser=self.user,\n\t\t\t\tschool_name=self.school_name,\n\t\t\t\tcourse_name=self.course_name,\n\t\t\t)", "def test_private_create_user_without_email(self):\n with pytest.raises(ValueError) as exinfo:\n EmailUser.objects._create_user(None, None, False, False)\n self.assertIn('email must be set', str(exinfo.value))", "def test_validate_user(self):\n with self.assertRaises(ValidationError):\n self.make_assignment(\n self.category, self.user_alice, self.role_contributor\n )", "def test_bad_student(self):\r\n staff_page = self._goto_staff_page()\r\n staff_page.answer_problem()\r\n\r\n staff_debug_page = staff_page.open_staff_debug_info()\r\n staff_debug_page.delete_state('INVALIDUSER')\r\n msg = staff_debug_page.idash_msg[0]\r\n self.assertEqual(u'Failed to delete student state. '\r\n 'User does not exist.', msg)", "def test06_add_student_with_empty_fields(self):\n student_data = self.students_page.\\\n click_edit_students_list_button(). 
\\\n click_add_new_student_button()\n student_data.save_data_changes_button.click()\n actual_warnings = \\\n student_data.warnings_text_for_adding_student_with_empty_fields()\n self.assertEqual(actual_warnings, data['expected_warnings'])", "def test_creation_without_password(self, user):\n with pytest.raises(mongoengine.errors.ValidationError):\n user.save()", "def test_dont_create_user(self):\n self.assertFalse(User.objects.exists())", "def test_cannot_create_user_without_email(self):\n with self.assertRaises(TypeError):\n User.objects.create_user(username=\"username\", password=\"password\", email=None)", "def test_invalid_data_course_add(self, app, auth, field):\n app.admin.add_new_course()\n course_data = CreateCourse.random()\n setattr(course_data, field, None)\n app.course.create_course(course_data)\n assert (\n not app.course.all_required_fields_filled()\n ), \"Empty fields are ignored and user data changed successfully!\"", "def test_add_user_to_course_group_permission_denied(self):\r\n add_users(self.global_admin, CourseInstructorRole(self.course_key), self.creator)\r\n add_users(self.global_admin, CourseStaffRole(self.course_key), self.creator)\r\n with self.assertRaises(PermissionDenied):\r\n add_users(self.staff, CourseStaffRole(self.course_key), self.staff)", "def test_not_creator_cannot_update(self):\n\n logged_user = utils.create_user_and_authenticate(self)\n self.group.users.add(logged_user)\n expected_url = reverse('my_groups_view')\n\n utils.test_cannot_access(self, self.url,\n expected_url=expected_url,\n data=self.data)", "def test_create_user_missing_fields(self):\n payload = {\n 'email': 'email',\n 'password': ''\n }\n res = self.client.post(CREATE_USER_API, payload)\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)", "def test_careers_invalid_student(self):\n student_id = '1234567890'\n result = self.ucuenca.schedule(student_id)\n self.assertFalse(result)", "def test_creator_group_not_enabled(self):\r\n self.assertTrue(has_access(self.user, CourseCreatorRole()))", "def test_can_have_no_assessor(self):\n title = \"Smart thesis title\"\n\n thesis = Thesis(student=self.student,\n assessor=None,\n supervisor=self.supervisor,\n title=title,\n begin_date=datetime.now().date(),\n due_date=datetime(2018, 1, 30))\n\n thesis.save()\n\n self.assertEqual(None, thesis.assessor)", "def test_creating_a_new_user_without_email(self):\n\n with self.assertRaises(ValueError):\n get_user_model().objects.create_user(None, \"Test1234\")", "def test_08_create_user_not_exists(self):\n\n _, user = self.get_random_item(models.User)\n utils.create_user(user, session=self.session)\n success, error = utils.create_user(user, session=self.session)\n self.assertFalse(success)\n self.assertTrue(error)", "def test_create_instructor_missing_role(self):\n response = self.client.post(self.url, data=json.dumps(self.payload_missing_role),\n content_type='application/json')\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, msg=response.content.decode())\n self.assertEqual(Instructor.objects.count(), self.qty)", "def _add_user(user, state):\r\n if not user.is_staff and CourseCreator.objects.filter(user=user).count() == 0:\r\n entry = CourseCreator(user=user, state=state)\r\n entry.save()\r\n return True\r\n\r\n return False", "def test_create_course_no_course_creators_not_staff(self):\r\n with mock.patch.dict('django.conf.settings.FEATURES', {\"ENABLE_CREATOR_GROUP\": True}):\r\n self.user.is_staff = False\r\n self.user.save()\r\n self.assert_course_permission_denied()", 
"def test_cannot_create_group_with_empty_field(self):\n\n utils.create_user_and_authenticate(self)\n group_fields = ['name', 'description']\n utils.test_cannot_post_with_empty_fields(self, self.url, group_fields)", "def test_creation_throws_error_on_missing_fields(self, test_domain):\n with pytest.raises(ValidationError) as err:\n test_domain.repository_for(Person)._dao.create(last_name=\"Doe\")\n\n assert err.value.messages == {\"first_name\": [\"is required\"]}", "def test_signup_missing_first_name(self):\n\n invalid_u = User.signup(\"[email protected]\", \"testuser\", \"testpass\", None, \"User\", None)\n \n uid = 99999\n invalid_u.id = uid\n\n with self.assertRaises(exc.IntegrityError) as context:\n db.session.commit()", "def test_create_user_invalid_type(self):\r\n print(\"Create user invalid type\")\r\n u_id = 3\r\n username = \"newtestuser\"\r\n password = \"password\"\r\n u_type = 5\r\n\r\n prev_noUsers = len(User.query.all())\r\n self.assertEqual(self.system.create_user(u_id, username, password, u_type), 0)\r\n curr_noUsers = len(User.query.all())\r\n self.assertEqual(prev_noUsers, curr_noUsers)", "def test_education_instance_created_without_required_arguments(self):\n\n\t\tEducation.objects.create(\n\t\t\tuser=self.user,\n\t\t\tschool_name=self.school_name,\n\t\t\tcourse_name=self.course_name,\n\t\t\tstart_date=self.start_date,\n\t\t)\n\n\t\teducation = Education.objects.get(pk=1)\n\t\tself.assertEqual(\n\t\t\tself.user,\n\t\t\teducation.user,\n\t\t\t\"Users don't match.\")\n\n\t\tself.assertEqual(\n\t\t\tself.school_name,\n\t\t\teducation.school_name,\n\t\t\t\"School names don't match.\"\n\t\t)\n\t\tself.assertEqual(\n\t\t\tself.course_name,\n\t\t\teducation.course_name,\n\t\t\t\"Course names don't match.\"\n\t\t)\n\t\tself.assertEqual(\n\t\t\tself.start_date,\n\t\t\teducation.start_date,\n\t\t\t\"Start dates don't match\"\n\t\t)", "def test_seat_not_available(self):\n\n user1 = User.objects.create(username=\"user1\", password=\"\", email=\"[email protected]\")\n user2 = User.objects.create(username=\"user2\", password=\"\", email=\"[email protected]\")\n\n course = Course.objects.first()\n course.student.add(user1)\n course.student.add(user2)\n\n self.assertFalse(course.is_seat_available())", "def create_student(self, username):\r\n return self._create_user(username, is_staff=False)", "def test_not_permitted(self):\r\n test_user_client, test_user = self.create_non_staff_authed_user_client()\r\n CourseEnrollment.enroll(test_user, self.course.id)\r\n response = test_user_client.get(self.orphan_url)\r\n self.assertEqual(response.status_code, 403)\r\n response = test_user_client.delete(self.orphan_url)\r\n self.assertEqual(response.status_code, 403)", "def clean(self):\n c = super(UserForm, self).clean()\n if (self.instance.pk is None and\n c.get('email') and\n user_exists(c.get('email'),\n c.get('last_name'),\n c.get('first_name'),\n self.current_round_name)):\n raise forms.ValidationError(\n ugettext('APPLICATION_EXISTS PLEASE_LOGIN'))\n return c", "def test_raises_on_missing_needed_fields(self):\n test_name = \"impossible_creature_not_present\"\n self.form.constructor_fields = [*self.form.constructor_fields, test_name]\n message = \"The fields for email, username, and constructor must be set in fields. 
\"\n self.assertNotIn(test_name, self.form.base_fields)\n with self.assertRaisesMessage(ImproperlyConfigured, message):\n self.form.confirm_required_fields()", "def test_create_no_email(self):\n\n self.portal.portal_properties.site_properties.use_email_as_login = True\n\n self.assertRaises(\n ValueError,\n api.user.create,\n username='chuck', password='secret'\n )", "def test_unauthorized_create_user(self):\n res = self.submit()\n\n assert res.status_code == 401", "def test_incomplete_user_exception(self):\n u_username_only = User(username=\"incomplete_user\")\n with self.assertRaises(TypeError) as err:\n User.signup(u_username_only)", "def test_not_permitted(self, default_store):\n course = self.create_course_with_orphans(default_store)\n orphan_url = reverse_course_url('orphan_handler', course.id)\n\n test_user_client, test_user = self.create_non_staff_authed_user_client()\n CourseEnrollment.enroll(test_user, course.id)\n response = test_user_client.get(orphan_url)\n self.assertEqual(response.status_code, 403)\n response = test_user_client.delete(orphan_url)\n self.assertEqual(response.status_code, 403)", "def test_blank(self):\n form_data = {\n 'username': 'testuser',\n 'password1': '',\n 'password2': ''\n }\n form = StrictUserCreationForm(data=form_data)\n self.assertFalse(form.is_valid())", "def test_missing_parameters(self):\n # request\n request_body = {\n 'wwuid': self.student.wwuid,\n 'enroll_key': self.labgroup.enroll_key\n }\n response = self.client.post(reverse(self.view_name), request_body)\n # test response\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n # test database\n self.assertEqual(Student.objects.first().labgroup, None)", "def test_admin_cannot_create_user_with_empty_fields(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n user = dict(\n name='',\n username='',\n password='',\n role=''\n )\n\n resp = self.client.post(\n '/api/v1/users',\n content_type='application/json',\n data=json.dumps(user),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n\n reply = json.loads(resp.data.decode())\n\n self.assertEqual(reply['message'], 'Please input all fields!')\n self.assertEqual(resp.status_code, 400)", "def _warn_no_students(self):\n message = \"<tr><h2>No student records were found</h2></tr>\"\n self.add_element(message,True,0,True)", "def test__user_passed_as_none(self):\r\n access.has_access(None, 'staff', 'global', None)", "def test_create_user_invalid_id(self):\r\n print(\"Create user invalid id (already taken)\")\r\n u_id = 100\r\n username = \"newtestuser\"\r\n password = \"test9999\"\r\n u_type = 1\r\n\r\n prev_noUsers = len(User.query.all())\r\n self.assertEqual(self.system.create_user(u_id, username, password, u_type), 0)\r\n curr_noUsers = len(User.query.all())\r\n self.assertEqual(prev_noUsers, curr_noUsers)", "def test_invalid_user_without_email(self):\n email = ''\n password = None\n with self.assertRaises(ValueError):\n self.user_manager.create_user(email=email, password=password)", "def test_course_not_available(self):\n \n user1 = User.objects.create(username=\"user1\", password=\"1234\", email=\"[email protected]\")\n user2 = User.objects.create(username=\"user2\", password=\"1234\", email=\"[email protected]\")\n \n course = Course.objects.first()\n course.registered_course.add(user1)\n course.registered_course.add(user2)\n \n self.assertFalse(course.is_course_available())", "def test_create_user(self):\n self.assertEqual(self.new_user.first_name, 'John')\n 
self.assertEqual(self.new_user.last_name, 'Doe')\n self.assertEqual(self.new_user.username, 'john_doe')\n self.assertEqual(self.new_user.email, '[email protected]')\n self.assertTrue(self.new_user.check_password('test_password'))\n self.assertFalse(self.new_user.is_staff)\n self.assertFalse(self.new_user.is_superuser)\n self.assertFalse(self.new_user.is_active)\n\n with self.assertRaises(ValueError):\n User.objects.create_user(\n first_name='', last_name='', username='', email='', bio='', password=''\n )", "def test_create_course_with_course_creation_disabled_not_staff(self):\r\n with mock.patch.dict('django.conf.settings.FEATURES', {'DISABLE_COURSE_CREATION': True}):\r\n self.user.is_staff = False\r\n self.user.save()\r\n self.assert_course_permission_denied()", "def test_new_user_invalid_email(self):\n with self.assertRaises(ValueError):\n get_user_model().objects.create_user(None, '123')", "def test_is_student_user(self):\n student = User.objects.get(email='[email protected]')\n self.assertEqual(student.is_staff, False)", "def test_create_student_missing_birthday(self):\n response = self.client.post(self.url, data=json.dumps(self.payload_missing_birthday),\n content_type='application/json')\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, msg=response.content.decode())\n self.assertEqual(Student.objects.count(), self.qty)", "def test_other_users_create_assessment(self):\n req, resp = data.get_assessment(self.contract['id'])\n\n response = self.user_02.post(self.assessment_list_url, req)\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)\n\n response = self.convener.post(self.assessment_list_url, req)\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)\n\n response = self.supervisor_formal.post(self.assessment_list_url, req)\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def test_other_users_create_assessment(self):\n req, resp = data.get_assessment(self.contract['id'])\n\n response = self.user_02.post(self.assessment_list_url, req)\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)\n\n response = self.convener.post(self.assessment_list_url, req)\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)\n\n response = self.supervisor_formal.post(self.assessment_list_url, req)\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def test_new_user_invalid_email(self):\n with self.assertRaises(ValueError):\n get_user_model().objects.create_user(None, '3232Ze')", "def test_user_signup_with_invalid_first_name(self):\n pass", "def test_new_user_invalid_email(self):\n with self.assertRaises(ValueError):\n get_user_model().objects.create_user(None, \"test123\")", "def test_new_user_invalid_email(self):\n with self.assertRaises(ValueError):\n get_user_model().objects.create_user(None, 'test123')", "def test_new_user_invalid_email(self):\n with self.assertRaises(ValueError):\n get_user_model().objects.create_user(None, 'test123')", "def test_new_user_invalid_email(self):\n with self.assertRaises(ValueError):\n get_user_model().objects.create_user(None, 'test123')", "def test_new_user_invalid_email(self):\n with self.assertRaises(ValueError):\n get_user_model().objects.create_user(None, 'test123')", "def test_new_user_invalid_email(self):\n with self.assertRaises(ValueError):\n get_user_model().objects.create_user(None, 'test123')", "def test_create_use_with_invalid_email(self):\n with self.assertRaises(ValueError):\n get_user_model().objects.create_user(None, 
password='open@123')", "def test_new_user_invalid_email(self):\n\n with self.assertRaises(ValueError):\n get_user_model().objects.create_user(None, 'test1234')", "def test_new_user_invalid_email(self):\n\n with self.assertRaises(ValueError):\n get_user_model().objects.create_user(None, 'test123')", "def test_create_instructor_missing_birthday(self):\n response = self.client.post(self.url, data=json.dumps(self.payload_missing_birthday),\n content_type='application/json')\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, msg=response.content.decode())\n self.assertEqual(Instructor.objects.count(), self.qty)", "def test_new_user_invalid_email(self):\n with self.assertRaises(ValueError):\n get_user_model().objects.create_user(None, 'test123456')", "def test_new_user_with_invalid_email(self):\n with self.assertRaises(ValueError):\n get_user_model().objects.create_user(None, 'userpass123')", "def test_signup_member_only(app, resource):\n nethz = 'Something'\n with app.user(nethz=nethz, membership='none'):\n # Create fake course to sign up to\n course = str(app.data.driver.db['courses'].insert({}))\n\n # Try with other nethz\n data = {\n 'nethz': nethz,\n 'course': course\n }\n app.client.post(resource,\n data=data,\n assert_status=422)", "def test_create_user_invalid_username(self):\r\n print(\"Create user invalid username (already taken)\")\r\n u_id = 3\r\n username = \"100\"\r\n password = \"test9999\"\r\n u_type = 1\r\n\r\n prev_noUsers = len(User.query.all())\r\n self.assertEqual(self.system.create_user(u_id, username, password, u_type), 0)\r\n curr_noUsers = len(User.query.all())\r\n self.assertEqual(prev_noUsers, curr_noUsers)", "def test_request_membership_form_with_an_invalid_user_id(self):\n pass", "def test_user_creation(self):\n self.assertEqual(CustomUser.objects.count() - self.user_count, 2)\n self.assertEqual(self.new_student_user.username, 'Student')\n self.assertTrue(self.new_student_user.password, 'student12345')\n self.assertEqual(self.new_instructor_user.username, 'Instructor')\n self.assertTrue(self.new_instructor_user.password, 'instructor12345')", "def _requireStudyAdmin(self, user):\n studyAdminsGroup = self.model('group').findOne({'name': 'Study Administrators'})\n if not studyAdminsGroup or studyAdminsGroup['_id'] not in user['groups']:\n if not user.get('admin', False):\n raise AccessException(\n 'Only members of the Study Administrators group can create or modify studies.')", "def test_signup_missing_username(self):\n\n invalid_u = User.signup(\"[email protected]\", None, \"testpass\", \"Test\", \"User\", None)\n \n uid = 99999\n invalid_u.id = uid\n\n with self.assertRaises(exc.IntegrityError) as context:\n db.session.commit()", "def test_if_not_created_authenticated_permissions(self):\r\n payload = {\r\n \"email\": \"[email protected]\",\r\n \"password\": \"password\",\r\n \"name\": \"asdasd\",\r\n \"is_superuser\": False,\r\n }\r\n\r\n res = self.client_authenticated.post(reverse(CREATE_USER_URL),data=payload)\r\n self.assertEqual(res.status_code, status.HTTP_403_FORBIDDEN)", "def test_user_errors(self):\r\n user = User(\r\n email_addr=\"[email protected]\",\r\n name=\"johndoe\",\r\n fullname=\"John Doe\",\r\n locale=\"en\")\r\n\r\n # User.name should not be nullable\r\n user.name = None\r\n db.session.add(user)\r\n assert_raises(IntegrityError, db.session.commit)\r\n db.session.rollback()\r\n\r\n # User.fullname should not be nullable\r\n user.name = \"johndoe\"\r\n user.fullname = None\r\n db.session.add(user)\r\n assert_raises(IntegrityError, 
db.session.commit)\r\n db.session.rollback()\r\n\r\n # User.email_addr should not be nullable\r\n user.name = \"johndoe\"\r\n user.fullname = \"John Doe\"\r\n user.email_addr = None\r\n db.session.add(user)\r\n assert_raises(IntegrityError, db.session.commit)\r\n db.session.rollback()", "def test_signup_missing_last_name(self):\n\n invalid_u = User.signup(\"[email protected]\", \"testuser\", \"testpass\", \"Test\", None, None)\n \n uid = 99999\n invalid_u.id = uid\n\n with self.assertRaises(exc.IntegrityError) as context:\n db.session.commit()", "def test_cannot_create_superuser_without_password(self):\n with self.assertRaises(TypeError):\n User.objects.create_superuser(username=\"admin\", email=\"[email protected]\")", "def test_user_invalid_email(self):\n with self.assertRaises(ValueError):\n get_user_model().objects.create_user(email=None, password=\"123\")", "def test_notes_invalid_student(self):\n student_id = '1234567890'\n career_id = 34\n perdiod_id = 115\n result = self.ucuenca.notes(student_id, career_id, perdiod_id)\n self.assertFalse(result)", "def test_post_student_if_anonymous(self):\n\n url = reverse('student-list')\n response = self.client.post(url, self.data)\n\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)\n self.assertEqual(Student.objects.count(), 1)", "def test_negative_validation_decision(self, form_field_name, user_data):\n self.assertNotValidationDecision(\n {form_field_name: user_data},\n {form_field_name: ''}\n )", "def test_create_new_user_missing_fields(self):\n data = {}\n\n response = self.client.post(\n reverse('user-list'),\n data,\n format='json',\n )\n\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n\n content = {\n 'email': ['This field is required.'],\n 'password': ['This field is required.']\n }\n self.assertEqual(json.loads(response.content), content)", "def test_create_student_complete_data(self):\n response = self.client.post(self.url, data=json.dumps(self.payload_all), content_type='application/json')\n self.assertEqual(response.status_code, status.HTTP_200_OK, msg=response.content.decode())\n self.assertEqual(Student.objects.count(), self.qty + 1)\n self.assertTrue(User.objects.filter(email=self.payload_all['email']).exists())\n self.assertTrue(User.objects.filter(username=self.payload_all['email']).exists())\n user_id = User.objects.get(username=self.payload_all['email']).id\n self.assertTrue(Student.objects.filter(user_id=user_id).exists())", "def test_new_user_invalid_email(self):\n with self.assertRaises(ValueError):\n get_user_model().objects.create_user(\"\", \"test42837492374923749\")", "def has_student(self, user, allow_superusers=True):\n return (user.is_superuser and allow_superusers) or len(self.students.filter(id=user.id)) > 0", "def test_create_student(self):\n response = self.client.post(self.url, data=json.dumps(self.payload), content_type='application/json')\n self.assertEqual(response.status_code, status.HTTP_200_OK, msg=response.content.decode())\n self.assertEqual(Student.objects.count(), self.qty + 1)\n self.assertTrue(User.objects.filter(email=self.payload['email']).exists())\n self.assertTrue(User.objects.filter(username=self.payload['email']).exists())\n user_id = User.objects.get(username=self.payload['email']).id\n self.assertTrue(Student.objects.filter(user_id=user_id).exists())", "def test_bad_request_anon_user_no_subject(self, zendesk_mock_class, datadog_mock):\r\n self._test_bad_request_omit_field(self._anon_user, self._anon_fields, \"subject\", zendesk_mock_class, 
datadog_mock)\r\n self._test_bad_request_empty_field(self._anon_user, self._anon_fields, \"subject\", zendesk_mock_class, datadog_mock)", "def test_add_users_doesnt_add_duplicate_entry(self):\r\n role = CourseStaffRole(self.course_key)\r\n role.add_users(self.student)\r\n self.assertTrue(role.has_user(self.student))\r\n # Call add_users a second time, then remove just once.\r\n role.add_users(self.student)\r\n role.remove_users(self.student)\r\n self.assertFalse(role.has_user(self.student))", "def test_create_no_username(self):\n\n # If there is no username, email will be used instead\n properties = self.portal.portal_properties.site_properties\n properties.manage_changeProperties(use_email_as_login=True)\n\n user = api.user.create(\n email='[email protected]',\n password='secret'\n )\n\n self.assertEquals(user.getUserName(), '[email protected]')\n\n # But if using emails as a username is disabled, we should get\n # an error\n properties.manage_changeProperties(use_email_as_login=False)\n\n self.assertRaises(\n ValueError,\n api.user.create,\n email='[email protected]', password='secret'\n )", "def test_user_create(self):\n user_count = User.objects.count()\n user = User.objects.create_user(email='[email protected]', password='test')\n self.assertTrue(User.objects.count() == user_count + 1)\n self.assertFalse(user.is_staff)\n self.assertFalse(user.is_superuser)\n with self.assertRaises(ValueError, msg='The email must be provided'):\n User.objects.create_user(email='', password='test')", "def test_new_user_invalid_email(self):\n with self.assertRaises(ValueError):\n get_user_model().objects.create_user(\n None,\n 'Password'\n )", "def test_create_Student_missing_param(self):\n school_ids = self.create_School(1,20)\n url = '/students'\n\n \"\"\"Normal request\"\"\"\n data = {'first_name': 'Poompatai', 'last_name': 'Puntitpong','age': 20, 'nationality': 'Thailand', 'school': school_ids[0]}\n response = self.client.post(url, data, format='json')\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n\n \"\"\"Missing first_name\"\"\"\n data = {'last_name': 'Puntitpong','age': 20, 'nationality': 'Thailand', 'school': school_ids[0]}\n response = self.client.post(url, data, format='json')\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(response.data['first_name'][0].code, 'required')\n\n \"\"\"Missing all\"\"\"\n data = {}\n response = self.client.post(url, data, format='json')\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(response.data['first_name'][0].code, 'required')\n self.assertEqual(response.data['last_name'][0].code, 'required')\n self.assertEqual(response.data['age'][0].code, 'required')\n self.assertEqual(response.data['nationality'][0].code, 'required')\n self.assertEqual(response.data['school'][0].code, 'required')", "def test_dont_save_new_user(self):\n self.assertEqual(get_user_model().objects.exists(), 1)", "def test_if_not_created_unauthorized(self):\r\n payload = {\r\n \"email\": \"[email protected]\",\r\n \"password\": \"password\",\r\n \"name\": \"asdasd\",\r\n \"is_superuser\": False,\r\n }\r\n\r\n res = self.client_not_authenticated.post(reverse(CREATE_USER_URL),data=payload)\r\n self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_create_course_with_bad_organization(self):\r\n self.course_data['org'] = 'University of California, Berkeley'\r\n self.assert_course_creation_failed(\r\n r\"(?s)Unable to create course 'Robot Super Course'.*: Invalid characters in 
u'University of California, Berkeley'\")", "def test_create_user_with_no_role(setup_client):\n client = setup_client\n payload = {\n 'email': '[email protected]',\n 'password': 'password',\n 'name': 'Test name'\n }\n res = client.post(CREATE_USER_URL, payload)\n assert res.status_code == status.HTTP_400_BAD_REQUEST\n user_exists = get_user_model().objects.filter(\n email=payload['email']).exists()\n assert not user_exists", "def _validate_user(_):\n pass", "def test_negative_conditions(self):\r\n outline_url = reverse_course_url('course_handler', self.course.id)\r\n # register a non-staff member and try to delete the course branch\r\n non_staff_client, _ = self.create_non_staff_authed_user_client()\r\n response = non_staff_client.delete(outline_url, {}, HTTP_ACCEPT='application/json')\r\n self.assertEqual(response.status_code, 403)" ]
[ "0.67956454", "0.63918", "0.636316", "0.6349266", "0.63289773", "0.6238365", "0.623791", "0.6165642", "0.61546856", "0.61181474", "0.6080576", "0.60766065", "0.60527575", "0.60310954", "0.60178196", "0.6006018", "0.59811884", "0.5958835", "0.59572184", "0.59439963", "0.5943449", "0.5939593", "0.5896514", "0.5877708", "0.58449924", "0.5835229", "0.582459", "0.5813666", "0.5810574", "0.5803253", "0.5802381", "0.579278", "0.5790807", "0.5777622", "0.57758754", "0.57712036", "0.57707226", "0.57683325", "0.57399684", "0.5736417", "0.57004637", "0.5689629", "0.5651125", "0.5642347", "0.56394833", "0.5635168", "0.56196445", "0.5619083", "0.56097496", "0.560963", "0.56088996", "0.5603647", "0.5602314", "0.5595238", "0.5595238", "0.55925274", "0.5583632", "0.55792296", "0.5573145", "0.5573145", "0.5573145", "0.5573145", "0.5573145", "0.55706906", "0.5568989", "0.5561123", "0.55516934", "0.5551463", "0.55474913", "0.5541152", "0.5538526", "0.5537552", "0.55327445", "0.55322486", "0.55259895", "0.55189043", "0.5517478", "0.55154085", "0.5512312", "0.5508246", "0.5508029", "0.55027235", "0.5495776", "0.54933", "0.5492842", "0.5483845", "0.5477514", "0.54708976", "0.54661995", "0.54644", "0.54627115", "0.5459263", "0.54579103", "0.54549515", "0.54541534", "0.5450503", "0.5447705", "0.54356194", "0.54303515", "0.54298276" ]
0.64296496
1
Ensure we can't create a new user with blank fields
def test_create_new_user_blank_fields(self):
    self.maxDiff = None
    data = {
        'email': '',
        'password': '',
    }

    response = self.client.post(
        reverse('user-list'),
        data,
        format='json',
    )

    self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)

    content = {
        'email': ['This field may not be blank.'],
        'password': ['This field may not be blank.'],
    }
    self.assertEqual(json.loads(response.content), content)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_blank(self):\n form_data = {\n 'username': 'testuser',\n 'password1': '',\n 'password2': ''\n }\n form = StrictUserCreationForm(data=form_data)\n self.assertFalse(form.is_valid())", "def test_create_user_missing_fields(self):\n payload = {\n 'email': 'email',\n 'password': ''\n }\n res = self.client.post(CREATE_USER_API, payload)\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)", "def test_admin_cannot_create_user_with_empty_fields(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n user = dict(\n name='',\n username='',\n password='',\n role=''\n )\n\n resp = self.client.post(\n '/api/v1/users',\n content_type='application/json',\n data=json.dumps(user),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n\n reply = json.loads(resp.data.decode())\n\n self.assertEqual(reply['message'], 'Please input all fields!')\n self.assertEqual(resp.status_code, 400)", "def test_create_new_user_missing_fields(self):\n data = {}\n\n response = self.client.post(\n reverse('user-list'),\n data,\n format='json',\n )\n\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n\n content = {\n 'email': ['This field is required.'],\n 'password': ['This field is required.']\n }\n self.assertEqual(json.loads(response.content), content)", "def signup_user_with_empty_fields(self):\n response = self.client.post(\n self.signup_url, self.invalid_user_with_empty_fields, format='json')\n\n return response", "def testAddNoneUsernameAndPassword(self):\n self.assertEquals(models.ERR_BAD_USERNAME, self.users.add(\"\", \"\"))", "def test_create_user_empty_string(self):\n res = self.client.post(CREATE_USER_URL, {\n **self.mock_user,\n 'name': ''\n })\n\n self.assertEqual(res.status_code, status.HTTP_201_CREATED)\n\n user = get_user_model().objects.get(**res.data)\n\n self.assertTrue(user.check_password(self.mock_user['password']))\n self.assertNotEqual('', user.name)", "def clean(self):\n c = super(UserForm, self).clean()\n if (self.instance.pk is None and\n c.get('email') and\n user_exists(c.get('email'),\n c.get('last_name'),\n c.get('first_name'),\n self.current_round_name)):\n raise forms.ValidationError(\n ugettext('APPLICATION_EXISTS PLEASE_LOGIN'))\n return c", "def test_create_user_invalid_username(self):\r\n print(\"Create user invalid username (already taken)\")\r\n u_id = 3\r\n username = \"100\"\r\n password = \"test9999\"\r\n u_type = 1\r\n\r\n prev_noUsers = len(User.query.all())\r\n self.assertEqual(self.system.create_user(u_id, username, password, u_type), 0)\r\n curr_noUsers = len(User.query.all())\r\n self.assertEqual(prev_noUsers, curr_noUsers)", "def test_creating_a_new_user_without_email(self):\n\n with self.assertRaises(ValueError):\n get_user_model().objects.create_user(None, \"Test1234\")", "def test_admin_cannot_update_user_with_empty_fields(self):\n resp = self.admin_create_user()\n reply = self.admin_login()\n token = reply['token']\n user = dict(\n name='',\n username='',\n password='',\n role=''\n )\n resp = self.client.put(\n '/api/v1/users/2',\n content_type='application/json',\n data=json.dumps(user),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'Please input all fields!')\n self.assertEqual(resp.status_code, 400)", "def test_cannot_create_user_without_email(self):\n with self.assertRaises(TypeError):\n User.objects.create_user(username=\"username\", password=\"password\", email=None)", "def test_no_user(self):\n 
form = self._get_form()\n self.assertTrue(self._validate_form(form), form.errors)\n self.assertRaises(IntegrityError, form.save)", "def test_create_no_username(self):\n\n # If there is no username, email will be used instead\n properties = self.portal.portal_properties.site_properties\n properties.manage_changeProperties(use_email_as_login=True)\n\n user = api.user.create(\n email='[email protected]',\n password='secret'\n )\n\n self.assertEquals(user.getUserName(), '[email protected]')\n\n # But if using emails as a username is disabled, we should get\n # an error\n properties.manage_changeProperties(use_email_as_login=False)\n\n self.assertRaises(\n ValueError,\n api.user.create,\n email='[email protected]', password='secret'\n )", "def test_private_create_user_without_email(self):\n with pytest.raises(ValueError) as exinfo:\n EmailUser.objects._create_user(None, None, False, False)\n self.assertIn('email must be set', str(exinfo.value))", "def test_username_not_in_use(self):\n self.request.json_body = {'username': 'newuser'}\n self.datautils.create_user({'username': 'testuser', 'password': 'testpass'})\n result = users_post_view(self.request)['d']\n self.assertIsInstance(result, dict)\n self.assertEqual(result, error_dict('api_errors', 'username, email, and password are all required string fields'))", "def test_empty_fields(self):\n with self.client:\n response = register_user(\n self, '', '', '', '')\n data = json.loads(response.data.decode())\n self.assertTrue(data['status'] == 'fail')\n self.assertTrue(data['message'] == 'Validation errors.')\n self.assertTrue(response.content_type == 'application/json')\n self.assertEqual(response.status_code, 422)", "def testAddNoneUsernameAndPassword(self):\n self.assertEquals(models.ERR_BAD_USERNAME, self.users.add(None, None))", "def test_username_nodigits(self):\n response = self.signup_a_user(self.username_nodigits)\n self.assertEqual(response.data['errors']['username'],\n [\"username is invalid\"]\n )\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)", "def test_create_new_student_user_missing_field(self):\n data = {\n 'email': '[email protected]',\n 'password': 'test123!',\n }\n\n response = self.client.post(\n reverse('user-list'),\n data,\n format='json',\n )\n\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)", "def test_blank_names(self):\n rv = self.signup('', '', '[email protected]', 'Bo1995', 'Bo1995')\n self.assertIn(b'This field is required.', rv.data)", "def test_signup_when_empty_data_provided(self):\n user = {}\n\n response = self.client.post('/api/v1/register', json=json.dumps(user), headers={'Content-Type': 'application/json'})\n data = response.get_json()\n\n self.assertEqual(response.status_code, 400)\n self.assertEqual(data['status'], 400)\n self.assertEqual(data['message'], 'Invalid data. 
Please fill all the required fields')", "def test_user_signup_with_invalid_first_name(self):\n pass", "def test_create_user_missing_data(self):\n data = {\"firstname\": \"John\"}\n res = self.post(url=\"/users\", data=data)\n self.assertException(res, exc.DataIsMissing)\n\n users = User.query.all()\n self.assertEqual(len(users), 5)", "def test_dont_create_user(self):\n self.assertFalse(User.objects.exists())", "def test_cannot_create_tab_with_empty_field(self):\n\n logged_user = utils.create_user_and_authenticate(self)\n self.group.users.add(logged_user)\n tab_fields = ['name']\n\n utils.test_cannot_post_with_empty_fields(self, self.url, tab_fields)\n self.assertEqual(len(Tab.objects.all()), 0)", "def signup_user_with_missing_fields(self):\n response = self.client.post(\n self.signup_url, self.invalid_user_with_missing_fields, format='json')\n\n return response", "def test_create_user_with_no_password(self):\n data = {\n 'username': 'foobar',\n 'email': '[email protected]',\n 'password': ''\n }\n response = self.client.post(self.create_url, data, format='json')\n self.assertEqual(User.objects.count(), 1)\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(len(response.data['password']), 1)", "def test_create_user_invalid_password(self):\r\n print(\"Create user invalid password (empty)\")\r\n u_id = 3\r\n username = \"newtestuser\"\r\n password = \"\"\r\n u_type = 1\r\n\r\n prev_noUsers = len(User.query.all())\r\n self.assertEqual(self.system.create_user(u_id, username, password, u_type), 0)\r\n curr_noUsers = len(User.query.all())\r\n self.assertEqual(prev_noUsers, curr_noUsers)", "def test_signup_with_blank_email_false(self):\n user = {\n \"Email\": \"\",\n \"Password\": \"pass1234\",\n \"Confirm Password\": \"pass5678\"\n }\n res = self.client().post('/api/v1/auth/signup', data=user)\n self.assertEqual(res.status_code, 400)\n res = res.get_json()\n self.assertEqual(res['error'][0],\n 'Email should not be blank')", "def test_signup_missing_first_name(self):\n\n invalid_u = User.signup(\"[email protected]\", \"testuser\", \"testpass\", None, \"User\", None)\n \n uid = 99999\n invalid_u.id = uid\n\n with self.assertRaises(exc.IntegrityError) as context:\n db.session.commit()", "def test_cannot_create_group_with_empty_field(self):\n\n utils.create_user_and_authenticate(self)\n group_fields = ['name', 'description']\n utils.test_cannot_post_with_empty_fields(self, self.url, group_fields)", "def test_creation_without_password(self, user):\n with pytest.raises(mongoengine.errors.ValidationError):\n user.save()", "def test_raises_on_missing_needed_fields(self):\n test_name = \"impossible_creature_not_present\"\n self.form.constructor_fields = [*self.form.constructor_fields, test_name]\n message = \"The fields for email, username, and constructor must be set in fields. 
\"\n self.assertNotIn(test_name, self.form.base_fields)\n with self.assertRaisesMessage(ImproperlyConfigured, message):\n self.form.confirm_required_fields()", "def clean(self):\n if self.edit_user is None and len(self.cleaned_data['password1']) == 0:\n raise forms.ValidationError(_(u'You must supply a password when creating a user'))\n return super(RegisterUserForm, self).clean()", "def test_new_user_invalid_email(self):\n with self.assertRaises(ValueError):\n get_user_model().objects.create_user(None, '3232Ze')", "def test_create_user_with_no_username(self):\n data = {\n 'username': '',\n 'email': '[email protected]',\n 'password': 'foobar'\n }\n response = self.client.post(self.create_url, data, format='json')\n self.assertEqual(User.objects.count(), 1)\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(len(response.data['username']), 1)", "def test_new_user_invalid_email(self):\n with self.assertRaises(ValueError):\n get_user_model().objects.create_user(None, '123')", "def test_user_empty_username(self):\n data = json.dumps({\n \"username\" : \"\", \"email\" : \"[email protected]\",\n \"password\" : \"12345678\", \"confirm_password\" : \"12345678\"})\n response = self.app.post(\n '/api/v3/users', data=data,\n content_type='application/json',\n headers=self.admin_header)\n self.assertEqual(response.status_code, 400)", "def test_register_user_with_empty_data(self, app):\n data = RegisterUser.random()\n setattr(data, \"username\", None)\n res = app.register.register(\n data=data, type_response=RegisterUserResponseInvalid\n )\n assert res.status_code == 400\n assert res.data.message == ResponseText.MESSAGE_REGISTER_USER_INVALID", "def test_create_user_invalid_type(self):\r\n print(\"Create user invalid type\")\r\n u_id = 3\r\n username = \"newtestuser\"\r\n password = \"password\"\r\n u_type = 5\r\n\r\n prev_noUsers = len(User.query.all())\r\n self.assertEqual(self.system.create_user(u_id, username, password, u_type), 0)\r\n curr_noUsers = len(User.query.all())\r\n self.assertEqual(prev_noUsers, curr_noUsers)", "def testAddEmptyUsername(self):\n self.assertEquals(models.ERR_BAD_USERNAME, self.users.add(\"\", \"password\"))", "def test_register_without_data(self):\n response = self.client.post('/api/v2/auth/signup',\n data=json.dumps(users[4]),\n headers=self.admin_headers,\n content_type='application/json')\n self.assertEqual(response.status_code, 400)\n self.assertIn('Missing required parameter', str(response.data))", "def test_signup_when_there_are_missing_fields(self):\n user = {\n 'firstname' : 'Caleb',\n 'lastname' : 'Mbugua',\n 'password' : '12345566'\n }\n\n response = self.client.post('/api/v1/register', json=user, headers={'Content-Type': 'application/json'})\n data = response.get_json()\n\n self.assertEqual(response.status_code, 400)\n self.assertEqual(data['status'], 400)\n self.assertEqual(data['message'], 'Invalid data. 
Please fill all the required fields')", "def test_empty_string(self):\n self.assertEqual(self.user_1.email, \"\")\n self.assertEqual(self.user_1.password, \"\")\n self.assertEqual(self.user_1.first_name, \"\")\n self.assertEqual(self.user_1.last_name, \"\")", "def test_create_user(self):\n self.assertEqual(self.new_user.first_name, 'John')\n self.assertEqual(self.new_user.last_name, 'Doe')\n self.assertEqual(self.new_user.username, 'john_doe')\n self.assertEqual(self.new_user.email, '[email protected]')\n self.assertTrue(self.new_user.check_password('test_password'))\n self.assertFalse(self.new_user.is_staff)\n self.assertFalse(self.new_user.is_superuser)\n self.assertFalse(self.new_user.is_active)\n\n with self.assertRaises(ValueError):\n User.objects.create_user(\n first_name='', last_name='', username='', email='', bio='', password=''\n )", "def create(self):\n if User.load(username) is None:\n \"\"\"This username is not in use\"\"\"\n if self.validateEmail(self.email):\n \"\"\"This email is valid\"\"\"\n if len(self.username) > 2:\n \"\"\"This is long enough\"\"\"\n self.__store()", "def test_new_user_with_invalid_email(self):\n with self.assertRaises(ValueError):\n get_user_model().objects.create_user(None, 'userpass123')", "def test_new_user_invalid_email(self):\n with self.assertRaises(ValueError):\n get_user_model().objects.create_user(None, 'test123')", "def test_new_user_invalid_email(self):\n with self.assertRaises(ValueError):\n get_user_model().objects.create_user(None, 'test123')", "def test_new_user_invalid_email(self):\n with self.assertRaises(ValueError):\n get_user_model().objects.create_user(None, 'test123')", "def test_new_user_invalid_email(self):\n with self.assertRaises(ValueError):\n get_user_model().objects.create_user(None, 'test123')", "def test_new_user_invalid_email(self):\n with self.assertRaises(ValueError):\n get_user_model().objects.create_user(None, 'test123')", "def test_new_user_invalid_email(self):\n with self.assertRaises(ValueError):\n get_user_model().objects.create_user(None, \"test123\")", "def test_empty_data(self, client):\n url = reverse('users:create')\n response = client.post(url)\n assert response.status_code == 200\n assert 'This field is required.' 
in str(response.content)", "def create_token_missing_field(self):\n payload = {'email': '[email protected]', 'password': 'testpass'}\n create_user(**payload)\n payload = {'email': '[email protected]', 'password': ''}\n res = self.client.post(TOKEN_URL, payload)\n self.assertNotIn('token', res.data)\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)", "def test_new_user_invalid_email(self):\n with self.assertRaises(ValueError):\n get_user_model().objects.create_user(None, 'test123456')", "def testAddNoneUsername(self):\n self.assertEquals(models.ERR_BAD_USERNAME, self.users.add(None, \"password\"))", "def test_blank_password(self):\n rv = self.signup('Bo', 'Theo', '[email protected]', '', 'Bo1995')\n self.assertIn(b'This field is required.', rv.data)", "def test_new_user_invalid_email(self):\n\n with self.assertRaises(ValueError):\n get_user_model().objects.create_user(None, 'test1234')", "def test_signup_missing_username(self):\n\n invalid_u = User.signup(\"[email protected]\", None, \"testpass\", \"Test\", \"User\", None)\n \n uid = 99999\n invalid_u.id = uid\n\n with self.assertRaises(exc.IntegrityError) as context:\n db.session.commit()", "def test_new_user_invalid_email(self):\n\n with self.assertRaises(ValueError):\n get_user_model().objects.create_user(None, 'test123')", "def test_create_user_invalid_id(self):\r\n print(\"Create user invalid id (already taken)\")\r\n u_id = 100\r\n username = \"newtestuser\"\r\n password = \"test9999\"\r\n u_type = 1\r\n\r\n prev_noUsers = len(User.query.all())\r\n self.assertEqual(self.system.create_user(u_id, username, password, u_type), 0)\r\n curr_noUsers = len(User.query.all())\r\n self.assertEqual(prev_noUsers, curr_noUsers)", "def test_create_user_passwords_repeat_is_missing(self):\n data = {\"firstname\": \"John\", \"lastname\": \"Doe\", \"password\": \"supersecret\"}\n res = self.post(url=\"/users\", data=data)\n self.assertException(res, exc.DataIsMissing)\n users = User.query.all()\n self.assertEqual(len(users), 5)", "def test_user_creation(self):\n user = UserModel.objects.create_user(\n username=\"saimer\"\n )\n self.assertEqual(user.email, \"\")\n self.assertEqual(user.username, \"saimer\")\n self.assertFalse(user.has_usable_password())", "def test_new_user_invalid_email(self):\n with self.assertRaises(ValueError):\n get_user_model().objects.create_user(\n None,\n 'Password'\n )", "def test_new_user_invalid_email(self):\n with self.assertRaises(ValueError):\n get_user_model().objects.create_user(\"\", \"test42837492374923749\")", "def test_signup_with_blank_password_false(self):\n user = {\n \"Email\": \"[email protected]\",\n \"Password\": \"\",\n \"Confirm Password\": \"pass5678\"\n }\n res = self.client().post('/api/v1/auth/signup', data=user)\n self.assertEqual(res.status_code, 400)\n res = res.get_json()\n self.assertEqual(res['error'][0],\n 'Password should not be blank')", "def test_length(self):\n form_data = {\n 'username': 'testuser',\n 'password1': 'c897B$eH@',\n 'password2': 'c897B$eH@'\n }\n form = StrictUserCreationForm(data=form_data)\n self.assertFalse(form.is_valid())", "def testAddNonePassword(self):\n self.assertEquals(models.ERR_BAD_PASSWORD, self.users.add(\"userF\", None))", "def test_user_no_displayname_no_gcos_no_cn(dummy_user_dict):\n del dummy_user_dict[\"displayname\"]\n del dummy_user_dict[\"gecos\"]\n del dummy_user_dict[\"cn\"]\n user = User(dummy_user_dict)\n assert user.name is None", "def test_dont_save_new_user(self):\n self.assertEqual(get_user_model().objects.exists(), 1)", "def 
test_user_empty_email(self):\n data = json.dumps({\n \"username\" : \"empty\", \"email\" : \"\",\n \"password\" : \"secret12345\", \"confirm_password\" : \"secret12345\"})\n response = self.app.post(\n '/api/v3/users', data=data,\n content_type='application/json',\n headers=self.admin_header)\n self.assertEqual(response.status_code, 400)", "def test_new_user_invalid_email(self):\n with self.assertRaises(ValueError):\n get_user_model().objects.create_user(\n email=None,\n password=self.test_user_pass,\n name=self.test_user_name,\n )", "def test_create_user_with_no_email(self):\n data = data = {\n 'username': 'foobar',\n 'email': '',\n 'password': 'testpassword'\n }\n response = self.client.post(self.create_url, data, format='json')\n self.assertEqual(User.objects.count(), 1)\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(len(response.data['email']), 1)", "def test_registeration_no_username(self):\n response = self.signup_a_user(self.user_lacks_username)\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertNotIn(\"token\", response.data)", "def test_invalid_user_without_email(self):\n email = ''\n password = None\n with self.assertRaises(ValueError):\n self.user_manager.create_user(email=email, password=password)", "def test_blank(self):\n form_data = self.form_data('')\n form = self.form(data=form_data, user=self.u)\n self.assertFalse(form.is_valid())", "def test_create_use_with_invalid_email(self):\n with self.assertRaises(ValueError):\n get_user_model().objects.create_user(None, password='open@123')", "def test_create_user_fails_with_no_username(self):\n user = get_user_model().objects.create(\n email='[email protected]',\n first_name='Test',\n password='pass123456!'\n )\n\n users = User.objects.filter(username='Test')\n\n self.assertEqual(len(users), 0)", "def users_create():", "def test_empty_first_name_field(self):\r\n result=self.user.get_user_register(\"\",\"Stephen\",\" Ochieng\",\"[email protected]\",\"stephenochieng\",\"eat\")\r\n self.assertEqual(2,result,\"Please fill in the first name field\")", "def test_register_user_with_empty_password(self, app):\n data = RegisterUser.random()\n setattr(data, \"password\", None)\n res = app.register.register(\n data=data, type_response=RegisterUserResponseInvalid\n )\n assert res.status_code == 400\n assert res.data.message == ResponseText.MESSAGE_REGISTER_USER_INVALID", "def test_empty_username_field(self):\n self.empty_username = {'user': {\n \"username\": \"\",\n \"email\": \"[email protected]\",\n \"password\": \"Password123\"\n }}\n response = self.client.post(\n self.reg_url,\n self.empty_username,\n format=\"json\")\n\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertIn(b\"may not be blank\", response.content)", "def test_create_account_failed_no_first_name(self):\n data = self.user_data.copy()\n data.pop('first_name')\n response = self.client.post(self.url, data, format='json')\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(response.data.get('message').get('first_name')[0], 'This field is required.')", "def test_user_create(self):\n user_count = User.objects.count()\n user = User.objects.create_user(email='[email protected]', password='test')\n self.assertTrue(User.objects.count() == user_count + 1)\n self.assertFalse(user.is_staff)\n self.assertFalse(user.is_superuser)\n with self.assertRaises(ValueError, msg='The email must be provided'):\n User.objects.create_user(email='', 
password='test')", "def test_08_create_user_not_exists(self):\n\n _, user = self.get_random_item(models.User)\n utils.create_user(user, session=self.session)\n success, error = utils.create_user(user, session=self.session)\n self.assertFalse(success)\n self.assertTrue(error)", "def test_create_no_email(self):\n\n self.portal.portal_properties.site_properties.use_email_as_login = True\n\n self.assertRaises(\n ValueError,\n api.user.create,\n username='chuck', password='secret'\n )", "def __init__(self, *args, **kwargs):\n super(UserCreationForm, self).__init__(*args, **kwargs)\n self.fields[\"first_name\"].required = True\n self.fields[\"last_name\"].required = True\n self.fields[\"email\"].required = True", "def test_user_invalid_email(self):\n with self.assertRaises(ValueError):\n get_user_model().objects.create_user(email=None, password=\"123\")", "def test_empty_user_name_field(self):\r\n result=self.user.get_user_register(\"Stephen\",\" Ochieng\",\"[email protected]\",\"stephenochieng\",\"eat\")\r\n self.assertEqual(2,result,\"Fill in the username field please\")", "def test_user_empty_body(self):\n data = json.dumps({})\n response = self.app.post(\n '/api/v3/users', data=data,\n content_type='application/json',\n headers=self.admin_header)\n self.assertEqual(response.status_code, 400)", "def test_signup_missing_last_name(self):\n\n invalid_u = User.signup(\"[email protected]\", \"testuser\", \"testpass\", \"Test\", None, None)\n \n uid = 99999\n invalid_u.id = uid\n\n with self.assertRaises(exc.IntegrityError) as context:\n db.session.commit()", "def test_create_user_only_lastname(self):\n data = {\"lastname\": \"Doe\"}\n res = self.post(url=\"/users\", data=data)\n self.assertEqual(res.status_code, 200)\n self.assertIn(b\"Created user.\", res.data)\n user = User.query.filter_by(id=6).first()\n self.assertEqual(user.firstname, None)\n self.assertEqual(user.lastname, \"Doe\")\n self.assertFalse(user.is_verified)", "def test_create_invalid(self):\n url = '/api/users/'\n data = {}\n username = str(uuid1())[:8]\n # Response should be status 400 where essential parameters are missing.\n response = self.client.post(url, json.dumps(data), content_type='application/json')\n self.assertEqual(response.status_code, 400)\n data['EmailAddress'] = '{}@dbca.wa.gov.au'.format(username)\n response = self.client.post(url, json.dumps(data), content_type='application/json')\n self.assertEqual(response.status_code, 400)\n data['DisplayName'] = 'Doe, John'\n response = self.client.post(url, json.dumps(data), content_type='application/json')\n self.assertEqual(response.status_code, 400)\n data['SamAccountName'] = username\n response = self.client.post(url, json.dumps(data), content_type='application/json')\n self.assertEqual(response.status_code, 201) # Now valid.", "def test_create_empty_user(patch_mongo):\n user = {\n \"name\": \"\"\n }\n\n response = client.put(\"/user\", json=user)\n assert response.status_code == status.HTTP_400_BAD_REQUEST", "def testAddEmptyPassword(self):\n self.assertEquals(models.SUCCESS, self.users.add(\"userE\", \"\"))", "def test_create_user_with_invalid_input(self, user, status_code, len_):\n # setup: none\n\n # test\n resp = self.create_user(user)\n try:\n assert resp.status_code == status_code\n assert resp.headers[\"Content-Type\"] == \"application/json; charset=utf-8\"\n assert resp.text.find(\"Passwords must have at least one non alphanumeric character\") == len_\n except AssertionError:\n raise\n finally:\n self.pprint_request(resp.request)\n 
self.pprint_response(resp)\n\n # teardown: none", "def test_blank_email(self):\n rv = self.signup('Bo', 'Theo', '', 'Bo1995', 'Bo1995')\n self.assertIn(b'Field must be between 6 and 30 characters long.', rv.data)", "def test_incomplete_user_exception(self):\n u_username_only = User(username=\"incomplete_user\")\n with self.assertRaises(TypeError) as err:\n User.signup(u_username_only)" ]
[ "0.7712952", "0.7632216", "0.7593471", "0.726332", "0.7243656", "0.7158078", "0.7157128", "0.7000987", "0.6996104", "0.6994467", "0.6968338", "0.69449747", "0.6941492", "0.69386184", "0.69285846", "0.69195503", "0.6915723", "0.6893214", "0.68801963", "0.687974", "0.6873898", "0.6854231", "0.684645", "0.68419313", "0.6829588", "0.68087006", "0.6806918", "0.67848545", "0.6780238", "0.6776506", "0.67761064", "0.676076", "0.67598957", "0.6759028", "0.67584425", "0.67582643", "0.67568886", "0.67498094", "0.6749532", "0.6732981", "0.67251426", "0.6725064", "0.6721178", "0.6720368", "0.67192036", "0.67101604", "0.67032146", "0.6700827", "0.6699914", "0.6699914", "0.6699914", "0.6699914", "0.6699914", "0.669745", "0.66952544", "0.6688583", "0.66867787", "0.6681328", "0.6678864", "0.6678786", "0.6668858", "0.666482", "0.66462785", "0.6641004", "0.66388243", "0.6632202", "0.6617782", "0.66026455", "0.6589927", "0.65868205", "0.65814114", "0.6568072", "0.65659827", "0.6564364", "0.6534207", "0.6531578", "0.65304387", "0.6523057", "0.6517745", "0.6515618", "0.65151453", "0.65133643", "0.65131235", "0.65037286", "0.6501424", "0.6501342", "0.6500583", "0.6487977", "0.6479522", "0.6479241", "0.64776707", "0.6474257", "0.64699614", "0.64684916", "0.6465261", "0.6451771", "0.6445911", "0.6441091", "0.6435296", "0.6434588" ]
0.7567398
3
Ensure we can't create a new user without required fields
def test_create_new_user_missing_fields(self):
    data = {}

    response = self.client.post(
        reverse('user-list'),
        data,
        format='json',
    )

    self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)

    content = {
        'email': ['This field is required.'],
        'password': ['This field is required.']
    }
    self.assertEqual(json.loads(response.content), content)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_create_user_missing_fields(self):\n payload = {\n 'email': 'email',\n 'password': ''\n }\n res = self.client.post(CREATE_USER_API, payload)\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)", "def test_creating_a_new_user_without_email(self):\n\n with self.assertRaises(ValueError):\n get_user_model().objects.create_user(None, \"Test1234\")", "def test_admin_cannot_create_user_with_empty_fields(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n user = dict(\n name='',\n username='',\n password='',\n role=''\n )\n\n resp = self.client.post(\n '/api/v1/users',\n content_type='application/json',\n data=json.dumps(user),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n\n reply = json.loads(resp.data.decode())\n\n self.assertEqual(reply['message'], 'Please input all fields!')\n self.assertEqual(resp.status_code, 400)", "def test_private_create_user_without_email(self):\n with pytest.raises(ValueError) as exinfo:\n EmailUser.objects._create_user(None, None, False, False)\n self.assertIn('email must be set', str(exinfo.value))", "def test_cannot_create_user_without_email(self):\n with self.assertRaises(TypeError):\n User.objects.create_user(username=\"username\", password=\"password\", email=None)", "def test_creation_without_password(self, user):\n with pytest.raises(mongoengine.errors.ValidationError):\n user.save()", "def test_dont_create_user(self):\n self.assertFalse(User.objects.exists())", "def test_new_user_invalid_email(self):\n with self.assertRaises(ValueError):\n get_user_model().objects.create_user(None, '123')", "def test_no_user(self):\n form = self._get_form()\n self.assertTrue(self._validate_form(form), form.errors)\n self.assertRaises(IntegrityError, form.save)", "def test_new_user_invalid_email(self):\n with self.assertRaises(ValueError):\n get_user_model().objects.create_user(None, '3232Ze')", "def test_new_user_with_invalid_email(self):\n with self.assertRaises(ValueError):\n get_user_model().objects.create_user(None, 'userpass123')", "def test_new_user_invalid_email(self):\n with self.assertRaises(ValueError):\n get_user_model().objects.create_user(None, 'test123')", "def test_new_user_invalid_email(self):\n with self.assertRaises(ValueError):\n get_user_model().objects.create_user(None, 'test123')", "def test_new_user_invalid_email(self):\n with self.assertRaises(ValueError):\n get_user_model().objects.create_user(None, 'test123')", "def test_new_user_invalid_email(self):\n with self.assertRaises(ValueError):\n get_user_model().objects.create_user(None, 'test123')", "def test_new_user_invalid_email(self):\n with self.assertRaises(ValueError):\n get_user_model().objects.create_user(None, 'test123')", "def test_new_user_invalid_email(self):\n with self.assertRaises(ValueError):\n get_user_model().objects.create_user(None, \"test123\")", "def test_create_no_username(self):\n\n # If there is no username, email will be used instead\n properties = self.portal.portal_properties.site_properties\n properties.manage_changeProperties(use_email_as_login=True)\n\n user = api.user.create(\n email='[email protected]',\n password='secret'\n )\n\n self.assertEquals(user.getUserName(), '[email protected]')\n\n # But if using emails as a username is disabled, we should get\n # an error\n properties.manage_changeProperties(use_email_as_login=False)\n\n self.assertRaises(\n ValueError,\n api.user.create,\n email='[email protected]', password='secret'\n )", "def test_new_user_invalid_email(self):\n\n with 
self.assertRaises(ValueError):\n get_user_model().objects.create_user(None, 'test1234')", "def test_new_user_invalid_email(self):\n with self.assertRaises(ValueError):\n get_user_model().objects.create_user(None, 'test123456')", "def test_new_user_invalid_email(self):\n\n with self.assertRaises(ValueError):\n get_user_model().objects.create_user(None, 'test123')", "def test_create_user_invalid_username(self):\r\n print(\"Create user invalid username (already taken)\")\r\n u_id = 3\r\n username = \"100\"\r\n password = \"test9999\"\r\n u_type = 1\r\n\r\n prev_noUsers = len(User.query.all())\r\n self.assertEqual(self.system.create_user(u_id, username, password, u_type), 0)\r\n curr_noUsers = len(User.query.all())\r\n self.assertEqual(prev_noUsers, curr_noUsers)", "def test_blank(self):\n form_data = {\n 'username': 'testuser',\n 'password1': '',\n 'password2': ''\n }\n form = StrictUserCreationForm(data=form_data)\n self.assertFalse(form.is_valid())", "def test_new_user_invalid_email(self):\n with self.assertRaises(ValueError):\n get_user_model().objects.create_user(\n None,\n 'Password'\n )", "def test_create_use_with_invalid_email(self):\n with self.assertRaises(ValueError):\n get_user_model().objects.create_user(None, password='open@123')", "def test_create_user(self):\n self.assertEqual(self.new_user.first_name, 'John')\n self.assertEqual(self.new_user.last_name, 'Doe')\n self.assertEqual(self.new_user.username, 'john_doe')\n self.assertEqual(self.new_user.email, '[email protected]')\n self.assertTrue(self.new_user.check_password('test_password'))\n self.assertFalse(self.new_user.is_staff)\n self.assertFalse(self.new_user.is_superuser)\n self.assertFalse(self.new_user.is_active)\n\n with self.assertRaises(ValueError):\n User.objects.create_user(\n first_name='', last_name='', username='', email='', bio='', password=''\n )", "def test_08_create_user_not_exists(self):\n\n _, user = self.get_random_item(models.User)\n utils.create_user(user, session=self.session)\n success, error = utils.create_user(user, session=self.session)\n self.assertFalse(success)\n self.assertTrue(error)", "def test_new_user_invalid_email(self):\n with self.assertRaises(ValueError):\n get_user_model().objects.create_user(\"\", \"test42837492374923749\")", "def test_create_user_invalid_type(self):\r\n print(\"Create user invalid type\")\r\n u_id = 3\r\n username = \"newtestuser\"\r\n password = \"password\"\r\n u_type = 5\r\n\r\n prev_noUsers = len(User.query.all())\r\n self.assertEqual(self.system.create_user(u_id, username, password, u_type), 0)\r\n curr_noUsers = len(User.query.all())\r\n self.assertEqual(prev_noUsers, curr_noUsers)", "def test_create_no_email(self):\n\n self.portal.portal_properties.site_properties.use_email_as_login = True\n\n self.assertRaises(\n ValueError,\n api.user.create,\n username='chuck', password='secret'\n )", "def test_username_not_in_use(self):\n self.request.json_body = {'username': 'newuser'}\n self.datautils.create_user({'username': 'testuser', 'password': 'testpass'})\n result = users_post_view(self.request)['d']\n self.assertIsInstance(result, dict)\n self.assertEqual(result, error_dict('api_errors', 'username, email, and password are all required string fields'))", "def test_incomplete_user_exception(self):\n u_username_only = User(username=\"incomplete_user\")\n with self.assertRaises(TypeError) as err:\n User.signup(u_username_only)", "def test_invalid_user_without_email(self):\n email = ''\n password = None\n with self.assertRaises(ValueError):\n 
self.user_manager.create_user(email=email, password=password)", "def test_create_user_invalid_id(self):\r\n print(\"Create user invalid id (already taken)\")\r\n u_id = 100\r\n username = \"newtestuser\"\r\n password = \"test9999\"\r\n u_type = 1\r\n\r\n prev_noUsers = len(User.query.all())\r\n self.assertEqual(self.system.create_user(u_id, username, password, u_type), 0)\r\n curr_noUsers = len(User.query.all())\r\n self.assertEqual(prev_noUsers, curr_noUsers)", "def test_create_new_user_blank_fields(self):\n self.maxDiff = None\n data = {\n 'email': '',\n 'password': '',\n }\n\n response = self.client.post(\n reverse('user-list'),\n data,\n format='json',\n )\n\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n\n content = {\n 'email': ['This field may not be blank.'],\n 'password': ['This field may not be blank.'],\n }\n self.assertEqual(json.loads(response.content), content)", "def test_create_user_invalid_password(self):\r\n print(\"Create user invalid password (empty)\")\r\n u_id = 3\r\n username = \"newtestuser\"\r\n password = \"\"\r\n u_type = 1\r\n\r\n prev_noUsers = len(User.query.all())\r\n self.assertEqual(self.system.create_user(u_id, username, password, u_type), 0)\r\n curr_noUsers = len(User.query.all())\r\n self.assertEqual(prev_noUsers, curr_noUsers)", "def clean(self):\n c = super(UserForm, self).clean()\n if (self.instance.pk is None and\n c.get('email') and\n user_exists(c.get('email'),\n c.get('last_name'),\n c.get('first_name'),\n self.current_round_name)):\n raise forms.ValidationError(\n ugettext('APPLICATION_EXISTS PLEASE_LOGIN'))\n return c", "def test_new_user_invalid_email(self):\n with self.assertRaises(ValueError):\n get_user_model().objects.create_user(\n email=None,\n password=self.test_user_pass,\n name=self.test_user_name,\n )", "def test_user_invalid_email(self):\n with self.assertRaises(ValueError):\n get_user_model().objects.create_user(email=None, password=\"123\")", "def test_new_user_invalid_email(self):\n\n # It should raise a ValueError\n with self.assertRaises(ValueError):\n get_user_model().objects.create_user(\n None,\n password=\"password\"\n )", "def create(self):\n if User.load(username) is None:\n \"\"\"This username is not in use\"\"\"\n if self.validateEmail(self.email):\n \"\"\"This email is valid\"\"\"\n if len(self.username) > 2:\n \"\"\"This is long enough\"\"\"\n self.__store()", "def test_create_new_student_user_missing_field(self):\n data = {\n 'email': '[email protected]',\n 'password': 'test123!',\n }\n\n response = self.client.post(\n reverse('user-list'),\n data,\n format='json',\n )\n\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)", "def test_unauthorized_create_user(self):\n res = self.submit()\n\n assert res.status_code == 401", "def test_user_create(self):\n user_count = User.objects.count()\n user = User.objects.create_user(email='[email protected]', password='test')\n self.assertTrue(User.objects.count() == user_count + 1)\n self.assertFalse(user.is_staff)\n self.assertFalse(user.is_superuser)\n with self.assertRaises(ValueError, msg='The email must be provided'):\n User.objects.create_user(email='', password='test')", "def create_user():\r\n if not request.is_json or 'name' not in request.get_json() or 'phone_number' not in request.get_json() or 'password' not in request.get_json():\r\n return bad_request('Missing required data.')\r\n try:\r\n return add_user(request)\r\n except:\r\n return bad_request(error_messages['user_exist'])", "def test_signup_missing_username(self):\n\n 
invalid_u = User.signup(\"[email protected]\", None, \"testpass\", \"Test\", \"User\", None)\n \n uid = 99999\n invalid_u.id = uid\n\n with self.assertRaises(exc.IntegrityError) as context:\n db.session.commit()", "def test_raises_on_missing_needed_fields(self):\n test_name = \"impossible_creature_not_present\"\n self.form.constructor_fields = [*self.form.constructor_fields, test_name]\n message = \"The fields for email, username, and constructor must be set in fields. \"\n self.assertNotIn(test_name, self.form.base_fields)\n with self.assertRaisesMessage(ImproperlyConfigured, message):\n self.form.confirm_required_fields()", "def create_token_missing_field(self):\n payload = {'email': '[email protected]', 'password': 'testpass'}\n create_user(**payload)\n payload = {'email': '[email protected]', 'password': ''}\n res = self.client.post(TOKEN_URL, payload)\n self.assertNotIn('token', res.data)\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)", "def test_admin_cannot_update_user_with_empty_fields(self):\n resp = self.admin_create_user()\n reply = self.admin_login()\n token = reply['token']\n user = dict(\n name='',\n username='',\n password='',\n role=''\n )\n resp = self.client.put(\n '/api/v1/users/2',\n content_type='application/json',\n data=json.dumps(user),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'Please input all fields!')\n self.assertEqual(resp.status_code, 400)", "def test_user_creation(self):\n user = UserModel.objects.create_user(\n username=\"saimer\"\n )\n self.assertEqual(user.email, \"\")\n self.assertEqual(user.username, \"saimer\")\n self.assertFalse(user.has_usable_password())", "def test_signup_missing_first_name(self):\n\n invalid_u = User.signup(\"[email protected]\", \"testuser\", \"testpass\", None, \"User\", None)\n \n uid = 99999\n invalid_u.id = uid\n\n with self.assertRaises(exc.IntegrityError) as context:\n db.session.commit()", "def _validate_user(_):\n pass", "def signup_user_with_missing_fields(self):\n response = self.client.post(\n self.signup_url, self.invalid_user_with_missing_fields, format='json')\n\n return response", "def test_create_user_with_no_role(setup_client):\n client = setup_client\n payload = {\n 'email': '[email protected]',\n 'password': 'password',\n 'name': 'Test name'\n }\n res = client.post(CREATE_USER_URL, payload)\n assert res.status_code == status.HTTP_400_BAD_REQUEST\n user_exists = get_user_model().objects.filter(\n email=payload['email']).exists()\n assert not user_exists", "def test_create_user_with_invalid_input(self, user, status_code, len_):\n # setup: none\n\n # test\n resp = self.create_user(user)\n try:\n assert resp.status_code == status_code\n assert resp.headers[\"Content-Type\"] == \"application/json; charset=utf-8\"\n assert resp.text.find(\"Passwords must have at least one non alphanumeric character\") == len_\n except AssertionError:\n raise\n finally:\n self.pprint_request(resp.request)\n self.pprint_response(resp)\n\n # teardown: none", "def test_register_without_data(self):\n response = self.client.post('/api/v2/auth/signup',\n data=json.dumps(users[4]),\n headers=self.admin_headers,\n content_type='application/json')\n self.assertEqual(response.status_code, 400)\n self.assertIn('Missing required parameter', str(response.data))", "def testAddNoneUsernameAndPassword(self):\n self.assertEquals(models.ERR_BAD_USERNAME, self.users.add(\"\", \"\"))", "def test_dont_save_new_user(self):\n 
self.assertEqual(get_user_model().objects.exists(), 1)", "def users_create():", "def test_create__invalid(self):\n testing_config.sign_in('[email protected]', 123567890)\n\n json_data = {'isAdmin': False} # No email\n with test_app.test_request_context(self.request_path, json=json_data):\n with self.assertRaises(werkzeug.exceptions.BadRequest):\n self.handler.do_post()\n\n new_appuser = user_models.AppUser.query(\n user_models.AppUser.email == '[email protected]').get()\n self.assertIsNone(new_appuser)", "def test_user_signup_with_invalid_first_name(self):\n pass", "def test_signup_when_there_are_missing_fields(self):\n user = {\n 'firstname' : 'Caleb',\n 'lastname' : 'Mbugua',\n 'password' : '12345566'\n }\n\n response = self.client.post('/api/v1/register', json=user, headers={'Content-Type': 'application/json'})\n data = response.get_json()\n\n self.assertEqual(response.status_code, 400)\n self.assertEqual(data['status'], 400)\n self.assertEqual(data['message'], 'Invalid data. Please fill all the required fields')", "def test_create_user_valid(self):\n res = self.client.post(CREATE_USER_URL, self.mock_user)\n\n self.assertEqual(res.status_code, status.HTTP_201_CREATED)\n\n user = get_user_model().objects.get(**res.data)\n\n self.assertTrue(user.check_password(self.mock_user['password']))\n self.assertNotIn('password', res.data)", "def test_creation_throws_error_on_missing_fields(self, test_domain):\n with pytest.raises(ValidationError) as err:\n test_domain.repository_for(Person)._dao.create(last_name=\"Doe\")\n\n assert err.value.messages == {\"first_name\": [\"is required\"]}", "def test_create_user_with_no_password(self):\n data = {\n 'username': 'foobar',\n 'email': '[email protected]',\n 'password': ''\n }\n response = self.client.post(self.create_url, data, format='json')\n self.assertEqual(User.objects.count(), 1)\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(len(response.data['password']), 1)", "def test_user_recreate(self):\n self.test_user_creation()\n\n with self.assertRaisesMessage(\n IntegrityError, \"UNIQUE constraint failed: auths_user.username\"\n ):\n UserModel.objects.create_user(\n username=\"saimer\"\n )", "def test_signup_missing_password(self):\n with self.assertRaises(ValueError) as context:\n invalid_u = User.signup(\"[email protected]\", \"testuser\", None, \"Test\", \"User\", None)", "def test_create_user(self):\n pass", "def test_create_user(self):\n pass", "def test_create_user(self):\n pass", "def test_new_user_invalid_email(self):\n user_number_before = get_user_model().objects.count()\n with self.assertRaises(ValueError):\n get_user_model().objects.create_user(\n None,\n password=\"1234Test\"\n )\n user_number_after = get_user_model().objects.count()\n self.assertEqual(user_number_before, user_number_after)", "def test_create_invalid(self):\n url = '/api/users/'\n data = {}\n username = str(uuid1())[:8]\n # Response should be status 400 where essential parameters are missing.\n response = self.client.post(url, json.dumps(data), content_type='application/json')\n self.assertEqual(response.status_code, 400)\n data['EmailAddress'] = '{}@dbca.wa.gov.au'.format(username)\n response = self.client.post(url, json.dumps(data), content_type='application/json')\n self.assertEqual(response.status_code, 400)\n data['DisplayName'] = 'Doe, John'\n response = self.client.post(url, json.dumps(data), content_type='application/json')\n self.assertEqual(response.status_code, 400)\n data['SamAccountName'] = username\n response = 
self.client.post(url, json.dumps(data), content_type='application/json')\n self.assertEqual(response.status_code, 201) # Now valid.", "def test_signup_missing_email(self):\n\n invalid_u = User.signup(None, \"testuser\", \"testpass\", \"Test\", \"User\", None)\n \n uid = 99999\n invalid_u.id = uid\n\n with self.assertRaises(exc.IntegrityError) as context:\n db.session.commit()", "def test_create(self):\n \n name=\"mytest\"\n email=\"[email protected]\"\n \n #test user can be created successfully when given correct values\n user = users.create(Request(name, email)) \n self.assertIsInstance(user, User)\n self.assertEquals(user.name, name)\n self.assertEquals(user.email, email)\n \n #ensure that an error is raised when essential attributes are missed\n self.assertRaises(datastore_errors.BadValueError, users.create, None)", "def test_create_user_missing_data(self):\n data = {\"firstname\": \"John\"}\n res = self.post(url=\"/users\", data=data)\n self.assertException(res, exc.DataIsMissing)\n\n users = User.query.all()\n self.assertEqual(len(users), 5)", "def test_new_user_400(self):\n # Missing First Name\n user1_body = deepcopy(self.test_user1_data)\n del(user1_body['first_name'])\n resp = self.app.post('/users', data=json.dumps(user1_body))\n assert resp.status_code == 400\n\n # Missing Last Name\n user1_body = deepcopy(self.test_user1_data)\n del(user1_body['last_name'])\n resp = self.app.post('/users', data=json.dumps(user1_body))\n assert resp.status_code == 400\n\n # Missing UserID\n user1_body = deepcopy(self.test_user1_data)\n del(user1_body['userid'])\n resp = self.app.post('/users', data=json.dumps(user1_body))\n assert resp.status_code == 400\n\n # Bad data type for groups\n user1_body = deepcopy(self.test_user1_data)\n user1_body['groups'] = self.test_group1_groupid\n resp = self.app.post('/users', data=json.dumps(user1_body))\n assert resp.status_code == 400", "def test_create_user_empty_string(self):\n res = self.client.post(CREATE_USER_URL, {\n **self.mock_user,\n 'name': ''\n })\n\n self.assertEqual(res.status_code, status.HTTP_201_CREATED)\n\n user = get_user_model().objects.get(**res.data)\n\n self.assertTrue(user.check_password(self.mock_user['password']))\n self.assertNotEqual('', user.name)", "def test_signup_when_empty_data_provided(self):\n user = {}\n\n response = self.client.post('/api/v1/register', json=json.dumps(user), headers={'Content-Type': 'application/json'})\n data = response.get_json()\n\n self.assertEqual(response.status_code, 400)\n self.assertEqual(data['status'], 400)\n self.assertEqual(data['message'], 'Invalid data. 
Please fill all the required fields')", "def create(self, request, *args, **kwargs):\n user = request.user\n if user.is_authenticated and not user.has_perm(\"users.add_user\"):\n self.permission_denied(request, message=_(\"You cannot create users.\"))\n return super().create(request, *args, **kwargs)", "def test_registeration_for_a_super_user_no_password(self):\n with self.assertRaisesMessage(TypeError,\n 'Superusers must have a password.'):\n User.objects.create_superuser(\n 'jey',\n '[email protected]',\n None\n )", "def test_username_nodigits(self):\n response = self.signup_a_user(self.username_nodigits)\n self.assertEqual(response.data['errors']['username'],\n [\"username is invalid\"]\n )\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)", "def signup_user_with_empty_fields(self):\n response = self.client.post(\n self.signup_url, self.invalid_user_with_empty_fields, format='json')\n\n return response", "def test_create_user_password_too_short(self):\n res = self.client.post(CREATE_USER_URL, {\n **self.mock_user,\n 'password': 'pw'\n })\n\n db_user = get_user_model().objects.filter(\n email=self.mock_user['email']\n )\n\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertFalse(db_user)", "def clean(self):\n if self.edit_user is None and len(self.cleaned_data['password1']) == 0:\n raise forms.ValidationError(_(u'You must supply a password when creating a user'))\n return super(RegisterUserForm, self).clean()", "def testAddNoneUsernameAndPassword(self):\n self.assertEquals(models.ERR_BAD_USERNAME, self.users.add(None, None))", "def test_empty_fields(self):\n with self.client:\n response = register_user(\n self, '', '', '', '')\n data = json.loads(response.data.decode())\n self.assertTrue(data['status'] == 'fail')\n self.assertTrue(data['message'] == 'Validation errors.')\n self.assertTrue(response.content_type == 'application/json')\n self.assertEqual(response.status_code, 422)", "def test_user_creation(self):\n self.assertTrue(User.objects.exists())", "def test_registeration_no_username(self):\n response = self.signup_a_user(self.user_lacks_username)\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertNotIn(\"token\", response.data)", "def test_user_creation(self):\n self.assertEqual(CustomUser.objects.count() - self.user_count, 2)\n self.assertEqual(self.new_student_user.username, 'Student')\n self.assertTrue(self.new_student_user.password, 'student12345')\n self.assertEqual(self.new_instructor_user.username, 'Instructor')\n self.assertTrue(self.new_instructor_user.password, 'instructor12345')", "def test_cannot_create_superuser_without_password(self):\n with self.assertRaises(TypeError):\n User.objects.create_superuser(username=\"admin\", email=\"[email protected]\")", "def test_create_user_with_no_username(self):\n data = {\n 'username': '',\n 'email': '[email protected]',\n 'password': 'foobar'\n }\n response = self.client.post(self.create_url, data, format='json')\n self.assertEqual(User.objects.count(), 1)\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(len(response.data['username']), 1)", "def test_user_errors(self):\r\n user = User(\r\n email_addr=\"[email protected]\",\r\n name=\"johndoe\",\r\n fullname=\"John Doe\",\r\n locale=\"en\")\r\n\r\n # User.name should not be nullable\r\n user.name = None\r\n db.session.add(user)\r\n assert_raises(IntegrityError, db.session.commit)\r\n db.session.rollback()\r\n\r\n # User.fullname should not be nullable\r\n user.name = 
\"johndoe\"\r\n user.fullname = None\r\n db.session.add(user)\r\n assert_raises(IntegrityError, db.session.commit)\r\n db.session.rollback()\r\n\r\n # User.email_addr should not be nullable\r\n user.name = \"johndoe\"\r\n user.fullname = \"John Doe\"\r\n user.email_addr = None\r\n db.session.add(user)\r\n assert_raises(IntegrityError, db.session.commit)\r\n db.session.rollback()", "def test_create_user_passwords_repeat_is_missing(self):\n data = {\"firstname\": \"John\", \"lastname\": \"Doe\", \"password\": \"supersecret\"}\n res = self.post(url=\"/users\", data=data)\n self.assertException(res, exc.DataIsMissing)\n users = User.query.all()\n self.assertEqual(len(users), 5)", "def test_user_existence(self):\n\n credentials = {\n 'email': '[email protected]',\n 'password': 'Testpass12',\n 'name': 'Test Name'\n }\n get_user_model().objects.create_user(**credentials)\n\n # Check that this is a bad request since the user does already exists.\n response = self.client.post(URL_CREATE_USER, credentials)\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)", "def test_validate_user(self):\n with self.assertRaises(ValidationError):\n self.make_assignment(\n self.category, self.user_alice, self.role_contributor\n )", "def test_create_account_failed_no_password(self):\n data = self.user_data.copy()\n data.pop('password')\n response = self.client.post(self.url, data, format='json')\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(response.data.get('message').get('password')[0], 'This field is required.')", "def test_not_creator_cannot_update(self):\n\n logged_user = utils.create_user_and_authenticate(self)\n self.group.users.add(logged_user)\n expected_url = reverse('my_groups_view')\n\n utils.test_cannot_access(self, self.url,\n expected_url=expected_url,\n data=self.data)", "def _validate_user_fields(fields: dict):\n # Checks\n for k, v in fields.items():\n if k == \"username\":\n if len(v) > UserLimits.USERNAME_MAX_LENGTH or len(v) < UserLimits.USERNAME_MIN_LENGTH:\n raise ForbiddenArgument(\"invalid username\")\n\n elif k == \"fullname\":\n if len(v) > UserLimits.FULLNAME_MAX_LENGTH or len(v) < UserLimits.USERNAME_MIN_LENGTH:\n raise ForbiddenArgument(\"invalid full name\")\n\n elif k == \"email\":\n if not is_email(v) or len(v) > UserLimits.EMAIL_MAX_LENGTH or len(v) < UserLimits.EMAIL_MIN_LENGTH:\n raise ForbiddenArgument(\"invalid email\")\n\n elif k == \"password\":\n if len(v) > UserLimits.PASSWORD_MAX_LENGTH or len(v) < UserLimits.PASSWORD_MIN_LENGTH:\n raise ForbiddenArgument(\"invalid password\")", "def test_createUser_single(self):\n #TODO: this and other tests", "def test_if_not_created_unauthorized(self):\r\n payload = {\r\n \"email\": \"[email protected]\",\r\n \"password\": \"password\",\r\n \"name\": \"asdasd\",\r\n \"is_superuser\": False,\r\n }\r\n\r\n res = self.client_not_authenticated.post(reverse(CREATE_USER_URL),data=payload)\r\n self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)" ]
[ "0.7630348", "0.73862165", "0.73674697", "0.73465914", "0.72970724", "0.72438395", "0.7208631", "0.7136524", "0.7124549", "0.7122272", "0.7103522", "0.7093455", "0.7093455", "0.7093455", "0.7093455", "0.7093455", "0.70929193", "0.7077152", "0.707205", "0.7072001", "0.70619893", "0.7030969", "0.7021747", "0.701763", "0.7016226", "0.70116866", "0.70053625", "0.6999455", "0.6994855", "0.6988487", "0.6962033", "0.6957902", "0.6940327", "0.6935358", "0.693399", "0.69212735", "0.69166934", "0.6885418", "0.6881372", "0.68807834", "0.6876645", "0.6874128", "0.68345", "0.68281037", "0.6817594", "0.6804497", "0.67964", "0.67692906", "0.67590934", "0.67516243", "0.674182", "0.67337006", "0.6733349", "0.6730363", "0.6727885", "0.6715445", "0.6714786", "0.6703667", "0.66915", "0.6674264", "0.6673129", "0.6661519", "0.66594726", "0.66494703", "0.66339225", "0.6633592", "0.6620213", "0.66192013", "0.66192013", "0.66192013", "0.66191417", "0.66163117", "0.66155446", "0.6589333", "0.6583052", "0.6579377", "0.65526026", "0.6552222", "0.65513813", "0.65431935", "0.6535065", "0.651966", "0.6517664", "0.6514674", "0.650994", "0.6505046", "0.650167", "0.6500112", "0.64982283", "0.64915097", "0.6489176", "0.64848346", "0.6469258", "0.6455361", "0.64537305", "0.6453425", "0.64402825", "0.6434789", "0.6432233", "0.6429077" ]
0.71747696
7
Ensure we can't create a new user with a weak password
def test_create_new_user_weak_password(self): data = { 'username': 'John', 'email': '[email protected]', 'password': '19274682736', 'first_name': 'Chuck', 'last_name': 'Norris', 'university': { "name": "random_university" }, 'academic_field': {'name': "random_field"}, 'academic_level': {'name': "random_level"}, 'gender': "M", 'birthdate': "1999-11-11", } response = self.client.post( reverse('user-list'), data, format='json', ) self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) content = {"password": ['This password is entirely numeric.']} self.assertEqual(json.loads(response.content), content)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_create_user_password_too_short(self):\n res = self.client.post(CREATE_USER_URL, {\n **self.mock_user,\n 'password': 'pw'\n })\n\n db_user = get_user_model().objects.filter(\n email=self.mock_user['email']\n )\n\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertFalse(db_user)", "def test_create_user_invalid_password(self):\r\n print(\"Create user invalid password (empty)\")\r\n u_id = 3\r\n username = \"newtestuser\"\r\n password = \"\"\r\n u_type = 1\r\n\r\n prev_noUsers = len(User.query.all())\r\n self.assertEqual(self.system.create_user(u_id, username, password, u_type), 0)\r\n curr_noUsers = len(User.query.all())\r\n self.assertEqual(prev_noUsers, curr_noUsers)", "def test_creation_without_password(self, user):\n with pytest.raises(mongoengine.errors.ValidationError):\n user.save()", "def test_password_too_short(self):\n payload = {'email': '[email protected]', 'password': 'pw'}\n \n res = self.client.post(CREATE_USER_URL, payload)\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)\n \n user_exists = get_user_model().objects.filter(\n email=payload['email']\n ).exists()\n \n self.assertFalse(user_exists)", "def test_password_too_short(self):\n # requirments for creating user\n payload = {\n 'email': '[email protected]',\n 'password': 'pwd',\n 'name': 'Test',\n }\n\n # this will do a HTTP POST request and create a user\n response = self.client.post(CREATE_USER_URL, payload)\n\n # Ensure that statuscode returns a HTTP400 bad request\n # becos must exist before we can ckeck password\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n\n # chech if user exists true else false\n user_exists = get_user_model().objects.filter(\n email=payload['email']\n ).exists()\n\n self.assertFalse(user_exists)", "def test_password_too_short(self):\n payload = {'email': '[email protected]', 'password': 'fu'}\n res = self.client.post(CREATE_USER_URL, payload)\n\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)\n user_exists = get_user_model().objects.filter(\n email=payload['email']\n ).exists()\n self.assertFalse(user_exists)", "def test_invalid_password(self):\n pass", "def test_password_too_short(self):\n\t\tpayload = {\n\t\t'email': '[email protected]',\n\t\t'password': 'pw',\n\t\t'name': 'test'\n\t\t}\n\n\t\tres = self.client.post(CREATE_USER_URL, payload)\n\t\tself.assertEqual(res.status_code,status.HTTP_400_BAD_REQUEST)\n\t\tuser_exists = get_user_model().objects.filter(\n\t\t\temail = payload['email']\n\t\t\t).exists()\n\n\t\tself.assertFalse(user_exists)", "def test_password_too_short(self):\r\n payload = {\r\n 'email': '[email protected]',\r\n 'password': 'pw',\r\n 'name': 'Maks'\r\n }\r\n\r\n res = self.client.post(CREATE_USER_URL, payload)\r\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)\r\n\r\n user_exists = get_user_model().objects.filter(\r\n email=payload['email']\r\n ).exists()\r\n self.assertFalse(user_exists)", "def test_invalid_password(self):\n u_invalid_password = User(username=\"bad_user\", email=\"[email protected]\", password=\"df\")\n with self.assertRaises(TypeError) as err:\n User.signup(u_invalid_password)", "def test_password_too_short(self):\n payload = {'email': '[email protected]', 'password': '123'}\n\n res = self.client.post(CREATE_USER_URL, payload)\n\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)\n user_exists = get_user_model().objects.filter(\n email=payload['email']\n ).exists()\n self.assertFalse(user_exists)", "def 
test_password_too_short(self):\n\n payload = {\n \"email\": \"[email protected]\",\n \"name\": \"Test\",\n 'password': 'tTTt'\n }\n res = self.client.post(CREATE_USER_URL, payload)\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)\n\n user_exitst = get_user_model().objects.filter(\n email=payload['email']\n ).exists()\n\n self.assertFalse(user_exitst)", "def test_password_too_short(self):\n payload = {\n 'email': '[email protected]',\n 'password': 'pw'\n }\n res = self.client.post(CREATE_USER_API, payload)\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)\n user_exists = get_user_model().objects.filter(\n email=payload['email']\n ).exists()\n self.assertFalse(user_exists)", "def test_password_too_short(self):\n\n credentials = {\n 'email': '[email protected]',\n 'password': 'pw',\n 'name': 'Test Name'\n }\n response = self.client.post(URL_CREATE_USER, credentials)\n\n # Check that this is a bad request since the password was too short.\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n\n is_user_created = get_user_model().objects.filter(\n email=credentials['email']\n ).exists()\n\n self.assertFalse(is_user_created)\n self.assertEqual(response.data['password'][0].code, 'min_length')", "def test_auth_user_fail_bad_password(self):\n\n self.assertFalse(User.authenticate(self.user1.username, \"invalid\"))", "def test_registeration_for_a_super_user_no_password(self):\n with self.assertRaisesMessage(TypeError,\n 'Superusers must have a password.'):\n User.objects.create_superuser(\n 'jey',\n '[email protected]',\n None\n )", "def test_no_forced_password_change(self):\r\n\r\n email, password = self._setup_user()\r\n self._login(email, password)\r\n\r\n email, password = self._setup_user(is_staff=True)\r\n self._login(email, password)", "def test_creation_with_password(self, user):\n user.password = \"is_god\"\n user.save()", "def test_password_too_short(self):\n payload = {\n 'email': '[email protected]',\n 'password': 'pw',\n 'name': 'test Name'\n }\n\n res = self.client.post(CREATE_USER_URL, payload)\n\n self.assertEquals(res.status_code, status.HTTP_400_BAD_REQUEST)", "def test_create_user_passwords_do_not_match(self):\n data = {\n \"firstname\": \"John\",\n \"lastname\": \"Doe\",\n \"password\": \"supersecret\",\n \"password_repeat\": \"supersecret_ooops\",\n }\n res = self.post(url=\"/users\", data=data)\n self.assertException(res, exc.PasswordsDoNotMatch)\n\n users = User.query.all()\n self.assertEqual(len(users), 5)", "def test_registeration_no_password(self):\n response = self.signup_a_user(self.user_lacks_password)\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(response.data[\"errors\"][\"password\"],\n [\"This field may not be blank.\"]\n )\n self.assertNotIn(\"token\", response.data)", "def test_that_an_incorrect_password_returns_false(new_user):\n user, user_data = new_user\n password = 'x{}x'.format(user_data.get('password'))\n\n assert_that(user.verify_password(password)).is_false()", "def test_admin_cannot_create_user_with_invalid_password(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n user = dict(\n name='Summer Love',\n username='love',\n password='Andyandy',\n role='attendant'\n )\n\n resp = self.client.post(\n '/api/v1/users',\n content_type='application/json',\n data=json.dumps(user),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n\n reply = json.loads(resp.data.decode())\n\n self.assertEqual(reply['message'], 'Password should 
be longer than 6 characters, have atleast an uppercase and a lowercase!')\n self.assertEqual(resp.status_code, 400)", "def test_create_use_with_invalid_email(self):\n with self.assertRaises(ValueError):\n get_user_model().objects.create_user(None, password='open@123')", "def test_signup_missing_password(self):\n with self.assertRaises(ValueError) as context:\n invalid_u = User.signup(\"[email protected]\", \"testuser\", None, \"Test\", \"User\", None)", "def test_user_creation(self):\n user = UserModel.objects.create_user(\n username=\"saimer\"\n )\n self.assertEqual(user.email, \"\")\n self.assertEqual(user.username, \"saimer\")\n self.assertFalse(user.has_usable_password())", "def test_cannot_create_superuser_without_password(self):\n with self.assertRaises(TypeError):\n User.objects.create_superuser(username=\"admin\", email=\"[email protected]\")", "def test_invalid_password(self):\n user = User(email=\"[email protected]\", password=\"testpassword\")\n\n self.assertFalse(user.is_valid_password(\"invalid_password\"))", "def testAddNonePassword(self):\n self.assertEquals(models.ERR_BAD_PASSWORD, self.users.add(\"userF\", None))", "def test_invalid_length_for_new_password():\n user = User(email=\"[email protected]\", user_type=0)\n user_password = \"ILoveHTML\"\n user.SetPassword(user_password)\n\n new_password1 = \"pwd\"\n with pytest.raises(ValueError):\n user.SetPassword(new_password1)\n assert not user.VerifyPassword(new_password1)\n assert user.VerifyPassword(user_password)\n\n new_password2 = \"I love meatball and tuna.\"\n with pytest.raises(ValueError):\n user.SetPassword(new_password2)\n assert not user.VerifyPassword(new_password2)\n assert user.VerifyPassword(user_password)", "def test_set_user_password(self):\n pass", "def create_user(username):\n\n password = getpass.getpass('Password for {0}: '.format(username))\n confirm = getpass.getpass('Again: ')\n\n if password != confirm:\n print >> sys.stderr, \"Passwords don't match\"\n\n sys.exit(1)\n\n with transaction.manager:\n Users(username, password).save()", "def test_invalid_user_without_email(self):\n email = ''\n password = None\n with self.assertRaises(ValueError):\n self.user_manager.create_user(email=email, password=password)", "def invalid_credentials( form , field ): \n\tusername_entered = form.username.data\n\tpassword_entered = field.data \n\tuser_object = User.query.filter_by(username = username_entered).first()\n\tif user_object is None : \n\t\traise ValidationError(\"Username or Password is incorrect !\")\n\telif not pbkdf2_sha256.verify(password_entered , user_object.password) : \n\t\traise ValidationError(\"Username or Password is incorrect !\")", "def test_no_forced_password_change(self):\r\n student = self._user_factory_with_history()\r\n staff = self._user_factory_with_history(is_staff=True)\r\n\r\n # also create a user who doesn't have any history\r\n grandfathered_student = UserFactory()\r\n grandfathered_student.date_joined = timezone.now()\r\n\r\n self.assertFalse(PasswordHistory.should_user_reset_password_now(student))\r\n self.assertFalse(PasswordHistory.should_user_reset_password_now(staff))\r\n self.assertFalse(PasswordHistory.should_user_reset_password_now(grandfathered_student))\r\n\r\n staff_reset_time = timezone.now() + timedelta(days=100)\r\n with freeze_time(staff_reset_time):\r\n self.assertFalse(PasswordHistory.should_user_reset_password_now(student))\r\n self.assertFalse(PasswordHistory.should_user_reset_password_now(grandfathered_student))\r\n 
self.assertFalse(PasswordHistory.should_user_reset_password_now(staff))", "def test_new_user_invalid_email(self):\n with self.assertRaises(ValueError):\n get_user_model().objects.create_user(\n None,\n 'Password'\n )", "def test_create_account_failed_no_password(self):\n data = self.user_data.copy()\n data.pop('password')\n response = self.client.post(self.url, data, format='json')\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(response.data.get('message').get('password')[0], 'This field is required.')", "def test_new_password(self):\n form_data = self.form_data(self.pwd)\n form = self.form(data=form_data, user=self.u)\n self.assertFalse(form.is_valid())", "def test_32_oauth_password(self):\r\n user = User(email_addr=\"[email protected]\",\r\n name=self.user.username,\r\n passwd_hash=None,\r\n fullname=self.user.fullname,\r\n api_key=\"api-key\")\r\n db.session.add(user)\r\n db.session.commit()\r\n res = self.signin()\r\n assert \"Ooops, we didn't find you in the system\" in res.data, res.data", "def test_get_token_with_invalid_password(self):\n payload = {'email': '[email protected]', 'password': ''}\n\n create_user(**payload)\n token = self.client.post(TOKEN_URL, payload)\n\n self.assertEqual(token.status_code, status.HTTP_400_BAD_REQUEST)", "def test_create_user_password_too_short(self):\n data = {\n \"firstname\": \"John\",\n \"lastname\": \"Doe\",\n \"password\": \"short\",\n \"password_repeat\": \"short\",\n }\n res = self.post(url=\"/users\", data=data)\n self.assertException(res, exc.PasswordTooShort)\n\n users = User.query.all()\n self.assertEqual(len(users), 5)", "def test_password_is_to_short(setup_client):\n client = setup_client\n payload = {\n 'email': '[email protected]',\n 'password': 'pw',\n 'role': 'Supplier',\n 'name': 'Test name'\n }\n res = client.post(CREATE_USER_URL, payload)\n assert res.status_code == status.HTTP_400_BAD_REQUEST\n user_exists = get_user_model().objects.filter(\n email=payload['email']).exists()\n assert not user_exists", "def test_new_user_with_invalid_email(self):\n with self.assertRaises(ValueError):\n get_user_model().objects.create_user(None, 'userpass123')", "def testLoginBadUsernameAndPassword(self):\n self.assertEquals(models.SUCCESS, self.users.add(\"userJ\", \"password\"))\n self.assertEquals(models.ERR_BAD_CREDENTIALS, self.users.login(\"nobody_user\", \"nobody_password\"))", "def test_password_salts_are_random(self):\n self.user.password = '123456'\n self.user2.password = '123456'\n self.assertTrue(self.user.password_hash != self.user2.password_hash)", "def test_bad_password(self):\n user = 'santos.gallegos'\n passw = '1234'\n result = self.ucuenca.authentication(user, passw)\n self.assertFalse(result['autenticacion'])", "def test_new_user_invalid_email(self):\n with self.assertRaises(ValueError):\n get_user_model().objects.create_user(\n email=None,\n password=self.test_user_pass,\n name=self.test_user_name,\n )", "def test_password_length(self):\n payload = {\n 'email': '[email protected]',\n 'name': \"hello\",\n 'password': 'pw',\n }\n\n res = self.client.post(CREATE_USER_URL, payload)\n\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)\n\n user_exists = get_user_model().objects.filter(\n email=payload['email']\n ).exists()\n self.assertFalse(user_exists)", "def create_user(item, username, passw):\n if len(passw) < 8:\n print(\"To short. 
Password should have minimum 8 characters.\")\n else:\n try:\n user = User(username=username, password=passw)\n user.save_to_db(item)\n print(\"User created\")\n except UniqueViolation as problem1:\n print(\"User already exist. Pick other username. \", problem1)", "def _prepare(cls, create, **kwargs):\n password = kwargs.pop(\"password\", None)\n user = super(UserFactory, cls)._prepare(False, **kwargs)\n if password:\n user.set_password(password)\n if create:\n user.save()\n return user", "def test_auth_user_fail_bad_username(self):\n\n self.assertFalse(User.authenticate(\"invalid\", \"allison\"))", "def test_creating_a_new_user_without_email(self):\n\n with self.assertRaises(ValueError):\n get_user_model().objects.create_user(None, \"Test1234\")", "def test_user_password_factory(self):\n user = UserFactory(name='User', password='myownpass')\n assert user.check_password('myownpass')", "def _create_user(self, password, **extra_fields):\n try:\n user = self.model(**extra_fields)\n user.set_password(password)\n user.save(using=self._db)\n return user\n except:\n raise ValueError('ValueError: Cannot create new user')", "def test_new_user_invalid_email(self):\n with self.assertRaises(ValueError):\n get_user_model().objects.create_user(\"\", \"test42837492374923749\")", "def test_private_create_user_without_email(self):\n with pytest.raises(ValueError) as exinfo:\n EmailUser.objects._create_user(None, None, False, False)\n self.assertIn('email must be set', str(exinfo.value))", "def test_set_password_with_wrong_type(self):\n user = UserModel()\n with pytest.raises(ValueError):\n user.password = 12345", "def check_existing_users(user_name,password):\n\n\n new_user = User(user_name,password)\n\n return new_user", "def test_new_user_invalid_email(self):\n with self.assertRaises(ValueError):\n get_user_model().objects.create_user(None, '3232Ze')", "def test_create_account_failed_invalid_password(self):\n data = self.user_data.copy()\n data['password'] = 'aaAA@@aaaaa'\n response = self.client.post(self.url, data, format='json')\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(response.data.get('message').get('password')[0],\n 'Password value should have at least 2 uppercase, 2 lowercase, '\n '2 digit and 2 special character.')", "def test_user_check_password(self):\r\n user = User(username=self.test_username, email=self.test_email)\r\n user.password = generate_password_hash(self.test_password)\r\n self.db.session.add(user)\r\n self.db.session.commit()\r\n assert user.check_password(self.test_password)", "def test_check_password(self):\n user = User.query.filter_by(username='eschoppik').first()\n self.assertTrue(bcrypt.check_password_hash(user.password, 'secret'))\n self.assertFalse(bcrypt.check_password_hash(user.password, 'notsecret'))", "def test_accounts_password_reuse(self):\r\n user = self._user_factory_with_history()\r\n staff = self._user_factory_with_history(is_staff=True)\r\n\r\n # students need to user at least one different passwords before reuse\r\n self.assertFalse(PasswordHistory.is_allowable_password_reuse(user, \"test\"))\r\n self.assertTrue(PasswordHistory.is_allowable_password_reuse(user, \"different\"))\r\n self._change_password(user, \"different\")\r\n\r\n self.assertTrue(PasswordHistory.is_allowable_password_reuse(user, \"test\"))\r\n\r\n # staff needs to use at least two different passwords before reuse\r\n self.assertFalse(PasswordHistory.is_allowable_password_reuse(staff, \"test\"))\r\n 
self.assertTrue(PasswordHistory.is_allowable_password_reuse(staff, \"different\"))\r\n self._change_password(staff, \"different\")\r\n\r\n self.assertFalse(PasswordHistory.is_allowable_password_reuse(staff, \"test\"))\r\n self.assertFalse(PasswordHistory.is_allowable_password_reuse(staff, \"different\"))\r\n self.assertTrue(PasswordHistory.is_allowable_password_reuse(staff, \"third\"))\r\n self._change_password(staff, \"third\")\r\n\r\n self.assertTrue(PasswordHistory.is_allowable_password_reuse(staff, \"test\"))", "def test_user_short_passwords(self):\n data = json.dumps({\n \"username\" : \"moses\", \"email\" : \"[email protected]\",\n \"password\" : \"1234567\", \"confirm_password\" : \"1234567\"})\n response = self.app.post(\n '/api/v3/users', data=data,\n content_type='application/json',\n headers=self.admin_header)\n self.assertEqual(response.status_code, 400)", "def test_admin_cannot_update_user_with_invalid_password(self):\n resp = self.admin_create_user()\n reply = self.admin_login()\n token = reply['token']\n user = dict(\n name='Summer Love',\n username='love',\n password='Andela',\n role='attendant'\n )\n resp = self.client.put(\n '/api/v1/users/2',\n content_type='application/json',\n data=json.dumps(user),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'Password should be longer than 6 characters, have atleast an uppercase and a lowercase!')\n self.assertEqual(resp.status_code, 400)", "def test_user_recreate(self):\n self.test_user_creation()\n\n with self.assertRaisesMessage(\n IntegrityError, \"UNIQUE constraint failed: auths_user.username\"\n ):\n UserModel.objects.create_user(\n username=\"saimer\"\n )", "def test_new_user_invalid_email(self):\n\n # It should raise a ValueError\n with self.assertRaises(ValueError):\n get_user_model().objects.create_user(\n None,\n password=\"password\"\n )", "def test_user_authenticate_invalid_password(self):\n\n user = User.authenticate(\"test1\", \"wrong_password\")\n\n self.assertEqual(user, False)", "def test_new_user_invalid_email(self):\n\n with self.assertRaises(ValueError):\n get_user_model().objects.create_user(None, 'test1234')", "def test_invalid_password(self):\n response = self.signup_a_user(self.password_lacks_specialchar)\n self.assertEqual(response.data['errors']['password'],\n [\"please consider a password that has a number, an \"\n \"uppercase letter, lowercase letter and a special\"\n \" character\"]\n )\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)", "def test_new_user_invalid_email(self):\n with self.assertRaises(ValueError):\n get_user_model().objects.create_user(None, 'test123456')", "def test_account_create_should_fail_when_password_too_short(self):\n ident_choice = UserIdentificationLabel.objects.get(slug=\"ident2\")\n form_data = {\n 'invitation_code': '1234',\n 'username': 'fred2',\n 'organization_name': 'transhealth',\n 'password1': 'p',\n 'password2': 'p',\n 'first_name': 'Fred',\n 'last_name': 'Flinstone',\n 'identification_choice': str(ident_choice.pk),\n }\n response = self.client.post(self.url, form_data, follow=True)\n self.assertEqual(response.status_code, 200)\n self.assertContains(response, 'too short')", "def test_dont_create_user(self):\n self.assertFalse(User.objects.exists())", "def test_user_empty_conf_password(self):\n data = json.dumps({\n \"username\" : \"lenny\", \"email\" : \"[email protected]\",\n \"password\" : \"secret\", \"confirm_password\" : \"\"})\n response = 
self.app.post(\n '/api/v3/users', data=data,\n content_type='application/json',\n headers=self.admin_header)\n self.assertEqual(response.status_code, 400)", "def test_invalid_password(self):\n self.signup('Bo', 'Theo', '[email protected]', 'Bo1995', 'Bo1995')\n rv = self.login('[email protected]', 'Bo1905')\n self.assertIn(b'Invalid password! Please try again', rv.data)", "def test_account_view_wrong_pw(flask_server, create_account):\n import requests\n\n data = create_account\n data['password'] += '123'\n\n req = requests.post('{}/account/view'.format(API_URL), data=data)\n assert req.content == b'Wrong password'\n assert req.status_code == 400", "def test_new_user_invalid_email(self):\n\n with self.assertRaises(ValueError):\n get_user_model().objects.create_user(None, 'test123')", "def test_new_user_invalid_email(self):\n with self.assertRaises(ValueError):\n get_user_model().objects.create_user(None, '123')", "def test_disabled_too_frequent_password_resets(self):\r\n student = self._user_factory_with_history()\r\n\r\n self.assertFalse(PasswordHistory.is_password_reset_too_soon(student))", "def test_valid_password(self):\n pass_field = Field(\"\".join(['x' * (int(self.app.config['MAX_PWD_LEN']))]))\n\n valid_password(None, pass_field)", "def test_new_user_invalid_email(self):\n with self.assertRaises(ValueError):\n get_user_model().objects.create_user(None, 'test123')", "def test_new_user_invalid_email(self):\n with self.assertRaises(ValueError):\n get_user_model().objects.create_user(None, 'test123')", "def test_new_user_invalid_email(self):\n with self.assertRaises(ValueError):\n get_user_model().objects.create_user(None, 'test123')", "def test_new_user_invalid_email(self):\n with self.assertRaises(ValueError):\n get_user_model().objects.create_user(None, 'test123')", "def test_new_user_invalid_email(self):\n with self.assertRaises(ValueError):\n get_user_model().objects.create_user(None, 'test123')", "def test_new_user_invalid_email(self):\n with self.assertRaises(ValueError):\n get_user_model().objects.create_user(None, \"test123\")", "def test_create_no_email(self):\n\n self.portal.portal_properties.site_properties.use_email_as_login = True\n\n self.assertRaises(\n ValueError,\n api.user.create,\n username='chuck', password='secret'\n )", "def test_create_user_with_invalid_input(self, user, status_code, len_):\n # setup: none\n\n # test\n resp = self.create_user(user)\n try:\n assert resp.status_code == status_code\n assert resp.headers[\"Content-Type\"] == \"application/json; charset=utf-8\"\n assert resp.text.find(\"Passwords must have at least one non alphanumeric character\") == len_\n except AssertionError:\n raise\n finally:\n self.pprint_request(resp.request)\n self.pprint_response(resp)\n\n # teardown: none", "def test_user_empty_password(self):\n data = json.dumps({\n \"username\" : \"lenny\", \"email\" : \"[email protected]\",\n \"password\" : \"\", \"confirm_password\" : \"secret12345\"})\n response = self.app.post(\n '/api/v3/users', data=data,\n content_type='application/json',\n headers=self.admin_header)\n self.assertEqual(response.status_code, 400)", "def test_user_invalid_email(self):\n with self.assertRaises(ValueError):\n get_user_model().objects.create_user(email=None, password=\"123\")", "def test_08_create_user_not_exists(self):\n\n _, user = self.get_random_item(models.User)\n utils.create_user(user, session=self.session)\n success, error = utils.create_user(user, session=self.session)\n self.assertFalse(success)\n self.assertTrue(error)", "def 
test_create_user_invalid_username(self):\r\n print(\"Create user invalid username (already taken)\")\r\n u_id = 3\r\n username = \"100\"\r\n password = \"test9999\"\r\n u_type = 1\r\n\r\n prev_noUsers = len(User.query.all())\r\n self.assertEqual(self.system.create_user(u_id, username, password, u_type), 0)\r\n curr_noUsers = len(User.query.all())\r\n self.assertEqual(prev_noUsers, curr_noUsers)", "def newuser(UserID, Password):\n\tif UserID not in USERS:\n\t\tif len(UserID) < 32:\n\t\t\tif len(Password) >= 4 and len(Password) <= 8:\n\t\t\t\twith open(FILE, 'a') as f:\n\t\t\t\t\tf.write(f'\\n({UserID}, {Password})')\n\t\t\t\treturn True\n\t\t\telse:\n\t\t\t\tsend(f'{SERVER_NAME}: Your password needs to be between 4 and 8 characters in length, please retry.')\n\t\telse:\n\t\t\tsend(f'{SERVER_NAME}: {UserID} needs to be less than 32 characters long, please shorten it.')\n\n\telse:\n\t\tsend(f'{SERVER_NAME}: {UserID} has already been used, please try a different username.')\n\n\treturn False", "def test_create_user_with_short_password(self):\n data = {\n 'email': '[email protected]',\n 'password': 'foo'\n }\n\n response = self.client.post(self.create_url, data, format='json')\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(len(response.data['password']), 1)", "def test_passwordsuccess(self):\n form_data = {\n 'username': 'testuser',\n 'password1': '2$n5[]$nnA5Y}2}}^gba',\n 'password2': '2$n5[]$nnA5Y}2}}^gba'\n }\n form = StrictUserCreationForm(data=form_data)\n self.assertTrue(form.is_valid())", "def test_create_user_valid(self):\n res = self.client.post(CREATE_USER_URL, self.mock_user)\n\n self.assertEqual(res.status_code, status.HTTP_201_CREATED)\n\n user = get_user_model().objects.get(**res.data)\n\n self.assertTrue(user.check_password(self.mock_user['password']))\n self.assertNotIn('password', res.data)", "def test_account_create_should_fail_when_password_too_common(self):\n ident_choice = UserIdentificationLabel.objects.get(slug=\"ident2\")\n form_data = {\n 'invitation_code': '1234',\n 'username': 'fred',\n 'organization_name': 'transhealth',\n 'password1': 'password',\n 'password2': 'password',\n 'first_name': 'Fred',\n 'last_name': 'Flinstone',\n 'identification_choice': str(ident_choice.pk),\n }\n response = self.client.post(self.url, form_data, follow=True)\n self.assertEqual(response.status_code, 200)\n self.assertContains(response, 'too common')", "def test_create_user(self):\n self.assertEqual(self.new_user.first_name, 'John')\n self.assertEqual(self.new_user.last_name, 'Doe')\n self.assertEqual(self.new_user.username, 'john_doe')\n self.assertEqual(self.new_user.email, '[email protected]')\n self.assertTrue(self.new_user.check_password('test_password'))\n self.assertFalse(self.new_user.is_staff)\n self.assertFalse(self.new_user.is_superuser)\n self.assertFalse(self.new_user.is_active)\n\n with self.assertRaises(ValueError):\n User.objects.create_user(\n first_name='', last_name='', username='', email='', bio='', password=''\n )", "def test_invalid_password(self):\n\n account_data = {\n \"username\": \"Mike\",\n \"email\": \"[email protected]\",\n \"password\": \"1234567\",\n \"confirm_password\": \"INVALID_PASSWORD\"\n }\n response = self.client.post(\n reverse('accounts:create-user'),\n account_data,\n format=\"json\")\n \"\"\"Test the api has bucket creation capability.\"\"\"\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertTrue(\"error\" in json.loads(response.content))", "def 
test_create_token_invalid_credantials(self):\n create_user(email='[email protected]', password='testpass')\n payload = {'email': '[email protected]', 'password': 'wrong'}\n res = self.client.post(TOKEN_URL, payload)\n\n self.assertNotIn('token', res.data)\n self.assertEquals(res.status_code, status.HTTP_400_BAD_REQUEST)" ]
[ "0.7448158", "0.7334659", "0.73337275", "0.7231263", "0.7226161", "0.72220945", "0.714039", "0.7128195", "0.7127693", "0.7122859", "0.710582", "0.7076057", "0.7073139", "0.70351803", "0.6981093", "0.6925133", "0.69214153", "0.6903709", "0.6858862", "0.68386996", "0.6816049", "0.68123394", "0.6777982", "0.67328376", "0.6731094", "0.6727343", "0.6724735", "0.6723644", "0.67079747", "0.6630522", "0.66290766", "0.66286916", "0.66245276", "0.66048664", "0.6598078", "0.6597054", "0.65867525", "0.65840673", "0.6582685", "0.6577058", "0.6575801", "0.6569614", "0.65639603", "0.6558179", "0.6543566", "0.6520973", "0.6520167", "0.6506907", "0.64888585", "0.6484778", "0.64680207", "0.64643365", "0.64413327", "0.64332443", "0.6431974", "0.64305305", "0.642863", "0.64256835", "0.64226997", "0.64210534", "0.6415279", "0.6413909", "0.6411299", "0.6407707", "0.6405998", "0.6405448", "0.64032185", "0.6400672", "0.6385359", "0.6382897", "0.6382699", "0.63824683", "0.63809294", "0.63802534", "0.6370175", "0.63697493", "0.6358864", "0.63578206", "0.63577205", "0.63557637", "0.6355339", "0.6355339", "0.6355339", "0.6355339", "0.6355339", "0.6349865", "0.63461083", "0.63410026", "0.63326126", "0.6329492", "0.63294023", "0.6328595", "0.63280326", "0.63217664", "0.6320693", "0.6316422", "0.63069105", "0.6304675", "0.6295865", "0.62955266" ]
0.6505213
48
Ensure we can't create a new user with an invalid phone number
def test_create_new_user_invalid_phone(self): data = { 'username': 'John', 'email': '[email protected]', 'password': '1fasd6dq#$%', 'phone': '12345', 'other_phone': '23445dfg', 'first_name': 'Chuck', 'last_name': 'Norris', 'university': { "name": "random_university" }, 'academic_field': {'name': "random_field"}, 'academic_level': {'name': "random_level"}, 'gender': "M", 'birthdate': "1999-11-11", } response = self.client.post( reverse('user-list'), data, format='json', ) self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) content = { "phone": ['Invalid format.'], "other_phone": ['Invalid format.'] } self.assertEqual(json.loads(response.content), content)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def validate_phone_number(self, phone_number):\n if User.objects.filter(phone_number=phone_number).exists():\n raise serializers.ValidationError('Phone Number already registered.')\n return phone_number", "def test_new_user_invalid_email(self):\n with self.assertRaises(ValueError):\n get_user_model().objects.create_user(\"\", \"test42837492374923749\")", "def test_duplicate_phone_number(self):\n params = {\n 'first_name': \"David\",\n 'last_name': \"Smith\",\n 'password': '******',\n 'email': \"[email protected]\",\n 'phone_number': \"012-345-6789\"\n }\n self.register(params)\n response = self.register(params)\n self.assertEqual(response.status_code, 400)\n self.assertDictContainsSubset({'message': \"Phone number/email already exists\"}, response.json())", "def test_new_user_invalid_email(self):\n\n with self.assertRaises(ValueError):\n get_user_model().objects.create_user(None, 'test1234')", "def test_new_user_invalid_email(self):\n with self.assertRaises(ValueError):\n get_user_model().objects.create_user(None, '3232Ze')", "def test_new_user_invalid_email(self):\n with self.assertRaises(ValueError):\n get_user_model().objects.create_user(None, '123')", "def test_new_user_invalid_email(self):\n with self.assertRaises(ValueError):\n get_user_model().objects.create_user(None, 'test123456')", "def test_create_use_with_invalid_email(self):\n with self.assertRaises(ValueError):\n get_user_model().objects.create_user(None, password='open@123')", "def test_new_user_invalid_email(self):\n with self.assertRaises(ValueError):\n get_user_model().objects.create_user(None, \"test123\")", "def test_new_user_invalid_email(self):\n\n with self.assertRaises(ValueError):\n get_user_model().objects.create_user(None, 'test123')", "def test_create_user_with_invalid_input(self, user, status_code, len_):\n # setup: none\n\n # test\n resp = self.create_user(user)\n try:\n assert resp.status_code == status_code\n assert resp.headers[\"Content-Type\"] == \"application/json; charset=utf-8\"\n assert resp.text.find(\"Passwords must have at least one non alphanumeric character\") == len_\n except AssertionError:\n raise\n finally:\n self.pprint_request(resp.request)\n self.pprint_response(resp)\n\n # teardown: none", "def test_new_user_with_invalid_email(self):\n with self.assertRaises(ValueError):\n get_user_model().objects.create_user(None, 'userpass123')", "def test_new_user_invalid_email(self):\n with self.assertRaises(ValueError):\n get_user_model().objects.create_user(None, 'test123')", "def test_new_user_invalid_email(self):\n with self.assertRaises(ValueError):\n get_user_model().objects.create_user(None, 'test123')", "def test_new_user_invalid_email(self):\n with self.assertRaises(ValueError):\n get_user_model().objects.create_user(None, 'test123')", "def test_new_user_invalid_email(self):\n with self.assertRaises(ValueError):\n get_user_model().objects.create_user(None, 'test123')", "def test_new_user_invalid_email(self):\n with self.assertRaises(ValueError):\n get_user_model().objects.create_user(None, 'test123')", "def test_create_user_invalid_id(self):\r\n print(\"Create user invalid id (already taken)\")\r\n u_id = 100\r\n username = \"newtestuser\"\r\n password = \"test9999\"\r\n u_type = 1\r\n\r\n prev_noUsers = len(User.query.all())\r\n self.assertEqual(self.system.create_user(u_id, username, password, u_type), 0)\r\n curr_noUsers = len(User.query.all())\r\n self.assertEqual(prev_noUsers, curr_noUsers)", "def test_new_user_invalid_email(self):\n with self.assertRaises(ValueError):\n 
get_user_model().objects.create_user(\n None,\n 'Password'\n )", "def test_new_user_invalid_email(self):\n user_number_before = get_user_model().objects.count()\n with self.assertRaises(ValueError):\n get_user_model().objects.create_user(\n None,\n password=\"1234Test\"\n )\n user_number_after = get_user_model().objects.count()\n self.assertEqual(user_number_before, user_number_after)", "def validate_phone(self, data):\n value = data.strip()\n if re.match(constant.NUMBER_ONLY, value):\n if User.objects.filter(phone=value).exists():\n raise serializers.ValidationError('phone number already registered')\n return value\n raise serializers.ValidationError(VALIDATION['phone']['invalid'])", "def post(self):\r\n args = user_parser.parse_args()\r\n\r\n phone_number = args[\"phone_number\"]\r\n\r\n first_three = phone_number[:3]\r\n\r\n if first_three not in prefix_list and first_three != \"+23\":\r\n response = {\r\n \"status\": \"error\",\r\n \"details\": {\r\n \"message\": \"Pass in a valid phone-number\"\r\n }\r\n }\r\n return response, http.client.BAD_REQUEST\r\n\r\n if not (len(phone_number) == 11 or len(phone_number) == 14):\r\n\r\n response = {\r\n \"status\": \"error\",\r\n \"details\": {\r\n \"message\": \"The lenth of number passed is invalid\"\r\n }\r\n }\r\n return response, http.client.BAD_REQUEST\r\n\r\n user = (UserModel.query.filter(\r\n UserModel.phone_number == phone_number).first())\r\n\r\n if user:\r\n result = {\r\n \"status\": \"error\",\r\n \"result\": {\r\n 'message': 'Phone Number already exists, try another one.'\r\n }\r\n }\r\n return result, http.client.CONFLICT\r\n\r\n if not validators.email(args[\"email\"]):\r\n response = {\r\n \"status\": \"error\",\r\n \"details\": {\r\n \"message\": \"Input a valid email address\"\r\n }\r\n }\r\n return response, http.client.BAD_REQUEST\r\n\r\n user = (UserModel.query.filter(\r\n UserModel.email == args['email']).first())\r\n if user:\r\n result = {\r\n \"status\": \"error\",\r\n \"result\": {\r\n 'message': 'Email already exists, try another one.'\r\n }\r\n }\r\n return result, http.client.CONFLICT\r\n\r\n email = args['email'].lower()\r\n new_user = UserModel(email=email,\r\n phone_number=args[\"phone_number\"],\r\n password=args['password'],\r\n role=args['role'],\r\n created_at=datetime.utcnow(),\r\n firebase_token=args['firebase_token'])\r\n db.session.add(new_user)\r\n try:\r\n db.session.commit()\r\n except IntegrityError:\r\n db.session.rollback()\r\n result = {\r\n \"status\": \"error\",\r\n \"result\": {\r\n 'message':\r\n 'Email or Phone Number already exists, try another one.'\r\n }\r\n }\r\n return result, http.client.CONFLICT\r\n\r\n result = admin_namespace.marshal(new_user, user_model)\r\n\r\n response = {\"status\": \"success\", \"result\": result}\r\n\r\n return response, http.client.CREATED", "def test_profile_phone_number_exceptions(self, bad_number):\n with mute_signals(post_save):\n profile = ExamProfileFactory(profile__phone_number=bad_number)\n with self.assertRaises(InvalidProfileDataException):\n CDDWriter.profile_phone_number_to_raw_number(profile)\n with self.assertRaises(InvalidProfileDataException):\n CDDWriter.profile_phone_number_to_country_code(profile)", "def test_user_invalid_email(self):\n with self.assertRaises(ValueError):\n get_user_model().objects.create_user(email=None, password=\"123\")", "def test_creating_a_new_user_without_email(self):\n\n with self.assertRaises(ValueError):\n get_user_model().objects.create_user(None, \"Test1234\")", "def test_new_user_invalid_email(self):\n\n # It 
should raise a ValueError\n with self.assertRaises(ValueError):\n get_user_model().objects.create_user(\n None,\n password=\"password\"\n )", "def validate_username(self, attrs, source):\n phone_no = attrs[source]\n if not phoneCleaner(phone_no):\n raise serializers.ValidationError(\"Please check your phone no. the format is incorrect\")\n\n try:\n us = User.objects.get(username__iexact=phone_no)\n except User.DoesNotExist:\n raise serializers.ValidationError(\"Phone number must already be registered before doing this\")\n\n if us.hierarchy != 'master':\n raise serializers.ValidationError(\"Phone number must not be a slave to another user\")\n\n return attrs", "def test_invalid_user_without_email(self):\n email = ''\n password = None\n with self.assertRaises(ValueError):\n self.user_manager.create_user(email=email, password=password)", "def test_new_user_invalid_email(self):\n with self.assertRaises(ValueError):\n get_user_model().objects.create_user(\n email=None,\n password=self.test_user_pass,\n name=self.test_user_name,\n )", "def test_valid_phone_invalid():\n assert not valid_phone(\"\")\n assert not valid_phone(\"000-000-00000\")\n assert not valid_phone(\"000-0000-0000\")\n assert not valid_phone(\"0000-000-0000\")\n assert not valid_phone(\"00000000000\")\n assert not valid_phone(\"foobar\")", "def validate(self, attrs):\n phone_no = self.context['kwargs'].get('slave')\n if not phoneCleaner(phone_no):\n raise serializers.ValidationError(\"Please check your phone no. the format is incorrect\")\n\n try:\n User.objects.get(username__iexact=phone_no)\n except User.DoesNotExist:\n attrs.update({'slave': phone_no})\n return attrs\n raise serializers.ValidationError(\"Phone number already exists. If you are trying to glue, consider the glue option\")", "def test_number(self):\n form_data = {\n 'username': 'testuser',\n 'password1': 'CDr=cpz&Z&a!cuP-nAQe',\n 'password2': 'CDr=cpz&Z&a!cuP-nAQe'\n }\n form = StrictUserCreationForm(data=form_data)\n self.assertFalse(form.is_valid())", "def test_user_register_bad_request(self):\n response = self.client.post(\n CONSTS.USER_REGISTER_URL,\n data=self.invalid_user_data,\n format='json'\n )\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)", "def validate_telephone(self, data):\n value = data.strip()\n if re.match(constant.NUMBER_ONLY, value):\n if User.objects.filter(telephone=value).exists():\n raise serializers.ValidationError('telephone number already registered')\n return value\n raise serializers.ValidationError(VALIDATION['phone']['invalid'])", "def test_signup_missing_first_name(self):\n\n invalid_u = User.signup(\"[email protected]\", \"testuser\", \"testpass\", None, \"User\", None)\n \n uid = 99999\n invalid_u.id = uid\n\n with self.assertRaises(exc.IntegrityError) as context:\n db.session.commit()", "def check_format_user_phone(phone):\n match = re.match(r'^\\+[0-9]{10,}$', phone)\n if not match:\n raise exceptions.ValidationError('phone is not valid!')\n return phone", "def test_user_signup_with_invalid_first_name(self):\n pass", "def test_signup_missing_last_name(self):\n\n invalid_u = User.signup(\"[email protected]\", \"testuser\", \"testpass\", \"Test\", None, None)\n \n uid = 99999\n invalid_u.id = uid\n\n with self.assertRaises(exc.IntegrityError) as context:\n db.session.commit()", "def test_create_user_invalid_username(self):\r\n print(\"Create user invalid username (already taken)\")\r\n u_id = 3\r\n username = \"100\"\r\n password = \"test9999\"\r\n u_type = 1\r\n\r\n prev_noUsers = 
len(User.query.all())\r\n self.assertEqual(self.system.create_user(u_id, username, password, u_type), 0)\r\n curr_noUsers = len(User.query.all())\r\n self.assertEqual(prev_noUsers, curr_noUsers)", "def test_cannot_create_user_without_email(self):\n with self.assertRaises(TypeError):\n User.objects.create_user(username=\"username\", password=\"password\", email=None)", "def test_signup_missing_email(self):\n\n invalid_u = User.signup(None, \"testuser\", \"testpass\", \"Test\", \"User\", None)\n \n uid = 99999\n invalid_u.id = uid\n\n with self.assertRaises(exc.IntegrityError) as context:\n db.session.commit()", "def test_valid_phone_valid():\n assert valid_phone(\"000-000-0000\")\n assert valid_phone(\"0000000000\")", "def test_create_invalid(self):\n url = '/api/users/'\n data = {}\n username = str(uuid1())[:8]\n # Response should be status 400 where essential parameters are missing.\n response = self.client.post(url, json.dumps(data), content_type='application/json')\n self.assertEqual(response.status_code, 400)\n data['EmailAddress'] = '{}@dbca.wa.gov.au'.format(username)\n response = self.client.post(url, json.dumps(data), content_type='application/json')\n self.assertEqual(response.status_code, 400)\n data['DisplayName'] = 'Doe, John'\n response = self.client.post(url, json.dumps(data), content_type='application/json')\n self.assertEqual(response.status_code, 400)\n data['SamAccountName'] = username\n response = self.client.post(url, json.dumps(data), content_type='application/json')\n self.assertEqual(response.status_code, 201) # Now valid.", "def clean_phone(self):\n phone = self.cleaned_data.get('phone')\n if Profile.objects.filter(phone=phone). \\\n exclude(pk=self.instance.pk).exists():\n raise ValidationError(\n u'This phone is already registered.',\n code='invalid'\n )\n\n return phone", "def test_private_create_user_without_email(self):\n with pytest.raises(ValueError) as exinfo:\n EmailUser.objects._create_user(None, None, False, False)\n self.assertIn('email must be set', str(exinfo.value))", "def testPhoneNumberValidation(self):\n \n fsd_tool = getToolByName(self.portal, TOOLNAME)\n desc = fsd_tool.getPhoneNumberDescription()\n self.failUnless(self.person.validate_officePhone('(555) 555-5555') is None)\n self.failUnless(self.person.validate_officePhone('555 555-5555') == \"Please provide the phone number in the format %s\" % desc)\n \n # Make sure a blank value for the phone number results in no validation\n self.failUnless(self.person.validate_officePhone('') is None, \"A blank value for officePhone should not be validated since officePhone is not a required field.\")\n \n # Make sure a blank value for the regex results in no validation.\n fsd_tool.setPhoneNumberRegex('')\n self.failUnless(self.person.validate_officePhone('555 555-5555') is None, \"A blank value for phoneNumberRegex should result in any value being accepted\")", "def validate_username(self, attrs, source):\n phone_no = attrs[source]\n if not phoneCleaner(phone_no):\n raise serializers.ValidationError(\"Please check your phone no. the format is incorrect\")\n\n try:\n User.objects.get(username__iexact=phone_no)\n except User.DoesNotExist:\n return attrs\n raise serializers.ValidationError(\"Phone number already exists. 
If are trying to glue, consider the glue option\")", "def test_create_user_invalid_type(self):\r\n print(\"Create user invalid type\")\r\n u_id = 3\r\n username = \"newtestuser\"\r\n password = \"password\"\r\n u_type = 5\r\n\r\n prev_noUsers = len(User.query.all())\r\n self.assertEqual(self.system.create_user(u_id, username, password, u_type), 0)\r\n curr_noUsers = len(User.query.all())\r\n self.assertEqual(prev_noUsers, curr_noUsers)", "def validate_phone(form, field):\n if len(field.data) > 16:\n raise ValidationError('Invalid phone number')\n try:\n input_number = phonenumbers.parse(field.data)\n if not (phonenumbers.is_valid_number(input_number)):\n raise ValidationError('Invalid phone number')\n except Exception:\n input_number = phonenumbers.parse('+1' + field.data)\n if not (phonenumbers.is_valid_number(input_number)):\n raise ValidationError('Invalid phone number')", "def test_create__invalid(self):\n testing_config.sign_in('[email protected]', 123567890)\n\n json_data = {'isAdmin': False} # No email\n with test_app.test_request_context(self.request_path, json=json_data):\n with self.assertRaises(werkzeug.exceptions.BadRequest):\n self.handler.do_post()\n\n new_appuser = user_models.AppUser.query(\n user_models.AppUser.email == '[email protected]').get()\n self.assertIsNone(new_appuser)", "def create_user(phone_num, pword, nickname):\n # Validate credentials\n if not SWE_PHONENUM_RE.match(phone_num):\n return {\n \"success\": False,\n \"msg\": \"Swedish format is required for phone number.\"\n }\n if not PASSWORD_RE.match(pword):\n return {\n \"success\": False,\n \"msg\": \"Password needs to be between 8 and 128 characters, and can only contain letters, numbers, spaces and the following characters: @#$%^&+=\"\n }\n if not NICKNAME_RE.match(nickname):\n return {\n \"success\": False,\n \"msg\": \"Nicknames need to be 2-30 characters long and can only contain letters, numbers, spaces, dashes and underscores.\"\n }\n\n phone_num = strip_phone_num(phone_num) # Get last 9 digits\n\n user = User.query.filter_by(phone_num=phone_num).first()\n if user:\n if(user.active):\n return {\n \"success\": False,\n \"msg\": \"Phone number already exists.\"\n }\n user.nickname = nickname\n user.active = True\n else:\n user = User(\n phone_num=phone_num,\n nickname=nickname,\n active=True\n )\n\n user.set_password(pword)\n\n db.session.add(user)\n db.session.commit()\n\n return {\n \"success\": True,\n \"msg\": \"User successfully registered.\"\n }", "def test_signup_dupe_email(self):\n\n invalid_u = User.signup(\"[email protected]\", \"testuser\", \"testpass\", \"Test\", \"User\", None)\n \n uid = 99999\n invalid_u.id = uid\n\n with self.assertRaises(exc.IntegrityError) as context:\n db.session.commit()", "def test_signup_missing_username(self):\n\n invalid_u = User.signup(\"[email protected]\", None, \"testpass\", \"Test\", \"User\", None)\n \n uid = 99999\n invalid_u.id = uid\n\n with self.assertRaises(exc.IntegrityError) as context:\n db.session.commit()", "def test_bad_phone():\n bad_phone = \"213-555-121\"\n m = CannedRe.PHONE.match(bad_phone)\n assert m is None, \"Canned RegEx phone test succeeded for %s while it should not\" % bad_phone", "def test_create_user_invalid_password(self):\r\n print(\"Create user invalid password (empty)\")\r\n u_id = 3\r\n username = \"newtestuser\"\r\n password = \"\"\r\n u_type = 1\r\n\r\n prev_noUsers = len(User.query.all())\r\n self.assertEqual(self.system.create_user(u_id, username, password, u_type), 0)\r\n curr_noUsers = len(User.query.all())\r\n 
self.assertEqual(prev_noUsers, curr_noUsers)", "def test_incomplete_user_exception(self):\n u_username_only = User(username=\"incomplete_user\")\n with self.assertRaises(TypeError) as err:\n User.signup(u_username_only)", "def test_signup_dupe_username(self):\n\n invalid_u = User.signup(\"[email protected]\", \"allison\", \"testpass\", \"Test\", \"User\", None)\n \n uid = 99999\n invalid_u.id = uid\n\n with self.assertRaises(exc.IntegrityError) as context:\n db.session.commit()", "def test_duplicate_email(self):\n params = {\n 'first_name': \"David\",\n 'last_name': \"Smith\",\n 'password': '******',\n 'email': \"[email protected]\",\n 'phone_number': \"012-345-6789\"\n }\n self.register(params)\n response = self.register(params)\n self.assertEqual(response.status_code, 400)\n self.assertDictContainsSubset({'message': \"Phone number/email already exists\"}, response.json())", "def test_no_phone_number(self):\n response = self.register({\n 'first_name': \"David\",\n 'last_name': \"Smith\",\n 'password': \"******\",\n 'email': \"[email protected]\",\n })\n self.assertEqual(response.status_code, 302)\n response = self.client.get(response.url)\n self.assertEqual(response.json()['phone_number'], None)", "def test_create_token_invalid_credantials(self):\n create_user(email='[email protected]', password='testpass')\n payload = {'email': '[email protected]', 'password': 'wrong'}\n res = self.client.post(TOKEN_URL, payload)\n\n self.assertNotIn('token', res.data)\n self.assertEquals(res.status_code, status.HTTP_400_BAD_REQUEST)", "def test_error_user_already_exists(self):\n User.objects.create_user(self.data)\n client = Client()\n client.post('/register/', self.data)\n self.assertRaisesMessage(ValueError, 'user already exists')", "def create_user():\r\n if not request.is_json or 'name' not in request.get_json() or 'phone_number' not in request.get_json() or 'password' not in request.get_json():\r\n return bad_request('Missing required data.')\r\n try:\r\n return add_user(request)\r\n except:\r\n return bad_request(error_messages['user_exist'])", "def test_bad_register(self):\n body, code = self.post(f\"/users\", bob, bob_creds)\n self.assertEqual(400, code)\n self.assertEqual({\"error\": \"Invalid request.\"}, body)", "def test_check_user_profile_id(self):\n userValue = {'name': 'User Test 1',\n 'login': 'usertest1',\n 'user_profile_id': self.env.ref('base.user_root').id,\n }\n with self.assertRaises(ValidationError):\n self.env['res.users'].create(userValue)", "def test_request_membership_form_with_an_invalid_user_id(self):\n pass", "def create_user(self, phone_number, type, password, is_staff):\n return self.__create_user(phone_number, type, password, is_staff, False, False)", "def test_invalid_data(self, client):\n data = {\n 'username': '*' * 255,\n 'birthday': 'test'\n }\n url = reverse('users:create')\n response = client.post(url, data)\n assert response.status_code == 200\n assert 'Enter a valid date.' in str(response.content)\n assert 'Ensure this value has at most 150 characters (it has 255).' 
in str(response.content)", "def test_create_user_with_existing_email(client, existing_user):\n response = client.post(\"/auth/register\", json=existing_user)\n assert response.status_code == 400", "def _create_user(self, phone_number, password, **extra_fields):\n if not phone_number:\n raise ValueError('The given phone_number must be set')\n phone_number = phone_number\n user = self.model(phone_number=phone_number, **extra_fields)\n user.set_password(password)\n user.save(using=self._db)\n return user", "def test_password_too_short(self):\n payload = {'email': '[email protected]', 'password': 'fu'}\n res = self.client.post(CREATE_USER_URL, payload)\n\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)\n user_exists = get_user_model().objects.filter(\n email=payload['email']\n ).exists()\n self.assertFalse(user_exists)", "def test_create_with_duplicate_userid(self):\n\n self.sdkapi.guest_create(self.userid, 1, 1024)\n try:\n self.sdkapi.guest_create(self.userid, 1, 1024)\n except exception.SDKSMUTRequestFailed as err:\n self.assertEqual(err.results['rc'], 400)\n self.assertEqual(err.results['rs'], 8)", "def test_invalid_password(self):\n u_invalid_password = User(username=\"bad_user\", email=\"[email protected]\", password=\"df\")\n with self.assertRaises(TypeError) as err:\n User.signup(u_invalid_password)", "def is_valid_user_by_phone_number(phone_number):\n count = db.users.filter(and_(db.users.phone_number == phone_number, db.users.is_validate == True))\n if count == 1:\n return True", "def test_registeration_invalid_email(self):\n response = self.signup_a_user(self.user_invalid_email)\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(response.data[\"errors\"][\"email\"],\n [\"Enter a valid email address.\"]\n )\n self.assertNotIn(\"token\", response.data)", "def test_user_create(self):\n user_count = User.objects.count()\n user = User.objects.create_user(email='[email protected]', password='test')\n self.assertTrue(User.objects.count() == user_count + 1)\n self.assertFalse(user.is_staff)\n self.assertFalse(user.is_superuser)\n with self.assertRaises(ValueError, msg='The email must be provided'):\n User.objects.create_user(email='', password='test')", "def create_user(name, phone_num):\n\n user = User(name=name, phone_num=phone_num)\n db.session.add(user)\n db.session.commit()\n\n return user", "def test_create_user(self):\n self.assertEqual(self.new_user.first_name, 'John')\n self.assertEqual(self.new_user.last_name, 'Doe')\n self.assertEqual(self.new_user.username, 'john_doe')\n self.assertEqual(self.new_user.email, '[email protected]')\n self.assertTrue(self.new_user.check_password('test_password'))\n self.assertFalse(self.new_user.is_staff)\n self.assertFalse(self.new_user.is_superuser)\n self.assertFalse(self.new_user.is_active)\n\n with self.assertRaises(ValueError):\n User.objects.create_user(\n first_name='', last_name='', username='', email='', bio='', password=''\n )", "def test_creation_without_password(self, user):\n with pytest.raises(mongoengine.errors.ValidationError):\n user.save()", "def test_password_too_short(self):\n payload = {'email': '[email protected]', 'password': '123'}\n\n res = self.client.post(CREATE_USER_URL, payload)\n\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)\n user_exists = get_user_model().objects.filter(\n email=payload['email']\n ).exists()\n self.assertFalse(user_exists)", "def validate_phone_number(value):\n\n try:\n z = phonenumbers.parse(value, None)\n except 
phonenumbers.NumberParseException:\n raise forms.ValidationError(\"Enter a valid phone number.\")\n\n if not phonenumbers.is_valid_number(z):\n raise forms.ValidationError(\"Enter a valid phone number.\")", "def test_08_create_user_not_exists(self):\n\n _, user = self.get_random_item(models.User)\n utils.create_user(user, session=self.session)\n success, error = utils.create_user(user, session=self.session)\n self.assertFalse(success)\n self.assertTrue(error)", "def test_password_too_short(self):\n payload = {\n 'email': '[email protected]',\n 'password': 'pw'\n }\n res = self.client.post(CREATE_USER_API, payload)\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)\n user_exists = get_user_model().objects.filter(\n email=payload['email']\n ).exists()\n self.assertFalse(user_exists)", "def test_phone_too_short(self):\n phone = Report(\n contact_phone='202',\n )\n\n try:\n phone.full_clean()\n except ValidationError as err:\n phone_error_message = err.message_dict['contact_phone']\n self.assertTrue(phone_error_message == ['Enter a valid value.'])", "def test_register_twice(self):\n body, code = self.post(f\"/users\", bob, {\"phone\": \"+441234567890\", **bob_creds})\n self.assertEqual(400, code)\n self.assertEqual({\"error\": \"User already exists.\"}, body)", "def test_password_too_short(self):\r\n payload = {\r\n 'email': '[email protected]',\r\n 'password': 'pw',\r\n 'name': 'Maks'\r\n }\r\n\r\n res = self.client.post(CREATE_USER_URL, payload)\r\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)\r\n\r\n user_exists = get_user_model().objects.filter(\r\n email=payload['email']\r\n ).exists()\r\n self.assertFalse(user_exists)", "def test_signup_missing_password(self):\n with self.assertRaises(ValueError) as context:\n invalid_u = User.signup(\"[email protected]\", \"testuser\", None, \"Test\", \"User\", None)", "def test_password_too_short(self):\n payload = {'email': '[email protected]', 'password': 'pw'}\n \n res = self.client.post(CREATE_USER_URL, payload)\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)\n \n user_exists = get_user_model().objects.filter(\n email=payload['email']\n ).exists()\n \n self.assertFalse(user_exists)", "def test_invalid_registration(self):\n self.response = self.client.post(\n \"/api/users/\",\n self.invalid_reg_data,\n format=\"json\")\n self.assertEqual(self.response.status_code,\n status.HTTP_400_BAD_REQUEST)", "def validate_phonenumber(self):\n special_chars = set(string.punctuation.replace('+', ''))\n for number in self.telefono:\n if number.isalpha() or number in special_chars:\n raise OspiteExc('Il campo numero di telefono non è valido')", "def test_create_account_failed_existing_email(self):\n data = self.user_data.copy()\n data['email'] = '[email protected]'\n response = self.client.post(self.url, data, format='json')\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(response.data.get('message').get('email')[0], 'user with this email already exists.')", "def test_create_account_failed_invalid_email(self):\n data = self.user_data.copy()\n data['email'] = 'test'\n response = self.client.post(self.url, data, format='json')\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(response.data.get('message').get('email')[0], 'Enter a valid email address.')", "def test_duplicate_user(self, mapp, existing_user_id):\n\n mapp.create_user(user=existing_user_id, password=1234,\n email=existing_user_id + \"@example.com\", code=409)", "def 
test_blank_phone_number(self):\n response = self.register({\n 'first_name': \"David\",\n 'last_name': \"Smith\",\n 'password': '******',\n 'email': \"[email protected]\",\n 'phone_number': \"\"\n })\n self.assertEqual(response.status_code, 302)\n response = self.client.get(response.url)\n self.assertEqual(response.json()['phone_number'], None)", "def test_must_inform_email_or_phone(self):\n form = self.make_validator_form(phone='', email='')\n # Identifica erro no formulário e não em um campo.\n self.assertListEqual(['__all__'], list(form.errors))", "def test_create_account_failed_invalid_password(self):\n data = self.user_data.copy()\n data['password'] = 'aaAA@@aaaaa'\n response = self.client.post(self.url, data, format='json')\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(response.data.get('message').get('password')[0],\n 'Password value should have at least 2 uppercase, 2 lowercase, '\n '2 digit and 2 special character.')", "async def test_invalid_insert_user_typeError(database):\n await database.setup_database(reset=True)\n\n for user_id in zip([\n random.choice([\"string\",[1,2],set(),dict()])\n for i in range(0,10)]):\n try:\n await database.insert_user(user_id=user_id)\n assert False\n except:\n assert True\n await database.close_pool()", "def test_user_recreate(self):\n self.test_user_creation()\n\n with self.assertRaisesMessage(\n IntegrityError, \"UNIQUE constraint failed: auths_user.username\"\n ):\n UserModel.objects.create_user(\n username=\"saimer\"\n )", "def test_create_no_email(self):\n\n self.portal.portal_properties.site_properties.use_email_as_login = True\n\n self.assertRaises(\n ValueError,\n api.user.create,\n username='chuck', password='secret'\n )", "def test_user_cannot_register_twice(self):\n self.client.post(SIGNUP_URL,\n data=json.dumps(self.user_data), content_type='application/json')\n response2 = self.client.post(SIGNUP_URL,\n data=json.dumps(self.user_data), content_type='application/json')\n self.assertEqual(response2.status_code, 203)\n result = json.loads(response2.data.decode())\n self.assertEqual(result[\"message\"], \"User already exists\")", "def validatePhoneNumber(self):\n ## Declaring a Flag to control a while loop\n phone_number_ok = False\n ## While loop to have user retry their input if they enter incorrectly\n while not phone_number_ok:\n ## Asking for a phone number and checkig to see if it is 10 digits\n if self.phone_number.isdigit():\n if len(self.phone_number) == 10:\n phone_number_ok = True\n return True\n else:\n print(\"Please Enter a 10 digit phone number.\")\n return False\n \n else:\n print(\"You have enetered an invalid phone number. Please try again.\")\n return False" ]
[ "0.7145561", "0.70290124", "0.6975835", "0.68602705", "0.68565595", "0.6848795", "0.684489", "0.6838828", "0.68144745", "0.6813772", "0.6810992", "0.6809171", "0.68087786", "0.68087786", "0.68087786", "0.68087786", "0.68087786", "0.6709067", "0.67028594", "0.6652703", "0.66233295", "0.66155636", "0.6599365", "0.6571285", "0.6551518", "0.6550027", "0.6540607", "0.6514016", "0.6495744", "0.64895654", "0.6471155", "0.64556295", "0.6444379", "0.6431133", "0.64220464", "0.641292", "0.6384025", "0.6381374", "0.63802826", "0.6361217", "0.6345133", "0.6318881", "0.63179314", "0.63137877", "0.63114196", "0.62973166", "0.62912554", "0.6267535", "0.62532437", "0.62415123", "0.6236479", "0.6229411", "0.62266314", "0.62100047", "0.6188289", "0.6165693", "0.61627233", "0.61559457", "0.6147425", "0.61333984", "0.6128702", "0.61170787", "0.6114818", "0.6097642", "0.6094094", "0.6086597", "0.6076882", "0.607173", "0.6066248", "0.6059334", "0.6046637", "0.6040943", "0.6038847", "0.6028419", "0.60166705", "0.6014265", "0.59997636", "0.59990543", "0.599484", "0.59944636", "0.5989878", "0.5984339", "0.5981903", "0.5969221", "0.59650624", "0.5960354", "0.59584516", "0.5957398", "0.5955311", "0.5954718", "0.5948332", "0.594386", "0.5931205", "0.59304863", "0.5928061", "0.59263206", "0.5920749", "0.5917401", "0.5914768", "0.5912508" ]
0.7310096
0
Ensure we can't create a new user with an already existing email
def test_create_new_user_duplicate_email(self):
    data = {
        'username': 'John',
        'email': '[email protected]',
        'password': 'test123!',
        'phone': '1234567890',
        'first_name': 'Chuck',
        'last_name': 'Norris',
        'university': {
            "name": "random_university"
        },
        'academic_field': {'name': "random_field"},
        'academic_level': {'name': "random_level"},
        'gender': "M",
        'birthdate': "1999-11-11",
    }

    user = UserFactory()
    user.email = '[email protected]'
    user.save()

    response = self.client.post(
        reverse('user-list'),
        data,
        format='json',
    )

    self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)

    content = {
        'email': [
            "An account for the specified email address already exists."
        ]
    }
    self.assertEqual(json.loads(response.content), content)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_private_create_user_without_email(self):\n with pytest.raises(ValueError) as exinfo:\n EmailUser.objects._create_user(None, None, False, False)\n self.assertIn('email must be set', str(exinfo.value))", "def test_create_use_with_invalid_email(self):\n with self.assertRaises(ValueError):\n get_user_model().objects.create_user(None, password='open@123')", "def test_new_user_invalid_email(self):\n\n with self.assertRaises(ValueError):\n get_user_model().objects.create_user(None, 'test1234')", "def test_new_user_with_invalid_email(self):\n with self.assertRaises(ValueError):\n get_user_model().objects.create_user(None, 'userpass123')", "def test_new_user_invalid_email(self):\n with self.assertRaises(ValueError):\n get_user_model().objects.create_user(\"\", \"test42837492374923749\")", "def test_new_user_invalid_email(self):\n\n with self.assertRaises(ValueError):\n get_user_model().objects.create_user(None, 'test123')", "def test_new_user_invalid_email(self):\n with self.assertRaises(ValueError):\n get_user_model().objects.create_user(None, '123')", "def test_new_user_invalid_email(self):\n with self.assertRaises(ValueError):\n get_user_model().objects.create_user(None, 'test123')", "def test_new_user_invalid_email(self):\n with self.assertRaises(ValueError):\n get_user_model().objects.create_user(None, 'test123')", "def test_new_user_invalid_email(self):\n with self.assertRaises(ValueError):\n get_user_model().objects.create_user(None, 'test123')", "def test_new_user_invalid_email(self):\n with self.assertRaises(ValueError):\n get_user_model().objects.create_user(None, 'test123')", "def test_new_user_invalid_email(self):\n with self.assertRaises(ValueError):\n get_user_model().objects.create_user(None, 'test123')", "def test_new_user_invalid_email(self):\n with self.assertRaises(ValueError):\n get_user_model().objects.create_user(None, \"test123\")", "def test_creating_a_new_user_without_email(self):\n\n with self.assertRaises(ValueError):\n get_user_model().objects.create_user(None, \"Test1234\")", "def test_new_user_invalid_email(self):\n with self.assertRaises(ValueError):\n get_user_model().objects.create_user(\n email=None,\n password=self.test_user_pass,\n name=self.test_user_name,\n )", "def test_new_user_invalid_email(self):\n with self.assertRaises(ValueError):\n get_user_model().objects.create_user(\n None,\n 'Password'\n )", "def test_new_user_invalid_email(self):\n with self.assertRaises(ValueError):\n get_user_model().objects.create_user(None, 'test123456')", "def test_new_user_invalid_email(self):\n with self.assertRaises(ValueError):\n get_user_model().objects.create_user(None, '3232Ze')", "def test_create_account_failed_existing_email(self):\n data = self.user_data.copy()\n data['email'] = '[email protected]'\n response = self.client.post(self.url, data, format='json')\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(response.data.get('message').get('email')[0], 'user with this email already exists.')", "def test_signup_dupe_email(self):\n\n invalid_u = User.signup(\"[email protected]\", \"testuser\", \"testpass\", \"Test\", \"User\", None)\n \n uid = 99999\n invalid_u.id = uid\n\n with self.assertRaises(exc.IntegrityError) as context:\n db.session.commit()", "def test_invalid_user_without_email(self):\n email = ''\n password = None\n with self.assertRaises(ValueError):\n self.user_manager.create_user(email=email, password=password)", "def test_user_invalid_email(self):\n with self.assertRaises(ValueError):\n 
get_user_model().objects.create_user(email=None, password=\"123\")", "def test_new_user_invalid_email(self):\n\n # It should raise a ValueError\n with self.assertRaises(ValueError):\n get_user_model().objects.create_user(\n None,\n password=\"password\"\n )", "def test_create_no_email(self):\n\n self.portal.portal_properties.site_properties.use_email_as_login = True\n\n self.assertRaises(\n ValueError,\n api.user.create,\n username='chuck', password='secret'\n )", "def test_signup_missing_email(self):\n\n invalid_u = User.signup(None, \"testuser\", \"testpass\", \"Test\", \"User\", None)\n \n uid = 99999\n invalid_u.id = uid\n\n with self.assertRaises(exc.IntegrityError) as context:\n db.session.commit()", "def test_register_existing_email(self):\n response = self.client.post('/api/v2/auth/signup',\n data=json.dumps(users[0]),\n headers=self.admin_headers,\n content_type='application/json')\n self.assertEqual(response.status_code, 409)\n self.assertIn('user with email already registred', str(response.data))", "def test_cannot_create_user_without_email(self):\n with self.assertRaises(TypeError):\n User.objects.create_user(username=\"username\", password=\"password\", email=None)", "def test_user_existing_email(self):\n data = json.dumps({\n \"username\" : \"john\", \"email\" : \"[email protected]\",\n \"password\" : \"secret12345\", \"confirm_password\" : \"secret12345\"})\n res = self.app.post( # pylint: disable=W0612\n '/api/v3/users', data=data,\n content_type='application/json',\n headers=self.admin_header)\n response = self.app.post(\n '/api/v3/users', data=data,\n content_type='application/json',\n headers=self.admin_header)\n self.assertEqual(response.status_code, 400)", "def test_registeration_duplicate_user_email(self):\n self.signup_a_user(self.user_data)\n response_duplicate = self.signup_a_user(self.user_data_duplicate_email)\n self.assertEqual(response_duplicate.status_code,\n status.HTTP_400_BAD_REQUEST)\n self.assertEqual(response_duplicate.data[\"errors\"][\"email\"],\n [\"user with this email already exists.\"])\n self.assertNotIn(\"token\", response_duplicate.data)", "def test_new_user_invalid_email(self):\n user_number_before = get_user_model().objects.count()\n with self.assertRaises(ValueError):\n get_user_model().objects.create_user(\n None,\n password=\"1234Test\"\n )\n user_number_after = get_user_model().objects.count()\n self.assertEqual(user_number_before, user_number_after)", "def test_create_user_with_existing_email(client, existing_user):\n response = client.post(\"/auth/register\", json=existing_user)\n assert response.status_code == 400", "def check_duplicate_email(self, email):\r\n request = self.req_factory.post('unused_url', data={\r\n 'new_email': email,\r\n 'password': 'test',\r\n })\r\n request.user = self.user\r\n self.assertFailedRequest(self.run_request(request), 'An account with this e-mail already exists.')", "def create_user(email, password):\n try:\n User(email=email, password=password)\n except IntegrityError:\n print('Error: Duplicate email address')", "def test_create_user_with_preexisting_email(self):\n data = {\n 'username': 'test_user2',\n 'email': '[email protected]',\n 'password': 'testpassword'\n }\n response = self.client.post(self.create_url, data, format='json')\n self.assertEqual(User.objects.count(), 1)\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(len(response.data['email']), 1)", "def validate_email(self, email_field):\n\n if User.query.filter_by(email=email_field.data).first():\n raise 
ValidationError(\"There already is a user with this email address.\")", "def validate_email(self, email):\n user = User.query.filter_by(email=email.data).first()\n if user:\n raise ValidationError('That email already exists. Please choose another email.')", "def validate_email(self, email):\n if email.data != current_user.email:\n user = User.query.filter_by(email=email.data).first()\n if user:\n raise ValidationError('That email already exists. Please choose another email.')", "def validate_email(self, email_field):\n user = User.query.filter_by(email=email_field.data).first()\n if user:\n if user.email:\n current_app.logger.error('{} tried to register user with email {} but user already exists.'.format(\n user.email, email_field.data))\n else:\n current_app.logger.error('Anonymous user tried to register user with email {} but user already exists.'.\n format(email_field.data))\n raise ValidationError('An account with this email address already exists')", "def validate_unique_email(email):\n if AssociatedEmail.objects.filter(email=email.lower(), is_primary_email=False):\n raise ValidationError(_(\"User with this email already exists.\"),\n code='email_not_unique',)\n if User.objects.filter(email=email.lower()):\n raise ValidationError(_(\"User with this email already exists.\"),\n code='email_not_unique',)", "def save(self, *args, **kwargs):\n if not self.id and User.objects.filter(email=self.email).exists():\n raise IntegrityError()\n super().save(*args, **kwargs)", "def test_registeration_no_email(self):\n response = self.signup_a_user(self.user_lacks_email)\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(response.data[\"errors\"][\"email\"],\n [\"This field may not be blank.\"]\n )\n self.assertNotIn(\"token\", response.data)", "def test_registeration_invalid_email(self):\n response = self.signup_a_user(self.user_invalid_email)\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(response.data[\"errors\"][\"email\"],\n [\"Enter a valid email address.\"]\n )\n self.assertNotIn(\"token\", response.data)", "def test_create_user_with_invalid_email(self):\n data = data = {\n 'username': 'foobar',\n 'email': 'testing',\n 'password': 'testpassword'\n }\n response = self.client.post(self.create_url, data, format='json')\n self.assertEqual(User.objects.count(), 1)\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(len(response.data['email']), 1)", "def test_add_user_duplicate_email(self):\n with self.client:\n auth_headers = login_test_user(self.client)\n payload = json.dumps(dict(email=\"[email protected]\",username=\"neilb14\",password=\"password123\"))\n self.client.post('/users',\n data = payload,\n content_type='application/json',\n headers = auth_headers\n )\n response = self.client.post('/users',\n data = payload,\n content_type='application/json',\n headers = auth_headers\n )\n data = json.loads(response.data.decode())\n self.assertEqual(response.status_code, 400)\n self.assertIn('User already exists', data['message'])\n self.assertIn('fail', data['status'])", "def validate_email(self, data, field_name):\n existing = User.objects.filter(email__iexact=data['email'])\n if existing.exists():\n raise fields.ValidationError(\"A user with that email already exists.\")\n else:\n return data", "def validate_email(self, email):\n if email and email_address_exists(email):\n raise serializers.ValidationError(\n \"A user is already registered with this e-mail address.\")\n\n return 
email", "def test_invalid_superuser_without_email(self):\n email = ''\n password = 'password'\n with self.assertRaises(ValueError):\n self.user_manager.create_superuser(email=email, password=password)", "def test_user_create(self):\n user_count = User.objects.count()\n user = User.objects.create_user(email='[email protected]', password='test')\n self.assertTrue(User.objects.count() == user_count + 1)\n self.assertFalse(user.is_staff)\n self.assertFalse(user.is_superuser)\n with self.assertRaises(ValueError, msg='The email must be provided'):\n User.objects.create_user(email='', password='test')", "def email_exists(form, field):\n if User.select().where(User.email == field.data).exists():\n raise ValidationError('A user with that E-mail already exists.')", "def test_create_account_failed_no_email(self):\n data = self.user_data.copy()\n data.pop('email')\n response = self.client.post(self.url, data, format='json')\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(response.data.get('message').get('email')[0], 'This field is required.')", "def validate_email(self, email_field):\n if User.query.filter_by(email=email_field.data).first():\n raise ValidationError('An account with this email address already exists')\n return True", "def validate_email(self, email):\n user = User.query.filter_by(email=email.data).first()\n if user is not None:\n raise ValidationError('Please use a different email address.')", "def validate_email(self, field):\n if User.query.filter_by(email=field.data).first():\n raise ValidationError(\"Email already registered.\")", "def test_duplicate_user(self, mapp, existing_user_id):\n\n mapp.create_user(user=existing_user_id, password=1234,\n email=existing_user_id + \"@example.com\", code=409)", "def test_validates_email_not_already_invited_on_create(self):\n # Make an invitation with the same email address, but with different capitalisation\n self.project.invitations.create(email = '[email protected]')\n invitation = Invitation(project = self.project, email = '[email protected]')\n expected_errors = {\n 'email': ['Email address already has an invitation for this project.'],\n }\n with self.assertValidationErrors(expected_errors):\n invitation.full_clean()", "def test_user_creation_no_email(self):\n self.registration_profile.objects.create_inactive_user(\n site=Site.objects.get_current(),\n send_email=False, **self.user_info)\n self.assertEqual(len(mail.outbox), 0)", "def validate_email(form, field):\n if User.query.filter_by(email = field.data).first():\n raise ValidationError(\"Email already registed.\")", "def test_create_no_username(self):\n\n # If there is no username, email will be used instead\n properties = self.portal.portal_properties.site_properties\n properties.manage_changeProperties(use_email_as_login=True)\n\n user = api.user.create(\n email='[email protected]',\n password='secret'\n )\n\n self.assertEquals(user.getUserName(), '[email protected]')\n\n # But if using emails as a username is disabled, we should get\n # an error\n properties.manage_changeProperties(use_email_as_login=False)\n\n self.assertRaises(\n ValueError,\n api.user.create,\n email='[email protected]', password='secret'\n )", "def clean_email(self):\n try:\n user = User.objects.get(email__iexact=self.cleaned_data['email'])\n except User.DoesNotExist:\n return self.cleaned_data['email']\n except User.MultipleObjectsReturned:\n pass\n raise forms.ValidationError(_(u'This email address is already in use. 
Please supply a different email address.')\n )", "def test_08_create_user_not_exists(self):\n\n _, user = self.get_random_item(models.User)\n utils.create_user(user, session=self.session)\n success, error = utils.create_user(user, session=self.session)\n self.assertFalse(success)\n self.assertTrue(error)", "def validate_register_email():\n email = request.args.get('email')\n if not email:\n return jsonify({'valid': False, 'error': 'Please provide an email address'})\n user = User.query.filter_by(email=email).first()\n if user:\n # The email already exists\n return jsonify({'valid': False, 'error': 'An account with that email address has already registered'})\n return jsonify({'valid': True})", "def validate_email(self, value):\n if not User.objects.filter(email=value).exists():\n raise serializers.ValidationError('User with this email does not exist.')\n return value", "def validate_email(self, value):\n if not User.objects.filter(email=value).exists():\n raise serializers.ValidationError('User with this email does not exist.')\n return value", "def validate_email(form, field):\n\n user = User.query.filter_by(email=form.email.data).first()\n\n if user and not user == g.user:\n form.email.errors = [\n \"Email already associated with account!\",\n *form.email.errors\n ]\n raise ValidationError", "def test_already_signedup_user(self):\n response = self.client().post('/api/v1/auth/signup', data=self.user_data_2)\n response = self.client().post('/api/v1/auth/signup', data=self.user_data_2)\n self.assertEqual(response.status_code, 409)\n # return result in json format\n result = json.loads(response.data.decode())\n self.assertEqual(\n result[\"message\"], \"The email address has been used try another one.\")", "def test_create_account_failed_invalid_email(self):\n data = self.user_data.copy()\n data['email'] = 'test'\n response = self.client.post(self.url, data, format='json')\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(response.data.get('message').get('email')[0], 'Enter a valid email address.')", "def test_existence_conflict(self, username, email, validate_suggestions):\n user = UserFactory.create(username='user', email='[email protected]')\n self.assertValidationDecision(\n {\n 'username': username,\n 'email': email\n },\n {\n # pylint: disable=no-member\n \"username\": USERNAME_CONFLICT_MSG.format(\n username=user.username\n ) if username == user.username else '',\n # pylint: disable=no-member\n \"email\": EMAIL_CONFLICT_MSG.format(\n email_address=user.email\n ) if email == user.email else ''\n },\n validate_suggestions\n )", "def clean_email(self):\n try:\n user = User.objects.get(email__iexact=self.cleaned_data['email'])\n except User.DoesNotExist:\n return self.cleaned_data['email']\n raise forms.ValidationError(_(\"A user with that email already exists.\"))", "def clean_email(self):\n try:\n user = User.objects.get(email__exact=self.cleaned_data['email'])\n except User.DoesNotExist:\n return self.cleaned_data['email']\n raise forms.ValidationError(_(u'This email address is already in use. 
Please supply a different email address.'))", "def testEmailAlreadyThere(self):\r\n res = self.app.post(\r\n '/signup_process',\r\n params={\r\n 'email': '[email protected]'\r\n }\r\n )\r\n self.assertIn('already signed up', res.body)", "def test_optional_email(self):\n\n consumer = LTIConsumerFactory(slug=\"consumer\")\n passport = LTIPassportFactory(title=\"consumer1_passport1\", consumer=consumer)\n\n user_count = get_user_model().objects.count()\n\n new_user = self._authenticate(\n {\n \"user_id\": \"7275a984-1e77-4084-9fe6-e54d0deba0e7\",\n \"lti_message_type\": \"basic-lti-launch-request\",\n \"lti_version\": \"LTI-1p0\",\n \"resource_link_id\": \"aaa\",\n \"context_id\": \"course-v1:fooschool+authbackend+0001\",\n \"lis_person_sourcedid\": \"user_without_email\",\n },\n passport,\n )\n\n self.assertEqual(\"user_without_email\", new_user.public_username)\n self.assertEqual(\"\", new_user.email)\n self.assertEqual(consumer, new_user.lti_consumer)\n self.assertEqual(\"user_without_email@consumer\", new_user.username)\n self.assertEqual(user_count + 1, get_user_model().objects.count())", "def test_email_not_unique(bot):\n expect_error(register, InputError, \"a\", \"abcdef\", \"a\", \"a\", bot.email)", "def clean_email(self):\n email = self.cleaned_data.get(\"email\")\n # Check if user exists already, error early\n if User.objects.filter(email=email).exists():\n LOGGER.debug(\"email already exists\", email=email)\n raise ValidationError(_(\"Email already exists\"))\n return email", "def test_invalid_email_when_logging_in(self):\n pass", "def test_create__invalid(self):\n testing_config.sign_in('[email protected]', 123567890)\n\n json_data = {'isAdmin': False} # No email\n with test_app.test_request_context(self.request_path, json=json_data):\n with self.assertRaises(werkzeug.exceptions.BadRequest):\n self.handler.do_post()\n\n new_appuser = user_models.AppUser.query(\n user_models.AppUser.email == '[email protected]').get()\n self.assertIsNone(new_appuser)", "def test_register_invalid_email(self):\n response = self.client.post('/api/v2/auth/signup',\n data=json.dumps(users[3]),\n headers=self.admin_headers,\n content_type='application/json')\n self.assertEqual(response.status_code, 422)\n self.assertIn('invalid email', str(response.data))", "def test_manage_user_without_email(self):\r\n # First with a new user\r\n user_data = dict(id=1, username='facebook', name='name')\r\n token = 't'\r\n user = manage_user(token, user_data, None)\r\n assert user.email_addr == user_data['email'], user\r\n assert user.name == user_data['username'], user\r\n assert user.fullname == user_data['name'], user\r\n assert user.facebook_user_id == user_data['id'], user\r\n\r\n # Second with the same user\r\n user = manage_user(token, user_data, None)\r\n assert user.email_addr == user_data['email'], user\r\n assert user.name == user_data['username'], user\r\n assert user.fullname == user_data['name'], user\r\n assert user.facebook_user_id == user_data['id'], user\r\n\r\n # Finally with a user that already is in the system\r\n user_data = dict(id=10, username=self.name,\r\n email=self.email_addr, name=self.fullname)\r\n token = 'tA'\r\n user = manage_user(token, user_data, None)\r\n err_msg = \"It should return the same user\"\r\n assert user.facebook_user_id == 10, err_msg", "def test_error_user_already_exists(self):\n User.objects.create_user(self.data)\n client = Client()\n client.post('/register/', self.data)\n self.assertRaisesMessage(ValueError, 'user already exists')", "def 
test_user_cannot_register_twice(self):\n self.client.post(SIGNUP_URL,\n data=json.dumps(self.user_data), content_type='application/json')\n response2 = self.client.post(SIGNUP_URL,\n data=json.dumps(self.user_data), content_type='application/json')\n self.assertEqual(response2.status_code, 203)\n result = json.loads(response2.data.decode())\n self.assertEqual(result[\"message\"], \"User already exists\")", "def sample_user_dynamic_email(email):\n return get_user_model().objects.create_user(email=email,\n password=\"password123\",\n name=\"some name\")", "def clean_email(self):\n existing = User.objects.filter(email__iexact=self.cleaned_data['email'])\n if existing.exists():\n raise forms.ValidationError(_(\"This email address is already in use. Please enter a different email \"\n \"address!\"))\n else:\n return self.cleaned_data['email']", "def test_create_email_account_twice(self):\n email_addr = 'testcreatetwins@' + self.email_dom\n acc = SpokeEmailAccount(self.org_name, self.user_id)\n self.assertRaises(error.AlreadyExists, acc.create, email_addr)", "def test_user_exists_without_email(self):\n response = self.client.post(self.url)\n expected_response_code = 400\n\n self.assertEqual(expected_response_code, response.status_code)", "def validate_email(self, email):\n email = email.lower()\n if User.objects.filter(email=email).exists():\n raise serializers.ValidationError('Email already registered.')\n return email", "def unique_email(cls, email):\n user_db = User.get_by('email', email)\n if user_db:\n raise ValueError('Sorry, this email is already taken.')\n return email", "def test_unique(self, test_domain):\n test_domain.repository_for(User)._dao.create(\n email=\"[email protected]\", password=\"a1b2c3\"\n )\n\n with pytest.raises(ValidationError) as err:\n test_domain.repository_for(User)._dao.create(\n email=\"[email protected]\", password=\"d4e5r6\"\n )\n assert err.value.messages == {\n \"email\": [\"User with email '[email protected]' is already present.\"]\n }", "def test_registration_when_user_already_exists(self):\n # register the user the first time\n self.register_user()\n # register the same user the second time\n result = self.client().post(AuthTestCase.registration, data=self.user)\n response_result = json.loads(result.data.decode())\n self.assertEqual(result.status_code, 409)\n self.assertEqual(response_result['message'], \"user already exists\")", "def test_signup_dupe_username(self):\n\n invalid_u = User.signup(\"[email protected]\", \"allison\", \"testpass\", \"Test\", \"User\", None)\n \n uid = 99999\n invalid_u.id = uid\n\n with self.assertRaises(exc.IntegrityError) as context:\n db.session.commit()", "def createOtherUser(self, email):\n from soc.models.user import User\n from soc.modules.seeder.logic.providers.user import FixedUserProvider\n properties = {'account': FixedUserProvider(value=email), 'status': 'valid'}\n self.user = seeder_logic.seed(User, properties=properties)\n return self.user", "def unique():\n\n click.secho('*** Creating User...', fg='green')\n email = 'mclovin-{}@example.net'.format(uuid4().hex)\n mclovin = _make_document('user', email=email, age=21)\n click.secho(json.dumps(mclovin, indent=2, sort_keys=True), fg='yellow')\n\n click.secho('*** Trying to create another user with the same email...', fg='green')\n try:\n _make_document('user', email=email, age=16)\n except requests.HTTPError as e:\n click.secho(str(e), fg='red')\n click.secho(json.dumps(e.response.json(), indent=2, sort_keys=True), fg='yellow')", "def validate_email(form, field):\n 
if User.query.filter_by(email=form.email.data).first():\n form.email.errors.append(\n \"Email already associated with account!\")\n raise ValidationError", "def clean_email(self):\n email = self.cleaned_data['email'].lower()\n if User.objects.filter(email__iexact=email).exists():\n raise ValidationError(_('A user with that email already exists.'))\n return email", "def test_user_must_pass_registered_email(self):\n\n data = {'email': '[email protected]', 'username': 'unregistered',\n 'callback_url': 'http://www.example.com'}\n serializer = CreateEmailVerificationSerializer(data=data)\n self.assertTrue(serializer.is_valid())\n\n with self.assertRaises(ValidationError) as e:\n serializer.create_payload(data)\n\n self.assertEqual(e.exception.detail[0],\n \"No user with this email address is registered.\")", "def test_manage_user_with_email(self):\r\n # First with a new user\r\n user_data = dict(id=1, username='facebook',\r\n email='[email protected]', name='name')\r\n token = 't'\r\n user = manage_user(token, user_data, None)\r\n assert user.email_addr == user_data['email'], user\r\n assert user.name == user_data['username'], user\r\n assert user.fullname == user_data['name'], user\r\n assert user.facebook_user_id == user_data['id'], user\r\n\r\n # Second with the same user\r\n user = manage_user(token, user_data, None)\r\n assert user.email_addr == user_data['email'], user\r\n assert user.name == user_data['username'], user\r\n assert user.fullname == user_data['name'], user\r\n assert user.facebook_user_id == user_data['id'], user\r\n\r\n # Finally with a user that already is in the system\r\n user_data = dict(id=10, username=self.name,\r\n email=self.email_addr, name=self.fullname)\r\n token = 'tA'\r\n user = manage_user(token, user_data, None)\r\n err_msg = \"It should return the same user\"\r\n assert user.facebook_user_id == 10, err_msg", "def test_user_recreate(self):\n self.test_user_creation()\n\n with self.assertRaisesMessage(\n IntegrityError, \"UNIQUE constraint failed: auths_user.username\"\n ):\n UserModel.objects.create_user(\n username=\"saimer\"\n )", "def test_already_existing_user(self):\n self.user.registration(\n \"Githeri\", \"[email protected]\", \"iwantgitheri\", \"iwantgitheri\")\n msg = self.user.registration(\"Githeri\",\n \"[email protected]\",\n \"iwantgitheri\",\n \"iwantgitheri\")\n self.assertEqual(msg, \"Your Account Already Active. 
Proceed to login\")", "def create(self):\n if User.load(username) is None:\n \"\"\"This username is not in use\"\"\"\n if self.validateEmail(self.email):\n \"\"\"This email is valid\"\"\"\n if len(self.username) > 2:\n \"\"\"This is long enough\"\"\"\n self.__store()", "def test_user_creation_email(self):\n self.registration_profile.objects.create_inactive_user(\n site=Site.objects.get_current(), **self.user_info)\n self.assertEqual(len(mail.outbox), 1)", "def test_user_invalid_email(self):\n data = json.dumps({\n \"username\" : \"lenny\", \"email\" : \"invalidemail.com\",\n \"password\" : \"secret12345\", \"confirm_password\" : \"secret12345\"})\n response = self.app.post(\n '/api/v3/users', data=data,\n content_type='application/json',\n headers=self.admin_header)\n self.assertEqual(response.status_code, 400)", "def test_signup_with_invalid_email_false(self):\n user = {\n \"Email\": \"user.com\",\n \"Password\": \"pass1234\",\n \"Confirm Password\": \"pass1234\"\n }\n res = self.client().post('/api/v1/auth/signup', data=user)\n self.assertEqual(res.status_code, 400)\n res = res.get_json()\n self.assertEqual(res['error'][0],\n 'Invalid Email Address')" ]
[ "0.8002483", "0.7915221", "0.78696436", "0.78556156", "0.78552705", "0.78552043", "0.7842394", "0.7826193", "0.7826193", "0.7826193", "0.7826193", "0.7826193", "0.78208935", "0.78188866", "0.78091335", "0.78064054", "0.77855194", "0.77645355", "0.7674032", "0.76636356", "0.7660287", "0.76563466", "0.7650036", "0.7632782", "0.7623421", "0.7593197", "0.75694746", "0.7528185", "0.75127894", "0.74460566", "0.74072576", "0.7406488", "0.7360239", "0.7345988", "0.73428905", "0.7277139", "0.72620976", "0.7256967", "0.7224864", "0.7202814", "0.712955", "0.71092063", "0.71038646", "0.71020657", "0.7035897", "0.7035661", "0.7032307", "0.70279276", "0.7025775", "0.7025516", "0.7024739", "0.7012538", "0.7009948", "0.7005104", "0.69969666", "0.69926035", "0.6991633", "0.69729054", "0.69659865", "0.6924125", "0.6906126", "0.6904268", "0.6904268", "0.69007754", "0.6900121", "0.6899146", "0.68946123", "0.6880722", "0.6878238", "0.68367666", "0.68351126", "0.6832217", "0.6830393", "0.682967", "0.68215555", "0.6817338", "0.68112814", "0.6811187", "0.68100256", "0.68042034", "0.6802787", "0.68013054", "0.68009984", "0.68003404", "0.6796537", "0.6780024", "0.67731106", "0.6769866", "0.6766985", "0.67657477", "0.67611605", "0.67554295", "0.6747587", "0.6747144", "0.67451936", "0.6731748", "0.6730248", "0.6724113", "0.6710943", "0.669867" ]
0.68754745
69
Ensure that the activation email is sent when user signs up.
def test_create_user_activation_email(self):
    data = {
        'username': 'John',
        'email': '[email protected]',
        'password': 'test123!',
        'phone': '1234567890',
        'first_name': 'Chuck',
        'last_name': 'Norris',
        'university': {
            "name": "random_university"
        },
        'academic_field': {'name': "random_field"},
        'academic_level': {'name': "random_level"},
        'gender': "M",
        'birthdate': "1999-11-11",
    }

    response = self.client.post(
        reverse('user-list'),
        data,
        format='json',
    )

    self.assertEqual(response.status_code, status.HTTP_201_CREATED)
    self.assertEqual(json.loads(response.content)['phone'], '1234567890')

    user = User.objects.get(email="[email protected]")
    activation_token = ActionToken.objects.filter(
        user=user,
        type='account_activation',
    )

    self.assertFalse(user.is_active)
    self.assertEqual(1, len(activation_token))

    # Test that one message was sent:
    self.assertEqual(len(mail.outbox), 1)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def signup(request):\n if request.method == 'POST':\n form = SignUpForm(request.POST)\n if form.is_valid():\n user = form.save(commit=False)\n user.is_active = False\n user.save()\n current_site = get_current_site(request)\n subject = 'Activate Your neighwatch Account'\n message = render_to_string('registration/activation_email.html', {\n 'user': user,\n 'domain': current_site.domain,\n 'uid': urlsafe_base64_encode(force_bytes(user.pk)),\n 'token': account_activation_token.make_token(user),\n })\n user.email_user(subject, message)\n return redirect('account_activation_sent')\n else:\n form = SignUpForm()\n return render(request, 'registration/registration_form.html', {'form': form})", "def test_activation_email_uses_registration_default_from_email(self):\n new_user = UserModel().objects.create_user(**self.user_info)\n profile = self.registration_profile.objects.create_profile(new_user)\n profile.send_activation_email(Site.objects.get_current())\n self.assertEqual(mail.outbox[0].from_email, '[email protected]')", "def test_activation_email(self):\n new_user = UserModel().objects.create_user(**self.user_info)\n profile = self.registration_profile.objects.create_profile(new_user)\n profile.send_activation_email(Site.objects.get_current())\n self.assertEqual(len(mail.outbox), 1)\n self.assertEqual(mail.outbox[0].to, [self.user_info['email']])", "def test_activation_email_missing_template(self):\n new_user = UserModel().objects.create_user(**self.user_info)\n profile = self.registration_profile.objects.create_profile(new_user)\n profile.send_activation_email(Site.objects.get_current())\n self.assertEqual(len(mail.outbox), 1)\n self.assertEqual(mail.outbox[0].to, [self.user_info['email']])", "def activate_user(self, email):\r\n activation_key = Registration.objects.get(user__email=email).activation_key\r\n # and now we try to activate\r\n check_for_get_code(self, 200, reverse('activate', kwargs={'key': activation_key}))\r\n # Now make sure that the user is now actually activated\r\n self.assertTrue(User.objects.get(email=email).is_active)", "def test_activation_email_falls_back_to_django_default_from_email(self):\n new_user = UserModel().objects.create_user(**self.user_info)\n profile = self.registration_profile.objects.create_profile(new_user)\n profile.send_activation_email(Site.objects.get_current())\n self.assertEqual(mail.outbox[0].from_email, '[email protected]')", "def verify(self):\n ACTIVATION_PERIOD = datetime.timedelta(days=14)\n if not self.org_verified:\n self.org_verified = True\n if not self.is_active:\n if not self.activation_code:\n self.activation_code = random_url_safe_code()\n self.activate_by = datetime.datetime.utcnow() + ACTIVATION_PERIOD\n import messaging # avoid circular import\n messaging.send_activation_emails(self)\n self.save()", "def test_resend_activation_email_activated_user(self):\n user = self.registration_profile.objects.create_inactive_user(\n site=Site.objects.get_current(), send_email=False, **self.user_info)\n\n profile = self.registration_profile.objects.get(user=user)\n user, activated = self.registration_profile.objects.activate_user(\n profile.activation_key, Site.objects.get_current())\n self.assertTrue(user.is_active)\n self.assertTrue(activated)\n\n self.assertFalse(self.registration_profile.objects.resend_activation_mail(\n email=self.user_info['email'],\n site=Site.objects.get_current(),\n ))\n self.assertEqual(len(mail.outbox), 0)", "def account_activation_sent(request):\n current_user = request.user\n if current_user.is_authenticated():\n 
return HttpResponseRedirect('/')\n return render(request, 'registration/activation_complete.html')", "def activate(self):\n if not self.is_active:\n self.is_active = True\n self.activated_at = datetime.datetime.utcnow()\n import messaging # avoid circular import\n messaging.send_activated_emails(self)\n self.save()", "def test_activation_email_is_html_by_default(self):\n new_user = UserModel().objects.create_user(**self.user_info)\n profile = self.registration_profile.objects.create_profile(new_user)\n profile.send_activation_email(Site.objects.get_current())\n\n self.assertEqual(len(mail.outbox[0].alternatives), 1)", "def test_resend_activation_email_activated_user(self):\n user = self.registration_profile.objects.create_inactive_user(\n site=Site.objects.get_current(), send_email=False, **self.user_info)\n\n profile = self.registration_profile.objects.get(user=user)\n user, activated = self.registration_profile.objects.activate_user(\n profile.activation_key, Site.objects.get_current())\n self.assertFalse(user.is_active)\n self.assertTrue(activated)\n\n self.assertFalse(self.registration_profile.objects.resend_activation_mail(\n email=self.user_info['email'],\n site=Site.objects.get_current(),\n ))\n # Outbox has one mail, admin approve mail\n\n self.assertEqual(len(mail.outbox), 1)\n admins_emails = [value[1] for value in settings.REGISTRATION_ADMINS]\n for email in mail.outbox[0].to:\n self.assertIn(email, admins_emails)", "def test_activation_email_uses_site_address(self):\n new_user = UserModel().objects.create_user(**self.user_info)\n profile = self.registration_profile.objects.create_profile(new_user)\n site = Site.objects.get_current()\n profile.send_activation_email(site)\n from_email = 'admin@{}'.format(site.domain)\n self.assertEqual(mail.outbox[0].from_email, from_email)", "def form_valid(self, form):\n # Switching between temporary registration and main registration is easy with the is_active attribute.\n # The withdrawal process will also improve if you only set is_active to False.\n user = form.save(commit=False)\n user.is_active = False\n user.save()\n\n # Send activation URL\n current_site = get_current_site(self.request)\n domain = current_site.domain\n context = {\n 'protocol': 'https' if self.request.is_secure() else 'http',\n 'domain': domain,\n 'token': dumps(user.pk),\n 'user': user,\n }\n\n subject = render_to_string('register/mail_template/create/subject.txt', context)\n message = render_to_string('register/mail_template/create/message.txt', context)\n\n user.email_user(subject, message)\n return redirect('register:user_create_done')", "def confirm_email(self):\n self.active = True\n self.save()", "def test_activation_email_uses_site_address_improperly_configured(self):\n new_user = UserModel().objects.create_user(**self.user_info)\n profile = self.registration_profile.objects.create_profile(new_user)\n with self.assertRaises(ImproperlyConfigured):\n profile.send_activation_email(Site.objects.get_current())", "def test_admin_approval_email_uses_registration_default_from_email(self):\n new_user = UserModel().objects.create_user(**self.user_info)\n profile = self.registration_profile.objects.create_profile(new_user)\n profile.activated = True\n self.registration_profile.objects.send_admin_approve_email(\n new_user, Site.objects.get_current())\n self.assertEqual(mail.outbox[0].from_email, '[email protected]')", "def test_resend_activation_email_nonexistent_user(self):\n self.assertFalse(self.registration_profile.objects.resend_activation_mail(\n 
email=self.user_info['email'],\n site=Site.objects.get_current(),\n ))\n self.assertEqual(len(mail.outbox), 0)", "def testSignupWorks(self):\r\n email = u'[email protected]'\r\n UserMgr.signup_user(email, u'testcase')\r\n\r\n activations = Activation.query.all()\r\n\r\n self.assertTrue(len(activations) == 1)\r\n act = activations[0]\r\n\r\n self.assertEqual(\r\n email,\r\n act.user.email,\r\n \"The activation email is the correct one.\")", "def send_confirmation(self):\r\n c.user.email_validated = False\r\n c.user.confirmation_code = random_key(6)\r\n c.user._commit()\r\n emailer.confirmation_email(c.user)", "def test_resend_activation_email(self):\n user = self.registration_profile.objects.create_inactive_user(\n site=Site.objects.get_current(), send_email=False, **self.user_info)\n self.assertEqual(len(mail.outbox), 0)\n\n profile = self.registration_profile.objects.get(user=user)\n orig_activation_key = profile.activation_key\n\n self.assertTrue(self.registration_profile.objects.resend_activation_mail(\n email=self.user_info['email'],\n site=Site.objects.get_current(),\n ))\n\n profile = self.registration_profile.objects.get(pk=profile.pk)\n new_activation_key = profile.activation_key\n\n self.assertNotEqual(orig_activation_key, new_activation_key)\n self.assertEqual(len(mail.outbox), 1)", "def _activate_user(self, email):\r\n activation_key = registration(email).activation_key\r\n\r\n # and now we try to activate\r\n resp = self.client.get(reverse('activate', kwargs={'key': activation_key}))\r\n return resp", "def send_activation_email(self):\n ctx_dict = {\n 'activation_key': self.activation_key,\n 'expiration_days': settings.ACCOUNT_ACTIVATION_DAYS,\n 'user': self.user,\n 'SITE_URL': settings.SITE_URL,\n }\n subject = render_to_string('accounts/activation_email_subject.txt', ctx_dict)\n # Email subject *must not* contain newlines\n subject = ''.join(subject.splitlines())\n \n message = render_to_string('accounts/activation_email_body.html', ctx_dict)\n\n msg = EmailMultiAlternatives(subject, message, None, [self.user.email])\n msg.attach_alternative(message, \"text/html\")\n msg.send()", "def signup_active(request, uidb36=None, token=None,\n post_activation_redirect=None,\n token_generator=default_token_generator,\n domain_override=None, use_https=False):\n assert uidb36 is not None and token is not None\n if post_activation_redirect is None:\n post_activation_redirect = reverse('amscms.core.views.signup_active_done')\n try:\n uid_int = base36_to_int(uidb36)\n user = User.objects.get(id=uid_int)\n except (ValueError, User.DoesNotExists):\n user = None\n \n if user is not None and token_generator.check_token(user, token):\n user.is_active = True\n user.save()\n \"\"\"\n Sends successful email to the user. \n \"\"\"\n if not domain_override:\n current_site = Site.objects.get_current()\n site_name = current_site.name\n domain = current_site.domain\n else:\n site_name = domain = domain_override\n c = {\n 'subject': _(u\"Registration was successful on %(site_name)s\" % {'site_name': site_name, }),\n 'site_name': site_name,\n 'user': user,\n 'domain': domain,\n 'protocol': use_https and 'https' or 'http',\n 'login_url': reverse('django.contrib.auth.views.login'),\n }\n send_email(user.email, c, settings.DEFAULT_FROM_EMAIL,\n \"registration/signup_email_activated.txt\",\n \"registration/signup_email_activated.html\")\n \n else:\n messages.error(request, _(u\"Invalid activation link, you may already activated, try to login. 
\"))\n return HttpResponseRedirect(\"/\")\n return HttpResponseRedirect(post_activation_redirect)", "def test_admin_approval_complete_email_uses_registration_default_from_email(self):\n new_user = UserModel().objects.create_user(**self.user_info)\n profile = self.registration_profile.objects.create_profile(new_user)\n profile.send_admin_approve_complete_email(Site.objects.get_current())\n self.assertEqual(len(mail.outbox), 1)\n self.assertEqual(mail.outbox[0].from_email, '[email protected]')", "def activate(self):\r\n if self.activation_code == '':\r\n raise ValidationError('The member is already activated')\r\n signer = TimestampSigner()\r\n signer.unsign(self.activation_code, max_age=timedelta(days=2))\r\n self.hidden = False\r\n self.activation_code = ''\r\n self.joined_date = timezone.now()\r\n self.save()", "def email_signup_user(email, msg, settings, message_data):\r\n from bookie.lib.message import ActivationMsg\r\n msg = ActivationMsg(email, msg, settings)\r\n status = msg.send(message_data)\r\n if status == 4:\r\n from bookie.lib.applog import SignupLog\r\n trans = transaction.begin()\r\n SignupLog(SignupLog.ERROR,\r\n 'Could not send smtp email to signup: ' + email)\r\n trans.commit()", "def signup_process(request):\r\n params = request.params\r\n email = params.get('email', None)\r\n\r\n if not email:\r\n # if still no email, I give up!\r\n return {\r\n 'errors': {\r\n 'email': 'Please supply an email address to sign up.'\r\n }\r\n }\r\n else:\r\n email = email.lower()\r\n\r\n # first see if the user is already in the system\r\n exists = UserMgr.get(email=email)\r\n if exists:\r\n return {\r\n 'errors': {\r\n 'email': 'The user has already signed up.'\r\n }\r\n }\r\n\r\n new_user = UserMgr.signup_user(email, 'signup')\r\n if new_user:\r\n # then this user is able to invite someone\r\n # log it\r\n AuthLog.reactivate(new_user.username)\r\n\r\n # and then send an email notification\r\n # @todo the email side of things\r\n settings = request.registry.settings\r\n\r\n # Add a queue job to send the user a notification email.\r\n tasks.email_signup_user.delay(\r\n new_user.email,\r\n \"Enable your Bookie account\",\r\n settings,\r\n request.route_url(\r\n 'reset',\r\n username=new_user.username,\r\n reset_key=new_user.activation.code\r\n )\r\n )\r\n\r\n # And let the user know they're signed up.\r\n return {\r\n 'message': 'Thank you for signing up from: ' + new_user.email\r\n }\r\n else:\r\n return {\r\n 'errors': {\r\n 'email': 'There was an unknown error signing up.'\r\n }\r\n }", "def test_resend_activation_email_expired_user(self):\n new_user = self.registration_profile.objects.create_inactive_user(\n site=Site.objects.get_current(), send_email=False, **self.user_info)\n new_user.date_joined -= datetime.timedelta(\n days=settings.ACCOUNT_ACTIVATION_DAYS + 1)\n new_user.save()\n\n profile = self.registration_profile.objects.get(user=new_user)\n self.assertTrue(profile.activation_key_expired())\n\n self.assertFalse(self.registration_profile.objects.resend_activation_mail(\n email=self.user_info['email'],\n site=Site.objects.get_current(),\n ))\n self.assertEqual(len(mail.outbox), 0)", "def new_user_form_valid(self, form):\n new_user = form.save()\n new_user.set_password(form.cleaned_data[\"password\"])\n\n h = hashlib.sha1()\n h.update(str(random.random()).encode('utf-8'))\n salt = h.hexdigest()[:5]\n\n h = hashlib.sha1()\n text = salt+new_user.name\n h.update(text.encode('utf-8'))\n\n new_user.activation_key = h.hexdigest()\n new_user.save()\n\n subject = \"Your Work Schedule: 
Confirm registration\"\n text = (\n \"\"\"Hi {}, \\n please confirm Your registration by clicking or\n copy-past this link \\n {}/user_account/activate/{}/ \\n\n Please confirm with in 48 houers. Thank You for using our app.\n \\n Your Sandbox Team\n \"\"\".format(new_user.name, HOST_NAME, new_user.activation_key))\n\n send_mail(\n subject,\n text,\n EMAIL_HOST_USER,\n [new_user.email],\n fail_silently=False\n )\n return HttpResponseRedirect(self.get_success_url())", "def test_user_creation_email(self):\n self.registration_profile.objects.create_inactive_user(\n site=Site.objects.get_current(), **self.user_info)\n self.assertEqual(len(mail.outbox), 1)", "def test_admin_approval_email_falls_back_to_django_default_from_email(self):\n new_user = UserModel().objects.create_user(**self.user_info)\n profile = self.registration_profile.objects.create_profile(new_user)\n profile.activated = True\n self.registration_profile.objects.send_admin_approve_email(\n new_user, Site.objects.get_current())\n self.assertEqual(mail.outbox[0].from_email, '[email protected]')", "def test_resend_activation_email_nonunique_email(self):\n user1 = self.registration_profile.objects.create_inactive_user(\n site=Site.objects.get_current(), send_email=False, **self.user_info)\n user2_info = copy(self.user_info)\n user2_info['username'] = 'bob'\n user2 = self.registration_profile.objects.create_inactive_user(\n site=Site.objects.get_current(), send_email=False, **user2_info)\n self.assertEqual(user1.email, user2.email)\n self.assertFalse(self.registration_profile.objects.resend_activation_mail(\n email=self.user_info['email'],\n site=Site.objects.get_current(),\n ))\n self.assertEqual(len(mail.outbox), 0)", "def user_activation(user):\n act_hash = random_password(32)\n user.set_hashword(act_hash)\n user.save()\n base_url = url_for('public.home', _external=True)\n act_url = url_for(\n 'auth.activate',\n userid=user.id,\n userhash=act_hash,\n _external=True)\n if not 'mailman' in current_app.extensions:\n logging.warning('E-mail extension has not been configured')\n return act_hash\n msg = EmailMessage()\n msg.subject = 'Your dribdat account'\n msg.body = \\\n \"Hello %s,\\n\" % user.username \\\n + \"Thanks for signing up at %s\\n\\n\" % base_url \\\n + \"Tap here to activate your account:\\n\\n%s\" % act_url\n msg.to = [user.email]\n logging.info('Sending activation mail to user %d' % user.id)\n logging.debug(act_url)\n msg.send(fail_silently=True)\n return act_hash", "def account_activation_sent(request):\r\n\treturn render(request, 'account_activation_sent.html')", "def test_admin_approval_complete_email(self):\n new_user = UserModel().objects.create_user(**self.user_info)\n profile = self.registration_profile.objects.create_profile(new_user)\n profile.send_admin_approve_complete_email(Site.objects.get_current())\n self.assertEqual(len(mail.outbox), 1)\n self.assertEqual(mail.outbox[0].to, [self.user_info['email']])", "def confirm_email(self):\n # The base class' implementation does nothing\n pass", "def perform_create(self, serializer):\n user = serializer.save()\n signals.user_registered.send(\n sender=self.__class__, user=user, request=self.request\n )\n\n context = get_email_context(user)\n to = [get_user_email(user)]\n if djconf.SEND_ACTIVATION_EMAIL:\n djconf.EMAIL.activation(self.request, context).send(to)\n elif djconf.SEND_CONFIRMATION_EMAIL:\n djconf.EMAIL.confirmation(self.request, context).send(to)", "def send_verification_email(self, request, *args, **kwargs):\n verified_key_text = getattr(settings, 
\"VERIFIED_KEY_TEXT\", None)\n if not verified_key_text:\n return Response(status=status.HTTP_204_NO_CONTENT)\n\n username = request.data.get(\"username\")\n redirect_url = request.data.get(\"redirect_url\")\n response_message = _(\"Verification email has NOT been sent\")\n\n if username:\n try:\n registration_profile = RegistrationProfile.objects.get(\n user__username=username\n )\n except RegistrationProfile.DoesNotExist:\n pass\n else:\n user = registration_profile.user\n set_is_email_verified(user.profile, False)\n\n verification_key = registration_profile.activation_key\n if verification_key == verified_key_text:\n verification_key = (\n user.registrationprofile.create_new_activation_key()\n )\n\n verification_url = get_verification_url(\n redirect_url, request, verification_key\n )\n\n email_data = get_verification_email_data(\n user.email,\n user.username,\n verification_url,\n request,\n )\n\n send_verification_email.delay(**email_data)\n response_message = _(\"Verification email has been sent\")\n\n return Response(response_message)\n\n return HttpResponseBadRequest(response_message)", "def validation_email_sent(request):\n assert(settings.EMAIL_VALIDATION == True)\n logging.debug('')\n data = {\n 'email': request.user.email,\n 'change_email_url': reverse('user_changeemail'),\n 'action_type': 'validate'\n }\n return render_to_response('authenticator/changeemail.html', RequestContext(request, data))", "def test_admin_approval_complete_email_falls_back_to_django_default_from_email(self):\n new_user = UserModel().objects.create_user(**self.user_info)\n profile = self.registration_profile.objects.create_profile(new_user)\n profile.send_admin_approve_complete_email(Site.objects.get_current())\n self.assertEqual(len(mail.outbox), 1)\n self.assertEqual(mail.outbox[0].from_email, '[email protected]')", "def confirm_email(self, request, email_address):\n email_address.verified = True\n email_address.set_as_primary(conditional=True)\n email_address.save()\n\n u = get_user_model().objects.get(pk=email_address.user.id)\n u.is_active = True\n u.save()", "def save(self, *args, **kwargs):\n if not self.require_confirm_email:\n User.objects.filter(is_active=False, deactivation_reason=\"pending\").update(\n is_active=True, deactivation_reason=None\n )\n if not self.invite_question_text:\n self.invite_question_text = \"What is your favourite book?\"\n super().save(*args, **kwargs)", "def testInitialUserInactivated(self):\r\n u = User()\r\n u.email = gen_random_word(10)\r\n DBSession.add(u)\r\n\r\n self.assertEqual(\r\n False,\r\n u.activated,\r\n 'A new signup should start out deactivated by default')\r\n self.assertTrue(\r\n u.activation.code is not None,\r\n 'A new signup should start out as deactivated')\r\n self.assertEqual(\r\n 'signup',\r\n u.activation.created_by,\r\n 'This is a new signup, so mark is as thus')", "def envoie_activation_compte(request):\n # Vérification connexion utilisateur\n user = AuxilliariesUser().get_user(request)\n if user:\n # Utilisateur connecté\n activation_key = \"\".join(\n [\n random.choice(\n \"0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ\"\n )\n for _ in range(24)\n ]\n )\n user.cle_dactivation_de_compte = activation_key\n user.save()\n sent_mail_statut = AuxilliariesAuthentification().send_mail(\n \"activation_account\", user\n )\n return redirect(\"../../user/home/\")\n else:\n # Utilisateur non connecté\n raise Http404()", "def test_user_creation_no_email(self):\n self.registration_profile.objects.create_inactive_user(\n 
site=Site.objects.get_current(),\n send_email=False, **self.user_info)\n self.assertEqual(len(mail.outbox), 0)", "def activate(request, activation_key, template_name='registration/activate.html'):\n activation_key = activation_key.lower() # Normalize before trying anything with it.\n account = RegistrationProfile.objects.activate_user(activation_key)\n account.is_active = True\n account.save()\n return render(request, template_name,\n { 'account': account,\n 'expiration_days': settings.ACCOUNT_ACTIVATION_DAYS })", "def send_signup_notification(self):\n return self._send_signup_notification", "def send_activation_email(self, user):\n\t\tactivation_key = self.get_activation_key(user)\n\t\tcontext = self.get_email_context(activation_key)\n\t\tcontext.update({\n\t\t\t'user': user\n\t\t})\n\t\tsubject = render_to_string(self.email_subject_template,\n\t\t\t\t\t\t\t\t context)\n\t\t# Force subject to a single line to avoid header-injection\n\t\t# issues.\n\t\tsubject = ''.join(subject.splitlines())\n\t\tmessage = render_to_string(self.email_body_template,\n\t\t\t\t\t\t\t\t context)\n\t\tuser.email_user(subject, message, settings.DEFAULT_FROM_EMAIL)", "def login_on_activation(sender, user, request, **kwargs):\n user.backend = 'storybase_user.auth.backends.EmailModelBackend'\n login(request, user)", "def test_no_admins_registered(self):\n new_user = self.registration_profile.objects.create_inactive_user(\n site=Site.objects.get_current(), **self.user_info)\n\n with self.assertRaises(ImproperlyConfigured):\n self.registration_profile.objects.send_admin_approve_email(\n new_user, Site.objects.get_current())", "def send_confirmation_email(self, registration_profile,\n text_template=None, html_template=None,\n subject=None, email_data=None, **kwargs):\n user_id = registration_profile.user.id\n key = registration_profile.activation_key\n self._send_email(\n confirmation_profile=registration_profile,\n url=reverse('users.activate', args=[user_id, key]),\n subject=subject or _('Please confirm your email address'),\n text_template=text_template or 'users/email/activate.ltxt',\n html_template=html_template or 'users/email/activate.html',\n send_to=registration_profile.user.email,\n expiration_days=settings.ACCOUNT_ACTIVATION_DAYS,\n username=registration_profile.user.username,\n email_data=email_data,\n **kwargs)", "def activate_user(self, activation_key, request=None):\n # Make sure the key we're trying conforms to the pattern of a\n # SHA1 hash; if it doesn't, no point trying to look it up in\n # the database.\n if SHA1_RE.search(activation_key):\n try:\n profile = self.get(activation_key=activation_key)\n except self.model.DoesNotExist:\n profile = None\n statsd.incr('user.activate-error.does-not-exist')\n reason = 'key not found'\n if profile:\n if not profile.activation_key_expired():\n user = profile.user\n user.is_active = True\n user.save()\n\n # We don't need the RegistrationProfile anymore, delete it.\n profile.delete()\n\n # If user registered as contributor, send them the\n # welcome email.\n if user.groups.filter(name=CONTRIBUTOR_GROUP):\n self._send_email(\n confirmation_profile=profile,\n url=None,\n subject=_('Welcome to SUMO!'),\n text_template='users/email/contributor.ltxt',\n html_template='users/email/contributor.html',\n send_to=user.email,\n contributor=user)\n\n return user\n else:\n statsd.incr('user.activate-error.expired')\n reason = 'key expired'\n else:\n statsd.incr('user.activate-error.invalid-key')\n reason = 'invalid key'\n\n log.warning(u'User activation failure ({r}): 
{k}'.format(\n r=reason, k=activation_key))\n\n return False", "def on_start(self):\n # self.signup()", "def assertReactivateEmailSent(self, email_user):\r\n context = {\r\n 'name': self.user.profile.name,\r\n 'key': self.registration.activation_key\r\n }\r\n\r\n self.assertEmailUser(\r\n email_user,\r\n 'emails/activation_email_subject.txt',\r\n context,\r\n 'emails/activation_email.txt',\r\n context\r\n )\r\n\r\n # Thorough tests for safe_get_host are elsewhere; here we just want a quick URL sanity check\r\n request = RequestFactory().post('unused_url')\r\n request.META['HTTP_HOST'] = \"aGenericValidHostName\"\r\n self.append_allowed_hosts(\"aGenericValidHostName\")\r\n\r\n body = render_to_string('emails/activation_email.txt', context)\r\n host = safe_get_host(request)\r\n\r\n self.assertIn(host, body)", "def create_email_confirmation(self, trigger_email=True):\n EmailConfirmation.objects.create(user=self,\n email_vc=hexlify(os.urandom(5)),\n email_vc_expiry=datetime.datetime.utcnow().replace(tzinfo=utc) +\n datetime.timedelta(hours=3))", "def signup(request):\r\n\tif request.user.is_authenticated:\r\n\t\t# Redirect user to home if already logged in\r\n\t\tgames = Game.objects.all()\r\n\t\treturn redirect('/', {'games': games, 'MEDIA_URL': settings.MEDIA_URL})\r\n\tif request.method == 'POST':\r\n\t\tform = SignUpForm(request.POST)\r\n\t\tif form.is_valid():\r\n\t\t\tuser = form.save()\r\n\t\t\tuser.refresh_from_db() # Retreive the newly saved object\r\n\t\t\tuser.is_active = False\r\n\t\t\tuser.profile.is_developer = form.cleaned_data.get('is_developer')\r\n\t\t\tuser.save()\r\n\t\t\t# Get current domain name and generate the user token\r\n\t\t\tcurrent_site = get_current_site(request)\r\n\t\t\tencodeded_uid = urlsafe_base64_encode(force_bytes(user.pk))\r\n\r\n\t\t\t# Create email subject and body\r\n\t\t\tsubject = 'Activate Your PlayMe Account'\r\n\t\t\tmessage = render_to_string('account_activation_email.html', {\r\n\t\t\t\t'user': user,\r\n\t\t\t\t'domain': current_site.domain,\r\n\t\t\t\t'uid': encodeded_uid.decode('utf-8'),\r\n\t\t\t\t'token': account_activation_token.make_token(user),\r\n\t\t\t})\r\n\t\t\tuser.email_user(subject, message)\r\n\t\t\treturn redirect('account_activation_sent')\r\n\telse:\r\n\t\tform = SignUpForm()\r\n\treturn render(request, 'registration/signup.html', {'form': form})", "def test_activate_user(self):\n activated_user = (RegistrationProfile.objects\n .activate_user(self.activation_key))\n self.assertTrue(activated_user.registrationprofile.activated)\n self.assertFalse(activated_user.is_active)", "def post(self):\n requestData = request.form\n\n # Grab username and password from request\n # Generate a hash from password so its not stored in plaintext\n username = requestData['username']\n pwdhash = generate_password_hash(requestData['password'])\n\n # Check if user with given username already exists\n user = User.query.filter_by(username=username).first()\n\n # If not, create a new user and redirect to login page\n if user is None:\n try:\n user = User(username=username, pwdhash=pwdhash)\n except AssertionError:\n flash('Forbidden character detected in username', 'warning')\n return redirect(url_for('page.RegisterView:index'))\n db.session.add(user)\n db.session.commit()\n print user\n print user.get_activation_link()\n flash(\"\"\"\n We\\'ve sent you an email. 
Please click the link in the\n email to complete the creation of your account.\n \"\"\", 'info')\n link = user.get_activation_link()\n body = render_template(\"email.html\", link=link)\n self.send_email('Account activation',\n '[email protected]',\n [username], body)\n return redirect(url_for('page.LoginView:index'))\n\n # Otherwise show error message\n flash('Username already taken', 'info')\n return redirect(url_for('page.RegisterView:index'))", "def confirm_registration_view(request):\n user_email_token = request.matchdict['email_confirm']\n non_active_user = User.get_one(request, url_token=user_email_token)\n if non_active_user is None:\n return {\"msg\": \"Error404 HTTPNotFound\"}\n else:\n non_active_user.status_id = UserStatus\\\n .get_user_by_status(request, status=\"Active\").id\n non_active_user.role_id = Role.get_role(request, role=\"user\").id\n non_active_user.url_token = None\n return {\"msg\": \"Your email address is confirmed\"}", "def test_admin_approval_email(self):\n new_user = UserModel().objects.create_user(**self.user_info)\n profile = self.registration_profile.objects.create_profile(new_user)\n profile.activated = True\n self.registration_profile.objects.send_admin_approve_email(\n new_user, Site.objects.get_current())\n self.assertEqual(len(mail.outbox), 1)\n admins_emails = [value[1] for value in settings.REGISTRATION_ADMINS]\n for email in mail.outbox[0].to:\n self.assertIn(email, admins_emails)", "def send_activation_email(self, user):\n activation_key = self.get_activation_key(user)\n context = self.get_email_context(activation_key)\n context[\"user\"] = user\n subject = render_to_string(\n template_name=self.email_subject_template,\n context=context,\n request=self.request,\n )\n # Force subject to a single line to avoid header-injection\n # issues.\n subject = \"\".join(subject.splitlines())\n message = render_to_string(\n template_name=self.email_body_template,\n context=context,\n request=self.request,\n )\n user.email_user(subject, message, settings.DEFAULT_FROM_EMAIL)", "def test_activation_already_activated(self):\n new_user = self.registration_profile.objects.create_inactive_user(\n site=Site.objects.get_current(), **self.user_info)\n profile = self.registration_profile.objects.get(user=new_user)\n self.registration_profile.objects.activate_user(\n profile.activation_key, Site.objects.get_current())\n\n profile = self.registration_profile.objects.get(user=new_user)\n _, activated = self.registration_profile.objects.activate_user(\n profile.activation_key, Site.objects.get_current())\n self.assertFalse(activated)", "def test_resend_activation_email(self):\n\n data = {\n 'email': self.user.email,\n }\n\n response = self.client.post(\n reverse('user-resend-activation-email'),\n data,\n format='json',\n )\n\n self.assertEqual(\n response.status_code,\n status.HTTP_200_OK,\n response.content\n )\n\n self.assertEqual(\n response.content,\n b'',\n )\n\n self.assertEqual(len(mail.outbox), 1)", "def clean_email(self):\n e = self.cleaned_data['email']\n try:\n user = User.objects.get(email=e)\n if not user.is_active:\n msg = 'This user account has not been confirmed yet'\n raise forms.ValidationError(msg)\n except User.DoesNotExist:\n pass\n # msg = 'This email is not associated with an account'\n # raise forms.ValidationError(msg)\n return e", "def test_activation(self):\n reg_profile = RegisterProfile.objects.create_profile(\n 'TestName', '[email protected]', 'asdf1234')\n\n kwargs = {'activation_key': reg_profile.activation_key}\n response = 
self.client.post(reverse('users.activate', kwargs=kwargs))\n eq_(200, response.status_code)\n\n # Test relations\n u = User.objects.get(email='[email protected]')\n eq_(u.get_profile().display_name, 'TestName')", "def send_verify_email(self, redirect_to):\n if not self.user_in_db:\n self.user_in_db = User.users_db.get(self.email)\n if not self.user_in_db:\n # User does not exist\n return\n\n if self.user_in_db['verified']:\n return\n\n if not self.user_in_db['secret_token']:\n self.user_in_db['secret_token'] = secrets.token_hex(12)\n User.users_db.put(self.user_in_db)\n\n token = manage_tokens.encode({\n 'secret_token': self.user_in_db['secret_token'],\n 'redirect_to': redirect_to,\n })\n\n email_sender.welcome(self.email, token)", "def activate(request, activation_key,template_name='registration/activate.html',extra_context=None):\n\tactivation_key = activation_key.lower() # Normalize before trying anything with it.\n\taccount = RegistrationProfile.objects.activate_user(activation_key)\n\t\n\t\n\t#new profile PROBLEME NON ENREGISTREMENT DU PROFILE\n\t#recuperer l user id de l'account user.id\n\tprofile = UserProfile();\n\tprofile.user = account\n\tprofile.save()\n\t\n\t\n\tif extra_context is None:\n\t\textra_context = {}\n\tcontext = RequestContext(request)\n\tfor key, value in extra_context.items():\n\t\tcontext[key] = callable(value) and value() or value\n\treturn render_to_response(template_name,{ 'account': account,'expiration_days': settings.ACCOUNT_ACTIVATION_DAYS }, context_instance=context)", "def form_valid(self, form):\n\n ModelAccountVerification.objects.send_verification_email(form.user,\n verification_type=\n ModelAccountVerification.VERIFICATION_TYPES[1][0])\n\n messages.info(self.request, \"Email sent successfully to the {0}\".format(form.user.email))\n\n return HttpResponseRedirect(self.success_url)", "def test_valid_activation(self):\n new_user = self.registration_profile.objects.create_inactive_user(\n site=Site.objects.get_current(), **self.user_info)\n profile = self.registration_profile.objects.get(user=new_user)\n user, activated = self.registration_profile.objects.activate_user(\n profile.activation_key, Site.objects.get_current())\n\n self.assertIsInstance(user, UserModel())\n self.assertEqual(user.id, new_user.id)\n self.assertFalse(user.is_active)\n self.assertTrue(activated)\n\n profile = self.registration_profile.objects.get(user=new_user)\n self.assertTrue(profile.activated)", "def send_confirmation_email(user_pk):\n pass", "def test_user_activation(self):\n user = User.objects.get()\n response = self.client.get(reverse('accounts:user-activate',\n kwargs={'uidb64': urlsafe_base64_encode(force_bytes(user.pk)),\n 'token': account_activation_token.make_token(user)}))\n self.assertEqual(response.status_code, status.HTTP_200_OK)", "def test_activation_email_is_plain_text_if_html_disabled(self):\n new_user = UserModel().objects.create_user(**self.user_info)\n profile = self.registration_profile.objects.create_profile(new_user)\n profile.send_activation_email(Site.objects.get_current())\n\n self.assertEqual(len(mail.outbox[0].alternatives), 0)", "def _send_registration_email(request, user, acct_type):\n current_site = get_current_site(request)\n subject = \"Activate your PuPPy Mentorship Account\"\n\n uid = urlsafe_base64_encode(force_bytes(user.pk))\n activation_token = account_activation_token.make_token(user)\n\n url_token = uid.decode('utf-8') + '/' + activation_token\n\n message = render_to_string(\n 'mentorship_profile/activation_email.html', {\n \"user\": 
user,\n \"domain\": current_site.domain,\n \"account_type\": acct_type,\n \"url_token\": url_token\n }\n )\n user.email_user(subject, message)", "def activate(request, uidb64, token):\n try:\n uid = force_text(urlsafe_base64_decode(uidb64))\n user = User.objects.get(pk=uid)\n except (TypeError, ValueError, OverflowError, User.DoesNotExist):\n user = None\n\n if user is not None and account_activation_token.check_token(user, token):\n user.is_active = True\n user.profile.email_confirmed = True\n user.save()\n login(request, user)\n return redirect('home')\n else:\n return render(request, 'registration/activation_invalid.html')", "def test_valid_activation(self):\n new_user = self.registration_profile.objects.create_inactive_user(\n site=Site.objects.get_current(), **self.user_info)\n profile = self.registration_profile.objects.get(user=new_user)\n user, activated = self.registration_profile.objects.activate_user(\n profile.activation_key, Site.objects.get_current())\n\n self.assertIsInstance(user, UserModel())\n self.assertEqual(user.id, new_user.id)\n self.assertTrue(user.is_active)\n self.assertTrue(activated)\n\n profile = self.registration_profile.objects.get(user=new_user)\n self.assertTrue(profile.activated)", "def notify_account_registration(request, user, uidb64, token, sso=False):\n # Send an email with the activation link\n subject = f\"{settings.SITE_NAME} Account Activation\"\n context = {\n 'name': user.get_full_name(),\n 'domain': get_current_site(request),\n 'url_prefix': get_url_prefix(request),\n 'uidb64': uidb64,\n 'token': token,\n 'sso': sso,\n 'SITE_NAME': settings.SITE_NAME,\n }\n body = loader.render_to_string('user/email/register_email.html', context)\n # Not resend the email if there was an integrity error\n send_mail(subject, body, settings.DEFAULT_FROM_EMAIL, [user.email], fail_silently=False)", "def test_admin_approval_email_is_html_by_default(self):\n new_user = UserModel().objects.create_user(**self.user_info)\n profile = self.registration_profile.objects.create_profile(new_user)\n profile.activated = True\n self.registration_profile.objects.send_admin_approve_email(\n new_user, Site.objects.get_current())\n\n self.assertEqual(len(mail.outbox[0].alternatives), 1)", "def sign_up(request):\n #logged in users are redirected\n if request.user.is_authenticated:\n messages.error(request, _('You are already signed in, and can\\'t make a new account until you sign out.'), extra_tags='alert alert-warning')\n return render(request, 'you_did_something.html')\n\n #mark an event - someone visited this site\n event = Event(category='visited_sign_up_view')\n event.save()\n\n #create the form\n form = SignUpForm\n context = {\n 'form': form,\n 'submit_button_text': _('Sign up',)\n }\n # If this is a POST request then process the Form data\n if request.method == 'POST':\n\n # Create a form instance and populate it with data from the request (binding):\n form = SignUpForm(request.POST)\n context.update({'form': form})\n # Check if the form is valid:\n if form.is_valid():\n \n # process the data in form.cleaned_data as required (here we just write it to the model due_back field)\n user = User.objects.create_user(form.cleaned_data['username'], form.cleaned_data['username'], form.cleaned_data['password'])\n user.save()\n organization = Organization(\n owner=user,\n phone = form.cleaned_data['phone'],\n name = form.cleaned_data['name'],\n address_line_1 = form.cleaned_data['address_line_1'],\n address_line_2 = form.cleaned_data['address_line_2'],\n zip_code = 
form.cleaned_data['zip_code'],\n city = form.cleaned_data['city'],\n country = form.cleaned_data['country'],\n accepted_terms_and_conditions = form.cleaned_data['accepted_terms_and_conditions'],\n )\n organization.save()\n messages.success(request, _(\"Welcome aboard. Let's start by adding some employees to survey!\"), extra_tags='alert alert-success')\n send_mail(\n '[www] New user: %s!'%(user.username),\n 'User: %s has signed up!'%(user.username),\n '[email protected]',\n ['[email protected]'],\n fail_silently=True,\n )\n if user is not None:\n auth.login(request, user)\n\n #mark an event - someone signed up successfully\n event = Event(category='completed_sign_up', user=user)\n event.save()\n\n # redirect to a new URL:\n return HttpResponseRedirect(reverse('surveys-dashboard'))\n else:\n #mark an event - someone failed to sign up\n comment = \"\"\n for field in form.visible_fields():\n if field.field.label != _(\"Choose a password\") and field.field.label != _(\"Confirm password\"):\n field_data = \"%s: %s \\n\"%(field.field.label, field.data)\n comment+=(field_data)\n event = Event(category='failed_sign_up', comment=comment)\n event.save()\n return render(request, 'sign_up_form.html', context)", "def test_activation_already_activated(self):\n new_user = self.registration_profile.objects.create_inactive_user(\n site=Site.objects.get_current(), **self.user_info)\n profile = self.registration_profile.objects.get(user=new_user)\n self.registration_profile.objects.activate_user(\n profile.activation_key, Site.objects.get_current())\n\n profile = self.registration_profile.objects.get(user=new_user)\n user, activated = self.registration_profile.objects.activate_user(\n profile.activation_key, Site.objects.get_current())\n self.assertEqual(user, new_user)\n self.assertFalse(activated)", "def activate(self, *args, **kwargs):\n username = self.validate_key(kwargs.get(\"activation_key\"))\n user = self.get_user(username)\n user.is_active = True\n user.save()\n return user", "def send_activation_key_via_email(user, signup_key):\n subject = '[%s]Verify your email.' % (settings.ORGANIZATION_NAME)\n from_email = settings.DEFAULT_FROM_EMAIL\n to = [user.email, ]\n activation_link = '%s%s' % (settings.HOSTNAME_URL,\n reverse('activation_verify',\n args=(signup_key,)))\n\n html_content = \"\"\"\n <p>\n Hello %s. Please click the link to activate your account.<br>\n <a href=%s a> %s</a><br>\n\n Thank you,<br>\n\n The Team\n </p>\n \"\"\" % (user.first_name, activation_link, activation_link)\n\n text_content = \"\"\"\n Hello %s. 
Please click the link to activate your account.\n\n %s\n\n Thank you,\n\n The Team\n\n \"\"\" % (user.first_name, activation_link)\n\n msg = EmailMultiAlternatives(subject=subject, body=text_content,\n to=to, from_email=from_email)\n msg.attach_alternative(html_content, 'text/html')\n msg.send()", "def activate(request, activation_key):\n profile = get_object_or_404(User, activation_key=activation_key)\n if profile.akey_expires < timezone.now():\n return render('user_account/activate.html', {'expired': True})\n\n profile.save(update_fields=['active', 'activation_key'])\n return render(\n 'user_account/activate.html',\n {'success': True, 'name': profile.name + \" \" + profile.surname}\n )", "def create_user_email(user):\n if not user.is_authenticated:\n return False\n \n user.email = \"%s@%s\" % (user.username, settings.DEFAULT_EMAIL_HOST)\n user.save()\n \n return user.email", "def test_create_user_activation_email_failure(self, send):\n data = {\n 'username': 'John',\n 'email': '[email protected]',\n 'password': 'test123!',\n 'phone': '1234567890',\n 'first_name': 'Chuck',\n 'last_name': 'Norris',\n 'university': {\n \"name\": \"random_university\"\n },\n 'academic_field': {'name': \"random_field\"},\n 'academic_level': {'name': \"random_level\"},\n 'gender': \"M\",\n 'birthdate': \"1999-11-11\",\n }\n\n response = self.client.post(\n reverse('user-list'),\n data,\n format='json',\n )\n\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n self.assertEqual(json.loads(response.content)['phone'], '1234567890')\n\n user = User.objects.get(email=\"[email protected]\")\n activation_token = ActionToken.objects.filter(\n user=user,\n type='account_activation',\n )\n\n self.assertFalse(user.is_active)\n self.assertEqual(1, len(activation_token))\n\n # Test that no email was sent:\n self.assertEqual(len(mail.outbox), 0)", "def process_signup():\n\n\temail = request.form.get('email');\n\tpassword = request.form.get('password');\n\n\tif email:\n\t\tnew_user = model.User(email=email, password=password)\n\t\tmodel.session.add(new_user)\n\t\tmodel.session.commit()\n\t\tsession['email'] = email\t\n\n\treturn render_template(\"signup.html\")", "def test_email_signup(self, subscribe):\n params = {'display_name': 'newbie',\n 'email': '[email protected]',\n 'password': 'asdf1234',\n 'agreement': 'on',\n 'email_subscribe': 'on'}\n response = self.client.post(reverse('users.register'), params)\n eq_(200, response.status_code)\n\n source_url = 'http://testserver%s' % response.request['PATH_INFO']\n subscribe.assert_called_with('testcamp', u'[email protected]',\n lang='en-us', source_url=source_url)", "def clean_email(self):\n # NOTE: all emails are stored in lower-case\n e = self.cleaned_data['email'].lower()\n try:\n user = User.objects.get(email=e)\n if not user.is_active:\n msg = 'This user account has not been confirmed yet'\n raise forms.ValidationError(msg)\n except User.DoesNotExist:\n msg = 'This email is not associated with an account'\n raise forms.ValidationError(msg)\n return e", "def confirm_email(request, key):\n alt_email = cpm.Email.objects.filter(activation_key=key)\n if alt_email.exists():\n alt_email[0].confirm()\n return redirect('/')\n hero_title = 'We weren\\'t able to complete your request...'\n return render_err_msg(request, hero_title)", "def test_activation_invalid_key(self):\n user, activated = self.registration_profile.objects.activate_user(\n 'foo', Site.objects.get_current())\n self.assertIs(user, False)\n self.assertFalse(activated)", "def signup():", "def 
notify_activate(self, **kwargs):\n return self.notify(\"notify_activate\", **kwargs)", "def activate_user(self, activation_key):\n if SHA1_RE.search(activation_key):\n try:\n profile = RegistrationProfile.objects.get(activation_key=activation_key)\n except self.model.DoesNotExist:\n return False\n if not profile.activation_key_expired():\n user = profile.user\n user.is_active = True\n user.save()\n profile.activation_key = \"ALREADY_ACTIVATED\"\n profile.save()\n return user\n\n return False", "def test_expired_activation(self):\n new_user = self.registration_profile.objects.create_inactive_user(\n site=Site.objects.get_current(), **self.user_info)\n new_user.date_joined -= datetime.timedelta(\n days=settings.ACCOUNT_ACTIVATION_DAYS + 1)\n new_user.save()\n\n profile = self.registration_profile.objects.get(user=new_user)\n user, activated = self.registration_profile.objects.activate_user(\n profile.activation_key, Site.objects.get_current())\n\n self.assertIs(user, False)\n self.assertFalse(activated)\n\n new_user = UserModel().objects.get(username='alice')\n self.assertFalse(new_user.is_active)\n\n profile = self.registration_profile.objects.get(user=new_user)\n self.assertFalse(profile.activated)", "def signup(self):\n # sign up\n new_username = generate_username()\n success = signup_helper(self, new_username)\n if success:\n # go to AuthenticatedTasks\n self.user.username = new_username\n self.interrupt()", "def signup(self, request, user):\n pass", "def create_associated_email(sender, **kwargs):\n user = kwargs['instance']\n if kwargs['created']:\n email = AssociatedEmail(user=user, email=user.email, is_primary_email=True)\n if user.is_active:\n email.verification_date = timezone.now()\n email.is_verified = True\n email.save()", "def activation_email_template(cls, user_id):\n user = get_user_model().objects.get(id=user_id)\n email = user.e_mail\n activation_key = user.activation_key\n\n htmly = get_template('activation.html')\n \n context_kw = Context({'user': {'email': email, 'activation_key': activation_key}})\n \n email_subject = 'Account confirmation - NoTes'\n from_email = '[email protected]'\n html_content = htmly.render(context_kw)\n msg = EmailMultiAlternatives(email_subject, html_content, \n from_email, [email])\n msg.content_subtype = \"html\"\n msg.send()", "def post_activation_redirect(self, request, user):\n\t\tnewMember = StaffMember.objects.filter(user_id__exact=user.pk).get()\n\t\tlabGroup = LabGroup.objects.filter(pk=1).get()\n\t\tnewMember.lab_group = labGroup\n\t\tnewMember.save()\n\t\treturn ('registration_activation_complete', (), {})", "def form_valid(self, form):\n self.object = form.save()\n self.send_verify_email()\n return super().form_valid(form)" ]
[ "0.72729725", "0.7271193", "0.72603685", "0.7180653", "0.71139836", "0.710328", "0.708832", "0.70866233", "0.70748895", "0.6895029", "0.6886898", "0.68769574", "0.6810442", "0.68082666", "0.675095", "0.6744684", "0.67334735", "0.6731933", "0.6719185", "0.6693199", "0.6619442", "0.6596283", "0.65941095", "0.6566583", "0.65650076", "0.65608734", "0.6549365", "0.6543267", "0.65286267", "0.64959127", "0.64934313", "0.64725846", "0.64276975", "0.64199173", "0.6416365", "0.6369003", "0.63652515", "0.6356449", "0.63463056", "0.6344157", "0.63417476", "0.6341253", "0.6338403", "0.63328105", "0.63104856", "0.63053805", "0.6280076", "0.62782997", "0.62591773", "0.6256951", "0.6217434", "0.6213981", "0.6193444", "0.6190592", "0.61865145", "0.61786896", "0.6171651", "0.61560464", "0.61372226", "0.61341184", "0.61325413", "0.6129835", "0.6127966", "0.611214", "0.61052555", "0.60965616", "0.609594", "0.60950613", "0.60912496", "0.6081495", "0.6074204", "0.60708815", "0.6067788", "0.6064372", "0.60605824", "0.6059648", "0.60587054", "0.60362417", "0.6035835", "0.6033708", "0.602454", "0.6015395", "0.6007875", "0.6002234", "0.5996228", "0.59765124", "0.59734756", "0.59709847", "0.5967623", "0.595087", "0.59429026", "0.5941259", "0.5905595", "0.589555", "0.5893284", "0.58930725", "0.5889039", "0.58716613", "0.58637977", "0.586366" ]
0.5958555
89
Ensure that the user is notified that no email was sent.
def test_create_user_activation_email_failure(self, send):
    data = {
        'username': 'John',
        'email': '[email protected]',
        'password': 'test123!',
        'phone': '1234567890',
        'first_name': 'Chuck',
        'last_name': 'Norris',
        'university': {
            "name": "random_university"
        },
        'academic_field': {'name': "random_field"},
        'academic_level': {'name': "random_level"},
        'gender': "M",
        'birthdate': "1999-11-11",
    }

    response = self.client.post(
        reverse('user-list'),
        data,
        format='json',
    )

    self.assertEqual(response.status_code, status.HTTP_201_CREATED)
    self.assertEqual(json.loads(response.content)['phone'], '1234567890')

    user = User.objects.get(email="[email protected]")
    activation_token = ActionToken.objects.filter(
        user=user,
        type='account_activation',
    )

    self.assertFalse(user.is_active)
    self.assertEqual(1, len(activation_token))

    # Test that no email was sent:
    self.assertEqual(len(mail.outbox), 0)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_skip_blank_emails(self):\n appt_date = datetime.date.today() + datetime.timedelta(days=7) # Default for email\n confirmed = self.create_confirmed_notification(self.test_patient, appt_date)\n\n blank_contact = self.create_contact(data={'email': ''})\n self.group.contacts.add(blank_contact)\n\n # run email job\n from aremind.apps.reminders.app import daily_email_callback\n daily_email_callback(self.router)\n\n self.assertEqual(len(mail.outbox), 1)\n message = mail.outbox[0]\n self.assertEqual(len(message.to), 1)", "def test_skip_if_no_patients(self):\n\n appt_date = datetime.date.today() + datetime.timedelta(days=5)\n confirmed = self.create_confirmed_notification(self.test_patient, appt_date)\n\n # run email job\n from aremind.apps.reminders.app import daily_email_callback\n daily_email_callback(self.router)\n\n self.assertEqual(len(mail.outbox), 0)", "def test_send_mail_without_mail(self):\n event_without_mail = self.create_event(self.family, name=None)\n fadm = admin.EventAdmin(Event, self.site)\n with patch.object(fadm, \"message_user\") as message_user_mock:\n fadm.send_mail(\"Request\", [self.event, event_without_mail])\n message_user_mock.assert_called_once_with(\n \"Request\", \"The event of the 2018-12-31 has no email template set\",\n admin.messages.ERROR)", "def is_no_email(self):\n return self._tag == 'no_email'", "def test_resend_activation_email_nonexistent_user(self):\n self.assertFalse(self.registration_profile.objects.resend_activation_mail(\n email=self.user_info['email'],\n site=Site.objects.get_current(),\n ))\n self.assertEqual(len(mail.outbox), 0)", "def test_skip_blank_emails(self):\n appt_date = datetime.date.today() + datetime.timedelta(days=7) # Default for email\n reminders.Patient.objects.filter(\n pk__in=[self.test_patient.pk, self.other_patient.pk]\n ).update(next_visit=appt_date)\n confirmed = self.create_confirmed_notification(self.test_patient, appt_date)\n blank_contact = self.create_contact(data={'email': ''})\n null_contact = self.create_contact(data={'email': None})\n self.group.contacts.add(blank_contact)\n self.group.contacts.add(null_contact)\n\n self.startRouter()\n self.router.logger.setLevel(logging.DEBUG)\n # run email job\n from afrims.apps.reminders.app import daily_email_callback\n daily_email_callback(self.router)\n\n self.assertEqual(len(mail.outbox), 1)\n message = mail.outbox[0]\n self.assertEqual(len(message.to), 1)\n self.stopRouter()", "def confirm_email(self):\n # The base class' implementation does nothing\n pass", "def test_registered_no_notifications(self):\n msg = self._send(self.reg_conn, '1')\n self.assertEqual(len(msg.responses), 1)\n self.assertEqual(msg.responses[0].text,\n self.app.no_reminders)", "def test_registered_no_notifications(self):\n msg = self._send(self.reg_conn, '1')\n self.assertEqual(len(msg.responses), 1)\n self.assertEqual(msg.responses[0].text,\n self.app.no_reminders)", "def set_receive_no_mail(self):\n self.__mail = False", "def test_skip_if_no_patients(self):\n\n appt_date = datetime.date.today() + datetime.timedelta(days=5)\n reminders.Patient.objects.filter(\n pk__in=[self.test_patient.pk, self.other_patient.pk]\n ).update(next_visit=appt_date)\n confirmed = self.create_confirmed_notification(self.test_patient, appt_date)\n\n self.startRouter()\n self.router.logger.setLevel(logging.DEBUG)\n # run email job\n from afrims.apps.reminders.app import daily_email_callback\n daily_email_callback(self.router)\n\n self.assertEqual(len(mail.outbox), 0)\n self.stopRouter()", "def check_notify(self):\n # 
no stage or no notify\n if not self.stage_id or not self.stage_id.notify:\n return False\n # mail already sent and don't send multiple times\n if self.stage_id in self.notified_stage_ids:\n if not self.stage_id.notify_multiple:\n return False\n # no mail template\n if not self.stage_id.notify_template_id:\n raise except_orm(\n _(u'Warning !'),\n _(u\"No email template selected \"\n u\"in the '%s' stage of the '%s' method\"\n ) % (self.stage_id.name, self.method_id.name))\n return True", "def test_no_email(self):\n user = self.make_user()\n data: dict = {}\n\n with self.login(user):\n response = self.post(\"referrals:create\", data=data)\n\n message = list(get_messages(response.wsgi_request))[0]\n assert str(message) == \"'missing email' is an invalid email address.\"", "def test_without_overriding_recipient_email(self, settings, mocked_notify_client):\n settings.OMIS_NOTIFICATION_OVERRIDE_RECIPIENT_EMAIL = ''\n\n notify._send_email(\n email_address='[email protected]',\n template_id='foobar',\n personalisation={},\n )\n\n mocked_notify_client.send_email_notification.assert_called_with(\n email_address='[email protected]',\n template_id='foobar',\n personalisation={},\n )", "def test_send_mail_unauthorized(self):\r\n\r\n response = self.client.post(\r\n self.url, {\r\n 'action': 'Send email',\r\n 'to_option': 'all',\r\n 'subject': \"Welcome to the course!\",\r\n 'message': \"Lets start with an introduction!\"\r\n }\r\n )\r\n self.assertContains(response, \"Email is not enabled for this course.\")", "def test_user_creation_no_email(self):\n self.registration_profile.objects.create_inactive_user(\n site=Site.objects.get_current(),\n send_email=False, **self.user_info)\n self.assertEqual(len(mail.outbox), 0)", "def test_email_sent_to_omis_admin_if_no_manager(self, mocked_notify_client):\n market = Market.objects.first()\n market.manager_email = ''\n market.save()\n\n order = OrderFactory(primary_market_id=market.country.id)\n\n notify.order_created(order)\n\n assert mocked_notify_client.send_email_notification.called\n call_args = mocked_notify_client.send_email_notification.call_args_list[0][1]\n assert call_args['email_address'] == settings.OMIS_NOTIFICATION_ADMIN_EMAIL\n assert call_args['template_id'] == Template.generic_order_info.value", "def test_email_sent_to_omis_admin_if_no_market(self, mocked_notify_client):\n market = Market.objects.first()\n country = market.country\n market.delete()\n\n order = OrderFactory(primary_market_id=country.id)\n\n notify.order_created(order)\n\n assert mocked_notify_client.send_email_notification.called\n call_args = mocked_notify_client.send_email_notification.call_args_list[0][1]\n assert call_args['email_address'] == settings.OMIS_NOTIFICATION_ADMIN_EMAIL\n assert call_args['template_id'] == Template.generic_order_info.value", "def non_compliant_notification(self) -> bool:\n return pulumi.get(self, \"non_compliant_notification\")", "def non_compliant_notification(self) -> bool:\n return pulumi.get(self, \"non_compliant_notification\")", "def test_no_email_sent_to_regions_without_settings(self, mocked_notify_client):\n assert not UKRegionalSettings.objects.count()\n order = OrderFactory(uk_region_id=UKRegion.london.value.id)\n\n notify.order_created(order)\n\n assert mocked_notify_client.send_email_notification.call_count == 1\n call_args = mocked_notify_client.send_email_notification.call_args_list[0][1]\n assert call_args['template_id'] != Template.order_created_for_regional_manager.value", "def 
test_no_email_sent_to_regions_if_no_manager_email_defined(self, mocked_notify_client):\n UKRegionalSettings.objects.create(\n uk_region_id=UKRegion.london.value.id,\n manager_emails=[],\n )\n\n order = OrderFactory(uk_region_id=UKRegion.london.value.id)\n\n notify.order_created(order)\n\n assert mocked_notify_client.send_email_notification.call_count == 1\n call_args = mocked_notify_client.send_email_notification.call_args_list[0][1]\n assert call_args['template_id'] != Template.order_created_for_regional_manager.value", "def test_send_system_message_no_notification(self):\n user = mommy.make(User, group_notification_period='none')\n\n # pylint: disable=line-too-long\n with patch('open_connect.connectmessages.tasks.send_immediate_notification') as mock:\n tasks.send_system_message(user, self.subject, self.message)\n\n self.assertFalse(mock.delay.called)", "def test_no_email_sent_to_regions_if_region_is_null(self, mocked_notify_client):\n order = OrderFactory(uk_region_id=None)\n\n notify.order_created(order)\n\n assert mocked_notify_client.send_email_notification.call_count == 1\n call_args = mocked_notify_client.send_email_notification.call_args_list[0][1]\n assert call_args['template_id'] != Template.order_created_for_regional_manager.value", "def can_notify(self, last_notification):\n return (\n features.is_enabled(features.EMAIL_NOTIFICATIONS)\n and self.notification_settings.via_email\n and api.can_email_user(self.user)\n and super().can_notify(last_notification)\n )", "def test_private_message_not_sends_email(self, get_current):\n get_current.return_value.domain = \"testserver\"\n\n s, c = Setting.objects.get_or_create(user=self.to, name=\"email_private_messages\")\n # Now user should not recieve email.\n s.value = False\n s.save()\n assert not Setting.get_for_user(self.to, \"email_private_messages\")\n\n self.client.login(username=self.sender.username, password=\"testpass\")\n post(self.client, \"messages.new\", {\"to\": self.to, \"message\": \"a message\"})\n\n assert not mail.outbox", "def test_registeration_no_email(self):\n response = self.signup_a_user(self.user_lacks_email)\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(response.data[\"errors\"][\"email\"],\n [\"This field may not be blank.\"]\n )\n self.assertNotIn(\"token\", response.data)", "def test_group_notification_not_called(self):\n send_message(self.directmessage1.pk)\n self.assertFalse(self.groupnotify_mock.called)", "def test_activation_email_missing_template(self):\n new_user = UserModel().objects.create_user(**self.user_info)\n profile = self.registration_profile.objects.create_profile(new_user)\n profile.send_activation_email(Site.objects.get_current())\n self.assertEqual(len(mail.outbox), 1)\n self.assertEqual(mail.outbox[0].to, [self.user_info['email']])", "def test_email_disabled(self):\r\n test_email = {\r\n 'action': 'Send email',\r\n 'send_to': 'myself',\r\n 'subject': 'test subject for myself',\r\n 'message': 'test message for myself'\r\n }\r\n response = self.client.post(self.send_mail_url, test_email)\r\n # We should get back a HttpResponseForbidden (status code 403)\r\n self.assertContains(response, \"Email is not enabled for this course.\", status_code=403)", "def test_send_mass_html_mail_to_send_no_email(self, send_mass_html_mail__mock: Mock):\n self.family.guests.add(\n Guest(name=\"Pierre\", email=None, phone=\"0123456789\", female=False, family=self.family),\n bulk=False\n )\n events = Event.objects.filter(pk=self.event.pk)\n\n 
admin.EventAdmin.send_mail(Mock(), None, events)\n\n recipient = list(send_mass_html_mail__mock.call_args[0][0])[0][4]\n self.assertListEqual(list(recipient),\n [\"Françoise <[email protected]>\", \"Jean <[email protected]>\"])", "def test_no_email(self):\n\n a = Agency(name=\"Broadcasting Board of Governors\", slug=\"brodcasting\")\n a.save()\n\n response = self.client.get(\n reverse('contact_landing', args=['broadcasting']))\n self.assertTrue(200, response.status_code)\n content = response.content.decode('utf-8')\n self.assertTrue('Request online' not in content)", "def test_email(self):\n # No email should be send\n self.assertEqual(len(mail.outbox), 0)\n\n # enable plugin and set mail setting to true\n plugin = registry.plugins.get('inventreecorenotificationsplugin')\n plugin.set_setting('ENABLE_NOTIFICATION_EMAILS', True)\n NotificationUserSetting.set_setting(\n key='NOTIFICATION_METHOD_MAIL',\n value=True,\n change_user=self.user,\n user=self.user,\n method=InvenTreeCoreNotificationsPlugin.EmailNotification.METHOD_NAME\n )\n\n # run through\n self._notification_run(InvenTreeCoreNotificationsPlugin.EmailNotification)\n\n # Now one mail should be send\n self.assertEqual(len(mail.outbox), 1)", "def test_send_subscribe_email(self):\n #Verifica se foi enviado 1 e-mail, o este não envia e-mail\n self.assertEqual(1, len(mail.outbox))", "def test_unverified_consumer(self):\n self.prep_consumer()\n UnqualifiedConsumerEmailTask().run(test_mode=self.consumer)\n self.common_asserts()\n self.assertTrue('Confirm your email address with a single click.' in\n mail.outbox[0].alternatives[0][0])\n self.assertTrue('Use this link to confirm your email address' in\n mail.outbox[0].body)", "def test_unverified_subscriber(self):\n self.prep_consumer()\n subscriber = Subscriber.objects.get(id=7)\n self.consumer.subscriber = subscriber\n self.consumer.save()\n UnqualifiedConsumerEmailTask().run(test_mode=self.consumer)\n self.common_asserts()\n self.assertTrue('71010' in mail.outbox[0].alternatives[0][0])\n self.assertTrue('71010' in mail.outbox[0].body)", "def test_notify_user(self):\n foo = Foo.objects.create(name='foo', description='foo object')\n notify_users([self.user_a], foo, notification_type='foo')\n self.assertEqual(len(mail.outbox), 1)", "def verify_mail(self):\n raise NotImplementedError", "def notify_students():\n time_now = datetime.datetime.now(get_localzone())\n emails_to_send = Email.objects.all()\n for email in emails_to_send:\n if email.assignment.date_assigned <= time_now:\n send_mail(subject=email.subject,\n message=email.message,\n recipient_list=Student.objects.filter(assignments=email.assignment),\n from_email=None,\n fail_silently=False)\n email.delete()", "def test_no_email_to_studio_if_setting_not_on(self, mock_tz):\n mock_tz.now.return_value = datetime(2015, 2, 11, 10, tzinfo=dt_timezone.utc)\n for i in range(5):\n baker.make(\n TicketBooking, ticketed_event=self.ticketed_event,\n cancelled=False, paid=False,\n user__email=\"unpaid_user{}@test.com\".format(i),\n date_booked= datetime(2015, 2, 9, tzinfo=dt_timezone.utc),\n warning_sent=True,\n date_warning_sent= datetime(2015, 2, 9, tzinfo=dt_timezone.utc),\n )\n for booking in TicketBooking.objects.all():\n baker.make(Ticket, ticket_booking=booking)\n\n management.call_command('cancel_unpaid_ticket_bookings')\n # emails are sent to user per cancelled booking (6) (these 5 plus\n # self.unpaid); none to studio\n self.assertEqual(len(mail.outbox), 6)\n cancelled_booking_emails = [\n booking.user.email for booking\n in 
TicketBooking.objects.filter(cancelled=True)\n ]\n self.assertEqual(\n cancelled_booking_emails, [email.to[0] for email in mail.outbox]\n )", "def testEmailRequired(self):\r\n res = self.app.post('/signup_process')\r\n self.assertIn('Please supply', res.body)", "def test_without_primary_market(self, mocked_notify_client):\n order = OrderFactory(primary_market_id=None)\n\n notify.order_info(order, what_happened='something happened', why='to inform you')\n\n assert mocked_notify_client.send_email_notification.called\n call_args = mocked_notify_client.send_email_notification.call_args_list[0][1]\n assert call_args['personalisation']['primary market'] == 'Unknown market'", "def test_email_sent_on_failure(self):\n self._authorize()\n data = {\n 'Subject_Number': '000-1111',\n 'Pin_Code': '1234',\n 'Date_Enrolled': datetime.datetime.now().strftime('%b %d %Y '),\n 'Mobile_Number': '2223334444',\n }\n patient = self.create_xml_patient(data)\n payload = self.create_xml_payload([patient])\n response = self._post(payload)\n self.assertEqual(response.status_code, 500)\n self.assertEqual(len(mail.outbox), 1)", "def view_mailinglist_unsubscribe_notrack(request): \n user_email = request.POST.get('email')\n message=''\n if user_email:\n try:\n contact = Contact.objects.get(email=user_email)\n contactsave = Contact(pk=contact.pk,\n email=contact.email,\n first_name=contact.first_name,\n last_name=contact.last_name,\n subscriber=False, \n content_type=contact.content_type,\n object_id=contact.object_id,\n content_object=contact.content_object,\n creation_date=contact.creation_date,\n modification_date=contact.modification_date)\n contactsave.save()\n finished = True\n return render_to_response('newsletter/mailing_list_unsubscribe_notrack.html',\n {'finished':finished},\n context_instance=RequestContext(request))\n except:\n message=\"A user with that email does not exist.\" \n else:\n message=\"No email have been entered.\" \n return render_to_response('newsletter/mailing_list_unsubscribe_notrack.html',\n {'message':message},\n context_instance=RequestContext(request))", "def test_confirmation_username_not_email(self):\n pass", "def test_no_program_user_response(self, *args): # pylint: disable=unused-argument\n with mute_signals(post_save):\n no_permissions_profile = ProfileFactory.create()\n self.client.force_login(no_permissions_profile.user)\n resp_post = self.client.post(self.mail_url, data=self.request_data, format='json')\n assert resp_post.status_code == HTTP_403_FORBIDDEN", "def _did_send_first_contact_email(app):\n first_contact = app[FIRST_CONTACT_EMAIL_SENT_KEY]\n if first_contact and first_contact.lower() == 'y':\n return True\n return False", "def test_no_email_to_studio_if_setting_not_on(self, mock_tz):\n mock_tz.now.return_value = datetime(2015, 2, 10, 10, tzinfo=dt_timezone.utc)\n for i in range(5):\n baker.make_recipe(\n 'booking.booking', event=self.event,\n status='OPEN', paid=False,\n payment_confirmed=False,\n user__email=\"unpaid_user{}@test.com\".format(i),\n date_booked= datetime(2015, 2, 9, tzinfo=dt_timezone.utc),\n warning_sent=True,\n date_warning_sent= datetime(2015, 2, 9, 2, tzinfo=dt_timezone.utc),\n )\n\n management.call_command('cancel_unpaid_bookings')\n # emails are sent to user per cancelled booking (6); none to studio\n self.assertEqual(len(mail.outbox), 6)\n cancelled_booking_emails = [\n [booking.user.email] for booking\n in Booking.objects.filter(status='CANCELLED')\n ]\n self.assertEqual(\n sorted(cancelled_booking_emails),\n sorted([email.to for email in 
mail.outbox])\n )", "def test_sending_mail(self):\n\n appt_date = datetime.date.today() + datetime.timedelta(days=7) # Default for email\n confirmed = self.create_confirmed_notification(self.test_patient, appt_date)\n\n # run email job\n daily_email_callback(self.router)\n\n self.assertEqual(len(mail.outbox), 1)\n message = mail.outbox[0]\n self.assertTrue(self.test_contact.email in message.to)", "def test_resend_activation_email_nonunique_email(self):\n user1 = self.registration_profile.objects.create_inactive_user(\n site=Site.objects.get_current(), send_email=False, **self.user_info)\n user2_info = copy(self.user_info)\n user2_info['username'] = 'bob'\n user2 = self.registration_profile.objects.create_inactive_user(\n site=Site.objects.get_current(), send_email=False, **user2_info)\n self.assertEqual(user1.email, user2.email)\n self.assertFalse(self.registration_profile.objects.resend_activation_mail(\n email=self.user_info['email'],\n site=Site.objects.get_current(),\n ))\n self.assertEqual(len(mail.outbox), 0)", "def test_handle_sending_email(self, mock_email):\n mock_email.return_value = True\n\n send_email_notification(self.email_body)\n self.assertTrue(EmailMultiAlternatives.send.has_been_called)", "def test_set_send_email_notifications(self):\n # Setup scenario\n username = 'tester'\n password = 'secret'\n user = Account.objects.create_user(username=username, email='[email protected]', password=password)\n\n self.assertTrue(self.client.login(username=username, password=password))\n\n # Verify initial assumptions\n self.assertTrue(user.send_email_notifications)\n\n # Run code\n resp = self.client.post(reverse('account.api.configure_email'), {\n 'send_email_notifications': False,\n }, format='json')\n\n # Verify expectations\n self.assertEquals(status.HTTP_201_CREATED, resp.status_code)\n self.assertTrue(user.send_email_notifications)", "def test_email_warnings_sent_if_no_payment_due_date(self, mock_tz):\n mock_tz.now.return_value = datetime(\n 2015, 2, 11, 19, 0, tzinfo=dt_timezone.utc\n )\n\n # cancellation period starts 2015/2/13 18:00\n # payment_due_date None\n event = baker.make_recipe(\n 'booking.future_EV',\n date=datetime(2015, 2, 14, 18, 0, tzinfo=dt_timezone.utc),\n payment_open=True,\n cost=10,\n payment_due_date=None,\n cancellation_period=24)\n\n baker.make_recipe(\n 'booking.booking', event=event, paid=False,\n payment_confirmed=False,\n date_booked=datetime(2015, 2, 11, 14, 30, tzinfo=dt_timezone.utc),\n _quantity=5,\n )\n _add_user_email_addresses(Booking)\n management.call_command('email_warnings')\n self.assertEqual(len(mail.outbox), 5)", "def assertFailedBeforeEmailing(self, email_user):\r\n self.assertRolledBack()\r\n self.assertFalse(email_user.called)", "def test_admin_approval_complete_email_falls_back_to_django_default_from_email(self):\n new_user = UserModel().objects.create_user(**self.user_info)\n profile = self.registration_profile.objects.create_profile(new_user)\n profile.send_admin_approve_complete_email(Site.objects.get_current())\n self.assertEqual(len(mail.outbox), 1)\n self.assertEqual(mail.outbox[0].from_email, '[email protected]')", "def test_send_notification_without_reports_filled(self, fake_requests_obj):\n # act like it's March 2012\n fake_date = datetime.datetime(year=2012, month=3, day=1)\n (fake_requests_obj.expects_call().returns(fake_date))\n\n # delete existing reports\n Report.objects.all().delete()\n management.call_command('send_mentor_report_notification', [], {})\n eq_(len(mail.outbox), 4)", "def test_email_is_None(self):\n 
settings.GTMETRIX_REST_API_EMAIL = None\n with raises(GTmetrixEmailIsNone):\n gt = GTmetrixInterface()", "def test_notify_users(self):\n foo = Foo.objects.create(name='foo', description='foo object')\n notify_users(User.objects.all(), foo, notification_type='foo')\n self.assertEqual(len(mail.outbox), 2)", "def test_no_admins_registered(self):\n new_user = self.registration_profile.objects.create_inactive_user(\n site=Site.objects.get_current(), **self.user_info)\n\n with self.assertRaises(ImproperlyConfigured):\n self.registration_profile.objects.send_admin_approve_email(\n new_user, Site.objects.get_current())", "def test_send_notification_without_reports_filled(self, fake_requests_obj):\n # act like it's March 2012\n fake_date = datetime.datetime(year=2012, month=3, day=1)\n (fake_requests_obj.expects_call().returns(fake_date))\n\n # delete existing reports\n Report.objects.all().delete()\n management.call_command('send_third_report_notification', [], {})\n eq_(len(mail.outbox), 4)", "def notify_email_confirmed(self, user, email):\n \n # make sure user isn't still invited to groups he owns or is a member of\n for g in self.users_groups(user):\n g.remove_invitation(user)", "def handle_sent(self, instance):\n if not instance.created_by:\n return\n\n activity = Activity(\n actor=instance.created_by,\n verb=RestrictedMailSent,\n object=instance,\n time=instance.used,\n extra_context={},\n )\n self.manager.add_activity(\n activity, [instance.created_by.pk], [NotificationFeed]\n )\n\n # Send notification\n notification = RestrictedMailSentNotification(instance.created_by)\n notification.notify()", "def test_admin_approval_complete_email_uses_registration_default_from_email(self):\n new_user = UserModel().objects.create_user(**self.user_info)\n profile = self.registration_profile.objects.create_profile(new_user)\n profile.send_admin_approve_complete_email(Site.objects.get_current())\n self.assertEqual(len(mail.outbox), 1)\n self.assertEqual(mail.outbox[0].from_email, '[email protected]')", "async def check_notify(self) -> None:\n async with self.lock:\n # We loop through a list of keys because we are going to\n # mutate the dictionary as we loop through it.\n for message_id in copy.copy(list(self.upcoming_events.keys())):\n upcoming_event = self.upcoming_events[message_id]\n if not upcoming_event.time_to_notify():\n continue\n\n # Delete upcoming event if it's a member event\n if isinstance(upcoming_event, MemberEvent):\n # Delete upcoming if it's a member event\n await self.delete_upcoming_event(message_id)\n\n # Prepare message from the queue if it's recurring\n stop_notifying = False\n if isinstance(upcoming_event, RecurringEvent):\n stop_notifying = (\n upcoming_event.event_cancelled\n or upcoming_event.notified\n )\n\n if not stop_notifying:\n # Send ongoing event message\n ongoing_message = await upcoming_event.send_ongoing_message(\n notif_message=self.ongoing_template,\n channel=self.calendar_channel\n )\n\n # Distribute DM\n await upcoming_event.distribute_dm(\n self.dm_template,\n self.organizer_dm_template\n )\n\n # Create new ongoing event\n ongoing_event = OngoingEvent(\n countdown_time=upcoming_event.start_time,\n timeout_length=self.event_timeout,\n organizer_id=upcoming_event.organizer.id,\n message_text=ongoing_message.content,\n message_embed=ongoing_message.embeds[0]\n )\n\n self.ongoing_events[ongoing_message.id] = ongoing_event", "def test_admin_approval_email_falls_back_to_django_default_from_email(self):\n new_user = UserModel().objects.create_user(**self.user_info)\n 
profile = self.registration_profile.objects.create_profile(new_user)\n profile.activated = True\n self.registration_profile.objects.send_admin_approve_email(\n new_user, Site.objects.get_current())\n self.assertEqual(mail.outbox[0].from_email, '[email protected]')", "def test_fifs_send_mass_html_mail_to_send_no_email(self, send_mass_html_mail__mock: Mock):\n self.family.guests.add(\n Guest(name=\"Pierre\", email=None, phone=\"0123456789\", female=False, family=self.family),\n bulk=False\n )\n\n self._send_form()\n\n recipient = list(send_mass_html_mail__mock.call_args[0][0])[0][4]\n self.assertListEqual(list(recipient),\n [\"Françoise <[email protected]>\", \"Jean <[email protected]>\"])", "def test_fifs_send_mass_html_mail_to_send_no_email(self, send_mass_html_mail__mock: Mock):\n self.family.guests.add(\n Guest(name=\"Pierre\", email=None, phone=\"0123456789\", female=False, family=self.family),\n bulk=False\n )\n\n self._send_form()\n\n recipient = list(send_mass_html_mail__mock.call_args[0][0])[0][4]\n self.assertListEqual(list(recipient),\n [\"Françoise <[email protected]>\", \"Jean <[email protected]>\"])", "def notify_overdue(self, **kwargs):\n return self.notify(\"notify_overdue\", **kwargs)", "def test_send_notification_without_reports_filled(self, fake_requests_obj):\n # act like it's March 2012\n fake_date = datetime.datetime(year=2012, month=3, day=1)\n (fake_requests_obj.expects_call().returns(fake_date))\n\n # delete existing reports\n Report.objects.all().delete()\n management.call_command('send_second_report_notification', [], {})\n eq_(len(mail.outbox), 4)", "def test_invalid_email_when_logging_in(self):\n pass", "def clean(self):\n cleaned_data = super().clean()\n email = cleaned_data.get('email')\n is_subscribed = Subscriber.objects.filter(\n email__iexact=email,\n mailing_list=self.mailing_list,\n status=Status.SUBSCRIBED\n )\n if not is_subscribed:\n email_validation_error = ValidationError(\n gettext('The email address \"%(email)s\" is not subscribed to this list.'),\n params={'email': email},\n code='not_subscribed_error'\n )\n self.add_error('email', email_validation_error)\n return cleaned_data", "async def test_permanent_not_scheduled(self):\n ctx = MockContext(channel=self.text_channel)\n await self.cog.silence.callback(self.cog, ctx, None, None)\n self.cog.scheduler.schedule_later.assert_not_called()", "def handle_notify_request(self) -> HttpResponse:\n return HttpResponseNotFound()", "def test_clean_email_empty(self):\n\n raw_email = 'from=<>'\n result = clean_email(raw_email)\n self.assertEqual(result, '')", "def test_no_registration_admins_registered(self):\n new_user = self.registration_profile.objects.create_inactive_user(\n site=Site.objects.get_current(), **self.user_info)\n\n with warnings.catch_warnings(record=True) as _warning:\n self.registration_profile.objects.send_admin_approve_email(\n new_user, Site.objects.get_current())\n\n assertion_error = '''No warning triggered for unregistered\n REGISTRATION_ADMINS'''\n self.assertTrue(len(_warning) > 0, assertion_error)\n self.assertTrue('REGISTRATION_ADMINS' in str(_warning[-1].message),\n assertion_error)", "def clean_email(self):\n e = self.cleaned_data['email']\n try:\n user = User.objects.get(email=e)\n if not user.is_active:\n msg = 'This user account has not been confirmed yet'\n raise forms.ValidationError(msg)\n except User.DoesNotExist:\n pass\n # msg = 'This email is not associated with an account'\n # raise forms.ValidationError(msg)\n return e", "def test_failed_email(self):\n 
self.assertEqual(send_email(\"testtestcom\", \"test\", \"test\"), 'There was an error sending')", "def test_admin_approval_email_uses_registration_default_from_email(self):\n new_user = UserModel().objects.create_user(**self.user_info)\n profile = self.registration_profile.objects.create_profile(new_user)\n profile.activated = True\n self.registration_profile.objects.send_admin_approve_email(\n new_user, Site.objects.get_current())\n self.assertEqual(mail.outbox[0].from_email, '[email protected]')", "def get_email(cls, unused_provider_details):\r\n return None", "def test_blank_email(self):\n rv = self.signup('Bo', 'Theo', '', 'Bo1995', 'Bo1995')\n self.assertIn(b'Field must be between 6 and 30 characters long.', rv.data)", "def test_emailable(self):\n to_date = datetime.today() - timedelta(days=2)\n consumer = Consumer.objects.get(id=103)\n consumer.consumer_create_datetime = to_date - timedelta(days=1)\n consumer.is_emailable = False\n consumer.save()\n from_date = to_date.date() - timedelta(days=2)\n result = UnqualifiedConsumerEmailTask().qry_unqualified_consumers(\n from_date, to_date.date())\n self.assertEqual(result.filter(id=103).count(), 0)", "def notify(self) -> None:\n pass", "def notify(self) -> None:\n pass", "def test_bad_request_anon_user_no_email(self, zendesk_mock_class, datadog_mock):\r\n self._test_bad_request_omit_field(self._anon_user, self._anon_fields, \"email\", zendesk_mock_class, datadog_mock)\r\n self._test_bad_request_empty_field(self._anon_user, self._anon_fields, \"email\", zendesk_mock_class, datadog_mock)", "def test_status_no_message(self):\n with self.app.app_context():\n u = user(email='[email protected]', save=True)\n\n authenticate(self.client, u)\n\n rv = self.client.post('/statusize/', data={'message': ''},\n follow_redirects=True)\n # This kicks up a 404, but that's lame.\n eq_(rv.status_code, 404)", "def test_no_repost(self, mock_slack):\n from mail.management.commands.monitor_imap_folder import get_messages\n mock_slack.return_value.api_call.return_value = {'ok': True, 'ts': '1234'}\n get_messages(folder='INBOX', channel='#mailtest')\n mock_slack.return_value.api_call.assert_not_called()", "def test_contact_us_without_email(client, new_msg):\n del new_msg[\"email\"]\n rv = client.post(\"/api/send-email/\", json=new_msg)\n response = rv.get_json()[\"message\"]\n\n assert rv.status_code == HTTPStatus.BAD_REQUEST\n assert response[\"email\"][\"message\"] == \"Valid email is required\"", "def notify_already_registered(email):\n notifications_client = NotificationsAPIClient(settings.GOV_NOTIFY_API_KEY)\n\n notifications_client.send_email_notification(\n email_address=email,\n template_id=settings.GOV_NOTIFY_ALREADY_REGISTERED_TEMPLATE_ID,\n personalisation={\n 'login_url': (settings.SSO_BASE_URL + reverse('account_login')),\n 'password_reset_url': (settings.SSO_BASE_URL + reverse('account_reset_password')),\n 'contact_us_url': urls.domestic.CONTACT_US,\n },\n )", "def wait_for_alert(self):\n self.wait_for(lambda: self._alert is not None,\n 'User has not been alerted.')\n msg, self._alert = self._alert, None\n return msg", "async def test_silenced_not_added_to_notifier(self):\n with mock.patch.object(self.cog, \"_set_silence_overwrites\", return_value=False):\n await self.cog.silence.callback(self.cog, MockContext(), 15)\n self.cog.notifier.add_channel.assert_not_called()", "def send_mail_when_failed(self, body):\r\n pass", "def testEmailAlreadyThere(self):\r\n res = self.app.post(\r\n '/signup_process',\r\n params={\r\n 'email': '[email protected]'\r\n 
}\r\n )\r\n self.assertIn('already signed up', res.body)", "def email_not_in_use(has_user_field: bool = False) -> Callable:\n def _email_not_in_use(form, field):\n user_id = -1 if not has_user_field else form.user.id\n user = User.query.filter(User.email == field.data).first()\n if user is not None and user.id != user_id and len(field.data) > 0:\n raise ValidationError('This address is already in use')\n\n return _email_not_in_use", "def test_not_accept(mocker, client, application, decision, should_send_email):\n order = create_test_order(application, 123, fulfilled=False)\n\n data = {\"req_reference_number\": make_reference_id(order), \"decision\": decision}\n mocker.patch(\n \"ecommerce.views.IsSignedByCyberSource.has_permission\", return_value=True\n )\n send_email = mocker.patch(\"ecommerce.api.MailgunClient.send_individual_email\")\n resp = client.post(reverse(\"order-fulfillment\"), data=data)\n assert resp.status_code == statuses.HTTP_200_OK\n assert len(resp.content) == 0\n order.refresh_from_db()\n assert Order.objects.count() == 1\n assert order.status == Order.FAILED\n\n if should_send_email:\n assert send_email.call_count == 1\n assert send_email.call_args[0] == (\n \"Order fulfillment failed, decision={decision}\".format(\n decision=\"something else\"\n ),\n \"Order fulfillment failed for order {order}\".format(order=order),\n \"[email protected]\",\n )\n else:\n assert send_email.call_count == 0", "def test_send_notification(self):\n management.call_command('send_first_report_notification', [], {})\n eq_(len(mail.outbox), 4)", "def check_message(self):\n def check(fld_key):\n if not self[fld_key]:\n string = self._fields[fld_key].string\n raise UserError(\n _(\"%s field required to send an email.\") % string)\n if self.email_type == 'general':\n check('subject')\n check('body')\n elif self.email_type == 'scheduled':\n check('date')\n check('duration')\n check('priority')\n check('sub_subject')\n check('mail_template_id')", "def isMissingDataAlarm(self):\n\n if(self.missingData and len(self.subjects) != 0):\n for animal in self.subjects:\n message = \"Missing data for \" + animal + \".\"\n if(self.log):\n logging.info(message)\n self.sendToAllSubscribers(message, \"Alert: Missing data\")", "def test_admin_approval_complete_email(self):\n new_user = UserModel().objects.create_user(**self.user_info)\n profile = self.registration_profile.objects.create_profile(new_user)\n profile.send_admin_approve_complete_email(Site.objects.get_current())\n self.assertEqual(len(mail.outbox), 1)\n self.assertEqual(mail.outbox[0].to, [self.user_info['email']])", "def test_no_email_registration(self):\n self.response = self.client.post(\n \"/api/users/\",\n {\"user\": {\n \"username\": \"kake\",\n \"email\": \"\",\n \"password\": \"123445abcdefghijk\",\n }\n },\n format=\"json\"\n )\n self.assertEqual(self.response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual('This field may not be blank.',\n self.response.json()['errors']['email'][0])", "def test_none_message(self):\n d = self.producer.send_messages(\"topic\", key=b\"key\", msgs=[None])\n d.addErrback(lambda f: None) # Handle the cancellation failure from producer.stop().\n\n self.assertNoResult(d)", "def test_activation_email_falls_back_to_django_default_from_email(self):\n new_user = UserModel().objects.create_user(**self.user_info)\n profile = self.registration_profile.objects.create_profile(new_user)\n profile.send_activation_email(Site.objects.get_current())\n self.assertEqual(mail.outbox[0].from_email, '[email protected]')" ]
[ "0.7036216", "0.6975554", "0.6825931", "0.67460984", "0.6742591", "0.6721783", "0.66891694", "0.65338993", "0.65338993", "0.65253884", "0.6507732", "0.6412952", "0.63811535", "0.6334888", "0.6278315", "0.62352383", "0.6226457", "0.6224285", "0.61838245", "0.61838245", "0.6182637", "0.61673284", "0.61660576", "0.6122246", "0.61037856", "0.6102731", "0.61023813", "0.6034166", "0.6029969", "0.6028784", "0.60129577", "0.6010501", "0.5982437", "0.5953309", "0.59477675", "0.5924686", "0.5921751", "0.59196496", "0.58818656", "0.585068", "0.5843115", "0.5832101", "0.58204114", "0.5800354", "0.5792567", "0.5785831", "0.5769441", "0.5757387", "0.57516783", "0.57472485", "0.57431585", "0.57424057", "0.5736783", "0.5732347", "0.571814", "0.57021743", "0.56927305", "0.5682823", "0.5665628", "0.56587446", "0.5644017", "0.56292075", "0.5585603", "0.55747783", "0.5569603", "0.55539197", "0.55539197", "0.55503315", "0.5550095", "0.55495095", "0.55295765", "0.55237734", "0.5515973", "0.54967785", "0.5493772", "0.54907215", "0.54899144", "0.54894894", "0.54891723", "0.5480115", "0.5473096", "0.5472288", "0.5472288", "0.5469683", "0.5468208", "0.5466184", "0.54638875", "0.5461401", "0.5447117", "0.5444314", "0.5443802", "0.54398465", "0.54389954", "0.5438384", "0.54379237", "0.54374725", "0.54305476", "0.54273254", "0.54208565", "0.540735", "0.5406401" ]
0.0
-1
Ensure that the user is automatically activated.
def test_create_user_auto_activate(self, services):
    data = {
        'username': 'John',
        'email': '[email protected]',
        'password': 'test123!',
        'phone': '1234567890',
        'first_name': 'Chuck',
        'last_name': 'Norris',
        'university': {"name": "random_university"},
        'academic_field': {'name': "random_field"},
        'academic_level': {'name': "random_level"},
        'gender': "M",
        'birthdate': "1999-11-11",
    }

    response = self.client.post(
        reverse('user-list'),
        data,
        format='json',
    )

    self.assertEqual(response.status_code, status.HTTP_201_CREATED)
    self.assertEqual(json.loads(response.content)['phone'], '1234567890')

    user = User.objects.get(email="[email protected]")
    activation_token = ActionToken.objects.filter(
        user=user,
        type='account_activation',
    )

    self.assertTrue(user.is_active)
    self.assertEqual(1, len(activation_token))

    # Test that no email was sent:
    self.assertEqual(len(mail.outbox), 0)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def activate_user(self, user):\n if not user.active:\n user.active = True\n return True\n return False", "def activate_user(self, user):\n if not user.active:\n user.active = True\n # noinspection PyUnresolvedReferences\n self.save(user)\n return True\n\n return", "def will_activate(self):\n pass", "def activate_user(self, email):\r\n activation_key = Registration.objects.get(user__email=email).activation_key\r\n # and now we try to activate\r\n check_for_get_code(self, 200, reverse('activate', kwargs={'key': activation_key}))\r\n # Now make sure that the user is now actually activated\r\n self.assertTrue(User.objects.get(email=email).is_active)", "def test_activate_active_user(self):\n activate_user(self.user, self.request)\n self.assertEqual(self.user.is_active, True)", "def test_activate_user(self):\n activated_user = (RegistrationProfile.objects\n .activate_user(self.activation_key))\n self.assertTrue(activated_user.registrationprofile.activated)\n self.assertFalse(activated_user.is_active)", "def testInitialUserInactivated(self):\r\n u = User()\r\n u.email = gen_random_word(10)\r\n DBSession.add(u)\r\n\r\n self.assertEqual(\r\n False,\r\n u.activated,\r\n 'A new signup should start out deactivated by default')\r\n self.assertTrue(\r\n u.activation.code is not None,\r\n 'A new signup should start out as deactivated')\r\n self.assertEqual(\r\n 'signup',\r\n u.activation.created_by,\r\n 'This is a new signup, so mark is as thus')", "def activate(self, *args, **kwargs):\n username = self.validate_key(kwargs.get(\"activation_key\"))\n user = self.get_user(username)\n user.is_active = True\n user.save()\n return user", "def activate(self):\n if not self.is_active:\n self.is_active = True\n self.activated_at = datetime.datetime.utcnow()\n import messaging # avoid circular import\n messaging.send_activated_emails(self)\n self.save()", "def check_active(self, user):\r\n if not self.require_active:\r\n # Ignore & move on.\r\n return True\r\n\r\n return user.is_active", "def activated_user(self):\n user = self.signup_user_two()\n user.is_active = True\n user.save()\n return user", "def activate_user(self, user_name):\n if not self._simultanious_log_ins:\n self._active_users_names.add(user_name)", "def _activate_user(self, email):\r\n activation_key = registration(email).activation_key\r\n\r\n # and now we try to activate\r\n resp = self.client.get(reverse('activate', kwargs={'key': activation_key}))\r\n return resp", "def activate_user(self, activation_key):\r\n # Make sure the key we're trying conforms to the pattern of a\r\n # SHA1 hash; if it doesn't, no point trying to look it up in\r\n # the database.\r\n if SHA1_RE.search(activation_key):\r\n try:\r\n profile = self.get(activation_key=activation_key)\r\n except self.model.DoesNotExist:\r\n return False\r\n if not profile.activation_key_expired():\r\n user = profile.user\r\n user.is_active = True\r\n user.save()\r\n profile.activation_key = \"ALREADY_ACTIVATED\"\r\n profile.save()\r\n return user\r\n return False", "def activate_user(self, username):\n args = parser_activate.parse_args()\n isActive = request.json.get('isactive')\n\n query = \"\"\"UPDATE users SET isactive=%s WHERE username=%s\"\"\"\n values = isActive, username\n\n conn = self.db\n cursor = conn.cursor()\n cursor.execute(query, values)\n conn.commit()\n return True", "def activate(self):\r\n self.update_enrollment(is_active=True)", "def test_activation_already_activated(self):\n new_user = self.registration_profile.objects.create_inactive_user(\n 
site=Site.objects.get_current(), **self.user_info)\n profile = self.registration_profile.objects.get(user=new_user)\n self.registration_profile.objects.activate_user(\n profile.activation_key, Site.objects.get_current())\n\n profile = self.registration_profile.objects.get(user=new_user)\n _, activated = self.registration_profile.objects.activate_user(\n profile.activation_key, Site.objects.get_current())\n self.assertFalse(activated)", "def activate(self):\n self._is_active = True", "def activate(self):\r\n if self.activation_code == '':\r\n raise ValidationError('The member is already activated')\r\n signer = TimestampSigner()\r\n signer.unsign(self.activation_code, max_age=timedelta(days=2))\r\n self.hidden = False\r\n self.activation_code = ''\r\n self.joined_date = timezone.now()\r\n self.save()", "def activate(self):\n pass", "def activate_user(self, activation_key):\n if SHA1_RE.search(activation_key):\n try:\n profile = RegistrationProfile.objects.get(activation_key=activation_key)\n except self.model.DoesNotExist:\n return False\n if not profile.activation_key_expired():\n user = profile.user\n user.is_active = True\n user.save()\n profile.activation_key = \"ALREADY_ACTIVATED\"\n profile.save()\n return user\n\n return False", "def KLP_User_Activate(request, user_id):\n\n # get logged in user\n\n user = request.user\n if user.id:\n\n # check logged in user permissions to delete user\n\n KLP_user_Perm(request.user, 'Users', None)\n userObj = User.objects.get(pk=user_id)\n userObj.is_active = 1 # activate user\n userObj.save() # save user object\n return render_to_response('viewtemplates/userAction_done.html',\n {\n 'user': request.user,\n 'selUser': userObj,\n 'message': 'User Activated Successfully',\n 'legend': 'Karnataka Learning Partnership',\n 'entry': 'Add',\n }, context_instance=RequestContext(request))\n else:\n\n # if user is not logged in redirect to login page\n\n return HttpResponseRedirect('/login/')", "def test_activation_already_activated(self):\n new_user = self.registration_profile.objects.create_inactive_user(\n site=Site.objects.get_current(), **self.user_info)\n profile = self.registration_profile.objects.get(user=new_user)\n self.registration_profile.objects.activate_user(\n profile.activation_key, Site.objects.get_current())\n\n profile = self.registration_profile.objects.get(user=new_user)\n user, activated = self.registration_profile.objects.activate_user(\n profile.activation_key, Site.objects.get_current())\n self.assertEqual(user, new_user)\n self.assertFalse(activated)", "def activate_user(cls, activation_key):\n #from registration.signals import user_activated\n \n # Make sure the key we're trying conforms to the pattern of a\n # SHA1 hash; if it doesn't, no point trying to look it up in\n # the database.\n db = DB_Session()\n if SHA1_RE.search(activation_key):\n query = db.query(RegistrationProfile)\n profile = query.filter(RegistrationProfile.activation_key == activation_key).one()\n if not profile:\n return False\n if not profile.activation_key_expired():\n user = profile.user\n user.is_active = 1\n profile.activation_key = RegistrationProfile.ACTIVATED\n db.flush()\n db.commit()\n db.close()\n #user_activated.send(sender=self.model, user=user)\n return user\n return False", "def activate_user(self, activation_key):\n # Make sure the key we're trying conforms to the pattern of a\n # SHA1 hash; if it doesn't, no point even trying to look it up\n # in the DB.\n if SHA1_RE.search(activation_key):\n try:\n user_profile = 
self.get(activation_key=activation_key)\n except self.model.DoesNotExist:\n return False\n if not user_profile.activation_key_expired():\n # Account exists and has a non-expired key. Activate it.\n user = user_profile.user\n user.is_active = True\n user.save()\n return user\n return False", "def _activate(self):\n self.active = True", "def test_valid_activation(self):\n new_user = self.registration_profile.objects.create_inactive_user(\n site=Site.objects.get_current(), **self.user_info)\n profile = self.registration_profile.objects.get(user=new_user)\n user, activated = self.registration_profile.objects.activate_user(\n profile.activation_key, Site.objects.get_current())\n\n self.assertIsInstance(user, UserModel())\n self.assertEqual(user.id, new_user.id)\n self.assertFalse(user.is_active)\n self.assertTrue(activated)\n\n profile = self.registration_profile.objects.get(user=new_user)\n self.assertTrue(profile.activated)", "def activate_user(self, activation_key):\n # Make sure the key we're trying conforms to the pattern of a\n # SHA1 hash; if it doesn't, no point trying to look it up in\n # the database.\n try:\n profile = self.get(admin_key=activation_key)\n except self.model.DoesNotExist:\n return False, False\n user = profile.user\n activated = False\n if not user.is_active:\n user.is_active = True\n user.save()\n activated = True\n return (activated, user)", "def test_valid_activation(self):\n new_user = self.registration_profile.objects.create_inactive_user(\n site=Site.objects.get_current(), **self.user_info)\n profile = self.registration_profile.objects.get(user=new_user)\n user, activated = self.registration_profile.objects.activate_user(\n profile.activation_key, Site.objects.get_current())\n\n self.assertIsInstance(user, UserModel())\n self.assertEqual(user.id, new_user.id)\n self.assertTrue(user.is_active)\n self.assertTrue(activated)\n\n profile = self.registration_profile.objects.get(user=new_user)\n self.assertTrue(profile.activated)", "def test_activation_deactivated(self):\n new_user = self.registration_profile.objects.create_inactive_user(\n site=Site.objects.get_current(), **self.user_info)\n profile = self.registration_profile.objects.get(user=new_user)\n self.registration_profile.objects.activate_user(\n profile.activation_key, Site.objects.get_current())\n\n # Deactivate the new user.\n new_user.is_active = False\n new_user.save()\n\n # Try to activate again and ensure False is returned.\n user, activated = self.registration_profile.objects.activate_user(\n profile.activation_key, Site.objects.get_current())\n self.assertFalse(activated)", "def _ensure_activation(self):\n if self._activation_gate is not None:\n self._activation_gate.wait()\n else:\n curframe = inspect.currentframe()\n calframe = inspect.getouterframes(curframe, 2)\n guarded_method = calframe[1][3]\n\n errmsg = \"The Landscape must be activated before calling the '%s' method.\" % guarded_method\n raise AKitSemanticError(errmsg)\n\n return", "def test_user_activation(self):\n user = User.objects.get()\n response = self.client.get(reverse('accounts:user-activate',\n kwargs={'uidb64': urlsafe_base64_encode(force_bytes(user.pk)),\n 'token': account_activation_token.make_token(user)}))\n self.assertEqual(response.status_code, status.HTTP_200_OK)", "async def ensure_active(self):\n if not self.active:\n await self.refresh()", "def activate(self):\n self.active = True", "def activate(self):\n self.active = True", "def activate(self) -> bool:\n self.active = True\n return self._activate()", "def 
activate(request, activation_key):\n profile = get_object_or_404(User, activation_key=activation_key)\n if profile.akey_expires < timezone.now():\n return render('user_account/activate.html', {'expired': True})\n\n profile.save(update_fields=['active', 'activation_key'])\n return render(\n 'user_account/activate.html',\n {'success': True, 'name': profile.name + \" \" + profile.surname}\n )", "def test_activation_invalid_key(self):\n user, activated = self.registration_profile.objects.activate_user(\n 'foo', Site.objects.get_current())\n self.assertIs(user, False)\n self.assertFalse(activated)", "def toggle_active(self, user):\n user.active = not user.active\n return True", "def confirm_login_allowed(self, user):\r\n if not user.is_active:\r\n raise forms.ValidationError(\r\n self.error_messages['inactive'],\r\n code='inactive',\r\n )", "def activate_user(self, activation_key, request=None):\n # Make sure the key we're trying conforms to the pattern of a\n # SHA1 hash; if it doesn't, no point trying to look it up in\n # the database.\n if SHA1_RE.search(activation_key):\n try:\n profile = self.get(activation_key=activation_key)\n except self.model.DoesNotExist:\n profile = None\n statsd.incr('user.activate-error.does-not-exist')\n reason = 'key not found'\n if profile:\n if not profile.activation_key_expired():\n user = profile.user\n user.is_active = True\n user.save()\n\n # We don't need the RegistrationProfile anymore, delete it.\n profile.delete()\n\n # If user registered as contributor, send them the\n # welcome email.\n if user.groups.filter(name=CONTRIBUTOR_GROUP):\n self._send_email(\n confirmation_profile=profile,\n url=None,\n subject=_('Welcome to SUMO!'),\n text_template='users/email/contributor.ltxt',\n html_template='users/email/contributor.html',\n send_to=user.email,\n contributor=user)\n\n return user\n else:\n statsd.incr('user.activate-error.expired')\n reason = 'key expired'\n else:\n statsd.incr('user.activate-error.invalid-key')\n reason = 'invalid key'\n\n log.warning(u'User activation failure ({r}): {k}'.format(\n r=reason, k=activation_key))\n\n return False", "def activate(request, activation_key, template_name='registration/activate.html'):\n activation_key = activation_key.lower() # Normalize before trying anything with it.\n account = RegistrationProfile.objects.activate_user(activation_key)\n account.is_active = True\n account.save()\n return render(request, template_name,\n { 'account': account,\n 'expiration_days': settings.ACCOUNT_ACTIVATION_DAYS })", "def is_pending_activation(self):\n if (self.auth_token_is_used and self.is_active):\n return False\n else:\n return True", "def confirm_login_allowed(self, user):\n if not user.is_active:\n raise ValidationError(\n self.error_messages['inactive'],\n code='inactive',\n )", "def toggle_active(self, user):\n user.active = not user.active\n # noinspection PyUnresolvedReferences\n self.save(user)\n return True", "def confirm_login_allowed(self, user):\n if not user.is_active:\n raise forms.ValidationError(\n self.error_messages['inactive'],\n code='inactive',\n )", "def confirm_login_allowed(self, user):\n if not user.is_active:\n raise forms.ValidationError(\n self.error_messages['inactive'],\n code='inactive',\n )", "def confirm_login_allowed(self, user):\n if not user.is_active:\n raise forms.ValidationError(\n self.error_messages['inactive'],\n code='inactive',\n )", "def confirm_login_allowed(self, user):\n if not user.is_active:\n raise forms.ValidationError(\n self.error_messages['inactive'],\n 
code='inactive',\n )", "def confirm_login_allowed(self, user):\n if not user.is_active:\n raise forms.ValidationError(\n self.error_messages['inactive'],\n code='inactive',\n )", "def activate(self) -> None:\n self._bot.inject_flows_from(self)\n self.is_activated = True", "def user_is_activated(self, user_name):\n return not self._simultanious_log_ins and \\\n user_name in self._active_users_names", "def activate(userid, userhash):\n a_user = User.query.filter_by(id=userid).first_or_404()\n if a_user.check_hashword(userhash):\n a_user.hashword = None\n a_user.active = True\n a_user.save()\n login_user(a_user, remember=True)\n flash(\"Welcome! Your user account has been activated.\", 'success')\n return redirect(url_for('auth.user_profile'))\n elif a_user.active:\n flash(\"Your user account is active.\", 'success')\n else:\n flash(\"Activation not found, or has expired.\" \\\n + \"Please try again or ask an organizer.\", 'warning')\n logout_user()\n return redirect(url_for('public.home'))", "def activate_account(self, activation_key):\n try:\n registration_profile = self.get(activation_key=activation_key)\n except self.model.DoesNotExist:\n return None\n\n if not registration_profile.is_expired():\n user = registration_profile.user\n user.is_active = True\n user.save()\n registration_profile.delete()\n return user\n else:\n return None", "def check_for_activate(self):\n try:\n # Attempt to activate. If the user has completed pairing on the,\n # backend, this will succeed. Otherwise it throws and HTTPError()\n\n token = self.data.get(\"token\")\n login = self.api.activate(self.state, token) # HTTPError() thrown\n\n # When we get here, the pairing code has been entered on the\n # backend and pairing can now be saved.\n # The following is kinda ugly, but it is really critical that we\n # get this saved successfully or we need to let the user know that\n # they have to perform pairing all over again at the website.\n try:\n IdentityManager.save(login)\n except Exception as e:\n self.log.debug(\"First save attempt failed: \" + repr(e))\n time.sleep(2)\n try:\n IdentityManager.save(login)\n except Exception as e2:\n # Something must be seriously wrong\n self.log.debug(\"Second save attempt failed: \" + repr(e2))\n self.abort_and_restart()\n\n if mycroft.audio.is_speaking():\n # Assume speaking is the pairing code. Stop TTS of that.\n mycroft.audio.stop_speaking()\n\n self.enclosure.activate_mouth_events() # clears the display\n\n # Notify the system it is paired\n self.gui.show_page(\"pairing_done.qml\", override_idle=False)\n self.bus.emit(Message(\"mycroft.paired\", login))\n\n self.pairing_performed = True\n with self.pair_dialog_lock:\n if self.mycroft_ready:\n # Tell user they are now paired\n self.speak_dialog(self.paired_dialog)\n mycroft.audio.wait_while_speaking()\n else:\n self.speak_dialog(\"wait.for.startup\")\n mycroft.audio.wait_while_speaking()\n\n # Un-mute. Would have been muted during onboarding for a new\n # unit, and not dangerous to do if pairing was started\n # independently.\n self.bus.emit(Message(\"mycroft.mic.unmute\", None))\n\n # Send signal to update configuration\n self.bus.emit(Message(\"configuration.updated\"))\n\n # Allow this skill to auto-update again\n self.reload_skill = True\n except HTTPError:\n # speak pairing code every 60th second\n with self.counter_lock:\n if self.count == 0:\n self.speak_code()\n self.count = (self.count + 1) % 6\n\n if time.monotonic() > self.time_code_expires:\n # After 20 hours the token times out. 
Restart\n # the pairing process.\n with self.counter_lock:\n self.count = -1\n self.data = None\n self.handle_pairing()\n else:\n # trigger another check in 10 seconds\n self.__create_activator()\n except Exception as e:\n self.log.debug(\"Unexpected error: \" + repr(e))\n self.abort_and_restart()", "def confirm_email(self):\n self.active = True\n self.save()", "def verify(self):\n ACTIVATION_PERIOD = datetime.timedelta(days=14)\n if not self.org_verified:\n self.org_verified = True\n if not self.is_active:\n if not self.activation_code:\n self.activation_code = random_url_safe_code()\n self.activate_by = datetime.datetime.utcnow() + ACTIVATION_PERIOD\n import messaging # avoid circular import\n messaging.send_activation_emails(self)\n self.save()", "def activate(self):\n raise NotImplementedError(\"Shouldn't be called\")", "def notify_activate(self, **kwargs):\n return self.notify(\"notify_activate\", **kwargs)", "def _base_test_extauth_auto_activate_user_with_flag(self, log_user_string=\"[email protected]\"):\r\n inactive_user = UserFactory.create(email='[email protected]')\r\n inactive_user.is_active = False\r\n inactive_user.save()\r\n request = self.request_factory.get('/shib-login')\r\n request.session = import_module(settings.SESSION_ENGINE).SessionStore() # empty session\r\n request.META.update({\r\n 'Shib-Identity-Provider': 'https://idp.stanford.edu/',\r\n 'REMOTE_USER': '[email protected]',\r\n 'mail': '[email protected]'\r\n })\r\n\r\n request.user = AnonymousUser()\r\n with patch('external_auth.views.AUDIT_LOG') as mock_audit_log:\r\n response = shib_login(request)\r\n audit_log_calls = mock_audit_log.method_calls\r\n # reload user from db, since the view function works via db side-effects\r\n inactive_user = User.objects.get(id=inactive_user.id)\r\n self.assertIsNotNone(ExternalAuthMap.objects.get(user=inactive_user))\r\n self.assertTrue(inactive_user.is_active)\r\n self.assertIsInstance(response, HttpResponseRedirect)\r\n self.assertEqual(request.user, inactive_user)\r\n self.assertEqual(response['Location'], '/')\r\n # verify logging:\r\n self.assertEquals(len(audit_log_calls), 3)\r\n self._assert_shib_login_is_logged(audit_log_calls[0], log_user_string)\r\n method_name, args, _kwargs = audit_log_calls[2]\r\n self.assertEquals(method_name, 'info')\r\n self.assertEquals(len(args), 1)\r\n self.assertIn(u'Login success', args[0])\r\n self.assertIn(log_user_string, args[0])", "def activated(self, value: bool) -> None:\n\n if not isinstance(value, bool):\n raise TypeError(f\"<value> should be {bool}, {type(value)} given.\")\n\n self._activated = value", "def test_expired_activation(self):\n new_user = self.registration_profile.objects.create_inactive_user(\n site=Site.objects.get_current(), **self.user_info)\n new_user.date_joined -= datetime.timedelta(\n days=settings.ACCOUNT_ACTIVATION_DAYS + 1)\n new_user.save()\n\n profile = self.registration_profile.objects.get(user=new_user)\n user, activated = self.registration_profile.objects.activate_user(\n profile.activation_key, Site.objects.get_current())\n\n self.assertIs(user, False)\n self.assertFalse(activated)\n\n new_user = UserModel().objects.get(username='alice')\n self.assertFalse(new_user.is_active)\n\n profile = self.registration_profile.objects.get(user=new_user)\n self.assertFalse(profile.activated)", "def is_user_change_required(self):\n return self.__running_user != self.__desired_user", "def login_on_activation(sender, user, request, **kwargs):\n user.backend = 'storybase_user.auth.backends.EmailModelBackend'\n 
login(request, user)", "def on_activate(self) -> None:", "def is_active():\n return True", "def test_active_account_activation_key_expired(self):\n new_user = self.registration_profile.objects.create_inactive_user(\n site=Site.objects.get_current(), **self.user_info)\n profile = self.registration_profile.objects.get(user=new_user)\n self.registration_profile.objects.activate_user(\n profile.activation_key, Site.objects.get_current())\n self.registration_profile.objects.admin_approve_user(\n profile.id, Site.objects.get_current())\n profile.refresh_from_db()\n self.assertTrue(profile.activation_key_expired())", "def activate(request, uidb64, token):\r\n\ttry:\r\n\t\tuid = force_text(urlsafe_base64_decode(uidb64))\r\n\t\tuser = User.objects.get(pk=uid)\r\n\texcept (TypeError, ValueError, OverflowError, User.DoesNotExist):\r\n\t\tuser = None\r\n\r\n\tif user is not None and account_activation_token.check_token(user, token):\r\n\t\t# User activated and redirected to the homepage\r\n\t\tuser.is_active = True\r\n\t\tuser.profile.email_confirmed = True\r\n\t\tuser.save()\r\n\t\tlogin(request, user, backend='django.contrib.auth.backends.ModelBackend')\r\n\t\tgames = Game.objects.all()\r\n\t\treturn redirect('/', {'games': games, 'MEDIA_URL': settings.MEDIA_URL})\r\n\telse:\r\n\t\treturn render(request, 'account_activation_invalid.html')", "def test_create_user(self):\r\n self._auto_auth()\r\n self.assertEqual(User.objects.count(), 1)\r\n self.assertTrue(User.objects.all()[0].is_active)", "def is_active(self):\r\n return True", "def activateWebAppUser( self, username, activation_code ):\n try:\n con = self.getMetadataDatabaseConnection()\n user_data = con.cursor()\n\n con.cursor().callproc('verify_user_activation_code', [username, activation_code, user_data])\n row = user_data.fetchone()\n if row:\n con.cursor().callproc('activate_user_account', [username])\n return True\n else:\n return False\n except Exception, e:\n print 'Exception caught: %s.\\nThe error is: %s' % (type(e), e)\n return False", "def activate(request, uidb64, token):\n try:\n uid = force_text(urlsafe_base64_decode(uidb64))\n user = User.objects.get(pk=uid)\n except (TypeError, ValueError, OverflowError, User.DoesNotExist):\n user = None\n\n if user is not None and account_activation_token.check_token(user, token):\n user.is_active = True\n user.profile.email_confirmed = True\n user.save()\n login(request, user)\n return redirect('home')\n else:\n return render(request, 'registration/activation_invalid.html')", "def test_active_account_activation_key_expired(self):\n new_user = self.registration_profile.objects.create_inactive_user(\n site=Site.objects.get_current(), **self.user_info)\n profile = self.registration_profile.objects.get(user=new_user)\n self.registration_profile.objects.activate_user(\n profile.activation_key, Site.objects.get_current())\n profile.refresh_from_db()\n self.assertTrue(profile.activation_key_expired())", "def test_active_for_user(self):\r\n user = UserFactory.create()\r\n\r\n # This user has no active at the moment...\r\n assert_is_none(SoftwareSecurePhotoVerification.active_for_user(user))\r\n\r\n # Create an attempt and mark it ready...\r\n attempt = SoftwareSecurePhotoVerification(user=user)\r\n attempt.mark_ready()\r\n assert_equals(attempt, SoftwareSecurePhotoVerification.active_for_user(user))\r\n\r\n # A new user won't see this...\r\n user2 = UserFactory.create()\r\n user2.save()\r\n assert_is_none(SoftwareSecurePhotoVerification.active_for_user(user2))\r\n\r\n # If it's got a different status, 
it doesn't count\r\n for status in [\"submitted\", \"must_retry\", \"approved\", \"denied\"]:\r\n attempt.status = status\r\n attempt.save()\r\n assert_is_none(SoftwareSecurePhotoVerification.active_for_user(user))\r\n\r\n # But if we create yet another one and mark it ready, it passes again.\r\n attempt_2 = SoftwareSecurePhotoVerification(user=user)\r\n attempt_2.mark_ready()\r\n assert_equals(attempt_2, SoftwareSecurePhotoVerification.active_for_user(user))\r\n\r\n # And if we add yet another one with a later created time, we get that\r\n # one instead. We always want the most recent attempt marked ready()\r\n attempt_3 = SoftwareSecurePhotoVerification(\r\n user=user,\r\n created_at=attempt_2.created_at + timedelta(days=1)\r\n )\r\n attempt_3.save()\r\n\r\n # We haven't marked attempt_3 ready yet, so attempt_2 still wins\r\n assert_equals(attempt_2, SoftwareSecurePhotoVerification.active_for_user(user))\r\n\r\n # Now we mark attempt_3 ready and expect it to come back\r\n attempt_3.mark_ready()\r\n assert_equals(attempt_3, SoftwareSecurePhotoVerification.active_for_user(user))", "def activate(request, uidb64, token):\n try:\n uid = force_text(urlsafe_base64_decode(uidb64))\n user = User.objects.get(pk=uid)\n except(TypeError, ValueError, OverflowError, User.DoesNotExist):\n user = None\n if user is not None and account_activation_token.check_token(user, token):\n user.is_active = True\n user.save()\n return render(request, 'accounts/active_done.html')\n else:\n return HttpResponse('Activation link is invalid!')", "def is_active(self):\n return True", "def is_active(self):\n return True", "def is_active(self):\n return True", "def is_active(self):\n return True", "def is_active(self):\n return True", "def is_active(self):\n return True", "def is_active(self):\n return True", "def is_active(self):\n return True", "def is_active(self):\n return True", "def is_active(self):\n return True", "def is_active(self):\n return True", "def is_active(self):\n return True", "def is_active(self):\n return True", "def is_active(self):\n return True", "def is_active(self):\n return True", "def is_active(self):\n return True", "def is_active(self):\n return True", "def is_active(self):\n return True", "def is_active(self):\n return True", "def active(self, value):\n self._active = value\n # Check if this is already linked with an object in the database.\n # If it is, change the username in the user account too.\n try:\n self.userprofile.user.is_active = value\n except UserProfile.DoesNotExist:\n pass", "def auth_active(hass):\n hass.loop.run_until_complete(\n register_auth_provider(hass, {\"type\": \"homeassistant\"})\n )", "def activate(self):\n pass", "def test_resend_activation_email_activated_user(self):\n user = self.registration_profile.objects.create_inactive_user(\n site=Site.objects.get_current(), send_email=False, **self.user_info)\n\n profile = self.registration_profile.objects.get(user=user)\n user, activated = self.registration_profile.objects.activate_user(\n profile.activation_key, Site.objects.get_current())\n self.assertTrue(user.is_active)\n self.assertTrue(activated)\n\n self.assertFalse(self.registration_profile.objects.resend_activation_mail(\n email=self.user_info['email'],\n site=Site.objects.get_current(),\n ))\n self.assertEqual(len(mail.outbox), 0)", "def test_activate_authenticated(client):\n employee = factories.EmployeeFactory(\n company=factories.CompanyFactory(),\n account_status=False\n )\n with client.session_transaction() as session:\n session[\"user_id\"] = employee.id\n 
g.user = employee\n response = client.post(\"/auth/activate\")\n assert b\"<h1>Successfully activated your account.</h1>\" in response.data\n assert employee.account_status\n assert response.status_code == HTTPStatus.OK", "def active(self, activate):\n self.is_active = activate" ]
[ "0.78858703", "0.7588761", "0.7308194", "0.72612184", "0.7234818", "0.72069454", "0.7042805", "0.7022283", "0.6872894", "0.67527574", "0.66841036", "0.6675689", "0.6663803", "0.6635442", "0.66318125", "0.6628338", "0.6611106", "0.6603589", "0.658556", "0.6558611", "0.65234345", "0.6506198", "0.64943177", "0.6485037", "0.64698726", "0.64684004", "0.64567655", "0.6452981", "0.64474165", "0.64348894", "0.6422692", "0.6419335", "0.6410228", "0.63530064", "0.63530064", "0.63284934", "0.6328066", "0.63207644", "0.6319195", "0.6287769", "0.627346", "0.6270386", "0.6269061", "0.62574756", "0.6250988", "0.62502766", "0.62502766", "0.62502766", "0.62502766", "0.62502766", "0.62401146", "0.62380093", "0.621954", "0.620348", "0.6138607", "0.61132115", "0.6105075", "0.6084739", "0.6076552", "0.6076088", "0.60749036", "0.6062812", "0.60618573", "0.60204893", "0.6018172", "0.6016909", "0.60032916", "0.59977686", "0.59935254", "0.5991775", "0.5986548", "0.59758705", "0.59726924", "0.59668046", "0.59654063", "0.5943008", "0.5943008", "0.5943008", "0.5943008", "0.5943008", "0.5943008", "0.5943008", "0.5943008", "0.5943008", "0.5943008", "0.5943008", "0.5943008", "0.5943008", "0.5943008", "0.5943008", "0.5943008", "0.5943008", "0.5943008", "0.5943008", "0.59421223", "0.59310937", "0.59235543", "0.5918035", "0.5910607", "0.590382" ]
0.62174755
53
Ensure we can list all users.
def test_list_users(self):
    self.client.force_authenticate(user=self.admin)

    response = self.client.get(reverse('user-list'))

    self.assertEqual(json.loads(response.content)['count'], 2)

    # Users are ordered alphabetically by email
    first_user = json.loads(response.content)['results'][0]
    second_user = json.loads(response.content)['results'][1]
    self.assertEqual(first_user['email'], self.admin.email)

    membership = {
        'url': 'http://testserver/memberships/' + str(self.membership.id),
        'id': self.membership.id,
        'name': 'basic_membership',
        'available': True,
        'available_on_product_types': [],
        'available_on_products': [],
        'options': [],
        'picture': None,
        'price': '50.00',
        'details': '1-Year student membership',
        'duration': '365 00:00:00',
        'available_on_retreat_types': [],
        'academic_levels': ['http://testserver/academic_levels/' +
                            str(self.academic_level.id)]
    }
    self.assertEqual(
        remove_translation_fields(second_user['membership']),
        membership
    )

    # Check the system doesn't return attributes not expected
    attributes = [
        'id', 'url', 'email', 'first_name', 'last_name', 'is_active',
        'phone', 'other_phone', 'is_superuser', 'is_staff', 'university',
        'last_login', 'date_joined', 'academic_level', 'academic_field',
        'gender', 'language', 'birthdate', 'groups', 'user_permissions',
        'tickets', 'membership', 'membership_end', 'city',
        'personnal_restrictions', 'academic_program_code', 'faculty',
        'student_number', 'volunteer_for_workplace', 'hide_newsletter',
        'is_in_newsletter', 'number_of_free_virtual_retreat',
        'membership_end_notification', 'get_number_of_past_tomatoes',
        'get_number_of_future_tomatoes',
        'last_acceptation_terms_and_conditions', 'tomato_field_matrix',
        'current_month_tomatoes',
    ]
    for key in first_user.keys():
        self.assertTrue(
            key in attributes,
            'Attribute "{0}" is not expected but is '
            'returned by the system.'.format(key)
        )
        attributes.remove(key)

    # Ensure the system returns all expected attributes
    self.assertTrue(
        len(attributes) == 0,
        'The system failed to return some '
        'attributes : {0}'.format(attributes)
    )

    self.assertEqual(response.status_code, status.HTTP_200_OK)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_all_users():", "def list_users(self):\n raise NotImplementedError", "def test_api_can_get_all_users(self):\n response = self.client().get('/api/v1/user/')\n self.assertTrue(response.status_code, 200)", "def test_admin_user_list_all_users(self):\n response = self.client.get(CONSTS.USER_ADMIN_LIST)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(response.data, self.users.data)", "def test_list_user(self):\n pass", "def test_get_all_users(self):\n api.user.create(\n username='chuck',\n email='[email protected]',\n password='secret',\n )\n users = [user.getUserName() for user in api.user.get_users()]\n\n self.assertEqual(users, ['chuck', TEST_USER_NAME])", "def test_user_get_all(self):\n response = self.app.get('/api/v3/users', headers=self.user_header)\n self.assertEqual(response.status_code, 401)", "def test_fetch_all_user(self):\n\n payload = self.get_req('api/v1/users')\n self.assertEqual(payload.status_code, 200)\n self.assertEqual(payload.json['users'], [])", "def all_users():\n\n users = crud.get_users()\n\n return render_template('all_users.html', users=users)", "def user_list(ctx):\n data = ctx.obj.get_all_users()\n output_json_data(data)", "def test_get_users(self):\n pass", "def test_admin_get_all(self):\n response = self.app.get('/api/v3/users', headers=self.admin_header)\n self.assertEqual(response.status_code, 200)", "def list_all_users():\n session_id = request.args.get('session-id', None)\n user_id = request.args.get('user-id', None)\n users_list = get_users_list()\n if check_authentication(session_id, user_id) and is_admin_user(user_id):\n return render_template('admin_area.html', user=user_id, session_id=session_id, users_list=users_list)\n else:\n return render_template('home.html', cars_list=get_cars_preview(), news_list=get_news_list(), authjs=False,\n preview_length=get_cars_preview().__len__(), del_session_cookie=True)", "def admin_can_view_all_user_accounts(self):\n resp = self.admin_create_user()\n reply = self.admin_create_user2()\n resp = self.admin_login()\n token = resp['token']\n\n resp = self.client.get(\n '/api/v1/users',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertIn('love', str(reply['users'][1]['username']))\n self.assertIn('walker', str(reply['users'][2]['username']))\n self.assertEqual(resp.status_code, 200)", "def test_get_users(self):\n print('(' + self.test_get_users.__name__+')',\n self.test_get_users.__doc__)\n users = self.connection.get_users()\n # Check we get right size of users table\n self.assertEqual(len(users), INITIAL_USERS_COUNT)\n # check PATIENT and DOCTOR data with users object we got\n for user in users:\n if user['username'] == PATIENT_USERNAME:\n self.assertDictContainsSubset(user, PATIENT['public_profile'])\n elif user['username'] == DOCTOR_USERNAME:\n self.assertDictContainsSubset(user, DOCTOR['public_profile'])", "def fetch_all_users():\n users = find_users()\n return to_response(users, \"No users\")", "def do_get_all_users(self, *args):\n self.user_data = self.connection_obj.get_all()\n self.__class__.print_func(self, self.user_data)", "def _list_users(self):\n users = fileIO.load_json(\"users.json\")\n print(\"The list of users is as follows:\")\n for i in users:\n print(users[i][\"name\"])\n self._list_user_settings(users)", "def listusers():\n\n try:\n users = User.query.order_by(User.email).all()\n click.echo(\n tabulate(\n [\n [u.username, u.email, \"admin\" if u.is_admin else None]\n for u in users\n ]\n )\n )\n 
except OperationalError:\n click.echo(\"Tabela de usuários inexistente...\")", "def _CheckUsers(self, all_users):\n summary = self.fd.GetSummary()\n self.assertItemsEqual([x.username for x in summary.users], all_users)\n\n users = [x.username for x in self.fd.Get(self.fd.Schema.USER)]\n self.assertItemsEqual(users, all_users)\n self.assertItemsEqual(self.fd.Get(self.fd.Schema.USERNAMES), all_users)\n\n # Check kb users\n kbusers = [x.username for x in\n self.fd.Get(self.fd.Schema.KNOWLEDGE_BASE).users]\n self.assertItemsEqual(kbusers, all_users)", "def get_all_users():\n return jsonify(admin.get_all_users(current_app.scoped_session()))", "def test_admin_user_list_all_users_permission_denied(self):\n self.client.logout()\n self.client.login(\n username=self.invalid_user.username,\n password=self.invalid_user.password\n )\n response = self.client.get(CONSTS.USER_ADMIN_LIST)\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def all_users(self):\n return range(self.n_users)", "def list_users():\n check_admin()\n results = User.query.order_by(-User.id)\n return render_template('user_list.html', users=results)", "def get_users(self):\r\n\t\tlogger.debug(\"Fetch users\")\r\n\t\t\r\n\t\treturn login.get_users()", "def test_initial_share_all_users(self) -> None:\n self.handler.search_all_users = True\n self.hs.config.userdirectory.user_directory_search_all_users = True\n\n u1 = self.register_user(\"user1\", \"pass\")\n self.register_user(\"user2\", \"pass\")\n u3 = self.register_user(\"user3\", \"pass\")\n\n shares_private = self.get_success(\n self.user_dir_helper.get_users_who_share_private_rooms()\n )\n public_users = self.get_success(\n self.user_dir_helper.get_users_in_public_rooms()\n )\n\n # No users share rooms\n self.assertEqual(public_users, set())\n self.assertEqual(shares_private, set())\n\n # Despite not sharing a room, search_all_users means we get a search\n # result.\n s = self.get_success(self.handler.search_users(u1, u3, 10))\n self.assertEqual(len(s[\"results\"]), 1)\n\n # We can find the other two users\n s = self.get_success(self.handler.search_users(u1, \"user\", 10))\n self.assertEqual(len(s[\"results\"]), 2)\n\n # Registering a user and then searching for them works.\n u4 = self.register_user(\"user4\", \"pass\")\n s = self.get_success(self.handler.search_users(u1, u4, 10))\n self.assertEqual(len(s[\"results\"]), 1)", "def test_user_list(self):\r\n self._add_demo_import()\r\n params = {\r\n 'api_key': self.api_key\r\n }\r\n res = self.testapp.get('/api/v1/a/users/list',\r\n params=params,\r\n status=200)\r\n\r\n # we should get back dict of count, users.\r\n data = json.loads(res.body)\r\n\r\n self.assertEqual(\r\n 1, data.get('count'), \"There are none by default. 
\" + res.body)\r\n self.assertEqual(\r\n 'admin',\r\n data.get('users')[0]['username'],\r\n \"The first user is from admin \" + res.body)\r\n self.assertEqual(\r\n '[email protected]',\r\n data.get('users')[0]['email'],\r\n \"The first user is from [email protected] \" + res.body)", "def test_get_users(self):\n users = app.get_users()\n self.assertEqual(len(users), 1)", "def list_users(self):\n return self.get_admin(\"users\")", "def __list_all_users(self):\n\n usernames_dict = get_data.get_usernames_dict()\n if len(usernames_dict) > 0:\n first_str = 'user'\n second_str = 'posts scraped'\n descriptor = '{:<40} {}'\n print('')\n print(descriptor.format(first_str, second_str))\n print(descriptor.format(len(first_str) * '-', len(second_str) * '-'))\n for number, username in usernames_dict.items():\n space_str = ' ' if len(str(number)) > 1 else ' '\n first = '[' + space_str + str(number) + '] ' + username\n second = str(get_data.get_user_post_count(username))\n print(descriptor.format(first, second))\n else:\n print('no users found in the database')", "def list_user():\n\tbegin = 0\n\tlength = 25\n\ttry:\n\t\tif request.json != None:\n\t\t\tbegin = int(request.json.get('begin', 0))\n\t\t\tlength = int(request.json.get('length', 25))\n\texcept:\n\t\tabort(403)\n\tif length > 100 :\n\t\tlength = 100\n\tuserList = User.list(begin, length)\n\tif userList == None:\n\t\tabort(400)\n\treturn jsonify({'users': map(lambda(e): e.output(), userList), 'begin': begin, 'length': len(userList)})", "def display_users(cls):\n return cls.user_list", "def get_users(self):\n users = []\n page = 1\n while not len(users) % 100:\n users += self._get('/users?{0}'.format(urllib.urlencode({'per_page': 100, 'page': page})))\n if not users:\n break\n page += 1\n return users", "def test_users_listed(self):\n\n # Get the admin url and send a GET request\n url = reverse('admin:core_user_changelist')\n res = self.client.get(url)\n\n # Assertions\n self.assertContains(res, self.user.name)\n self.assertContains(res, self.user.email)", "def test_get_all_user(self):\n response = self.client().get(AuthTestCase.admin)\n # assert the response code\n self.assertEqual(response.status_code, 200)", "def load_users(everyone):\n if user_list.loaded:\n return\n for user in iteritems(everyone):\n user_list.load(user[1])", "def _load_users(self) -> List[Dict]:\n try:\n api_call = self.web_client.api_call('users.list')\n if api_call.get('ok'):\n return api_call.get('members')\n except Exception:\n LOGGER.exception('Cannot get users')\n raise", "def user_list():\n if session['user_admin'] == False:\n abort(403)\n\n # Retrieve all Users\n sqa_sess = sqa_session()\n users = sqa_sess.query(User).all()\n\n return render_template('admin/user_list.html', users=users)", "def get_all_users():\n return Users.query.all()", "def get_all_users(connection):\r\n with connection:\r\n return len(connection.execute(GET_ALL_USERS).fetchall())", "def test_users_get(self):\n pass", "def test_users_get(self):\n pass", "def test_list(self, client, users):\n url = reverse('users:list')\n response = client.get(url)\n assert response.status_code == 200\n for user in users:\n assert user.username in str(response.content)", "def list_users(self):\n _url = \"http://\" + self.host_ip + \":35357/v2.0/users\"\n _body = None\n _headers = {'Content-type': 'application/json',\n 'x-auth-token': self.cloud_admin_info['token_project']}\n\n response = self.request(\"GET\", _url, _headers, _body)\n if response is None:\n LOG_OBJ.error(\" no response from Server\")\n return 
response\n if response.status not in [200, 201, 202, 203, 204]:\n LOG_OBJ.error(\n \"get user list Failed with status %s \" %\n response.status)\n return response.status\n\n output = json.loads(response.data)\n LOG_OBJ.info(\"users List : %s\")\n return output[\"users\"]", "def get_all_users():\n return User.query.all()", "def getResponsibleUsers():", "def test_display_all_users(self):\n self.assertEqual(User.display_users(), User.UserDetails)", "def get_users():\n return db.fetch_users()", "def test_get_all_users(self):\n\n email1 = \"[email protected]\"\n self.create_example_user(email1)\n\n email2 = \"[email protected]\"\n\n self.create_example_user(email2)\n\n users_get_endpoint_result = user.fetchall(self.database)\n\n verify_query = \"\"\"\n SELECT * FROM USERS;\"\"\"\n self.database.cursor.execute(verify_query)\n\n verify_rows = [r._asdict() for r in self.database.cursor.fetchall()]\n\n assert len(verify_rows) == len(users_get_endpoint_result)\n\n for (email, name, group_name, hashed_password, admin) in [\n (r[\"email\"], r[\"name\"], r[\"group_name\"], r[\"hashed_password\"], r[\"admin\"])\n for r in users_get_endpoint_result\n ]:\n\n self.verify_user_data(email, name, group_name, hashed_password, admin)", "def get(self):\n return get_all_users()", "def get(self):\n return get_all_users()", "def get(self):\n return get_all_users()", "def get(self):\n return get_all_users()", "async def list_users(self, ctx):\n \n path = \"Users\"\n headers = {\n 'accept': 'application/json'\n }\n response = send_request(method=\"get\", path=path, headers=headers)\n users = []\n for page in response:\n users.append(f\"**{page['Name']}**: ``{page['Id']}``\")\n log.debug(users)\n\n embed = embeds.make_embed(ctx=ctx, title=\"List Users\", image_url=\"https://emby.media/resources/logowhite_1881.png\")\n\n await LinePaginator.paginate([line for line in users], ctx, embed, restrict_to_user=ctx.author)", "def test_users_listed(self):\n # the url is defined in django admin documentation\n # it generate the url for the list of user page\n # it is good using that instead of the url in case it changes\n url = reverse('admin:core_user_changelist')\n res = self.client.get(url)\n\n self.assertContains(res, self.user.name)\n self.assertContains(res, self.user.email)", "def get_users():\n return Response(f\"{User.get_all_users()}\", 200, mimetype='text/plain')", "def test_getting_all(self):\n\n self.create_common_users_and_groups()\n\n sync = SyncUserAndGroups(\n tsurl=TS_URL,\n username=TS_USER,\n password=TS_PASSWORD,\n disable_ssl=True,\n )\n auag = sync.get_all_users_and_groups()\n\n # There are four constant users, tsadmin, guest, su, system\n self.assertEqual(9, auag.number_users())\n # There are two constant groups, Administrator and System\n self.assertEqual(9, auag.number_groups())", "def test_users_listed(self):\n url = reverse('admin:core_user_changelist')\n res = self.client.get(url)\n\n self.assertContains(res, self.user.name)\n self.assertContains(res, self.user.email)", "def get_all_users(self):\n query = \"SELECT * FROM users\"\n self.cursor.execute(query)\n result = self.cursor.fetchall()\n return result", "def user_list():\n\n users = User.query.all()\n return render_template(\"/user_list.html\", users=users)", "def test_users(self):\n users = (\"root\", \"matlab\")\n for user in users:\n with self.subTest(username=user):\n self.assertTrue(self.host.user(user).exists)", "async def read_all_users(db_handler: DBHandler = Depends(database_dependency)):\n all_user_records = await 
db_handler.select_users()\n all_user_records = [init_BaseUser(record) for record in all_user_records]\n\n return all_user_records", "def get_all_users(self):\n \n sql = \"select * from users\"\n return self._query_all(sql)", "def user_list():\n\n users = User.query.all()\n return render_template(\"user_list.html\", users=users)", "def user_list():\n\n users = User.query.all()\n return render_template(\"user_list.html\", users=users)", "def user_list():\n\n users = User.query.all()\n return render_template(\"user_list.html\", users=users)", "def user_list():\n\n users = User.query.all()\n return render_template(\"user_list.html\", users=users)", "def user_list():\n\n users = User.query.all()\n return render_template(\"user_list.html\", users=users)", "def user_list():\n\n users = User.query.all()\n return render_template(\"user_list.html\", users=users)", "def get_all_users_for_admin_purposes(connection):\r\n with connection:\r\n return connection.execute(GET_ALL_USERS).fetchall()[1]", "def get_all_users():\n token = request.headers.get('token')\n\n # Token Validation\n token_valid, response = is_token_valid(token)\n if not token_valid:\n return response\n token_username = response\n\n # Privilege handling\n if token_username != 'admin':\n return jsonify({'message': \"You aren't allowed to access this\"}), 404\n\n return jsonify(list(Users.values())), 200", "def user_list():\n\n users = User.query.all()\n \n return render_template(\"user_list.html\", users=users)", "def test_list(self):\n self.userbase('create', 'alice', 'localhost', SECRET)\n self.userbase('create', 'bob', 'localhost', SECRET)\n output = self.userbase('list')\n self.assertEqual(output, ['alice@localhost', 'bob@localhost'])", "def user_list():\n\n users = User.query.all()\n\n return render_template(\"user_list.html\", users=users)", "def test_listuser():\n url = baseUrl + userurl + listurl\n logging.info(\"List users\")\n r = requests.get(url, headers=header)\n assert r.status_code == 200\n resp = r.json()\n global user_ids\n user_ids = []\n if resp is None:\n pass\n else:\n user_num = len(resp)\n for k in range(0, user_num):\n assert resp[k]['subscriptionIds'][0] == subscriptionid\n if resp[k][\"isActive\"] is True:\n user_ids.append(resp[k][\"id\"])\n print (user_ids)\n assert user_id in user_ids", "def get_users(self):\n return self.execute(TABELLE['users']['select']['all'])", "async def test_list_user(hass: HomeAssistant, provider, capsys) -> None:\n data = provider.data\n data.add_auth(\"test-user\", \"test-pass\")\n data.add_auth(\"second-user\", \"second-pass\")\n\n await script_auth.list_users(hass, provider, None)\n\n captured = capsys.readouterr()\n\n assert captured.out == \"\\n\".join(\n [\"test-user\", \"second-user\", \"\", \"Total users: 2\", \"\"]\n )", "def get_all_users():\n db = api.db.get_conn()\n return list(db.users.find({}, {\"_id\": 0, \"password_hash\": 0}))", "def ListUsers(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def get_all_users(db):\n return list(db['user'].find())", "def test_user_list(self, mapp, user_list):\n user_list = set(user_list)\n res = set(mapp.getuserlist())\n assert len(user_list) == len(res) and user_list.issubset(res)", "def test_user_list(self):\n response = self.client.get('/tests/dashboard/')\n self.assertEqual(response.status_code, 200)\n self.assertContains(response, 'testuser', html=True)\n self.assertContains(response, '[email 
protected]', html=True)", "def test_serve_users(self):\n pass", "def getInterestedUsers():", "def get_all_users():\n return session.query(User).all()", "def list_users():\n\n db_users = User.query.all()\n\n return render_template(\"list_users.html\", headline=\"Blogly Users\", users=db_users)", "def get_users():\n users = functions.users()\n return users", "def get_all_users(self) -> tuple:\n users_list: dict_list_type = [user.to_dict() for user in UserModel.query().fetch()]\n message: str = 'successfully retrieved active users'\n return jsonify({'status': True, 'payload': users_list, 'message': message}), 200", "def all_users(self):\n\n cur = self.db.cursor()\n cur.execute(\n \"\"\"SELECT user_id, username, phone, email, role, date_created \n FROM users\"\"\")\n \n user_from_db = cur.fetchall()\n if cur.rowcount >= 1: \n resp = self.serialize_user(user_from_db) \n return resp\n return None", "def get_users(self):\n return self.get_all_dbusers()", "def fetch_users(self):\n users = super(type(self), self).fetch_users()\n return list(filter(self._check_active, users))", "def test_list_users_without_permissions(self):\n self.client.force_authenticate(user=self.user)\n\n response = self.client.get(reverse('user-list'))\n\n content = {\n 'detail': 'You do not have permission to perform this action.'\n }\n self.assertEqual(json.loads(response.content), content)\n\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def test_get_all_users(self):\n created_30_days_ago = datetime.datetime.utcnow() + datetime.timedelta(-30)\n add_user('neilb', '[email protected]', 'password123', created_30_days_ago)\n add_user('juneau', '[email protected]')\n with self.client:\n response = self.client.get('/users')\n data = json.loads(response.data.decode())\n self.assertEqual(response.status_code, 200)\n self.assertEqual(len(data['data']['users']),2)\n self.assertTrue('created_at' in data['data']['users'][0])\n self.assertTrue('created_at' in data['data']['users'][1])\n self.assertIn('juneau', data['data']['users'][0]['username'])\n self.assertIn('neilb', data['data']['users'][1]['username'])\n self.assertIn('success', data['status'])", "def list_users():\n users = User.query.order_by(User.last_name, User.first_name).all()\n return render_template('index.html', users=users)", "def _users_list(self):\n result = self.slack.api_call(\"users.list\", presence=0)\n\n if not result.get(\"ok\"):\n logging.error(result['error'])\n return None\n\n return result['members']", "def list_users(BrokerId=None, MaxResults=None, NextToken=None):\n pass", "def list_users(self, kwargs):\n verbose = kwargs.get(\"verbose\", False)\n filter_ = kwargs.get(\"filter\", \"all\")\n\n if verbose:\n attributes = self.engine.all_attributes()\n else:\n attributes = [\"sAMAccountName\", \"objectClass\"]\n\n if filter_ == \"all\":\n results = self.engine.query(self.engine.USER_ALL_FILTER(), attributes)\n elif filter_ == \"spn\":\n results = self.engine.query(self.engine.USER_SPN_FILTER(), attributes)\n elif filter_ == \"enabled\":\n results = self.engine.query(self.engine.USER_ACCOUNT_CONTROL_FILTER_NEG(USER_ACCOUNT_CONTROL[\"ACCOUNTDISABLE\"]), attributes)\n elif filter_ == \"disabled\":\n results = self.engine.query(self.engine.USER_ACCOUNT_CONTROL_FILTER(USER_ACCOUNT_CONTROL[\"ACCOUNTDISABLE\"]), attributes)\n elif filter_ == \"locked\":\n results = self.engine.query(self.engine.USER_LOCKED_FILTER(), attributes)\n elif filter_ == \"nopasswordexpire\":\n results = 
self.engine.query(self.engine.USER_ACCOUNT_CONTROL_FILTER(USER_ACCOUNT_CONTROL[\"DONT_EXPIRE_PASSWORD\"]), attributes)\n elif filter_ == \"passwordexpired\":\n results = self.engine.query(self.engine.USER_ACCOUNT_CONTROL_FILTER(USER_ACCOUNT_CONTROL[\"PASSWORD_EXPIRED\"]), attributes)\n elif filter_ == \"nokrbpreauth\":\n results = self.engine.query(self.engine.USER_ACCOUNT_CONTROL_FILTER(USER_ACCOUNT_CONTROL[\"DONT_REQ_PREAUTH\"]), attributes)\n elif filter_ == \"reversible\":\n results = self.engine.query(self.engine.USER_ACCOUNT_CONTROL_FILTER(USER_ACCOUNT_CONTROL[\"ENCRYPTED_TEXT_PWD_ALLOWED\"]), attributes)\n else:\n return None\n\n self.display(results, verbose)", "def locate_all_users(self, fields=\"all\"):\n if fields == \"all\":\n return_fields = all_fields\n else:\n return_fields = fields\n return self.ldap_connection.search_s(\"ou=Users,dc=redhat,dc=com\",\n ldap.SCOPE_SUBTREE, \"uid=*\", return_fields)", "def all_users(cls):\n return UsersModel.query.all()", "def user_list(server_object, client, address, command_args):\n\n\tmsg = \"\"\n\n\t#: Create a formatted string of all the users.\n\tfor usr in server_object.usrs.values():\n\t\tmsg += usr + '\\n'\n\n\tclient.send(msg.encode())", "def list_users():\n\ttry:\n\t\tusers_call = sc.api_call(\"users.list\")\n\t\tusers = []\n\t\tif users_call.get('ok'):\n\t\t\treturn users_call['members']\n\texcept:\n\t\tprint(\"users error\")\n\treturn None" ]
[ "0.78882915", "0.77124584", "0.76391757", "0.74921876", "0.74234813", "0.73127735", "0.72611696", "0.7228305", "0.721806", "0.71892637", "0.71163815", "0.70789593", "0.7072042", "0.7043019", "0.7032264", "0.70049846", "0.69722825", "0.6963775", "0.6953116", "0.6939012", "0.69312894", "0.6909668", "0.69072294", "0.69066954", "0.68652576", "0.68581694", "0.6855258", "0.6798622", "0.6784807", "0.6780094", "0.67742205", "0.6753531", "0.675106", "0.67373556", "0.67334545", "0.67310715", "0.6727806", "0.67186904", "0.67163527", "0.67094743", "0.6707128", "0.6707128", "0.66999173", "0.66926783", "0.6684429", "0.66824156", "0.6682361", "0.668214", "0.6675504", "0.6668153", "0.6668153", "0.6668153", "0.6668153", "0.6664838", "0.6659753", "0.66525525", "0.6647673", "0.6640664", "0.66358423", "0.6615185", "0.66122866", "0.6612057", "0.66110545", "0.6598012", "0.6598012", "0.6598012", "0.6598012", "0.6598012", "0.6598012", "0.6596543", "0.6591925", "0.65869814", "0.6585382", "0.65838724", "0.65819913", "0.6554039", "0.65464836", "0.65452677", "0.6524871", "0.6514638", "0.6508222", "0.65023416", "0.6500153", "0.6495827", "0.6493612", "0.64877474", "0.6482552", "0.64670104", "0.6463506", "0.6461568", "0.6461257", "0.6450611", "0.6445542", "0.644258", "0.643416", "0.6430794", "0.6430505", "0.64288735", "0.6427598", "0.6425202", "0.64088875" ]
0.0
-1
Ensure we can list all users.
def test_list_users_with_search(self): self.client.force_authenticate(user=self.admin) response = self.client.get(reverse('user-list') + '?search=chuck') self.assertEqual(json.loads(response.content)['count'], 1) # Users are ordered alphabetically by email first_user = json.loads(response.content)['results'][0] self.assertEqual(first_user['email'], self.admin.email) # Check the system doesn't return attributes not expected attributes = [ 'id', 'url', 'email', 'first_name', 'last_name', 'is_active', 'phone', 'other_phone', 'is_superuser', 'is_staff', 'university', 'last_login', 'date_joined', 'academic_level', 'academic_field', 'gender', 'language', 'birthdate', 'groups', 'user_permissions', 'tickets', 'membership', 'membership_end', 'city', 'personnal_restrictions', 'academic_program_code', 'faculty', 'student_number', 'volunteer_for_workplace', 'hide_newsletter', 'is_in_newsletter', 'number_of_free_virtual_retreat', 'membership_end_notification', 'get_number_of_past_tomatoes', 'get_number_of_future_tomatoes', 'last_acceptation_terms_and_conditions', 'tomato_field_matrix', 'current_month_tomatoes', ] for key in first_user.keys(): self.assertTrue( key in attributes, 'Attribute "{0}" is not expected but is ' 'returned by the system.'.format(key) ) attributes.remove(key) # Ensure the system returns all expected attributes self.assertTrue( len(attributes) == 0, 'The system failed to return some ' 'attributes : {0}'.format(attributes) ) self.assertEqual(response.status_code, status.HTTP_200_OK)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_all_users():", "def list_users(self):\n raise NotImplementedError", "def test_api_can_get_all_users(self):\n response = self.client().get('/api/v1/user/')\n self.assertTrue(response.status_code, 200)", "def test_admin_user_list_all_users(self):\n response = self.client.get(CONSTS.USER_ADMIN_LIST)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(response.data, self.users.data)", "def test_list_user(self):\n pass", "def test_get_all_users(self):\n api.user.create(\n username='chuck',\n email='[email protected]',\n password='secret',\n )\n users = [user.getUserName() for user in api.user.get_users()]\n\n self.assertEqual(users, ['chuck', TEST_USER_NAME])", "def test_user_get_all(self):\n response = self.app.get('/api/v3/users', headers=self.user_header)\n self.assertEqual(response.status_code, 401)", "def test_fetch_all_user(self):\n\n payload = self.get_req('api/v1/users')\n self.assertEqual(payload.status_code, 200)\n self.assertEqual(payload.json['users'], [])", "def all_users():\n\n users = crud.get_users()\n\n return render_template('all_users.html', users=users)", "def user_list(ctx):\n data = ctx.obj.get_all_users()\n output_json_data(data)", "def test_get_users(self):\n pass", "def test_admin_get_all(self):\n response = self.app.get('/api/v3/users', headers=self.admin_header)\n self.assertEqual(response.status_code, 200)", "def list_all_users():\n session_id = request.args.get('session-id', None)\n user_id = request.args.get('user-id', None)\n users_list = get_users_list()\n if check_authentication(session_id, user_id) and is_admin_user(user_id):\n return render_template('admin_area.html', user=user_id, session_id=session_id, users_list=users_list)\n else:\n return render_template('home.html', cars_list=get_cars_preview(), news_list=get_news_list(), authjs=False,\n preview_length=get_cars_preview().__len__(), del_session_cookie=True)", "def admin_can_view_all_user_accounts(self):\n resp = self.admin_create_user()\n reply = self.admin_create_user2()\n resp = self.admin_login()\n token = resp['token']\n\n resp = self.client.get(\n '/api/v1/users',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertIn('love', str(reply['users'][1]['username']))\n self.assertIn('walker', str(reply['users'][2]['username']))\n self.assertEqual(resp.status_code, 200)", "def test_get_users(self):\n print('(' + self.test_get_users.__name__+')',\n self.test_get_users.__doc__)\n users = self.connection.get_users()\n # Check we get right size of users table\n self.assertEqual(len(users), INITIAL_USERS_COUNT)\n # check PATIENT and DOCTOR data with users object we got\n for user in users:\n if user['username'] == PATIENT_USERNAME:\n self.assertDictContainsSubset(user, PATIENT['public_profile'])\n elif user['username'] == DOCTOR_USERNAME:\n self.assertDictContainsSubset(user, DOCTOR['public_profile'])", "def fetch_all_users():\n users = find_users()\n return to_response(users, \"No users\")", "def do_get_all_users(self, *args):\n self.user_data = self.connection_obj.get_all()\n self.__class__.print_func(self, self.user_data)", "def _list_users(self):\n users = fileIO.load_json(\"users.json\")\n print(\"The list of users is as follows:\")\n for i in users:\n print(users[i][\"name\"])\n self._list_user_settings(users)", "def listusers():\n\n try:\n users = User.query.order_by(User.email).all()\n click.echo(\n tabulate(\n [\n [u.username, u.email, \"admin\" if u.is_admin else None]\n for u in users\n ]\n )\n )\n 
except OperationalError:\n click.echo(\"Tabela de usuários inexistente...\")", "def _CheckUsers(self, all_users):\n summary = self.fd.GetSummary()\n self.assertItemsEqual([x.username for x in summary.users], all_users)\n\n users = [x.username for x in self.fd.Get(self.fd.Schema.USER)]\n self.assertItemsEqual(users, all_users)\n self.assertItemsEqual(self.fd.Get(self.fd.Schema.USERNAMES), all_users)\n\n # Check kb users\n kbusers = [x.username for x in\n self.fd.Get(self.fd.Schema.KNOWLEDGE_BASE).users]\n self.assertItemsEqual(kbusers, all_users)", "def get_all_users():\n return jsonify(admin.get_all_users(current_app.scoped_session()))", "def test_admin_user_list_all_users_permission_denied(self):\n self.client.logout()\n self.client.login(\n username=self.invalid_user.username,\n password=self.invalid_user.password\n )\n response = self.client.get(CONSTS.USER_ADMIN_LIST)\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def all_users(self):\n return range(self.n_users)", "def list_users():\n check_admin()\n results = User.query.order_by(-User.id)\n return render_template('user_list.html', users=results)", "def get_users(self):\r\n\t\tlogger.debug(\"Fetch users\")\r\n\t\t\r\n\t\treturn login.get_users()", "def test_initial_share_all_users(self) -> None:\n self.handler.search_all_users = True\n self.hs.config.userdirectory.user_directory_search_all_users = True\n\n u1 = self.register_user(\"user1\", \"pass\")\n self.register_user(\"user2\", \"pass\")\n u3 = self.register_user(\"user3\", \"pass\")\n\n shares_private = self.get_success(\n self.user_dir_helper.get_users_who_share_private_rooms()\n )\n public_users = self.get_success(\n self.user_dir_helper.get_users_in_public_rooms()\n )\n\n # No users share rooms\n self.assertEqual(public_users, set())\n self.assertEqual(shares_private, set())\n\n # Despite not sharing a room, search_all_users means we get a search\n # result.\n s = self.get_success(self.handler.search_users(u1, u3, 10))\n self.assertEqual(len(s[\"results\"]), 1)\n\n # We can find the other two users\n s = self.get_success(self.handler.search_users(u1, \"user\", 10))\n self.assertEqual(len(s[\"results\"]), 2)\n\n # Registering a user and then searching for them works.\n u4 = self.register_user(\"user4\", \"pass\")\n s = self.get_success(self.handler.search_users(u1, u4, 10))\n self.assertEqual(len(s[\"results\"]), 1)", "def test_user_list(self):\r\n self._add_demo_import()\r\n params = {\r\n 'api_key': self.api_key\r\n }\r\n res = self.testapp.get('/api/v1/a/users/list',\r\n params=params,\r\n status=200)\r\n\r\n # we should get back dict of count, users.\r\n data = json.loads(res.body)\r\n\r\n self.assertEqual(\r\n 1, data.get('count'), \"There are none by default. 
\" + res.body)\r\n self.assertEqual(\r\n 'admin',\r\n data.get('users')[0]['username'],\r\n \"The first user is from admin \" + res.body)\r\n self.assertEqual(\r\n '[email protected]',\r\n data.get('users')[0]['email'],\r\n \"The first user is from [email protected] \" + res.body)", "def test_get_users(self):\n users = app.get_users()\n self.assertEqual(len(users), 1)", "def list_users(self):\n return self.get_admin(\"users\")", "def __list_all_users(self):\n\n usernames_dict = get_data.get_usernames_dict()\n if len(usernames_dict) > 0:\n first_str = 'user'\n second_str = 'posts scraped'\n descriptor = '{:<40} {}'\n print('')\n print(descriptor.format(first_str, second_str))\n print(descriptor.format(len(first_str) * '-', len(second_str) * '-'))\n for number, username in usernames_dict.items():\n space_str = ' ' if len(str(number)) > 1 else ' '\n first = '[' + space_str + str(number) + '] ' + username\n second = str(get_data.get_user_post_count(username))\n print(descriptor.format(first, second))\n else:\n print('no users found in the database')", "def list_user():\n\tbegin = 0\n\tlength = 25\n\ttry:\n\t\tif request.json != None:\n\t\t\tbegin = int(request.json.get('begin', 0))\n\t\t\tlength = int(request.json.get('length', 25))\n\texcept:\n\t\tabort(403)\n\tif length > 100 :\n\t\tlength = 100\n\tuserList = User.list(begin, length)\n\tif userList == None:\n\t\tabort(400)\n\treturn jsonify({'users': map(lambda(e): e.output(), userList), 'begin': begin, 'length': len(userList)})", "def display_users(cls):\n return cls.user_list", "def get_users(self):\n users = []\n page = 1\n while not len(users) % 100:\n users += self._get('/users?{0}'.format(urllib.urlencode({'per_page': 100, 'page': page})))\n if not users:\n break\n page += 1\n return users", "def test_users_listed(self):\n\n # Get the admin url and send a GET request\n url = reverse('admin:core_user_changelist')\n res = self.client.get(url)\n\n # Assertions\n self.assertContains(res, self.user.name)\n self.assertContains(res, self.user.email)", "def test_get_all_user(self):\n response = self.client().get(AuthTestCase.admin)\n # assert the response code\n self.assertEqual(response.status_code, 200)", "def load_users(everyone):\n if user_list.loaded:\n return\n for user in iteritems(everyone):\n user_list.load(user[1])", "def _load_users(self) -> List[Dict]:\n try:\n api_call = self.web_client.api_call('users.list')\n if api_call.get('ok'):\n return api_call.get('members')\n except Exception:\n LOGGER.exception('Cannot get users')\n raise", "def user_list():\n if session['user_admin'] == False:\n abort(403)\n\n # Retrieve all Users\n sqa_sess = sqa_session()\n users = sqa_sess.query(User).all()\n\n return render_template('admin/user_list.html', users=users)", "def get_all_users():\n return Users.query.all()", "def get_all_users(connection):\r\n with connection:\r\n return len(connection.execute(GET_ALL_USERS).fetchall())", "def test_users_get(self):\n pass", "def test_users_get(self):\n pass", "def test_list(self, client, users):\n url = reverse('users:list')\n response = client.get(url)\n assert response.status_code == 200\n for user in users:\n assert user.username in str(response.content)", "def list_users(self):\n _url = \"http://\" + self.host_ip + \":35357/v2.0/users\"\n _body = None\n _headers = {'Content-type': 'application/json',\n 'x-auth-token': self.cloud_admin_info['token_project']}\n\n response = self.request(\"GET\", _url, _headers, _body)\n if response is None:\n LOG_OBJ.error(\" no response from Server\")\n return 
response\n if response.status not in [200, 201, 202, 203, 204]:\n LOG_OBJ.error(\n \"get user list Failed with status %s \" %\n response.status)\n return response.status\n\n output = json.loads(response.data)\n LOG_OBJ.info(\"users List : %s\")\n return output[\"users\"]", "def get_all_users():\n return User.query.all()", "def getResponsibleUsers():", "def test_display_all_users(self):\n self.assertEqual(User.display_users(), User.UserDetails)", "def get_users():\n return db.fetch_users()", "def test_get_all_users(self):\n\n email1 = \"[email protected]\"\n self.create_example_user(email1)\n\n email2 = \"[email protected]\"\n\n self.create_example_user(email2)\n\n users_get_endpoint_result = user.fetchall(self.database)\n\n verify_query = \"\"\"\n SELECT * FROM USERS;\"\"\"\n self.database.cursor.execute(verify_query)\n\n verify_rows = [r._asdict() for r in self.database.cursor.fetchall()]\n\n assert len(verify_rows) == len(users_get_endpoint_result)\n\n for (email, name, group_name, hashed_password, admin) in [\n (r[\"email\"], r[\"name\"], r[\"group_name\"], r[\"hashed_password\"], r[\"admin\"])\n for r in users_get_endpoint_result\n ]:\n\n self.verify_user_data(email, name, group_name, hashed_password, admin)", "def get(self):\n return get_all_users()", "def get(self):\n return get_all_users()", "def get(self):\n return get_all_users()", "def get(self):\n return get_all_users()", "async def list_users(self, ctx):\n \n path = \"Users\"\n headers = {\n 'accept': 'application/json'\n }\n response = send_request(method=\"get\", path=path, headers=headers)\n users = []\n for page in response:\n users.append(f\"**{page['Name']}**: ``{page['Id']}``\")\n log.debug(users)\n\n embed = embeds.make_embed(ctx=ctx, title=\"List Users\", image_url=\"https://emby.media/resources/logowhite_1881.png\")\n\n await LinePaginator.paginate([line for line in users], ctx, embed, restrict_to_user=ctx.author)", "def test_users_listed(self):\n # the url is defined in django admin documentation\n # it generate the url for the list of user page\n # it is good using that instead of the url in case it changes\n url = reverse('admin:core_user_changelist')\n res = self.client.get(url)\n\n self.assertContains(res, self.user.name)\n self.assertContains(res, self.user.email)", "def get_users():\n return Response(f\"{User.get_all_users()}\", 200, mimetype='text/plain')", "def test_getting_all(self):\n\n self.create_common_users_and_groups()\n\n sync = SyncUserAndGroups(\n tsurl=TS_URL,\n username=TS_USER,\n password=TS_PASSWORD,\n disable_ssl=True,\n )\n auag = sync.get_all_users_and_groups()\n\n # There are four constant users, tsadmin, guest, su, system\n self.assertEqual(9, auag.number_users())\n # There are two constant groups, Administrator and System\n self.assertEqual(9, auag.number_groups())", "def test_users_listed(self):\n url = reverse('admin:core_user_changelist')\n res = self.client.get(url)\n\n self.assertContains(res, self.user.name)\n self.assertContains(res, self.user.email)", "def get_all_users(self):\n query = \"SELECT * FROM users\"\n self.cursor.execute(query)\n result = self.cursor.fetchall()\n return result", "def user_list():\n\n users = User.query.all()\n return render_template(\"/user_list.html\", users=users)", "def test_users(self):\n users = (\"root\", \"matlab\")\n for user in users:\n with self.subTest(username=user):\n self.assertTrue(self.host.user(user).exists)", "async def read_all_users(db_handler: DBHandler = Depends(database_dependency)):\n all_user_records = await 
db_handler.select_users()\n all_user_records = [init_BaseUser(record) for record in all_user_records]\n\n return all_user_records", "def get_all_users(self):\n \n sql = \"select * from users\"\n return self._query_all(sql)", "def user_list():\n\n users = User.query.all()\n return render_template(\"user_list.html\", users=users)", "def user_list():\n\n users = User.query.all()\n return render_template(\"user_list.html\", users=users)", "def user_list():\n\n users = User.query.all()\n return render_template(\"user_list.html\", users=users)", "def user_list():\n\n users = User.query.all()\n return render_template(\"user_list.html\", users=users)", "def user_list():\n\n users = User.query.all()\n return render_template(\"user_list.html\", users=users)", "def user_list():\n\n users = User.query.all()\n return render_template(\"user_list.html\", users=users)", "def get_all_users_for_admin_purposes(connection):\r\n with connection:\r\n return connection.execute(GET_ALL_USERS).fetchall()[1]", "def get_all_users():\n token = request.headers.get('token')\n\n # Token Validation\n token_valid, response = is_token_valid(token)\n if not token_valid:\n return response\n token_username = response\n\n # Privilege handling\n if token_username != 'admin':\n return jsonify({'message': \"You aren't allowed to access this\"}), 404\n\n return jsonify(list(Users.values())), 200", "def user_list():\n\n users = User.query.all()\n \n return render_template(\"user_list.html\", users=users)", "def test_list(self):\n self.userbase('create', 'alice', 'localhost', SECRET)\n self.userbase('create', 'bob', 'localhost', SECRET)\n output = self.userbase('list')\n self.assertEqual(output, ['alice@localhost', 'bob@localhost'])", "def user_list():\n\n users = User.query.all()\n\n return render_template(\"user_list.html\", users=users)", "def test_listuser():\n url = baseUrl + userurl + listurl\n logging.info(\"List users\")\n r = requests.get(url, headers=header)\n assert r.status_code == 200\n resp = r.json()\n global user_ids\n user_ids = []\n if resp is None:\n pass\n else:\n user_num = len(resp)\n for k in range(0, user_num):\n assert resp[k]['subscriptionIds'][0] == subscriptionid\n if resp[k][\"isActive\"] is True:\n user_ids.append(resp[k][\"id\"])\n print (user_ids)\n assert user_id in user_ids", "def get_users(self):\n return self.execute(TABELLE['users']['select']['all'])", "async def test_list_user(hass: HomeAssistant, provider, capsys) -> None:\n data = provider.data\n data.add_auth(\"test-user\", \"test-pass\")\n data.add_auth(\"second-user\", \"second-pass\")\n\n await script_auth.list_users(hass, provider, None)\n\n captured = capsys.readouterr()\n\n assert captured.out == \"\\n\".join(\n [\"test-user\", \"second-user\", \"\", \"Total users: 2\", \"\"]\n )", "def get_all_users():\n db = api.db.get_conn()\n return list(db.users.find({}, {\"_id\": 0, \"password_hash\": 0}))", "def ListUsers(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def get_all_users(db):\n return list(db['user'].find())", "def test_user_list(self, mapp, user_list):\n user_list = set(user_list)\n res = set(mapp.getuserlist())\n assert len(user_list) == len(res) and user_list.issubset(res)", "def test_user_list(self):\n response = self.client.get('/tests/dashboard/')\n self.assertEqual(response.status_code, 200)\n self.assertContains(response, 'testuser', html=True)\n self.assertContains(response, '[email 
protected]', html=True)", "def test_serve_users(self):\n pass", "def getInterestedUsers():", "def get_all_users():\n return session.query(User).all()", "def list_users():\n\n db_users = User.query.all()\n\n return render_template(\"list_users.html\", headline=\"Blogly Users\", users=db_users)", "def get_users():\n users = functions.users()\n return users", "def get_all_users(self) -> tuple:\n users_list: dict_list_type = [user.to_dict() for user in UserModel.query().fetch()]\n message: str = 'successfully retrieved active users'\n return jsonify({'status': True, 'payload': users_list, 'message': message}), 200", "def all_users(self):\n\n cur = self.db.cursor()\n cur.execute(\n \"\"\"SELECT user_id, username, phone, email, role, date_created \n FROM users\"\"\")\n \n user_from_db = cur.fetchall()\n if cur.rowcount >= 1: \n resp = self.serialize_user(user_from_db) \n return resp\n return None", "def get_users(self):\n return self.get_all_dbusers()", "def fetch_users(self):\n users = super(type(self), self).fetch_users()\n return list(filter(self._check_active, users))", "def test_list_users_without_permissions(self):\n self.client.force_authenticate(user=self.user)\n\n response = self.client.get(reverse('user-list'))\n\n content = {\n 'detail': 'You do not have permission to perform this action.'\n }\n self.assertEqual(json.loads(response.content), content)\n\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def test_get_all_users(self):\n created_30_days_ago = datetime.datetime.utcnow() + datetime.timedelta(-30)\n add_user('neilb', '[email protected]', 'password123', created_30_days_ago)\n add_user('juneau', '[email protected]')\n with self.client:\n response = self.client.get('/users')\n data = json.loads(response.data.decode())\n self.assertEqual(response.status_code, 200)\n self.assertEqual(len(data['data']['users']),2)\n self.assertTrue('created_at' in data['data']['users'][0])\n self.assertTrue('created_at' in data['data']['users'][1])\n self.assertIn('juneau', data['data']['users'][0]['username'])\n self.assertIn('neilb', data['data']['users'][1]['username'])\n self.assertIn('success', data['status'])", "def list_users():\n users = User.query.order_by(User.last_name, User.first_name).all()\n return render_template('index.html', users=users)", "def _users_list(self):\n result = self.slack.api_call(\"users.list\", presence=0)\n\n if not result.get(\"ok\"):\n logging.error(result['error'])\n return None\n\n return result['members']", "def list_users(BrokerId=None, MaxResults=None, NextToken=None):\n pass", "def list_users(self, kwargs):\n verbose = kwargs.get(\"verbose\", False)\n filter_ = kwargs.get(\"filter\", \"all\")\n\n if verbose:\n attributes = self.engine.all_attributes()\n else:\n attributes = [\"sAMAccountName\", \"objectClass\"]\n\n if filter_ == \"all\":\n results = self.engine.query(self.engine.USER_ALL_FILTER(), attributes)\n elif filter_ == \"spn\":\n results = self.engine.query(self.engine.USER_SPN_FILTER(), attributes)\n elif filter_ == \"enabled\":\n results = self.engine.query(self.engine.USER_ACCOUNT_CONTROL_FILTER_NEG(USER_ACCOUNT_CONTROL[\"ACCOUNTDISABLE\"]), attributes)\n elif filter_ == \"disabled\":\n results = self.engine.query(self.engine.USER_ACCOUNT_CONTROL_FILTER(USER_ACCOUNT_CONTROL[\"ACCOUNTDISABLE\"]), attributes)\n elif filter_ == \"locked\":\n results = self.engine.query(self.engine.USER_LOCKED_FILTER(), attributes)\n elif filter_ == \"nopasswordexpire\":\n results = 
self.engine.query(self.engine.USER_ACCOUNT_CONTROL_FILTER(USER_ACCOUNT_CONTROL[\"DONT_EXPIRE_PASSWORD\"]), attributes)\n elif filter_ == \"passwordexpired\":\n results = self.engine.query(self.engine.USER_ACCOUNT_CONTROL_FILTER(USER_ACCOUNT_CONTROL[\"PASSWORD_EXPIRED\"]), attributes)\n elif filter_ == \"nokrbpreauth\":\n results = self.engine.query(self.engine.USER_ACCOUNT_CONTROL_FILTER(USER_ACCOUNT_CONTROL[\"DONT_REQ_PREAUTH\"]), attributes)\n elif filter_ == \"reversible\":\n results = self.engine.query(self.engine.USER_ACCOUNT_CONTROL_FILTER(USER_ACCOUNT_CONTROL[\"ENCRYPTED_TEXT_PWD_ALLOWED\"]), attributes)\n else:\n return None\n\n self.display(results, verbose)", "def locate_all_users(self, fields=\"all\"):\n if fields == \"all\":\n return_fields = all_fields\n else:\n return_fields = fields\n return self.ldap_connection.search_s(\"ou=Users,dc=redhat,dc=com\",\n ldap.SCOPE_SUBTREE, \"uid=*\", return_fields)", "def all_users(cls):\n return UsersModel.query.all()", "def user_list(server_object, client, address, command_args):\n\n\tmsg = \"\"\n\n\t#: Create a formatted string of all the users.\n\tfor usr in server_object.usrs.values():\n\t\tmsg += usr + '\\n'\n\n\tclient.send(msg.encode())", "def list_users():\n\ttry:\n\t\tusers_call = sc.api_call(\"users.list\")\n\t\tusers = []\n\t\tif users_call.get('ok'):\n\t\t\treturn users_call['members']\n\texcept:\n\t\tprint(\"users error\")\n\treturn None" ]
[ "0.78882915", "0.77124584", "0.76391757", "0.74921876", "0.74234813", "0.73127735", "0.72611696", "0.7228305", "0.721806", "0.71892637", "0.71163815", "0.70789593", "0.7072042", "0.7043019", "0.7032264", "0.70049846", "0.69722825", "0.6963775", "0.6953116", "0.6939012", "0.69312894", "0.6909668", "0.69072294", "0.69066954", "0.68652576", "0.68581694", "0.6855258", "0.6798622", "0.6784807", "0.6780094", "0.67742205", "0.6753531", "0.675106", "0.67373556", "0.67334545", "0.67310715", "0.6727806", "0.67186904", "0.67163527", "0.67094743", "0.6707128", "0.6707128", "0.66999173", "0.66926783", "0.6684429", "0.66824156", "0.6682361", "0.668214", "0.6675504", "0.6668153", "0.6668153", "0.6668153", "0.6668153", "0.6664838", "0.6659753", "0.66525525", "0.6647673", "0.6640664", "0.66358423", "0.6615185", "0.66122866", "0.6612057", "0.66110545", "0.6598012", "0.6598012", "0.6598012", "0.6598012", "0.6598012", "0.6598012", "0.6596543", "0.6591925", "0.65869814", "0.6585382", "0.65838724", "0.65819913", "0.6554039", "0.65464836", "0.65452677", "0.6524871", "0.6514638", "0.6508222", "0.65023416", "0.6500153", "0.6495827", "0.6493612", "0.64877474", "0.6482552", "0.64670104", "0.6463506", "0.6461568", "0.6461257", "0.6450611", "0.6445542", "0.644258", "0.643416", "0.6430794", "0.6430505", "0.64288735", "0.6427598", "0.6425202", "0.64088875" ]
0.0
-1
Ensure we can't list users without authentication.
def test_list_users_without_authenticate(self): response = self.client.get(reverse('user-list')) content = {"detail": "Authentication credentials were not provided."} self.assertEqual(json.loads(response.content), content) self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_list_users_without_permissions(self):\n self.client.force_authenticate(user=self.user)\n\n response = self.client.get(reverse('user-list'))\n\n content = {\n 'detail': 'You do not have permission to perform this action.'\n }\n self.assertEqual(json.loads(response.content), content)\n\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def test_unauthenticated_user_denial(self):\n\n self.response = self.client.get(\"/api/users/users_list/\")\n self.assertEqual(self.response.status_code, status.HTTP_403_FORBIDDEN)\n self.assertEqual(\n 'Authentication credentials were not provided.', self.response.data['detail'])", "def test_admin_user_list_all_users_permission_denied(self):\n self.client.logout()\n self.client.login(\n username=self.invalid_user.username,\n password=self.invalid_user.password\n )\n response = self.client.get(CONSTS.USER_ADMIN_LIST)\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def test_get_users_non_admin(client: FlaskClient) -> None:\n # Non-admin users are not allowed to make the request\n username = create_random_username()\n auth_token = create_auth_token(username)\n response = get_users(client, auth_token.signed)\n assert_error_response(response, HTTPStatus.FORBIDDEN)", "def test_if_forbiden_for_authenticated_permissions(self):\r\n res = self.client_authenticated.get(reverse(LIST_USER_URL),data={})\r\n self.assertEqual(res.status_code, status.HTTP_403_FORBIDDEN)", "def test_get_users_unauthenticated(client: FlaskClient) -> None:\n # Unauthenticated users are not allowed to make the request\n response = get_users(client)\n assert_error_response(response, HTTPStatus.UNAUTHORIZED)", "def test_user_get_all(self):\n response = self.app.get('/api/v3/users', headers=self.user_header)\n self.assertEqual(response.status_code, 401)", "def test_cannot_view_all_users_with_blacklisted_token(self):\n resp = self.admin_create_user()\n reply = self.admin_create_user2()\n resp = self.admin_login()\n token = resp['token']\n\n resp = self.client.delete(\n '/api/v1/logout',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'You are successfully logged out!')\n self.assertEqual(resp.status_code, 200)\n\n resp = self.client.get(\n '/api/v1/users',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'Invalid Authentication, Please Login!')\n self.assertEqual(resp.status_code, 401)", "def test_10_admin_user_not_listed(self):\r\n self.register()\r\n res = self.app.get('/admin/users', follow_redirects=True)\r\n assert \"Manage Admin Users\" in res.data, res.data\r\n assert \"Current Users with Admin privileges\" not in res.data, res.data\r\n assert \"John\" not in res.data, res.data", "def test_if_not_available_for_unauthorized(self):\r\n res = self.not_authenticated.get(reverse(LIST_USER_URL),data={})\r\n self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_show_private_lists_invalid(self):\n with self.client as c:\n with c.session_transaction() as sess:\n sess[CURR_USER_KEY] = self.user2.id\n \n res = c.get(\"/users/tester1/private-lists\")\n\n self.assertEqual(res.status_code, 302)", "def test_anonymous_user_read(self):\r\n with self.flask_app.test_request_context('/'):\r\n for token in self.auth_providers:\r\n assert_raises(Unauthorized,\r\n getattr(require, 'token').read,\r\n token)", "def test_not_authenticated(self):\n pass # lint-amnesty, 
pylint: disable=unnecessary-pass", "def test_not_authenticated(self):\n pass # lint-amnesty, pylint: disable=unnecessary-pass", "def test_no_token_get_all(self):\n response = self.app.get('/api/v3/users')\n self.assertEqual(response.status_code, 401)", "def test_unauthenticated_resource_allowed(self):\n raise NotImplementedError # FIXME", "def test_authenticated_user_read(self):\r\n with self.flask_app.test_request_context('/'):\r\n for token in self.auth_providers:\r\n assert_raises(Forbidden,\r\n getattr(require, 'token').read,\r\n token)", "def test_user_list_get_without_auth(client):\n\n response = client.get(\"/users\", headers={\"Accept\": \"application/vnd.api+json\"})\n assert response.status_code == 401\n assert get_content_type(response) == \"application/vnd.api+json\"\n assert json.loads(response.data.decode()) == {\n \"errors\": [\n {\n \"status\": 401,\n \"title\": \"Unauthorized\",\n \"detail\": \"Missing Authorization Header\",\n }\n ]\n }", "def test_list_user(self):\n pass", "def test_unauthenticated_get(self):\n url = reverse('edit-list')\n\n response = self.client.get(url)\n self.assertEqual(403, response.status_code)\n self.assertEqual('Forbidden', response.status_text)\n self.assertTrue(\n 'credentials were not provided.' in response.data.get('detail'))", "def test_list_not_authenticated(self):\n response = self.client.get(\n reverse('retreat:waitqueuenotification-list'),\n format='json',\n )\n\n content = {'detail': 'Authentication credentials were not provided.'}\n\n self.assertEqual(json.loads(response.content), content)\n\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_need_login_to_see_usagelist(self):\n response = self.client.get(reverse('api_v1:usage-list'), follow=True)\n self.assertEqual(response.status_code, 403)", "def test_list_members_without_auth(self):\n url = '/api/v1/communities/3/retrieve_members/'\n\n response = self.client.get(url)\n self.assertEqual(status.HTTP_401_UNAUTHORIZED, response.status_code)", "def test_get_work_list_forbidden(self):\n # Attempt to get works list\n response = self.client.get(self.url)\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_get_list_of_followers_without_auth(self):\n with self.settings(\n EMAIL_BACKEND='django.core.mail.backends.locmem.EmailBackend'):\n response = self.client.get(self.followers_url)\n self.assertEqual(response.data['detail'],\n \"Authentication credentials were not provided.\")\n\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def get_illegal_users(sessions, users):\n\n # Don't care about specific users being allowed to log in\n if not users:\n return []\n\n illegal_users = []\n\n # Record user sessions not whitelisted by the check\n for session in sessions:\n user = sessions[session]['user']\n if users:\n if user not in users:\n illegal_users.append(session + ':' + user)\n\n return illegal_users", "def test_show_private_lists_valid(self):\n with self.client as c:\n with c.session_transaction() as sess:\n sess[CURR_USER_KEY] = self.user1.id\n \n res = c.get(\"/users/tester1/private-lists\")\n\n self.assertEqual(res.status_code, 200)", "def test_list_playlists_by_anonymous_user(self):\n factories.PlaylistFactory()\n response = self.client.get(\"/api/playlists/\")\n self.assertEqual(response.status_code, 401)", "def test_list_not_authenticated(self):\n response = self.client.get('/api/products/')\n expected = {'detail': 'Authentication credentials were not provided.'}\n self.assertEqual(response.status_code, 
401)\n self.assertEqual(response.json(), expected)", "def test_not_logged_user_cannot_access(self):\n\n utils.test_not_logged_cannot_access(self, self.url)", "def test_not_logged_user_cannot_access(self):\n\n utils.test_not_logged_cannot_access(self, self.url)", "def test_not_logged_user_cannot_access(self):\n\n utils.test_not_logged_cannot_access(self, self.url)", "def test_not_logged_user_cannot_access(self):\n\n utils.test_not_logged_cannot_access(self, self.url)", "def test_auth_private_unowned(self):\n self.do_visible(False, 'pattieblack', False, tenant='froggy')", "def get_authenticated_denied(self):", "def test_listing_supplies_unauthenticated(self):\n request = self.factory.get('/api/supplies')\n response = SupplyListView.as_view()(request)\n # no permission\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_list_containers_with_non_authorized_user(self):\n\n test_auth_provider = self.os_operator.auth_provider\n # Get auth for the test user\n test_auth_provider.auth_data\n\n # Get fresh auth for test user and set it to next auth request for\n # account_client\n delattr(test_auth_provider, 'auth_data')\n test_auth_new_data = test_auth_provider.auth_data\n self.account_client.auth_provider.set_alt_auth_data(\n request_part='headers',\n auth_data=test_auth_new_data\n )\n\n params = {'format': 'json'}\n # list containers with non-authorized user token\n self.assertRaises(lib_exc.Forbidden,\n self.account_client.list_account_containers,\n params=params)", "def test_api__get_user_workspaces__err_403__unallowed_user(self):\n self.testapp.authorization = (\n 'Basic',\n (\n '[email protected]',\n 'foobarbaz'\n )\n )\n res = self.testapp.get('/api/v2/users/1/workspaces', status=403)\n assert isinstance(res.json, dict)\n assert 'code' in res.json.keys()\n assert 'message' in res.json.keys()\n assert 'details' in res.json.keys()", "def test_list_members_without_member_rights(self):\n url = '/api/v1/communities/3/retrieve_members/'\n\n response = self.client.get(url, HTTP_AUTHORIZATION=self.auth('user4'))\n self.assertEqual(status.HTTP_401_UNAUTHORIZED, response.status_code)", "def list_users(self):\n raise NotImplementedError", "def attendants_cannot_view_user_accounts(self):\n reply = self.admin_create_user()\n resp = self.attendant_login()\n token = resp['token']\n resp = self.client.get(\n '/api/v1/users',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'Unauthorized Access!')\n self.assertEqual(resp.status_code, 401)", "def test_requester_is_no_admin(self) -> None:\n\n channel = self.make_request(\n \"GET\",\n self.url,\n access_token=self.other_user_tok,\n )\n\n self.assertEqual(403, channel.code, msg=channel.json_body)\n self.assertEqual(Codes.FORBIDDEN, channel.json_body[\"errcode\"])", "def test_requester_is_no_admin(self) -> None:\n\n channel = self.make_request(\n \"GET\",\n self.url,\n access_token=self.other_user_tok,\n )\n\n self.assertEqual(403, channel.code, msg=channel.json_body)\n self.assertEqual(Codes.FORBIDDEN, channel.json_body[\"errcode\"])", "def test_no_user_filter(self):\n self.sync.sync_users()\n self.assertEqual(self.ldapobj.methods_called(), [])", "def test_empty_list(self, client):\n url = reverse('users:list')\n response = client.get(url)\n assert response.status_code == 200\n assert 'There is no users yet' in str(response.content)", "def test_get_user_by_id_unauthenticated(client: FlaskClient) -> None:\n username = create_random_username()\n # 
Unauthenticated users are not allowed to make the request\n response = get_user(client, username)\n assert_error_response(response, HTTPStatus.UNAUTHORIZED)", "def test_retrieve_users_unauthorized(setup_client):\n client = setup_client\n res = client.get(ME_URL)\n assert res.status_code == status.HTTP_401_UNAUTHORIZED", "def test_retrieve_user_unautherized(self):\n res = self.client.get(ME_URL)\n self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_unauthenticated(self):\n self.logout_user()\n\n response = self.client.get(self.api_link)\n self.assertContains(response, \"sign in to use private threads\", status_code=403)", "def test_get_all_accessible_by_hash_list_as_anonymous_with_access_right_does_not_return_user_template(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n templates = template_api.get_all_accessible_by_hash_list(\n [self.fixture.user1_template.hash], request=mock_request\n )\n self.assertTrue(templates.count() == 0)", "def test_get_all_accessible_by_hash_list_as_anonymous_does_not_return_user_template(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n templates = template_api.get_all_accessible_by_hash_list(\n [self.fixture.user1_template.hash], request=mock_request\n )\n self.assertTrue(templates.count() == 0)", "def test_get_users(self):\n pass", "def test_list_members_without_mod_rights(self):\n url = '/api/v1/communities/3/retrieve_members/'\n\n response = self.client.get(url, HTTP_AUTHORIZATION=self.auth('user3'))\n self.assertEqual(status.HTTP_401_UNAUTHORIZED, response.status_code)", "def test_many_users_fail(helper):\r\n helper.register_users(10)\r\n\r\n invalid_email = 'harryswrongemail.com'\r\n with pytest.raises(InputError) as e:\r\n auth_login_v1(email=invalid_email,\r\n password='verywrongpassword')\r\n \r\n assert f'Email {invalid_email} does not belong to a user.' 
in str(e.value)", "def test_retrive_user_unauthenticated(self):\n res = self.client.get(ME_URL)\n\n self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_11_admin_user_not_listed_in_search(self):\r\n self.register()\r\n data = {'user': 'john'}\r\n res = self.app.post('/admin/users', data=data, follow_redirects=True)\r\n assert \"Manage Admin Users\" in res.data, res.data\r\n assert \"Current Users with Admin privileges\" not in res.data, res.data\r\n assert \"John\" not in res.data, res.data", "def test_api_video_read_list_staff_or_user(self):\n for user in [factories.UserFactory(), factories.UserFactory(is_staff=True)]:\n self.client.login(username=user.username, password=\"test\")\n factories.VideoFactory()\n response = self.client.get(\"/api/videos/\")\n self.assertEqual(response.status_code, 401)", "async def test_list_user(hass: HomeAssistant, provider, capsys) -> None:\n data = provider.data\n data.add_auth(\"test-user\", \"test-pass\")\n data.add_auth(\"second-user\", \"second-pass\")\n\n await script_auth.list_users(hass, provider, None)\n\n captured = capsys.readouterr()\n\n assert captured.out == \"\\n\".join(\n [\"test-user\", \"second-user\", \"\", \"Total users: 2\", \"\"]\n )", "def test_get_work_type_list_forbidden(self):\n # Attempt to get work type list\n response = self.client.get(self.url)\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_auth_user_fail_bad_username(self):\n\n self.assertFalse(User.authenticate(\"invalid\", \"allison\"))", "def listusers():\n\n try:\n users = User.query.order_by(User.email).all()\n click.echo(\n tabulate(\n [\n [u.username, u.email, \"admin\" if u.is_admin else None]\n for u in users\n ]\n )\n )\n except OperationalError:\n click.echo(\"Tabela de usuários inexistente...\")", "def test_list_not_admin2(self):\n self.client.force_authenticate(user=self.user)\n\n response = self.client.get(\n reverse('retreat:waitqueuenotification-list'),\n format='json',\n )\n\n content = {\n 'count': 0,\n 'next': None,\n 'previous': None,\n 'results': [],\n }\n\n self.assertEqual(json.loads(response.content), content)\n\n self.assertEqual(response.status_code, status.HTTP_200_OK)", "def test_list_my_memberships_without_auth(self):\n url = '/api/v1/communities/0/list_my_memberships/'\n\n response = self.client.get(url)\n self.assertEqual(status.HTTP_401_UNAUTHORIZED, response.status_code)", "def testGetAccessDenied(self):\n self.runGet(None)\n self.response_401()\n for user in (self.norole, self.unrelated_owner):\n self.runGet(user)\n self.response_403()", "def test_list_members_with_mod_rights_not_accepted(self):\n url = '/api/v1/communities/3/retrieve_members/'\n\n # Test before acceptation\n response = self.client.get(url, HTTP_AUTHORIZATION=self.auth('user2'))\n self.assertEqual(status.HTTP_401_UNAUTHORIZED, response.status_code)", "def test_login_request_with_an_incorrect_authentication(self):\n user_res = self.ph.create_user(self.test_user_name, self.test_user_password)\n self.assertEqual(user_res.status_code, status.HTTP_201_CREATED)\n res = self.test_client.get(\n url_for('api.featurelistresource', _external=True))\n self.assertTrue(res.status_code == status.HTTP_401_UNAUTHORIZED)", "def test_admin_user_list_all_users(self):\n response = self.client.get(CONSTS.USER_ADMIN_LIST)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(response.data, self.users.data)", "def test_get_request_should_not_work(self):\n response = self.client.get(reverse('user-list'))\n 
self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)", "def test_api_can_get_all_users(self):\n response = self.client().get('/api/v1/user/')\n self.assertTrue(response.status_code, 200)", "def test_get_with_auth_not_staff(self):\n u = UserFactory()\n u.set_password('123')\n u.save()\n\n auth_url = prepare_url('login')\n data = {\n 'username': u.username,\n 'password': '123'\n }\n response = self.client.post(auth_url, data=data, format='json')\n token = response.data['token']\n\n url = prepare_url('admin-cities-list')\n self.client.credentials(HTTP_AUTHORIZATION='{} {}'.format(api_settings.JWT_AUTH_HEADER_PREFIX, token))\n response = self.client.get(url, format='json')\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def test_retrieve_unauthorized_user(self):\n\n response = self.client.get(URL_ME)\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_listOnClean(self):\n output = self.userbase('list')\n self.assertEqual(output, ['No accounts'])", "def test_anonymous_user_create(self):\r\n with self.flask_app.test_request_context('/'):\r\n for token in self.auth_providers:\r\n assert_raises(Unauthorized,\r\n getattr(require, 'token').create,\r\n token)", "def test_followers_following_list_unauthorized(self):\n\n self.u2.following.append(self.u)\n db.session.commit()\n\n with self.client as client:\n response = client.get(\"/users/2/following\")\n\n self.assertEqual(response.location, \"http://localhost/\")\n self.assertIn('Access unauthorized.', get_flashed_messages())\n\n response2 = client.get(\"/users/2/followers\")\n\n self.assertEqual(response2.location, \"http://localhost/\")\n self.assertIn('Access unauthorized.', get_flashed_messages())", "def test_auth_empty_list(self):\n # prepare search mock\n self.mock_conn.search.return_value = True\n # bind operation\n self.mock_conn.bind.return_value = True\n # response to the search and bind calls\n self.mock_conn.response.__len__.return_value = 1\n fake_resp = {\n 'attributes': {\n 'mail': ['[email protected]'],\n 'cn': ['Bar User', 'Baruser'],\n 'title': [],\n },\n 'type': 'searchResEntry',\n 'dn': 'uid=000000000,c=de,ou=base,o=foo.com',\n }\n self.mock_conn.response.__getitem__.return_value = fake_resp\n\n # perform action\n ldap_manager = ldap.MANAGER()\n\n # validate response\n check_resp = {\n 'login': fake_resp['attributes']['mail'][0],\n 'fullname': fake_resp['attributes']['cn'][0],\n 'title': '',\n }\n self.assertEqual(\n check_resp, ldap_manager.authenticate('baruser', 'barpwd'))", "def test_unauthenticated_service_blocked(self):\n raise NotImplementedError # FIXME", "def test_user_list_get_with_invalid_auth(client):\n\n response = client.get(\n \"/users\",\n headers={\"Accept\": \"application/vnd.api+json\", \"Authorization\": \"abcdefg\"},\n )\n assert response.status_code == 422\n assert get_content_type(response) == \"application/vnd.api+json\"\n assert json.loads(response.data.decode()) == {\n \"errors\": [\n {\n \"status\": 422,\n \"title\": \"Unprocessable Entity\",\n \"detail\": \"Bad Authorization header. 
Expected value 'Bearer <JWT>'\",\n }\n ]\n }", "def check_auth_none(self, username):\n return AUTH_FAILED", "def test_get_bad_user(self):\r\n user = UserMgr.get(username=u'noexist')\r\n\r\n self.assertEqual(\r\n user,\r\n None,\r\n \"Should not find a non-existant user: \" + str(user))", "def test_auth_public_unowned(self):\n self.do_visible(True, 'pattieblack', True, tenant='froggy')", "def test_not_authenticated_non_public_course_with_blank_username(self):\n self.client.logout()\n self.query_params['username'] = ''\n self.verify_response(403)", "def test_get_user_fail_unauthorised():\n\n client = APIClient()\n\n response = client.get(reverse(\"user-detail\"), format=\"json\")\n assert response.status_code == status.HTTP_401_UNAUTHORIZED", "def test_users_list_view(self):\n target_url = url_for('users.list_users')\n redirect_url = url_for('users.login', next=target_url)\n response = self.client.get(target_url)\n self.assertEqual(response.status_code, 302)\n self.assertRedirects(response, redirect_url)", "def test_listWithDisabled(self):\n self.userbase('create', 'alice', 'localhost', SECRET)\n self.userbase('create', 'bob', 'localhost', SECRET)\n\n def cb(xxx_todo_changeme2):\n (interface, avatar, logout) = xxx_todo_changeme2\n avatar.disabled = 1\n output = self.userbase('list')\n self.assertEqual(output,\n ['alice@localhost', 'bob@localhost [DISABLED]'])\n\n return self._login('bob@localhost', SECRET).addCallback(cb)", "def test_list_authz_missing_dn_or_op(self):\n self.app.get(\"/config/authorize?operation=config\", status=200)\n self.app.get(\"/config/authorize?dn=/DN=a.test.user\", status=200)", "def test_user_list(self):\n response = self.client.get('/tests/dashboard/')\n self.assertEqual(response.status_code, 200)\n self.assertContains(response, 'testuser', html=True)\n self.assertContains(response, '[email protected]', html=True)", "def get_queryset(self):\n user = self.request.user\n if not (user.is_authenticated and user.check_permstring(\"builders\")):\n raise Http404(\"Not staff\")\n return super(IncompleteRosterListView, self).get_queryset()", "def test_not_authenticated_non_public_course_with_other_username(self):\n self.client.logout()\n self.verify_response(403)", "def test_get_no_permission(self):\n self.user.user_permissions.clear()\n response = self._get()\n self.assertRedirectsToLogin(response)", "def test_get_no_permission(self):\n self.user.user_permissions.clear()\n response = self._get()\n self.assertRedirectsToLogin(response)", "def test_if_allowed_for_superusers_permissions(self):\r\n res = self.client_superuser.get(reverse(LIST_USER_URL),data={})\r\n self.assertEqual(res.status_code, status.HTTP_200_OK)", "def test_get_all_for_other_team_forbidden(self):\n team = Team.create(name='foo', captain_id='User_cap',\n program_id=self.program.uid)\n team.put()\n user = User.create(name='foo', email='[email protected]')\n user.put()\n response = self.testapp.get(\n '/api/teams/{}/users'.format(team.uid),\n headers=self.login_headers(user),\n status=403\n )", "def test_users_get(self):\n pass", "def test_users_get(self):\n pass", "def test_csc_authorization_request_list_normal_user(self):\n # Arrange:\n self.client.credentials(\n HTTP_AUTHORIZATION=\"Token \" + self.token_user_normal.key\n )\n\n # Act:\n url = reverse(\"authlistrequest-list\")\n response = self.client.get(url, format=\"json\")\n\n # Assert\n self.assertEqual(response.status_code, 200)\n self.assertEqual(len(response.data), 2)", "def test_need_login_to_see_readinglist(self):\n response = 
self.client.get(reverse('api_v1:reading-list'), follow=True)\n self.assertEqual(response.status_code, 403)", "def test_get_all_tokens_anonymous_user(self):\r\n\r\n # Anonymoues users should be unauthorized, no matter which kind of token are requesting\r\n res = self.app.get('/api/token')\r\n err = json.loads(res.data)\r\n\r\n assert res.status_code == 401, err\r\n assert err['status'] == 'failed', err\r\n assert err['status_code'] == 401, err\r\n assert err['exception_cls'] == 'Unauthorized', err\r\n assert err['target'] == 'token', err", "def test_retrieve_user_unauthorized(self):\n res = self.client.get(ME_URL)\n\n self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_retrieve_user_unauthorized(self):\n res = self.client.get(ME_URL)\n\n self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_listing_from_wall_when_blocked_some_users(self):" ]
[ "0.7592878", "0.7291725", "0.7268098", "0.72500527", "0.7094657", "0.6990975", "0.6927358", "0.69206643", "0.6902929", "0.68474746", "0.67786574", "0.6738562", "0.67326707", "0.67326707", "0.6700241", "0.664444", "0.6609902", "0.6586935", "0.6504748", "0.6471352", "0.64705676", "0.64099103", "0.63810873", "0.634859", "0.63304645", "0.63026404", "0.62980145", "0.62765384", "0.6274346", "0.6273391", "0.6273391", "0.6273391", "0.6273391", "0.62484807", "0.62320894", "0.62317544", "0.6231731", "0.62075675", "0.62066483", "0.6196169", "0.61958367", "0.6176823", "0.6176823", "0.6174065", "0.61641055", "0.6142641", "0.61420834", "0.61230564", "0.6114342", "0.61118853", "0.61082554", "0.61051315", "0.6104592", "0.61024183", "0.61004186", "0.60976475", "0.6096111", "0.60956055", "0.60877", "0.60873824", "0.60867035", "0.6075616", "0.6070121", "0.6067019", "0.60621816", "0.605906", "0.60580415", "0.6056893", "0.60567933", "0.60555136", "0.6052221", "0.6038619", "0.6013833", "0.6004363", "0.599274", "0.5990746", "0.59904087", "0.5987278", "0.5985522", "0.5979927", "0.5978861", "0.59759253", "0.5970937", "0.5970524", "0.59682226", "0.59652334", "0.595496", "0.5954845", "0.5952374", "0.5952374", "0.5950283", "0.59495175", "0.59480834", "0.59480834", "0.594321", "0.59415144", "0.59409964", "0.59393954", "0.59393954", "0.5928298" ]
0.75815666
1
Ensure we can't list users without permissions.
def test_list_users_without_permissions(self): self.client.force_authenticate(user=self.user) response = self.client.get(reverse('user-list')) content = { 'detail': 'You do not have permission to perform this action.' } self.assertEqual(json.loads(response.content), content) self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_admin_user_list_all_users_permission_denied(self):\n self.client.logout()\n self.client.login(\n username=self.invalid_user.username,\n password=self.invalid_user.password\n )\n response = self.client.get(CONSTS.USER_ADMIN_LIST)\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def test_if_forbiden_for_authenticated_permissions(self):\r\n res = self.client_authenticated.get(reverse(LIST_USER_URL),data={})\r\n self.assertEqual(res.status_code, status.HTTP_403_FORBIDDEN)", "def test_filter_user_permissions(self):\n data = {\n \"users\": {\n 1: \"view\",\n 2: \"NONE\",\n }\n }\n\n with self.assertRaises(exceptions.PermissionDenied):\n check_user_permissions(data, 1)\n\n with self.assertRaises(exceptions.PermissionDenied):\n check_user_permissions(data, 2)\n\n check_user_permissions(data, 3)", "def test_10_admin_user_not_listed(self):\r\n self.register()\r\n res = self.app.get('/admin/users', follow_redirects=True)\r\n assert \"Manage Admin Users\" in res.data, res.data\r\n assert \"Current Users with Admin privileges\" not in res.data, res.data\r\n assert \"John\" not in res.data, res.data", "def DeniedPermissions(self) -> _n_6_t_0:", "def test_get_users_non_admin(client: FlaskClient) -> None:\n # Non-admin users are not allowed to make the request\n username = create_random_username()\n auth_token = create_auth_token(username)\n response = get_users(client, auth_token.signed)\n assert_error_response(response, HTTPStatus.FORBIDDEN)", "def test_requester_is_no_admin(self) -> None:\n\n channel = self.make_request(\n \"GET\",\n self.url,\n access_token=self.other_user_tok,\n )\n\n self.assertEqual(403, channel.code, msg=channel.json_body)\n self.assertEqual(Codes.FORBIDDEN, channel.json_body[\"errcode\"])", "def test_requester_is_no_admin(self) -> None:\n\n channel = self.make_request(\n \"GET\",\n self.url,\n access_token=self.other_user_tok,\n )\n\n self.assertEqual(403, channel.code, msg=channel.json_body)\n self.assertEqual(Codes.FORBIDDEN, channel.json_body[\"errcode\"])", "def test_get_no_permission(self):\n self.user.user_permissions.clear()\n response = self._get()\n self.assertRedirectsToLogin(response)", "def test_get_no_permission(self):\n self.user.user_permissions.clear()\n response = self._get()\n self.assertRedirectsToLogin(response)", "def get_queryset(self):\n user = self.request.user\n if not (user.is_authenticated and user.check_permstring(\"builders\")):\n raise Http404(\"Not staff\")\n return super(IncompleteRosterListView, self).get_queryset()", "def test_if_allowed_for_superusers_permissions(self):\r\n res = self.client_superuser.get(reverse(LIST_USER_URL),data={})\r\n self.assertEqual(res.status_code, status.HTTP_200_OK)", "def test_user_without_share(self):\n set_permission(Permission.EDIT, self.user1, self.collection)\n\n # Can not add permissions to users.\n data = {\"users\": {self.user2.pk: \"view\"}}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_403_FORBIDDEN)\n\n # Can not add permissions to groups.\n data = {\"users\": {self.group.pk: \"view\"}}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_403_FORBIDDEN)", "def test_get_no_permission(self):\n self.user.user_permissions.clear()\n response = self._get()\n self.assertRedirectsToLogin(response)\n self._assert_no_change()", "def cant(self, permissions: Union[str, List]) -> bool:", "def test_unauthenticated_user_denial(self):\n\n 
self.response = self.client.get(\"/api/users/users_list/\")\n self.assertEqual(self.response.status_code, status.HTTP_403_FORBIDDEN)\n self.assertEqual(\n 'Authentication credentials were not provided.', self.response.data['detail'])", "def get_everyone_denied(self):", "def test_get_no_permission(self):\n self.user.user_permissions.clear()\n response = self._get()\n self.assertRedirectsToLogin(response)\n self.assertEquals(self.model.objects.count(), 0)", "def test_need_login_to_see_usagelist(self):\n response = self.client.get(reverse('api_v1:usage-list'), follow=True)\n self.assertEqual(response.status_code, 403)", "def attendants_cannot_view_user_accounts(self):\n reply = self.admin_create_user()\n resp = self.attendant_login()\n token = resp['token']\n resp = self.client.get(\n '/api/v1/users',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'Unauthorized Access!')\n self.assertEqual(resp.status_code, 401)", "def test_requester_is_no_admin(self) -> None:\n\n channel = self.make_request(\n \"DELETE\",\n self.url,\n access_token=self.other_user_tok,\n )\n\n self.assertEqual(403, channel.code, msg=channel.json_body)\n self.assertEqual(Codes.FORBIDDEN, channel.json_body[\"errcode\"])", "def test_list_user(self):\n pass", "def test_no_permission(self):\n self.user.user_permissions.remove(*self.permissions)\n response = self._get()\n self.assertEqual(response.status_code, 302)", "def test_no_permission(self):\n self.user.user_permissions.remove(*self.permissions)\n response = self._get()\n self.assertEqual(response.status_code, 302)", "def test_cannot_view_all_users_with_blacklisted_token(self):\n resp = self.admin_create_user()\n reply = self.admin_create_user2()\n resp = self.admin_login()\n token = resp['token']\n\n resp = self.client.delete(\n '/api/v1/logout',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'You are successfully logged out!')\n self.assertEqual(resp.status_code, 200)\n\n resp = self.client.get(\n '/api/v1/users',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'Invalid Authentication, Please Login!')\n self.assertEqual(resp.status_code, 401)", "def test_unauthenticated_resource_allowed(self):\n raise NotImplementedError # FIXME", "def check_admin():\n\tif not current_user.is_admin:\n\t\tabort(403)", "def test__user_passed_as_none(self):\r\n access.has_access(None, 'staff', 'global', None)", "def test_list_users_without_authenticate(self):\n response = self.client.get(reverse('user-list'))\n\n content = {\"detail\": \"Authentication credentials were not provided.\"}\n self.assertEqual(json.loads(response.content), content)\n\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)", "def testGetAccessDenied(self):\n self.runGet(None)\n self.response_401()\n for user in (self.norole, self.unrelated_owner):\n self.runGet(user)\n self.response_403()", "def test_show_private_lists_invalid(self):\n with self.client as c:\n with c.session_transaction() as sess:\n sess[CURR_USER_KEY] = self.user2.id\n \n res = c.get(\"/users/tester1/private-lists\")\n\n self.assertEqual(res.status_code, 302)", "def test_get_no_permission(self):\n self.user.user_permissions.clear()\n response = self._get()\n self.assertRedirectsToLogin(response)\n self.assertEquals(self.model.objects.count(), 1)", "def 
test_user_get_all(self):\n response = self.app.get('/api/v3/users', headers=self.user_header)\n self.assertEqual(response.status_code, 401)", "def test_only_add_perm(self):\n self.assertStatusCode(self.url, 403)", "def test_cannot_add_usage(self):\n p = Permission.objects.get(name='Can add usage')\n self.user.user_permissions.add(p)\n self.client.login(username='testuser', password='q2w3E$R%')\n data = {'month': 2, 'year': 2018, 'meter': 1, 'usage': 789}\n response = self.client.post(reverse('api_v1:reading-list'),\n data=json.dumps(data),\n content_type='application/json',\n follow=True)\n self.assertEqual(response.status_code, 403)\n self.assertIn('You do not have permission', str(response.content))", "def OptionalPermissions(self) -> _n_6_t_0:", "def test_get_permissions(self):\n pass", "def users_with_role(self):\r\n return User.objects.none()", "def test_api__get_user_workspaces__err_403__unallowed_user(self):\n self.testapp.authorization = (\n 'Basic',\n (\n '[email protected]',\n 'foobarbaz'\n )\n )\n res = self.testapp.get('/api/v2/users/1/workspaces', status=403)\n assert isinstance(res.json, dict)\n assert 'code' in res.json.keys()\n assert 'message' in res.json.keys()\n assert 'details' in res.json.keys()", "def check_admin():\r\n if not current_user.is_admin:\r\n abort(403)", "def test_no_user_filter(self):\n self.sync.sync_users()\n self.assertEqual(self.ldapobj.methods_called(), [])", "def test_no_permission(self):\n override_acl(self.user, {'can_use_private_threads': 0})\n\n response = self.client.get(self.api_link)\n self.assertContains(response, \"t use private threads\", status_code=403)", "def list_users(self):\n raise NotImplementedError", "def testGetAccessDenied(self):\n self.runGet(None, sequencer=self.hiseq2000.vendor_id)\n self.response_401()\n for user in (self.norole, self.unrelated_owner):\n self.runGet(user)\n self.response_403()", "def check_admin():\n if not current_user.is_admin:\n abort(403)", "def check_admin():\n if not current_user.is_admin:\n abort(403)", "def test_listing_from_wall_when_blocked_some_users(self):", "def test_list_members_with_mod_rights_not_accepted(self):\n url = '/api/v1/communities/3/retrieve_members/'\n\n # Test before acceptation\n response = self.client.get(url, HTTP_AUTHORIZATION=self.auth('user2'))\n self.assertEqual(status.HTTP_401_UNAUTHORIZED, response.status_code)", "def test_no_permission(self):\n override_acl(self.user, {'can_use_private_threads': 0})\n\n response = self.client.get(self.api_link)\n self.assertContains(response, \"can't use private threads\", status_code=403)", "def test_get_work_type_list_forbidden(self):\n # Attempt to get work type list\n response = self.client.get(self.url)\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_util_has_perm_or_owns_sanity(self):\n me = User.objects.get(pk=118533)\n my_t = Thread.objects.filter(creator=me)[0]\n other_t = Thread.objects.exclude(creator=me)[0]\n perm = 'forums_forum.thread_edit_forum'\n allowed = access.has_perm_or_owns(me, perm, my_t, self.forum_1)\n eq_(allowed, True)\n allowed = access.has_perm_or_owns(me, perm, other_t, self.forum_1)\n eq_(allowed, False)", "def test_not_logged_user_cannot_access(self):\n\n utils.test_not_logged_cannot_access(self, self.url)", "def test_not_logged_user_cannot_access(self):\n\n utils.test_not_logged_cannot_access(self, self.url)", "def test_not_logged_user_cannot_access(self):\n\n utils.test_not_logged_cannot_access(self, self.url)", "def test_not_logged_user_cannot_access(self):\n\n 
utils.test_not_logged_cannot_access(self, self.url)", "def listusers():\n\n try:\n users = User.query.order_by(User.email).all()\n click.echo(\n tabulate(\n [\n [u.username, u.email, \"admin\" if u.is_admin else None]\n for u in users\n ]\n )\n )\n except OperationalError:\n click.echo(\"Tabela de usuários inexistente...\")", "def test_get_all_accessible_by_hash_list_as_anonymous_with_access_right_does_not_return_user_template(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n templates = template_api.get_all_accessible_by_hash_list(\n [self.fixture.user1_template.hash], request=mock_request\n )\n self.assertTrue(templates.count() == 0)", "def test_listWithDisabled(self):\n self.userbase('create', 'alice', 'localhost', SECRET)\n self.userbase('create', 'bob', 'localhost', SECRET)\n\n def cb(xxx_todo_changeme2):\n (interface, avatar, logout) = xxx_todo_changeme2\n avatar.disabled = 1\n output = self.userbase('list')\n self.assertEqual(output,\n ['alice@localhost', 'bob@localhost [DISABLED]'])\n\n return self._login('bob@localhost', SECRET).addCallback(cb)", "def test_auth_private_unowned(self):\n self.do_visible(False, 'pattieblack', False, tenant='froggy')", "def test_not_permitted(self):\r\n test_user_client, test_user = self.create_non_staff_authed_user_client()\r\n CourseEnrollment.enroll(test_user, self.course.id)\r\n response = test_user_client.get(self.orphan_url)\r\n self.assertEqual(response.status_code, 403)\r\n response = test_user_client.delete(self.orphan_url)\r\n self.assertEqual(response.status_code, 403)", "def test_if_not_available_for_unauthorized(self):\r\n res = self.not_authenticated.get(reverse(LIST_USER_URL),data={})\r\n self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_cant_remove_other_users_from_stream(self) -> None:\n result = self.attempt_unsubscribe_of_principal(\n query_count=8,\n target_users=[self.example_user(\"cordelia\")],\n is_realm_admin=False,\n is_subbed=True,\n invite_only=False,\n target_users_subbed=True,\n )\n self.assert_json_error(result, \"Insufficient permission\")", "def testGetAccessDenied(self):\n self.runGet(None, sequencer=self.hiseq2000.sodar_uuid)\n self.response_401()\n for user in (self.norole, self.unrelated_owner):\n self.runGet(user, sequencer=self.hiseq2000.sodar_uuid)\n self.response_403()", "def test_listOnClean(self):\n output = self.userbase('list')\n self.assertEqual(output, ['No accounts'])", "def test_get_all_accessible_by_id_list_as_anonymous_with_access_right_returns_global(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n templates = template_api.get_all_accessible_by_id_list(\n self.template_id_list, request=mock_request\n )\n self.assertTrue(templates.count() == 1)\n self.assertTrue((template.user is None for template in templates))", "def test_can_info_does_not_exist(self):\n fake_user = User(username='Fake', password='')\n self.assertFalse(send_rotate_to_can(fake_user, self.BIN_NUM))", "def test_auth_public_unowned(self):\n self.do_visible(True, 'pattieblack', True, tenant='froggy')", "def test_list_members_without_mod_rights(self):\n url = '/api/v1/communities/3/retrieve_members/'\n\n response = self.client.get(url, HTTP_AUTHORIZATION=self.auth('user3'))\n self.assertEqual(status.HTTP_401_UNAUTHORIZED, response.status_code)", "def testNoPermission(self):\n self.login_user(self.user)\n response = self.client.get(self.url, self.args)\n self.assertEqual(response.status_code, 302)", "def test_admin_user_list_all_users(self):\n response = 
self.client.get(CONSTS.USER_ADMIN_LIST)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(response.data, self.users.data)", "def test_get_work_list_forbidden(self):\n # Attempt to get works list\n response = self.client.get(self.url)\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_permission_remove_action_for_all_users(self):\n test_name = sys._getframe().f_code.co_name\n self._execute('permission add anonymous TICKET_CREATE')\n self._execute('permission remove * TICKET_CREATE')\n rv, output = self._execute('permission list')\n self.assertEqual(0, rv)\n self.assertEqual(self.expected_results[test_name], output)", "def permissions():\n pass", "def test_11_admin_user_not_listed_in_search(self):\r\n self.register()\r\n data = {'user': 'john'}\r\n res = self.app.post('/admin/users', data=data, follow_redirects=True)\r\n assert \"Manage Admin Users\" in res.data, res.data\r\n assert \"Current Users with Admin privileges\" not in res.data, res.data\r\n assert \"John\" not in res.data, res.data", "def redirect_users_without_permissions(page, request, serve_args, serve_kwargs):\n if not has_permission(request.user, get_required_groups(page)): \n return redirect(NO_PERMISSIONS_REDIRECT_URL)", "def has_module_perms(self, users):\r\n return True", "def test_get_all_for_other_team_forbidden(self):\n team = Team.create(name='foo', captain_id='User_cap',\n program_id=self.program.uid)\n team.put()\n user = User.create(name='foo', email='[email protected]')\n user.put()\n response = self.testapp.get(\n '/api/teams/{}/users'.format(team.uid),\n headers=self.login_headers(user),\n status=403\n )", "def test_permission_list_ok(self):\n test_name = sys._getframe().f_code.co_name\n rv, output = self._execute('permission list')\n self.assertEqual(0, rv)\n self.assertEqual(self.expected_results[test_name], output)", "def permissions(self):\n return None", "def no_network_access_check(user):\n return not user.has_property(\"network_access\")", "async def test_regular_member_cannot_use_command_outside_of_bot_commands(self, constants):\n constants.MODERATION_ROLES = [self.moderator_role.id]\n constants.STAFF_ROLES = [self.moderator_role.id]\n ctx = helpers.MockContext(author=self.author, channel=helpers.MockTextChannel(id=100))\n\n msg = \"Sorry, but you may only use this command within <#50>.\"\n with self.assertRaises(InWhitelistCheckFailure, msg=msg):\n await self.cog.user_info(self.cog, ctx)", "def not_test_without_user(self):\n # TODO", "def test_news_index_no_perm(self):\n self.assertStatusCode(self.url, 403)", "def test_only_edit_perm(self):\n self.assertStatusCode(self.url, 403)", "def test_no_token_get_all(self):\n response = self.app.get('/api/v3/users')\n self.assertEqual(response.status_code, 401)", "def users_list(request):\n users_filter = UserFilter(request.GET, queryset=CustomUser.objects.filter(is_admin=False), request=request)\n return render(request, 'users/list.html', {'filter': users_filter})", "def get_illegal_users(sessions, users):\n\n # Don't care about specific users being allowed to log in\n if not users:\n return []\n\n illegal_users = []\n\n # Record user sessions not whitelisted by the check\n for session in sessions:\n user = sessions[session]['user']\n if users:\n if user not in users:\n illegal_users.append(session + ':' + user)\n\n return illegal_users", "def test_listing_supplies_unauthenticated(self):\n request = self.factory.get('/api/supplies')\n response = SupplyListView.as_view()(request)\n # no permission\n 
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_list_members_without_member_rights(self):\n url = '/api/v1/communities/3/retrieve_members/'\n\n response = self.client.get(url, HTTP_AUTHORIZATION=self.auth('user4'))\n self.assertEqual(status.HTTP_401_UNAUTHORIZED, response.status_code)", "def test_xblockcompletion_get_user_no_permission(self):\n data = {\n 'format':'all',\n 'course': str(self.course.id)\n }\n response = self.client_student.get(reverse('xblockcompletion-data:data'), data)\n request = response.request\n self.assertEqual(response.status_code, 200)\n self.assertEqual(json.loads(response._container[0].decode()), {'error': 'Usuario no tiene rol para esta funcionalidad'})", "def test_get_users_unauthenticated(client: FlaskClient) -> None:\n # Unauthenticated users are not allowed to make the request\n response = get_users(client)\n assert_error_response(response, HTTPStatus.UNAUTHORIZED)", "def test_view_all_categories_with_wrong_perms(self):\n self.client.login(username='hodor', password='hodor')\n Perms.objects.create(user=self.user, access_level=5).save()\n response = self.client.get('/categories/', {}, follow=True)\n self.assertTemplateUsed(response, 'unauthorized.html')", "def test_check_permissions(mock_list_permissions, mock_dry_permissions):\n view = views.ListEntryListView()\n\n view.check_permissions(None)\n\n assert mock_dry_permissions.call_count == 1\n assert mock_list_permissions.call_count == 1", "def test_user_get_profile_not_authorized(self):\n self.client.logout()\n response = self.client.get(CONSTS.USER_PROFILE_URL)\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def test_get_all_superuser_permissions(self):\n user = self.UserModel._default_manager.get(pk=self.superuser.pk)\n self.assertEqual(len(user.get_all_permissions()), len(Permission.objects.all()))", "def RequestedPermissions(self) -> _n_6_t_0:", "def test_urllist_command(self):\n # TODO: Write tests for when there is no superuser.\n # This seemed to not work when using this command on PythonAnywhere the first time\n pass", "def test_cannot_delete_usage(self):\n p = Permission.objects.get(name='Can delete usage')\n self.user.user_permissions.add(p)\n self.client.login(username='testuser', password='q2w3E$R%')\n response = self.client.delete(reverse('api_v1:usage-detail', kwargs={'pk': 1}),\n follow=True)\n self.assertEqual(response.status_code, 405)\n self.assertIn('not allowed', str(response.content))", "def test_filter_public_permissions(self):\n data = {\"public\": \"view\"}\n check_public_permissions(data)\n\n data = {\"public\": \"edit\"}\n with self.assertRaises(exceptions.PermissionDenied):\n check_public_permissions(data)\n\n data = {\"public\": \"share\"}\n with self.assertRaises(exceptions.PermissionDenied):\n check_public_permissions(data)\n\n data = {\"public\": \"owner\"}\n with self.assertRaises(exceptions.PermissionDenied):\n check_public_permissions(data)", "def can_edit_or_403(self, user):\n if self.get_permission_level(user) < self.OWNER_PERMISSION:\n raise PermissionDenied\n return True" ]
[ "0.7638809", "0.69624966", "0.6810429", "0.6799262", "0.67936945", "0.6770797", "0.6763287", "0.6763287", "0.66203946", "0.66203946", "0.6566771", "0.65271413", "0.6525158", "0.64896446", "0.64557964", "0.6414002", "0.63701904", "0.6349665", "0.63055766", "0.62822396", "0.626694", "0.62578756", "0.62459654", "0.62459654", "0.6243862", "0.6235741", "0.6225688", "0.6202697", "0.61954355", "0.6195404", "0.6193635", "0.61761546", "0.61668795", "0.6166617", "0.6149882", "0.61482584", "0.614516", "0.6135463", "0.61288995", "0.61045426", "0.6102352", "0.60889876", "0.60788786", "0.60720885", "0.6072086", "0.6072086", "0.6056108", "0.60539645", "0.6051057", "0.60479605", "0.604106", "0.60406506", "0.60406506", "0.60406506", "0.60406506", "0.6037186", "0.60368186", "0.60087496", "0.6006133", "0.5993799", "0.59919375", "0.59901047", "0.5971715", "0.5969801", "0.5961864", "0.59506017", "0.5949937", "0.5928371", "0.59271795", "0.59247", "0.5915242", "0.5915068", "0.59132373", "0.59081715", "0.5907469", "0.5902197", "0.58999056", "0.58978504", "0.5895511", "0.5886967", "0.5879096", "0.58701557", "0.5868849", "0.5862939", "0.5861465", "0.5857477", "0.5850592", "0.58488744", "0.5845844", "0.5840772", "0.58394027", "0.58323216", "0.5830354", "0.58236474", "0.58170587", "0.581568", "0.5810749", "0.58087903", "0.58062285", "0.580534" ]
0.7912165
0
Ensure we can send notification for membership end
def test_send_notification_end_membership(self): fixed_time = timezone.now() end_time_membership = fixed_time + relativedelta(days=28) self.user.membership = self.membership self.user.membership_end = end_time_membership self.user.save() with mock.patch( 'store.serializers.timezone.now', return_value=fixed_time ): response = self.client.get( reverse('user-execute-automatic-email-membership-end') ) content = { 'stop': False, 'email_send_count': 1 } self.assertEqual( response.status_code, status.HTTP_200_OK, response.content ) self.assertEqual( json.loads(response.content), content ) self.assertEqual(len(mail.outbox), 1) self.user.refresh_from_db() self.assertEqual(self.user.membership_end_notification, fixed_time) with mock.patch( 'store.serializers.timezone.now', return_value=fixed_time ): response = self.client.get( reverse('user-execute-automatic-email-membership-end') ) content = { 'stop': False, 'email_send_count': 0 } self.assertEqual( response.status_code, status.HTTP_200_OK, response.content ) self.assertEqual( json.loads(response.content), content ) # no new mail self.assertEqual(len(mail.outbox), 1)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_successful_subscriptions_notifies_pm(self) -> None:\n invitee = self.example_user(\"iago\")\n\n current_stream = self.get_streams(invitee)[0]\n invite_streams = self.make_random_stream_names([current_stream])[:1]\n self.common_subscribe_to_streams(\n invitee,\n invite_streams,\n extra_post_data={\n \"announce\": \"true\",\n \"principals\": orjson.dumps([self.user_profile.id]).decode(),\n },\n )", "def test_notify_user(self):\n foo = Foo.objects.create(name='foo', description='foo object')\n notify_users([self.user_a], foo, notification_type='foo')\n self.assertEqual(len(mail.outbox), 1)", "def test_admin_approval_complete_email(self):\n new_user = UserModel().objects.create_user(**self.user_info)\n profile = self.registration_profile.objects.create_profile(new_user)\n profile.send_admin_approve_complete_email(Site.objects.get_current())\n self.assertEqual(len(mail.outbox), 1)\n self.assertEqual(mail.outbox[0].to, [self.user_info['email']])", "def test_registered_no_notifications(self):\n msg = self._send(self.reg_conn, '1')\n self.assertEqual(len(msg.responses), 1)\n self.assertEqual(msg.responses[0].text,\n self.app.no_reminders)", "def test_registered_no_notifications(self):\n msg = self._send(self.reg_conn, '1')\n self.assertEqual(len(msg.responses), 1)\n self.assertEqual(msg.responses[0].text,\n self.app.no_reminders)", "def send_notification(self):\n s1 = System()\n b1 = Books(\"1984\", \"George Orwell\", \"Harvill Secker\", \"1949\", \"0123456789123\")\n m1 = Members(\"Richard\", \"Blackmore\", \"14-04-1945\", \"Weston\")\n s1.send_notification(\"Please return book\")\n self.assertEqual(m1.get_notifications(), None)\n s1.add_resource(b1)\n s1.lending_process(b1, m1)\n s1.send_notification(\"Please return book\")\n self.assertEqual(m1.get_notifications(), \"-Please return boo- \")", "def test_admin_approval_email(self):\n new_user = UserModel().objects.create_user(**self.user_info)\n profile = self.registration_profile.objects.create_profile(new_user)\n profile.activated = True\n self.registration_profile.objects.send_admin_approve_email(\n new_user, Site.objects.get_current())\n self.assertEqual(len(mail.outbox), 1)\n admins_emails = [value[1] for value in settings.REGISTRATION_ADMINS]\n for email in mail.outbox[0].to:\n self.assertIn(email, admins_emails)", "def test_notify_users(self):\n foo = Foo.objects.create(name='foo', description='foo object')\n notify_users(User.objects.all(), foo, notification_type='foo')\n self.assertEqual(len(mail.outbox), 2)", "def send_reminder(self):\n pass", "async def check_notify(self) -> None:\n async with self.lock:\n # We loop through a list of keys because we are going to\n # mutate the dictionary as we loop through it.\n for message_id in copy.copy(list(self.upcoming_events.keys())):\n upcoming_event = self.upcoming_events[message_id]\n if not upcoming_event.time_to_notify():\n continue\n\n # Delete upcoming event if it's a member event\n if isinstance(upcoming_event, MemberEvent):\n # Delete upcoming if it's a member event\n await self.delete_upcoming_event(message_id)\n\n # Prepare message from the queue if it's recurring\n stop_notifying = False\n if isinstance(upcoming_event, RecurringEvent):\n stop_notifying = (\n upcoming_event.event_cancelled\n or upcoming_event.notified\n )\n\n if not stop_notifying:\n # Send ongoing event message\n ongoing_message = await upcoming_event.send_ongoing_message(\n notif_message=self.ongoing_template,\n channel=self.calendar_channel\n )\n\n # Distribute DM\n await upcoming_event.distribute_dm(\n 
self.dm_template,\n self.organizer_dm_template\n )\n\n # Create new ongoing event\n ongoing_event = OngoingEvent(\n countdown_time=upcoming_event.start_time,\n timeout_length=self.event_timeout,\n organizer_id=upcoming_event.organizer.id,\n message_text=ongoing_message.content,\n message_embed=ongoing_message.embeds[0]\n )\n\n self.ongoing_events[ongoing_message.id] = ongoing_event", "def notifyNewMember(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def test_resend_inactive(self):\n self.invite.active = False\n self.invite.save()\n url = reverse(\n 'projectroles:api_invite_resend',\n kwargs={'projectinvite': self.invite.sodar_uuid},\n )\n response = self.request_knox(url, method='POST')\n self.assertEqual(response.status_code, 400, msg=response.content)\n self.assertEqual(len(mail.outbox), 0)", "def test_registered_with_notification(self):\n now = datetime.datetime.now()\n notification = reminders.Notification.objects.create(num_days=1,\n time_of_day=now)\n reminders.SentNotification.objects.create(notification=notification,\n recipient=self.contact,\n status='sent',\n message='abc',\n appt_date=now,\n date_to_send=now)\n msg = self._send(self.reg_conn, '1')\n self.assertEqual(len(msg.responses), 1)\n self.assertEqual(msg.responses[0].text,\n self.app.thank_you)\n sent_notif = reminders.SentNotification.objects.all()\n self.assertEqual(sent_notif.count(), 1)\n self.assertEqual(sent_notif[0].status, 'confirmed')", "def test_registered_with_notification(self):\n now = datetime.datetime.now()\n notification = reminders.Notification.objects.create(num_days=1,\n time_of_day=now)\n reminders.SentNotification.objects.create(notification=notification,\n recipient=self.contact,\n status='sent',\n message='abc',\n appt_date=now,\n date_to_send=now,\n date_queued=now)\n msg = self._send(self.reg_conn, '1')\n self.assertEqual(len(msg.responses), 1)\n self.assertEqual(msg.responses[0].text,\n self.app.thank_you)\n sent_notif = reminders.SentNotification.objects.all()\n self.assertEqual(sent_notif.count(), 1)\n self.assertEqual(sent_notif[0].status, 'confirmed')", "def test_resend_delegate_no_perms(self):\n self.invite.role = self.role_delegate\n self.invite.save()\n delegate = self.make_user('delegate')\n self.make_assignment(self.project, delegate, self.role_delegate)\n\n url = reverse(\n 'projectroles:api_invite_resend',\n kwargs={'projectinvite': self.invite.sodar_uuid},\n )\n response = self.request_knox(\n url, method='POST', token=self.get_token(delegate)\n )\n self.assertEqual(response.status_code, 403, msg=response.content)\n self.assertEqual(len(mail.outbox), 0)", "def test_private_message_sends_email(self, get_current):\n get_current.return_value.domain = \"testserver\"\n\n s, c = Setting.objects.get_or_create(user=self.to, name=\"email_private_messages\")\n s.value = True\n s.save()\n # User has setting, and should recieve notification email.\n\n assert Setting.get_for_user(self.to, \"email_private_messages\")\n\n self.client.login(username=self.sender.username, password=\"testpass\")\n post(self.client, \"messages.new\", {\"to\": self.to, \"message\": \"a message\"})\n subject = \"[SUMO] You have a new private message from [{sender}]\"\n\n attrs_eq(\n mail.outbox[0],\n to=[self.to.email],\n subject=subject.format(sender=self.sender.profile.name),\n )\n starts_with(\n mail.outbox[0].body, PRIVATE_MESSAGE_EMAIL.format(sender=self.sender.profile.name)\n )", 
"def test_set_send_email_notifications(self):\n # Setup scenario\n username = 'tester'\n password = 'secret'\n user = Account.objects.create_user(username=username, email='[email protected]', password=password)\n\n self.assertTrue(self.client.login(username=username, password=password))\n\n # Verify initial assumptions\n self.assertTrue(user.send_email_notifications)\n\n # Run code\n resp = self.client.post(reverse('account.api.configure_email'), {\n 'send_email_notifications': False,\n }, format='json')\n\n # Verify expectations\n self.assertEquals(status.HTTP_201_CREATED, resp.status_code)\n self.assertTrue(user.send_email_notifications)", "def test_api_user_resend_confirmation_post(self):\n pass", "def can_notify(self, last_notification):\n return (\n features.is_enabled(features.EMAIL_NOTIFICATIONS)\n and self.notification_settings.via_email\n and api.can_email_user(self.user)\n and super().can_notify(last_notification)\n )", "def notify(message):\n # TODO: clean up this ugly mess\n\n global notify_flag\n\n if not notify_flag:\n notify_flag = True\n message.reply(\":gear: Started expiration checking process; users will now \"\n \"be notified if their access is about to expire.\")\n else:\n message.reply(\"Cannot have more than one running instance of the notify \"\n \"function.\")\n return\n\n flag = \"tenmins\"\n while True:\n if flag is \"deleted\":\n info = sql.notify_users(\"hour\")\n flag = \"hour\"\n elif flag is \"hour\":\n info = sql.notify_users(\"tenmins\")\n flag = \"tenmins\"\n elif flag is \"tenmins\":\n info = sql.notify_users(\"deleted\")\n flag = \"deleted\"\n\n for person in info:\n if len(info[person]) == 0:\n continue\n try:\n users = hf.get_users()\n for user in users:\n if user[\"name\"] == person:\n dbs = []\n servers = []\n for grant in info[person]:\n dbs.append(grant[\"db\"])\n servers.append(grant[\"server\"])\n chan = hf.find_channel(message._client.channels, user[\"id\"])\n\n if flag is \"hour\":\n message._client.send_message(chan,\n Strings['NOTIFY_EXPIRE_HOUR'].format(\", \".join(dbs)) + \"\\n\"\n \"\" + Strings[\"NOTIFY_EXPIRE_INFO\"])\n for db, server in zip(dbs, servers):\n logging.info(\"{} reason=[NOTIFIED OF DATABASE ACCESS EXPIRING IN AN HOUR]\\n\".format(user[\"name\"]), server, db, \"notifyhour\")\n elif flag is \"tenmins\":\n message._client.send_message(chan,\n Strings['NOTIFY_EXPIRE_TENMINS'].format(\", \".join(dbs)) + \"\\n\"\n \"\" + Strings[\"NOTIFY_EXPIRE_INFO\"])\n for db, server in zip(dbs, servers):\n logging.info(\"{} reason=[NOTIFIED OF DATABASE ACCESS EXPIRING IN TEN MINUTES]\\n\".format(user[\"name\"]), server, db, \"notifyten\")\n elif flag is \"deleted\":\n message._client.send_message(chan,\n Strings['EXPIRE'].format(\", \".join(dbs)))\n message._client.send_message(public_channel,\n Strings[\"EXPIRE_PING\"].format(user[\"name\"],\n \", \".join(dbs)))\n for db, server in zip(dbs, servers):\n logging.info(\"{} reason=[NOTIFIED OF DATABASE ACCESS EXPIRING]\\n\".format(user[\"name\"]), server, db, \"notifyexpire\")\n\n except Exception as e:\n message._client.send_message(errors_channel, \"```{}```\".format(e))\n\n with open(\"data/jobs.json\") as f:\n jobs = json.load(f)\n\n new_jobs = []\n if len(jobs) > 0:\n for job in jobs:\n if not job.endswith(\"DONE\"):\n job_string = job.replace(\"10.132.140.160\", \"SQLCLUSTER02\").replace(\"10.132.140.150\", \"SQLCLUSTER01\")\n message._client.send_message(public_channel,\n Strings[\"LOGOUT_PLEASE\"].format(job_string.split(\":\")[0],\n job_string.split(\":\")[1]))\n new_jobs.append(job + 
\":DONE\")\n else:\n new_jobs.append(job)\n\n with open(\"data/jobs.json\", \"w\") as f:\n json.dump(new_jobs, f)\n\n # For use with Datadog\n with open(\"/opt/opsbot35/data/status.txt\", \"w\") as f:\n f.write(str(datetime.now()))\n\n time.sleep(5)", "def test_resend_activation_email_nonexistent_user(self):\n self.assertFalse(self.registration_profile.objects.resend_activation_mail(\n email=self.user_info['email'],\n site=Site.objects.get_current(),\n ))\n self.assertEqual(len(mail.outbox), 0)", "def test_alert_create_for_site_members(self):\n pass", "def test_sending_mail(self):\n\n appt_date = datetime.date.today() + datetime.timedelta(days=7) # Default for email\n confirmed = self.create_confirmed_notification(self.test_patient, appt_date)\n\n # run email job\n daily_email_callback(self.router)\n\n self.assertEqual(len(mail.outbox), 1)\n message = mail.outbox[0]\n self.assertTrue(self.test_contact.email in message.to)", "def test_private_message_not_sends_email(self, get_current):\n get_current.return_value.domain = \"testserver\"\n\n s, c = Setting.objects.get_or_create(user=self.to, name=\"email_private_messages\")\n # Now user should not recieve email.\n s.value = False\n s.save()\n assert not Setting.get_for_user(self.to, \"email_private_messages\")\n\n self.client.login(username=self.sender.username, password=\"testpass\")\n post(self.client, \"messages.new\", {\"to\": self.to, \"message\": \"a message\"})\n\n assert not mail.outbox", "def test_resend_delegate(self):\n self.invite.role = self.role_delegate\n self.invite.save()\n url = reverse(\n 'projectroles:api_invite_resend',\n kwargs={'projectinvite': self.invite.sodar_uuid},\n )\n response = self.request_knox(url, method='POST')\n self.assertEqual(response.status_code, 200, msg=response.content)\n self.assertEqual(len(mail.outbox), 1)", "async def anticipation(self, ctx: commands.Context):\n role = ctx.guild.get_role(529447810127495168)\n\n if role.id not in (r.id for r in ctx.author.roles):\n await ctx.author.add_roles(role, reason=\"/anticipation\")\n embed = discord.Embed(\n colour=discord.Colour.green(),\n description=\"Anticipation Notifications successfully added.\"\n )\n await ctx.send(embed=embed)\n\n else:\n await ctx.author.remove_roles(role, reason=\"/anticipation\")\n embed = discord.Embed(\n colour=discord.Colour.red(),\n description=\"Anticipation Notifications successfully removed.\"\n )\n await ctx.send(embed=embed)", "def handle_sent(self, instance):\n if not instance.created_by:\n return\n\n activity = Activity(\n actor=instance.created_by,\n verb=RestrictedMailSent,\n object=instance,\n time=instance.used,\n extra_context={},\n )\n self.manager.add_activity(\n activity, [instance.created_by.pk], [NotificationFeed]\n )\n\n # Send notification\n notification = RestrictedMailSentNotification(instance.created_by)\n notification.notify()", "def test_notify_reached_end_of_wait_queue(self):\n # self.client.force_authenticate(user=self.admin)\n\n notification_count = WaitQueueNotification.objects.all().count()\n\n self.retreat.next_user_notified = 2\n self.retreat.save()\n\n response = self.client.get(\n '/'.join([\n reverse('retreat:waitqueuenotification-list'),\n 'notify',\n ])\n )\n\n self.retreat.refresh_from_db()\n\n self.assertEqual(\n self.retreat.next_user_notified,\n 0,\n \"next_user_notified index invalid\"\n )\n\n # Assert that 0 reserved seats remain (since 0 users are waiting)\n self.assertEqual(\n self.retreat.reserved_seats,\n 0,\n \"reserved_seats index invalid\"\n )\n\n # Assert that 0 notification 
has been created\n # The old one has been deleted\n self.assertEqual(\n WaitQueueNotification.objects.all().count(),\n notification_count - 1,\n \"WaitQueueNotification count invalid\"\n )\n\n self.assertEqual(\n response.status_code,\n status.HTTP_200_OK,\n response.content,\n )\n\n response_data = json.loads(response.content)\n\n content = {\n 'detail': 'No reserved seats.',\n 'stop': True\n }\n\n self.assertEqual(response_data, content)\n\n self.assertEqual(len(mail.outbox), 0)", "def send_registration_handle(sender, instance, **kwargs):\n #import ipdb; ipdb.set_trace()\n if instance._verifying: return\n #url=instance.get_absolute_url()\n url=reverse('registration-verify', request=instance.request, format=None)\n try:\n send_mail(\n 'Registration to ItalyInformaticaProject',\n #\"Please click on the link to validate your registration: /verify/%s/%s\"%(url.rstrip('/'),instance.token),\n \"Please click on the link to validate your registration: %s/%s/%s\"%(url.rstrip('/'),repr(instance.id),instance.token),\n '[email protected]',\n [instance.owner.email],\n fail_silently=False,\n )\n except Exception as e:\n instance.owner.delete()\n raise APIException(\"Cannot send email notification:%s\"%repr(e))", "def notify_students():\n time_now = datetime.datetime.now(get_localzone())\n emails_to_send = Email.objects.all()\n for email in emails_to_send:\n if email.assignment.date_assigned <= time_now:\n send_mail(subject=email.subject,\n message=email.message,\n recipient_list=Student.objects.filter(assignments=email.assignment),\n from_email=None,\n fail_silently=False)\n email.delete()", "def test_emailable(self):\n to_date = datetime.today() - timedelta(days=2)\n consumer = Consumer.objects.get(id=103)\n consumer.consumer_create_datetime = to_date - timedelta(days=1)\n consumer.is_emailable = False\n consumer.save()\n from_date = to_date.date() - timedelta(days=2)\n result = UnqualifiedConsumerEmailTask().qry_unqualified_consumers(\n from_date, to_date.date())\n self.assertEqual(result.filter(id=103).count(), 0)", "def verify(self):\n ACTIVATION_PERIOD = datetime.timedelta(days=14)\n if not self.org_verified:\n self.org_verified = True\n if not self.is_active:\n if not self.activation_code:\n self.activation_code = random_url_safe_code()\n self.activate_by = datetime.datetime.utcnow() + ACTIVATION_PERIOD\n import messaging # avoid circular import\n messaging.send_activation_emails(self)\n self.save()", "def test_registered_with_notification_and_pin(self):\n now = datetime.datetime.now()\n self.contact.pin = '1234'\n self.contact.save()\n notification = reminders.Notification.objects.create(num_days=1,\n time_of_day=now)\n reminders.SentNotification.objects.create(notification=notification,\n recipient=self.contact,\n status='sent',\n message='abc',\n appt_date=now,\n date_to_send=now)\n msg = self._send(self.reg_conn, '1234')\n self.assertEqual(len(msg.responses), 1)\n self.assertEqual(msg.responses[0].text,\n self.app.thank_you)\n sent_notif = reminders.SentNotification.objects.all()\n self.assertEqual(sent_notif.count(), 1)\n self.assertEqual(sent_notif[0].status, 'confirmed')", "def waiting_confirmation(self):", "def test_group_notification_not_called(self):\n send_message(self.directmessage1.pk)\n self.assertFalse(self.groupnotify_mock.called)", "def test_issue_subscriptions(self):\n pass", "def test_registered_with_notification_and_pin(self):\n now = datetime.datetime.now()\n self.contact.pin = '1234'\n self.contact.save()\n notification = reminders.Notification.objects.create(num_days=1,\n 
time_of_day=now)\n reminders.SentNotification.objects.create(notification=notification,\n recipient=self.contact,\n status='sent',\n message='abc',\n appt_date=now,\n date_to_send=now,\n date_queued=now)\n msg = self._send(self.reg_conn, '1234')\n self.assertEqual(len(msg.responses), 1)\n self.assertEqual(msg.responses[0].text,\n self.app.thank_you)\n sent_notif = reminders.SentNotification.objects.all()\n self.assertEqual(sent_notif.count(), 1)\n self.assertEqual(sent_notif[0].status, 'confirmed')", "def test_unverified_subscriber(self):\n self.prep_consumer()\n subscriber = Subscriber.objects.get(id=7)\n self.consumer.subscriber = subscriber\n self.consumer.save()\n UnqualifiedConsumerEmailTask().run(test_mode=self.consumer)\n self.common_asserts()\n self.assertTrue('71010' in mail.outbox[0].alternatives[0][0])\n self.assertTrue('71010' in mail.outbox[0].body)", "def test_email_sent_on_failure(self):\n self._authorize()\n data = {\n 'Subject_Number': '000-1111',\n 'Pin_Code': '1234',\n 'Date_Enrolled': datetime.datetime.now().strftime('%b %d %Y '),\n 'Mobile_Number': '2223334444',\n }\n patient = self.create_xml_patient(data)\n payload = self.create_xml_payload([patient])\n response = self._post(payload)\n self.assertEqual(response.status_code, 500)\n self.assertEqual(len(mail.outbox), 1)", "def test_send_registration_event(self):\n event_receiver = Mock(side_effect=self._event_receiver_side_effect)\n STUDENT_REGISTRATION_COMPLETED.connect(event_receiver)\n\n self.client.post(self.url, self.user_info)\n\n user = User.objects.get(username=self.user_info.get(\"username\"))\n self.assertTrue(self.receiver_called)\n self.assertDictContainsSubset(\n {\n \"signal\": STUDENT_REGISTRATION_COMPLETED,\n \"sender\": None,\n \"user\": UserData(\n pii=UserPersonalData(\n username=user.username,\n email=user.email,\n name=user.profile.name,\n ),\n id=user.id,\n is_active=user.is_active,\n ),\n },\n event_receiver.call_args.kwargs\n )", "def test_sending_mail(self):\n\n appt_date = datetime.date.today() + datetime.timedelta(days=7) # Default for email\n reminders.Patient.objects.filter(\n pk__in=[self.test_patient.pk, self.other_patient.pk]\n ).update(next_visit=appt_date)\n confirmed = self.create_confirmed_notification(self.test_patient, appt_date)\n\n self.startRouter()\n self.router.logger.setLevel(logging.DEBUG)\n\n # run email job\n from afrims.apps.reminders.app import daily_email_callback\n daily_email_callback(self.router)\n\n self.assertEqual(len(mail.outbox), 1)\n message = mail.outbox[0]\n self.assertTrue(self.test_contact.email in message.to)\n self.stopRouter()", "def test_notify(self):\n # self.client.force_authenticate(user=self.admin)\n\n FIXED_TIME = datetime(2018, 1, 1, tzinfo=LOCAL_TIMEZONE)\n\n # Old notification that will be deleted\n with mock.patch(\n 'django.utils.timezone.now', return_value=FIXED_TIME):\n WaitQueueNotification.objects.create(\n user=self.user,\n retreat=self.retreat,\n )\n\n waiting_user = WaitQueue.objects.create(\n user=self.user,\n retreat=self.retreat,\n )\n\n waiting_user2 = WaitQueue.objects.create(\n user=self.user2,\n retreat=self.retreat,\n )\n\n notification_count = WaitQueueNotification.objects.all().count()\n\n response = self.client.get(\n '/'.join([\n reverse('retreat:waitqueuenotification-list'),\n 'notify',\n ])\n )\n\n self.retreat.refresh_from_db()\n\n # Assert that the wait queue index is updated\n # All users (2) are notified since there are more (4) reserved_seats\n self.assertEqual(\n self.retreat.next_user_notified,\n 2,\n 
\"next_user_notified index invalid\"\n )\n\n # Assert that only 2 reserved seats remain (since only 2 users are\n # waiting)\n self.assertEqual(\n self.retreat.reserved_seats,\n 2,\n \"reserved_seats index invalid\"\n )\n\n # Assert that 2 new notifications are created (2 users in wait_queue)\n # Assert that 2 old notification has been deleted (too old)\n self.assertEqual(\n WaitQueueNotification.objects.all().count(),\n notification_count + 2 - 2,\n \"WaitQueueNotification count invalid\"\n )\n\n self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)\n\n self.assertEqual(len(mail.outbox), 2)\n\n waiting_user.delete()\n waiting_user2.delete()", "async def plaguenotify(self, ctx):\n notifications = await self.config.user(ctx.author).notifications()\n if notifications != False:\n await self.config.user(ctx.author).notifications.set(False)\n message = \"You will no longer be sent Plague Game notifications.\"\n else:\n await self.config.user(ctx.author).notifications.set(True)\n message = \"You will now be sent Plague Game notifications.\"\n\n await ctx.send(message)", "def test_skip_if_no_patients(self):\n\n appt_date = datetime.date.today() + datetime.timedelta(days=5)\n confirmed = self.create_confirmed_notification(self.test_patient, appt_date)\n\n # run email job\n from aremind.apps.reminders.app import daily_email_callback\n daily_email_callback(self.router)\n\n self.assertEqual(len(mail.outbox), 0)", "def promoteUser(self):\n\t\t#ensure they're supposed to be here and haven't been here before\n\t\tif self.goodEventsCount >= 3 and not self.verified:\n\t\t\tself.verifiedUser=True\n\t\t\tself.put()\n\t\t\tmessage = mail.EmailMessage(\n\t\t\t\t\tsender=\"Friends with Food Admin <[email protected]>\",\n subject=\"Your account has been verified!\")\n\n\t\t\tmessage.to = self.id.email()\n\t\t\tmessage.cc = \"[email protected]\"\n\t\t\tmessage.body = \"\"\"\n\t\t\tDear %s:\n\n\t\t\tYour account on Friends with Food has been verified! Because you've \n\t\t\tshown us so many good events, we've upgraded your account. Now, you'll \n\t\t\tget notified of free food on campus ASAP! 
You'll also be able to verify\n\t\t\tevents so that everyone knows they're legit.\n\t\t\t\n\t\t\t*With great power comes great responsibility*\n\t\t\t\n\t\t\tThanks,\n\t\t\t\n\t\t\tThe Friends with Food Team\n\t\t\t\"\"\" % self.id.nickname()\n\t\t\tmessage.send()", "def test_send_email_on_invite(self):\n\n league = self.create_league()\n\n season = self.create_season(league)\n team = self.create_team(season)\n\n player = self.create_player()\n\n send_user_email_on_join(player, team.id)\n\n self.assertEqual(len(mail.outbox), 1)\n\n # if testing manually:\n # import pathlib\n # pathlib.Path(\"test_email.html\").write_text(last_sent.body)", "def notify(guid, message):", "def test_no_admins_registered(self):\n new_user = self.registration_profile.objects.create_inactive_user(\n site=Site.objects.get_current(), **self.user_info)\n\n with self.assertRaises(ImproperlyConfigured):\n self.registration_profile.objects.send_admin_approve_email(\n new_user, Site.objects.get_current())", "def notifySysOperator(self):\n msg = self.generateNotifyMessage()\n print(msg)\n # with smtplib.SMTP('smtp.gmail.com', 587) as smtp:\n # smtp.ehlo()\n # smtp.starttls()\n # smtp.ehlo()\n\n # smtp.login(\"[email protected]\", \"qwerQWER123.\")\n\n # smtp.sendmail(\"[email protected]\", \"[email protected]\", msg)\n\n # smtp.close()\n return False", "def test_no_registration_admins_registered(self):\n new_user = self.registration_profile.objects.create_inactive_user(\n site=Site.objects.get_current(), **self.user_info)\n\n with warnings.catch_warnings(record=True) as _warning:\n self.registration_profile.objects.send_admin_approve_email(\n new_user, Site.objects.get_current())\n\n assertion_error = '''No warning triggered for unregistered\n REGISTRATION_ADMINS'''\n self.assertTrue(len(_warning) > 0, assertion_error)\n self.assertTrue('REGISTRATION_ADMINS' in str(_warning[-1].message),\n assertion_error)", "def test_mail_admin_on_pending(self):\r\n\r\n def check_admin_message_state(state, expect_sent_to_admin, expect_sent_to_user):\r\n \"\"\" Changes user state and verifies e-mail sent to admin address only when pending. \"\"\"\r\n mail.outbox = []\r\n self._change_state(state)\r\n\r\n # If a message is sent to the user about course creator status change, it will be the first\r\n # message sent. 
Admin message will follow.\r\n base_num_emails = 1 if expect_sent_to_user else 0\r\n if expect_sent_to_admin:\r\n context = {'user_name': \"test_user\", 'user_email': '[email protected]'}\r\n self.assertEquals(base_num_emails + 1, len(mail.outbox), 'Expected admin message to be sent')\r\n sent_mail = mail.outbox[base_num_emails]\r\n self.assertEquals(\r\n mock_render_to_string('emails/course_creator_admin_subject.txt', context),\r\n sent_mail.subject\r\n )\r\n self.assertEquals(\r\n mock_render_to_string('emails/course_creator_admin_user_pending.txt', context),\r\n sent_mail.body\r\n )\r\n self.assertEquals(self.studio_request_email, sent_mail.from_email)\r\n self.assertEqual([self.studio_request_email], sent_mail.to)\r\n else:\r\n self.assertEquals(base_num_emails, len(mail.outbox))\r\n\r\n with mock.patch.dict('django.conf.settings.FEATURES', self.enable_creator_group_patch):\r\n # E-mail message should be sent to admin only when new state is PENDING, regardless of what\r\n # previous state was (unless previous state was already PENDING).\r\n # E-mail message sent to user only on transition into and out of GRANTED state.\r\n check_admin_message_state(CourseCreator.UNREQUESTED, expect_sent_to_admin=False, expect_sent_to_user=False)\r\n check_admin_message_state(CourseCreator.PENDING, expect_sent_to_admin=True, expect_sent_to_user=False)\r\n check_admin_message_state(CourseCreator.GRANTED, expect_sent_to_admin=False, expect_sent_to_user=True)\r\n check_admin_message_state(CourseCreator.DENIED, expect_sent_to_admin=False, expect_sent_to_user=True)\r\n check_admin_message_state(CourseCreator.GRANTED, expect_sent_to_admin=False, expect_sent_to_user=True)\r\n check_admin_message_state(CourseCreator.PENDING, expect_sent_to_admin=True, expect_sent_to_user=True)\r\n check_admin_message_state(CourseCreator.PENDING, expect_sent_to_admin=False, expect_sent_to_user=False)\r\n check_admin_message_state(CourseCreator.DENIED, expect_sent_to_admin=False, expect_sent_to_user=True)", "def test_admin_approval_complete_email_uses_registration_default_from_email(self):\n new_user = UserModel().objects.create_user(**self.user_info)\n profile = self.registration_profile.objects.create_profile(new_user)\n profile.send_admin_approve_complete_email(Site.objects.get_current())\n self.assertEqual(len(mail.outbox), 1)\n self.assertEqual(mail.outbox[0].from_email, '[email protected]')", "def test_subscribe_to_stream_post_policy_restrict_new_members_stream(self) -> None:\n new_member_email = self.nonreg_email(\"test\")\n self.register(new_member_email, \"test\")\n new_member = self.nonreg_user(\"test\")\n\n do_set_realm_property(new_member.realm, \"waiting_period_threshold\", 10, acting_user=None)\n self.assertTrue(new_member.is_provisional_member)\n\n stream = self.make_stream(\"stream1\")\n do_change_stream_post_policy(\n stream, Stream.STREAM_POST_POLICY_RESTRICT_NEW_MEMBERS, acting_user=new_member\n )\n result = self.common_subscribe_to_streams(new_member, [\"stream1\"])\n json = self.assert_json_success(result)\n self.assertEqual(json[\"subscribed\"], {new_member.email: [\"stream1\"]})\n self.assertEqual(json[\"already_subscribed\"], {})", "def test_send_subscribe_email(self):\n #Verifica se foi enviado 1 e-mail, o este não envia e-mail\n self.assertEqual(1, len(mail.outbox))", "def test_admin_approval_email_uses_registration_default_from_email(self):\n new_user = UserModel().objects.create_user(**self.user_info)\n profile = self.registration_profile.objects.create_profile(new_user)\n profile.activated = True\n 
self.registration_profile.objects.send_admin_approve_email(\n new_user, Site.objects.get_current())\n self.assertEqual(mail.outbox[0].from_email, '[email protected]')", "def test_resend_activation_email_nonunique_email(self):\n user1 = self.registration_profile.objects.create_inactive_user(\n site=Site.objects.get_current(), send_email=False, **self.user_info)\n user2_info = copy(self.user_info)\n user2_info['username'] = 'bob'\n user2 = self.registration_profile.objects.create_inactive_user(\n site=Site.objects.get_current(), send_email=False, **user2_info)\n self.assertEqual(user1.email, user2.email)\n self.assertFalse(self.registration_profile.objects.resend_activation_mail(\n email=self.user_info['email'],\n site=Site.objects.get_current(),\n ))\n self.assertEqual(len(mail.outbox), 0)", "def perform(self):\n emails.notify(\n event=self.event_type,\n user=self.user,\n node=self.node,\n timestamp=self.timestamp,\n message=self.html_message,\n profile_image_url=self.profile_image_url,\n url=self.url\n )", "def notify(self) -> None:\n pass", "def notify(self) -> None:\n pass", "def verify_mail(self):\n raise NotImplementedError", "def registration_ended(self):\n pass", "def test_user_creation_email(self):\n self.registration_profile.objects.create_inactive_user(\n site=Site.objects.get_current(), **self.user_info)\n self.assertEqual(len(mail.outbox), 1)", "def test_send_notification(self):\n management.call_command('send_first_report_notification', [], {})\n eq_(len(mail.outbox), 4)", "def test_accept_member_with_owner(self):\n url = '/api/v1/communities/3/accept_member/'\n data = {\n 'id': 5\n }\n\n response = self.client.post(url, data, HTTP_AUTHORIZATION=self.auth('user1'), format='json')\n self.assertEqual(status.HTTP_200_OK, response.status_code)\n\n data = response.data\n self.assertEqual(5, data['id'])\n self.assertEqual('1', data['status'])\n time.sleep(1)\n self.assertEqual(1, len(mail.outbox))\n self.assertEqual(mail.outbox[0].subject,\n '[Smartribe] Membership accepted')", "def check_can_accept_new_users(membership):\n if membership is not None and not membership.is_left():\n return membership.can_accept_new_users()\n else:\n return False", "def send_verification(self):\n pass", "def confirmed(self):", "def test_resend_activation_email_expired_user(self):\n new_user = self.registration_profile.objects.create_inactive_user(\n site=Site.objects.get_current(), send_email=False, **self.user_info)\n new_user.date_joined -= datetime.timedelta(\n days=settings.ACCOUNT_ACTIVATION_DAYS + 1)\n new_user.save()\n\n profile = self.registration_profile.objects.get(user=new_user)\n self.assertTrue(profile.activation_key_expired())\n\n self.assertFalse(self.registration_profile.objects.resend_activation_mail(\n email=self.user_info['email'],\n site=Site.objects.get_current(),\n ))\n self.assertEqual(len(mail.outbox), 0)", "def dispatch(self, request, *args, **kwargs):\n user_to = User.objects.get(pk=kwargs['pk'])\n user_from = self.request.user\n ###\n if user_to not in wanna_be_friends(user_from):\n friendship = FriendshipInvitation.objects.create(\n from_user=user_from, to_user=user_to, status=\"0\")\n\n notif = Notification.objects.create(sender=user_from,\n receiver=user_to,\n notif_type='friend_request')\n # Aca se ha enviado la solicitud\n else:\n return HttpResponseRedirect(\"/fr_req_fail/\")\n return HttpResponseRedirect(\"/\")", "def test_meeting_invitation(self):\n pass", "def test_create_delegate_limit(self):\n del_user = self.make_user('delegate')\n 
self.make_assignment(self.project, del_user, self.role_delegate)\n self.assertEqual(\n ProjectInvite.objects.filter(project=self.project).count(), 0\n )\n\n url = reverse(\n 'projectroles:api_invite_create',\n kwargs={'project': self.project.sodar_uuid},\n )\n post_data = {\n 'email': INVITE_USER_EMAIL,\n 'role': PROJECT_ROLE_DELEGATE,\n 'message': INVITE_MESSAGE,\n }\n response = self.request_knox(url, method='POST', data=post_data)\n\n self.assertEqual(response.status_code, 400, msg=response.content)\n self.assertEqual(\n ProjectInvite.objects.filter(project=self.project).count(), 0\n )\n self.assertEqual(len(mail.outbox), 0)", "def announce(self):\n self.notify(self.newAgent)\n if not self.agent.is_someone_subscribed():\n self.fail(cause=\"Noone Interested\")", "def test_skip_if_no_patients(self):\n\n appt_date = datetime.date.today() + datetime.timedelta(days=5)\n reminders.Patient.objects.filter(\n pk__in=[self.test_patient.pk, self.other_patient.pk]\n ).update(next_visit=appt_date)\n confirmed = self.create_confirmed_notification(self.test_patient, appt_date)\n\n self.startRouter()\n self.router.logger.setLevel(logging.DEBUG)\n # run email job\n from afrims.apps.reminders.app import daily_email_callback\n daily_email_callback(self.router)\n\n self.assertEqual(len(mail.outbox), 0)\n self.stopRouter()", "def notify(self):\n\n def remind():\n \"\"\"\n this function shows a pop-up using windows notification\n \"\"\"\n ntftion.notify('reminder', f\"{self.notification}:\\n{self.work_name}\\n{self.work_datetime.hour}: \"\n f\"{self.work_datetime.minute} \", app_icon='reminder.ico', timeout=3)\n\n self.eisenhower_priority()\n if self.priority:\n while dt.now().day <= self.time_ntf.day and self.status != \"done\":\n if self.priority == 1 and dt.now().time() >= self.time_ntf.time():\n remind()\n time.sleep(5*60)\n\n elif (self.priority == 2) and ((dt.now().hour == self.time_ntf.hour)\n and (dt.now().time().minute == self.time_ntf.time().minute)):\n remind()\n break\n elif self.priority == 3 and dt.now().time().hour == 18:\n remind()\n time.sleep(24 * 3600)\n elif self.priority == 4 and dt.now().weekday() == 6:\n remind()\n time.sleep(7 * 24 * 3600)\n else:\n pass", "def test_resend_activation_email_activated_user(self):\n user = self.registration_profile.objects.create_inactive_user(\n site=Site.objects.get_current(), send_email=False, **self.user_info)\n\n profile = self.registration_profile.objects.get(user=user)\n user, activated = self.registration_profile.objects.activate_user(\n profile.activation_key, Site.objects.get_current())\n self.assertFalse(user.is_active)\n self.assertTrue(activated)\n\n self.assertFalse(self.registration_profile.objects.resend_activation_mail(\n email=self.user_info['email'],\n site=Site.objects.get_current(),\n ))\n # Outbox has one mail, admin approve mail\n\n self.assertEqual(len(mail.outbox), 1)\n admins_emails = [value[1] for value in settings.REGISTRATION_ADMINS]\n for email in mail.outbox[0].to:\n self.assertIn(email, admins_emails)", "def test_can_subscribe_other_users(self) -> None:\n\n def validation_func(user_profile: UserProfile) -> bool:\n user_profile.refresh_from_db()\n return user_profile.can_subscribe_other_users()\n\n self.check_has_permission_policies(\"invite_to_stream_policy\", validation_func)", "def test_resend_activation_email(self):\n user = self.registration_profile.objects.create_inactive_user(\n site=Site.objects.get_current(), send_email=False, **self.user_info)\n self.assertEqual(len(mail.outbox), 0)\n\n profile = 
self.registration_profile.objects.get(user=user)\n orig_activation_key = profile.activation_key\n\n self.assertTrue(self.registration_profile.objects.resend_activation_mail(\n email=self.user_info['email'],\n site=Site.objects.get_current(),\n ))\n\n profile = self.registration_profile.objects.get(pk=profile.pk)\n new_activation_key = profile.activation_key\n\n self.assertNotEqual(orig_activation_key, new_activation_key)\n self.assertEqual(len(mail.outbox), 1)", "def test_admin_approval_complete_email_falls_back_to_django_default_from_email(self):\n new_user = UserModel().objects.create_user(**self.user_info)\n profile = self.registration_profile.objects.create_profile(new_user)\n profile.send_admin_approve_complete_email(Site.objects.get_current())\n self.assertEqual(len(mail.outbox), 1)\n self.assertEqual(mail.outbox[0].from_email, '[email protected]')", "def test_activation_email(self):\n new_user = UserModel().objects.create_user(**self.user_info)\n profile = self.registration_profile.objects.create_profile(new_user)\n profile.send_activation_email(Site.objects.get_current())\n self.assertEqual(len(mail.outbox), 1)\n self.assertEqual(mail.outbox[0].to, [self.user_info['email']])", "def handle(self, *args, **options):\n\n candidates_with_email = [candidate for candidate in Candidate.objects.all()\n if candidate.contact_address and candidate.participating]\n\n\n print 'sending e-mails'\n conn = get_connection()\n for c in candidates_with_email:\n if c.should_send_reminder():\n\n print 'emailing', c\n # store timestamp for reminder email so that they don't get another one for <REMINDER_TIME_PERIOD> days\n c.last_reminder_sent = timezone.now()\n c.save()\n msg = make_email(c)\n conn.send_messages([msg])\n conn.close()", "async def new_post_message_listener(self, message: discord.Message) -> None:\n if not _channel.is_help_forum_post(message.channel):\n return\n\n await _message.notify_session_participants(message)\n\n if not message.author.bot and message.author.id != message.channel.owner_id:\n await _caches.posts_with_non_claimant_messages.set(message.channel.id, \"sentinel\")", "def test_timeout(self):\n now = datetime.datetime.now()\n channel = ChannelStatus.get_channel(channel_spec=self.channel)\n greet = channel.update_user_join_status(self.user, self.greeting, now=now)\n now += datetime.timedelta(seconds=REGREET_TIMEOUT) + datetime.timedelta(seconds=100)\n\n greet = channel.update_user_join_status(self.user, self.greeting, now=now)\n self.assertEqual(True, greet)", "def send_warning(self):\n\n # Check whether all the necessary parameters for SMS are present\n if self.your_phone != '' and self.twilio_phone != '' and self.account_sid != '' and self.auth_token != '':\n client = Client(self.account_sid, self.auth_token)\n\n try:\n sms = client.messages.create(\n body=\"\"\"Last will: It was at least 30 days since your last check in. \n This is a reminder to check in in the next 24 hours.\"\"\",\n from_=self.twilio_phone,\n to=self.your_phone)\n sms\n print(\"\\nSMS sent\")\n except Exception as e:\n print(f\"An error occurred while trying to send the SMS. Error: {e}\")\n\n else:\n print(\"\\nMissing SMS parameters. SMS not sent\")\n\n # Check whether all the necessary parameters for email are present\n if self.sender_name != '' and self.recipient_email != '' and self.email != '' and self.email_pwd != '':\n message = f\"\"\"It has been at least 30 days since you last checked in. 
\nYou need to check in in the next 24 hours.\\n\nOtherwise at {self.deadline} the email with the important info will be sent to the designated recipient.\\n\nIn order to reset simply go to the working directory and run python3 last_will.py\"\"\"\n\n # send_email will return 0 if everything went ok, otherwise it will return an error message\n status = send_email(self.sender_name, self.your_email,\n self.email, self.email_pwd,\n subject='Last will: Reminder to check in', unencrypted_message=message)\n\n if status != 0:\n print(status)\n exit(1)\n else:\n print(\"Email sent\\n\")\n\n print(f\"You have until {self.deadline} to check in. \"\n f\"In order to do that simply go to the working directory and run ./last_will.sh\\n\")\n else:\n print(\"Missing email parameters. Email not sent.\\n\")\n exit(1)", "def test_simple_unconfirmed(self):\n appt_date = datetime.date.today()\n self.create_confirmed_notification(self.test_patient, appt_date)\n self.create_unconfirmed_notification(self.other_patient, appt_date)\n qs = Patient.objects.unconfirmed_for_date(appt_date)\n self.assertFalse(self.test_patient in qs)\n self.assertTrue(self.other_patient in qs)\n self.assertFalse(self.unrelated_patient in qs)", "def notify(self, almost):\n self.message += \\\n '------------------ ALMOST EXPIRED ------------------\\n'\n for lo in almost:\n self.message += 'NOTIFIED :: ' + lo.borrower.user.username\n self.message += '\\n'\n notif = Notification(wallet=lo.borrower,\n message_short=\"You have a pending\"\n \" loan which dues tomorrow\",\n message_large=\"You have borrowed \" +\n str(lo.loaned) + \" from \" +\n lo.offer.lender.user.username + \", if you \" +\n \"don't pay by this time tomorrow you will \" +\n \"be banned\")\n notif.save()", "def notifications_n_email_after_event_creation(sender, instance, **kwargs):\n alarm = instance.alarm # Alarm which generated the event\n\n subscriptions = Subscription.objects.filter(alarm=alarm) # Getting the subscriptions associated with alarms\n sub_serializer = SubscriptionSerializer(subscriptions, many=True)\n send = [] # list of emails which the mail was send\n notificated = [] # list with users notificated\n\n # If no device, no variable and no content_type, there is nothing to send yet. 
Cancel notification and email\n if instance.device is None and instance.variables is None and len(instance.content_type.all()) == 0 :\n return\n for sub in sub_serializer.data: # Itering for subscription\n if sub['user'] is not None: # if user field isn't NULL AND not Group\n user = User.objects.get(id=sub['user'])\n if sub['active'] and user not in notificated: # if subscription is active\n Notification.objects.create(user=user, event=instance) # creating notification\n notificated.append(user) # adding user to the notified list\n if sub['email']: # if email option is checked\n email = user.email\n if email not in send: # for dont repeat email\n # Get a dict with relevant information about the event\n context = {'event': instance,\n 'alarm': instance.alarm,\n 'user': user,\n 'device': instance.device,\n 'var': instance.variables,\n 'content_type': instance.content_type.all()}\n plain_text = get_template('mail.txt') # Plain text template\n text_content = plain_text.render(context)\n subject = 'Event Alert: ' + instance.__str__()\n from_email = '[email protected]'\n to = email\n msg = EmailMultiAlternatives(subject, text_content, from_email, [to])\n try:\n if sub['staff_template'] is not None:\n htmly = get_template(sub['staff_template']) # Define the HTML template\n html_content = htmly.render(context) # Rendering the templates with context information\n elif sub['staff_template_text'] != \"\":\n htmly = Template(sub['staff_template_text'])\n html_content = htmly.render(Context(context))\n elif sub['user_template'] is not None:\n htmly = get_template(sub['user_template']) # Define the HTML template\n html_content = htmly.render(context) # Rendering the templates with context information\n elif sub['user_template_text'] != \"\":\n htmly = Template(sub['user_template_text'])\n html_content = htmly.render(Context(context))\n msg.attach_alternative(html_content, 'text/html')\n msg.send()\n except:\n msg.send()\n print('Mail send to %s' % email)\n\n if sub['group'] is not None: # if is group and not user\n users_mail_list = [] # list with staff users instances\n if sub['active']: # if subscription is active\n group = Group.objects.get(pk=sub['group']) # Getting the group by id\n users = User.objects.filter(groups__name=group) # getting the users for group\n context = {'event': instance,\n 'alarm': instance.alarm,\n 'user': group,\n 'device': instance.device,\n 'var': instance.variables}\n for user in users: # Iterating users\n if user not in notificated:\n Notification.objects.create(user=user, event=instance) # creating notification\n notificated.append(user) # adding user to notificated list\n if sub['email']:\n mail = user.email # Adding the email for users in the user list\n if mail not in send: # for don't repeat email\n users_mail_list.append(mail)\n send.append(mail)\n # After getting all the emails and classifying it for staff and not staff members\n plain_text = get_template('mail.txt') # Plain text template\n text_content = plain_text.render(context)\n subject = 'Event Alert: ' + instance.__str__()\n from_email = '[email protected]'\n msg = EmailMultiAlternatives(subject, text_content, from_email, users_mail_list)\n try:\n if sub['staff_template'] is not None:\n htmly = get_template(sub['staff_template']) # Define the HTML template\n html_content = htmly.render(context) # Rendering the templates with context information\n elif sub['staff_template_text'] != \"\":\n htmly = Template(sub['staff_template_text'])\n html_content = htmly.render(Context(context))\n elif sub['user_template'] 
is not None:\n htmly = get_template(sub['user_template']) # Define the HTML template\n html_content = htmly.render(context) # Rendering the templates with context information\n elif sub['user_template_text'] != \"\":\n htmly = Template(sub['user_template_text'])\n html_content = htmly.render(Context(context))\n msg.attach_alternative(html_content, 'text/html')\n msg.send()\n except:\n msg.send()\n print('Mail send to %s' % str(users_mail_list))", "def test_meeting_registrants(self):\n pass", "def test_double_join(self):\n now = datetime.datetime.now()\n\n channel = ChannelStatus.get_channel(channel_spec=self.channel)\n greet = channel.update_user_join_status(self.user, self.greeting, now=now)\n now += datetime.timedelta(seconds=0.1)\n\n greet = channel.update_user_join_status(self.user, self.greeting, now=now)\n self.assertEqual(False, greet)", "async def on_member_join(self, member):\n verified = get(member.guild.roles, name='verified')\n verify_channel = get(member.guild.channels, name='verify')\n db_discord_user = PostgreSQL.get_discord_user(member.id)\n # Checks if the verified role exists, if it doesn't a DM is sent to the server owner to configure it\n if verified is None:\n await verify_channel.send(f'{member.guild.owner.mention} The verified role doesn\\'t exist in the server `{member.guild.name}`. Please type `!build` in one of the text channels in that server')\n return\n\n # Checks if the user exists in the database, if it doesn't a DM is sent to the user to tell them to get verified\n if db_discord_user is None:\n await verify_channel.send(f'{member.mention} You have not been verified yet. Please visit {WEBSITE} to get verified (VPN is required)')\n return\n \n db_openid_user = PostgreSQL.get_openid_user(db_discord_user[\"openidc_id\"])\n email = db_openid_user[\"username\"]\n await member.add_roles(verified, reason='Assigning user the verified role')\n\n if check_shelve_file(member.guild.id):\n await member.edit(nick=f'{member.name} [{email}]', reason=\"Changing users\\'s nickname\")", "def test_group_notification_called(self):\n sender = self.create_user()\n thread = self.create_thread(sender=sender)\n newmessage = mommy.make(Message, thread=thread, sender=sender)\n send_message(newmessage.pk)\n self.groupnotify_mock.assert_called_with(newmessage.pk)", "def test_is_member_ok(self):\n self.add_group('testgroup', ['user:[email protected]'])\n\n # baphomet is not a member\n request = endpoints_api.MembershipRequest.combined_message_class(\n group='testgroup',\n identity='user:[email protected]')\n response = self.call_api('membership', msg_dict(request), 200)\n self.assertEqual({u'is_member': False}, response.json)\n\n # mithras is a member\n request = endpoints_api.MembershipRequest.combined_message_class(\n group='testgroup',\n identity='user:[email protected]')\n response = self.call_api('membership', msg_dict(request), 200)\n self.assertEqual({u'is_member': True}, response.json)", "def test_invitation_email(self):\n queryset = models.Invitation.objects.filter(id=self.invitation.id)\n self.admin_instance.send_new_activation_email(self.some_request, queryset)\n # check whether there is a mail in the outbox\n self.assertEqual(len(mail.outbox), 1)\n # check subject\n self.assertEqual(\n mail.outbox[0].subject,\n \"Er is een account voor u aangemaakt op sso.lizard.net\",\n )\n self.assertEqual(mail.outbox[0].to, [\"[email protected]\"])\n # check mail starts with 'Hallo Reinout,'\n self.assertTrue(mail.outbox[0].body.startswith(\"Hallo Reinout,\"))", "def 
send_signup_notification(self):\n return self._send_signup_notification", "def test_consumer_w_subscriber(self):\n self.prep_consumer()\n subscriber = Subscriber.objects.get(id=6)\n self.consumer.subscriber = subscriber\n self.consumer.save()\n UnqualifiedConsumerEmailTask().run(test_mode=self.consumer)\n self.common_asserts()\n self.assertTrue('Provide your cell phone number' \n not in mail.outbox[0].alternatives[0][0])\n self.assertTrue('Provide your cell phone number. Follow this link:' \n not in mail.outbox[0].body)", "def test_advertiser_recipient(self):\n self.prep_advertiser()\n UnqualifiedConsumerEmailTask().run(test_mode=self.consumer)\n self.common_asserts()", "def checkUpstreamScheduler():", "def send_admin_notification_callback(sender, **kwargs):\r\n user = kwargs['user']\r\n\r\n studio_request_email = settings.FEATURES.get('STUDIO_REQUEST_EMAIL', '')\r\n context = {'user_name': user.username, 'user_email': user.email}\r\n\r\n subject = render_to_string('emails/course_creator_admin_subject.txt', context)\r\n subject = ''.join(subject.splitlines())\r\n message = render_to_string('emails/course_creator_admin_user_pending.txt', context)\r\n\r\n try:\r\n send_mail(\r\n subject,\r\n message,\r\n studio_request_email,\r\n [studio_request_email],\r\n fail_silently=False\r\n )\r\n except SMTPException:\r\n log.warning(\"Failure sending 'pending state' e-mail for %s to %s\", user.email, studio_request_email)", "def on_me_joined(self, raw_msg, **kwargs):", "def confirm_email(self):\n # The base class' implementation does nothing\n pass", "def test_49_announcement_messages(self, mock):\r\n self.register()\r\n res = self.app.get(\"/\", follow_redirects=True)\r\n error_msg = \"There should be a message for the root user\"\r\n print res.data\r\n assert \"Root Message\" in res.data, error_msg\r\n error_msg = \"There should be a message for the user\"\r\n assert \"User Message\" in res.data, error_msg\r\n error_msg = \"There should not be an owner message\"\r\n assert \"Owner Message\" not in res.data, error_msg\r\n # Now make the user an app owner\r\n self.new_application()\r\n res = self.app.get(\"/\", follow_redirects=True)\r\n error_msg = \"There should be a message for the root user\"\r\n assert \"Root Message\" in res.data, error_msg\r\n error_msg = \"There should be a message for the user\"\r\n assert \"User Message\" in res.data, error_msg\r\n error_msg = \"There should be an owner message\"\r\n assert \"Owner Message\" in res.data, error_msg\r\n self.signout()\r\n\r\n # Register another user\r\n self.register(method=\"POST\", fullname=\"Jane Doe\", name=\"janedoe\",\r\n password=\"janedoe\", password2=\"janedoe\",\r\n email=\"[email protected]\")\r\n res = self.app.get(\"/\", follow_redirects=True)\r\n error_msg = \"There should not be a message for the root user\"\r\n assert \"Root Message\" not in res.data, error_msg\r\n error_msg = \"There should be a message for the user\"\r\n assert \"User Message\" in res.data, error_msg\r\n error_msg = \"There should not be an owner message\"\r\n assert \"Owner Message\" not in res.data, error_msg\r\n self.signout()\r\n\r\n # Now as an anonymous user\r\n res = self.app.get(\"/\", follow_redirects=True)\r\n error_msg = \"There should not be a message for the root user\"\r\n assert \"Root Message\" not in res.data, error_msg\r\n error_msg = \"There should not be a message for the user\"\r\n assert \"User Message\" not in res.data, error_msg\r\n error_msg = \"There should not be an owner message\"\r\n assert \"Owner Message\" not in res.data, 
error_msg" ]
[ "0.631417", "0.61985207", "0.61863524", "0.6167759", "0.6167759", "0.61630905", "0.6139909", "0.6068332", "0.60387355", "0.60372734", "0.6013381", "0.60123867", "0.6006325", "0.59600574", "0.5957167", "0.5927392", "0.5894092", "0.5890335", "0.5887974", "0.5873129", "0.5872538", "0.58361214", "0.5829908", "0.58294815", "0.5777522", "0.57497036", "0.5746095", "0.5745145", "0.5735348", "0.5732637", "0.57206565", "0.5715671", "0.5712509", "0.5706263", "0.570243", "0.56946355", "0.5668032", "0.56664383", "0.5666164", "0.5653144", "0.56519", "0.5648408", "0.56371975", "0.56337494", "0.56280476", "0.5626112", "0.561884", "0.56123775", "0.5605043", "0.560448", "0.5602423", "0.558893", "0.55724925", "0.5565061", "0.5548386", "0.5543712", "0.5542572", "0.5541142", "0.5541142", "0.5539274", "0.552713", "0.5524289", "0.55147237", "0.5509312", "0.55018365", "0.54985243", "0.5492382", "0.54915124", "0.5487142", "0.548393", "0.547765", "0.54742146", "0.547256", "0.5460692", "0.54596794", "0.54468966", "0.5446367", "0.5445304", "0.5441411", "0.5441075", "0.5436949", "0.54187346", "0.5413263", "0.5409804", "0.54094625", "0.5402084", "0.54011464", "0.53975487", "0.5397474", "0.5397001", "0.5396086", "0.5394629", "0.5383621", "0.53806216", "0.537887", "0.5373189", "0.5373047", "0.5369055", "0.5365228", "0.5363059" ]
0.7043496
0
Ensure we can resend an activation email on demand
def test_resend_activation_email(self):
        data = {
            'email': self.user.email,
        }
        response = self.client.post(
            reverse('user-resend-activation-email'),
            data,
            format='json',
        )
        self.assertEqual(
            response.status_code,
            status.HTTP_200_OK,
            response.content
        )
        self.assertEqual(
            response.content,
            b'',
        )
        self.assertEqual(len(mail.outbox), 1)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_resend_activation_email(self):\n user = self.registration_profile.objects.create_inactive_user(\n site=Site.objects.get_current(), send_email=False, **self.user_info)\n self.assertEqual(len(mail.outbox), 0)\n\n profile = self.registration_profile.objects.get(user=user)\n orig_activation_key = profile.activation_key\n\n self.assertTrue(self.registration_profile.objects.resend_activation_mail(\n email=self.user_info['email'],\n site=Site.objects.get_current(),\n ))\n\n profile = self.registration_profile.objects.get(pk=profile.pk)\n new_activation_key = profile.activation_key\n\n self.assertNotEqual(orig_activation_key, new_activation_key)\n self.assertEqual(len(mail.outbox), 1)", "def test_resend_activation_email_activated_user(self):\n user = self.registration_profile.objects.create_inactive_user(\n site=Site.objects.get_current(), send_email=False, **self.user_info)\n\n profile = self.registration_profile.objects.get(user=user)\n user, activated = self.registration_profile.objects.activate_user(\n profile.activation_key, Site.objects.get_current())\n self.assertTrue(user.is_active)\n self.assertTrue(activated)\n\n self.assertFalse(self.registration_profile.objects.resend_activation_mail(\n email=self.user_info['email'],\n site=Site.objects.get_current(),\n ))\n self.assertEqual(len(mail.outbox), 0)", "def test_resend_activation_email_activated_user(self):\n user = self.registration_profile.objects.create_inactive_user(\n site=Site.objects.get_current(), send_email=False, **self.user_info)\n\n profile = self.registration_profile.objects.get(user=user)\n user, activated = self.registration_profile.objects.activate_user(\n profile.activation_key, Site.objects.get_current())\n self.assertFalse(user.is_active)\n self.assertTrue(activated)\n\n self.assertFalse(self.registration_profile.objects.resend_activation_mail(\n email=self.user_info['email'],\n site=Site.objects.get_current(),\n ))\n # Outbox has one mail, admin approve mail\n\n self.assertEqual(len(mail.outbox), 1)\n admins_emails = [value[1] for value in settings.REGISTRATION_ADMINS]\n for email in mail.outbox[0].to:\n self.assertIn(email, admins_emails)", "def test_resend_activation_email_expired_user(self):\n new_user = self.registration_profile.objects.create_inactive_user(\n site=Site.objects.get_current(), send_email=False, **self.user_info)\n new_user.date_joined -= datetime.timedelta(\n days=settings.ACCOUNT_ACTIVATION_DAYS + 1)\n new_user.save()\n\n profile = self.registration_profile.objects.get(user=new_user)\n self.assertTrue(profile.activation_key_expired())\n\n self.assertFalse(self.registration_profile.objects.resend_activation_mail(\n email=self.user_info['email'],\n site=Site.objects.get_current(),\n ))\n self.assertEqual(len(mail.outbox), 0)", "def verify(self):\n ACTIVATION_PERIOD = datetime.timedelta(days=14)\n if not self.org_verified:\n self.org_verified = True\n if not self.is_active:\n if not self.activation_code:\n self.activation_code = random_url_safe_code()\n self.activate_by = datetime.datetime.utcnow() + ACTIVATION_PERIOD\n import messaging # avoid circular import\n messaging.send_activation_emails(self)\n self.save()", "def test_resend_activation_email_nonunique_email(self):\n user1 = self.registration_profile.objects.create_inactive_user(\n site=Site.objects.get_current(), send_email=False, **self.user_info)\n user2_info = copy(self.user_info)\n user2_info['username'] = 'bob'\n user2 = self.registration_profile.objects.create_inactive_user(\n site=Site.objects.get_current(), send_email=False, 
**user2_info)\n self.assertEqual(user1.email, user2.email)\n self.assertFalse(self.registration_profile.objects.resend_activation_mail(\n email=self.user_info['email'],\n site=Site.objects.get_current(),\n ))\n self.assertEqual(len(mail.outbox), 0)", "def test_resend_activation_email_nonexistent_user(self):\n self.assertFalse(self.registration_profile.objects.resend_activation_mail(\n email=self.user_info['email'],\n site=Site.objects.get_current(),\n ))\n self.assertEqual(len(mail.outbox), 0)", "def assertReactivateEmailSent(self, email_user):\r\n context = {\r\n 'name': self.user.profile.name,\r\n 'key': self.registration.activation_key\r\n }\r\n\r\n self.assertEmailUser(\r\n email_user,\r\n 'emails/activation_email_subject.txt',\r\n context,\r\n 'emails/activation_email.txt',\r\n context\r\n )\r\n\r\n # Thorough tests for safe_get_host are elsewhere; here we just want a quick URL sanity check\r\n request = RequestFactory().post('unused_url')\r\n request.META['HTTP_HOST'] = \"aGenericValidHostName\"\r\n self.append_allowed_hosts(\"aGenericValidHostName\")\r\n\r\n body = render_to_string('emails/activation_email.txt', context)\r\n host = safe_get_host(request)\r\n\r\n self.assertIn(host, body)", "def confirm_email(self):\n self.active = True\n self.save()", "def confirm_email(self):\n # The base class' implementation does nothing\n pass", "def test_reactivate_process(self, mock_sendmail):\r\n res = self.testapp.post('/api/v1/suspend',\r\n params={'email': u'[email protected]'},\r\n status=200)\r\n self.assertTrue(mock_sendmail.called)\r\n\r\n success = json.loads(res.body)\r\n self.assertTrue(\r\n 'message' in success,\r\n \"Should be successful with admin email address\")\r\n\r\n # now let's try to login\r\n # the migrations add a default admin account\r\n user_data = {'login': 'admin',\r\n 'password': 'admin',\r\n 'form.submitted': 'true'}\r\n\r\n res = self.testapp.post('/login',\r\n params=user_data,\r\n status=200)\r\n\r\n self.assertTrue(\r\n 'account deactivated' in str(res),\r\n \"Login should have failed since we're not active: \" + str(res))\r\n\r\n act = Activation.query.first()\r\n self.testapp.delete(\r\n \"/api/v1/suspend?username={0}&code={1}&password={2}\".format(\r\n user_data['login'],\r\n act.code,\r\n 'admin'),\r\n status=200)\r\n\r\n self.assertTrue(\r\n 'activated' in str(res),\r\n \"Should be prompted to login now: \" + str(res))\r\n\r\n user_data = {'login': 'admin',\r\n 'password': 'admin',\r\n 'form.submitted': 'true'}\r\n\r\n res = self.testapp.post('/login',\r\n params=user_data,\r\n status=302)", "def test_resend_inactive(self):\n self.invite.active = False\n self.invite.save()\n url = reverse(\n 'projectroles:api_invite_resend',\n kwargs={'projectinvite': self.invite.sodar_uuid},\n )\n response = self.request_knox(url, method='POST')\n self.assertEqual(response.status_code, 400, msg=response.content)\n self.assertEqual(len(mail.outbox), 0)", "def send_confirmation(self):\r\n c.user.email_validated = False\r\n c.user.confirmation_code = random_key(6)\r\n c.user._commit()\r\n emailer.confirmation_email(c.user)", "def test_api_user_resend_confirmation_post(self):\n pass", "def test_activation_email(self):\n new_user = UserModel().objects.create_user(**self.user_info)\n profile = self.registration_profile.objects.create_profile(new_user)\n profile.send_activation_email(Site.objects.get_current())\n self.assertEqual(len(mail.outbox), 1)\n self.assertEqual(mail.outbox[0].to, [self.user_info['email']])", "def resend_email(self, userdict):\n return 
self.post('resend', userdict)", "def test_0120_activationkey_resend_post_2(self):\n user = User(name=\"Test User\", email=\"[email protected]\")\n user.set_password(\"password\")\n user.save()\n with nested(\n patch.object(smtplib.SMTP, 'sendmail'),\n patch.object(smtplib.SMTP, 'quit'),\n ) as (mock_sendmail, mock_quit):\n response = self.fetch(\n '/activation_resend', method=\"POST\", follow_redirects=False,\n body=urlencode({'email':'[email protected]'})\n )\n self.assertEqual(mock_sendmail.call_count, 1)\n self.assertEqual(mock_quit.call_count, 1)\n self.assertEqual(response.code, 302)", "def test_admin_approval_complete_email(self):\n new_user = UserModel().objects.create_user(**self.user_info)\n profile = self.registration_profile.objects.create_profile(new_user)\n profile.send_admin_approve_complete_email(Site.objects.get_current())\n self.assertEqual(len(mail.outbox), 1)\n self.assertEqual(mail.outbox[0].to, [self.user_info['email']])", "def test_reactivation_for_unregistered_user(self, email_user):\r\n response_data = self.reactivation_email(self.unregisteredUser)\r\n\r\n self.assertFalse(response_data['success'])", "def activate(self):\n if not self.is_active:\n self.is_active = True\n self.activated_at = datetime.datetime.utcnow()\n import messaging # avoid circular import\n messaging.send_activated_emails(self)\n self.save()", "def _activate_user(self, email):\r\n activation_key = registration(email).activation_key\r\n\r\n # and now we try to activate\r\n resp = self.client.get(reverse('activate', kwargs={'key': activation_key}))\r\n return resp", "def confirm_email(request, key):\n alt_email = cpm.Email.objects.filter(activation_key=key)\n if alt_email.exists():\n alt_email[0].confirm()\n return redirect('/')\n hero_title = 'We weren\\'t able to complete your request...'\n return render_err_msg(request, hero_title)", "def test_admin_approval_email_uses_registration_default_from_email(self):\n new_user = UserModel().objects.create_user(**self.user_info)\n profile = self.registration_profile.objects.create_profile(new_user)\n profile.activated = True\n self.registration_profile.objects.send_admin_approve_email(\n new_user, Site.objects.get_current())\n self.assertEqual(mail.outbox[0].from_email, '[email protected]')", "def test_0110_activationkey_resend_post_1(self):\n response = self.fetch(\n '/activation_resend', method=\"POST\", follow_redirects=False,\n body=urlencode({'email':'[email protected]'})\n )\n self.assertEqual(response.code, 200)\n self.assertEqual(\n response.body.count(u'we could not match your email'), 1\n )", "def confirm_email(self, request, email_address):\n email_address.verified = True\n email_address.set_as_primary(conditional=True)\n email_address.save()\n\n u = get_user_model().objects.get(pk=email_address.user.id)\n u.is_active = True\n u.save()", "def test_admin_approval_email(self):\n new_user = UserModel().objects.create_user(**self.user_info)\n profile = self.registration_profile.objects.create_profile(new_user)\n profile.activated = True\n self.registration_profile.objects.send_admin_approve_email(\n new_user, Site.objects.get_current())\n self.assertEqual(len(mail.outbox), 1)\n admins_emails = [value[1] for value in settings.REGISTRATION_ADMINS]\n for email in mail.outbox[0].to:\n self.assertIn(email, admins_emails)", "def confirm_email_change(request, key):\r\n try:\r\n try:\r\n pec = PendingEmailChange.objects.get(activation_key=key)\r\n except PendingEmailChange.DoesNotExist:\r\n response = render_to_response(\"invalid_email_key.html\", {})\r\n 
transaction.rollback()\r\n return response\r\n\r\n user = pec.user\r\n address_context = {\r\n 'old_email': user.email,\r\n 'new_email': pec.new_email\r\n }\r\n\r\n if len(User.objects.filter(email=pec.new_email)) != 0:\r\n response = render_to_response(\"email_exists.html\", {})\r\n transaction.rollback()\r\n return response\r\n\r\n subject = render_to_string('emails/email_change_subject.txt', address_context)\r\n subject = ''.join(subject.splitlines())\r\n message = render_to_string('emails/confirm_email_change.txt', address_context)\r\n up = UserProfile.objects.get(user=user)\r\n meta = up.get_meta()\r\n if 'old_emails' not in meta:\r\n meta['old_emails'] = []\r\n meta['old_emails'].append([user.email, datetime.datetime.now(UTC).isoformat()])\r\n up.set_meta(meta)\r\n up.save()\r\n # Send it to the old email...\r\n try:\r\n user.email_user(subject, message, settings.DEFAULT_FROM_EMAIL)\r\n except Exception:\r\n log.warning('Unable to send confirmation email to old address', exc_info=True)\r\n response = render_to_response(\"email_change_failed.html\", {'email': user.email})\r\n transaction.rollback()\r\n return response\r\n\r\n user.email = pec.new_email\r\n user.save()\r\n pec.delete()\r\n # And send it to the new email...\r\n try:\r\n user.email_user(subject, message, settings.DEFAULT_FROM_EMAIL)\r\n except Exception:\r\n log.warning('Unable to send confirmation email to new address', exc_info=True)\r\n response = render_to_response(\"email_change_failed.html\", {'email': pec.new_email})\r\n transaction.rollback()\r\n return response\r\n\r\n response = render_to_response(\"email_change_successful.html\", address_context)\r\n transaction.commit()\r\n return response\r\n except Exception:\r\n # If we get an unexpected exception, be sure to rollback the transaction\r\n transaction.rollback()\r\n raise", "def test_admin_approval_email_falls_back_to_django_default_from_email(self):\n new_user = UserModel().objects.create_user(**self.user_info)\n profile = self.registration_profile.objects.create_profile(new_user)\n profile.activated = True\n self.registration_profile.objects.send_admin_approve_email(\n new_user, Site.objects.get_current())\n self.assertEqual(mail.outbox[0].from_email, '[email protected]')", "def test_activation_email_missing_template(self):\n new_user = UserModel().objects.create_user(**self.user_info)\n profile = self.registration_profile.objects.create_profile(new_user)\n profile.send_activation_email(Site.objects.get_current())\n self.assertEqual(len(mail.outbox), 1)\n self.assertEqual(mail.outbox[0].to, [self.user_info['email']])", "def activate_user(self, email):\r\n activation_key = Registration.objects.get(user__email=email).activation_key\r\n # and now we try to activate\r\n check_for_get_code(self, 200, reverse('activate', kwargs={'key': activation_key}))\r\n # Now make sure that the user is now actually activated\r\n self.assertTrue(User.objects.get(email=email).is_active)", "def suspend_acct(request):\r\n params = request.params\r\n user = request.user\r\n\r\n # we need to get the user from the email\r\n email = params.get('email', None)\r\n\r\n if email is None and hasattr(request, 'json_body'):\r\n # try the json body\r\n email = request.json_body.get('email', None)\r\n\r\n if user is None and email is None:\r\n request.response.status_int = 406\r\n return _api_response(request, {\r\n 'error': \"Please submit an email address\",\r\n })\r\n\r\n if user is None and email is not None:\r\n user = UserMgr.get(email=email)\r\n\r\n if user is None:\r\n 
request.response.status_int = 404\r\n return _api_response(request, {\r\n 'error': \"Please submit a valid address\",\r\n 'email': email\r\n })\r\n\r\n # check if we've already gotten an activation for this user\r\n if user.activation is not None:\r\n request.response.status_int = 406\r\n return _api_response(request, {\r\n 'error': \"\"\"You've already marked your account for reactivation.\r\nPlease check your email for the reactivation link. Make sure to\r\ncheck your spam folder.\"\"\",\r\n 'username': user.username,\r\n })\r\n\r\n # mark them for reactivation\r\n user.reactivate(u\"FORGOTTEN\")\r\n\r\n # log it\r\n AuthLog.reactivate(user.username)\r\n\r\n # and then send an email notification\r\n # @todo the email side of things\r\n settings = request.registry.settings\r\n msg = ReactivateMsg(user.email,\r\n \"Activate your Bookie account\",\r\n settings)\r\n\r\n msg.send({\r\n 'url': request.route_url(\r\n 'reset',\r\n username=user.username,\r\n reset_key=user.activation.code),\r\n 'username': user.username\r\n })\r\n\r\n return _api_response(request, {\r\n 'message': \"\"\"Your account has been marked for reactivation. Please\r\n check your email for instructions to reset your\r\n password\"\"\",\r\n })", "def test_activation_email_falls_back_to_django_default_from_email(self):\n new_user = UserModel().objects.create_user(**self.user_info)\n profile = self.registration_profile.objects.create_profile(new_user)\n profile.send_activation_email(Site.objects.get_current())\n self.assertEqual(mail.outbox[0].from_email, '[email protected]')", "def test_admin_approval_complete_email_uses_registration_default_from_email(self):\n new_user = UserModel().objects.create_user(**self.user_info)\n profile = self.registration_profile.objects.create_profile(new_user)\n profile.send_admin_approve_complete_email(Site.objects.get_current())\n self.assertEqual(len(mail.outbox), 1)\n self.assertEqual(mail.outbox[0].from_email, '[email protected]')", "def test_admin_approval_complete_email_falls_back_to_django_default_from_email(self):\n new_user = UserModel().objects.create_user(**self.user_info)\n profile = self.registration_profile.objects.create_profile(new_user)\n profile.send_admin_approve_complete_email(Site.objects.get_current())\n self.assertEqual(len(mail.outbox), 1)\n self.assertEqual(mail.outbox[0].from_email, '[email protected]')", "def test_activation_email_uses_registration_default_from_email(self):\n new_user = UserModel().objects.create_user(**self.user_info)\n profile = self.registration_profile.objects.create_profile(new_user)\n profile.send_activation_email(Site.objects.get_current())\n self.assertEqual(mail.outbox[0].from_email, '[email protected]')", "def send_activation_email(self):\n ctx_dict = {\n 'activation_key': self.activation_key,\n 'expiration_days': settings.ACCOUNT_ACTIVATION_DAYS,\n 'user': self.user,\n 'SITE_URL': settings.SITE_URL,\n }\n subject = render_to_string('accounts/activation_email_subject.txt', ctx_dict)\n # Email subject *must not* contain newlines\n subject = ''.join(subject.splitlines())\n \n message = render_to_string('accounts/activation_email_body.html', ctx_dict)\n\n msg = EmailMultiAlternatives(subject, message, None, [self.user.email])\n msg.attach_alternative(message, \"text/html\")\n msg.send()", "def test_0070_registration_post_3(self):\n options.options.require_activation = True\n user = User(name=\"Test User\", email=\"[email protected]\")\n user.set_password(\"password\")\n user.save()\n with nested(\n patch.object(smtplib.SMTP, 'sendmail'),\n 
patch.object(smtplib.SMTP, 'quit'),\n ) as (mock_sendmail, mock_quit):\n response = self.fetch(\n '/registration', method=\"POST\", follow_redirects=False,\n body=urlencode({'name':'anoop',\n 'email':'[email protected]',\n 'password':'openlabs', 'confirm_password':'openlabs'}\n )\n )\n self.assertEqual(mock_sendmail.call_count, 0)\n self.assertEqual(mock_quit.call_count, 0)\n self.assertEqual(response.code, 200)\n self.assertEqual(\n response.body.count(\n u'This email is already registered.'\n ), 1\n )", "def is_invited_pending_activation(self):\n if self.registration_method == self.INVITED \\\n and self.is_pending_activation():\n return True\n else:\n return False", "def test_0100_activationkey_resend_get(self):\n response = self.fetch(\n '/activation_resend', method=\"GET\", follow_redirects=False,\n )\n self.assertEqual(response.code, 200)", "def test_create_user_activation_email_failure(self, send):\n data = {\n 'username': 'John',\n 'email': '[email protected]',\n 'password': 'test123!',\n 'phone': '1234567890',\n 'first_name': 'Chuck',\n 'last_name': 'Norris',\n 'university': {\n \"name\": \"random_university\"\n },\n 'academic_field': {'name': \"random_field\"},\n 'academic_level': {'name': \"random_level\"},\n 'gender': \"M\",\n 'birthdate': \"1999-11-11\",\n }\n\n response = self.client.post(\n reverse('user-list'),\n data,\n format='json',\n )\n\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n self.assertEqual(json.loads(response.content)['phone'], '1234567890')\n\n user = User.objects.get(email=\"[email protected]\")\n activation_token = ActionToken.objects.filter(\n user=user,\n type='account_activation',\n )\n\n self.assertFalse(user.is_active)\n self.assertEqual(1, len(activation_token))\n\n # Test that no email was sent:\n self.assertEqual(len(mail.outbox), 0)", "def test_0080_registration_post_4(self):\n options.options.require_activation = True\n with nested(\n patch.object(smtplib.SMTP, 'sendmail'),\n patch.object(smtplib.SMTP, 'quit'),\n ) as (mock_sendmail, mock_quit):\n response = self.fetch(\n '/registration', method=\"POST\", follow_redirects=False,\n body=urlencode({'name':'anoop',\n 'email':'[email protected]',\n 'password':'openlabs', 'confirm_password':'openlabs'}\n )\n )\n self.assertEqual(mock_sendmail.call_count, 1)\n self.assertEqual(mock_quit.call_count, 1)\n self.assertEqual(response.code, 302)", "def patch(self):\n try:\n MessageService.resend_email_validation(token_auth.current_user())\n return {\"Success\": \"Verification email resent\"}, 200\n except ValueError as e:\n return {\"Error\": str(e).split(\"-\")[1], \"SubCode\": str(e).split(\"-\")[0]}, 400", "def send_verification_email(self, request, *args, **kwargs):\n verified_key_text = getattr(settings, \"VERIFIED_KEY_TEXT\", None)\n if not verified_key_text:\n return Response(status=status.HTTP_204_NO_CONTENT)\n\n username = request.data.get(\"username\")\n redirect_url = request.data.get(\"redirect_url\")\n response_message = _(\"Verification email has NOT been sent\")\n\n if username:\n try:\n registration_profile = RegistrationProfile.objects.get(\n user__username=username\n )\n except RegistrationProfile.DoesNotExist:\n pass\n else:\n user = registration_profile.user\n set_is_email_verified(user.profile, False)\n\n verification_key = registration_profile.activation_key\n if verification_key == verified_key_text:\n verification_key = (\n user.registrationprofile.create_new_activation_key()\n )\n\n verification_url = get_verification_url(\n redirect_url, request, 
verification_key\n )\n\n email_data = get_verification_email_data(\n user.email,\n user.username,\n verification_url,\n request,\n )\n\n send_verification_email.delay(**email_data)\n response_message = _(\"Verification email has been sent\")\n\n return Response(response_message)\n\n return HttpResponseBadRequest(response_message)", "def user_activation(user):\n act_hash = random_password(32)\n user.set_hashword(act_hash)\n user.save()\n base_url = url_for('public.home', _external=True)\n act_url = url_for(\n 'auth.activate',\n userid=user.id,\n userhash=act_hash,\n _external=True)\n if not 'mailman' in current_app.extensions:\n logging.warning('E-mail extension has not been configured')\n return act_hash\n msg = EmailMessage()\n msg.subject = 'Your dribdat account'\n msg.body = \\\n \"Hello %s,\\n\" % user.username \\\n + \"Thanks for signing up at %s\\n\\n\" % base_url \\\n + \"Tap here to activate your account:\\n\\n%s\" % act_url\n msg.to = [user.email]\n logging.info('Sending activation mail to user %d' % user.id)\n logging.debug(act_url)\n msg.send(fail_silently=True)\n return act_hash", "def resend_verification_email(self, emailid, link, template=''):\n payload = {'appkey': self._lr_object._get_api_key(), 'appsecret': self._lr_object._get_api_secret(),\n 'emailid': emailid, 'link': link,'template':template}\n url = SECURE_API_URL + \"raas/v1/account/verificationemail\"\n return self._lr_object._get_json(url, payload)", "def email_signup_user(email, msg, settings, message_data):\r\n from bookie.lib.message import ActivationMsg\r\n msg = ActivationMsg(email, msg, settings)\r\n status = msg.send(message_data)\r\n if status == 4:\r\n from bookie.lib.applog import SignupLog\r\n trans = transaction.begin()\r\n SignupLog(SignupLog.ERROR,\r\n 'Could not send smtp email to signup: ' + email)\r\n trans.commit()", "def test_0090_test_registration_post_5(self):\n options.options.require_activation = False\n with nested(\n patch.object(smtplib.SMTP, 'sendmail'),\n patch.object(smtplib.SMTP, 'quit'),\n ) as (mock_sendmail, mock_quit):\n response = self.fetch(\n '/registration', method=\"POST\", follow_redirects=False,\n body=urlencode({'name':'anoop',\n 'email':'[email protected]',\n 'password':'openlabs', 'confirm_password':'openlabs'}\n )\n )\n self.assertEqual(mock_sendmail.call_count, 0)\n self.assertEqual(mock_quit.call_count, 0)\n self.assertEqual(response.code, 302)", "def test_activation_email_is_html_by_default(self):\n new_user = UserModel().objects.create_user(**self.user_info)\n profile = self.registration_profile.objects.create_profile(new_user)\n profile.send_activation_email(Site.objects.get_current())\n\n self.assertEqual(len(mail.outbox[0].alternatives), 1)", "def test_activate_form_dual(self, mock_sendmail):\r\n res = self.testapp.post('/api/v1/suspend',\r\n params={'email': u'[email protected]'},\r\n status=200)\r\n self.assertTrue(mock_sendmail.called)\r\n\r\n success = json.loads(res.body)\r\n self.assertTrue(\r\n 'message' in success,\r\n \"Should be successful with admin email address\")\r\n\r\n res = self.testapp.post('/api/v1/suspend',\r\n params={'email': u'[email protected]'},\r\n status=406)\r\n\r\n success = json.loads(res.body)\r\n self.assertTrue(\r\n 'error' in success,\r\n \"Should not be successful on second try: \" + str(res))\r\n\r\n self.assertTrue(\r\n 'already' in str(res),\r\n \"Should find 'already' in the response: \" + str(res))", "def remind_reference_identity_check(request, application, auto_rejection_days):\n applicant_name = 
application.get_full_name()\n subject = f'{settings.SITE_NAME} credentialing application reminder'\n body = loader.render_to_string(\n 'notification/email/notify_remind_reference_identity_check.html', {\n 'application': application,\n 'applicant_name': applicant_name,\n 'domain': get_current_site(request),\n 'url_prefix': get_url_prefix(request),\n 'signature': settings.EMAIL_SIGNATURE,\n 'footer': email_footer(), 'SITE_NAME': settings.SITE_NAME,\n 'auto_rejection_days': auto_rejection_days\n })\n send_mail(subject, body, settings.DEFAULT_FROM_EMAIL,\n [application.user.email], fail_silently=False)", "def confirm(key):\n manager = EmailManager.find_key(key)\n if not manager:\n # If key is wrong, return False\n return False\n\n if manager.is_active:\n # Do not reactivate users\n return False\n\n if manager.other_email:\n # If other_email\n if EmailManager.email_used(manager.other_email):\n # Other_email already being used by someone\n return False\n # Other email is not being used by anybody else, make it the active one\n\n # if username == email, set it as new email\n if manager.user.email == manager.user.username:\n manager.user.username = manager.other_email\n manager.user.email = manager.other_email\n\n manager.user.is_active = True\n manager.user.save()\n\n # Activate email\n manager.active = True\n manager.save()\n\n # Returns the activated User's obj\n return manager.user", "def test_resend_delegate(self):\n self.invite.role = self.role_delegate\n self.invite.save()\n url = reverse(\n 'projectroles:api_invite_resend',\n kwargs={'projectinvite': self.invite.sodar_uuid},\n )\n response = self.request_knox(url, method='POST')\n self.assertEqual(response.status_code, 200, msg=response.content)\n self.assertEqual(len(mail.outbox), 1)", "def test_active_account_activation_key_expired(self):\n new_user = self.registration_profile.objects.create_inactive_user(\n site=Site.objects.get_current(), **self.user_info)\n profile = self.registration_profile.objects.get(user=new_user)\n self.registration_profile.objects.activate_user(\n profile.activation_key, Site.objects.get_current())\n self.registration_profile.objects.admin_approve_user(\n profile.id, Site.objects.get_current())\n profile.refresh_from_db()\n self.assertTrue(profile.activation_key_expired())", "def send_confirmation_email(user_pk):\n pass", "def create_email_confirmation(self, trigger_email=True):\n EmailConfirmation.objects.create(user=self,\n email_vc=hexlify(os.urandom(5)),\n email_vc_expiry=datetime.datetime.utcnow().replace(tzinfo=utc) +\n datetime.timedelta(hours=3))", "async def renew_start(self, email: str) -> Optional[str]:\n db = self['db_engine']\n mailer = self['mailer']\n async with db.acquire() as connection:\n if await(await connection.execute(select([User]).where(User.email == email))).first():\n token = str(uuid4())\n expired_at = datetime.now(timezone.utc) + timedelta(seconds=RENEW_TOKEN_EXPIRED)\n query = insert(RegToken).values(token=token, email=email, expired_at=expired_at)\n if (await connection.execute(query)).rowcount:\n message = MIMEText(RENEW_LETTER.format(token), \"html\", \"utf-8\")\n await mailer.send(mailer.kwargs['username'], email, \"Invitation to password renew\", message)\n return token\n raise web.HTTPInternalServerError(reason=\"Cannot save renew token\")\n raise web.HTTPNotFound()", "def save(self, *args, **kwargs):\n if not self.require_confirm_email:\n User.objects.filter(is_active=False, deactivation_reason=\"pending\").update(\n is_active=True, deactivation_reason=None\n )\n if not 
self.invite_question_text:\n self.invite_question_text = \"What is your favourite book?\"\n super().save(*args, **kwargs)", "def test_activate_form(self, mock_sendmail):\r\n res = self.testapp.post('/api/v1/suspend',\r\n params={'email': u'[email protected]'},\r\n status=200)\r\n\r\n success = json.loads(res.body)\r\n self.assertTrue(\r\n 'message' in success,\r\n \"Should be successful with admin email address: \" + str(res))\r\n self.assertTrue(mock_sendmail.called)", "def test_for_success_recovery_and_email_confirmation(self):\n self.assertFalse(self.notconfirmed_u.is_email_confirmed())\n self.assertTrue(self.notconfirmed_u.check_password('hardpwd123'))\n response = self.client.post(\n reverse(\n 'users:recover_password',\n kwargs={\n 'token': self.notconfirmed_u.password_recovery.token,\n }\n ),\n data={\n 'new_password1': 'goodPwd345',\n 'new_password2': 'goodPwd345',\n },\n follow=True,\n )\n self.assertEqual(response.status_code, 200)\n self.notconfirmed_u.refresh_from_db()\n\n self.assertTrue(self.notconfirmed_u.has_perm('users.can_login'))\n with self.assertRaises(PasswordRecovery.DoesNotExist):\n self.notconfirmed_u.password_recovery\n\n self.assertFalse(self.notconfirmed_u.check_password('hardpwd123'))\n self.assertTrue(self.notconfirmed_u.check_password('goodPwd345'))", "def test_create_user_activation_email(self):\n\n data = {\n 'username': 'John',\n 'email': '[email protected]',\n 'password': 'test123!',\n 'phone': '1234567890',\n 'first_name': 'Chuck',\n 'last_name': 'Norris',\n 'university': {\n \"name\": \"random_university\"\n },\n 'academic_field': {'name': \"random_field\"},\n 'academic_level': {'name': \"random_level\"},\n 'gender': \"M\",\n 'birthdate': \"1999-11-11\",\n }\n\n response = self.client.post(\n reverse('user-list'),\n data,\n format='json',\n )\n\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n self.assertEqual(json.loads(response.content)['phone'], '1234567890')\n\n user = User.objects.get(email=\"[email protected]\")\n activation_token = ActionToken.objects.filter(\n user=user,\n type='account_activation',\n )\n\n self.assertFalse(user.is_active)\n self.assertEqual(1, len(activation_token))\n\n # Test that one message was sent:\n self.assertEqual(len(mail.outbox), 1)", "def _resend_email(\n self, mock_sendmail: Any, mock_recaptcha: Any, data1: Optional[dict] = None, email: str = '[email protected]'\n ):\n mock_sendmail.return_value = True\n mock_recaptcha.return_value = True\n\n with self.session_cookie_anon(self.browser) as client:\n with self.app.test_request_context():\n with client.session_transaction() as sess:\n data = {'email': email, 'csrf_token': sess.get_csrf_token()}\n if data1 is not None:\n data.update(data1)\n\n return client.post('/resend-verification', data=json.dumps(data), content_type=self.content_type_json)", "def test_expired_activation(self):\n new_user = self.registration_profile.objects.create_inactive_user(\n site=Site.objects.get_current(), **self.user_info)\n new_user.date_joined -= datetime.timedelta(\n days=settings.ACCOUNT_ACTIVATION_DAYS + 1)\n new_user.save()\n\n profile = self.registration_profile.objects.get(user=new_user)\n user, activated = self.registration_profile.objects.activate_user(\n profile.activation_key, Site.objects.get_current())\n\n self.assertIs(user, False)\n self.assertFalse(activated)\n\n new_user = UserModel().objects.get(username='alice')\n self.assertFalse(new_user.is_active)\n\n profile = self.registration_profile.objects.get(user=new_user)\n 
self.assertFalse(profile.activated)", "def test_activation_already_activated(self):\n new_user = self.registration_profile.objects.create_inactive_user(\n site=Site.objects.get_current(), **self.user_info)\n profile = self.registration_profile.objects.get(user=new_user)\n self.registration_profile.objects.activate_user(\n profile.activation_key, Site.objects.get_current())\n\n profile = self.registration_profile.objects.get(user=new_user)\n _, activated = self.registration_profile.objects.activate_user(\n profile.activation_key, Site.objects.get_current())\n self.assertFalse(activated)", "def test_set_send_email_notifications(self):\n # Setup scenario\n username = 'tester'\n password = 'secret'\n user = Account.objects.create_user(username=username, email='[email protected]', password=password)\n\n self.assertTrue(self.client.login(username=username, password=password))\n\n # Verify initial assumptions\n self.assertTrue(user.send_email_notifications)\n\n # Run code\n resp = self.client.post(reverse('account.api.configure_email'), {\n 'send_email_notifications': False,\n }, format='json')\n\n # Verify expectations\n self.assertEquals(status.HTTP_201_CREATED, resp.status_code)\n self.assertTrue(user.send_email_notifications)", "def _send_verify_email(request, preferences, db_entry, rnd_hash, new_entry):\n\n location = reverse(\"KursAnmeldung-verify_email\", kwargs={\"hash\":rnd_hash})\n verify_link = request.build_absolute_uri(location)\n\n # FIXME: convert to users local time.\n now = datetime.datetime.utcnow()\n\n email_context = {\n \"verify_link\": verify_link,\n \"db_entry\": db_entry,\n \"now\": now,\n }\n\n # Render the internal page\n emailtext = render_to_string(\"kurs_anmeldung/verify_mailtext.txt\", email_context)\n\n # Get the preferences from the database:\n raw_notify_list = preferences[\"notify\"]\n notify_list = raw_notify_list.splitlines()\n notify_list = [i.strip() for i in notify_list if i]\n\n email_kwargs = {\n \"from_email\": preferences[\"from_email\"],\n \"subject\": preferences[\"email_subject\"],\n \"body\": emailtext,\n \"to\": [db_entry.email],\n \"bcc\": notify_list,\n }\n\n if MAIL_DEBUG == True:\n msg = u\"MAIL_DEBUG is on: No Email was sended!\"\n request.page_msg(msg)\n db_entry.log(request, msg)\n db_entry.mail_sended = False\n\n request.page_msg(\"django.core.mail.EmailMessage kwargs:\")\n request.page_msg(email_kwargs)\n\n request.page_msg(\"debug mail text:\")\n request.page_msg(mark_safe(\"<pre>%s</pre>\" % emailtext))\n return\n\n # We can't use django.core.mail.send_mail, because all members\n # of the recipient list will see the others in the 'To' field.\n # But we would like to notify the admins via 'Bcc' field.\n\n connection = SMTPConnection(fail_silently=False)\n email = EmailMessage(**email_kwargs)\n\n try:\n sended = email.send(fail_silently=False)\n except Exception, err:\n msg = \"Error sending mail: %s\" % err\n LogEntry.objects.log_action(app_label=\"kurs_anmeldung\", action=\"error\",\n message=msg\n )\n db_entry.log(request, msg)\n db_entry.mail_sended = False\n if settings.DEBUG or request.user.is_staff:\n db_entry.save()\n raise\n else:\n db_entry.mail_sended = sended\n db_entry.log(request, \"mail sended: %s\" % sended)", "def send_sender_activation_email(self, email):\n logger.info(\"Function call: send_sender_activation_email for '{}'\".format(email, ))\n return self.__handle_error('Empty sender email') if not email else self.__handle_result(self.__send_request('senders/{}/code'.format(email, )))", "def 
send_confirm_email(request,uid):\n user=models.UserProfile.objects.get(id=uid)\n current_site=get_current_site(request)\n email_subject='Activate Your Account'\n message=render_to_string('activate_account.html',{\n 'user':user,\n 'domain':current_site.domain,\n 'uid':urlsafe_base64_encode(force_bytes(uid)),\n 'token':account_activation_token.make_token(user),\n })\n to_email= user.email\n email= EmailMessage(email_subject,message,to=[to_email])\n email.send()\n return JsonResponse(\n {\n \"status\":\"The confirmation email has been sent.\",\n }\n )", "def send_confirmation_email(self, *args, **kwargs):\n raise NotImplementedError", "def waiting_confirmation(self):", "def test_update_user_endpoint_new_email(self):\n print(\"Generate a new email and check if email is not allocated\")\n email_id = Workflows.generate_new_email(suffix=self.global_config[\"email_id_suffix\"])\n kwargs = {'email_id': email_id, 'return_response_obj': True,\n 'url': self.test_args[\"relative_url_check_email\"]}\n response = self.test_check_email_endpoint(**kwargs)\n assert json.loads(response.text)[\"data\"][\"available\"] is True, \"Unable to generate a new email id\"\n\n print(\"Update email id\")\n response = self.test_update_user_endpoint(**kwargs)\n\n print(\"Verify Response body\")\n assert json.loads(response.text)[\"message\"] == self.test_args[\"expected_result\"], \"Test Failed\"", "def envoie_activation_compte(request):\n # Vérification connexion utilisateur\n user = AuxilliariesUser().get_user(request)\n if user:\n # Utilisateur connecté\n activation_key = \"\".join(\n [\n random.choice(\n \"0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ\"\n )\n for _ in range(24)\n ]\n )\n user.cle_dactivation_de_compte = activation_key\n user.save()\n sent_mail_statut = AuxilliariesAuthentification().send_mail(\n \"activation_account\", user\n )\n return redirect(\"../../user/home/\")\n else:\n # Utilisateur non connecté\n raise Http404()", "def reset(request):\r\n rdict = request.matchdict\r\n params = request.params\r\n\r\n # This is an initial request to show the activation form.\r\n username = rdict.get('username', None)\r\n activation_key = rdict.get('reset_key', None)\r\n user = ActivationMgr.get_user(username, activation_key)\r\n new_username = None\r\n\r\n if user is None:\r\n # just 404 if we don't have an activation code for this user\r\n raise HTTPNotFound()\r\n\r\n if 'code' in params:\r\n # This is a posted form with the activation, attempt to unlock the\r\n # user's account.\r\n username = params.get('username', None)\r\n activation = params.get('code', None)\r\n password = params.get('new_password', None)\r\n new_username = params.get('new_username', None)\r\n error = None\r\n\r\n if new_username:\r\n new_username = new_username.lower()\r\n\r\n # Check whether username exists or not. During signup request , a\r\n # record of current user is created with username as his email id\r\n # which is already checked for uniqueness. 
So when new_username is\r\n # equal to username ie the email id then no need to check for\r\n # uniqueness , but if new_username is something else it has to be\r\n # verified\r\n\r\n if username != new_username and \\\r\n UserMgr.get(username=new_username) is not None:\r\n # Set an error message to the template.\r\n error = \"Username already exists.\"\r\n elif not UserMgr.acceptable_password(password):\r\n # Set an error message to the template.\r\n error = \"Come on, pick a real password please.\"\r\n else:\r\n res = ActivationMgr.activate_user(username, activation, password)\r\n if res:\r\n # success so respond nicely\r\n AuthLog.reactivate(username, success=True, code=activation)\r\n\r\n # if there's a new username and it's not the same as our\r\n # current username, update it\r\n if new_username and new_username != username:\r\n try:\r\n user = UserMgr.get(username=username)\r\n user.username = new_username\r\n except IntegrityError:\r\n error = 'There was an issue setting your new username'\r\n else:\r\n AuthLog.reactivate(username, success=False, code=activation)\r\n error = ('There was an issue attempting to activate'\r\n 'this account.')\r\n\r\n if error:\r\n return {\r\n 'message': error,\r\n 'user': user\r\n }\r\n else:\r\n # Log the user in and move along.\r\n headers = remember(request, user.id, max_age=60 * 60 * 24 * 30)\r\n user.last_login = datetime.utcnow()\r\n\r\n # log the successful login\r\n AuthLog.login(user.username, True)\r\n\r\n # we're always going to return a user to their own /recent after a\r\n # login\r\n return HTTPFound(\r\n location=request.route_url(\r\n 'user_bmark_recent',\r\n username=user.username),\r\n headers=headers)\r\n\r\n else:\r\n LOG.error(\"CHECKING\")\r\n LOG.error(username)\r\n\r\n if user is None:\r\n # just 404 if we don't have an activation code for this user\r\n raise HTTPNotFound()\r\n\r\n LOG.error(user.username)\r\n LOG.error(user.email)\r\n return {\r\n 'user': user,\r\n }", "def handle_emails():\n email = request.data['email'].strip()\n user = User.query.filter_by(email=email).first()\n option = \\\n request.data['option'].strip() # have a <select> in the frontend\n token = s.dumps(email, salt='email-confirm')\n\n msg = Message('Reset password', sender=app.config['ADMINS'][0],\n recipients=[email])\n link = 'http://localhost:3000/confirm_email/{}/{}'\\\n .format(option, token)\n if user:\n msg.body = 'Your link is {}'.format(link)\n else:\n msg.body = 'You attempted to reset your password but you do not \\\n have an account with us. Please Sign Up and Log in. 
{}'\\\n .format('http://localhost:3000/register')\n\n mail.send(msg)\n return jsonify({\"message\":\"Please confirm your email.\"}), 201", "def test_email_sent_on_failure(self):\n self._authorize()\n data = {\n 'Subject_Number': '000-1111',\n 'Pin_Code': '1234',\n 'Date_Enrolled': datetime.datetime.now().strftime('%b %d %Y '),\n 'Mobile_Number': '2223334444',\n }\n patient = self.create_xml_patient(data)\n payload = self.create_xml_payload([patient])\n response = self._post(payload)\n self.assertEqual(response.status_code, 500)\n self.assertEqual(len(mail.outbox), 1)", "def account_activation_sent(request):\n current_user = request.user\n if current_user.is_authenticated():\n return HttpResponseRedirect('/')\n return render(request, 'registration/activation_complete.html')", "def test_activation_email_uses_site_address(self):\n new_user = UserModel().objects.create_user(**self.user_info)\n profile = self.registration_profile.objects.create_profile(new_user)\n site = Site.objects.get_current()\n profile.send_activation_email(site)\n from_email = 'admin@{}'.format(site.domain)\n self.assertEqual(mail.outbox[0].from_email, from_email)", "def on_email_confirmed(request, email_address: EmailAddress, **kwargs):\n dillo.tasks.profile.update_mailing_list_subscription(email_address.email, True)", "def test_reset_password_email(self, send_email):\r\n\r\n good_req = self.request_factory.post('/password_reset/', {'email': self.user.email})\r\n good_resp = password_reset(good_req)\r\n self.assertEquals(good_resp.status_code, 200)\r\n obj = json.loads(good_resp.content)\r\n self.assertEquals(obj, {\r\n 'success': True,\r\n 'value': \"('registration/password_reset_done.html', [])\",\r\n })\r\n\r\n (subject, msg, from_addr, to_addrs) = send_email.call_args[0]\r\n self.assertIn(\"Password reset\", subject)\r\n self.assertIn(\"You're receiving this e-mail because you requested a password reset\", msg)\r\n self.assertEquals(from_addr, settings.DEFAULT_FROM_EMAIL)\r\n self.assertEquals(len(to_addrs), 1)\r\n self.assertIn(self.user.email, to_addrs)\r\n\r\n #test that the user is not active\r\n self.user = User.objects.get(pk=self.user.pk)\r\n self.assertFalse(self.user.is_active)\r\n re.search(r'password_reset_confirm/(?P<uidb36>[0-9A-Za-z]+)-(?P<token>.+)/', msg).groupdict()", "def test_0140_account_activation_2(self):\n signer = URLSafeSerializer(self.get_app().settings['cookie_secret'])\n activation_key = signer.dumps(\"[email protected]\")\n response = self.fetch(\n '/activation/%s' % activation_key,\n method=\"GET\", follow_redirects=False\n )\n self.assertEqual(response.code, 302)\n cookies = response.headers.get('Set-Cookie')\n response = self.fetch(\n '/registration', method=\"GET\", headers={\n 'Cookie': cookies\n }\n )\n self.assertEqual(\n response.body.count(u'Invalid Activation Key, Please register.'), 1\n )", "def test_active_account_activation_key_expired(self):\n new_user = self.registration_profile.objects.create_inactive_user(\n site=Site.objects.get_current(), **self.user_info)\n profile = self.registration_profile.objects.get(user=new_user)\n self.registration_profile.objects.activate_user(\n profile.activation_key, Site.objects.get_current())\n profile.refresh_from_db()\n self.assertTrue(profile.activation_key_expired())", "def test_admin_approval_email_is_html_by_default(self):\n new_user = UserModel().objects.create_user(**self.user_info)\n profile = self.registration_profile.objects.create_profile(new_user)\n profile.activated = True\n 
self.registration_profile.objects.send_admin_approve_email(\n new_user, Site.objects.get_current())\n\n self.assertEqual(len(mail.outbox[0].alternatives), 1)", "def test_activation_deactivated(self):\n new_user = self.registration_profile.objects.create_inactive_user(\n site=Site.objects.get_current(), **self.user_info)\n profile = self.registration_profile.objects.get(user=new_user)\n self.registration_profile.objects.activate_user(\n profile.activation_key, Site.objects.get_current())\n\n # Deactivate the new user.\n new_user.is_active = False\n new_user.save()\n\n # Try to activate again and ensure False is returned.\n user, activated = self.registration_profile.objects.activate_user(\n profile.activation_key, Site.objects.get_current())\n self.assertFalse(activated)", "def send_verification_reminder_email(user):\n # # check for his email preference.\n import django\n\n django.setup()\n from .models import User\n\n user = User.objects.get(id=user.id)\n if not user.is_email_verified:\n context = get_email_context(user)\n context[\"first_name\"] = user.first_name\n context[\"url\"] = django_settings.ACTIVATION_URL.format(**context)\n VerifyEmailReminderNotification(user.email, context=context).send()\n return None", "def test_invitation_email(self):\n queryset = models.Invitation.objects.filter(id=self.invitation.id)\n self.admin_instance.send_new_activation_email(self.some_request, queryset)\n # check whether there is a mail in the outbox\n self.assertEqual(len(mail.outbox), 1)\n # check subject\n self.assertEqual(\n mail.outbox[0].subject,\n \"Er is een account voor u aangemaakt op sso.lizard.net\",\n )\n self.assertEqual(mail.outbox[0].to, [\"[email protected]\"])\n # check mail starts with 'Hallo Reinout,'\n self.assertTrue(mail.outbox[0].body.startswith(\"Hallo Reinout,\"))", "def test_active_account_and_expired_accountactivation_key_expired(self):\n new_user = self.registration_profile.objects.create_inactive_user(\n site=Site.objects.get_current(), **self.user_info)\n new_user.date_joined -= datetime.timedelta(\n days=settings.ACCOUNT_ACTIVATION_DAYS + 1)\n new_user.save()\n profile = self.registration_profile.objects.get(user=new_user)\n self.registration_profile.objects.activate_user(\n profile.activation_key, Site.objects.get_current())\n self.registration_profile.objects.admin_approve_user(\n profile.id, Site.objects.get_current())\n profile.refresh_from_db()\n self.assertTrue(profile.activation_key_expired())", "def remind_to_verify_email(sender, created, instance, **kwargs):\n list_of_models = (\"Person\", \"Company\")\n scheduler = django_rq.get_scheduler(\"default\")\n if sender.__name__ in list_of_models:\n if created and instance.email:\n datetime = instance.date_joined + timedelta(days=5)\n scheduler.schedule(\n scheduled_time=datetime,\n func=send_verification_reminder_email,\n args=[instance],\n interval=432000, # 5 days\n repeat=5,\n )", "def confirmation_failed(self):", "def _send_registration_email(request, user, acct_type):\n current_site = get_current_site(request)\n subject = \"Activate your PuPPy Mentorship Account\"\n\n uid = urlsafe_base64_encode(force_bytes(user.pk))\n activation_token = account_activation_token.make_token(user)\n\n url_token = uid.decode('utf-8') + '/' + activation_token\n\n message = render_to_string(\n 'mentorship_profile/activation_email.html', {\n \"user\": user,\n \"domain\": current_site.domain,\n \"account_type\": acct_type,\n \"url_token\": url_token\n }\n )\n user.email_user(subject, message)", "def reactivation_email(self, 
user):\r\n return json.loads(reactivation_email_for_user(user).content)", "def test_activation_already_activated(self):\n new_user = self.registration_profile.objects.create_inactive_user(\n site=Site.objects.get_current(), **self.user_info)\n profile = self.registration_profile.objects.get(user=new_user)\n self.registration_profile.objects.activate_user(\n profile.activation_key, Site.objects.get_current())\n\n profile = self.registration_profile.objects.get(user=new_user)\n user, activated = self.registration_profile.objects.activate_user(\n profile.activation_key, Site.objects.get_current())\n self.assertEqual(user, new_user)\n self.assertFalse(activated)", "def is_pending_activation(self):\n if (self.auth_token_is_used and self.is_active):\n return False\n else:\n return True", "def test_email_after_contest_end(self):\n self.prep_consumer()\n temp_date = settings.CONTEST_END_DATE\n settings.CONTEST_END_DATE = str(\n datetime.today().date() - timedelta(days=1))\n UnqualifiedConsumerEmailTask().run(test_mode=self.consumer)\n log = get_last_db_log(\n 'email_gateway.tasks.send_unqualified_emails', 'EMAIL')\n if log:\n self.fail('Performed task even though contest ended.')\n settings.CONTEST_END_DATE = temp_date", "def activate(self):\r\n if self.activation_code == '':\r\n raise ValidationError('The member is already activated')\r\n signer = TimestampSigner()\r\n signer.unsign(self.activation_code, max_age=timedelta(days=2))\r\n self.hidden = False\r\n self.activation_code = ''\r\n self.joined_date = timezone.now()\r\n self.save()", "def _confirm_email(user, email):\n mail_subject = 'Подтверждение почты'\n message = render_to_string('accounts/account_verification_email.html', {\n 'user': user,\n 'uid': urlsafe_base64_encode(force_bytes(user.pk)),\n 'token': default_token_generator.make_token(user),\n 'email': email,\n })\n to_email = email\n send_email = EmailMessage(mail_subject, message, to=[to_email])\n send_email.send()", "def test_activation_email_uses_site_address_improperly_configured(self):\n new_user = UserModel().objects.create_user(**self.user_info)\n profile = self.registration_profile.objects.create_profile(new_user)\n with self.assertRaises(ImproperlyConfigured):\n profile.send_activation_email(Site.objects.get_current())", "def handle_email_confirmed(sender, **kwargs):\n email = kwargs['email_address']\n email.user.userprofile.member.cast().confirm_email()", "def test_user_activation(self):\n user = User.objects.get()\n response = self.client.get(reverse('accounts:user-activate',\n kwargs={'uidb64': urlsafe_base64_encode(force_bytes(user.pk)),\n 'token': account_activation_token.make_token(user)}))\n self.assertEqual(response.status_code, status.HTTP_200_OK)", "def validation_email_sent(request):\n assert(settings.EMAIL_VALIDATION == True)\n logging.debug('')\n data = {\n 'email': request.user.email,\n 'change_email_url': reverse('user_changeemail'),\n 'action_type': 'validate'\n }\n return render_to_response('authenticator/changeemail.html', RequestContext(request, data))", "def PostResendVerifyEmail(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def confirm_registration_view(request):\n user_email_token = request.matchdict['email_confirm']\n non_active_user = User.get_one(request, url_token=user_email_token)\n if non_active_user is None:\n return {\"msg\": \"Error404 HTTPNotFound\"}\n else:\n non_active_user.status_id = UserStatus\\\n 
.get_user_by_status(request, status=\"Active\").id\n non_active_user.role_id = Role.get_role(request, role=\"user\").id\n non_active_user.url_token = None\n return {\"msg\": \"Your email address is confirmed\"}" ]
[ "0.76781446", "0.75325173", "0.7526243", "0.73247623", "0.723994", "0.72196436", "0.7116841", "0.69973254", "0.68883735", "0.68695354", "0.68685806", "0.6830412", "0.67408323", "0.66736573", "0.66241586", "0.6598078", "0.65814126", "0.65538317", "0.6545949", "0.6444337", "0.6424555", "0.6416764", "0.64132166", "0.64071006", "0.6378209", "0.63727033", "0.6372308", "0.63702893", "0.63697225", "0.63348943", "0.63215286", "0.6315409", "0.63147897", "0.62929255", "0.62514937", "0.6230894", "0.6206766", "0.6170045", "0.61276734", "0.6118946", "0.6116669", "0.61088634", "0.609979", "0.6097933", "0.6080204", "0.6067095", "0.60638547", "0.60620856", "0.60461134", "0.60303366", "0.60196316", "0.60127735", "0.5999829", "0.59848243", "0.59795773", "0.5973083", "0.59713805", "0.59700674", "0.5967871", "0.5962496", "0.59532243", "0.5940276", "0.593836", "0.59141153", "0.59039605", "0.5903199", "0.59003556", "0.5900071", "0.5894031", "0.58909243", "0.5887271", "0.5884714", "0.5877234", "0.5876938", "0.58762026", "0.58670235", "0.5859751", "0.58573395", "0.58522576", "0.58470464", "0.5841912", "0.58349156", "0.58286697", "0.5820814", "0.58196187", "0.5814595", "0.5811177", "0.58038086", "0.58008885", "0.57930726", "0.5789943", "0.5782797", "0.577947", "0.57685065", "0.57660687", "0.57656586", "0.57577544", "0.5755978", "0.5748746", "0.5737142" ]
0.7207715
6
Ensure admin can credit tickets to a user
def test_credit_ticket_as_admin(self): user = UserFactory() self.assertEqual(user.tickets, 1) nb_tickets_to_add = 5 data = { 'nb_tickets': nb_tickets_to_add, } self.client.force_authenticate(user=self.admin) response = self.client.post( reverse( 'user-credit-tickets', kwargs={'pk': user.id}, ), data, format='json', ) self.assertEqual( response.status_code, status.HTTP_200_OK, ) self.assertEqual( User.objects.get(pk=user.id).tickets, 1 + nb_tickets_to_add )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_credit_ticket_as_user(self):\n user = UserFactory()\n self.assertEqual(user.tickets, 1)\n nb_tickets_to_add = 5\n data = {\n 'nb_tickets': nb_tickets_to_add,\n }\n\n self.client.force_authenticate(user=self.user)\n response = self.client.post(\n reverse(\n 'user-credit-tickets',\n kwargs={'pk': user.id},\n ),\n data,\n format='json',\n )\n self.assertEqual(\n response.status_code,\n status.HTTP_403_FORBIDDEN,\n )", "async def ticket_add(self, ctx, user: discord.Member):\n guild_settings = await self.config.guild(ctx.guild).all()\n is_admin = await is_admin_or_superior(self.bot, ctx.author) or any(\n [ur.id in guild_settings[\"supportroles\"] for ur in ctx.author.roles]\n )\n must_be_admin = not guild_settings[\"usercanmodify\"]\n\n if not is_admin and must_be_admin:\n await ctx.send(\"Only Administrators can add/remove other users to tickets.\")\n return\n elif not is_admin:\n author = ctx.author\n author_id = author.id\n elif is_admin:\n # Since the author isn't specified, and it's an admin, we need to guess on who\n # the author is\n inverted = {}\n for author_id, tickets in guild_settings[\"created\"].items():\n for ticket in tickets:\n inverted[ticket[\"channel\"]] = author_id\n try:\n author = ctx.guild.get_member(int(inverted[ctx.channel.id]))\n if author:\n author_id = author.id\n else:\n author_id = int(inverted[ctx.channel.id])\n except KeyError:\n author = ctx.author\n author_id = author.id\n\n index = None\n\n if not guild_settings[\"created\"][str(author_id)]:\n await ctx.send(\"You don't have any open tickets.\")\n return\n elif len(guild_settings[\"created\"][str(author_id)]) == 1:\n index = 0\n else:\n for i, ticket in enumerate(guild_settings[\"created\"][str(author_id)]):\n if ticket[\"channel\"] == ctx.channel.id:\n index = i\n break\n\n if index is None:\n await ctx.send(\n \"You have multiple tickets open. \"\n \"Please run this command in the ticket channel you wish to edit.\"\n )\n return\n\n channel = self.bot.get_channel(guild_settings[\"created\"][str(author_id)][index][\"channel\"])\n\n if user.id in guild_settings[\"created\"][str(author_id)][index][\"added\"]:\n await ctx.send(\"That user is already added.\")\n return\n\n adding_is_admin = await is_admin_or_superior(self.bot, user) or any(\n [ur.id in guild_settings[\"supportroles\"] for ur in user.roles]\n )\n\n if adding_is_admin:\n await ctx.send(\"You cannot add a user in support or admin team.\")\n return\n\n channel = self.bot.get_channel(guild_settings[\"created\"][str(author_id)][index][\"channel\"])\n if not channel:\n await ctx.send(\"The ticket channel has been deleted.\")\n return\n\n try:\n await channel.set_permissions(user, send_messages=True, read_messages=True)\n except discord.Forbidden:\n await ctx.send(\n \"The Manage Permissions channel for me has been removed. 
\"\n \"I am unable to modify this ticket.\"\n )\n return\n\n async with self.config.guild(ctx.guild).created() as created:\n created[str(author_id)][index][\"added\"].append(user.id)\n\n await ctx.send(f\"{user.mention} has been added to the ticket.\")", "def test_credit_ticket_negative_int(self):\n user = UserFactory()\n self.assertEqual(user.tickets, 1)\n nb_tickets_to_add = -5\n data = {\n 'nb_tickets': nb_tickets_to_add,\n }\n\n self.client.force_authenticate(user=self.admin)\n response = self.client.post(\n reverse(\n 'user-credit-tickets',\n kwargs={'pk': user.id},\n ),\n data,\n format='json',\n )\n self.assertEqual(\n response.status_code,\n status.HTTP_400_BAD_REQUEST,\n )", "async def ticket_remove(self, ctx, user: discord.Member):\n guild_settings = await self.config.guild(ctx.guild).all()\n is_admin = await is_admin_or_superior(self.bot, ctx.author) or any(\n [ur.id in guild_settings[\"supportroles\"] for ur in ctx.author.roles]\n )\n must_be_admin = not guild_settings[\"usercanmodify\"]\n\n if not is_admin and must_be_admin:\n await ctx.send(\"Only Administrators can add/remove other users to tickets.\")\n return\n elif not is_admin:\n author = ctx.author\n author_id = author.id\n elif is_admin:\n # Since the author isn't specified, and it's an admin, we need to guess on who\n # the author is\n inverted = {}\n for author_id, tickets in guild_settings[\"created\"].items():\n for ticket in tickets:\n inverted[ticket[\"channel\"]] = author_id\n try:\n author = ctx.guild.get_member(int(inverted[ctx.channel.id]))\n if author:\n author_id = author.id\n else:\n author_id = int(inverted[ctx.channel.id])\n except KeyError:\n author = ctx.author\n author_id = author.id\n\n index = None\n\n if not guild_settings[\"created\"][str(author_id)]:\n await ctx.send(\"You don't have any open tickets.\")\n return\n elif len(guild_settings[\"created\"][str(author_id)]) == 1:\n index = 0\n else:\n for i, ticket in enumerate(guild_settings[\"created\"][str(author_id)]):\n if ticket[\"channel\"] == ctx.channel.id:\n index = i\n break\n\n if index is None:\n await ctx.send(\n \"You have multiple tickets open. \"\n \"Please run this command in the ticket channel you wish to edit.\"\n )\n return\n\n if user.id not in guild_settings[\"created\"][str(author_id)][index][\"added\"]:\n await ctx.send(\"That user is not added.\")\n return\n\n removing_is_admin = await is_admin_or_superior(self.bot, user) or any(\n [ur.id in guild_settings[\"supportroles\"] for ur in user.roles]\n )\n\n if removing_is_admin:\n await ctx.send(\"You cannot remove a user in support or admin team.\")\n return\n\n channel = self.bot.get_channel(guild_settings[\"created\"][str(author_id)][index][\"channel\"])\n if not channel:\n await ctx.send(\"The ticket channel has been deleted.\")\n\n try:\n await channel.set_permissions(user, send_messages=False, read_messages=False)\n except discord.Forbidden:\n await ctx.send(\n \"The Manage Permissions channel for me has been removed. 
\"\n \"I am unable to modify this ticket.\"\n )\n return\n\n async with self.config.guild(ctx.guild).created() as created:\n created[str(author_id)][index][\"added\"].remove(user.id)\n\n await ctx.send(f\"{user.mention} has been removed from the ticket.\")", "def test_credit_ticket_not_int(self):\n user = UserFactory()\n self.assertEqual(user.tickets, 1)\n nb_tickets_to_add = 'this is not an int'\n data = {\n 'nb_tickets': nb_tickets_to_add,\n }\n\n self.client.force_authenticate(user=self.admin)\n response = self.client.post(\n reverse(\n 'user-credit-tickets',\n kwargs={'pk': user.id},\n ),\n data,\n format='json',\n )\n self.assertEqual(\n response.status_code,\n status.HTTP_400_BAD_REQUEST,\n )", "def user_requested_access(user):\r\n user = CourseCreator.objects.get(user=user)\r\n if user.state != CourseCreator.GRANTED:\r\n user.state = CourseCreator.PENDING\r\n user.save()", "async def admin_credit(self, ctx, target: discord.Member, sum: int = 100):\n if is_registered(target.id):\n \n inventories = get_file(\"inventories\")\n inventories[str(target.id)][\"balance\"] += sum\n update_file(\"inventories\", inventories)\n\n embed = discord.Embed(color=admin_color)\n embed.set_author(name=\"🛠️ Admin\")\n embed.add_field(name=\"💰 Credit\",\n value=f\"{ctx.author.mention}, {target.mention} a été crédité de `{sum}` PO (pièces d'or)\")\n embed = set_footer(embed, ctx)\n await ctx.send(embed=embed)", "def AdminTicket(ticket):\n try:\n data, = xmlrpclib.loads(ticket)[0]\n name = data['slivers'][0]['name']\n if data != None:\n deliver_ticket(data)\n logger.log('api_calls: Admin Ticket delivered for %s' % name)\n Create(database.db.get(name))\n except Exception, err:\n raise xmlrpclib.Fault(102, 'Ticket error: ' + str(err))", "def user_allow_credit(self):\n try:\n return self.user.creditAllowed()\n except AttributeError:\n return False", "def test_user_can_change_admin(self):\n self.assertTrue(self.story.user_can_change(self.admin_user))", "def check_credit(self):\n self.ensure_one()\n getattr(self, '%s_check_credit' % self.provider, lambda: None)()", "def allowed(self, user, amount):\n return True", "def add_ticket(self, user):\n profile = user.get_profile()\n if profile.available_tickets() <= 0:\n raise Exception(\"This user does not have any tickets to allocate.\")\n \n ticket = RaffleTicket(raffle_prize=self, user=user)\n ticket.save()", "async def adduser(ctx, user: discord.Member):\n channel = ctx.channel\n if not IsATicket(channel.id):\n await ctx.send(\n \"This is not a ticket! 
Users can only be added to a ticket channel\"\n )\n return\n\n await channel.set_permissions(user, read_messages=True, send_messages=True)\n await ctx.message.delete()", "def issue_ticket(database, user):\n try:\n # check if user is an officer\n c = database.cursor()\n c.execute('SELECT utype FROM users WHERE uid = ?', (user, ))\n user_type = c.fetchone()[0]\n\n # If user is an officer \n if user_type == 'o':\n reg_num = int(input(\"Registration number: \"))\n c.execute(\"\"\"SELECT p.fname, p.lname, v.make, v.model, v.year, v.color FROM registrations r JOIN\n persons p ON (r.fname, r.lname) = (p.fname, p.lname) JOIN vehicles v ON r.vin = v.vin WHERE r.regno = ?\"\"\",(reg_num,))\n result = c.fetchone()\n fname = result[0]\n lname = result[1]\n make = result[2]\n model = result[3]\n year = result[4]\n color = result[5]\n print(\"\\n--------------------------\\nInformation\\n--------------------------\\n\")\n print(\"First Name: \", fname)\n print(\"Last Name: \", lname)\n print(\"Make: \", make)\n print(\"Model: \", model)\n print(\"Year: \", year)\n print(\"Color: \", color)\n\n print(\"\\n-------------------------\\nTicket the registra: \\n------------------------\\n\")\n violation_date = str(input(\"Violation Date: \")) # if not provided, today's date\n if violation_date == \"\":\n violation_date = datetime.today().strftime('%Y-%m-%d')\n violation_text = str(input(\"violation Text: \"))\n amount = str(input(\"Amount: \"))\n tno = randrange(1001, 9867699)\n\n c.execute(q.insert_into_tickets, (tno, reg_num, amount, violation_text, violation_date))\n\n database.commit()\n print(pm.all_done)\n # if user is not an officer\n else:\n print(pm.for_officers_only)\n sys.exit()\n except:\n print(pm.something_went_wrong)\n sys.exit()", "def write_authorize(cls, user, obj):\n if not obj.delivery.deadline.assignment_group.is_examiner(user):\n raise PermissionDenied()", "def test_func(self, user):\n return self.get_object().admin == user", "def create_ticket(self, user):\n return Ticket.objects.create_ticket('test', user)", "def can_approve(self, user, **data):\n raise Return(False)", "def credit_deliverer():\n return True", "async def plaguebearer(self, ctx):\n currency = await bank.get_currency_name(ctx.guild)\n await self.config.user(ctx.author).gameRole.set(\"Plaguebearer\")\n await self.notify_user(ctx=ctx, user=ctx.author, notificationType=\"plaguebearer\")\n await ctx.send(f\"{ctx.author} has spent 10,000 {currency} and become a Plaguebearer.\")", "def userreject_admin(user_id):\n\n # User objects list which includes list of all users which can be broken down into editors and sponsors\n # get individual user\n user = db.session.query(User).filter(User.id==user_id).first()\n # update status to approved\n user.user_status = 'rejected'\n # commit to database\n db.session.commit()\n\n return redirect(url_for('admin_bp.usersview_admin'))", "def test_creating_supply_user(self):\n request = self.factory.post(\n '/api/supplies/', {'name': '3d printer 2', 'state': 'good state', 'description': 'prints 3d objects'})\n force_authenticate(request, user=self.testuser1)\n response = SupplyListView.as_view()(request)\n\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)\n try:\n Supply.objects.get(name='3d printer')\n self.fail()\n except Supply.DoesNotExist:\n pass", "def test_user_can_change_as_author(self):\n self.assertTrue(self.story.user_can_change(self.user1))", "def prepare_ticket(self, req, ticket, fields, actions):", "def test_admin_update_user_taskrun(self):\r\n\r\n with 
self.flask_app.test_request_context('/'):\r\n user_taskrun = TaskRunFactory.create()\r\n\r\n assert self.mock_admin.id != user_taskrun.user.id\r\n assert_raises(Forbidden,\r\n getattr(require, 'taskrun').update,\r\n user_taskrun)", "def accept(self, responder):\n try:\n with transaction.atomic():\n self._apply_decision(self.Status.ACCEPTED, responder)\n # update the user credentials\n user = self.user\n user.is_credentialed = True\n user.credential_datetime = timezone.now()\n user.save()\n except DatabaseError:\n messages.error(request, 'Database error. Please try again.')", "def check_admin():\r\n if not current_user.is_admin:\r\n abort(403)", "def write_authorize_examinercommon(cls, user, obj):\n if obj.delivered_by != None:\n raise PermissionDenied()", "def confirm_meal(request, e_id):\n enrolment = Enrolment.objects.get(pk=e_id)\n total_meal = enrolment.day_meal_count + enrolment.night_meal_count\n price = enrolment.plan.price\n extended_user = ExtendedUser.objects.get(user=request.user)\n extended_user.balance -= price * total_meal\n if extended_user.balance >= 0:\n extended_user.save()\n owner = enrolment.plan.store.owner\n owner = ExtendedUser.objects.get(user=owner)\n owner.balance += price * total_meal\n owner.save()\n return view_enrolments(request)", "async def ticket_name(self, ctx, *, name: str):\n guild_settings = await self.config.guild(ctx.guild).all()\n is_admin = await is_admin_or_superior(self.bot, ctx.author) or any(\n [ur.id in guild_settings[\"supportroles\"] for ur in ctx.author.roles]\n )\n must_be_admin = not guild_settings[\"usercanname\"]\n\n if not is_admin and must_be_admin:\n await ctx.send(\"Only Administrators can rename tickets.\")\n return\n elif not is_admin:\n author = ctx.author\n author_id = author.id\n elif is_admin:\n # Since the author isn't specified, and it's an admin, we need to guess on who\n # the author is\n inverted = {}\n for author_id, tickets in guild_settings[\"created\"].items():\n for ticket in tickets:\n inverted[ticket[\"channel\"]] = author_id\n try:\n author = ctx.guild.get_member(int(inverted[ctx.channel.id]))\n if author:\n author_id = author.id\n else:\n author_id = int(inverted[ctx.channel.id])\n except KeyError:\n author = ctx.author\n author_id = author.id\n\n if str(author_id) not in guild_settings[\"created\"]:\n await ctx.send(\"You don't have any open tickets.\")\n return\n\n index = None\n\n if not guild_settings[\"created\"][str(author_id)]:\n await ctx.send(\"You don't have any open tickets.\")\n return\n elif len(guild_settings[\"created\"][str(author_id)]) == 1:\n index = 0\n else:\n for i, ticket in enumerate(guild_settings[\"created\"][str(author_id)]):\n if ticket[\"channel\"] == ctx.channel.id:\n index = i\n break\n\n if index is None:\n await ctx.send(\n \"You have multiple tickets open. \"\n \"Please run this command in the ticket channel you wish to edit.\"\n )\n return\n\n channel = self.bot.get_channel(guild_settings[\"created\"][str(author_id)][index][\"channel\"])\n if not channel:\n await ctx.send(\"The ticket channel has been deleted.\")\n return\n\n if len(name) > 99:\n await ctx.send(\"Channel names must be less 100 characters\")\n return\n\n try:\n await channel.edit(name=name)\n except discord.Forbidden:\n await ctx.send(\n \"The Manage Channels channel for me has been removed. 
\"\n \"I am unable to modify this ticket.\"\n )\n return\n\n await ctx.send(\"The ticket has been renamed.\")", "def test_user_can_change_not_author(self):\n self.assertFalse(self.story.user_can_change(self.user2))", "def userapprove_admin(user_id):\n # take the supplied user_id and use that to access a given user.\n\n # User objects list which includes list of all users which can be broken down into editors and sponsors\n # get individual user\n user = db.session.query(User).filter(User.id==user_id).first()\n # update status to approved\n user.user_status = 'approved'\n # commit to database\n db.session.commit()\n\n return redirect(url_for('admin_bp.usersview_admin'))", "async def tickets(ctx, user: discord.User=None):\n\n tickets_emb = discord.Embed(\n title=\"Active support tickets\",\n color=EMBED_COLOR\n )\n\n if user is not None:\n tickets_emb.description = \"All open tickets of the given user.\"\n tickets_emb.set_author(\n name=f\"{user.name}#{user.discriminator}\",\n icon_url=user.avatar_url\n )\n\n db_user = User.select(graph, user.id).first()\n\n ticket_list = list(db_user.tickets)\n\n else:\n tickets_emb.description = \"All open tickets of this guild.\"\n\n guild = Guild.select(graph, ctx.guild.id).first()\n\n ticket_list = list(guild.tickets)\n\n # TODO: check scopes\n ticket_list = list(filter(lambda t: t.state != 'closed', ticket_list))\n ticket_list.reverse()\n\n if len(ticket_list) == 0:\n await ctx.send(\"There are no active support tickets.\")\n return None\n\n for ticket in ticket_list:\n tickets_emb.add_field(\n name=f\"#{ticket.id} || {ticket.title}\",\n value=ticket.description,\n inline=False\n )\n\n tickets_emb.set_footer(\n text=\"To see all properties of a ticket use the 'ticket show' command.\"\n )\n\n await ctx.send(embed=tickets_emb)", "async def plaguedoctor(self, ctx):\n currency = await bank.get_currency_name(ctx.guild)\n await self.config.user(ctx.author).gameRole.set(\"Doctor\")\n await self.notify_user(ctx=ctx, user=ctx.author, notificationType=\"doctor\")\n await ctx.send(f\"{ctx.author} has spent 10,000 {currency} and become a Doctor.\")", "def admin_reject(user):\n if user.comments in (None or \"\"):\n return\n\n subject = \"ECE/CIS Account - Account Application rejected for %s\" % user.username\n application = \"https://www.eecis.udel.edu/NewAccount/\"\n helprequest = \"https://www.eecis.udel.edu/service\"\n sponsor = \"%[email protected]\" % user.sponsor\n \n message = \"Your ECE/CIS Account has been rejected by ECE/CIS faculty adminstrators.\\n\" % user.sponsor\n message += \"The reason given for rejection was:\\n\\n%s\\n\\n\" % user.comments\n message += \"You may re-apply with corrected information at %s\\n\" % application\n message += \"Please don't reply to this email. 
If have any questions, please \\n\"\n message += \"please post a ticket as an outsider at %s\" % helprequest\n message += \"-- ECE\\CIS Labstaff\"\n\n\n send('[email protected]', 'ECE/CIS Account System', \\\n [user.email, sponsor], subject, message, MAILHOST)", "def check_admin():\n if not current_user.is_admin:\n abort(403)", "def check_admin():\n if not current_user.is_admin:\n abort(403)", "def test_auth_sharable_admin(self):\n self.do_sharable(True, 'pattieblack', None, tenant='froggy',\n is_admin=True)", "def check_admin():\n\tif not current_user.is_admin:\n\t\tabort(403)", "def test_buyTicket_FreeTicket():\n old_venue_balance = testVenue.wallet\n assert testUser4.buyTicket(testTicket4)\n assert testUser4.inventory[-1] == testTicket4\n assert not testTicket4.for_sale\n assert testUser4.wallet == 0\n assert testVenue.wallet == old_venue_balance", "def user_can_edit(self, user):\n return user == self.owner", "def _create_support_ticket(self, admin_uid, project_name, customer_name,\n tmp_name, supporter_name,\n customer_profile='TMS Customer Profile'):\n support_ticket_obj = self.registry('tms.support.ticket')\n partner_obj = self.registry('res.partner')\n project_obj = self.registry('tms.project')\n user_obj = self.registry('res.users')\n group_obj = self.registry('res.groups')\n cr = self.cr\n # Find the existed profiles\n customer_profile_id = group_obj.search(\n cr, admin_uid, [('name', '=', customer_profile)]\n )[0]\n tpm_profile_id = group_obj.search(\n cr, admin_uid, [('name', '=', 'Technical Project Manager Profile')]\n )[0]\n fc_profile_id = group_obj.search(\n cr, admin_uid, [('name', '=', 'Functional Consultant Profile')]\n )[0]\n # create TPM user\n tpm_user_vals = {\n 'name': tmp_name,\n 'login': tmp_name,\n 'password': 'tpm',\n 'email': '%[email protected]' % tmp_name,\n 'group_profile_id': tpm_profile_id,\n 'is_trobz_member': True,\n }\n tpm_uid = user_obj.create(cr, admin_uid, tpm_user_vals)\n\n # Create support user\n fc_user_vals = {\n 'name': supporter_name,\n 'login': supporter_name,\n 'password': 'supporter',\n 'email': '%[email protected]' % supporter_name,\n 'group_profile_id': fc_profile_id,\n 'is_trobz_member': True,\n }\n fc_uid = user_obj.create(cr, admin_uid, fc_user_vals)\n\n # Create a Partner, it is:\n # - Customer on Project form\n # - Employer on User form\n # - Customer on Support ticket form\n customer_vals = {\n 'name': customer_name,\n 'is_company': True,\n 'website': '%s-fake.com' % customer_name\n }\n customer_id = partner_obj.create(cr, admin_uid, customer_vals)\n\n # TPM creates a project\n # required for creating support ticket\n project_vals = {\n 'name': project_name,\n 'partner_id': customer_id,\n 'technical_project_manager_id': tpm_uid,\n 'state': 'active',\n 'default_supporter_id': fc_uid,\n 'project_supporter_rel_ids': [(4, fc_uid), (4, tpm_uid)]\n }\n # Computing the supporters here avoids the access control\n # related to `res.partner`.\n project_id = project_obj.create(\n cr, admin_uid, project_vals\n )\n # Create customer user\n customer_user_vals = {\n 'name': customer_name,\n 'login': customer_name,\n 'password': 'customer',\n 'email': '%[email protected]' % customer_name,\n 'group_profile_id': customer_profile_id,\n 'is_trobz_member': False,\n 'supporter_of_project_ids': [(6, 0, [project_id])],\n 'employer_id': customer_id,\n }\n customer_uid = user_obj.create(cr, admin_uid, customer_user_vals)\n # Customer create a support ticket\n support_ticket_vals = {\n 'reporter_id': customer_uid,\n 'summary': 'Support ticket test',\n 
'description': 'Support Ticket Test',\n 'state': 'assigned',\n 'ticket_type': 'unclassified',\n 'priority': 'normal',\n 'project_id': project_id,\n 'customer_id': customer_id,\n }\n support_ticket_id = support_ticket_obj.create(\n cr, customer_uid, support_ticket_vals,\n {'test_support_ticket': True}\n )\n return customer_uid, support_ticket_id", "def test_validate_ticket(self):\n pgt = ProxyGrantingTicketFactory()\n ticket = ProxyGrantingTicket.objects.validate_ticket(pgt.ticket, 'https://www.example.com')\n self.assertEqual(ticket, pgt)\n self.assertFalse(ticket.is_consumed())", "def test_user_can_change_superuser(self):\n self.assertTrue(self.story.user_can_change(self.superuser))", "async def close(self, ctx, *, reason=None):\n guild_settings = await self.config.guild(ctx.guild).all()\n is_admin = await is_admin_or_superior(self.bot, ctx.author) or any(\n [ur.id in guild_settings[\"supportroles\"] for ur in ctx.author.roles]\n )\n must_be_admin = not guild_settings[\"usercanclose\"]\n\n if not is_admin and must_be_admin:\n await ctx.send(\"Only Administrators can close tickets.\")\n return\n elif not is_admin:\n author = ctx.author # no u\n author_id = author.id\n elif is_admin:\n # Let's try to get the current channel and get the author\n # If not, we'll default to ctx.author\n inverted = {}\n for author_id, tickets in guild_settings[\"created\"].items():\n for ticket in tickets:\n inverted[ticket[\"channel\"]] = author_id\n try:\n author = ctx.guild.get_member(int(inverted[ctx.channel.id]))\n if author:\n author_id = author.id\n else:\n author_id = int(inverted[ctx.channel.id])\n except KeyError:\n author = ctx.author\n author_id = author.id\n\n if str(author_id) not in guild_settings[\"created\"]:\n await ctx.send(\"That user does not have an open ticket.\")\n return\n\n index = None\n if not guild_settings[\"created\"][str(author_id)]:\n await ctx.send(\"You don't have any open tickets.\")\n return\n elif len(guild_settings[\"created\"][str(author_id)]) == 1:\n index = 0\n else:\n for i, ticket in enumerate(guild_settings[\"created\"][str(author_id)]):\n if ticket[\"channel\"] == ctx.channel.id:\n index = i\n break\n\n if index is None:\n await ctx.send(\n \"You have multiple tickets open. 
\"\n \"Please run this command in the ticket channel you wish to close.\"\n )\n return\n\n channel = self.bot.get_channel(guild_settings[\"created\"][str(author_id)][index][\"channel\"])\n archive = self.bot.get_channel(guild_settings[\"archive\"][\"category\"])\n added_users = [\n user\n for u in guild_settings[\"created\"][str(author_id)][index][\"added\"]\n if (user := ctx.guild.get_member(u))\n ]\n added_users.append(author)\n\n # Again, to prevent race conditions...\n async with self.config.guild(ctx.guild).created() as created:\n del created[str(author_id)][index]\n\n if guild_settings[\"report\"] != 0:\n reporting_channel = self.bot.get_channel(guild_settings[\"report\"])\n if reporting_channel:\n if await self.embed_requested(reporting_channel):\n embed = discord.Embed(\n title=\"Ticket Closed\",\n description=(\n f\"Ticket {channel.mention} created by \"\n f\"{author.mention if author else author_id} \"\n f\"has been closed by {ctx.author.mention}.\"\n ),\n color=await ctx.embed_color(),\n )\n if reason:\n embed.add_field(name=\"Reason\", value=reason)\n await reporting_channel.send(embed=embed)\n else:\n message = (\n f\"Ticket {channel.mention} created by \"\n f\"{str(author) if author else author_id} \"\n f\"has been closed by {str(ctx.author)}.\"\n )\n if reason:\n message += f\"\\n**Reason**: {reason}\"\n\n await reporting_channel.send(message)\n\n if guild_settings[\"dm\"] and author:\n embed = discord.Embed(\n title=\"Ticket Closed\",\n description=(\n f\"Your ticket {channel.mention} has been closed by {ctx.author.mention}.\"\n ),\n color=await ctx.embed_color(),\n )\n if reason:\n embed.add_field(name=\"Reason\", value=reason)\n with contextlib.suppress(discord.HTTPException):\n await author.send(embed=embed)\n\n if guild_settings[\"archive\"][\"enabled\"] and channel and archive:\n for user in added_users:\n with contextlib.suppress(discord.HTTPException):\n if user:\n await channel.set_permissions(\n user, send_messages=False, read_messages=True\n )\n await ctx.send(\n f\"Ticket {channel.mention} for {author.display_name if author else author_id} \"\n \"has been closed. Channel will be moved to archive in one minute.\"\n )\n\n await asyncio.sleep(60)\n\n try:\n admin_roles = [\n ctx.guild.get_role(role_id)\n for role_id in (await self.bot._config.guild(ctx.guild).admin_role())\n if ctx.guild.get_role(role_id)\n ]\n support_roles = [\n ctx.guild.get_role(role_id)\n for role_id in (await self.config.guild(ctx.guild).supportroles())\n if ctx.guild.get_role(role_id)\n ]\n\n all_roles = admin_roles + support_roles\n overwrites = {\n ctx.guild.default_role: discord.PermissionOverwrite(read_messages=False),\n ctx.guild.me: discord.PermissionOverwrite(\n read_messages=True,\n send_messages=True,\n manage_channels=True,\n manage_permissions=True,\n ),\n }\n for role in all_roles:\n overwrites[role] = discord.PermissionOverwrite(\n read_messages=True, send_messages=True\n )\n for user in added_users:\n if user:\n overwrites[user] = discord.PermissionOverwrite(read_messages=False)\n await channel.edit(category=archive, overwrites=overwrites)\n except discord.HTTPException as e:\n await ctx.send(f\"Failed to move to archive: {str(e)}\")\n else:\n if channel:\n for user in added_users:\n with contextlib.suppress(discord.HTTPException):\n if user:\n await channel.set_permissions(\n user, send_messages=False, read_messages=True\n )\n await ctx.send(\n f\"Ticket {channel.mention} for {author.display_name if author else author_id} \"\n \"has been closed. 
Channel will be deleted in one minute, if exists.\"\n )\n\n await asyncio.sleep(60)\n\n if channel:\n try:\n await channel.delete()\n except discord.HTTPException:\n with contextlib.suppress(discord.HTTPException):\n await ctx.send(\n 'Failed to delete channel. Please ensure I have \"Manage Channels\" '\n \"permission in the category.\"\n )", "def is_access_allowed(self, user_id):\n ### DATABASE CODE GOES HERE\n return False", "def test_renew_user_pending_cancel(self):\n self.braintree_customer.active = True\n self.braintree_customer.pending_cancel = True\n self.braintree_customer.subscription_id = \"ValidSubscriptionID\"\n\n result = SubscriptionManager.renew(self.braintree_customer)\n self.assertEqual(\"ValidSubscriptionID\",result)\n self.assertFalse(self.braintree_customer.pending_cancel)", "def write_authorize(cls, user, obj):\n if not cls._meta.model.published_where_is_examiner(user).filter(id=obj.id):\n raise PermissionDenied()\n if obj.id == None:\n raise PermissionDenied() # We only allow update", "def test_logged_in_permission_another_user_borrowed_book(self):\n login = self.client.login(\n username='testuser2',\n password='2HJ1vRV0Z&3iD')\n response = self.client.get(\n reverse('librarian-renew-book',\n kwargs={'pk': self.test_bookinstance1.pk}))\n self.assertEqual(response.status_code, 200)", "def test_validate_ticket_consumed_ticket(self):\n pgt = ProxyGrantingTicketFactory(consume=True)\n with self.assertRaises(InvalidTicket):\n ProxyGrantingTicket.objects.validate_ticket(pgt.ticket, 'https://www.example.com')", "def userCanAffordItemObj(self, user : bbUser.bbUser, item : bbItem.bbItem) -> bool:\n return user.credits >= item.getValue()", "def raise_not_editable(self, viewer):\n if not self.id or viewer.has_perm(\"bookwyrm.create_invites\"):\n return\n raise PermissionDenied()", "def allow_to_edit(user):\n return allow_to_edit_well(user)", "def temporarily_allow_handler(update, _):\n global TEMPORARILY_ALLOWED\n user_id = update.message.chat.id\n if user_id == ADMIN_ID:\n TEMPORARILY_ALLOWED = True\n update.message.reply_text(\"Temprarily allowed!\")", "def authorizes(self, user):\n return self.owner == user or self.workers.filter(pk=user.id).exists()", "def Build(self,admin):\n \n rv=admin.helper.setAccount(admin.userName,self.currency)\n if rv is None:\n return False\n else:\n return True", "def changeCredit(self,user_ids,credit,changer_admin_name,remote_address,credit_change_comment,loaded_users):\n self.__changeCreditCheckInput(user_ids,credit,changer_admin_name,remote_address,credit_change_comment,loaded_users)\n admin_consumed_credit=credit*len(user_ids)\n ibs_query=IBSQuery()\n ibs_query+=admin_main.getActionManager().consumeDeposit(changer_admin_name,admin_consumed_credit)\n try:\n changer_admin_obj=admin_main.getLoader().getAdminByName(changer_admin_name)\n ibs_query+=self.__changeCreditQuery(user_ids,credit)\n ibs_query+=user_main.getCreditChangeLogActions().logCreditChangeQuery(\"CHANGE_CREDIT\",changer_admin_obj.getAdminID(),user_ids,credit,\\\n admin_consumed_credit,remote_address,credit_change_comment)\n\n ibs_query+=ias_main.getActionsManager().logEvent(\"CHANGE_CREDIT\",changer_admin_name,credit,\",\".join(user_ids))\n\n ibs_query.runQuery()\n except:\n admin_main.getActionManager().consumeDeposit(changer_admin_name,-1*admin_consumed_credit,False) #re-add deposit to admin\n raise\n self.broadcastChange(user_ids)", "def userBuyModuleObj(self, user : bbUser.bbUser, requestedModule : bbModule.bbModule):\n if self.userCanAffordItemObj(user, requestedModule):\n 
self.modulesStock.removeItem(requestedModule)\n user.credits -= requestedModule.getValue()\n user.inactiveShips.addItem(requestedModule)\n else:\n raise RuntimeError(\"user \" + str(user.id) + \" attempted to buy module \" + requestedModule.name + \" but can't afford it: \" + str(user.credits) + \" < \" + str(requestedModule.getValue()))", "def user_added_credit(self):\n return (self.user.Credit > 0)", "def validate(self,admin):\n\n rv=admin.helper.setAmount(admin.userName,\n 'ARS',self.actual+self.cnt)\n if rv is None:\n return False\n else:\n return True", "def handle(self, *args, **options):\r\n username = 'populate_creators_command'\r\n email = '[email protected]'\r\n try:\r\n admin = User.objects.create_user(username, email, 'foo')\r\n admin.is_staff = True\r\n admin.save()\r\n except IntegrityError:\r\n # If the script did not complete the last time it was run,\r\n # the admin user will already exist.\r\n admin = User.objects.get(username=username, email=email)\r\n\r\n for user in get_users_with_role(CourseInstructorRole.ROLE):\r\n add_user_with_status_granted(admin, user)\r\n\r\n # Some users will be both staff and instructors. Those folks have been\r\n # added with status granted above, and add_user_with_status_unrequested\r\n # will not try to add them again if they already exist in the course creator database.\r\n for user in get_users_with_role(CourseStaffRole.ROLE):\r\n add_user_with_status_unrequested(user)\r\n\r\n # There could be users who are not in either staff or instructor (they've\r\n # never actually done anything in Studio). I plan to add those as unrequested\r\n # when they first go to their dashboard.\r\n\r\n admin.delete()", "def check_is_admin(context):\n init()\n\n #the target is user-self\n credentials = context.to_dict()\n target = credentials\n\n return policy.check('context_is_admin', target, credentials)", "def test_edition_of_other_users_aid(client, contributor):\n\n aid = AidFactory()\n form_url = reverse('aid_edit_view', args=[aid.slug])\n client.force_login(contributor)\n res = client.get(form_url)\n assert res.status_code == 404", "async def claim(self, ctx, *args):\n if has_post_permission(ctx.guild.id, ctx.channel.id):\n number, name = ch.parse_number_and_name(args)\n out = ch.claim(ctx.user_object, name, number)\n await ctx.send(out)", "def notify_accounting(self, data):\n required = {'admin_token', 'token'}\n api.validate(data, required)\n admin_token = data['admin_token']\n self.credentials_module.authorize_admin(admin_token)\n token = data['token']\n job = self.credentials_module.get_job_from_token(token)\n accounting = self.batch_module.get_accounting(job['id'])\n job.update(accounting)\n self.credentials_module.update_job(token, job)\n result = self.batch_module.notify_accounting(admin_token, job)\n return result", "def test_change_permission(self):\r\n self.assertTrue(self.creator_admin.has_change_permission(self.request))\r\n\r\n self.request.user = self.user\r\n self.assertFalse(self.creator_admin.has_change_permission(self.request))", "def test_noTicket():\n assert testUser1.buyTicket(None) == False", "def test_buyTicket_Valid_Paramaters():\n old_venue_balance = testVenue.wallet\n assert testUser3.buyTicket(testTicket3)\n assert testTicket3 in testUser3.inventory\n assert not testTicket3.for_sale\n assert testUser3.wallet == 950\n assert testVenue.wallet == old_venue_balance + testTicket3.list_price", "def test_api_create_atmuser(self):\n users_num = ATMUser.objects.count()\n\n atmuser = ATMUser.objects.get(card='0000000000000000') # get 
admin\n view = ATMUserViewSet.as_view({'post': 'create'})\n\n data = {'card': '7777777777777777', 'password': '7777', 'cash': 700}\n request = factory.post(reverse('atmuser-list'), data, format='json')\n\n force_authenticate(request, user=atmuser)\n response = view(request)\n\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n self.assertEqual(ATMUser.objects.count(), users_num + 1)", "async def credit(ctx, *args):\n users_mentioned = ctx.message.mentions\n user_mention = ctx.author.mention\n credit = 0\n for arg in args:\n try:\n credit = float(arg)\n await ctx.message.channel.send(user_mention+\", we have successfully debited as you commanded.\")\n break\n except:\n pass\n bals = self.data[\"balances.json\"]\n for user in users_mentioned:\n if user.id in bals:\n bals[user.id] += credit\n else:\n bals[user.id] = credit", "async def create_ticket(self, member : Member, guild : Guild):\n licence_id = await servers.get_licence_id(guild.id)\n category : CategoryChannel = guild.get_channel(await self.categorys.get_category_id(licence_id))\n role = guild.get_role(await self.roles.get_role_id(licence_id))\n \n\n channel : TextChannel = await category.create_text_channel(f'ticket-{member.name}')\n\n overwrite_everyone = PermissionOverwrite()\n overwrite_everyone.send_messages = False\n overwrite_everyone.read_messages = False\n\n overwrite_member = PermissionOverwrite()\n overwrite_member.send_messages = True\n overwrite_member.read_messages = True\n\n\n everyone_role = guild.default_role\n\n await channel.set_permissions(target=everyone_role,overwrite=overwrite_everyone)\n await channel.set_permissions(target=member, overwrite=overwrite_everyone)\n await channel.set_permissions(target=role, overwrite=overwrite_member)\n await channel.send(content = member.mention + \" \" + role.mention)", "def can_edit_or_403(self, user):\n if user.id != self.game_master.id:\n raise PermissionDenied\n return True", "def test_approve(self):\n\n username,userpass = self.testdata.find_account_for('toolsubmitter')\n\n self.utils.account.login_as(username,userpass)\n\n self.contribtool.approve(TOOLNAME,TOOLLICENSEDATA)", "def test_func(self):\n answer = self.get_object()\n return True if self.request.user == answer.author or self.request.user.is_superuser else False", "def use(self):\n if self.credit < self.price_of_trip:\n print(\"Your credit is not enough, please increase your credit\")\n else:\n self.credit -= self.price_of_trip\n print(\"Done\")", "def test_valid_admin_approval(self):\n\n new_user = self.registration_profile.objects.create_inactive_user(\n site=Site.objects.get_current(), **self.user_info)\n profile = self.registration_profile.objects.get(user=new_user)\n user, activated = self.registration_profile.objects.activate_user(\n profile.activation_key, Site.objects.get_current())\n\n self.assertIsInstance(user, UserModel())\n\n user = self.registration_profile.objects.admin_approve_user(\n profile.id, Site.objects.get_current())\n self.assertIsInstance(user, UserModel())\n self.assertIs(user.is_active, True)", "def test_create_ticket_no_expires(self):\n st = ServiceTicket.objects.create_ticket(user=self.user)\n self.assertTrue(st.expires > now())", "def __call__(self, target, creds, enforcer):\n\n return creds['is_admin'] == self.expected", "def __call__(self, target, creds, enforcer):\n\n return creds['is_admin'] == self.expected", "def require_project_administrator(project):\n if not test_project_administrator(project):\n raise cherrypy.HTTPError(403)", "def 
test_approve_agreement(self):\n pass", "def test_api_update_atmuser_cash_enough(self):\n atmuser = ATMUser.objects.get(card='1111111111111111')\n view = ATMUserViewSet.as_view({'patch': 'partial_update'})\n\n cash = atmuser.cash\n withdrawal = cash - 1\n\n data = {'withdrawal': atmuser.cash - 1, 'card': atmuser.cash}\n request = factory.patch(reverse('atmuser-detail',\n kwargs={'card': atmuser.card}),\n data, format='json')\n\n force_authenticate(request, user=atmuser)\n response = view(request, card=atmuser.card)\n\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n atmuser = ATMUser.objects.get(card='1111111111111111')\n self.assertEqual(atmuser.cash, cash - withdrawal)", "def test_allow_beta(self):\r\n user = UserFactory()\r\n allow_access(self.course, user, 'beta')\r\n self.assertTrue(CourseBetaTesterRole(self.course.id).has_user(user))", "def authorized(self, user):\n\n return self.admin.id.getUnhashed() == user.id.getUnhashed()", "def test_no_admins_registered(self):\n new_user = self.registration_profile.objects.create_inactive_user(\n site=Site.objects.get_current(), **self.user_info)\n\n with self.assertRaises(ImproperlyConfigured):\n self.registration_profile.objects.send_admin_approve_email(\n new_user, Site.objects.get_current())", "def authorize_admin(self, instance):\n\n # Authorize user admin.\n instance.client.post(\n reverse(\"login\"),\n {\"username\": \"admin\", \"password\": \"admin\"},\n )\n return instance.client.get(reverse(\"edit\"))", "def validate_ownership(item, user_id):\n if item.user_id != user_id:\n raise Forbidden('You are not allowed to modify this item.')", "def test_func(self):\n return self.request.user.is_superuser", "def charge(self, expired):\n self.message += \\\n '------------------- EXPIRED -------------------\\n'\n for lo in expired:\n self.message += '------------------\\n'\n # try to charge, or eliminate user\n if not lo.charge_borrower():\n self.message += 'USER DELETED :: ' + lo.borrower.user.username\n self.message += '\\n'\n lo.borrower.delete_for_loan()\n else:\n self.message += 'USER CHARGED :: ' + lo.borrower.user.username\n self.message += '\\n'", "def test_buyTicket_AlreadySold():\n assert not testUser2.buyTicket(testTicket1)\n assert testTicket1 in testUser1.inventory\n assert testTicket1 not in testUser2.inventory\n assert not testTicket1.for_sale\n assert testUser2.wallet == 500", "def test_get_non_owner(self):\n another_user = CustomUser(id=101, email='[email protected]', is_active=True)\n another_user.set_password('testpassword')\n another_user.save()\n self.client.login(email='[email protected]', password='testpassword')\n\n url = reverse('route', kwargs={'way_id': self.route.way_id, 'route_id': self.route.id})\n response = self.client.get(url)\n self.assertEqual(response.status_code, 403)", "def admin_required(handler):\n def admin_login(self, *args, **kwargs):\n auth = self.auth\n if not auth.get_user_by_session():\n self.redirect('/auth/login', abort=True)\n \n user = auth.get_user_by_session()\n queried_entity = User.get_by_id(user['user_id'])\n \n if queried_entity and queried_entity.phb_user_admin_status == 'admin-1':\n return handler(self, *args, **kwargs)\n else:\n self.redirect('/', abort = True)\n \n return admin_login", "def ticket_created(self, ticket):", "def customer_paid(request, user_correct, tickets, total, payment_id):\n comp = Competition.objects.get(is_active=True)\n user = User.objects.get(id=request.user.id)\n order = Order.objects.get(user=user, ordered=False)\n new_order = update_orders(comp, 
order, user_correct, payment_id)\n if user_correct:\n create_entries(order, user, comp, tickets, new_order)\n email_order(request, order, user_correct)\n check_for_new_competition(comp)\n if comp.tickets_left == 0:\n pick_competition_winner()\n request.session['order_id'] = order.id", "def KLP_User_Activate(request, user_id):\n\n # get logged in user\n\n user = request.user\n if user.id:\n\n # check logged in user permissions to delete user\n\n KLP_user_Perm(request.user, 'Users', None)\n userObj = User.objects.get(pk=user_id)\n userObj.is_active = 1 # activate user\n userObj.save() # save user object\n return render_to_response('viewtemplates/userAction_done.html',\n {\n 'user': request.user,\n 'selUser': userObj,\n 'message': 'User Activated Successfully',\n 'legend': 'Karnataka Learning Partnership',\n 'entry': 'Add',\n }, context_instance=RequestContext(request))\n else:\n\n # if user is not logged in redirect to login page\n\n return HttpResponseRedirect('/login/')", "def make_user_admin(connection,user):\r\n with connection:\r\n connection.execute(MAKE_USER_ADMIN,(user,))", "def test_editing_supplies_user(self):\n id = self.testsupply.id\n oldstate = self.testsupply.state\n request = self.factory.put(\n '/api/supplies/%s/' % id, {'name': '3d printer', 'state': 'aaa'})\n force_authenticate(request, user=self.testuser1)\n response = SupplyDetailsView.as_view()(request, pk=id)\n # normal user should get forbidden error\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)\n # data should not change\n self.assertEqual(Supply.objects.get(id=id).state, oldstate)", "def test_create_ticket_expires(self):\n expires = now() + timedelta(seconds=30)\n st = ServiceTicket.objects.create_ticket(expires=expires, user=self.user)\n self.assertEqual(st.expires, expires)", "def test_validate_ticket_invalid_ticket(self):\n with self.assertRaises(InvalidTicket):\n ProxyGrantingTicket.objects.validate_ticket('12345', 'https://www.example.com')" ]
[ "0.76057625", "0.7198469", "0.663993", "0.65313613", "0.6304915", "0.6245688", "0.62093043", "0.60432243", "0.5984338", "0.59788305", "0.5956783", "0.5948479", "0.5885243", "0.58366257", "0.5809721", "0.5722764", "0.569822", "0.5689488", "0.5654343", "0.5625085", "0.56015855", "0.5596882", "0.5558705", "0.554677", "0.55456024", "0.5541014", "0.5533439", "0.55198914", "0.5513701", "0.550214", "0.55004865", "0.54981726", "0.54958487", "0.5483909", "0.5477202", "0.5471479", "0.54673046", "0.54673046", "0.54620844", "0.5460678", "0.54444176", "0.54138476", "0.5413813", "0.5406805", "0.5404062", "0.54003155", "0.53974944", "0.5397107", "0.5385198", "0.53729975", "0.5366119", "0.5365097", "0.53650934", "0.5363505", "0.5353718", "0.5347329", "0.53463346", "0.53420776", "0.53392273", "0.532142", "0.53174967", "0.53089523", "0.52954304", "0.5291665", "0.52846926", "0.52792376", "0.52756476", "0.5273563", "0.5271977", "0.5265836", "0.52573985", "0.52550447", "0.5248008", "0.52431196", "0.5237193", "0.52357095", "0.52297026", "0.5229535", "0.5228171", "0.5228171", "0.5227021", "0.5221919", "0.5221307", "0.52191836", "0.52168924", "0.5215168", "0.5213623", "0.52112216", "0.52090883", "0.52086014", "0.52070934", "0.52041537", "0.52035654", "0.5199473", "0.5195631", "0.5194678", "0.51930815", "0.51908904", "0.5190326", "0.5190235" ]
0.8034523
0
Ensure user can't credit tickets to a user
def test_credit_ticket_as_user(self): user = UserFactory() self.assertEqual(user.tickets, 1) nb_tickets_to_add = 5 data = { 'nb_tickets': nb_tickets_to_add, } self.client.force_authenticate(user=self.user) response = self.client.post( reverse( 'user-credit-tickets', kwargs={'pk': user.id}, ), data, format='json', ) self.assertEqual( response.status_code, status.HTTP_403_FORBIDDEN, )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_credit_ticket_negative_int(self):\n user = UserFactory()\n self.assertEqual(user.tickets, 1)\n nb_tickets_to_add = -5\n data = {\n 'nb_tickets': nb_tickets_to_add,\n }\n\n self.client.force_authenticate(user=self.admin)\n response = self.client.post(\n reverse(\n 'user-credit-tickets',\n kwargs={'pk': user.id},\n ),\n data,\n format='json',\n )\n self.assertEqual(\n response.status_code,\n status.HTTP_400_BAD_REQUEST,\n )", "def test_credit_ticket_not_int(self):\n user = UserFactory()\n self.assertEqual(user.tickets, 1)\n nb_tickets_to_add = 'this is not an int'\n data = {\n 'nb_tickets': nb_tickets_to_add,\n }\n\n self.client.force_authenticate(user=self.admin)\n response = self.client.post(\n reverse(\n 'user-credit-tickets',\n kwargs={'pk': user.id},\n ),\n data,\n format='json',\n )\n self.assertEqual(\n response.status_code,\n status.HTTP_400_BAD_REQUEST,\n )", "def test_credit_ticket_as_admin(self):\n user = UserFactory()\n self.assertEqual(user.tickets, 1)\n nb_tickets_to_add = 5\n data = {\n 'nb_tickets': nb_tickets_to_add,\n }\n\n self.client.force_authenticate(user=self.admin)\n response = self.client.post(\n reverse(\n 'user-credit-tickets',\n kwargs={'pk': user.id},\n ),\n data,\n format='json',\n )\n self.assertEqual(\n response.status_code,\n status.HTTP_200_OK,\n )\n\n self.assertEqual(\n User.objects.get(pk=user.id).tickets,\n 1 + nb_tickets_to_add\n )", "def check_credit(self):\n self.ensure_one()\n getattr(self, '%s_check_credit' % self.provider, lambda: None)()", "def cant(user, action):\n\n return not can(user, action)", "def test_not_permitted(self):\r\n test_user_client, test_user = self.create_non_staff_authed_user_client()\r\n CourseEnrollment.enroll(test_user, self.course.id)\r\n response = test_user_client.get(self.orphan_url)\r\n self.assertEqual(response.status_code, 403)\r\n response = test_user_client.delete(self.orphan_url)\r\n self.assertEqual(response.status_code, 403)", "def allowed(self, user, amount):\n return True", "def test_validate_ticket_no_ticket(self):\n with self.assertRaises(InvalidRequest):\n ProxyGrantingTicket.objects.validate_ticket(None, 'https://www.example.com')", "def test_noTicket():\n assert testUser1.buyTicket(None) == False", "def user_allow_credit(self):\n try:\n return self.user.creditAllowed()\n except AttributeError:\n return False", "def user_requested_access(user):\r\n user = CourseCreator.objects.get(user=user)\r\n if user.state != CourseCreator.GRANTED:\r\n user.state = CourseCreator.PENDING\r\n user.save()", "def test_validate_ticket_invalid_ticket(self):\n with self.assertRaises(InvalidTicket):\n ProxyGrantingTicket.objects.validate_ticket('12345', 'https://www.example.com')", "def test_negative_conditions(self):\r\n outline_url = reverse_course_url('course_handler', self.course.id)\r\n # register a non-staff member and try to delete the course branch\r\n non_staff_client, _ = self.create_non_staff_authed_user_client()\r\n response = non_staff_client.delete(outline_url, {}, HTTP_ACCEPT='application/json')\r\n self.assertEqual(response.status_code, 403)", "def test_validate_ticket_consumed_ticket(self):\n pgt = ProxyGrantingTicketFactory(consume=True)\n with self.assertRaises(InvalidTicket):\n ProxyGrantingTicket.objects.validate_ticket(pgt.ticket, 'https://www.example.com')", "def test_buyTicket_AlreadySold():\n assert not testUser2.buyTicket(testTicket1)\n assert testTicket1 in testUser1.inventory\n assert testTicket1 not in testUser2.inventory\n assert not testTicket1.for_sale\n assert testUser2.wallet == 
500", "def test_validate_ticket_does_not_exist(self):\n ticket = 'PGT-0000000000-aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa'\n with self.assertRaises(InvalidTicket):\n ProxyGrantingTicket.objects.validate_ticket(ticket, 'https://www.example.com')", "def test_case_user_not_yet_customer(self):\n pass", "def test_buyTicket_NotForSale():\n old_venue_balance = testVenue.wallet\n assert not testUser2.buyTicket(testTicket2)\n assert testTicket2 not in testUser2.inventory\n assert not testTicket1.for_sale\n assert testUser2.wallet == 500\n assert testVenue.wallet == old_venue_balance", "def test_deny_pending_payment(self):\n pass", "def _is_ticket_blocked(self, registration, **kwargs):\n if not self._is_ticketing_handled(registration.registration_form):\n return False\n req = registration.cern_access_request\n return not req or not req.is_active or not req.has_identity_info", "async def ticket_add(self, ctx, user: discord.Member):\n guild_settings = await self.config.guild(ctx.guild).all()\n is_admin = await is_admin_or_superior(self.bot, ctx.author) or any(\n [ur.id in guild_settings[\"supportroles\"] for ur in ctx.author.roles]\n )\n must_be_admin = not guild_settings[\"usercanmodify\"]\n\n if not is_admin and must_be_admin:\n await ctx.send(\"Only Administrators can add/remove other users to tickets.\")\n return\n elif not is_admin:\n author = ctx.author\n author_id = author.id\n elif is_admin:\n # Since the author isn't specified, and it's an admin, we need to guess on who\n # the author is\n inverted = {}\n for author_id, tickets in guild_settings[\"created\"].items():\n for ticket in tickets:\n inverted[ticket[\"channel\"]] = author_id\n try:\n author = ctx.guild.get_member(int(inverted[ctx.channel.id]))\n if author:\n author_id = author.id\n else:\n author_id = int(inverted[ctx.channel.id])\n except KeyError:\n author = ctx.author\n author_id = author.id\n\n index = None\n\n if not guild_settings[\"created\"][str(author_id)]:\n await ctx.send(\"You don't have any open tickets.\")\n return\n elif len(guild_settings[\"created\"][str(author_id)]) == 1:\n index = 0\n else:\n for i, ticket in enumerate(guild_settings[\"created\"][str(author_id)]):\n if ticket[\"channel\"] == ctx.channel.id:\n index = i\n break\n\n if index is None:\n await ctx.send(\n \"You have multiple tickets open. \"\n \"Please run this command in the ticket channel you wish to edit.\"\n )\n return\n\n channel = self.bot.get_channel(guild_settings[\"created\"][str(author_id)][index][\"channel\"])\n\n if user.id in guild_settings[\"created\"][str(author_id)][index][\"added\"]:\n await ctx.send(\"That user is already added.\")\n return\n\n adding_is_admin = await is_admin_or_superior(self.bot, user) or any(\n [ur.id in guild_settings[\"supportroles\"] for ur in user.roles]\n )\n\n if adding_is_admin:\n await ctx.send(\"You cannot add a user in support or admin team.\")\n return\n\n channel = self.bot.get_channel(guild_settings[\"created\"][str(author_id)][index][\"channel\"])\n if not channel:\n await ctx.send(\"The ticket channel has been deleted.\")\n return\n\n try:\n await channel.set_permissions(user, send_messages=True, read_messages=True)\n except discord.Forbidden:\n await ctx.send(\n \"The Manage Permissions channel for me has been removed. 
\"\n \"I am unable to modify this ticket.\"\n )\n return\n\n async with self.config.guild(ctx.guild).created() as created:\n created[str(author_id)][index][\"added\"].append(user.id)\n\n await ctx.send(f\"{user.mention} has been added to the ticket.\")", "def test_not_owner(self):\n creating_user = create_user()\n creating_user.save()\n festival = create_festival('test', creating_user)\n festival.save()\n\n concert = create_concert(festival, 'test')\n concert.save()\n\n login(self.client)\n\n client = create_client('test')\n client.delete_access = True\n client.save()\n\n response = self.client.post('/backend/u/conc/', {'client': 'test', 'id': concert.pk})\n self.assertEqual(response.status_code, 200)\n self.assertEqual('Permission not granted', response.content.decode('utf-8'))", "def test_create_ticket_no_expires(self):\n st = ServiceTicket.objects.create_ticket(user=self.user)\n self.assertTrue(st.expires > now())", "def add_ticket(self, user):\n profile = user.get_profile()\n if profile.available_tickets() <= 0:\n raise Exception(\"This user does not have any tickets to allocate.\")\n \n ticket = RaffleTicket(raffle_prize=self, user=user)\n ticket.save()", "def test_can_not_cancel_past_block(self):\n date = datetime.now().replace(minute=0, second=0, microsecond=0) - timedelta(hours=1)\n\n response = self.client.post(\n reverse('bookings', kwargs={'facility': 'g'}), {'cancel': str(date.timestamp())})\n\n context = response.context\n bookings = context[\"bookings\"]\n\n self.assertEqual(response.status_code, 403)\n self.assertEqual(context[\"quota\"], settings.BOOKINGS_QUOTA)", "def validate_ownership(item, user_id):\n if item.user_id != user_id:\n raise Forbidden('You are not allowed to modify this item.')", "def test_can_not_cancel_current_block(self):\n date = datetime.now().replace(minute=0, second=0, microsecond=0)\n\n response = self.client.post(\n reverse('bookings', kwargs={'facility': 'g'}), {'cancel': str(date.timestamp())})\n\n context = response.context\n bookings = context[\"bookings\"]\n\n self.assertEqual(response.status_code, 403)\n self.assertEqual(context[\"quota\"], settings.BOOKINGS_QUOTA)", "def test_user_can_change_not_author(self):\n self.assertFalse(self.story.user_can_change(self.user2))", "def test_validate_ticket(self):\n pgt = ProxyGrantingTicketFactory()\n ticket = ProxyGrantingTicket.objects.validate_ticket(pgt.ticket, 'https://www.example.com')\n self.assertEqual(ticket, pgt)\n self.assertFalse(ticket.is_consumed())", "def test_no_enable_paid_course_registration(self):\r\n self.add_to_cart()\r\n self.request.user = self.user\r\n context = user_has_cart_context_processor(self.request)\r\n self.assertFalse(context['display_shopping_cart'])", "def test_validate_ticket_no_service(self):\n pgt = ProxyGrantingTicketFactory()\n with self.assertRaises(InvalidRequest):\n ProxyGrantingTicket.objects.validate_ticket(pgt.ticket, None)", "def test_user_cannot_unlock_hint():\n app = create_ctfd()\n with app.app_context():\n with app.test_client():\n register_user(app, name=\"user1\", email=\"[email protected]\")\n\n chal = gen_challenge(app.db, value=100)\n chal_id = chal.id\n\n gen_flag(app.db, challenge_id=chal.id, content=\"flag\")\n\n hint = gen_hint(db, chal_id, cost=10)\n hint_id = hint.id\n\n client = login_as_user(app, name=\"user1\", password=\"password\")\n\n with client.session_transaction():\n r = client.get(\"/api/v1/hints/{}\".format(hint_id))\n resp = r.get_json()\n assert resp[\"data\"].get(\"content\") is None\n assert resp[\"data\"].get(\"cost\") == 
10\n destroy_ctfd(app)", "async def ticket_remove(self, ctx, user: discord.Member):\n guild_settings = await self.config.guild(ctx.guild).all()\n is_admin = await is_admin_or_superior(self.bot, ctx.author) or any(\n [ur.id in guild_settings[\"supportroles\"] for ur in ctx.author.roles]\n )\n must_be_admin = not guild_settings[\"usercanmodify\"]\n\n if not is_admin and must_be_admin:\n await ctx.send(\"Only Administrators can add/remove other users to tickets.\")\n return\n elif not is_admin:\n author = ctx.author\n author_id = author.id\n elif is_admin:\n # Since the author isn't specified, and it's an admin, we need to guess on who\n # the author is\n inverted = {}\n for author_id, tickets in guild_settings[\"created\"].items():\n for ticket in tickets:\n inverted[ticket[\"channel\"]] = author_id\n try:\n author = ctx.guild.get_member(int(inverted[ctx.channel.id]))\n if author:\n author_id = author.id\n else:\n author_id = int(inverted[ctx.channel.id])\n except KeyError:\n author = ctx.author\n author_id = author.id\n\n index = None\n\n if not guild_settings[\"created\"][str(author_id)]:\n await ctx.send(\"You don't have any open tickets.\")\n return\n elif len(guild_settings[\"created\"][str(author_id)]) == 1:\n index = 0\n else:\n for i, ticket in enumerate(guild_settings[\"created\"][str(author_id)]):\n if ticket[\"channel\"] == ctx.channel.id:\n index = i\n break\n\n if index is None:\n await ctx.send(\n \"You have multiple tickets open. \"\n \"Please run this command in the ticket channel you wish to edit.\"\n )\n return\n\n if user.id not in guild_settings[\"created\"][str(author_id)][index][\"added\"]:\n await ctx.send(\"That user is not added.\")\n return\n\n removing_is_admin = await is_admin_or_superior(self.bot, user) or any(\n [ur.id in guild_settings[\"supportroles\"] for ur in user.roles]\n )\n\n if removing_is_admin:\n await ctx.send(\"You cannot remove a user in support or admin team.\")\n return\n\n channel = self.bot.get_channel(guild_settings[\"created\"][str(author_id)][index][\"channel\"])\n if not channel:\n await ctx.send(\"The ticket channel has been deleted.\")\n\n try:\n await channel.set_permissions(user, send_messages=False, read_messages=False)\n except discord.Forbidden:\n await ctx.send(\n \"The Manage Permissions channel for me has been removed. 
\"\n \"I am unable to modify this ticket.\"\n )\n return\n\n async with self.config.guild(ctx.guild).created() as created:\n created[str(author_id)][index][\"added\"].remove(user.id)\n\n await ctx.send(f\"{user.mention} has been removed from the ticket.\")", "def test_seat_not_available(self):\n\n user1 = User.objects.create(username=\"user1\", password=\"\", email=\"[email protected]\")\n user2 = User.objects.create(username=\"user2\", password=\"\", email=\"[email protected]\")\n\n course = Course.objects.first()\n course.student.add(user1)\n course.student.add(user2)\n\n self.assertFalse(course.is_seat_available())", "def test_user_cannot_unlock_hint():\n app = create_ctfd()\n with app.app_context():\n with app.test_client() as client:\n register_user(app, name=\"user1\", email=\"[email protected]\")\n\n chal = gen_challenge(app.db, value=100)\n chal_id = chal.id\n\n flag = gen_flag(app.db, chal=chal.id, flag='flag')\n\n hint = gen_hint(db, chal_id, cost=10)\n hint_id = hint.id\n\n client = login_as_user(app, name=\"user1\", password=\"password\")\n\n with client.session_transaction() as sess:\n data = {\n \"nonce\": sess.get('nonce')\n }\n r = client.post('/hints/{}'.format(hint_id), data=data)\n resp = json.loads(r.data.decode('utf8'))\n assert resp.get('errors') == 'Not enough points'\n destroy_ctfd(app)", "def raise_not_editable(self, viewer):\n if not self.id or viewer.has_perm(\"bookwyrm.create_invites\"):\n return\n raise PermissionDenied()", "def test_course_does_not_expire_for_verified_user(self):\n course = CourseFactory.create(start=THREE_YEARS_AGO)\n url = course_home_url(course)\n\n user = UserFactory.create(password=self.TEST_PASSWORD)\n CourseEnrollment.enroll(user, self.course.id, mode=CourseMode.VERIFIED)\n Schedule.objects.update(start_date=THREE_YEARS_AGO)\n\n # ensure that the user who has indefinite access\n self.client.login(username=user.username, password=self.TEST_PASSWORD)\n response = self.client.get(url)\n assert response.status_code == 200, 'Should not expire access for user'", "def can_accept_credit(self, value):\n return value >= 0", "def get_everyone_denied(self):", "def test_can_not_reserve_booked_block(self):\n booking_other = create_test_booking(self.someone, self.first_day, 11)\n\n response = self.client.post(\n reverse('bookings', kwargs={'facility': 'g'}), {'book': str(booking_other.date.timestamp())})\n\n context = response.context\n bookings = context[\"bookings\"]\n\n self.assertEqual(response.status_code, 403)\n self.assertEqual(context[\"quota\"], settings.BOOKINGS_QUOTA)\n\n self.assertEqual(type(context[\"info\"]), NotAllowedAlert)", "def test_non_contractor_acks_receipt(self):\n res = self.client.post(self.url)\n self.assertEqual(res.status_code, 403)", "def add_user_with_status_unrequested(user):\r\n _add_user(user, CourseCreator.UNREQUESTED)", "def cancel_dummy(self):\n if self.state != 'authorized':\n self.raise_user_error('cancel_only_authorized')\n else:\n self.state = 'cancel'\n self.save()", "def test_reject_agreement(self):\n pass", "async def adduser(ctx, user: discord.Member):\n channel = ctx.channel\n if not IsATicket(channel.id):\n await ctx.send(\n \"This is not a ticket! 
Users can only be added to a ticket channel\"\n )\n return\n\n await channel.set_permissions(user, read_messages=True, send_messages=True)\n await ctx.message.delete()", "def test_locked_asset_not_registered(self):\r\n self.client.login(username=self.usr, password=self.pwd)\r\n resp = self.client.get(self.url_locked)\r\n self.assertEqual(resp.status_code, 403) # pylint: disable=E1103\r", "def test_not_permitted(self, default_store):\n course = self.create_course_with_orphans(default_store)\n orphan_url = reverse_course_url('orphan_handler', course.id)\n\n test_user_client, test_user = self.create_non_staff_authed_user_client()\n CourseEnrollment.enroll(test_user, course.id)\n response = test_user_client.get(orphan_url)\n self.assertEqual(response.status_code, 403)\n response = test_user_client.delete(orphan_url)\n self.assertEqual(response.status_code, 403)", "def make_eligible(self):\n pass", "def test_buyTicket_FreeTicket():\n old_venue_balance = testVenue.wallet\n assert testUser4.buyTicket(testTicket4)\n assert testUser4.inventory[-1] == testTicket4\n assert not testTicket4.for_sale\n assert testUser4.wallet == 0\n assert testVenue.wallet == old_venue_balance", "def test_validate_ticket_expired_ticket(self):\n pgt = ProxyGrantingTicketFactory(expire=True)\n with self.assertRaises(InvalidTicket):\n ProxyGrantingTicket.objects.validate_ticket(pgt.ticket, 'https://www.example.com')", "def verify_ticket(self, ticket):\n raise NotImplementedError()", "def test_buyTicket_insufficientFunds():\n old_venue_balance = testVenue.wallet\n assert not testUser4.buyTicket(testTicket3)\n assert testTicket3 not in testUser4.inventory\n assert testTicket3.for_sale\n assert testUser4.wallet == 0\n assert testVenue.wallet == old_venue_balance", "def test_ticket_not_consumed(self):\n st = ServiceTicketFactory()\n self.assertFalse(st.is_consumed())", "def no_reason(message, db):\n #message.reply(Strings['GRANT_EXAMPLE'].format(db))\n try:\n hf.grant(message, db.lower(), \"[EXTENDING ACCESS TIME]\", False)\n except Exception as e:\n message._client.send_message(errors_channel, \"```{}```\".format(e))", "def test_unpaid_penalty_prevents_borrow(self):\n ten_days_ago = timezone.now() - timezone.timedelta(days=10)\n Borrow.objects.create(\n book_id=1,\n student=self.students[0],\n requested_at=ten_days_ago,\n borrowed_at=ten_days_ago,\n duration=6,\n )\n client1 = APIClient()\n client1.login(username=self.manager.username, password=\"salam*123\")\n client1.post(\"/borrows/1/terminate/\")\n client2 = APIClient()\n client2.login(username=self.students[0].username, password=\"salam*123\")\n response = client2.post(\"/borrows/\", data={\"book\": 5})\n self.assertEqual(response.status_code, 400)", "def test_course_not_available(self):\n \n user1 = User.objects.create(username=\"user1\", password=\"1234\", email=\"[email protected]\")\n user2 = User.objects.create(username=\"user2\", password=\"1234\", email=\"[email protected]\")\n \n course = Course.objects.first()\n course.registered_course.add(user1)\n course.registered_course.add(user2)\n \n self.assertFalse(course.is_course_available())", "def checkStudentcanTake(self,course_object):\r\n\r\n if self.budget >= course_object.paymentBill() and self not in course_object.registered_users:\r\n return True\r\n return False", "def test_dont_cancel_for_events_with_no_cost(self, mock_tz):\n mock_tz.now.return_value = datetime(\n 2015, 2, 11, 10, tzinfo=dt_timezone.utc\n )\n self.ticketed_event.ticket_cost = 0\n self.ticketed_event.save()\n 
self.assertFalse(self.unpaid.cancelled)\n self.assertFalse(self.paid.cancelled)\n\n management.call_command('cancel_unpaid_ticket_bookings')\n # emails are sent to user per cancelled booking and studio once for all\n # cancelled bookings\n self.unpaid.refresh_from_db()\n self.paid.refresh_from_db()\n self.assertEqual(len(mail.outbox), 0)\n self.assertFalse(self.unpaid.cancelled)\n self.assertFalse(self.paid.cancelled)", "def can_edit_or_403(self, user):\n if user.id != self.game_master.id:\n raise PermissionDenied\n return True", "def no_reason(message, db):\n #message.reply(Strings['GRANT_EXAMPLE'].format(db))\n try:\n hf.grant(message, db.lower(), \"[EXTENDING ACCESS TIME]\", True)\n except Exception as e:\n message._client.send_message(errors_channel, \"```{}```\".format(e))", "def test_can_not_book_past_block(self):\n date = datetime.now().replace(minute=0, second=0, microsecond=0) - timedelta(hours=1)\n\n response = self.client.post(\n reverse('bookings', kwargs={'facility': 'g'}), {'book': str(date.timestamp())})\n\n context = response.context\n bookings = context[\"bookings\"]\n\n self.assertEqual(response.status_code, 403)\n self.assertEqual(context[\"quota\"], settings.BOOKINGS_QUOTA)", "def violated(self) -> bool:\n ...", "def validate_ticket(self, req, ticket):\n\n res = []\n\n # the ticket we receive is a temporary not-yet-commited ticket\n # and contains fields set that weren't changed as well,\n # retrieve the original one so we can compare\n ot = model.Ticket(self.env, ticket.id)\n\n self.env.log.debug('validate_ticket: %s' % ticket.id)\n\n # refuse changes to dup_count and dups fields\n new = ticket.values.get('dups', None)\n if new is not None and new != ot.values.get('dups', None):\n res.append(('dups', 'Cannot manually change the dups field.'))\n return res\n\n new = ticket.values.get('dup_count', None)\n if new is not None and new != ot.values.get('dup_count', None):\n res.append(('dup_count',\n 'Cannot manually change the dup_count field.'))\n return res\n\n new_id = ticket.values.get('dup_of', None)\n\n # allow unsetting\n if not new_id:\n self.env.log.debug(\"validate_ticket: dup_of is None, so fine\")\n return res\n\n # care only about tickets that have dup_of changes\n old = ot.values.get('dup_of', None)\n if old == new_id:\n self.env.log.debug(\"validate_ticket: no dup_of changes\")\n return res\n\n # refuse to change closed tickets\n if ticket.values['status'] == u'closed':\n if ticket.values['resolution'] == u'duplicate':\n self.env.log.debug(\n \"validate_ticket: allowing unduplicate to get validated\")\n # but still subject to other rules\n else:\n self.env.log.debug(\n \"validate_ticket: refusing to dup closed ticket #%s\" %\n ticket.id)\n res.append(('dup_of',\n 'Ticket is already closed, and not as a duplicate.'))\n return res\n\n # refuse to dup_of and reopen ticket in one go\n if ot.values['status'] == u'closed' \\\n and ticket.values['status'] == u'reopened':\n self.env.log.debug(\"validate_ticket: \"\n \"refusing to dup_of and reopen ticket #%s\" %\n ticket.id)\n res.append(('status',\n 'If you want to duplicate an already closed ticket, '\n 'only change dup_of without reopening the ticket.'))\n return res\n\n # warn when it starts with #\n if len(new_id) > 0 and new_id[0] == '#':\n res.append(('dup_of',\n 'Please enter the ticket number without a leading #.'))\n return res\n\n # refuse to dup to non-existing tickets; this raises a TracError\n # if it doesn't exist\n # coderanger says a Ticket can have anything with a __str__ method\n # as id\n # except 
in the 0.10.5dev branch, a non-existing ticket id raises\n # a TracError with %d in the format string (fixed on TRUNK),\n # so make it an int here\n master = model.Ticket(self.env, int(new_id))\n\n # refuse to dup to self\n if str(new_id) == str(ticket.id):\n self.env.log.debug(\"validate_ticket: \"\n \"cowardly refusing to dup to self #%s\" % ticket.id)\n res.append(('dup_of',\n 'Cannot duplicate a ticket to itself.'))\n return res\n\n self.env.log.debug('validate_ticket: Validated ticket %s' % ticket.id)\n return res", "def test_renew_user_pending_cancel(self):\n self.braintree_customer.active = True\n self.braintree_customer.pending_cancel = True\n self.braintree_customer.subscription_id = \"ValidSubscriptionID\"\n\n result = SubscriptionManager.renew(self.braintree_customer)\n self.assertEqual(\"ValidSubscriptionID\",result)\n self.assertFalse(self.braintree_customer.pending_cancel)", "def test_validate_ticket_no_ticket(self):\n with self.assertRaises(InvalidRequest):\n ServiceTicket.objects.validate_ticket(None, self.url)", "def test_can_not_book_running_block(self):\n date = datetime.now().replace(minute=0, second=0, microsecond=0)\n\n response = self.client.post(\n reverse('bookings', kwargs={'facility': 'g'}), {'book': str(date.timestamp())})\n\n context = response.context\n bookings = context[\"bookings\"]\n\n self.assertEqual(response.status_code, 403)\n self.assertEqual(context[\"quota\"], settings.BOOKINGS_QUOTA)", "def abort_not_request_owner(reqID, user):\n\n req = get_ride_request(reqID)\n if req.user_id != user:\n msg = \"You are not authorized to view this requests\"\n abort(HTTPStatus.UNAUTHORIZED, message=msg)", "def test_get_non_owner(self):\n another_user = CustomUser(id=101, email='[email protected]', is_active=True)\n another_user.set_password('testpassword')\n another_user.save()\n self.client.login(email='[email protected]', password='testpassword')\n\n url = reverse('route', kwargs={'way_id': self.route.way_id, 'route_id': self.route.id})\n response = self.client.get(url)\n self.assertEqual(response.status_code, 403)", "def _check_cost(self, cr, uid, ids, context=None):\n for enrich in self.browse(cr, uid, ids, context=context):\n if enrich.amount <= 0:\n raise osv.except_osv(_('ValidateError'), _('The Cost Must Be Greater Than Zero!'))\n return True", "def write_authorize(cls, user, obj):\n if not obj.delivery.deadline.assignment_group.is_examiner(user):\n raise PermissionDenied()", "def test_get_non_owner(self):\n another_user = CustomUser(id=101, email='[email protected]', is_active=True)\n another_user.set_password('testpassword')\n another_user.save()\n self.client.login(email='[email protected]', password='testpassword')\n\n url = reverse('notification',\n kwargs={'way_id': self.notification.way_id, 'notification_id': self.notification.id})\n response = self.client.get(url)\n self.assertEqual(response.status_code, 403)", "def avoid_lockouts():\n db = get_db()\n if db.count_admins()[0][0] <= 2:\n session[\"last_error\"] = \"There must always be at least two administrators.\"\n return False\n return True", "def test_can_not_exceed_quota(self):\n create_test_booking(self.user, self.first_day, 8, facility='g')\n create_test_booking(self.user, self.first_day, 9, facility='0')\n create_test_booking(self.user, self.first_day, 10, facility='g')\n create_test_booking(self.user, self.first_day, 11, facility='h')\n create_test_booking(self.user, self.first_day, 12, facility='h')\n create_test_booking(self.user, self.first_day, 13, facility='g')\n 
create_test_booking(self.user, self.first_day, 14, facility='x')\n create_test_booking(self.user, self.first_day, 15, facility='y')\n create_test_booking(self.user, self.first_day, 16, facility='g')\n create_test_booking(self.user, self.first_day, 17, facility='g')\n\n date = datetime(2030, 1, 1, 8)\n\n response = self.client.post(\n reverse('bookings', kwargs={'facility': 'g'}), {'book': str(date.timestamp())})\n\n context = response.context\n bookings = context[\"bookings\"]\n\n self.assertEqual(response.status_code, 403)\n self.assertEqual(context[\"quota\"], 0)\n self.assertEqual(type(context[\"info\"]), QuotaExceededAlert)", "def test_buyTicket_EmptiesWallet():\n old_venue_balance = testVenue.wallet\n assert testUser1.buyTicket(testTicket1)\n assert testUser1.inventory[-1] == testTicket1\n assert not testTicket1.for_sale\n assert testUser1.wallet == 0\n assert testVenue.wallet == old_venue_balance + testTicket1.list_price", "def test_not_creator_cannot_update(self):\n\n logged_user = utils.create_user_and_authenticate(self)\n self.group.users.add(logged_user)\n expected_url = reverse('my_groups_view')\n\n utils.test_cannot_access(self, self.url,\n expected_url=expected_url,\n data=self.data)", "def user_added_credit(self):\n return (self.user.Credit > 0)", "def test_auth_sharable_cannot_share(self):\n self.do_sharable(False, 'pattieblack', FakeMembership(False),\n tenant='froggy')", "def test_requester_is_no_admin(self) -> None:\n\n channel = self.make_request(\n \"GET\",\n self.url,\n access_token=self.other_user_tok,\n )\n\n self.assertEqual(403, channel.code, msg=channel.json_body)\n self.assertEqual(Codes.FORBIDDEN, channel.json_body[\"errcode\"])", "def test_requester_is_no_admin(self) -> None:\n\n channel = self.make_request(\n \"GET\",\n self.url,\n access_token=self.other_user_tok,\n )\n\n self.assertEqual(403, channel.code, msg=channel.json_body)\n self.assertEqual(Codes.FORBIDDEN, channel.json_body[\"errcode\"])", "def test_listing_from_wall_when_blocked_some_users(self):", "def testInsufficientCash(self):\n\n bid_move = self._move()\n context = self._context()\n context.players[0].cash = 200\n bfpc = BiddingForPrivateCompany()\n\n self.assertFalse(bfpc.run(bid_move, context), bfpc.errors())", "def _advance_to_pending(self):\n if all(signup.status != GameSignup.REGISTERED for signup in self.signups.all()):\n try:\n with transaction.atomic():\n self.status = self.PENDING\n self._create_characters()\n self.save()\n except DatabaseError:\n pass\n else:\n raise ValidationError('All user signups must be accepted, rejected, or withdrawn before continuing.')", "def issue_ticket(database, user):\n try:\n # check if user is an officer\n c = database.cursor()\n c.execute('SELECT utype FROM users WHERE uid = ?', (user, ))\n user_type = c.fetchone()[0]\n\n # If user is an officer \n if user_type == 'o':\n reg_num = int(input(\"Registration number: \"))\n c.execute(\"\"\"SELECT p.fname, p.lname, v.make, v.model, v.year, v.color FROM registrations r JOIN\n persons p ON (r.fname, r.lname) = (p.fname, p.lname) JOIN vehicles v ON r.vin = v.vin WHERE r.regno = ?\"\"\",(reg_num,))\n result = c.fetchone()\n fname = result[0]\n lname = result[1]\n make = result[2]\n model = result[3]\n year = result[4]\n color = result[5]\n print(\"\\n--------------------------\\nInformation\\n--------------------------\\n\")\n print(\"First Name: \", fname)\n print(\"Last Name: \", lname)\n print(\"Make: \", make)\n print(\"Model: \", model)\n print(\"Year: \", year)\n print(\"Color: \", color)\n\n 
print(\"\\n-------------------------\\nTicket the registra: \\n------------------------\\n\")\n violation_date = str(input(\"Violation Date: \")) # if not provided, today's date\n if violation_date == \"\":\n violation_date = datetime.today().strftime('%Y-%m-%d')\n violation_text = str(input(\"violation Text: \"))\n amount = str(input(\"Amount: \"))\n tno = randrange(1001, 9867699)\n\n c.execute(q.insert_into_tickets, (tno, reg_num, amount, violation_text, violation_date))\n\n database.commit()\n print(pm.all_done)\n # if user is not an officer\n else:\n print(pm.for_officers_only)\n sys.exit()\n except:\n print(pm.something_went_wrong)\n sys.exit()", "def test_redemption_denied_unpaid(\n self, voucher: Voucher, counter: int, extra_tokens: int\n ) -> None:\n num_tokens = counter + extra_tokens\n issuer = unpaid_redemption()\n treq = treq_for_loopback_ristretto(issuer)\n redeemer = RistrettoRedeemer(treq, NOWHERE)\n random_tokens = redeemer.random_tokens_for_voucher(voucher, counter, num_tokens)\n d = redeemer.redeemWithCounter(\n voucher,\n counter,\n random_tokens,\n )\n self.assertThat(\n Deferred.fromCoroutine(d),\n failed(\n AfterPreprocessing(\n lambda f: f.value,\n IsInstance(Unpaid),\n ),\n ),\n )", "def test_ticket_not_expired(self):\n st = ServiceTicketFactory()\n self.assertFalse(st.is_expired())", "def test_team_owner_rejected_request(self):\n data = {\n 'approved': False,\n }\n response = self.client.patch(reverse('api:user-team-requests-detail',\n kwargs={'pk': self.user_team_request.id}),\n data=data)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(len(mail.outbox), 1)\n team_request = UserTeamRequest.objects.get(id=self.user_team_request.id)\n self.assertFalse(team_request.approved)\n self.assertTrue(team_request.email_was_send)", "def is_access_allowed(self, user_id):\n ### DATABASE CODE GOES HERE\n return False", "def authorizes(self, user):\n return self.owner == user or self.workers.filter(pk=user.id).exists()", "def test_delete_non_owner(self):\n another_user = CustomUser.objects.create(id=134, email='[email protected]', is_active=True)\n another_user.set_password('qwerty12345')\n another_user.save()\n\n self.client.login(email='[email protected]', password='qwerty12345')\n\n url = reverse('notification',\n kwargs={'way_id': self.notification.way_id, 'notification_id': 87876})\n\n response = self.client.delete(url)\n\n self.assertEqual(response.status_code, 403)", "def test_auth_private_unowned(self):\n self.do_visible(False, 'pattieblack', False, tenant='froggy')", "def test_request_cancel_already_pending_cancel(self):\n self.braintree_customer.subscription_id = \"1234\"\n self.braintree_customer.pending_cancel = True\n self.braintree_customer.save()\n\n self.assertFalse(SubscriptionManager.request_cancel(self.braintree_customer))\n self.assertTrue(self.braintree_customer.pending_cancel)", "def can_approve(self, user, **data):\n raise Return(False)", "def billing_agent_required(func):\n\n def wrapper(request, *args, **kwargs):\n\n if not base_check(request):\n return redirect('{0}?next={1}'.format(reverse('core_login'), request.path))\n\n agent_for = models.BillingAgent.objects.filter(users__id__exact=request.user.pk)\n\n if not agent_for and not request.user.is_staff:\n raise PermissionDenied\n\n return func(request, *args, **kwargs)\n\n return wrapper", "def test_tally_no_candidates(self):\n self.init_elect_types()\n\n userA = models.User(\n name = \"UserA\",\n email = \"[email protected]\",\n password = \"asdf\")\n\n 
session.add(userA)\n session.commit()\n\n electionA = models.Election(\n title = \"Election A\",\n admin_id = userA.id)\n\n session.add(electionA)\n session.commit()\n\n raceA = models.Race(\n title = \"Race A\",\n election_id = electionA.id\n )\n\n session.add(raceA)\n session.commit()\n\n with self.assertRaises(NoCandidates):\n self.wta.check_race(raceA.id)\n\n with self.assertRaises(NoCandidates):\n self.proportional.check_race(raceA.id)\n\n with self.assertRaises(NoCandidates):\n self.schulze.check_race(raceA.id)", "def test_requester_is_no_admin(self) -> None:\n\n channel = self.make_request(\n \"DELETE\",\n self.url,\n access_token=self.other_user_tok,\n )\n\n self.assertEqual(403, channel.code, msg=channel.json_body)\n self.assertEqual(Codes.FORBIDDEN, channel.json_body[\"errcode\"])", "def test_post_comment_to_project_chat_by_blocked_user_fails(self):\n # setup\n self.test_user = return_canned_user(username=\"test_user\", id=33333)\n self.test_user.create()\n self.test_user.role = UserRole.READ_ONLY.value\n # action\n response = self.client.post(\n self.endpoint_url,\n headers={\"Authorization\": generate_encoded_token(self.test_user.id)},\n json={\"message\": TEST_MESSAGE},\n )\n response_body = response.get_json()\n self.assertEqual(response.status_code, 403)\n self.assertEqual(response_body[\"Error\"], \"User is on read only mode\")\n self.assertEqual(response_body[\"SubCode\"], \"ReadOnly\")", "def credit_deliverer():\n return True", "def test_user_not_authorized(self):\n response = self.client.post(self.url)\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def _warn_and_lock_if_needed(self, transaction: Transaction) -> None:\n budget = self.budget_manager.get_budget(transaction.budget_category)\n exceeded_ratio = budget.exceeded_ratio\n if exceeded_ratio > 1:\n self._notify_exceeded_budget(budget)\n self._lock_budget(budget)\n self.print_transactions_for_review(budget)\n if self.budget_manager.no_locked_budgets >= 2:\n self._locked = True\n print('YOUR BANK ACCOUNT HAS BEEN LOCKED!')\n elif exceeded_ratio > 0.5:\n self._warn_nearing_exceed_budget(budget, 50)\n self.print_transactions_for_review(budget)", "def test_validate_ticket_consumed_ticket(self):\n st = ServiceTicketFactory(consume=True)\n with self.assertRaises(InvalidTicket):\n ServiceTicket.objects.validate_ticket(st.ticket, self.url)" ]
[ "0.69946736", "0.6597223", "0.64734346", "0.6470661", "0.63877785", "0.6298837", "0.62688833", "0.62648696", "0.62245417", "0.61735904", "0.6159603", "0.6098433", "0.6039654", "0.60156566", "0.600577", "0.5985433", "0.5973899", "0.5934116", "0.59265006", "0.59215", "0.5908241", "0.58887154", "0.5870351", "0.5846558", "0.583821", "0.58281153", "0.58258164", "0.58253825", "0.58220494", "0.5781086", "0.5778248", "0.5745033", "0.57366616", "0.5731006", "0.57139844", "0.56977", "0.5693894", "0.56843245", "0.568061", "0.5673939", "0.5652478", "0.56463337", "0.56445664", "0.5630063", "0.56184274", "0.5615372", "0.5606033", "0.5602953", "0.56002647", "0.5594693", "0.5592379", "0.55883014", "0.5578319", "0.55750126", "0.5568893", "0.555663", "0.5551681", "0.5543763", "0.554341", "0.55371505", "0.5529137", "0.55209935", "0.5519084", "0.5517571", "0.551745", "0.5515841", "0.55121744", "0.5511644", "0.5511425", "0.5506222", "0.5502651", "0.5494286", "0.549269", "0.5492283", "0.54875773", "0.54802626", "0.5468", "0.54555416", "0.54555416", "0.5451062", "0.54360694", "0.543016", "0.5429998", "0.5429202", "0.54195535", "0.5412522", "0.54001844", "0.53959894", "0.5382902", "0.53806335", "0.5377812", "0.53760064", "0.5366356", "0.53636837", "0.53631276", "0.53611565", "0.5352205", "0.53514796", "0.53449094", "0.5344562" ]
0.6947074
1
Ensure admin can't credit invalid tickets to a user
def test_credit_ticket_not_int(self):
        user = UserFactory()
        self.assertEqual(user.tickets, 1)
        nb_tickets_to_add = 'this is not an int'
        data = {
            'nb_tickets': nb_tickets_to_add,
        }

        self.client.force_authenticate(user=self.admin)
        response = self.client.post(
            reverse(
                'user-credit-tickets',
                kwargs={'pk': user.id},
            ),
            data,
            format='json',
        )
        self.assertEqual(
            response.status_code,
            status.HTTP_400_BAD_REQUEST,
        )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_credit_ticket_as_admin(self):\n user = UserFactory()\n self.assertEqual(user.tickets, 1)\n nb_tickets_to_add = 5\n data = {\n 'nb_tickets': nb_tickets_to_add,\n }\n\n self.client.force_authenticate(user=self.admin)\n response = self.client.post(\n reverse(\n 'user-credit-tickets',\n kwargs={'pk': user.id},\n ),\n data,\n format='json',\n )\n self.assertEqual(\n response.status_code,\n status.HTTP_200_OK,\n )\n\n self.assertEqual(\n User.objects.get(pk=user.id).tickets,\n 1 + nb_tickets_to_add\n )", "def test_credit_ticket_negative_int(self):\n user = UserFactory()\n self.assertEqual(user.tickets, 1)\n nb_tickets_to_add = -5\n data = {\n 'nb_tickets': nb_tickets_to_add,\n }\n\n self.client.force_authenticate(user=self.admin)\n response = self.client.post(\n reverse(\n 'user-credit-tickets',\n kwargs={'pk': user.id},\n ),\n data,\n format='json',\n )\n self.assertEqual(\n response.status_code,\n status.HTTP_400_BAD_REQUEST,\n )", "def test_credit_ticket_as_user(self):\n user = UserFactory()\n self.assertEqual(user.tickets, 1)\n nb_tickets_to_add = 5\n data = {\n 'nb_tickets': nb_tickets_to_add,\n }\n\n self.client.force_authenticate(user=self.user)\n response = self.client.post(\n reverse(\n 'user-credit-tickets',\n kwargs={'pk': user.id},\n ),\n data,\n format='json',\n )\n self.assertEqual(\n response.status_code,\n status.HTTP_403_FORBIDDEN,\n )", "def test_validate_ticket_invalid_ticket(self):\n with self.assertRaises(InvalidTicket):\n ProxyGrantingTicket.objects.validate_ticket('12345', 'https://www.example.com')", "async def ticket_add(self, ctx, user: discord.Member):\n guild_settings = await self.config.guild(ctx.guild).all()\n is_admin = await is_admin_or_superior(self.bot, ctx.author) or any(\n [ur.id in guild_settings[\"supportroles\"] for ur in ctx.author.roles]\n )\n must_be_admin = not guild_settings[\"usercanmodify\"]\n\n if not is_admin and must_be_admin:\n await ctx.send(\"Only Administrators can add/remove other users to tickets.\")\n return\n elif not is_admin:\n author = ctx.author\n author_id = author.id\n elif is_admin:\n # Since the author isn't specified, and it's an admin, we need to guess on who\n # the author is\n inverted = {}\n for author_id, tickets in guild_settings[\"created\"].items():\n for ticket in tickets:\n inverted[ticket[\"channel\"]] = author_id\n try:\n author = ctx.guild.get_member(int(inverted[ctx.channel.id]))\n if author:\n author_id = author.id\n else:\n author_id = int(inverted[ctx.channel.id])\n except KeyError:\n author = ctx.author\n author_id = author.id\n\n index = None\n\n if not guild_settings[\"created\"][str(author_id)]:\n await ctx.send(\"You don't have any open tickets.\")\n return\n elif len(guild_settings[\"created\"][str(author_id)]) == 1:\n index = 0\n else:\n for i, ticket in enumerate(guild_settings[\"created\"][str(author_id)]):\n if ticket[\"channel\"] == ctx.channel.id:\n index = i\n break\n\n if index is None:\n await ctx.send(\n \"You have multiple tickets open. 
\"\n \"Please run this command in the ticket channel you wish to edit.\"\n )\n return\n\n channel = self.bot.get_channel(guild_settings[\"created\"][str(author_id)][index][\"channel\"])\n\n if user.id in guild_settings[\"created\"][str(author_id)][index][\"added\"]:\n await ctx.send(\"That user is already added.\")\n return\n\n adding_is_admin = await is_admin_or_superior(self.bot, user) or any(\n [ur.id in guild_settings[\"supportroles\"] for ur in user.roles]\n )\n\n if adding_is_admin:\n await ctx.send(\"You cannot add a user in support or admin team.\")\n return\n\n channel = self.bot.get_channel(guild_settings[\"created\"][str(author_id)][index][\"channel\"])\n if not channel:\n await ctx.send(\"The ticket channel has been deleted.\")\n return\n\n try:\n await channel.set_permissions(user, send_messages=True, read_messages=True)\n except discord.Forbidden:\n await ctx.send(\n \"The Manage Permissions channel for me has been removed. \"\n \"I am unable to modify this ticket.\"\n )\n return\n\n async with self.config.guild(ctx.guild).created() as created:\n created[str(author_id)][index][\"added\"].append(user.id)\n\n await ctx.send(f\"{user.mention} has been added to the ticket.\")", "def test_validate_ticket_consumed_ticket(self):\n pgt = ProxyGrantingTicketFactory(consume=True)\n with self.assertRaises(InvalidTicket):\n ProxyGrantingTicket.objects.validate_ticket(pgt.ticket, 'https://www.example.com')", "def test_validate_ticket_no_ticket(self):\n with self.assertRaises(InvalidRequest):\n ProxyGrantingTicket.objects.validate_ticket(None, 'https://www.example.com')", "def test_validate_ticket_expired_ticket(self):\n pgt = ProxyGrantingTicketFactory(expire=True)\n with self.assertRaises(InvalidTicket):\n ProxyGrantingTicket.objects.validate_ticket(pgt.ticket, 'https://www.example.com')", "async def ticket_remove(self, ctx, user: discord.Member):\n guild_settings = await self.config.guild(ctx.guild).all()\n is_admin = await is_admin_or_superior(self.bot, ctx.author) or any(\n [ur.id in guild_settings[\"supportroles\"] for ur in ctx.author.roles]\n )\n must_be_admin = not guild_settings[\"usercanmodify\"]\n\n if not is_admin and must_be_admin:\n await ctx.send(\"Only Administrators can add/remove other users to tickets.\")\n return\n elif not is_admin:\n author = ctx.author\n author_id = author.id\n elif is_admin:\n # Since the author isn't specified, and it's an admin, we need to guess on who\n # the author is\n inverted = {}\n for author_id, tickets in guild_settings[\"created\"].items():\n for ticket in tickets:\n inverted[ticket[\"channel\"]] = author_id\n try:\n author = ctx.guild.get_member(int(inverted[ctx.channel.id]))\n if author:\n author_id = author.id\n else:\n author_id = int(inverted[ctx.channel.id])\n except KeyError:\n author = ctx.author\n author_id = author.id\n\n index = None\n\n if not guild_settings[\"created\"][str(author_id)]:\n await ctx.send(\"You don't have any open tickets.\")\n return\n elif len(guild_settings[\"created\"][str(author_id)]) == 1:\n index = 0\n else:\n for i, ticket in enumerate(guild_settings[\"created\"][str(author_id)]):\n if ticket[\"channel\"] == ctx.channel.id:\n index = i\n break\n\n if index is None:\n await ctx.send(\n \"You have multiple tickets open. 
\"\n \"Please run this command in the ticket channel you wish to edit.\"\n )\n return\n\n if user.id not in guild_settings[\"created\"][str(author_id)][index][\"added\"]:\n await ctx.send(\"That user is not added.\")\n return\n\n removing_is_admin = await is_admin_or_superior(self.bot, user) or any(\n [ur.id in guild_settings[\"supportroles\"] for ur in user.roles]\n )\n\n if removing_is_admin:\n await ctx.send(\"You cannot remove a user in support or admin team.\")\n return\n\n channel = self.bot.get_channel(guild_settings[\"created\"][str(author_id)][index][\"channel\"])\n if not channel:\n await ctx.send(\"The ticket channel has been deleted.\")\n\n try:\n await channel.set_permissions(user, send_messages=False, read_messages=False)\n except discord.Forbidden:\n await ctx.send(\n \"The Manage Permissions channel for me has been removed. \"\n \"I am unable to modify this ticket.\"\n )\n return\n\n async with self.config.guild(ctx.guild).created() as created:\n created[str(author_id)][index][\"added\"].remove(user.id)\n\n await ctx.send(f\"{user.mention} has been removed from the ticket.\")", "def test_validate_ticket_does_not_exist(self):\n ticket = 'PGT-0000000000-aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa'\n with self.assertRaises(InvalidTicket):\n ProxyGrantingTicket.objects.validate_ticket(ticket, 'https://www.example.com')", "def test_validate_ticket(self):\n pgt = ProxyGrantingTicketFactory()\n ticket = ProxyGrantingTicket.objects.validate_ticket(pgt.ticket, 'https://www.example.com')\n self.assertEqual(ticket, pgt)\n self.assertFalse(ticket.is_consumed())", "def test_validate_ticket_invalid_ticket(self):\n with self.assertRaises(InvalidTicket):\n ServiceTicket.objects.validate_ticket('12345', self.url)", "def issue_ticket(database, user):\n try:\n # check if user is an officer\n c = database.cursor()\n c.execute('SELECT utype FROM users WHERE uid = ?', (user, ))\n user_type = c.fetchone()[0]\n\n # If user is an officer \n if user_type == 'o':\n reg_num = int(input(\"Registration number: \"))\n c.execute(\"\"\"SELECT p.fname, p.lname, v.make, v.model, v.year, v.color FROM registrations r JOIN\n persons p ON (r.fname, r.lname) = (p.fname, p.lname) JOIN vehicles v ON r.vin = v.vin WHERE r.regno = ?\"\"\",(reg_num,))\n result = c.fetchone()\n fname = result[0]\n lname = result[1]\n make = result[2]\n model = result[3]\n year = result[4]\n color = result[5]\n print(\"\\n--------------------------\\nInformation\\n--------------------------\\n\")\n print(\"First Name: \", fname)\n print(\"Last Name: \", lname)\n print(\"Make: \", make)\n print(\"Model: \", model)\n print(\"Year: \", year)\n print(\"Color: \", color)\n\n print(\"\\n-------------------------\\nTicket the registra: \\n------------------------\\n\")\n violation_date = str(input(\"Violation Date: \")) # if not provided, today's date\n if violation_date == \"\":\n violation_date = datetime.today().strftime('%Y-%m-%d')\n violation_text = str(input(\"violation Text: \"))\n amount = str(input(\"Amount: \"))\n tno = randrange(1001, 9867699)\n\n c.execute(q.insert_into_tickets, (tno, reg_num, amount, violation_text, violation_date))\n\n database.commit()\n print(pm.all_done)\n # if user is not an officer\n else:\n print(pm.for_officers_only)\n sys.exit()\n except:\n print(pm.something_went_wrong)\n sys.exit()", "def test_validate_ticket_renew_secondary(self):\n st = ServiceTicketFactory()\n with self.assertRaises(InvalidTicket):\n ServiceTicket.objects.validate_ticket(st.ticket, self.url,\n renew=True)", "def 
test_validate_ticket_consumed_ticket(self):\n st = ServiceTicketFactory(consume=True)\n with self.assertRaises(InvalidTicket):\n ServiceTicket.objects.validate_ticket(st.ticket, self.url)", "def check_credit(self):\n self.ensure_one()\n getattr(self, '%s_check_credit' % self.provider, lambda: None)()", "def test_user_cannot_unlock_hint():\n app = create_ctfd()\n with app.app_context():\n with app.test_client() as client:\n register_user(app, name=\"user1\", email=\"[email protected]\")\n\n chal = gen_challenge(app.db, value=100)\n chal_id = chal.id\n\n flag = gen_flag(app.db, chal=chal.id, flag='flag')\n\n hint = gen_hint(db, chal_id, cost=10)\n hint_id = hint.id\n\n client = login_as_user(app, name=\"user1\", password=\"password\")\n\n with client.session_transaction() as sess:\n data = {\n \"nonce\": sess.get('nonce')\n }\n r = client.post('/hints/{}'.format(hint_id), data=data)\n resp = json.loads(r.data.decode('utf8'))\n assert resp.get('errors') == 'Not enough points'\n destroy_ctfd(app)", "def test_user_cannot_unlock_hint():\n app = create_ctfd()\n with app.app_context():\n with app.test_client():\n register_user(app, name=\"user1\", email=\"[email protected]\")\n\n chal = gen_challenge(app.db, value=100)\n chal_id = chal.id\n\n gen_flag(app.db, challenge_id=chal.id, content=\"flag\")\n\n hint = gen_hint(db, chal_id, cost=10)\n hint_id = hint.id\n\n client = login_as_user(app, name=\"user1\", password=\"password\")\n\n with client.session_transaction():\n r = client.get(\"/api/v1/hints/{}\".format(hint_id))\n resp = r.get_json()\n assert resp[\"data\"].get(\"content\") is None\n assert resp[\"data\"].get(\"cost\") == 10\n destroy_ctfd(app)", "def avoid_lockouts():\n db = get_db()\n if db.count_admins()[0][0] <= 2:\n session[\"last_error\"] = \"There must always be at least two administrators.\"\n return False\n return True", "def test_validate_ticket_expired_ticket(self):\n st = ServiceTicketFactory(expire=True)\n with self.assertRaises(InvalidTicket):\n ServiceTicket.objects.validate_ticket(st.ticket, self.url)", "def test_not_permitted(self):\r\n test_user_client, test_user = self.create_non_staff_authed_user_client()\r\n CourseEnrollment.enroll(test_user, self.course.id)\r\n response = test_user_client.get(self.orphan_url)\r\n self.assertEqual(response.status_code, 403)\r\n response = test_user_client.delete(self.orphan_url)\r\n self.assertEqual(response.status_code, 403)", "def test_negative_conditions(self):\r\n outline_url = reverse_course_url('course_handler', self.course.id)\r\n # register a non-staff member and try to delete the course branch\r\n non_staff_client, _ = self.create_non_staff_authed_user_client()\r\n response = non_staff_client.delete(outline_url, {}, HTTP_ACCEPT='application/json')\r\n self.assertEqual(response.status_code, 403)", "def test_create_ticket_no_expires(self):\n st = ServiceTicket.objects.create_ticket(user=self.user)\n self.assertTrue(st.expires > now())", "def validate_ticket(self, req, ticket):\n\n res = []\n\n # the ticket we receive is a temporary not-yet-commited ticket\n # and contains fields set that weren't changed as well,\n # retrieve the original one so we can compare\n ot = model.Ticket(self.env, ticket.id)\n\n self.env.log.debug('validate_ticket: %s' % ticket.id)\n\n # refuse changes to dup_count and dups fields\n new = ticket.values.get('dups', None)\n if new is not None and new != ot.values.get('dups', None):\n res.append(('dups', 'Cannot manually change the dups field.'))\n return res\n\n new = ticket.values.get('dup_count', 
None)\n if new is not None and new != ot.values.get('dup_count', None):\n res.append(('dup_count',\n 'Cannot manually change the dup_count field.'))\n return res\n\n new_id = ticket.values.get('dup_of', None)\n\n # allow unsetting\n if not new_id:\n self.env.log.debug(\"validate_ticket: dup_of is None, so fine\")\n return res\n\n # care only about tickets that have dup_of changes\n old = ot.values.get('dup_of', None)\n if old == new_id:\n self.env.log.debug(\"validate_ticket: no dup_of changes\")\n return res\n\n # refuse to change closed tickets\n if ticket.values['status'] == u'closed':\n if ticket.values['resolution'] == u'duplicate':\n self.env.log.debug(\n \"validate_ticket: allowing unduplicate to get validated\")\n # but still subject to other rules\n else:\n self.env.log.debug(\n \"validate_ticket: refusing to dup closed ticket #%s\" %\n ticket.id)\n res.append(('dup_of',\n 'Ticket is already closed, and not as a duplicate.'))\n return res\n\n # refuse to dup_of and reopen ticket in one go\n if ot.values['status'] == u'closed' \\\n and ticket.values['status'] == u'reopened':\n self.env.log.debug(\"validate_ticket: \"\n \"refusing to dup_of and reopen ticket #%s\" %\n ticket.id)\n res.append(('status',\n 'If you want to duplicate an already closed ticket, '\n 'only change dup_of without reopening the ticket.'))\n return res\n\n # warn when it starts with #\n if len(new_id) > 0 and new_id[0] == '#':\n res.append(('dup_of',\n 'Please enter the ticket number without a leading #.'))\n return res\n\n # refuse to dup to non-existing tickets; this raises a TracError\n # if it doesn't exist\n # coderanger says a Ticket can have anything with a __str__ method\n # as id\n # except in the 0.10.5dev branch, a non-existing ticket id raises\n # a TracError with %d in the format string (fixed on TRUNK),\n # so make it an int here\n master = model.Ticket(self.env, int(new_id))\n\n # refuse to dup to self\n if str(new_id) == str(ticket.id):\n self.env.log.debug(\"validate_ticket: \"\n \"cowardly refusing to dup to self #%s\" % ticket.id)\n res.append(('dup_of',\n 'Cannot duplicate a ticket to itself.'))\n return res\n\n self.env.log.debug('validate_ticket: Validated ticket %s' % ticket.id)\n return res", "def cant(user, action):\n\n return not can(user, action)", "def run_ticket_validation(user_id, access_token, nonce):\n token_check_url = 'https://graph.oculus.com/user_nonce_validate?access_token={access_token}&nonce={nonce}&user_id={user_id}'\n url = token_check_url.format(user_id=user_id, access_token=access_token, nonce=nonce)\n\n try:\n ret = requests.post(url, headers={'Accept': 'application/json'})\n except requests.exceptions.RequestException as e:\n log.warning(\"Oculus authentication request failed: %s\", e)\n abort_unauthorized(\"Oculus ticket validation failed. Can't reach Oculus platform.\")\n\n if ret.status_code != 200 or not ret.json().get('is_valid', False):\n log.warning(\"Failed Oculus authentication. 
Response code %s: %s\", ret.status_code, ret.json())\n abort_unauthorized(\"User {} not authenticated on Oculus platform.\".format(user_id))\n\n return user_id", "def admin_reject(user):\n if user.comments in (None or \"\"):\n return\n\n subject = \"ECE/CIS Account - Account Application rejected for %s\" % user.username\n application = \"https://www.eecis.udel.edu/NewAccount/\"\n helprequest = \"https://www.eecis.udel.edu/service\"\n sponsor = \"%[email protected]\" % user.sponsor\n \n message = \"Your ECE/CIS Account has been rejected by ECE/CIS faculty adminstrators.\\n\" % user.sponsor\n message += \"The reason given for rejection was:\\n\\n%s\\n\\n\" % user.comments\n message += \"You may re-apply with corrected information at %s\\n\" % application\n message += \"Please don't reply to this email. If have any questions, please \\n\"\n message += \"please post a ticket as an outsider at %s\" % helprequest\n message += \"-- ECE\\CIS Labstaff\"\n\n\n send('[email protected]', 'ECE/CIS Account System', \\\n [user.email, sponsor], subject, message, MAILHOST)", "def error():\r\n raise RuntimeError('admin ticket generator at your service')", "def test_course_does_not_expire_for_verified_user(self):\n course = CourseFactory.create(start=THREE_YEARS_AGO)\n url = course_home_url(course)\n\n user = UserFactory.create(password=self.TEST_PASSWORD)\n CourseEnrollment.enroll(user, self.course.id, mode=CourseMode.VERIFIED)\n Schedule.objects.update(start_date=THREE_YEARS_AGO)\n\n # ensure that the user who has indefinite access\n self.client.login(username=user.username, password=self.TEST_PASSWORD)\n response = self.client.get(url)\n assert response.status_code == 200, 'Should not expire access for user'", "def test_noTicket():\n assert testUser1.buyTicket(None) == False", "def raise_not_editable(self, viewer):\n if not self.id or viewer.has_perm(\"bookwyrm.create_invites\"):\n return\n raise PermissionDenied()", "def allowed(self, user, amount):\n return True", "def test_validate_ticket_no_ticket(self):\n with self.assertRaises(InvalidRequest):\n ServiceTicket.objects.validate_ticket(None, self.url)", "def test_buyTicket_FreeTicket():\n old_venue_balance = testVenue.wallet\n assert testUser4.buyTicket(testTicket4)\n assert testUser4.inventory[-1] == testTicket4\n assert not testTicket4.for_sale\n assert testUser4.wallet == 0\n assert testVenue.wallet == old_venue_balance", "def test_no_admins_registered(self):\n new_user = self.registration_profile.objects.create_inactive_user(\n site=Site.objects.get_current(), **self.user_info)\n\n with self.assertRaises(ImproperlyConfigured):\n self.registration_profile.objects.send_admin_approve_email(\n new_user, Site.objects.get_current())", "def test_validate_ticket_no_service(self):\n pgt = ProxyGrantingTicketFactory()\n with self.assertRaises(InvalidRequest):\n ProxyGrantingTicket.objects.validate_ticket(pgt.ticket, None)", "def test_validate_ticket_invalid_service(self):\n pgt = ProxyGrantingTicketFactory()\n with self.assertRaises(InvalidService):\n ProxyGrantingTicket.objects.validate_ticket(pgt.ticket, 'http://www.example.org')", "def AdminTicket(ticket):\n try:\n data, = xmlrpclib.loads(ticket)[0]\n name = data['slivers'][0]['name']\n if data != None:\n deliver_ticket(data)\n logger.log('api_calls: Admin Ticket delivered for %s' % name)\n Create(database.db.get(name))\n except Exception, err:\n raise xmlrpclib.Fault(102, 'Ticket error: ' + str(err))", "def test_delete_invalid_tickets(self):\n ServiceTicketFactory() # Should not be deleted\n expired = 
ServiceTicketFactory(expire=True)\n consumed = ServiceTicketFactory(consume=True)\n referenced = ServiceTicketFactory(consume=True) # Should not be deleted\n ProxyGrantingTicketFactory(granted_by_st=referenced)\n ServiceTicket.objects.delete_invalid_tickets()\n\n self.assertEqual(ServiceTicket.objects.count(), 2)\n self.assertRaises(ServiceTicket.DoesNotExist,\n ServiceTicket.objects.get,\n ticket=expired.ticket)\n self.assertRaises(ServiceTicket.DoesNotExist,\n ServiceTicket.objects.get,\n ticket=consumed.ticket)", "def validate_ownership(item, user_id):\n if item.user_id != user_id:\n raise Forbidden('You are not allowed to modify this item.')", "def get_everyone_denied(self):", "def add_ticket(self, user):\n profile = user.get_profile()\n if profile.available_tickets() <= 0:\n raise Exception(\"This user does not have any tickets to allocate.\")\n \n ticket = RaffleTicket(raffle_prize=self, user=user)\n ticket.save()", "def verify_ticket(self, ticket):\n raise NotImplementedError()", "def validate_change(ticket):\n # First ensure topic line mentions tickets, and pull them out.\n topic = COMMIT_MSG.split('\\n', 1)[0]\n fix_tickets = re.findall(\"[A-Z]{2,5}-[0-9]{1,6}\", topic)\n if len(fix_tickets) == 0:\n print \"\\n\\n\\n\\n\\n*********\\nERROR: commit message does not name a ticket!\"\n return False\n\n # Now get list of approved tickets from master ticket, and ensure\n # all \"fixed\" tickets are approved.\n approved_tickets = get_approved_tickets(ticket)\n for tick in fix_tickets:\n if not tick in approved_tickets:\n print \"\\n\\n\\n\\n\\n*********\\nERROR: ticket {} is not approved (see approval ticket {})\".format(\n tick, ticket)\n return False\n return True", "def test_can_not_exceed_quota(self):\n create_test_booking(self.user, self.first_day, 8, facility='g')\n create_test_booking(self.user, self.first_day, 9, facility='0')\n create_test_booking(self.user, self.first_day, 10, facility='g')\n create_test_booking(self.user, self.first_day, 11, facility='h')\n create_test_booking(self.user, self.first_day, 12, facility='h')\n create_test_booking(self.user, self.first_day, 13, facility='g')\n create_test_booking(self.user, self.first_day, 14, facility='x')\n create_test_booking(self.user, self.first_day, 15, facility='y')\n create_test_booking(self.user, self.first_day, 16, facility='g')\n create_test_booking(self.user, self.first_day, 17, facility='g')\n\n date = datetime(2030, 1, 1, 8)\n\n response = self.client.post(\n reverse('bookings', kwargs={'facility': 'g'}), {'book': str(date.timestamp())})\n\n context = response.context\n bookings = context[\"bookings\"]\n\n self.assertEqual(response.status_code, 403)\n self.assertEqual(context[\"quota\"], 0)\n self.assertEqual(type(context[\"info\"]), QuotaExceededAlert)", "def test_check_ticket_12(self):\n self.tkt.data_add = {\"invalid\"}\n import_genome.check_ticket(\n self.tkt, type_set=self.type_set,\n description_field_set=self.description_field_set,\n eval_mode_set=self.eval_mode_set,\n id_dupe_set=set(), phage_id_dupe_set=set(),\n retain_set=self.retain_set, retrieve_set=self.retrieve_set,\n add_set=self.add_set, parse_set=self.parse_set)\n count = count_status(self.tkt, \"error\")\n self.assertEqual(count, 1)", "def test_buyTicket_AlreadySold():\n assert not testUser2.buyTicket(testTicket1)\n assert testTicket1 in testUser1.inventory\n assert testTicket1 not in testUser2.inventory\n assert not testTicket1.for_sale\n assert testUser2.wallet == 500", "def user_requested_access(user):\r\n user = 
CourseCreator.objects.get(user=user)\r\n if user.state != CourseCreator.GRANTED:\r\n user.state = CourseCreator.PENDING\r\n user.save()", "def test_buyTicket_NotForSale():\n old_venue_balance = testVenue.wallet\n assert not testUser2.buyTicket(testTicket2)\n assert testTicket2 not in testUser2.inventory\n assert not testTicket1.for_sale\n assert testUser2.wallet == 500\n assert testVenue.wallet == old_venue_balance", "def check_admin():\r\n if not current_user.is_admin:\r\n abort(403)", "def test_admin_update_user_taskrun(self):\r\n\r\n with self.flask_app.test_request_context('/'):\r\n user_taskrun = TaskRunFactory.create()\r\n\r\n assert self.mock_admin.id != user_taskrun.user.id\r\n assert_raises(Forbidden,\r\n getattr(require, 'taskrun').update,\r\n user_taskrun)", "def test_owner_edit_assessment_invalid(self):\n req, resp = data.get_assessment(self.contract['id'])\n response = self.user_01.put(self.assessment_report_url, req)\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def test_check_ticket_6(self):\n self.tkt.eval_mode = \"invalid\"\n import_genome.check_ticket(\n self.tkt, type_set=self.type_set,\n description_field_set=self.description_field_set,\n eval_mode_set=self.eval_mode_set,\n id_dupe_set=set(), phage_id_dupe_set=set(),\n retain_set=self.retain_set, retrieve_set=self.retrieve_set,\n add_set=self.add_set, parse_set=self.parse_set)\n count = count_status(self.tkt, \"error\")\n self.assertEqual(count, 1)", "def write_authorize(cls, user, obj):\n if not obj.delivery.deadline.assignment_group.is_examiner(user):\n raise PermissionDenied()", "def test_buyTicket_Valid_Paramaters():\n old_venue_balance = testVenue.wallet\n assert testUser3.buyTicket(testTicket3)\n assert testTicket3 in testUser3.inventory\n assert not testTicket3.for_sale\n assert testUser3.wallet == 950\n assert testVenue.wallet == old_venue_balance + testTicket3.list_price", "def test_resuableitem_invalid_vote(self):\n\n original_reusableitem = setup_public_reusable_item_1(self)\n data1 = submit_change_request_1(self, self.user_1)\n\n # user 2 now submits an invalid vote\n self.client.force_authenticate(user=self.user_2)\n\n data2 = {'vote': 'banana'}\n response = self.client.patch(get_reusable_item_1_url(self), data2, format='json')\n\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)", "def test_user_can_change_not_author(self):\n self.assertFalse(self.story.user_can_change(self.user2))", "def temporarily_allow_handler(update, _):\n global TEMPORARILY_ALLOWED\n user_id = update.message.chat.id\n if user_id == ADMIN_ID:\n TEMPORARILY_ALLOWED = True\n update.message.reply_text(\"Temprarily allowed!\")", "def test_update_self_fail(self):\n new_user = self.create_user('1')\n url = '/0/chefs/' + str(new_user.pk)\n\n headers = self.login()\n resp = self.client.put(url, **headers)\n self.assertInvalidCredentials(resp)", "def check_admin():\n\tif not current_user.is_admin:\n\t\tabort(403)", "def userreject_admin(user_id):\n\n # User objects list which includes list of all users which can be broken down into editors and sponsors\n # get individual user\n user = db.session.query(User).filter(User.id==user_id).first()\n # update status to approved\n user.user_status = 'rejected'\n # commit to database\n db.session.commit()\n\n return redirect(url_for('admin_bp.usersview_admin'))", "def test_validate_ticket_does_not_exist(self):\n ticket = 'ST-0000000000-aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa'\n with self.assertRaises(InvalidTicket):\n ServiceTicket.objects.validate_ticket(ticket, 
self.url)", "def test_check_ticket_5(self):\n self.tkt.description_field = \"invalid\"\n import_genome.check_ticket(\n self.tkt, type_set=self.type_set,\n description_field_set=self.description_field_set,\n eval_mode_set=self.eval_mode_set,\n id_dupe_set=set(), phage_id_dupe_set=set(),\n retain_set=self.retain_set, retrieve_set=self.retrieve_set,\n add_set=self.add_set, parse_set=self.parse_set)\n count = count_status(self.tkt, \"error\")\n self.assertEqual(count, 1)", "def test_can_not_cancel_past_block(self):\n date = datetime.now().replace(minute=0, second=0, microsecond=0) - timedelta(hours=1)\n\n response = self.client.post(\n reverse('bookings', kwargs={'facility': 'g'}), {'cancel': str(date.timestamp())})\n\n context = response.context\n bookings = context[\"bookings\"]\n\n self.assertEqual(response.status_code, 403)\n self.assertEqual(context[\"quota\"], settings.BOOKINGS_QUOTA)", "def check_admin():\n if not current_user.is_admin:\n abort(403)", "def check_admin():\n if not current_user.is_admin:\n abort(403)", "def test_put_non_owner(self):\n another_user = CustomUser.objects.create(id=1067, email='[email protected]', is_active=True)\n another_user.set_password('testpassword')\n another_user.save()\n\n self.client.login(email='[email protected]', password='testpassword')\n\n data = {\n 'week_day': 3\n }\n\n url = reverse('notification',\n kwargs={'way_id': self.notification.way_id, 'notification_id': self.notification.id})\n response = self.client.put(url, json.dumps(data, cls=DjangoJSONEncoder), content_type='application/json')\n self.assertEqual(response.status_code, 403)", "def test_ticket_not_expired(self):\n st = ServiceTicketFactory()\n self.assertFalse(st.is_expired())", "def test_not_creator_cannot_update(self):\n\n logged_user = utils.create_user_and_authenticate(self)\n self.group.users.add(logged_user)\n expected_url = reverse('my_groups_view')\n\n utils.test_cannot_access(self, self.url,\n expected_url=expected_url,\n data=self.data)", "def disallow_handler(update, _):\n global TEMPORARILY_ALLOWED\n user_id = update.message.chat.id\n if user_id == ADMIN_ID:\n TEMPORARILY_ALLOWED = False\n update.message.reply_text(\"Temprarily allowed disabled!\")", "def test_buyTicket_insufficientFunds():\n old_venue_balance = testVenue.wallet\n assert not testUser4.buyTicket(testTicket3)\n assert testTicket3 not in testUser4.inventory\n assert testTicket3.for_sale\n assert testUser4.wallet == 0\n assert testVenue.wallet == old_venue_balance", "def testUpdateAccessDenied(self):\n self.runPut(None, sequencer=self.hiseq2000.sodar_uuid, data=self.post_data)\n self.response_401()\n for user in (self.guest, self.norole, self.unrelated_owner):\n self.runPut(user, sequencer=self.hiseq2000.sodar_uuid, data=self.post_data)\n self.response_403()", "def validate(self,admin):\n\n rv=admin.helper.setAmount(admin.userName,\n 'ARS',self.actual+self.cnt)\n if rv is None:\n return False\n else:\n return True", "def test_upsert_other_users_template_as_user_raises_access_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.user1)\n with self.assertRaises(AccessControlError):\n template_api.upsert(\n self.fixture.user2_template, request=mock_request\n )", "def test_can_not_reserve_booked_block(self):\n booking_other = create_test_booking(self.someone, self.first_day, 11)\n\n response = self.client.post(\n reverse('bookings', kwargs={'facility': 'g'}), {'book': str(booking_other.date.timestamp())})\n\n context = response.context\n bookings = context[\"bookings\"]\n\n 
self.assertEqual(response.status_code, 403)\n self.assertEqual(context[\"quota\"], settings.BOOKINGS_QUOTA)\n\n self.assertEqual(type(context[\"info\"]), NotAllowedAlert)", "def test_update_no_customer(self):\n set_up_db()\n with self.assertRaises(ValueError):\n update_customer_credit(2, 5.50)", "def test_renew_user_pending_cancel(self):\n self.braintree_customer.active = True\n self.braintree_customer.pending_cancel = True\n self.braintree_customer.subscription_id = \"ValidSubscriptionID\"\n\n result = SubscriptionManager.renew(self.braintree_customer)\n self.assertEqual(\"ValidSubscriptionID\",result)\n self.assertFalse(self.braintree_customer.pending_cancel)", "def no_reason(message, db):\n #message.reply(Strings['GRANT_EXAMPLE'].format(db))\n try:\n hf.grant(message, db.lower(), \"[EXTENDING ACCESS TIME]\", False)\n except Exception as e:\n message._client.send_message(errors_channel, \"```{}```\".format(e))", "def test_create_missing_ticket(self):\n maint_params = {\n #'master_ticket' : '080102-00121',\n 'description' : 'do stuff',\n 'expedite_text' : 'do it faster',\n 'billing_text' : 'send me the bill',\n #'additional_duration_minutes': '60',\n 'service_type_id' : 1,\n 'employee_contact_id' : 1\n }\n response = self.app.post(url_for(controller='/maintenances', action='create'), params=maint_params, status=400)\n self.assertEqual(response.status, 400)", "def no_reason(message, db):\n #message.reply(Strings['GRANT_EXAMPLE'].format(db))\n try:\n hf.grant(message, db.lower(), \"[EXTENDING ACCESS TIME]\", True)\n except Exception as e:\n message._client.send_message(errors_channel, \"```{}```\".format(e))", "def test_cannot_update_user_with_blacklisted_token(self):\n resp = self.admin_create_user()\n reply = self.admin_login()\n token = reply['token']\n\n resp = self.client.delete(\n '/api/v1/logout',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'You are successfully logged out!')\n self.assertEqual(resp.status_code, 200)\n\n user = dict(\n name='Summer Lover',\n username='lover',\n password='Andela8',\n role='attendant'\n )\n resp = self.client.put(\n '/api/v1/users/2',\n content_type='application/json',\n data=json.dumps(user),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'Invalid Authentication, Please Login!')\n self.assertEqual(resp.status_code, 401)", "def test_update_issue_by_unauthenticated_user_fails(self):\n response = self.client.patch(\n self.url,\n json={\"description\": TEST_ISSUE_DESCRIPTION, \"name\": TEST_ISSUE_NAME},\n )\n response_json = response.get_json()\n self.assertEqual(response.status_code, 401)\n self.assertEqual(response_json[\"SubCode\"], \"InvalidToken\")", "def test_check_ticket_4(self):\n self.tkt.type = \"invalid\"\n import_genome.check_ticket(\n self.tkt, type_set=self.type_set,\n description_field_set=self.description_field_set,\n eval_mode_set=self.eval_mode_set,\n id_dupe_set=set(), phage_id_dupe_set=set(),\n retain_set=self.retain_set, retrieve_set=self.retrieve_set,\n add_set=self.add_set, parse_set=self.parse_set)\n count = count_status(self.tkt, \"error\")\n self.assertEqual(count, 1)", "def test_dont_cancel_for_events_with_no_cost(self, mock_tz):\n mock_tz.now.return_value = datetime(\n 2015, 2, 11, 10, tzinfo=dt_timezone.utc\n )\n self.ticketed_event.ticket_cost = 0\n self.ticketed_event.save()\n self.assertFalse(self.unpaid.cancelled)\n 
self.assertFalse(self.paid.cancelled)\n\n management.call_command('cancel_unpaid_ticket_bookings')\n # emails are sent to user per cancelled booking and studio once for all\n # cancelled bookings\n self.unpaid.refresh_from_db()\n self.paid.refresh_from_db()\n self.assertEqual(len(mail.outbox), 0)\n self.assertFalse(self.unpaid.cancelled)\n self.assertFalse(self.paid.cancelled)", "def test_reusableitem_vote_user_count_120_reject(self):\n\n original_reusableitem = setup_public_reusable_item_1(self)\n\n for index in range(3, 121):\n create_toptenlist(self, 'user_' + index.__str__(), index)\n reference_reusable_item(self, 'user_' + index.__str__(), self.reusableitem_1.id, 'toptenlist_' + index.__str__(), 0)\n\n # submit the change request\n data1 = submit_change_request_1(self, self.user_1)\n updated_reusableitem1 = ReusableItem.objects.get(pk=self.reusableitem_1.id)\n\n # users vote for\n for index in range(2, 5):\n self.client.force_authenticate(user=getattr(self, 'user_' + index.__str__()))\n response = self.client.patch(get_reusable_item_1_url(self), {'vote': 'yes'}, format='json')\n\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n\n # users vote against\n for index in range(5, 10):\n self.client.force_authenticate(user=getattr(self, 'user_' + index.__str__()))\n response = self.client.patch(get_reusable_item_1_url(self), {'vote': 'no'}, format='json')\n\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n\n updated_reusableitem2 = ReusableItem.objects.get(pk=self.reusableitem_1.id)\n\n # the change request should be resolved\n self.assertEqual(updated_reusableitem2.change_request, None)\n\n # it should be rejected\n history_entry = updated_reusableitem2.history[-1]\n self.assertEqual(history_entry['change_request_resolution'], 'rejected')", "def test_request_membership_form_with_an_invalid_user_id(self):\n pass", "def test_admin_cannot_add_item(self):\n response = self.client.get(\n '/self.base_url/sales/3/2',\n headers=dict(Authorization=\"Bearer \" + self.owner_token),\n content_type = 'application/json'\n )\n\n response_data = json.loads(response.data)\n self.assertEqual(response_data['message'],\"You cannot make a sale from an Admin account, Consider having an attendant account\")\n self.assertEqual(response.status_code,401)", "def test_can_not_cancel_current_block(self):\n date = datetime.now().replace(minute=0, second=0, microsecond=0)\n\n response = self.client.post(\n reverse('bookings', kwargs={'facility': 'g'}), {'cancel': str(date.timestamp())})\n\n context = response.context\n bookings = context[\"bookings\"]\n\n self.assertEqual(response.status_code, 403)\n self.assertEqual(context[\"quota\"], settings.BOOKINGS_QUOTA)", "def charge(self, expired):\n self.message += \\\n '------------------- EXPIRED -------------------\\n'\n for lo in expired:\n self.message += '------------------\\n'\n # try to charge, or eliminate user\n if not lo.charge_borrower():\n self.message += 'USER DELETED :: ' + lo.borrower.user.username\n self.message += '\\n'\n lo.borrower.delete_for_loan()\n else:\n self.message += 'USER CHARGED :: ' + lo.borrower.user.username\n self.message += '\\n'", "def test_reject_agreement(self):\n pass", "def can_edit_or_403(self, user):\n if user.id != self.game_master.id:\n raise PermissionDenied\n return True", "def test_buyTicket_EmptiesWallet():\n old_venue_balance = testVenue.wallet\n assert testUser1.buyTicket(testTicket1)\n assert testUser1.inventory[-1] == testTicket1\n assert not testTicket1.for_sale\n assert testUser1.wallet == 0\n 
assert testVenue.wallet == old_venue_balance + testTicket1.list_price", "def assertFailedBeforeEmailing(self, email_user):\r\n self.assertRolledBack()\r\n self.assertFalse(email_user.called)", "def write_authorize_examinercommon(cls, user, obj):\n if obj.delivered_by != None:\n raise PermissionDenied()", "async def adduser(ctx, user: discord.Member):\n channel = ctx.channel\n if not IsATicket(channel.id):\n await ctx.send(\n \"This is not a ticket! Users can only be added to a ticket channel\"\n )\n return\n\n await channel.set_permissions(user, read_messages=True, send_messages=True)\n await ctx.message.delete()", "def test_not_owner(self):\n creating_user = create_user()\n creating_user.save()\n festival = create_festival('test', creating_user)\n festival.save()\n\n concert = create_concert(festival, 'test')\n concert.save()\n\n login(self.client)\n\n client = create_client('test')\n client.delete_access = True\n client.save()\n\n response = self.client.post('/backend/u/conc/', {'client': 'test', 'id': concert.pk})\n self.assertEqual(response.status_code, 200)\n self.assertEqual('Permission not granted', response.content.decode('utf-8'))", "def testPostAccessDenied(self):\n self.runPost(None, data=self.post_data)\n self.response_401()\n for user in (self.guest, self.norole, self.unrelated_owner):\n self.runPost(user, data=self.post_data)\n self.response_403()", "def prepare_ticket(self, req, ticket, fields, actions):", "async def ticket_error(self, ctx, error):\n embed: Embed = settings.get_ticket_error_embed()\n\n embed.set_footer(text=embed.footer.text,\n icon_url=self.bot.user.avatar_url)\n \n if isinstance(error, commands.MissingRequiredArgument):\n embed.description = f\"\\nUse **!ticket <ticketpanelname>**\"\n else:\n embed.description = f\"\\nYou don't have permissions for executing this command.\"\n\n await ctx.send(embed=embed)", "def test_deny_pending_payment(self):\n pass" ]
[ "0.7239014", "0.6919179", "0.6916417", "0.662821", "0.65123874", "0.6383882", "0.6308077", "0.617782", "0.6163303", "0.608552", "0.59519124", "0.5946032", "0.5904957", "0.58991903", "0.58230376", "0.5820032", "0.58199584", "0.5809911", "0.58059365", "0.576321", "0.5720081", "0.5706111", "0.5673727", "0.56472284", "0.5646481", "0.5646001", "0.56326586", "0.56222785", "0.56207573", "0.56123674", "0.5593135", "0.55853707", "0.55785894", "0.55724066", "0.55661273", "0.5565648", "0.55654955", "0.55555296", "0.5545706", "0.55405027", "0.5540138", "0.55344564", "0.55226445", "0.54972446", "0.5494822", "0.54927564", "0.5491655", "0.54829204", "0.54801047", "0.5479053", "0.5473456", "0.5469791", "0.54642385", "0.5454643", "0.54492533", "0.54419935", "0.54377496", "0.54286623", "0.54257023", "0.54235446", "0.54165876", "0.5413338", "0.54108775", "0.5404445", "0.54032964", "0.54032964", "0.5402575", "0.5398855", "0.5395695", "0.5390947", "0.5390542", "0.53900665", "0.5390019", "0.5388371", "0.5383711", "0.5381891", "0.5376819", "0.5369266", "0.5364353", "0.53580177", "0.5350538", "0.53352517", "0.5335084", "0.5328189", "0.532517", "0.5315171", "0.53092706", "0.53072083", "0.53061366", "0.5302809", "0.5297665", "0.5296107", "0.5295685", "0.529556", "0.5294427", "0.5292785", "0.5291599", "0.52906954", "0.52880067", "0.52836883" ]
0.6576258
4