Dataset columns:
    query            string, length 9 to 9.05k
    document         string, length 10 to 222k
    metadata         dict
    negatives        list, length 30
    negative_scores  list, length 30
    document_score   string, length 4 to 10
    document_rank    string, 2 distinct values
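As a rough illustration of the schema above (the class name and exact typing are my assumptions, not part of the dataset), one row could be modeled in Python like this:

from dataclasses import dataclass


@dataclass
class RetrievalRow:
    """One row of the schema listed above; field names mirror the columns, types are assumed."""

    query: str                  # natural-language description, 9 to ~9.05k characters
    document: str               # positive code snippet, 10 to ~222k characters
    metadata: dict              # training-objective configuration (see the metadata rows below)
    negatives: list[str]        # exactly 30 hard-negative code snippets
    negative_scores: list[str]  # 30 similarity scores, stored as strings in the raw data
    document_score: str         # score of the positive document, also stored as a string
    document_rank: str          # one of two observed values ("0" or "1")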
Add permissions to all DAGs. Creates 'can_read', 'can_edit', and 'can_delete' permissions for all DAGs, along with any `access_control` permissions provided in them. This does iterate through ALL the DAGs, which can be slow. See `sync_perm_for_dag` if you only need to sync a single DAG.
def create_dag_specific_permissions(self) -> None:
    perms = self.get_all_permissions()
    dagbag = DagBag(read_dags_from_db=True)
    dagbag.collect_dags_from_db()
    dags = dagbag.dags.values()

    for dag in dags:
        root_dag_id = dag.parent_dag.dag_id if dag.parent_dag else dag.dag_id
        dag_resource_name = permissions.resource_name_for_dag(root_dag_id)
        for action_name in self.DAG_ACTIONS:
            if (action_name, dag_resource_name) not in perms:
                self._merge_perm(action_name, dag_resource_name)

        if dag.access_control:
            self.sync_perm_for_dag(dag_resource_name, dag.access_control)
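The loop above only creates the (action, resource) pairs that are not already in the permission table. A minimal standalone sketch of that "merge only what is missing" pattern, with hypothetical names and an in-memory set standing in for get_all_permissions():

def missing_dag_permissions(existing, dag_ids, dag_actions=("can_read", "can_edit", "can_delete")):
    """Return the (action, resource) pairs that still need to be created.

    `existing` is a set of (action_name, resource_name) tuples already present;
    `dag_ids` are root DAG ids. Names here are illustrative, not Airflow's API.
    """
    to_create = []
    for dag_id in dag_ids:
        resource_name = f"DAG:{dag_id}"  # assumption: the per-DAG resource name is the id with a "DAG:" prefix
        for action_name in dag_actions:
            if (action_name, resource_name) not in existing:
                to_create.append((action_name, resource_name))
    return to_create


# Only the pairs absent from `existing` are reported, mirroring the `_merge_perm` guard above.
existing = {("can_read", "DAG:example_dag")}
print(missing_dag_permissions(existing, ["example_dag", "other_dag"]))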
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def sync_perm_for_dag(\n self,\n dag_id: str,\n access_control: dict[str, Collection[str]] | None = None,\n ) -> None:\n dag_resource_name = permissions.resource_name_for_dag(dag_id)\n for dag_action_name in self.DAG_ACTIONS:\n self.create_permission(dag_action_name, dag_resource_name)\n\n if access_control is not None:\n self.log.info(\"Syncing DAG-level permissions for DAG '%s'\", dag_resource_name)\n self._sync_dag_view_permissions(dag_resource_name, access_control)\n else:\n self.log.info(\n \"Not syncing DAG-level permissions for DAG '%s' as access control is unset.\",\n dag_resource_name,\n )", "def create_perm_vm_for_all_dag(self) -> None:\n # create perm for global logical dag\n for resource_name in self.DAG_RESOURCES:\n for action_name in self.DAG_ACTIONS:\n self._merge_perm(action_name, resource_name)", "def set_permissions_all(self, replace=False):\r\n bucket = self._get_bucket()\r\n for key in bucket:\r\n self.set_permissions(key, replace)", "def _sync_dag_view_permissions(self, dag_id: str, access_control: dict[str, Collection[str]]) -> None:\n dag_resource_name = permissions.resource_name_for_dag(dag_id)\n\n def _get_or_create_dag_permission(action_name: str) -> Permission | None:\n perm = self.get_permission(action_name, dag_resource_name)\n if not perm:\n self.log.info(\"Creating new action '%s' on resource '%s'\", action_name, dag_resource_name)\n perm = self.create_permission(action_name, dag_resource_name)\n\n return perm\n\n def _revoke_stale_permissions(resource: Resource):\n existing_dag_perms = self.get_resource_permissions(resource)\n for perm in existing_dag_perms:\n non_admin_roles = [role for role in perm.role if role.name != \"Admin\"]\n for role in non_admin_roles:\n target_perms_for_role = access_control.get(role.name, ())\n if perm.action.name not in target_perms_for_role:\n self.log.info(\n \"Revoking '%s' on DAG '%s' for role '%s'\",\n perm.action,\n dag_resource_name,\n role.name,\n )\n self.remove_permission_from_role(role, perm)\n\n resource = self.get_resource(dag_resource_name)\n if resource:\n _revoke_stale_permissions(resource)\n\n for rolename, action_names in access_control.items():\n role = self.find_role(rolename)\n if not role:\n raise AirflowException(\n f\"The access_control mapping for DAG '{dag_id}' includes a role named \"\n f\"'{rolename}', but that role does not exist\"\n )\n\n action_names = set(action_names)\n invalid_action_names = action_names - self.DAG_ACTIONS\n if invalid_action_names:\n raise AirflowException(\n f\"The access_control map for DAG '{dag_resource_name}' includes \"\n f\"the following invalid permissions: {invalid_action_names}; \"\n f\"The set of valid permissions is: {self.DAG_ACTIONS}\"\n )\n\n for action_name in action_names:\n dag_perm = _get_or_create_dag_permission(action_name)\n if dag_perm:\n self.add_permission_to_role(role, dag_perm)", "def aws_permissions(self, perms):\n for perm in perms:\n group = perm.get(\"Group\")\n if group:\n self.allowed_groups.append(group)\n\n user = perm.get(\"UserId\")\n if user:\n self.allowed_users.append(user)", "def add_permissions(self, permissions: List[str]):\n for permission in permissions:\n setattr(self.scopes, permission, True)\n self.save(update_fields=[\"scopes\"])", "def update_admin_permission(self) -> None:\n session = self.appbuilder.get_session\n dag_resources = session.scalars(\n select(Resource).where(Resource.name.like(f\"{permissions.RESOURCE_DAG_PREFIX}%\"))\n )\n resource_ids = [resource.id for resource in dag_resources]\n\n perms = 
session.scalars(select(Permission).where(~Permission.resource_id.in_(resource_ids)))\n perms = [p for p in perms if p.action and p.resource]\n\n admin = self.find_role(\"Admin\")\n admin.permissions = list(set(admin.permissions) | set(perms))\n\n session.commit()", "def _sync_all(cursor):\n _print_info('Syncing all privileges.')\n\n all_namespace_permissions = _fetch_all_namespace_permissions(cursor)\n\n for namespace_permission in all_namespace_permissions:\n namespace = namespace_permission['namespace']\n users = namespace_permission['users']\n\n _print_info('Working on namespace: \\'{}\\''.format(namespace))\n for user in users:\n _grant_select_privilege(cursor, user, namespace)", "def __permissions_to_manage(self):\n\n for model_name in self.MODELS_TO_MANAGE:\n try:\n model = apps.get_model(model_name)\n except LookupError:\n continue\n\n content_type = ContentType.objects.get_for_model(model)\n\n permissions = Permission.objects.filter(content_type=content_type)\n\n # retrieve default permissions first\n default_codenames = list(map(\n lambda x: f'{x}_{content_type.model}',\n ('view', 'add', 'change', 'delete'),\n ))\n for default_codename in default_codenames:\n perm = permissions.get(codename=default_codename)\n yield perm\n\n # and finally any custom defined permissions\n for custom_perm in permissions\\\n .exclude(codename__in=default_codenames)\\\n .order_by('name'):\n yield custom_perm", "def _set_rw_permissions_for_all(self, nms, path):\n nms.appliance.execute('chmod ugo+rw %s' % path)", "def all_perms(self, id, **kwargs):\r\n p = self.db.auth_permission\r\n if self.all_permissions:\r\n ret = self.sql(\r\n (p.record_id == id) & (p.table_name == self.table._tablename) & p.name.belongs(self.all_permissions),\r\n p.name, p.group_id,\r\n orderby=p.group_id)\r\n else:\r\n ret = []\r\n current.response.text = ret\r\n return ret", "def check_authorization(\n self,\n perms: Sequence[tuple[str, str]] | None = None,\n dag_id: str | None = None,\n ) -> bool:\n if not perms:\n return True\n\n for perm in perms:\n if perm in (\n (permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),\n (permissions.ACTION_CAN_EDIT, permissions.RESOURCE_DAG),\n (permissions.ACTION_CAN_DELETE, permissions.RESOURCE_DAG),\n ):\n can_access_all_dags = self.has_access(*perm)\n if can_access_all_dags:\n continue\n\n action = perm[0]\n if self.can_access_some_dags(action, dag_id):\n continue\n return False\n\n elif not self.has_access(*perm):\n return False\n\n return True", "def setupPermissions( self, p ):\n mp = p.manage_permission\n for entry in Config.PortalPermissions:\n apply( mp, entry )", "def bulk_sync_roles(self, roles: Iterable[dict[str, Any]]) -> None:\n existing_roles = self._get_all_roles_with_permissions()\n non_dag_perms = self._get_all_non_dag_permissions()\n\n for config in roles:\n role_name = config[\"role\"]\n perms = config[\"perms\"]\n role = existing_roles.get(role_name) or self.add_role(role_name)\n\n for action_name, resource_name in perms:\n perm = non_dag_perms.get((action_name, resource_name)) or self.create_permission(\n action_name, resource_name\n )\n\n if perm not in role.permissions:\n self.add_permission_to_role(role, perm)", "def update_docrules_permissions(**kwargs):\n docrules = DocumentTypeRule.objects.all()\n for rule in docrules:\n rule.save()\n #print 'Created user role/permission for each DocumentTypeRule()'", "def sync_roles(self) -> None:\n # Create global all-dag permissions\n self.create_perm_vm_for_all_dag()\n\n # Sync the default roles (Admin, Viewer, User, Op, 
public) with related permissions\n self.bulk_sync_roles(self.ROLE_CONFIGS)\n\n self.add_homepage_access_to_custom_roles()\n # init existing roles, the rest role could be created through UI.\n self.update_admin_permission()\n self.clean_perms()", "def build_permissions(self):\n\t\tself.build_doctype_map()\n\t\tself.build_perm_map()\n\t\tuser_shared = frappe.share.get_shared_doctypes()\n\t\tno_list_view_link = []\n\t\tactive_modules = get_active_modules() or []\n\t\tfor dt in self.doctype_map:\n\t\t\tdtp = self.doctype_map[dt]\n\n\t\t\tp = self.perm_map.get(dt, {})\n\n\t\t\tif not p.get(\"read\") and (dt in user_shared):\n\t\t\t\tp[\"read\"] = 1\n\n\t\t\tif p.get(\"select\"):\n\t\t\t\tself.can_select.append(dt)\n\n\t\t\tif not dtp.get(\"istable\"):\n\t\t\t\tif p.get(\"create\") and not dtp.get(\"issingle\"):\n\t\t\t\t\tif dtp.get(\"in_create\"):\n\t\t\t\t\t\tself.in_create.append(dt)\n\t\t\t\t\telse:\n\t\t\t\t\t\tself.can_create.append(dt)\n\t\t\t\telif p.get(\"write\"):\n\t\t\t\t\tself.can_write.append(dt)\n\t\t\t\telif p.get(\"read\"):\n\t\t\t\t\tif dtp.get(\"read_only\"):\n\t\t\t\t\t\t# read_only = \"User Cannot Search\"\n\t\t\t\t\t\tself.all_read.append(dt)\n\t\t\t\t\t\tno_list_view_link.append(dt)\n\t\t\t\t\telse:\n\t\t\t\t\t\tself.can_read.append(dt)\n\n\t\t\tif p.get(\"cancel\"):\n\t\t\t\tself.can_cancel.append(dt)\n\n\t\t\tif p.get(\"delete\"):\n\t\t\t\tself.can_delete.append(dt)\n\n\t\t\tif p.get(\"read\") or p.get(\"write\") or p.get(\"create\"):\n\t\t\t\tif p.get(\"report\"):\n\t\t\t\t\tself.can_get_report.append(dt)\n\t\t\t\tfor key in (\"import\", \"export\", \"print\", \"email\"):\n\t\t\t\t\tif p.get(key):\n\t\t\t\t\t\tgetattr(self, \"can_\" + key).append(dt)\n\n\t\t\t\tif not dtp.get(\"istable\"):\n\t\t\t\t\tif not dtp.get(\"issingle\") and not dtp.get(\"read_only\"):\n\t\t\t\t\t\tself.can_search.append(dt)\n\t\t\t\t\tif dtp.get(\"module\") not in self.allow_modules:\n\t\t\t\t\t\tif active_modules and dtp.get(\"module\") not in active_modules:\n\t\t\t\t\t\t\tpass\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tself.allow_modules.append(dtp.get(\"module\"))\n\n\t\tself.can_write += self.can_create\n\t\tself.can_write += self.in_create\n\t\tself.can_read += self.can_write\n\n\t\tself.shared = frappe.get_all(\n\t\t\t\"DocShare\", {\"user\": self.name, \"read\": 1}, distinct=True, pluck=\"share_doctype\"\n\t\t)\n\t\tself.can_read = list(set(self.can_read + self.shared))\n\t\tself.all_read += self.can_read\n\n\t\tfor dt in no_list_view_link:\n\t\t\tif dt in self.can_read:\n\t\t\t\tself.can_read.remove(dt)\n\n\t\tif \"System Manager\" in self.get_roles():\n\t\t\tself.can_import += frappe.get_all(\"DocType\", {\"allow_import\": 1}, pluck=\"name\")\n\t\t\tself.can_import += frappe.get_all(\n\t\t\t\t\"Property Setter\",\n\t\t\t\tpluck=\"doc_type\",\n\t\t\t\tfilters={\"property\": \"allow_import\", \"value\": \"1\"},\n\t\t\t)\n\n\t\tfrappe.cache.hset(\"can_import\", frappe.session.user, self.can_import)", "def can_edit_all_dags(self, user=None) -> bool:\n return self.has_access(permissions.ACTION_CAN_EDIT, permissions.RESOURCE_DAG, user)", "def setpermissions(self, lvl):\n\n admingroup = Group.objects.get(name=self.comicsite.admin_group_name())\n participantsgroup = Group.objects.get(name=self.comicsite.participants_group_name())\n everyonegroup = Group.objects.get(name=\"everyone\")\n\n\n\n self.persist_if_needed()\n if lvl == self.ALL:\n assign_perm(\"view_ComicSiteModel\",admingroup,self)\n assign_perm(\"view_ComicSiteModel\",participantsgroup,self)\n 
assign_perm(\"view_ComicSiteModel\",everyonegroup,self)\n elif lvl == self.REGISTERED_ONLY:\n\n assign_perm(\"view_ComicSiteModel\",admingroup,self)\n assign_perm(\"view_ComicSiteModel\",participantsgroup,self)\n remove_perm(\"view_ComicSiteModel\",everyonegroup,self)\n elif lvl == self.ADMIN_ONLY:\n\n assign_perm(\"view_ComicSiteModel\",admingroup,self)\n remove_perm(\"view_ComicSiteModel\",participantsgroup,self)\n remove_perm(\"view_ComicSiteModel\",everyonegroup,self)\n else:\n raise ValueError(\"Unknown permissions level '\"+ lvl +\"'. I don't know which groups to give permissions to this object\")", "def get_all_permissions(self, obj=None):", "def access_control_list(self, values):\n # pylint: disable=not-an-iterable\n if isinstance(values, dict):\n self.validate_acl_data(values)\n email_names = self.parse_sync_service_acl(values)\n from ggrc.utils import user_generator as ug\n existing_people = {\n p.email: p for p in ug.load_people_with_emails(email_names)\n }\n\n absent_emails = set(email_names) - set(existing_people)\n absent_users = {email: email_names[email] for email in absent_emails}\n new_people = {\n p.email: p for p in ug.create_users_with_role(absent_users)\n }\n all_acl_people = dict(existing_people, **new_people)\n\n for acl in self._access_control_list:\n users = values.get(acl.ac_role.name, [])\n people = {all_acl_people[user[\"email\"]] for user in users}\n acl.update_people(people)\n else:\n roleable.Roleable.access_control_list.fset(self, values)", "def __add_permission_to_group(self, group: Group) -> None:\n for permission_codename in main_app_groups[group.name]:\n permission = Permission.objects.get(codename=permission_codename)\n group.permissions.add(permission)", "def __acl__(self):\n # type: () -> AccessControlListType\n user = self.request.user\n # allow if role MAGPIE_ADMIN_PERMISSION is somehow directly set instead of inferred via members of admin-group\n acl = [(Allow, get_constant(\"MAGPIE_ADMIN_PERMISSION\", self.request), ALL_PERMISSIONS)]\n admin_group_name = get_constant(\"MAGPIE_ADMIN_GROUP\", self.request)\n admins = GroupService.by_group_name(admin_group_name, db_session=self.request.db)\n if admins:\n # need to add explicit admin-group ALL_PERMISSIONS otherwise views with other permissions than the\n # default MAGPIE_ADMIN_PERMISSION will be refused access (e.g.: views with MAGPIE_LOGGED_PERMISSION)\n acl += [(Allow, \"group:{}\".format(admins.id), ALL_PERMISSIONS)]\n if user:\n # user-specific permissions (including group memberships)\n permissions = UserService.permissions(user, self.request.db)\n user_acl = permission_to_pyramid_acls(permissions)\n # allow views that require minimally to be logged in (regardless of who is the user)\n auth_acl = [(Allow, user.id, Authenticated)]\n acl += user_acl + auth_acl\n return acl", "def get_all_permissions(self, obj=None):\n return self.get_group_permissions(obj)", "def request_access_to_groups(self, ceph):\n for ceph_group in (\"volumes\", \"images\", \"vms\"):\n ceph.request_access_to_group(\n name=ceph_group,\n object_prefix_permissions={\"class-read\": [\"rbd_children\"]},\n permission=\"rwx\",\n )", "def my_perms(self, ids, **kwargs):\r\n auth = self.app.auth\r\n # checking all objects\r\n p = self.db.auth_permission\r\n if type(ids) in (list, tuple, set):\r\n _ids = type(ids)((0,)) + ids\r\n else:\r\n _ids = [0, ids]\r\n grouped = self.db(p.record_id.belongs(_ids) & p.group_id.belongs(auth.user_groups.keys()) & (\r\n p.table_name == self.table._tablename)).select(p.name, 
p.record_id).group_by_value('record_id')\r\n take_names = itemgetter('name')\r\n base_permissions = set(imap(take_names, grouped.get(0, set())))\r\n ret = dict(PERMISSIONS={self.name: [\r\n dict((id, set(imap(take_names, grouped.get(id, []))).union(base_permissions)) for id in map(int, ids))]})\r\n current.response.text = ret\r\n return ret", "def __populate_permissions(self):\n if self.auth_group.value:\n grpid = self.auth_group.value\n grp = AuthGroup.objects.get(pk=grpid)\n for perm in Permission.objects.all():\n if hasattr(self, perm.codename):\n if grp.permissions.filter(pk=perm.pk).exists():\n getattr(self, perm.codename).value = True\n else:\n getattr(self, perm.codename).value = False", "def permissions(self, permissions):\n\n self._permissions = permissions", "def permissions(self, permissions):\n\n self._permissions = permissions", "async def _p_all(self, ctx):\n result = self.database.get_all_perm_rules()\n if len(result) == 0:\n await ctx.send(\"All permissions default\")\n return\n guild_perms = {}\n for permission in result:\n if guild_perms.get(permission.id, None) is None:\n guild_perms[permission.id] = {}\n if guild_perms.get(permission.id).get(permission.command, None) is None:\n guild_perms[permission.id][permission.command] = {}\n if guild_perms.get(permission.id).get(permission.command).get(permission.perm_type, None) is None:\n guild_perms[permission.id][permission.command][permission.perm_type] = []\n guild_perms[permission.id][permission.command][permission.perm_type].append([permission.target,\n permission.priority,\n permission.allow])\n\n out = \"```\"\n for guild in guild_perms:\n guild_name = self.bot.get_guild(guild)\n out += f\"Guild: {guild_name}\\n\"\n for command in guild_perms[guild]:\n out += f\" Command: {command}\\n\"\n for level in sorted(guild_perms[guild][command], key=lambda a: self.LEVELS[a]):\n out += f\" Level: {level}\\n\"\n if level == \"guild\":\n out += f\" {guild_perms[guild][command][level]}\\n\"\n else:\n for detail in guild_perms[guild][command][level]:\n out += f\" {detail[1]}-{detail[0]}: {bool(detail[2])}\\n\"\n out += \"```\"\n await ctx.send(out)" ]
[ "0.71445847", "0.6928385", "0.6225734", "0.618926", "0.59796953", "0.5897693", "0.5667359", "0.55808985", "0.5536757", "0.5498319", "0.5476887", "0.54443544", "0.5367707", "0.5366856", "0.5364191", "0.53560907", "0.53519815", "0.5307616", "0.5281192", "0.5278737", "0.52778745", "0.52128834", "0.52079314", "0.5205295", "0.5193546", "0.5189143", "0.51561403", "0.5130361", "0.5130361", "0.5118235" ]
0.7489947
0
Add missing permissions to the table for admin. Admin should get all the permissions except the per-DAG permissions, because Admin already has the global DAGs permission; add the missing ones to the table for admin.
def update_admin_permission(self) -> None:
    session = self.appbuilder.get_session
    dag_resources = session.scalars(
        select(Resource).where(Resource.name.like(f"{permissions.RESOURCE_DAG_PREFIX}%"))
    )
    resource_ids = [resource.id for resource in dag_resources]

    perms = session.scalars(select(Permission).where(~Permission.resource_id.in_(resource_ids)))
    perms = [p for p in perms if p.action and p.resource]

    admin = self.find_role("Admin")
    admin.permissions = list(set(admin.permissions) | set(perms))

    session.commit()
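The essential step above is the set union that adds only the missing permissions to the Admin role without duplicating the ones it already holds. A small self-contained sketch of that idea, using plain tuples in place of ORM Permission objects:

def grant_missing(role_permissions, candidate_permissions):
    """Return the role's permissions extended with any candidates it lacks.

    Both arguments are iterables of hashable permission identifiers, e.g.
    (action_name, resource_name) tuples; the deduplication is the same
    set-union trick used in `update_admin_permission` above.
    """
    return list(set(role_permissions) | set(candidate_permissions))


admin_perms = [("can_read", "Connections")]
candidates = [("can_read", "Connections"), ("can_edit", "Variables")]
print(sorted(grant_missing(admin_perms, candidates)))
# [('can_edit', 'Variables'), ('can_read', 'Connections')]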
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_missing_perms(self) -> None:\n\n # pylint: disable=import-outside-toplevel\n from superset.connectors.sqla.models import SqlaTable\n from superset.models import core as models\n\n logger.info(\"Fetching a set of all perms to lookup which ones are missing\")\n all_pvs = set()\n for pv in self.get_session.query(self.permissionview_model).all():\n if pv.permission and pv.view_menu:\n all_pvs.add((pv.permission.name, pv.view_menu.name))\n\n def merge_pv(view_menu: str, perm: Optional[str]) -> None:\n \"\"\"Create permission view menu only if it doesn't exist\"\"\"\n if view_menu and perm and (view_menu, perm) not in all_pvs:\n self.add_permission_view_menu(view_menu, perm)\n\n logger.info(\"Creating missing datasource permissions.\")\n datasources = SqlaTable.get_all_datasources(self.get_session)\n for datasource in datasources:\n merge_pv(\"datasource_access\", datasource.get_perm())\n merge_pv(\"schema_access\", datasource.get_schema_perm())\n\n logger.info(\"Creating missing database permissions.\")\n databases = self.get_session.query(models.Database).all()\n for database in databases:\n merge_pv(\"database_access\", database.perm)", "def create_dag_specific_permissions(self) -> None:\n perms = self.get_all_permissions()\n dagbag = DagBag(read_dags_from_db=True)\n dagbag.collect_dags_from_db()\n dags = dagbag.dags.values()\n\n for dag in dags:\n root_dag_id = dag.parent_dag.dag_id if dag.parent_dag else dag.dag_id\n dag_resource_name = permissions.resource_name_for_dag(root_dag_id)\n for action_name in self.DAG_ACTIONS:\n if (action_name, dag_resource_name) not in perms:\n self._merge_perm(action_name, dag_resource_name)\n\n if dag.access_control:\n self.sync_perm_for_dag(dag_resource_name, dag.access_control)", "def add_permissions(apps, schema_editor):\n\n Permission = apps.get_model(\"auth\", \"Permission\")\n Group = apps.get_model(\"auth\", \"Group\")\n ContentType = apps.get_model(\"contenttypes\", \"ContentType\")\n\n permission, created = Permission.objects.get_or_create(\n codename=\"can_approve_estimated_completion_date\",\n defaults={\n \"name\": \"Can approve estimated completion date\",\n \"content_type\": ContentType.objects.get_for_model(\n apps.get_model(\"barriers\", \"Barrier\")\n ),\n },\n )\n\n admin_group = Group.objects.get(name=\"Administrator\")\n admin_group.permissions.add(permission)\n\n print(\n 'Permission \"can_approve_estimated_completion_date\" added to the \"Admin\" group.'\n )", "def create_custom_permissions(self) -> None:\n self.add_permission_view_menu(\"all_datasource_access\", \"all_datasource_access\")\n self.add_permission_view_menu(\"all_database_access\", \"all_database_access\")\n self.add_permission_view_menu(\"all_query_access\", \"all_query_access\")\n self.add_permission_view_menu(\"can_share_dashboard\", \"Superset\")\n self.add_permission_view_menu(\"can_share_chart\", \"Superset\")", "def build_permissions(self):\n\t\tself.build_doctype_map()\n\t\tself.build_perm_map()\n\t\tuser_shared = frappe.share.get_shared_doctypes()\n\t\tno_list_view_link = []\n\t\tactive_modules = get_active_modules() or []\n\t\tfor dt in self.doctype_map:\n\t\t\tdtp = self.doctype_map[dt]\n\n\t\t\tp = self.perm_map.get(dt, {})\n\n\t\t\tif not p.get(\"read\") and (dt in user_shared):\n\t\t\t\tp[\"read\"] = 1\n\n\t\t\tif p.get(\"select\"):\n\t\t\t\tself.can_select.append(dt)\n\n\t\t\tif not dtp.get(\"istable\"):\n\t\t\t\tif p.get(\"create\") and not dtp.get(\"issingle\"):\n\t\t\t\t\tif 
dtp.get(\"in_create\"):\n\t\t\t\t\t\tself.in_create.append(dt)\n\t\t\t\t\telse:\n\t\t\t\t\t\tself.can_create.append(dt)\n\t\t\t\telif p.get(\"write\"):\n\t\t\t\t\tself.can_write.append(dt)\n\t\t\t\telif p.get(\"read\"):\n\t\t\t\t\tif dtp.get(\"read_only\"):\n\t\t\t\t\t\t# read_only = \"User Cannot Search\"\n\t\t\t\t\t\tself.all_read.append(dt)\n\t\t\t\t\t\tno_list_view_link.append(dt)\n\t\t\t\t\telse:\n\t\t\t\t\t\tself.can_read.append(dt)\n\n\t\t\tif p.get(\"cancel\"):\n\t\t\t\tself.can_cancel.append(dt)\n\n\t\t\tif p.get(\"delete\"):\n\t\t\t\tself.can_delete.append(dt)\n\n\t\t\tif p.get(\"read\") or p.get(\"write\") or p.get(\"create\"):\n\t\t\t\tif p.get(\"report\"):\n\t\t\t\t\tself.can_get_report.append(dt)\n\t\t\t\tfor key in (\"import\", \"export\", \"print\", \"email\"):\n\t\t\t\t\tif p.get(key):\n\t\t\t\t\t\tgetattr(self, \"can_\" + key).append(dt)\n\n\t\t\t\tif not dtp.get(\"istable\"):\n\t\t\t\t\tif not dtp.get(\"issingle\") and not dtp.get(\"read_only\"):\n\t\t\t\t\t\tself.can_search.append(dt)\n\t\t\t\t\tif dtp.get(\"module\") not in self.allow_modules:\n\t\t\t\t\t\tif active_modules and dtp.get(\"module\") not in active_modules:\n\t\t\t\t\t\t\tpass\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tself.allow_modules.append(dtp.get(\"module\"))\n\n\t\tself.can_write += self.can_create\n\t\tself.can_write += self.in_create\n\t\tself.can_read += self.can_write\n\n\t\tself.shared = frappe.get_all(\n\t\t\t\"DocShare\", {\"user\": self.name, \"read\": 1}, distinct=True, pluck=\"share_doctype\"\n\t\t)\n\t\tself.can_read = list(set(self.can_read + self.shared))\n\t\tself.all_read += self.can_read\n\n\t\tfor dt in no_list_view_link:\n\t\t\tif dt in self.can_read:\n\t\t\t\tself.can_read.remove(dt)\n\n\t\tif \"System Manager\" in self.get_roles():\n\t\t\tself.can_import += frappe.get_all(\"DocType\", {\"allow_import\": 1}, pluck=\"name\")\n\t\t\tself.can_import += frappe.get_all(\n\t\t\t\t\"Property Setter\",\n\t\t\t\tpluck=\"doc_type\",\n\t\t\t\tfilters={\"property\": \"allow_import\", \"value\": \"1\"},\n\t\t\t)\n\n\t\tfrappe.cache.hset(\"can_import\", frappe.session.user, self.can_import)", "def init_permissions(apps):\n try:\n group = apps.get_model(\"auth\", \"Group\")\n permission = apps.get_model(\"auth\", \"Permission\")\n\n # Get or Create the default group\n default_group, created = group.objects.get_or_create(name=main_rights.default_group)\n\n # Get explore example permissions\n explore_access_perm = permission.objects.get(codename=explore_example_rights.explore_example_access)\n explore_save_query_perm = permission.objects.get(codename=explore_example_rights.explore_example_save_query)\n explore_delete_query_perm = permission.objects.get(codename=explore_example_rights.explore_example_delete_query)\n\n # add permissions to default group\n default_group.permissions.add(explore_access_perm,\n explore_save_query_perm,\n explore_delete_query_perm)\n except Exception, e:\n print('ERROR : Impossible to init the permissions : ' + e.message)", "def init_permissions(apps):\n try:\n group = apps.get_model(\"auth\", \"Group\")\n permission = apps.get_model(\"auth\", \"Permission\")\n\n # Get or Create the default group\n default_group, created = group.objects.get_or_create(name=main_rights.default_group)\n\n # Get explore keyword permissions\n explore_access_perm = permission.objects.get(codename=explore_keyword_rights.explore_keyword_access)\n\n # add permissions to default group\n default_group.permissions.add(explore_access_perm)\n except Exception, e:\n print('ERROR : Impossible to init the 
permissions for core_explore_keyword_app : ' + e.message)", "def __check_new_permissions(self) -> None:\n for permission in main_app_permissions:\n try:\n permission_old = Permission.objects.get(codename=main_app_permissions[permission][CODENAME])\n self.__upgrade_permission(permission, permission_old)\n except ObjectDoesNotExist: # need to create new\n self.__create_new_permission(\n codename=main_app_permissions[permission][CODENAME],\n name=main_app_permissions[permission][NAME],\n content_type=main_app_permissions[permission][CONTENT_TYPE]\n )\n\n self.stdout.write(f'Added new permission {main_app_permissions[permission][CODENAME]}')", "def init_permissions(apps):\n try:\n group = apps.get_model(\"auth\", \"Group\")\n permission = apps.get_model(\"auth\", \"Permission\")\n\n # Get or Create the default group\n default_group, created = group.objects.get_or_create(\n name=main_rights.DEFAULT_GROUP\n )\n\n # Get explore keyword permissions\n explore_access_perm = permission.objects.get(\n codename=explore_keyword_rights.EXPLORE_KEYWORD_ACCESS\n )\n\n # Add permissions to default group\n default_group.permissions.add(explore_access_perm)\n except Exception as exception:\n logger.error(\n \"Impossible to init explore_keyword permissions: %s\"\n % str(exception)\n )", "def __populate_permissions(self):\n if self.auth_group.value:\n grpid = self.auth_group.value\n grp = AuthGroup.objects.get(pk=grpid)\n for perm in Permission.objects.all():\n if hasattr(self, perm.codename):\n if grp.permissions.filter(pk=perm.pk).exists():\n getattr(self, perm.codename).value = True\n else:\n getattr(self, perm.codename).value = False", "def sync_perm_for_dag(\n self,\n dag_id: str,\n access_control: dict[str, Collection[str]] | None = None,\n ) -> None:\n dag_resource_name = permissions.resource_name_for_dag(dag_id)\n for dag_action_name in self.DAG_ACTIONS:\n self.create_permission(dag_action_name, dag_resource_name)\n\n if access_control is not None:\n self.log.info(\"Syncing DAG-level permissions for DAG '%s'\", dag_resource_name)\n self._sync_dag_view_permissions(dag_resource_name, access_control)\n else:\n self.log.info(\n \"Not syncing DAG-level permissions for DAG '%s' as access control is unset.\",\n dag_resource_name,\n )", "def permissions():\n pass", "def create_perm_vm_for_all_dag(self) -> None:\n # create perm for global logical dag\n for resource_name in self.DAG_RESOURCES:\n for action_name in self.DAG_ACTIONS:\n self._merge_perm(action_name, resource_name)", "def permissions(self):\n return None", "def setupPermissions( self, p ):\n mp = p.manage_permission\n for entry in Config.PortalPermissions:\n apply( mp, entry )", "def get_permissions_map(self, created):\n current_user = self.context['request'].user\n company = get_object_or_404(models.Company, pk=self.data['id'])\n admins = company.admins\n accountants = company.accountants\n current_user.groups.add(admins)\n current_user.groups.add(accountants)\n assign_perm(\"change_group\", admins, admins)\n assign_perm(\"change_group\", admins, accountants)\n assign_perm(\"delete_group\", admins, admins)\n assign_perm(\"delete_group\", admins, accountants)\n return {\n 'view_company': [admins, accountants],\n 'change_company': [admins],\n 'delete_company': [admins]\n }", "def _sync_dag_view_permissions(self, dag_id: str, access_control: dict[str, Collection[str]]) -> None:\n dag_resource_name = permissions.resource_name_for_dag(dag_id)\n\n def _get_or_create_dag_permission(action_name: str) -> Permission | None:\n perm = 
self.get_permission(action_name, dag_resource_name)\n if not perm:\n self.log.info(\"Creating new action '%s' on resource '%s'\", action_name, dag_resource_name)\n perm = self.create_permission(action_name, dag_resource_name)\n\n return perm\n\n def _revoke_stale_permissions(resource: Resource):\n existing_dag_perms = self.get_resource_permissions(resource)\n for perm in existing_dag_perms:\n non_admin_roles = [role for role in perm.role if role.name != \"Admin\"]\n for role in non_admin_roles:\n target_perms_for_role = access_control.get(role.name, ())\n if perm.action.name not in target_perms_for_role:\n self.log.info(\n \"Revoking '%s' on DAG '%s' for role '%s'\",\n perm.action,\n dag_resource_name,\n role.name,\n )\n self.remove_permission_from_role(role, perm)\n\n resource = self.get_resource(dag_resource_name)\n if resource:\n _revoke_stale_permissions(resource)\n\n for rolename, action_names in access_control.items():\n role = self.find_role(rolename)\n if not role:\n raise AirflowException(\n f\"The access_control mapping for DAG '{dag_id}' includes a role named \"\n f\"'{rolename}', but that role does not exist\"\n )\n\n action_names = set(action_names)\n invalid_action_names = action_names - self.DAG_ACTIONS\n if invalid_action_names:\n raise AirflowException(\n f\"The access_control map for DAG '{dag_resource_name}' includes \"\n f\"the following invalid permissions: {invalid_action_names}; \"\n f\"The set of valid permissions is: {self.DAG_ACTIONS}\"\n )\n\n for action_name in action_names:\n dag_perm = _get_or_create_dag_permission(action_name)\n if dag_perm:\n self.add_permission_to_role(role, dag_perm)", "def setUp(self):\n\n from django.contrib.auth.models import Permission, User\n perms = Permission.objects.filter(codename__startswith='resource_')\n jedi = User.objects.get(username='jedi')\n sith = User.objects.get(username='sith')\n for perm in perms:\n if perm.codename.endswith('_get'):\n sith.user_permissions.add(perm)\n jedi.user_permissions.add(perm)\n jedi.save()\n sith.save()", "def __permissions_to_manage(self):\n\n for model_name in self.MODELS_TO_MANAGE:\n try:\n model = apps.get_model(model_name)\n except LookupError:\n continue\n\n content_type = ContentType.objects.get_for_model(model)\n\n permissions = Permission.objects.filter(content_type=content_type)\n\n # retrieve default permissions first\n default_codenames = list(map(\n lambda x: f'{x}_{content_type.model}',\n ('view', 'add', 'change', 'delete'),\n ))\n for default_codename in default_codenames:\n perm = permissions.get(codename=default_codename)\n yield perm\n\n # and finally any custom defined permissions\n for custom_perm in permissions\\\n .exclude(codename__in=default_codenames)\\\n .order_by('name'):\n yield custom_perm", "def sync_roles(self) -> None:\n # Create global all-dag permissions\n self.create_perm_vm_for_all_dag()\n\n # Sync the default roles (Admin, Viewer, User, Op, public) with related permissions\n self.bulk_sync_roles(self.ROLE_CONFIGS)\n\n self.add_homepage_access_to_custom_roles()\n # init existing roles, the rest role could be created through UI.\n self.update_admin_permission()\n self.clean_perms()", "def test_add_permission(self):\r\n self.assertFalse(self.creator_admin.has_add_permission(self.request))", "def PermissionSet(self) -> _n_6_t_0:", "def add_permissions(self, permissions: List[str]):\n for permission in permissions:\n setattr(self.scopes, permission, True)\n self.save(update_fields=[\"scopes\"])", "def __add_permission_to_group(self, group: Group) -> None:\n for 
permission_codename in main_app_groups[group.name]:\n permission = Permission.objects.get(codename=permission_codename)\n group.permissions.add(permission)", "def __compute_permissions(permissions):\n if permissions.public:\n permissions.authorized_ids = list()\n elif not permissions.public and _BACKEND_ID not in permissions.authorized_ids:\n permissions.authorized_ids.append(_BACKEND_ID)\n return permissions", "def reset_permissions(self):\n self.permissions = 0", "def add_view_permissions(sender, **kwargs):\n # for each of our content types\n for content_type in ContentType.objects.all():\n # build our permission slug\n codename = \"view_%s\" % content_type.model\n\n # if it doesn't exist..\n if not Permission.objects.filter(content_type=content_type, codename=codename):\n # add it\n Permission.objects.create(content_type=content_type,\n codename=codename,\n name=\"Can view %s\" % content_type.name)\n # print \"Added view permission for %s\" % content_type.name", "def get_permissions(self):\n permissions = [IsAdminUser]\n return [permission() for permission in permissions]", "def all_perms(self, id, **kwargs):\r\n p = self.db.auth_permission\r\n if self.all_permissions:\r\n ret = self.sql(\r\n (p.record_id == id) & (p.table_name == self.table._tablename) & p.name.belongs(self.all_permissions),\r\n p.name, p.group_id,\r\n orderby=p.group_id)\r\n else:\r\n ret = []\r\n current.response.text = ret\r\n return ret", "def test_permissions(self):\n self.assert_('admin' in get_model_perms(Group))" ]
[ "0.69647944", "0.66457176", "0.6399103", "0.6032672", "0.6023026", "0.5861807", "0.5824052", "0.5797001", "0.5669763", "0.5656293", "0.5622083", "0.56155336", "0.55685437", "0.5565045", "0.54963064", "0.5486159", "0.54558253", "0.5441496", "0.5438132", "0.54299444", "0.5400835", "0.538407", "0.5377228", "0.535187", "0.5343294", "0.53362066", "0.5303377", "0.52991617", "0.52829427", "0.52713203" ]
0.6819034
1
Initialize default and custom roles with related permissions. 1. Init the default roles (Admin, Viewer, User, Op, public) with related permissions. 2. Init the custom role (daguser) with related permissions.
def sync_roles(self) -> None:
    # Create global all-dag permissions
    self.create_perm_vm_for_all_dag()

    # Sync the default roles (Admin, Viewer, User, Op, public) with related permissions
    self.bulk_sync_roles(self.ROLE_CONFIGS)

    self.add_homepage_access_to_custom_roles()
    # init existing roles, the rest role could be created through UI.
    self.update_admin_permission()
    self.clean_perms()
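sync_roles only orchestrates, and the order matters: global DAG permissions first, then the default role configs, then the Admin catch-up and cleanup. A rough sketch of the bulk step it delegates to, with a plain dict standing in for the database-backed role store (the config shape mirrors the bulk_sync_roles negative shown earlier; everything else is illustrative):

def bulk_sync_roles_sketch(role_store, role_configs):
    """Ensure each configured role exists and holds at least the configured permissions.

    `role_store` maps role name -> set of (action, resource) pairs;
    `role_configs` mirrors the shape of ROLE_CONFIGS, e.g.
    [{"role": "Viewer", "perms": [("can_read", "DAG:example_dag")]}].
    """
    for config in role_configs:
        perms = role_store.setdefault(config["role"], set())
        perms.update(tuple(p) for p in config["perms"])
    return role_store


roles = bulk_sync_roles_sketch({}, [{"role": "Viewer", "perms": [("can_read", "DAG:example_dag")]}])
print(roles)  # {'Viewer': {('can_read', 'DAG:example_dag')}}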
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def init_roles(self):\n self.role_owner = Role.objects.get_or_create(\n name=PROJECT_ROLE_OWNER, rank=ROLE_RANKING[PROJECT_ROLE_OWNER]\n )[0]\n self.role_delegate = Role.objects.get_or_create(\n name=PROJECT_ROLE_DELEGATE, rank=ROLE_RANKING[PROJECT_ROLE_DELEGATE]\n )[0]\n self.role_contributor = Role.objects.get_or_create(\n name=PROJECT_ROLE_CONTRIBUTOR,\n rank=ROLE_RANKING[PROJECT_ROLE_CONTRIBUTOR],\n )[0]\n self.role_guest = Role.objects.get_or_create(\n name=PROJECT_ROLE_GUEST, rank=ROLE_RANKING[PROJECT_ROLE_GUEST]\n )[0]\n self.role_finder = Role.objects.get_or_create(\n name=PROJECT_ROLE_FINDER,\n rank=ROLE_RANKING[PROJECT_ROLE_FINDER],\n project_types=[PROJECT_TYPE_CATEGORY],\n )[0]", "def init() -> None:\n appbuilder.add_permissions(update_perms=True)\n security_manager.sync_role_definitions()", "def set_user_roles(request):\n\n # defaults\n permissions = {\n \"can_view_manager\": False,\n \"can_view_operator\": False\n }\n\n if request.user.is_anonymous or request.user.userprofile.user_type == UserType.CUSTOMER:\n return permissions\n\n user_profile = request.user.userprofile\n if user_profile.user_type == UserType.OPERATOR:\n permissions['can_view_operator'] = True\n return permissions\n if user_profile.user_type == UserType.MANAGER:\n permissions['can_view_manager'] = True\n permissions['can_view_operator'] = True\n return permissions", "def __init__(self, roles, role):\n self._roles = roles\n self._requestor = self._roles._requestor\n self._id = role[\"id\"]\n self._data = role\n self.name = role[\"name\"]\n self.description = role[\"description\"]\n self.system = role[\"system\"]\n self.permissions = dict(role[\"permissions\"])", "def create_basic_roles(script):\n roles = script.do(Roles.GetAll())\n roles = [a['name'] for a in roles]\n\n 'developers' in roles or script.do(Roles.Create('developers'))\n 'supervisors' in roles or script.do(Roles.Create('supervisors'))\n 'readers' in roles or script.do(Roles.Create('readers'))", "def initialize_role_table():\n roles = [\n {\n \"name\": \"user\",\n \"description\": \"registered user permission\",\n \"raw_permissions\": Role.Permissions.REGISTERED.value\n },\n {\n \"name\": \"editor\",\n \"description\": \"user has ability to edit all content and comments\",\n \"raw_permissions\": (Role.Permissions.REGISTERED | Role.Permissions.EDITOR).value\n },\n {\n \"name\": \"admin\",\n \"description\": \"administrator user with access to all of the application\",\n \"raw_permissions\": (Role.Permissions.REGISTERED | Role.Permissions.EDITOR | Role.Permissions.ADMINISTRATOR).value\n }\n ]\n with session_manager() as session:\n for r in roles:\n role = Role.query.filter(Role.name == r.get(\"name\")).one_or_none()\n\n # is there no existing role by a given name?\n if role is None:\n role = Role(\n name=r.get(\"name\"),\n description=r.get(\"description\"),\n raw_permissions=r.get(\"raw_permissions\")\n )\n # otherwise, need to update existing role permissions\n else:\n role.description = r.get(\"description\")\n role.raw_permissions = r.get(\"raw_permissions\")\n\n db.session.add(role)\n db.session.commit()", "def create_predefined_roles(sender, **kwargs):\n from django.contrib.contenttypes.models import ContentType\n from w2s.defaults import TaskDefaults\n from users.models import Roles\n\n if ContentType.objects.filter(app_label='auth', model='group').exists() and ContentType.objects.filter(app_label='users', model='roles').exists():\n predefined_roles = TaskDefaults.get_predefined_roles()\n for role_alias, role_name in 
predefined_roles.items():\n group_model = ContentType.objects.filter(app_label='auth', model='group')[0].model_class()\n\n if not group_model.objects.filter(name=role_name).exists():\n access_specifiers = TaskDefaults.get_predefined_role_access_specifiers(role_alias=role_alias)\n allowed_permissions_sets = [\n TaskDefaults.get_access_specifier_permissions(specifier)[0] for specifier in access_specifiers]\n allowed_permissions = list(set([item for sublist in allowed_permissions_sets for item in sublist]))\n\n # Creating Group\n group_instance = group_model.objects.create(name=role_name)\n group_instance.permissions.set(allowed_permissions)\n if group_instance.save() is None:\n print('\\033[0;37;42m Generated new role \"%s\", Applying details... \\033[0m' % role_alias)\n\n # Creating Role detail\n role_instance = Roles.objects.create(\n group = group_instance,\n alias = role_alias,\n accesses = ','.join(access_specifiers),\n description = 'Predefined role for %s' % role_alias\n )\n\n if role_instance.save() is None:\n print('\\033[0;37;42m Details applied for role: %s \\033[0m' % role_alias)\n else:\n print('---- Error while generating predefined roles ---')\n print(' -Either auth.group or users.roles model does not exists !!!')", "def createAccessRoles(Role):\n if not Role.query.first():\n for role_name in [\"Guest\", \"User\", \"Admin\"]:\n role = Role(name=role_name)\n db.session.add(role)\n db.session.commit()\n logger.info(\"Added default access roles\")\n return", "def sync_role_definitions(self) -> None:\n\n logger.info(\"Syncing role definition\")\n\n self.create_custom_permissions()\n\n # Creating default roles\n self.set_role(\"Admin\", self._is_admin_pvm)\n self.set_role(\"Alpha\", self._is_alpha_pvm)\n self.set_role(\"Gamma\", self._is_gamma_pvm)\n self.set_role(\"granter\", self._is_granter_pvm)\n self.set_role(\"sql_lab\", self._is_sql_lab_pvm)\n\n # Configure public role\n if current_app.config[\"PUBLIC_ROLE_LIKE\"]:\n self.copy_role(\n current_app.config[\"PUBLIC_ROLE_LIKE\"],\n self.auth_role_public,\n merge=True,\n )\n\n self.create_missing_perms()\n\n # commit role and view menu updates\n self.get_session.commit()\n self.clean_perms()", "def init_permissions(apps):\n try:\n group = apps.get_model(\"auth\", \"Group\")\n permission = apps.get_model(\"auth\", \"Permission\")\n\n # Get or Create the default group\n default_group, created = group.objects.get_or_create(\n name=main_rights.DEFAULT_GROUP\n )\n\n # Get explore keyword permissions\n explore_access_perm = permission.objects.get(\n codename=explore_keyword_rights.EXPLORE_KEYWORD_ACCESS\n )\n\n # Add permissions to default group\n default_group.permissions.add(explore_access_perm)\n except Exception as exception:\n logger.error(\n \"Impossible to init explore_keyword permissions: %s\"\n % str(exception)\n )", "def __init__(self, **kwargs):\n super(User, self).__init__(**kwargs)\n Role.insert_roles()\n if self.role is None:\n if self.email == current_app.config[\"FLASKY_ADMIN\"]:\n self.role = Role.query.filter_by(permissions=0xff).first()\n if self.role is None:\n self.role = Role.query.filter_by(default=True).first()\n # self.image_url = photos.url(\"user/default.png\")\n self.image_url = self.avatar(128)", "def setup(bot):\n bot.add_cog(RoleManager(bot))", "def init_permissions(apps):\n try:\n group = apps.get_model(\"auth\", \"Group\")\n permission = apps.get_model(\"auth\", \"Permission\")\n\n # Get or Create the default group\n default_group, created = 
group.objects.get_or_create(name=main_rights.default_group)\n\n # Get explore keyword permissions\n explore_access_perm = permission.objects.get(codename=explore_keyword_rights.explore_keyword_access)\n\n # add permissions to default group\n default_group.permissions.add(explore_access_perm)\n except Exception, e:\n print('ERROR : Impossible to init the permissions for core_explore_keyword_app : ' + e.message)", "def setup_test_role(self):\n self.test_role = rand_name('role')\n resp, self.role = self.client.create_role(self.test_role)\n self.roles.append(self.role)", "def init_permissions(apps):\n try:\n group = apps.get_model(\"auth\", \"Group\")\n permission = apps.get_model(\"auth\", \"Permission\")\n\n # Get or Create the default group\n default_group, created = group.objects.get_or_create(name=main_rights.default_group)\n\n # Get explore example permissions\n explore_access_perm = permission.objects.get(codename=explore_example_rights.explore_example_access)\n explore_save_query_perm = permission.objects.get(codename=explore_example_rights.explore_example_save_query)\n explore_delete_query_perm = permission.objects.get(codename=explore_example_rights.explore_example_delete_query)\n\n # add permissions to default group\n default_group.permissions.add(explore_access_perm,\n explore_save_query_perm,\n explore_delete_query_perm)\n except Exception, e:\n print('ERROR : Impossible to init the permissions : ' + e.message)", "def __init__(self, roles, role):\n self._roles = roles\n self._requestor = self._roles._requestor\n self._id = role[\"id\"]\n self._name = role[\"name\"]\n self._description = role[\"description\"]\n self._data = role", "async def _set_roles(self, ctx: Context):\n\n guild: discord.Guild = ctx.guild\n\n host = await guild.create_role(\n name=\"Host\", colour=discord.Color(0xFFBF37),\n hoist=True, mentionable=True\n )\n await self.config.guild(guild).host_id.set(host.id)\n await ctx.author.add_roles(host)\n\n player = await guild.create_role(\n name=\"Player\", colour=discord.Color(0x37BFFF),\n hoist=True, mentionable=True\n )\n await self.config.guild(guild).player_id.set(player.id)\n\n repl = await guild.create_role(\n name=\"Replacement\", colour=discord.Color(0x86FF40)\n )\n await self.config.guild(guild).repl_id.set(repl.id)\n\n spec = await guild.create_role(\n name=\"Spectator\", colour=discord.Color(0xD837FF)\n )\n await self.config.guild(guild).spec_id.set(spec.id)\n\n dead = await guild.create_role(\n name=\"Dead\", colour=discord.Color(0xDC5757)\n )\n await self.config.guild(guild).dead_id.set(dead.id)\n\n txt = _(\n \"Host: {}\"\n \"\\nPlayer: {}\"\n \"\\nSpectator: {}\"\n \"\\nDead: {}\"\n \"\\nReplacement: {}\"\n ).format(\n host.mention,\n player.mention,\n spec.mention,\n dead.mention,\n repl.mention\n )\n\n embed = discord.Embed(\n color=0x37BFFF, title=\"Created Roles!\", description=txt\n )\n\n try:\n await ctx.send(embed=embed)\n except discord.Forbidden:\n await ctx.send(\"Set up required roles!\")", "def set_permissions(self, role):\n if role == User.ROLE_ADMIN:\n for perm in permissions.admin_permissions():\n self.user_permissions.add(perm)\n elif role == User.ROLE_MANAGER:\n for perm in permissions.manager_permissions():\n self.user_permissions.add(perm)\n elif role == User.ROLE_SUB_MANAGER:\n for perm in permissions.sub_manager_permissions():\n self.user_permissions.add(perm)\n else:\n for perm in permissions.user_permissions():\n self.user_permissions.add(perm)", "def __init__(self, **kwargs):\n super(User, self).__init__(**kwargs)\n\n # Set 
default role for a regular new User\n self.role = Role.query.filter_by(default=True).first()", "def setup(self):\n # TODO: refactor database cleanup\n with gus.config.get_db_conn().cursor() as c:\n c.execute(\"TRUNCATE TABLE chef_roles, chef_roles_xref_projects CASCADE\")\n self.role_name = 'www'\n self.role_id = chef_role.create(self.role_name, True)", "def insert_roles():\n roles = {\n 'User': [Permission.CRUD_OWNED],\n 'Usermanager': [Permission.CRUD_OWNED, Permission.CRUD_USERS],\n 'Administrator': [Permission.CRUD_OWNED, Permission.CRUD_USERS,\n Permission.ADMIN],\n }\n default_role = 'User'\n for rol in roles:\n role = Role.query.filter_by(name=rol).first()\n if role is None:\n role = Role(name=rol)\n role.reset_permissions()\n for perm in roles[rol]:\n role.add_permission(perm)\n role.default = (role.name == default_role)\n db.session.add(role)\n db.session.commit()", "def init_role(self, role_name, perms) -> None:\n warnings.warn(\n \"`init_role` has been deprecated. Please use `bulk_sync_roles` instead.\",\n RemovedInAirflow3Warning,\n stacklevel=2,\n )\n self.bulk_sync_roles([{\"role\": role_name, \"perms\": perms}])", "def seed_permissions_roles(course_key):\r\n administrator_role = _save_forum_role(course_key, \"Administrator\")\r\n moderator_role = _save_forum_role(course_key, \"Moderator\")\r\n community_ta_role = _save_forum_role(course_key, \"Community TA\")\r\n student_role = _save_forum_role(course_key, \"Student\")\r\n\r\n for per in _STUDENT_ROLE_PERMISSIONS:\r\n student_role.add_permission(per)\r\n\r\n for per in _MODERATOR_ROLE_PERMISSIONS:\r\n moderator_role.add_permission(per)\r\n\r\n for per in _ADMINISTRATOR_ROLE_PERMISSIONS:\r\n administrator_role.add_permission(per)\r\n\r\n moderator_role.inherit_permissions(student_role)\r\n\r\n # For now, Community TA == Moderator, except for the styling.\r\n community_ta_role.inherit_permissions(moderator_role)\r\n\r\n administrator_role.inherit_permissions(moderator_role)", "def init():\n click.secho(\"[+] Initialize permissions\", fg=\"cyan\")\n init_permissions()\n click.secho(\"[+] Initialize permissions successfully\", fg=\"green\")", "def create_custom_permissions(self) -> None:\n self.add_permission_view_menu(\"all_datasource_access\", \"all_datasource_access\")\n self.add_permission_view_menu(\"all_database_access\", \"all_database_access\")\n self.add_permission_view_menu(\"all_query_access\", \"all_query_access\")\n self.add_permission_view_menu(\"can_share_dashboard\", \"Superset\")\n self.add_permission_view_menu(\"can_share_chart\", \"Superset\")", "def test03_perm_roles(self):\n print_ln('test16_perm_roles')\n \n try:\n pList = review.find_perms(Perm(obj_name='py-obj*', op_name='*'))\n for perm in pList: \n print_ln(\"Role Perm obj name=\" + perm.obj_name + ', op=' + perm.op_name + ', id=' + perm.obj_id)\n rList = review.perm_roles(perm)\n for role in rList:\n print_ln(\"Assigned role=\" + role, 1)\n except Exception as e:\n self.fail('test16_perm_roles failed, exception=' + e.msg)", "def configure_roles(self, options):\n roles = [\n {\n 'name': 'orchestrate.devOps',\n 'title': 'Orchestrate DevOps',\n 'description': (\n 'Orchestrate the creation and lifecycle of all resources'\n ' available to and created by users.'\n ),\n 'includedPermissions': [\n 'compute.acceleratorTypes.list',\n 'compute.images.list',\n 'compute.images.get',\n 'compute.images.create',\n 'compute.images.delete',\n 'compute.images.getFromFamily',\n 'compute.images.useReadOnly',\n 'compute.instanceTemplates.list',\n 
'compute.instanceTemplates.get',\n 'compute.instanceTemplates.create',\n 'compute.instanceTemplates.delete',\n 'compute.instances.list',\n 'compute.instances.get',\n 'compute.instances.create',\n 'compute.instances.delete',\n 'compute.instances.setDeletionProtection',\n 'compute.instances.setLabels',\n 'compute.instances.setMetadata',\n 'compute.instances.setServiceAccount',\n 'compute.instances.setTags',\n 'compute.instances.stop',\n 'compute.disks.create',\n 'compute.disks.useReadOnly',\n 'compute.networks.get',\n 'compute.networks.addPeering',\n 'compute.networks.updatePolicy',\n 'compute.subnetworks.get',\n 'compute.subnetworks.use',\n 'compute.subnetworks.useExternalIp',\n 'compute.globalOperations.get',\n 'compute.regionOperations.get',\n 'compute.zoneOperations.get'\n ],\n 'stage': 'ALPHA'\n },\n {\n 'name': 'orchestrate.resourceManager',\n 'title': 'Orchestrate Resource Manager',\n 'description': (\n 'Create instance templates, instances, and manage the lifecycle'\n ' of resources created by users.'\n ),\n 'includedPermissions': [\n 'compute.acceleratorTypes.list',\n 'compute.images.list',\n 'compute.images.get',\n 'compute.images.create',\n 'compute.images.delete',\n 'compute.images.getFromFamily',\n 'compute.images.useReadOnly',\n 'compute.instanceTemplates.list',\n 'compute.instanceTemplates.get',\n 'compute.instanceTemplates.create',\n 'compute.instanceTemplates.delete',\n 'compute.instances.list',\n 'compute.instances.get',\n 'compute.instances.create',\n 'compute.instances.delete',\n 'compute.instances.setLabels',\n 'compute.instances.setMetadata',\n 'compute.instances.setServiceAccount',\n 'compute.instances.setTags',\n 'compute.instances.stop',\n 'compute.disks.create',\n 'compute.subnetworks.use',\n 'compute.subnetworks.useExternalIp'\n ],\n 'stage': 'ALPHA'\n },\n {\n 'name': 'orchestrate.user',\n 'title': 'Orchestrate User',\n 'description': (\n 'Create instances and do basic lifecycle management of'\n ' resources they own.'\n ),\n 'includedPermissions': [\n 'compute.instanceTemplates.list',\n 'compute.instanceTemplates.get',\n 'compute.instances.list',\n 'compute.instances.get',\n 'compute.instances.create',\n 'compute.instances.delete',\n 'compute.instances.setDeletionProtection',\n 'compute.instances.setLabels',\n 'compute.instances.setMetadata',\n 'compute.instances.setServiceAccount',\n 'compute.instances.setTags',\n 'compute.instances.stop',\n 'compute.disks.create',\n 'compute.subnetworks.use',\n 'compute.subnetworks.useExternalIp'\n ],\n 'stage': 'ALPHA'\n }\n ]\n for role in roles:\n permissions = ','.join(role['includedPermissions'])\n try:\n # Try to create first\n command = (\n 'gcloud iam roles create {name} --project={project}'\n ' --title=\"{title}\" --description=\"{description}\"'\n ' --permissions={permissions} --stage={stage} --quiet').format(\n project=options.project,\n permissions=permissions,\n **role,\n )\n log.debug(command)\n subprocess.check_call(command, shell=True)\n except subprocess.CalledProcessError as exception:\n # if it fails, then try to update\n command = (\n 'gcloud iam roles update {name} --project={project}'\n ' --title=\"{title}\" --description=\"{description}\"'\n ' --permissions={permissions} --stage={stage} --quiet').format(\n project=options.project,\n permissions=permissions,\n **role,\n )\n run(command)", "async def roles(self, ctx):\n\n pass", "def setup_roles_and_persona(self):\n logging.info('Setting up roles, orders, persona.')\n self.end_onboarding_state()\n self.broadcast_apprentice_persona('') # clear onboarding 
persona\n starting_role = self.assign_roles()\n self.send_wizard_persona_emphasize_message()\n self.selected_persona = self.apprentice_choose_persona()\n self.broadcast_apprentice_persona(self.selected_persona)\n self.send_time_length_info()\n self.send_starter_instruction(starting_role)", "def setup_general():\n Role.insert_roles()\n admin_query = Role.query.filter_by(name='Administrator')\n if admin_query.first() is not None:\n if Employee.query.filter_by(email=Config.ADMIN_EMAIL).first() is None:\n user = Employee(first_name='Admin',\n last_name='Account',\n password=Config.ADMIN_PASSWORD,\n email=Config.ADMIN_EMAIL)\n db.session.add(user)\n db.session.commit()\n print('Added administrator {}'.format(user.full_name()))" ]
[ "0.71640617", "0.69580996", "0.6330744", "0.62119347", "0.61950326", "0.6079152", "0.60608494", "0.6044329", "0.60235196", "0.59742445", "0.59428924", "0.59159476", "0.5888189", "0.5868204", "0.58649665", "0.5847457", "0.58105373", "0.580332", "0.57923406", "0.5730404", "0.5726222", "0.5667738", "0.56540954", "0.56242865", "0.56025684", "0.5581107", "0.55713135", "0.556144", "0.55220443", "0.55103356" ]
0.7069346
1
Sync permissions for a given dag id. The dag id is guaranteed to exist in our dag bag, as only the / refresh button or the DagBag will call this function.
def sync_perm_for_dag(
    self,
    dag_id: str,
    access_control: dict[str, Collection[str]] | None = None,
) -> None:
    dag_resource_name = permissions.resource_name_for_dag(dag_id)
    for dag_action_name in self.DAG_ACTIONS:
        self.create_permission(dag_action_name, dag_resource_name)

    if access_control is not None:
        self.log.info("Syncing DAG-level permissions for DAG '%s'", dag_resource_name)
        self._sync_dag_view_permissions(dag_resource_name, access_control)
    else:
        self.log.info(
            "Not syncing DAG-level permissions for DAG '%s' as access control is unset.",
            dag_resource_name,
        )
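When an access_control mapping is supplied, the heavy lifting and validation happen in _sync_dag_view_permissions (its full body appears among the negatives below). A minimal standalone sketch of just the validation step, assuming the same three DAG actions and using ValueError in place of AirflowException:

DAG_ACTIONS = {"can_read", "can_edit", "can_delete"}  # assumption: mirrors self.DAG_ACTIONS


def validate_access_control(dag_id, access_control, known_roles):
    """Raise ValueError if the mapping names an unknown role or an invalid DAG action."""
    for role_name, action_names in access_control.items():
        if role_name not in known_roles:
            raise ValueError(f"access_control for DAG '{dag_id}' names unknown role '{role_name}'")
        invalid = set(action_names) - DAG_ACTIONS
        if invalid:
            raise ValueError(f"access_control for DAG '{dag_id}' includes invalid permissions: {invalid}")


validate_access_control("example_dag", {"Viewer": {"can_read"}}, known_roles={"Viewer", "Op"})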
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _sync_dag_view_permissions(self, dag_id: str, access_control: dict[str, Collection[str]]) -> None:\n dag_resource_name = permissions.resource_name_for_dag(dag_id)\n\n def _get_or_create_dag_permission(action_name: str) -> Permission | None:\n perm = self.get_permission(action_name, dag_resource_name)\n if not perm:\n self.log.info(\"Creating new action '%s' on resource '%s'\", action_name, dag_resource_name)\n perm = self.create_permission(action_name, dag_resource_name)\n\n return perm\n\n def _revoke_stale_permissions(resource: Resource):\n existing_dag_perms = self.get_resource_permissions(resource)\n for perm in existing_dag_perms:\n non_admin_roles = [role for role in perm.role if role.name != \"Admin\"]\n for role in non_admin_roles:\n target_perms_for_role = access_control.get(role.name, ())\n if perm.action.name not in target_perms_for_role:\n self.log.info(\n \"Revoking '%s' on DAG '%s' for role '%s'\",\n perm.action,\n dag_resource_name,\n role.name,\n )\n self.remove_permission_from_role(role, perm)\n\n resource = self.get_resource(dag_resource_name)\n if resource:\n _revoke_stale_permissions(resource)\n\n for rolename, action_names in access_control.items():\n role = self.find_role(rolename)\n if not role:\n raise AirflowException(\n f\"The access_control mapping for DAG '{dag_id}' includes a role named \"\n f\"'{rolename}', but that role does not exist\"\n )\n\n action_names = set(action_names)\n invalid_action_names = action_names - self.DAG_ACTIONS\n if invalid_action_names:\n raise AirflowException(\n f\"The access_control map for DAG '{dag_resource_name}' includes \"\n f\"the following invalid permissions: {invalid_action_names}; \"\n f\"The set of valid permissions is: {self.DAG_ACTIONS}\"\n )\n\n for action_name in action_names:\n dag_perm = _get_or_create_dag_permission(action_name)\n if dag_perm:\n self.add_permission_to_role(role, dag_perm)", "def create_dag_specific_permissions(self) -> None:\n perms = self.get_all_permissions()\n dagbag = DagBag(read_dags_from_db=True)\n dagbag.collect_dags_from_db()\n dags = dagbag.dags.values()\n\n for dag in dags:\n root_dag_id = dag.parent_dag.dag_id if dag.parent_dag else dag.dag_id\n dag_resource_name = permissions.resource_name_for_dag(root_dag_id)\n for action_name in self.DAG_ACTIONS:\n if (action_name, dag_resource_name) not in perms:\n self._merge_perm(action_name, dag_resource_name)\n\n if dag.access_control:\n self.sync_perm_for_dag(dag_resource_name, dag.access_control)", "def can_edit_dag(self, dag_id: str, user=None) -> bool:\n root_dag_id = self._get_root_dag_id(dag_id)\n dag_resource_name = permissions.resource_name_for_dag(root_dag_id)\n return self.has_access(permissions.ACTION_CAN_EDIT, dag_resource_name, user=user)", "def can_delete_dag(self, dag_id: str, user=None) -> bool:\n root_dag_id = self._get_root_dag_id(dag_id)\n dag_resource_name = permissions.resource_name_for_dag(root_dag_id)\n return self.has_access(permissions.ACTION_CAN_DELETE, dag_resource_name, user=user)", "def can_access_some_dags(self, action: str, dag_id: str | None = None) -> bool:\n if dag_id and dag_id != \"~\":\n root_dag_id = self._get_root_dag_id(dag_id)\n return self.has_access(action, permissions.resource_name_for_dag(root_dag_id))\n\n user = g.user\n if action == permissions.ACTION_CAN_READ:\n return any(self.get_readable_dag_ids(user))\n return any(self.get_editable_dag_ids(user))", "def can_read_dag(self, dag_id: str, user=None) -> bool:\n root_dag_id = self._get_root_dag_id(dag_id)\n dag_resource_name = 
permissions.resource_name_for_dag(root_dag_id)\n return self.has_access(permissions.ACTION_CAN_READ, dag_resource_name, user=user)", "def check_authorization(\n self,\n perms: Sequence[tuple[str, str]] | None = None,\n dag_id: str | None = None,\n ) -> bool:\n if not perms:\n return True\n\n for perm in perms:\n if perm in (\n (permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),\n (permissions.ACTION_CAN_EDIT, permissions.RESOURCE_DAG),\n (permissions.ACTION_CAN_DELETE, permissions.RESOURCE_DAG),\n ):\n can_access_all_dags = self.has_access(*perm)\n if can_access_all_dags:\n continue\n\n action = perm[0]\n if self.can_access_some_dags(action, dag_id):\n continue\n return False\n\n elif not self.has_access(*perm):\n return False\n\n return True", "def prefixed_dag_id(self, dag_id: str) -> str:\n warnings.warn(\n \"`prefixed_dag_id` has been deprecated. \"\n \"Please use `airflow.security.permissions.resource_name_for_dag` instead.\",\n RemovedInAirflow3Warning,\n stacklevel=2,\n )\n root_dag_id = self._get_root_dag_id(dag_id)\n return permissions.resource_name_for_dag(root_dag_id)", "def tenant_permissions(self, permission_id, data, tenant_id=None, api_version=\"v2.0\"):\n\n if tenant_id is None and self._parent_class.tenant_id:\n # Pull tenant_id from parent namespace cache.\n tenant_id = self._parent_class.tenant_id\n elif not tenant_id:\n # No value for tenant_id.\n raise TypeError(\"tenant_id is required but not set or cached.\")\n cur_ctlr = self._parent_class.controller\n\n url = str(cur_ctlr) + \"/{}/api/tenants/{}/permissions/{}\".format(api_version,\n tenant_id,\n permission_id)\n\n api_logger.debug(\"URL = %s\", url)\n return self._parent_class.rest_call(url, \"put\", data=data)", "def update_admin_permission(self) -> None:\n session = self.appbuilder.get_session\n dag_resources = session.scalars(\n select(Resource).where(Resource.name.like(f\"{permissions.RESOURCE_DAG_PREFIX}%\"))\n )\n resource_ids = [resource.id for resource in dag_resources]\n\n perms = session.scalars(select(Permission).where(~Permission.resource_id.in_(resource_ids)))\n perms = [p for p in perms if p.action and p.resource]\n\n admin = self.find_role(\"Admin\")\n admin.permissions = list(set(admin.permissions) | set(perms))\n\n session.commit()", "def can_edit_all_dags(self, user=None) -> bool:\n return self.has_access(permissions.ACTION_CAN_EDIT, permissions.RESOURCE_DAG, user)", "def permissions(self, account_id):\n from pureport_client.commands.accounts.permissions import Command\n return Command(self.client, account_id)", "def enable_storage_permission(sdk_path, device_id, dest_dir, package_name):\n\n print 'Starting storage permission grant'\n perm_command = [os.path.join(sdk_path, 'platform-tools',\n 'adb'),\n '-s', device_id,\n 'shell',\n 'pm', 'grant', package_name,\n 'android.permission.WRITE_EXTERNAL_STORAGE']\n print perm_command\n log_file_path = os.path.join(dest_dir, 'logs', 'enable_storage_perm.log')\n with open(log_file_path, 'w') as log_file:\n try:\n subprocess.call(perm_command,\n stdout=log_file,\n stderr=subprocess.STDOUT,\n shell=False)\n except OSError:\n print 'ERROR executing permission grant.'", "def permissionContextById(self, id: str) -> PermissionContext:", "def all_perms(self, id, **kwargs):\r\n p = self.db.auth_permission\r\n if self.all_permissions:\r\n ret = self.sql(\r\n (p.record_id == id) & (p.table_name == self.table._tablename) & p.name.belongs(self.all_permissions),\r\n p.name, p.group_id,\r\n orderby=p.group_id)\r\n else:\r\n ret = []\r\n current.response.text = 
ret\r\n return ret", "def get_dag(self, config: cconfig.Config, dag: Optional[DAG] = None) -> DAG:", "def submit_dag(config, dag_file):\n with SUBMIT_LOCK:\n try:\n condor_dag_cmd = osp.join(get_condor_bin_dir(config),\n CONDOR_COMMAND['dag'])\n\n pipe = subprocess.Popen(args=(condor_dag_cmd, '-force', dag_file),\n stdin=subprocess.PIPE,\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT)\n\n output = pipe.stdout.read()\n status = pipe.wait()\n return status, output\n except OSError, exc:\n return -1, str(exc)", "def create_perm_vm_for_all_dag(self) -> None:\n # create perm for global logical dag\n for resource_name in self.DAG_RESOURCES:\n for action_name in self.DAG_ACTIONS:\n self._merge_perm(action_name, resource_name)", "def enable_dump_permission(sdk_path, device_id, dest_dir, package_name):\n\n print 'Starting dump permission grant'\n perm_command = [os.path.join(sdk_path, 'platform-tools', 'adb'),\n '-s', device_id,\n 'shell',\n 'pm', 'grant', package_name,\n 'android.permission.DUMP']\n print perm_command\n log_file_path = os.path.join(dest_dir, 'logs', 'enable_dump_perm.log')\n with open(log_file_path, 'w') as log_file:\n try:\n subprocess.call(perm_command,\n stdout=log_file,\n stderr=subprocess.STDOUT,\n shell=False)\n except OSError:\n print 'ERROR executing permission grant.'", "async def permissions(self, ctx, *, channel_id: int = None):\n\n if not await ctx.is_co_owner() and channel_id is not None:\n return await ctx.error('Only co-owners of the bot can specify channel')\n\n channel = ctx.get(ctx.bot.get_all_channels(), id=channel_id)\n guild = channel.guild if channel else ctx.guild\n channel = channel or ctx.channel\n guild_perms = guild.me.guild_permissions\n chan_perms = channel.permissions_for(guild.me)\n req_perms = ctx.bot.req_perms\n\n embed = await ctx.info('Bot Permissions', send=False)\n\n wrap = functools.partial(textwrap.wrap, width=20)\n names = [wrap(channel.name), wrap(guild.name)]\n if channel.category:\n names.append(wrap(channel.category.name))\n name_len = max(len(n) for n in names)\n names = [same_len(n, name_len) for n in names]\n chan_msg = [f\"**{names[0]}**\\n{channel.id}\\n\"]\n guild_msg = [f\"**{names[1]}**\\n{guild.id}\\n\", perms_result(guild_perms, req_perms)]\n chan_msg.append(perms_result(chan_perms, req_perms))\n embed.add_field(name='GUILD', value='\\n'.join(guild_msg))\n if channel.category:\n cat_perms = channel.category.permissions_for(guild.me)\n cat_msg = [f\"**{names[2]}**\\n{channel.category.id}\\n\", perms_result(cat_perms, req_perms)]\n embed.add_field(name='CATEGORY', value='\\n'.join(cat_msg))\n embed.add_field(name='CHANNEL', value='\\n'.join(chan_msg))\n\n try:\n await ctx.send(embed=embed)\n except discord.errors.Forbidden:\n # didn't have permissions to send a message with an embed\n try:\n msg = \"I couldn't send an embed here, so I've sent you a DM\"\n await ctx.send(msg)\n except discord.errors.Forbidden:\n # didn't have permissions to send a message at all\n pass\n await ctx.author.send(embed=embed)", "def permission(guild_id: int, permissions: list):\n\n def wrapper(cmd):\n if not getattr(cmd, \"__permissions__\", None):\n cmd.__permissions__ = {}\n cmd.__permissions__[guild_id] = permissions\n return cmd\n\n return wrapper", "def set_permissions(self, id, permissions, _check_permissions=True):\r\n auth = self.app.auth\r\n pt = self.db.auth_permission\r\n actual_permissions = dict((gid, set(imap(ig1, g))) for gid, g in groupby(\r\n self.sql((pt.record_id == id) & (pt.table_name == self.table._tablename), pt.group_id, 
pt.name, as_dict=False,\r\n orderby=pt.group_id), ig0))\r\n minimals = set(map(attrgetter('permission'), self.inv_minimals))\r\n permissions = dict((int(k), set(x for x, y in p.iteritems() if y) if type(p) is dict else p) for k, p in\r\n permissions.iteritems())\r\n\r\n changed_groups = set()\r\n minimal_changed = {}\r\n for group_id, perms in permissions.items():\r\n for perm_name in self.all_permissions:\r\n val = perm_name in perms\r\n if val != (perm_name in actual_permissions.get(group_id, empty)):\r\n (auth.add_permission if val else auth.del_permission)(\r\n group_id=group_id,\r\n name=perm_name,\r\n table_name=self.table._tablename,\r\n record_id=id,\r\n )\r\n changed_groups.add(group_id)\r\n if perm_name in minimals:\r\n minimal_changed.setdefault(perm_name, {})[group_id] = val\r\n\r\n minimal_permissions = dict((p, set()) for p in minimal_changed)\r\n for p in (p for p in self.inv_minimals if p.permission in minimal_permissions):\r\n minimal_permissions[p.permission].add(p)\r\n\r\n if self.realtime_enabled:\r\n related_objects = {}\r\n if minimal_changed:\r\n for perm_name, groups in minimal_changed.iteritems():\r\n rel_perm = {}\r\n for permission in minimal_permissions[perm_name]:\r\n related = permission.get_objects(id)\r\n if related:\r\n rel_perm.setdefault(permission.resource, []).extend(related)\r\n if perm_name not in related_objects:\r\n related_objects[perm_name] = {}\r\n related_objects[perm_name] = rel_perm\r\n\r\n if reduce(set.union, related_objects.itervalues(), set()):\r\n delete_message = {}\r\n insert_message = {}\r\n for perm, rel_objs in related_objects.iteritems():\r\n del_message = delete_message.get(perm, [])\r\n ins_message = insert_message.get(perm, [])\r\n for resource, items in rel_objs.iteritems():\r\n del_message.extend([[resource.name, 'deleted', id] for id in imap(itemgetter('id'), items)])\r\n ins_message.extend([[resource.name, 'results', item] for item in items])\r\n delete_message[perm] = del_message\r\n insert_message[perm] = ins_message\r\n\r\n # send realtime XOR\r\n for perm_name in minimal_permissions:\r\n positive_groups = set(k for k, p in permissions.iteritems() if perm_name in p)\r\n negative_groups = set(k for k, p in permissions.iteritems() if perm_name not in p)\r\n # if positive_groups and negative_groups:\r\n current.rt_permissions.append(('send_xor', self.app, positive_groups, negative_groups,\r\n insert_message[perm_name], delete_message[perm_name]))\r\n # elif positive_groups:\r\n # rt_command('send_groups',insert_message[perm_name],map(int,positive_groups))\r\n # else:\r\n # rt_command('send_groups',delete_message[perm_name],map(int,negative_groups))\r\n\r\n for g, p in self.zero_permissions.iteritems():\r\n actual_permissions.setdefault(g, set()).update(p)\r\n permissions.setdefault(g, set()).update(p)\r\n\r\n if id in map(itemgetter('id'),\r\n imap(ig2, ifilter(lambda x: x[0] == 'inserted' and x[1] == self, current.update_log))):\r\n actual_permissions = {}\r\n\r\n current.rt_permissions.append(['send_permissions',self.app, self.name, id, permissions, actual_permissions])\r\n # rt_command('send_permissions',self.name,id,permissions,actual_permissions)\r\n\r\n # self.db.commit()\r\n ret = self.my_perms([id])\r\n current.response.text = ret\r\n return ret", "def set_permission(StackId=None, IamUserArn=None, AllowSsh=None, AllowSudo=None, Level=None):\n pass", "def update_tag_group_acl(session, tag_id=None, group_id=None,\n allow_install=False, allow_uninstall=False, allow_reboot=False,\n allow_schedule=False, allow_wol=False, 
allow_snapshot_creation=False,\n allow_snapshot_removal=False, allow_snapshot_revert=False,\n allow_tag_creation=False, allow_tag_removal=False, allow_read=False,\n date_modified=datetime.now(), username='system_user'\n ):\n session = validate_session(session)\n group = None\n\n if group_id and tag_id:\n group = session.query(TagGroupAccess).\\\n filter(TagGroupAccess.group_id == group_id).\\\n filter(TagGroupAccess.tag_id == tag_id).first()\n if group:\n try:\n group.allow_install = allow_install\n group.allow_uninstall = allow_uninstall\n group.allow_reboot = allow_reboot\n group.allow_schedule = allow_schedule\n group.allow_wol = allow_wol\n group.allow_snapshot_creation = allow_snapshot_creation\n group.allow_snapshot_removal = allow_snapshot_removal\n group.allow_snapshot_revert = allow_snapshot_revert\n group.allow_tag_creation = allow_tag_creation\n group.allow_tag_removal = allow_tag_removal\n group.allow_read = allow_read\n group.date_modified = date_modified\n session.commit()\n return({\n 'pass': True,\n 'message': 'ACL for Group %s was modified for Tag %s' % \\\n (group_id, tag_id)\n })\n except Exception as e:\n session.rollback()\n return({\n 'pass': False,\n 'message': 'Failed to modify ACL for Group %s on Tag %s' % \\\n (group_id, tag_id)\n })\n else:\n return({\n 'pass': False,\n 'message': 'Invalid group_id %s and or tag_id' % \\\n (group_id, tag_id)\n })", "def run(self, dag):\n\n # If the option commutative_analysis is set, construct DAGDependency from the given DAGCircuit.\n if self.do_commutative_analysis:\n dag = dag_to_dagdependency(dag)\n\n # call collect_function to collect blocks from DAG\n blocks = self.collect_function(dag)\n\n # call collapse_function to collapse each block in the DAG\n self.collapse_function(dag, blocks)\n\n # If the option commutative_analysis is set, construct back DAGCircuit from DAGDependency.\n if self.do_commutative_analysis:\n dag = dagdependency_to_dag(dag)\n\n return dag", "def test_update_module_perm_by_id(self):\n #Test something that really shouldn't be there\n with pytest.raises(DbException) as err:\n ModulePerm.update_module_perm_by_id(0,0)\n assert str(err.value) == \"(404, 'Permission not found.')\"\n\n perm = ModulePerm.get_module_perm_by_id(self.permList[0].id)\n assert perm.id == self.permList[0].id\n assert perm.user_id == self.permList[0].user_id\n assert perm.module_id == self.permList[0].module_id\n assert perm.permissions == self.permList[0].permissions\n\n assert ModulePerm.update_module_perm_by_id(self.permList[0].id, 11)\n perm = ModulePerm.get_module_perm_by_id(self.permList[0].id)\n assert perm.id == self.permList[0].id\n assert perm.user_id == self.permList[0].user_id\n assert perm.module_id == self.permList[0].module_id\n assert perm.permissions == 11\n\n assert ModulePerm.get_module_perm_by_id(0, False) == None", "def postprocess_dag(self, dag: networkx.DiGraph) -> networkx.DiGraph:\n return dag", "def sync_vm_rules(project_id=None):\n if not env.instance_uuid and project_id is None:\n error(\"No instance ID or project specified.\")\n if env.instance_uuid:\n uuid = env.instance_uuid\n nova_client = client()\n server = nova_client.servers.get(uuid)\n project_id = server.tenant_id\n with show('stdout', 'stderr'):\n run('nova-manage project sync_secgroups %s' % project_id)", "async def lock(ctx):\n member = ctx.message.author\n channel = ctx.message.channel\n\n if (channel.category.name in [\"beta\", \"staff\", \"Pi-Bot\"]):\n return await ctx.send(\"This command is not suitable for this channel because of its 
category.\")\n\n member_role = discord.utils.get(member.guild.roles, name=ROLE_MR)\n if (channel.category.name == CATEGORY_STATES):\n await ctx.channel.set_permissions(member_role, add_reactions=False, send_messages=False)\n else:\n await ctx.channel.set_permissions(member_role, add_reactions=False, send_messages=False, read_messages=True)\n\n wiki_role = discord.utils.get(member.guild.roles, name=ROLE_WM)\n gm_role = discord.utils.get(member.guild.roles, name=ROLE_GM)\n admin_role = discord.utils.get(member.guild.roles, name=ROLE_AD)\n bot_role = discord.utils.get(member.guild.roles, name=ROLE_BT)\n await ctx.channel.set_permissions(wiki_role, add_reactions=True, send_messages=True, read_messages=True)\n await ctx.channel.set_permissions(gm_role, add_reactions=True, send_messages=True, read_messages=True)\n await ctx.channel.set_permissions(admin_role, add_reactions=True, send_messages=True, read_messages=True)\n await ctx.channel.set_permissions(bot_role, add_reactions=True, send_messages=True, read_messages=True)\n await ctx.send(\"Locked the channel to Member access.\")", "def my_perms(self, ids, **kwargs):\r\n auth = self.app.auth\r\n # checking all objects\r\n p = self.db.auth_permission\r\n if type(ids) in (list, tuple, set):\r\n _ids = type(ids)((0,)) + ids\r\n else:\r\n _ids = [0, ids]\r\n grouped = self.db(p.record_id.belongs(_ids) & p.group_id.belongs(auth.user_groups.keys()) & (\r\n p.table_name == self.table._tablename)).select(p.name, p.record_id).group_by_value('record_id')\r\n take_names = itemgetter('name')\r\n base_permissions = set(imap(take_names, grouped.get(0, set())))\r\n ret = dict(PERMISSIONS={self.name: [\r\n dict((id, set(imap(take_names, grouped.get(id, []))).union(base_permissions)) for id in map(int, ids))]})\r\n current.response.text = ret\r\n return ret" ]
[ "0.69756424", "0.6454632", "0.607378", "0.5552861", "0.5497927", "0.5473163", "0.5398732", "0.51111424", "0.5103682", "0.50776064", "0.4782709", "0.47417465", "0.46969014", "0.46962237", "0.46481147", "0.46422902", "0.4602091", "0.45997083", "0.4581264", "0.45444247", "0.44694617", "0.4466372", "0.44529307", "0.44494498", "0.44142282", "0.44046387", "0.4391978", "0.43867108", "0.43584794", "0.4299202" ]
0.8022313
0
Set the access policy on the given DAG's ViewModel.
def _sync_dag_view_permissions(self, dag_id: str, access_control: dict[str, Collection[str]]) -> None: dag_resource_name = permissions.resource_name_for_dag(dag_id) def _get_or_create_dag_permission(action_name: str) -> Permission | None: perm = self.get_permission(action_name, dag_resource_name) if not perm: self.log.info("Creating new action '%s' on resource '%s'", action_name, dag_resource_name) perm = self.create_permission(action_name, dag_resource_name) return perm def _revoke_stale_permissions(resource: Resource): existing_dag_perms = self.get_resource_permissions(resource) for perm in existing_dag_perms: non_admin_roles = [role for role in perm.role if role.name != "Admin"] for role in non_admin_roles: target_perms_for_role = access_control.get(role.name, ()) if perm.action.name not in target_perms_for_role: self.log.info( "Revoking '%s' on DAG '%s' for role '%s'", perm.action, dag_resource_name, role.name, ) self.remove_permission_from_role(role, perm) resource = self.get_resource(dag_resource_name) if resource: _revoke_stale_permissions(resource) for rolename, action_names in access_control.items(): role = self.find_role(rolename) if not role: raise AirflowException( f"The access_control mapping for DAG '{dag_id}' includes a role named " f"'{rolename}', but that role does not exist" ) action_names = set(action_names) invalid_action_names = action_names - self.DAG_ACTIONS if invalid_action_names: raise AirflowException( f"The access_control map for DAG '{dag_resource_name}' includes " f"the following invalid permissions: {invalid_action_names}; " f"The set of valid permissions is: {self.DAG_ACTIONS}" ) for action_name in action_names: dag_perm = _get_or_create_dag_permission(action_name) if dag_perm: self.add_permission_to_role(role, dag_perm)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def sync_perm_for_dag(\n self,\n dag_id: str,\n access_control: dict[str, Collection[str]] | None = None,\n ) -> None:\n dag_resource_name = permissions.resource_name_for_dag(dag_id)\n for dag_action_name in self.DAG_ACTIONS:\n self.create_permission(dag_action_name, dag_resource_name)\n\n if access_control is not None:\n self.log.info(\"Syncing DAG-level permissions for DAG '%s'\", dag_resource_name)\n self._sync_dag_view_permissions(dag_resource_name, access_control)\n else:\n self.log.info(\n \"Not syncing DAG-level permissions for DAG '%s' as access control is unset.\",\n dag_resource_name,\n )", "def set_policy(self, name, policy):\n client = self.connect(VAULT_TOKEN)\n client.set_policy(name, policy)", "def set_perm(\n self, mapper: Mapper, connection: Connection, target: \"BaseDatasource\"\n ) -> None:\n try:\n target_get_perm = target.get_perm()\n except DatasetInvalidPermissionEvaluationException:\n logger.warning(\"Dataset has no database refusing to set permission\")\n return\n link_table = target.__table__\n if target.perm != target_get_perm:\n connection.execute(\n link_table.update()\n .where(link_table.c.id == target.id)\n .values(perm=target_get_perm)\n )\n target.perm = target_get_perm\n\n if (\n hasattr(target, \"schema_perm\")\n and target.schema_perm != target.get_schema_perm()\n ):\n connection.execute(\n link_table.update()\n .where(link_table.c.id == target.id)\n .values(schema_perm=target.get_schema_perm())\n )\n target.schema_perm = target.get_schema_perm()\n\n pvm_names = []\n if target.__tablename__ in {\"dbs\", \"clusters\"}:\n pvm_names.append((\"database_access\", target_get_perm))\n else:\n pvm_names.append((\"datasource_access\", target_get_perm))\n if target.schema:\n pvm_names.append((\"schema_access\", target.get_schema_perm()))\n\n # TODO(bogdan): modify slice permissions as well.\n for permission_name, view_menu_name in pvm_names:\n permission = self.find_permission(permission_name)\n view_menu = self.find_view_menu(view_menu_name)\n pv = None\n\n if not permission:\n permission_table = (\n self.permission_model.__table__ # pylint: disable=no-member\n )\n connection.execute(\n permission_table.insert().values(name=permission_name)\n )\n permission = self.find_permission(permission_name)\n self.on_permission_after_insert(mapper, connection, permission)\n if not view_menu:\n view_menu_table = (\n self.viewmenu_model.__table__ # pylint: disable=no-member\n )\n connection.execute(view_menu_table.insert().values(name=view_menu_name))\n view_menu = self.find_view_menu(view_menu_name)\n self.on_view_menu_after_insert(mapper, connection, view_menu)\n\n if permission and view_menu:\n pv = (\n self.get_session.query(self.permissionview_model)\n .filter_by(permission=permission, view_menu=view_menu)\n .first()\n )\n if not pv and permission and view_menu:\n permission_view_table = (\n self.permissionview_model.__table__ # pylint: disable=no-member\n )\n connection.execute(\n permission_view_table.insert().values(\n permission_id=permission.id, view_menu_id=view_menu.id\n )\n )\n permission = self.find_permission_view_menu(\n permission_name, view_menu_name\n )\n self.on_permission_view_after_insert(mapper, connection, permission)", "def setPolicy(self, value):\n return self._set(policy=value)", "def _set_restricted_policy(environ, bag):\n username = environ['tiddlyweb.usersign']['name']\n if username == 'GUEST':\n return\n bag.policy.owner = username\n # accept does not matter here\n for constraint in ['read', 'write', 'create', 'delete', 'manage']:\n 
setattr(bag.policy, constraint, [username])\n return", "def set(self, layer='', name='', uid='', params={}):\n return self.__common_client._post_with_layer('set-access-rule', layer, name, uid, params)", "def set_actor_policy(self, actor_policy):\n raise NotImplementedError", "def set_permission(sender, instance, created, **kwargs):\n if created:\n assign_perm(\n \"website.can_see\",\n instance.author,\n instance,\n )", "def set_target_policy(self, policy):\n self.target_policy = policy", "def update_policy(self, *args, **kwargs):\r\n pass", "def set_group_access(self, group, allowed_signals, allowed_controls):\n self._access_lists.set_group_access(group, allowed_signals, allowed_controls)", "def copy_access_level(apps, schema_editor):\n # We get the model from the versioned app registry;\n # if we directly import it, it will be the wrong version.\n State = apps.get_model(\"motions\", \"State\")\n for state in State.objects.all():\n if state.access_level == 3:\n state.restriction = [\"managers_only\"]\n elif state.access_level == 2:\n state.restriction = [\n \"motions.can_see_internal\",\n \"motions.can_manage_metadata\",\n ]\n elif state.access_level == 1:\n state.restriction = [\n \"motions.can_see_internal\",\n \"motions.can_manage_metadata\",\n \"is_submitter\",\n ]\n state.save(skip_autoupdate=True)", "def allow_access(self, context, share, access, share_server=None):\r\n LOG.debug(\"Allow access.\")\r\n self.helper._allow_access(share['name'], access, share['share_proto'])", "def set_access_policy( # pylint: disable=inconsistent-return-statements\n self,\n timeout: Optional[int] = None,\n lease_access_conditions: Optional[_models.LeaseAccessConditions] = None,\n share_acl: Optional[List[_models.SignedIdentifier]] = None,\n **kwargs: Any\n ) -> None:\n error_map = {\n 401: ClientAuthenticationError,\n 404: ResourceNotFoundError,\n 409: ResourceExistsError,\n 304: ResourceNotModifiedError,\n }\n error_map.update(kwargs.pop(\"error_map\", {}) or {})\n\n _headers = case_insensitive_dict(kwargs.pop(\"headers\", {}) or {})\n _params = case_insensitive_dict(kwargs.pop(\"params\", {}) or {})\n\n restype: Literal[\"share\"] = kwargs.pop(\"restype\", _params.pop(\"restype\", \"share\"))\n comp: Literal[\"acl\"] = kwargs.pop(\"comp\", _params.pop(\"comp\", \"acl\"))\n content_type: str = kwargs.pop(\"content_type\", _headers.pop(\"Content-Type\", \"application/xml\"))\n cls: ClsType[None] = kwargs.pop(\"cls\", None)\n\n _lease_id = None\n if lease_access_conditions is not None:\n _lease_id = lease_access_conditions.lease_id\n serialization_ctxt = {\"xml\": {\"name\": \"SignedIdentifiers\", \"wrapped\": True}}\n if share_acl is not None:\n _content = self._serialize.body(\n share_acl, \"[SignedIdentifier]\", is_xml=True, serialization_ctxt=serialization_ctxt\n )\n else:\n _content = None\n\n request = build_set_access_policy_request(\n url=self._config.url,\n timeout=timeout,\n lease_id=_lease_id,\n restype=restype,\n comp=comp,\n content_type=content_type,\n version=self._config.version,\n content=_content,\n template_url=self.set_access_policy.metadata[\"url\"],\n headers=_headers,\n params=_params,\n )\n request = _convert_request(request)\n request.url = self._client.format_url(request.url)\n\n _stream = False\n pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access\n request, stream=_stream, **kwargs\n )\n\n response = pipeline_response.http_response\n\n if response.status_code not in [200]:\n map_error(status_code=response.status_code, 
response=response, error_map=error_map)\n error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)\n raise HttpResponseError(response=response, model=error)\n\n response_headers = {}\n response_headers[\"ETag\"] = self._deserialize(\"str\", response.headers.get(\"ETag\"))\n response_headers[\"Last-Modified\"] = self._deserialize(\"rfc-1123\", response.headers.get(\"Last-Modified\"))\n response_headers[\"x-ms-request-id\"] = self._deserialize(\"str\", response.headers.get(\"x-ms-request-id\"))\n response_headers[\"x-ms-version\"] = self._deserialize(\"str\", response.headers.get(\"x-ms-version\"))\n response_headers[\"Date\"] = self._deserialize(\"rfc-1123\", response.headers.get(\"Date\"))\n\n if cls:\n return cls(pipeline_response, None, response_headers)", "def set_permmodel(dat, zonelist, index, permmodel_dict):\n perm_mod = fdata.fmodel('permmodel', index=index,\n zonelist=zonelist)\n # Set required permeability\n for key, value in permmodel_dict.iteritems():\n perm_mod.param[key] = value\n dat.add(perm_mod)\n return dat", "def update_policy(self):\n pass", "def _allow_access(self, context, share, access, share_server=None):\n if access['access_type'] != 'ip':\n raise exception.InvalidShareAccess(\n _('Quobyte driver only supports ip access control'))\n\n volume_uuid = self._resolve_volume_name(share['name'],\n share['project_id'])\n ro = access['access_level'] == (constants.ACCESS_LEVEL_RO)\n call_params = {\n \"volume_uuid\": volume_uuid,\n \"read_only\": ro,\n \"add_allow_ip\": access['access_to']}\n self.rpc.call('exportVolume', call_params)", "def allow_access(self, context, share, access, share_server=None):\n self._get_helper(share).allow_access('/', share, access)", "def UpdateAccessApprovalSettings(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details(\"Method not implemented!\")\n raise NotImplementedError(\"Method not implemented!\")", "def set_access_token(self, value: str) -> None:\n\n self.__requester.set_authorization(value)", "def network_access(self, network_access):\n\n self._network_access = network_access", "def allowed_topology_access_change(user, topology):\n try:\n up = user.get_profile()\n except AttributeError:\n return False\n\n return topology.owner == user or user.has_perm(\"vnswww.topology_change_any\") or (user.has_perm(\"vnswww.topology_change_org\") and up.org == topology.org)", "def access(self, value):\n self._access = value", "def update_policy(self):\n self._sess.run(self._hard_copy_to_target_op);", "def allow_access(course, user, level):\r\n _change_access(course, user, level, 'allow')", "def set_network_security_policy(options):\n network = get_network_on_vc(options)\n name = get_network_name(options)\n\n config_spec = Vim.Dvs.DistributedVirtualPortgroup.ConfigSpec()\n config_info = network.GetConfig()\n config_spec.description = config_info.name\n config_spec.name = name\n config_spec.configVersion = config_info.configVersion\n\n true_policy = Vim.BoolPolicy()\n true_policy.value = True\n dvs_port_setting = Vim.VMwareDVSPortSetting()\n security_policy = Vim.DVSSecurityPolicy()\n security_policy.allowPromiscuous = true_policy\n security_policy.forgedTransmits = true_policy\n security_policy.macChanges = true_policy\n security_policy.inherited = False\n dvs_port_setting.securityPolicy = security_policy\n config_spec.defaultPortConfig = dvs_port_setting\n\n network.ReconfigureDVPortgroup_Task(config_spec)", "def set_permission(StackId=None, IamUserArn=None, AllowSsh=None, 
AllowSudo=None, Level=None):\n pass", "async def set_access_policy(\n self,\n timeout: Optional[int] = None,\n access: Optional[Union[str, \"_models.PublicAccessType\"]] = None,\n request_id_parameter: Optional[str] = None,\n container_acl: Optional[List[\"_models.SignedIdentifier\"]] = None,\n lease_access_conditions: Optional[\"_models.LeaseAccessConditions\"] = None,\n modified_access_conditions: Optional[\"_models.ModifiedAccessConditions\"] = None,\n **kwargs\n ) -> None:\n cls = kwargs.pop('cls', None) # type: ClsType[None]\n error_map = {\n 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError\n }\n error_map.update(kwargs.pop('error_map', {}))\n \n _lease_id = None\n _if_modified_since = None\n _if_unmodified_since = None\n if lease_access_conditions is not None:\n _lease_id = lease_access_conditions.lease_id\n if modified_access_conditions is not None:\n _if_modified_since = modified_access_conditions.if_modified_since\n _if_unmodified_since = modified_access_conditions.if_unmodified_since\n restype = \"container\"\n comp = \"acl\"\n content_type = kwargs.pop(\"content_type\", \"application/xml\")\n accept = \"application/xml\"\n\n # Construct URL\n url = self.set_access_policy.metadata['url'] # type: ignore\n path_format_arguments = {\n 'url': self._serialize.url(\"self._config.url\", self._config.url, 'str', skip_quote=True),\n }\n url = self._client.format_url(url, **path_format_arguments)\n\n # Construct parameters\n query_parameters = {} # type: Dict[str, Any]\n query_parameters['restype'] = self._serialize.query(\"restype\", restype, 'str')\n query_parameters['comp'] = self._serialize.query(\"comp\", comp, 'str')\n if timeout is not None:\n query_parameters['timeout'] = self._serialize.query(\"timeout\", timeout, 'int', minimum=0)\n\n # Construct headers\n header_parameters = {} # type: Dict[str, Any]\n if _lease_id is not None:\n header_parameters['x-ms-lease-id'] = self._serialize.header(\"lease_id\", _lease_id, 'str')\n if access is not None:\n header_parameters['x-ms-blob-public-access'] = self._serialize.header(\"access\", access, 'str')\n if _if_modified_since is not None:\n header_parameters['If-Modified-Since'] = self._serialize.header(\"if_modified_since\", _if_modified_since, 'rfc-1123')\n if _if_unmodified_since is not None:\n header_parameters['If-Unmodified-Since'] = self._serialize.header(\"if_unmodified_since\", _if_unmodified_since, 'rfc-1123')\n header_parameters['x-ms-version'] = self._serialize.header(\"self._config.version\", self._config.version, 'str')\n if request_id_parameter is not None:\n header_parameters['x-ms-client-request-id'] = self._serialize.header(\"request_id_parameter\", request_id_parameter, 'str')\n header_parameters['Content-Type'] = self._serialize.header(\"content_type\", content_type, 'str')\n header_parameters['Accept'] = self._serialize.header(\"accept\", accept, 'str')\n\n body_content_kwargs = {} # type: Dict[str, Any]\n serialization_ctxt = {'xml': {'name': 'SignedIdentifiers', 'wrapped': True, 'itemsName': 'SignedIdentifier'}}\n if container_acl is not None:\n body_content = self._serialize.body(container_acl, '[SignedIdentifier]', is_xml=True, serialization_ctxt=serialization_ctxt)\n else:\n body_content = None\n body_content_kwargs['content'] = body_content\n request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)\n pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)\n response = pipeline_response.http_response\n\n if 
response.status_code not in [200]:\n map_error(status_code=response.status_code, response=response, error_map=error_map)\n error = self._deserialize(_models.StorageError, response)\n raise HttpResponseError(response=response, model=error)\n\n response_headers = {}\n response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))\n response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified'))\n response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id'))\n response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id'))\n response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version'))\n response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date'))\n\n if cls:\n return cls(pipeline_response, None, response_headers)", "def __set_dataset_access(service_account):\n\n assert service_account, 'service_account cannot be blank!'\n\n dataset_roles = {\n \"access\": [\n {\n \"role\": \"WRITER\",\n \"userByEmail\": service_account\n },\n {\n \"role\": \"OWNER\",\n \"groupByEmail\": OWNERS_GROUP\n },\n {\n \"role\": \"OWNER\",\n \"groupByEmail\": AUDITORS_GROUP\n },\n {\n \"role\": \"READER\",\n \"groupByEmail\": LOG_READER_GROUP\n }\n ]\n }\n\n save_JSON (dataset_roles, 'tmp_ds_roles.json')\n\n # Use the temp json file to overwrite existing policies with above-defined roles.\n run_command('bq update --source=tmp_ds_roles.json {}'.format(LOGS_SINK_DATASET_ID) )\n\n # When done, remove the temp file.\n run_command('rm tmp_ds_roles.json')", "def grant_access(message, db, reason):\n hf.grant(message, db.lower(), reason, \"readonly\")" ]
[ "0.5678417", "0.5498908", "0.53634614", "0.525106", "0.5222352", "0.52205503", "0.5167241", "0.5149371", "0.51167226", "0.5108002", "0.50590044", "0.5043949", "0.50261927", "0.50103194", "0.50053406", "0.49526137", "0.48841444", "0.48699006", "0.4852434", "0.48339957", "0.48022333", "0.4771825", "0.4744509", "0.47416505", "0.4738578", "0.47228825", "0.4698516", "0.46968397", "0.46881616", "0.46742377" ]
0.57381666
0
Create perm-vm if not exist and insert into FAB security model for all-dags.
def create_perm_vm_for_all_dag(self) -> None: # create perm for global logical dag for resource_name in self.DAG_RESOURCES: for action_name in self.DAG_ACTIONS: self._merge_perm(action_name, resource_name)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_vpc_if_policy_group(self, name, aep_name):\n policy_group_mo = AccBndlGrp('uni/infra/funcprof/', name, lagT='node')\n self.commit(policy_group_mo)\n # if attachable entity profile does not exists, creates a new one\n class_query = ClassQuery('infraAttEntityP')\n class_query.propFilter = 'eq(infraAttEntityP.name, \"' + AEP_PREFIX + aep_name + '\")'\n pd_list = self.moDir.query(class_query)\n if len(pd_list) == 0:\n vlan_pool_mo = self.create_vlan_pool(VLAN_POOL_PREFIX + aep_name, 'static')\n DomP_mo = self.create_physical_domain(PD_PREFIX + aep_name, str(vlan_pool_mo.dn))\n AttEntityP_mo = self.create_attachable_entity_profile(AEP_PREFIX + aep_name, str(DomP_mo.dn))\n else:\n AttEntityP_mo = pd_list[0]\n # Assign attached entity profile\n self.commit(\n RsAttEntP(policy_group_mo.dn, tDn=str(AttEntityP_mo.dn))\n )\n # Assign interface policies. For non-defaults, check if is already created. If not, the system will create them\n IfPolmo = self.moDir.lookupByDn('uni/infra/cdpIfP-CDP-ON')\n if not IfPolmo:\n IfPolmo = IfPol('uni/infra','CDP-ON',adminSt='enabled')\n self.commit(IfPolmo)\n self.commit(\n RsCdpIfPol(policy_group_mo.dn, tnCdpIfPolName=IfPolmo.name)\n )\n self.commit(\n RsHIfPol(policy_group_mo.dn, tnFabricHIfPolName='default')\n )\n self.commit(\n RsL2IfPol(policy_group_mo.dn, tnL2IfPolName='default')\n )\n LagPolmo = self.moDir.lookupByDn('uni/infra/lacplagp-LACP')\n if not LagPolmo:\n LagPolmo = LagPol('uni/infra', 'LACP', mode='active')\n self.commit(LagPolmo)\n self.commit(\n RsLacpPol(policy_group_mo.dn, tnLacpLagPolName=LagPolmo.name)\n )\n self.commit(\n RsLldpIfPol(policy_group_mo.dn, tnLldpIfPolName='default')\n )\n self.commit(\n RsMcpIfPol(policy_group_mo.dn, tnMcpIfPolName='default')\n )\n self.commit(\n RsMonIfInfraPol(policy_group_mo.dn, tnMonInfraPolName='default')\n )\n self.commit(\n RsStormctrlIfPol(policy_group_mo.dn, tnStormctrlIfPolName='default')\n )\n self.commit(\n RsStpIfPol(policy_group_mo.dn, tnStpIfPolName='default')\n )\n return policy_group_mo", "def create_if_policy_group(self, name, aep_name):\n # Creates policy group\n if_policy_group_mo = AccPortGrp('uni/infra/funcprof/', name)\n self.commit(if_policy_group_mo)\n # Query the AEP\n class_query = ClassQuery('infraAttEntityP')\n class_query.propFilter = 'eq(infraAttEntityP.name, \"' + AEP_PREFIX + aep_name + '\")'\n pd_list = self.moDir.query(class_query)\n if len(pd_list) == 0:\n # if attachable entity profile does not exists, creates a new one\n vlan_pool_mo = self.create_vlan_pool('vlan-pool-' + aep_name, 'static')\n DomP_mo = self.create_physical_domain('pd-' + aep_name, str(vlan_pool_mo.dn))\n AttEntityP_mo = self.create_attachable_entity_profile('aep-' + aep_name, str(DomP_mo.dn))\n else:\n AttEntityP_mo = pd_list[0]\n # Assign attached entity profile to the policy group\n self.commit(\n RsAttEntP(if_policy_group_mo.dn, tDn=str(AttEntityP_mo.dn))\n )\n # Assign interface policies. For non-defaults, check if is already created. 
If not, the system will create them\n IfPolmo = self.moDir.lookupByDn('uni/infra/cdpIfP-CDP-ON')\n if not IfPolmo:\n IfPolmo = IfPol('uni/infra','CDP-ON',adminSt='enabled')\n self.commit(IfPolmo)\n self.commit(\n RsCdpIfPol(if_policy_group_mo.dn, tnCdpIfPolName=IfPolmo.name)\n )\n HIfPolmo = self.moDir.lookupByDn('uni/infra/hintfpol-1GB')\n if not HIfPolmo:\n HIfPolmo = HIfPol('uni/infra', '1GB', speed='1G')\n self.commit(HIfPolmo)\n self.commit(\n RsHIfPol(if_policy_group_mo.dn, tnFabricHIfPolName=HIfPolmo.name)\n )\n self.commit(\n RsL2IfPol(if_policy_group_mo.dn, tnL2IfPolName='default')\n )\n self.commit(\n RsLldpIfPol(if_policy_group_mo.dn, tnLldpIfPolName='default')\n )\n self.commit(\n RsMcpIfPol(if_policy_group_mo.dn, tnMcpIfPolName='default')\n )\n self.commit(\n RsMonIfInfraPol(if_policy_group_mo.dn, tnMonInfraPolName='default')\n )\n self.commit(\n RsStormctrlIfPol(if_policy_group_mo.dn, tnStormctrlIfPolName='default')\n )\n self.commit(\n RsStpIfPol(if_policy_group_mo.dn, tnStpIfPolName='default')\n )\n return if_policy_group_mo", "def post_floating_ip_pool_create(self, resource_dict):\n pass", "def pre_floating_ip_pool_create(self, resource_dict):\n pass", "def create_dag_specific_permissions(self) -> None:\n perms = self.get_all_permissions()\n dagbag = DagBag(read_dags_from_db=True)\n dagbag.collect_dags_from_db()\n dags = dagbag.dags.values()\n\n for dag in dags:\n root_dag_id = dag.parent_dag.dag_id if dag.parent_dag else dag.dag_id\n dag_resource_name = permissions.resource_name_for_dag(root_dag_id)\n for action_name in self.DAG_ACTIONS:\n if (action_name, dag_resource_name) not in perms:\n self._merge_perm(action_name, dag_resource_name)\n\n if dag.access_control:\n self.sync_perm_for_dag(dag_resource_name, dag.access_control)", "def add_view_permissions(sender, instance, created, **kwargs):\n if created:\n group = Group.objects.get(name=settings.DEFAULT_GROUP_NAME)\n assign_perm('view_tag', group, instance)", "def addPkmn(self):\n params = []\n toAdd = []\n \n for key in self.vals.keys():\n if self.vals[key] is None:\n continue\n \n params += [key]\n toAdd += [self.vals[key]]\n \n paramStr = self.GetStrFromList(params)\n \n print \"Adding Pkmn:\", self.vals['name']\n self.insertIntoDB(\"Pokemon\", paramStr, toAdd)\n \n id = sself.cursor.lastrowid\n \n for attack in self.attacks:\n front = DBAddAttackInUse(id, connection = self.connection, cursor = self.cursor)\n front.execute(attack)", "def create_models(self):\r\n self.all_ratings = AllRatingsWithCommon(\r\n experts=self.users,\r\n objects=self.videos,\r\n output_features=self.features,\r\n name=\"prod\",\r\n )\r\n\r\n print_memory(stage=\"DPLF:ratings_nodata_created\")\r\n\r\n # creating models\r\n self.user_to_model = {\r\n user: FeaturelessPreferenceLearningModel(\r\n expert=user, all_ratings=self.all_ratings\r\n )\r\n for user in self.users\r\n }\r\n\r\n print_memory(stage=\"DPLF:models_created\")\r\n\r\n # before creating the aggregator, filling models with data\r\n self.user_to_size = {\r\n user: self.fill_model_data(self.user_to_model[user], user)\r\n for user in tqdmem(self.users, desc=\"fill_data\")\r\n }\r\n\r\n # virtual 'common' data\r\n fplm_common = FeaturelessPreferenceLearningModel(\r\n expert=AllRatingsWithCommon.COMMON_EXPERT, all_ratings=self.all_ratings\r\n )\r\n fplm_common.on_dataset_end()\r\n\r\n print_memory(stage=\"DPLF:data_filled\")\r\n\r\n # resetting the model given the data\r\n self.all_ratings.reset_model()\r\n\r\n print_memory(stage=\"DPLF:model_reset_ok\")\r\n\r\n # aggregating 
models\r\n self.aggregator = FeaturelessMedianPreferenceAverageRegularizationAggregator(\r\n models=[self.user_to_model[u] for u in self.users]\r\n )\r\n self.aggregator.certification_status = self.user_certified\r\n\r\n print_memory(stage=\"DPLF:aggregator_created\")", "def update_pin_group():\n create_instance(new=False)", "def pre_floating_ip_create(self, resource_dict):\n pass", "def add_post_into_ds(post, ds_group, ds_item_role):\n post.ds_group = ds_group\n post.ds_item_role = ds_item_role\n post.is_in_ds = True\n post.save()", "def createPost(request):\n\n #save the organization's post\n if request.method == 'POST':\n form = PostForm(request.user, request.POST, request.FILES)\n if form.is_valid():\n filterList = ['everyone', 'black', 'hispanic', 'female', 'lgbt', 'immigrants', 'disabled', 'poor'] \n newpost = form.save()\n\n #Add tags to the object only if in the filterlist\n tags = form.cleaned_data.get('tags')\n tags = [tag.lower() for tag in tags if tag.lower() in filterList]\n\n newpost.tags.add(*tags)\n messages.success(request, 'You have successful created the post')\n form = PostForm(request.user)\n context = {'form':form} \n return render(request, 'create_post.html', context=context)\n \n #form to fill out for the post\n form = PostForm(request.user)\n context = {'form':form} \n return render(request, 'create_post.html', context=context)", "def create_pre_db():\n db.drop_all()\n db.create_all()\n admin = Admin(\n email=app.config['BLOG_ADMIN_EMAIL'],\n password=\"123456\",\n )\n work_exp = [WorkExperience(\n work_title=u\"Flask Blog-%d\" % i,\n work_type=u\"Personal Project\",\n pos_in_work=u\"Total\",\n work_desc=u\"Use Flask implement a blog application\",\n start_time=datetime.date(2016, 2, 5),\n owner=admin\n ) for i in range(3)]\n edu_exp = [EducationExperience(\n institution=u\"TongJi University-%d\" % i,\n learn_what=u\"Information Security\",\n gpa=3.89,\n start_time=datetime.date(2016, 2, 5),\n owner=admin\n ) for i in range(3)]\n skills = [Skill(\n skill_name=u\"Python-%d\" % i,\n master_degree=4,\n owner=admin\n ) for i in range(3)]\n tags = [Tag(name=u\"tag-%d\" % i) for i in range(10)]\n db.session.add_all([admin]+work_exp+edu_exp+skills+tags)\n db.session.commit()\n Post.generate_fake_posts(12)\n Comment.generate_fake_comments(5)", "def pre_network_ipam_create(self, resource_dict):\n pass", "def save(self, commit):\n preset = super().save(commit)\n # Remove all permissions on the preset.\n preset.permission_group.permissions.all().delete()\n # Add permission to selected groups.\n for group in Group.objects.filter(pk__in=self.cleaned_data[\"groups\"]):\n preset.set_permission(Permission.VIEW, group)\n return preset", "def post_floating_ip_create(self, resource_dict):\n pass", "def _create(self, name):\n command = [\n 'ipset create -exist ' + name + ' hash:net family inet maxelem 536870912',\n ]\n self.__run(command)", "def at_object_creation(self):\n self.locks.add(\"view:perm(Immortals)\")", "def test_add_facility_pt1(self):\n self.assertFalse(self.admin.has_perm('auth.add_facility'))", "def setup_method(self):\n\n # TODO add deletion of vms also.\n self.created_bp_list = []\n self.created_app_list = []\n\n if not self.vrp_name:\n vrp_data = self._create_vm_recovery_point()\n self.vrp_name = vrp_data[\"name\"]\n self.vrp_uuid = vrp_data[\"uuid\"]\n\n # Writing vm_ip to local directory file\n LOG.info(\n \"Writing vrp name {} to file '{}'\".format(\n self.vrp_name, LOCAL_RP_NAME_PATH\n )\n )\n make_file_dir(LOCAL_RP_NAME_PATH)\n with open(LOCAL_RP_NAME_PATH, 
\"w\") as f:\n f.write(self.vrp_name)", "def create_missing_perms(self) -> None:\n\n # pylint: disable=import-outside-toplevel\n from superset.connectors.sqla.models import SqlaTable\n from superset.models import core as models\n\n logger.info(\"Fetching a set of all perms to lookup which ones are missing\")\n all_pvs = set()\n for pv in self.get_session.query(self.permissionview_model).all():\n if pv.permission and pv.view_menu:\n all_pvs.add((pv.permission.name, pv.view_menu.name))\n\n def merge_pv(view_menu: str, perm: Optional[str]) -> None:\n \"\"\"Create permission view menu only if it doesn't exist\"\"\"\n if view_menu and perm and (view_menu, perm) not in all_pvs:\n self.add_permission_view_menu(view_menu, perm)\n\n logger.info(\"Creating missing datasource permissions.\")\n datasources = SqlaTable.get_all_datasources(self.get_session)\n for datasource in datasources:\n merge_pv(\"datasource_access\", datasource.get_perm())\n merge_pv(\"schema_access\", datasource.get_schema_perm())\n\n logger.info(\"Creating missing database permissions.\")\n databases = self.get_session.query(models.Database).all()\n for database in databases:\n merge_pv(\"database_access\", database.perm)", "def test_ipam_vrfs_create(self):\n pass", "def pre_security_group_create(self, resource_dict):\n pass", "def create():", "def create():", "def seed_db(db):\n permission = Permission('manage_users')\n permissions.description = 'Can manage users'\n db.session.add(permission)\n\n user = User('Admin', 'User')\n user.email = '[email protected]'\n user.password = User.hash_password('Password123')\n db.session.add(user)\n\n db.session.commit()\n\n permission = Permission.query.filter_by(name='manage_users').first()\n user = User.query.filter_by(email='[email protected]').first()\n\n db.session.add(UserPermission(user.id, permission.id))\n db.session.commit()", "def test_ipam_vlan_groups_create(self):\n pass", "def _create_vm(self):\n self._create_instance_in_the_db()\n self.type_data = db.instance_type_get_by_name(None, 'm1.large')\n self.conn.spawn(self.context, self.instance, self.network_info)\n self._check_vm_record()", "def test_ipam_vlans_create(self):\n pass", "def save(self, *args, **kwargs):\n grupo_vendedor = Group.objects.get_or_create(name=\"vendedor\")[0]\n self.user.groups.add(grupo_vendedor)\n return super(Vendedor, self).save(*args, **kwargs)" ]
[ "0.58854306", "0.5420295", "0.51205784", "0.5104541", "0.50927716", "0.5074957", "0.50747347", "0.5033376", "0.4975094", "0.4972838", "0.48911983", "0.48370972", "0.4829802", "0.48251015", "0.48227528", "0.4808085", "0.4798749", "0.47678334", "0.47611704", "0.4758598", "0.47429836", "0.473363", "0.47236937", "0.47201777", "0.47201777", "0.47187275", "0.47135875", "0.47102708", "0.46996412", "0.46839318" ]
0.5463835
1
Initialize a primitive parser.
def __init__(self): self.prim_parser = parser.Parser()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, *args, **kw):\n self.parser = Parser(*args, **kw)", "def __init__(self, parser=None):", "def __init__(self):\n print \"You asked for a Parser!\"", "def __init__(self, parser: Any = None):", "def __init__(self):\n Parser.__init__(self)\n self.__line_number = 0 # initialize the line number to 0", "def setup_parse(self, inputstring: str, document: nodes.document) -> None:\n self.inputstring = inputstring\n self.document = document", "def setup_parser(self, parser):", "def __init__(self, node):\n super(LAMMPSBaseParser, self).__init__(node)", "def __init__(self):\n\n self.parser = self.define_parser()\n self.pen = Pen()", "def initialise_parser(**kwargs):\n kwargs['formatter_class'] = ap.ArgumentDefaultsHelpFormatter\n p = MpArgumentParser(**kwargs)\n\n # -- version\n path_file = os.path.sep.join(\n os.path.abspath(__file__).split(os.path.sep)[:-2])\n with open(os.path.join(path_file, 'VERSION'), 'r') as version_file:\n version = version_file.readline()\n p.add_argument('--version', action='version', version=version)\n\n p.add_argument(\"-v\", \"--verbose\", action=\"store_true\", help=\"Verbose mode\")\n\n return p", "def create_parser():\n pass", "def initParser():\n libxml2mod.xmlInitParser()", "def __init__(self, parser):\n if parser == \"csv\":\n self._parser = CSVParser()\n elif parser == \"static\":\n self._parser = StaticParser()\n else:\n raise NotImplementedError", "def __init__(self, redirector, terminators, multilineCommands, legalChars, commentGrammars, commentInProgress,\n case_insensitive, blankLinesAllowed, prefixParser, preparse, postparse, shortcuts):\n\n self.commentGrammars = commentGrammars\n self.preparse = preparse\n self.postparse = postparse\n self.shortcuts = shortcuts\n\n self.main_parser = self._build_main_parser(redirector=redirector, terminators=terminators,\n multilineCommands=multilineCommands, legalChars=legalChars,\n commentInProgress=commentInProgress,\n case_insensitive=case_insensitive,\n blankLinesAllowed=blankLinesAllowed, prefixParser=prefixParser)\n self.input_source_parser = self._build_input_source_parser(legalChars=legalChars,\n commentInProgress=commentInProgress)", "def __init__(s, p):\n Zmod.__init__(s, p)\n if s.element_class != FiniteFieldElement:\n raise ArithmeticError(\"Invalid Prime : %d\" % p)\n s.p = p", "def init_from_file(filename, parser=int):\n filename = filename + \".\" + str(PID)\n\n def __parser_couple(s):\n s = s.replace(\"(\", \"\")\n s = s.replace(\")\", \"\")\n ss = s.split(\",\")\n return int(ss[0]), int(ss[1])\n\n p = PTree()\n content = SList([])\n with open(filename, \"r\") as f:\n count_line = 0\n for line in f:\n if line.strip()[0] == '#':\n continue\n # __distribution: PID -> nb of segments\n # __global_index: num seg -> (start, offset)\n if count_line == 0: # Get the distribution\n p.distribution = SList.from_str(line)\n p.start_index = p.distribution.scanl(lambda x, y: x + y, 0)[PID]\n p.nb_segs = p.distribution[PID]\n elif count_line == 1: # Get the global_index\n p.global_index = SList.from_str(line, parser=__parser_couple)\n else: # Get the content\n content.extend(Segment.from_str(line, parser=parser))\n count_line = count_line + 1\n p.content = content\n return p", "def __init__(self):\n\t\tself.prim = None\n\t\tself.len = 0", "def _init_parser(self):\n # outputParser = (pyparsing.Literal('>>') | (pyparsing.WordStart() + '>') | pyparsing.Regex('[^=]>'))('output')\n outputParser = (pyparsing.Literal(self.redirector * 2) |\n (pyparsing.WordStart() + self.redirector) |\n pyparsing.Regex('[^=]' + 
self.redirector))('output')\n\n terminatorParser = pyparsing.Or(\n [(hasattr(t, 'parseString') and t) or pyparsing.Literal(t) for t in self.terminators])('terminator')\n stringEnd = pyparsing.stringEnd ^ '\\nEOF'\n self.multilineCommand = pyparsing.Or(\n [pyparsing.Keyword(c, caseless=self.case_insensitive) for c in self.multilineCommands])('multilineCommand')\n oneLineCommand = (~self.multilineCommand + pyparsing.Word(self.legalChars))('command')\n pipe = pyparsing.Keyword('|', identChars='|')\n self.commentGrammars.ignore(pyparsing.quotedString).setParseAction(lambda x: '')\n doNotParse = self.commentGrammars | self.commentInProgress | pyparsing.quotedString\n afterElements = \\\n pyparsing.Optional(pipe + pyparsing.SkipTo(outputParser ^ stringEnd, ignore=doNotParse)('pipeTo')) + \\\n pyparsing.Optional(\n outputParser + pyparsing.SkipTo(stringEnd, ignore=doNotParse).setParseAction(lambda x: x[0].strip())(\n 'outputTo'))\n if self.case_insensitive:\n self.multilineCommand.setParseAction(lambda x: x[0].lower())\n oneLineCommand.setParseAction(lambda x: x[0].lower())\n if self.blankLinesAllowed:\n self.blankLineTerminationParser = pyparsing.NoMatch\n else:\n self.blankLineTerminator = (pyparsing.lineEnd + pyparsing.lineEnd)('terminator')\n self.blankLineTerminator.setResultsName('terminator')\n self.blankLineTerminationParser = ((self.multilineCommand ^ oneLineCommand) +\n pyparsing.SkipTo(self.blankLineTerminator, ignore=doNotParse).setParseAction(\n lambda x: x[0].strip())('args') + self.blankLineTerminator)('statement')\n self.multilineParser = (((self.multilineCommand ^ oneLineCommand) + pyparsing.SkipTo(terminatorParser,\n ignore=doNotParse).setParseAction(\n lambda x: x[0].strip())('args') + terminatorParser)('statement') +\n pyparsing.SkipTo(outputParser ^ pipe ^ stringEnd, ignore=doNotParse).setParseAction(\n lambda x: x[0].strip())('suffix') + afterElements)\n self.multilineParser.ignore(self.commentInProgress)\n self.singleLineParser = ((oneLineCommand + pyparsing.SkipTo(terminatorParser ^ stringEnd ^ pipe ^ outputParser,\n ignore=doNotParse).setParseAction(\n lambda x: x[0].strip())('args'))('statement') +\n pyparsing.Optional(terminatorParser) + afterElements)\n # self.multilineParser = self.multilineParser.setResultsName('multilineParser')\n # self.singleLineParser = self.singleLineParser.setResultsName('singleLineParser')\n self.blankLineTerminationParser = self.blankLineTerminationParser.setResultsName('statement')\n self.parser = self.prefixParser + (\n stringEnd |\n self.multilineParser |\n self.singleLineParser |\n self.blankLineTerminationParser |\n self.multilineCommand + pyparsing.SkipTo(stringEnd, ignore=doNotParse)\n )\n self.parser.ignore(self.commentGrammars)\n\n inputMark = pyparsing.Literal('<')\n inputMark.setParseAction(lambda x: '')\n fileName = pyparsing.Word(self.legalChars + '/\\\\')\n inputFrom = fileName('inputFrom')\n inputFrom.setParseAction(replace_with_file_contents)\n # a not-entirely-satisfactory way of distinguishing < as in \"import from\" from <\n # as in \"lesser than\"\n self.inputParser = inputMark + pyparsing.Optional(inputFrom) + pyparsing.Optional('>') + \\\n pyparsing.Optional(fileName) + (pyparsing.stringEnd | '|')\n self.inputParser.ignore(self.commentInProgress)", "def __init__(self, root=0, prime=None):\n if type(root) != int:\n raise ValueError(\n 'Root must be an integer MIDI note number. ' +\n 'Got: {}'.format(root))\n if (root < 0) or (root > 115):\n raise ValueError(\n 'Root must be a valid MIDI note in the range of 0 to 115. 
' +\n 'Got: {}'.format(root))\n\n self._transposition = 0\n\n if prime is not None:\n self._prime = prime\n else:\n self._generate_prime(root)\n\n self._apply_transformations()", "def get_parser():\n\tparser = argparse.ArgumentParser('preprocessing.py',\n\t\tformatter_class=argparse.RawDescriptionHelpFormatter,\n\t\tdescription=\"\"\"\nRun a piepline for one NICER ObsID data. \n\t\t\"\"\"\n\t\t)\n\tversion = '%(prog)s ' + __version__\n\tparser.add_argument('obsid', type=str, \n\t\thelp='ObsID (e.g., 4012010109)')\t\n\treturn parser", "def __init__(self, sentence):\n self.sentence = sentence\n assert len(sentence) > 0\n self.stack = [] \n self.buffer = list(range(len(sentence)))\n self.dependencies = []\n self.transitions = []\n self.probs = []\n self.finish = False # whether the parse has finished", "def _load_parser(self, grammar: str, protocol: Protocol) -> None:\n self.parser = parsley.makeGrammar(grammar, {\n 'punctuation': string.punctuation,\n 'ascii_uppercase': string.ascii_uppercase,\n 'ascii_lowercase': string.ascii_lowercase,\n 'itertools': itertools,\n\n 'Art': Art,\n 'ArtField': ArtField,\n 'Field': Field,\n 'RelLoc': RelLoc,\n 'Names': Names,\n\n 'protocol': protocol,\n 'Boolean': Boolean,\n 'Size': Size,\n 'ArgumentExpression': ArgumentExpression,\n 'MethodInvocationExpression': MethodInvocationExpression,\n 'ConstantExpression': ConstantExpression,\n 'FieldAccessExpression': FieldAccessExpression,\n 'ThisExpression': ThisExpression,\n 'IfElseExpression': IfElseExpression,\n })", "def __init__(self):\n # Token management\n self.tokens = None\n self.token = None\n self.prior_token = None\n\n # Patching\n self.pfile = None\n\n # Configuration\n self._default_start_index = 1\n self._global_start_index = None\n self._comment_tokens = '!'\n self._sparse_arrays = False\n self._row_major = False\n self._strict_logical = True", "def __init__(self):\n # compile regexes\n self._currency_or_init_punct = Regex(r' ([\\p{Sc}\\(\\[\\{\\¿\\¡]+) ', flags=UNICODE)\n self._noprespace_punct = Regex(r' ([\\,\\.\\?\\!\\:\\;\\\\\\%\\}\\]\\)]+) ', flags=UNICODE)\n self._contract = Regex(r\" (\\p{Alpha}+) ' (ll|ve|re|[dsmt])(?= )\", flags=UNICODE | IGNORECASE)\n self._dash_fixes = Regex(r\" (\\p{Alpha}+|£ [0-9]+) - (priced|star|friendly|(?:£ )?[0-9]+) \", flags=UNICODE | IGNORECASE)\n self._dash_fixes2 = Regex(r\" (non) - ([\\p{Alpha}-]+) \", flags=UNICODE | IGNORECASE)\n self._contractions = Regex(r\" (n't)\", flags=UNICODE)\n self._esses = Regex(r\" s \", flags=UNICODE)\n self._international_things = {'chinese': 'Chinese', 'japanese':'Japanese',\n 'french':'French', 'indian':'Indian',\n 'english':'English', 'italian':'Italian'}\n self.moses_detokenizer = MosesDetokenizer()", "def init_parser():\n usage = \"usage: %prog [tx_options] init <path>\"\n description = \"This command initializes a new project for use with \"\\\n \"Transifex. It is recommended to execute this command in the \"\\\n \"top level directory of your project so that you can include \"\\\n \"all files under it in transifex. 
If no path is provided, the \"\\\n \"current working dir will be used.\"\n parser = OptionParser(usage=usage, description=description)\n parser.add_option(\"--host\", action=\"store\", dest=\"host\", default=None,\n help=\"Specify a default Transifex host.\")\n parser.add_option(\"--user\", action=\"store\", dest=\"user\", default=None,\n help=\"Specify username for Transifex server.\")\n parser.add_option(\"--pass\", action=\"store\", dest=\"password\", default=None,\n help=\"Specify password for Transifex server.\")\n parser.add_option(\n \"--force-save\",\n action=\"store_true\",\n dest=\"save\",\n default=False,\n help=\"Override .transifexrc file with the given credentials.\"\n )\n\n parser.add_option(\"--token\", action=\"store\", dest=\"token\", default=None,\n help=\"Specify an api token.\\nYou can get one from\"\n \" user's settings\")\n return parser", "def init_argparse(self, parser=None):\n if parser:\n p = parser\n else:\n p = argparse.ArgumentParser()\n\n # generic options\n p.add_argument(\"-q\", \"--quiet\", action=\"store_true\",\n help=\"log only errors and warnings\")\n p.add_argument(\"-v\", \"--verbose\", action=\"count\",\n help=\"log verbosely\")\n p.add_argument(\"-V\", \"--version\", action=\"store_true\",\n help=\"print version info and exit\")\n p.add_argument(\"--set\", action=\"append\",\n help=\"override config setting (--set 'PARAM=VAL')\")\n p.add_argument(\"command\", help=\"command name\")\n p.add_argument(\"args\", nargs=argparse.REMAINDER, help=\"arguments for command\")\n return p", "def __init__(\n self,\n parser,\n stop_words=spacy.lang.en.stop_words.STOP_WORDS,\n punctuations=string.punctuation,\n ):\n self.parser = parser\n # list of stop words and punctuation marks\n self.stop_words = stop_words\n self.punctuations = punctuations", "def __init__(self, src=None, **opts):\n self.input = None\n\n if src is not None:\n self.parse(src, **opts)", "def _init_parser():\n\t\n\t_parser = argparse.ArgumentParser()\n\t_parser.add_argument(\"--pull\", help=\"pull scripts from UR3\", action=\"store_true\")\n\t_parser.add_argument(\"--create\", help=\"create data base from script files\", action=\"store_true\")\n\t_parser.add_argument(\"--clear\", help=\"clear all data base\", action=\"store_true\")\n\treturn _parser", "def test_constructor(self):\n assert self.parser is not None" ]
[ "0.6944528", "0.66409266", "0.66263324", "0.6401875", "0.6371824", "0.61412036", "0.6126739", "0.60916764", "0.60392183", "0.60321903", "0.59601814", "0.5923435", "0.5907228", "0.5870026", "0.5801741", "0.57332784", "0.572136", "0.5712518", "0.57117873", "0.5708687", "0.5700573", "0.5687348", "0.56870776", "0.56847763", "0.5678854", "0.5675373", "0.5655629", "0.56517655", "0.5648337", "0.5639904" ]
0.8043076
0
Parse the PRINT statement. The argument of PRINT can be a primitive or an arithmetic expression.
def parse_print(self, words): print_obj = Print() input_str = ' '.join(words) obj = self.prim_parser.parse_arith_expr(input_str) if obj: print_obj.arg = obj else: obj = self.prim_parser.parse_primative_obj(input_str) if obj: print_obj.arg = obj else: raise StatementParseError( 'No valid print args: {0}.'.format(words)) return print_obj
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def do_print(self, cmd):\n try:\n print(self.EvalExpression(cmd))\n except:\n pass", "def pprint(*args, **kwargs):\n if PRINTING:\n print(*args, **kwargs)", "def _run_print(self):\n instruction = self.bytecode.code[self.ip]\n input_type = self.bytecode.code[self.ip + 1]\n input_value = self.bytecode.code[self.ip + 2]\n\n # If the value to print is inside a register, we fetch the value at the given register index\n if input_type == OpType.REGISTER:\n print(\"Printing value from register.\")\n\n # If the value to print is binary literal, we print it directly\n elif input_type == OpType.BINARY:\n print(\"Printing binary literal.\")\n\n # Anything else is an error\n else:\n raise ValueError(\"Unexpected input type to be printed at offset <\" + str(self.ip + 1) + \">.\")\n\n # Update the instruction pointer to the next instruction\n self.ip = self.ip + 3", "def print_(*input_x):\n print_op = _get_cache_prim(Print)()\n return print_op(*input_x)", "def PrintArg(self):\n if self.currtok[1].name == \"STRING_LIT\":\n arg = String_LitExpr(self.currtok[0])\n self.currtok = next(self.tg)\n return printArg(arg)\n if self.functions.get(self.currtok[0]) is not None:\n arg = self.FunctionCall()\n return printArg(arg)\n arg = self.Expression()\n return printArg(arg)", "def _Print(self, t):\n self.RaiseError(t, \"Print not supported\")", "def p(value):\n pp.pprint(value)", "def PrintStmt(self):\n args = list()\n self.currtok = next(self.tg)\n if self.currtok[1].name == \"LPAREN\":\n self.currtok = next(self.tg)\n arg = self.PrintArg()\n args.append(arg)\n while self.currtok[1].name == \"COMMA\":\n self.currtok = next(self.tg)\n arg = self.PrintArg()\n args.append(arg)\n\n if self.currtok[1].name == \"RPAREN\":\n self.currtok = next(self.tg)\n if self.currtok[1].name == \"SEMI\":\n return printstmtStmt(args)\n raise SLUCSyntaxError(\"ERROR: Missing right semicolon line {0}\".format(str(self.currtok[2] - 1)))\n raise SLUCSyntaxError(\"ERROR: Missing right paren or a comma line {0}\".format(str(self.currtok[2] - 1)))\n raise SLUCSyntaxError(\"ERROR: Missing left paren on line {0}\".format(str(self.currtok[2] - 1)))", "def _print(self, *args):\n return _ida_hexrays.vd_printer_t__print(self, *args)", "def do_pp(self, arg):\n width = getattr(arg, \"cmd_count\", None)\n try:\n val = self._getval(arg)\n except:\n return\n if width is None:\n try:\n width, _ = self.get_terminal_size()\n except Exception as exc:\n self.message(\"warning: could not get terminal size ({})\".format(exc))\n width = None\n try:\n pprint.pprint(val, self.stdout, width=width)\n except:\n exc_info = sys.exc_info()[:2]\n self.error(traceback.format_exception_only(*exc_info)[-1].strip())", "def _print_dot(_self, expr):\r\n return r'{((%s) \\cdot (%s))}' % (expr.args[0], expr.args[1])", "def print(*args, **kwargs):\n with P_LOCK:\n __builtins__.print(*args, **kwargs)", "def pr(form, *args):\n # variables\n global lPr, lMaPr\n\n if lPr < lMaPr:\n for l in range(lPr + 1):\n sys.stdout.write('-')\n if len(args) == 0:\n print form\n if logFile is not None:\n logging.info(form)\n else:\n print form % args\n if logFile is not None:\n logging.info(form % args)", "def print(*args, sep=\" \"):\n pass", "def print_msg(*vargs, **kwargs):\n print(*vargs, **kwargs)", "def Print(self, text):\n pass", "def vprint(expr, **settings):\n\n outstr = vsprint(expr, **settings)\n\n import builtins\n if (outstr != 'None'):\n builtins._ = outstr\n print(outstr)", "def vsprint(expr, **settings):\n\n string_printer = VectorStrPrinter(settings)\n return 
string_printer.doprint(expr)", "def parseprint(code, filename=\"<string>\", mode=\"exec\", **kwargs):\n node = parse(code, mode=mode) # An ode to the code\n print(dump(node, **kwargs))", "def _p(self, *args, level=2, **kwargs):\n if self._verbosity >= level:\n print(*args, **kwargs)", "def parse_statement(self, words):\n\n keyword = words[0]\n rest = words[1:]\n\n if keyword == 'PRINT':\n obj = self.parse_print(rest)\n elif keyword == 'LET':\n obj = self.parse_let(rest)\n elif keyword == 'GOTO':\n obj = self.parse_goto(rest)\n elif keyword == 'FOR':\n obj = self.parse_for(rest)\n elif keyword == 'NEXT':\n obj = self.parse_next(rest)\n elif keyword == 'IF':\n obj = self.parse_ifthen(rest)\n elif keyword == 'END':\n obj = self.parse_end(rest)\n elif keyword == 'REM':\n obj = self.parse_rem(rest)\n else:\n raise StatementParseInvalidKeyword(\n 'Invalid keyword: {0}.'.format(keyword))\n\n return obj", "def print_parse_result(self, line, log_probs, backpointer, length):\n line = line.strip()\n if log_probs[(0, length-1)][START_SYM] != -float('inf'):\n print(\n self._format_parse(START_SYM, backpointer, 0, length-1, 0))\n print(log_probs[(0, length-1)][START_SYM])\n else:\n print('NONE')", "def parse_goto(self, words):\n\n goto_obj = Goto()\n input_str = ' '.join(words)\n\n obj = self.prim_parser.parse_arith_expr(input_str)\n if obj:\n goto_obj.label = obj\n else:\n obj = self.prim_parser.parse_num(input_str)\n if obj:\n goto_obj.label = obj\n else:\n raise StatementParseError(\n 'No valid print args: {0}.'.format(words))\n\n return goto_obj", "def printme(name, age): # create function printme with arguments name and age\n print(name, age) # print name then age\n return", "def parseAssign( ): # parse rountine for the assign and uses the assign class to print out the appropriate string\n\n\ttok = tokens.peek( )\n\tif debug: print( \"assign: \", tok )\n\tif re.match( Lexer.identifier, tok ):\n\t\tident = VarRef( tok )\n\telse: \n\t\terror( \"Invalid identifier\" )\n\ttok = tokens.next( )\n\tequals = match( \"=\" )\n\ttok = tokens.peek( )\n\texpr = expression( )\n\tmatch( \";\" )\n\tequals = VarRef( equals )\n\tstatement = assign( equals, ident, expr )\n\treturn statement", "def print(self, *args, **kwargs):\n print(*args, **kwargs)", "def eval(self, expression: str) -> str:\n ret = self.exec_(\"print({})\".format(expression))\n ret = ret.strip()\n return ret", "def print_(self, s: str) -> None:", "def test_ui_print_hpmp(self):\n def write(value):\n return value\n\n player = Person(\"test0\", 200, 100, 10, 10, [], [])\n enemy = Person(\"test1\", 200, 100, 10, 10, [], [])\n self.assertEqual(type(self.ui.print_hpmp(player, enemy, write)), str,\n \"Does not print a string.\")", "def repl_print_statements():\n pass" ]
[ "0.5450321", "0.54257673", "0.5296611", "0.5223602", "0.51722485", "0.5059355", "0.5012089", "0.49853182", "0.49688977", "0.4929221", "0.49207166", "0.4889229", "0.4842015", "0.47861603", "0.47843853", "0.4772744", "0.47649443", "0.47477672", "0.47368357", "0.47303003", "0.4729855", "0.4712261", "0.47105232", "0.47044966", "0.47038504", "0.46967918", "0.46938375", "0.46798596", "0.46673605", "0.46634838" ]
0.7103151
0
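For reference, the PRINT record above pairs its docstring query with the full parser implementation. The sketch below restates the same dispatch idea in a self-contained form — try an arithmetic-expression parser first, then fall back to a primitive parser, and fail otherwise. The Print container and the parser callables here are illustrative stand-ins and are not the dataset's actual classes.

class Print:
    """Container for a parsed PRINT statement (illustrative only)."""
    def __init__(self, arg=None):
        self.arg = arg

def parse_print(words, parse_arith_expr, parse_primitive):
    # Try the richer parser first, then the simpler one; None means "no match".
    text = " ".join(words)
    for parse in (parse_arith_expr, parse_primitive):
        obj = parse(text)
        if obj is not None:
            return Print(arg=obj)
    raise ValueError(f"No valid PRINT args: {words}")

# Example with trivial stand-in parsers:
def arith(s):
    return s if any(op in s for op in "+-*/") else None

def prim(s):
    return s or None

assert parse_print(["X", "+", "1"], arith, prim).arg == "X + 1"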
Parse the LET statement args. The arguments are a variable, "=", and an expression.
def parse_let(self, words): let_obj = Let() obj_var = self.prim_parser.parse_var(words[0]) if obj_var: let_obj.var = obj_var if words[1] == '=': rest = words[2:] rest_str = ' '.join(words[2:]) arith_expr_obj = self.prim_parser.parse_arith_expr(rest_str) prim_obj = self.prim_parser.parse_primative_obj(rest_str) if arith_expr_obj: let_obj.value = arith_expr_obj elif prim_obj: let_obj.value = prim_obj else: raise StatementParseError( 'Invalid args for LET {0} = {1}'.format( words[0], rest)) else: raise StatementParseError( 'Invalid syntax for LET: {0}'.format(' '.join(words))) else: raise StatementParseError( 'Invalid syntax for LET: {0} not variable.'.format(words[0])) return let_obj
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def parse_arguments(args):", "def parse_args(args_text: Text) -> Args:\n return tuple(\n interp_value_from_string(a) for a in args_text.split(';') if a.strip())", "def parse_arguements():\r\n\r\n info = \"Perform LLE on a given X matrix of data and optional Y labels\"\r\n parser = argparse.ArgumentParser(description=info)\r\n\r\n # program arguments\r\n parser.add_argument('-X', '--X-data',\r\n type=str, required=True,\r\n help='Path to file containing X data')\r\n\r\n parser.add_argument('-Y', '--Y-labels',\r\n type=str, default=None,\r\n help='Path to file containing Y labels (optional)')\r\n\r\n parser.add_argument('-o', '--output',\r\n type=str, required=True,\r\n help='Path, including name of the output png')\r\n\r\n parser.add_argument('-op', '--output-projection',\r\n type=str, required=True,\r\n help='Path, including name of the output pickle for the PCA eigen vectors')\r\n\r\n parser.add_argument('-ox', '--output-x',\r\n type=str, required=True,\r\n help='Path, including name of the output pickle for the PCA representation of the original data')\r\n\r\n parser.add_argument('-nc', '--num-components',\r\n type=int, default=2,\r\n help='Num components to compute representation for')\r\n\r\n args = parser.parse_args()\r\n opts = vars(args)\r\n return opts", "def parseArgs ():\n independentBaseName = None\n dependentBaseName = None\n independentTSID = None\n dependentTSID = None\n statisticsFile = None\n nEquations = None\n logFile = None\n #\n # Loop through command line arguments\n for arg in sys.argv:\n parts = arg.split('=')\n if ( (parts == None) or (len(parts) != 2) ):\n # Not an arg=value command line argument\n continue\n argName = parts[0].upper()\n argValue = parts[1]\n if ( argName == 'DEPENDENTBASENAME' ):\n dependentBaseName = argValue\n elif ( argName == 'DEPENDENTTSID' ):\n dependentTSID = argValue\n elif ( argName == 'INDEPENDENTBASENAME' ):\n independentBaseName = argValue\n elif ( argName == 'INDEPENDENTTSID' ):\n independentTSID = argValue\n elif ( argName == 'LOGFILE' ):\n logFile = argValue\n elif ( argName == 'NUMBEROFEQUATIONS' ):\n nEquations = int(argValue)\n elif ( argName == 'STATISTICSFILE' ):\n statisticsFile = argValue\n return ( independentBaseName, dependentBaseName, independentTSID, dependentTSID,\n statisticsFile, nEquations, logFile )", "def _parse_args():\n parser = argparse.ArgumentParser(description='Pure-python command-line calculator.')\n\n parser.add_argument('EXPRESSION', action=\"store\", type=str, help=\"expression string to evaluate\")\n parser.add_argument('-m', '--use-modules', nargs='+', action=\"store\", dest=\"MODULE\", type=str,\n help=\"additional modules to use\")\n\n return parser.parse_args()", "def parse_ast_args(cls, ast_args: List) -> Union[tree.AstNode, List[tree.AstNode]]:\n if cls == tree.Declaration and len(ast_args) >= 3:\n # We deal with chained declarations here (`int a = b = 1;`). 
We want two separate variable declarations.\n if(ast_args[2] == '['):\n var_type, identifier, _, valInt, _ = ast_args\n ast_args[0] = ast_args[0] + str(tree.Identifier(valInt.value))\n ast_args = ast_args[: 2]\n else:\n print(ast_args)\n var_type, identifier, expr = ast_args\n if isinstance(expr, tree.Assignment):\n # We should raise an error somehow if there's no previous declaration of the variable here.\n # A good solution would maintain a mapping to the original source code so we can show where the error is.\n # We want to move the assignment node one up so it is **sibling** to this declaration node.\n # Then the declaration should be made with the value of the assigned variable.\n ast_args[2] = tree.Identifier(expr.identifier.name)\n return [expr, parse_ast_args(cls, ast_args)]\n\n if cls == tree.Function:\n # Sometimes we don't have function arguments. I don't know how to handle it but here, rearranging args order.\n assert len(ast_args) in {3, 4}\n if len(ast_args) == 4:\n # Swap function args and body so it works with our class' constructor default args.\n ast_args[2], ast_args[3] = ast_args[3], ast_args[2]\n\n if cls == tree.Expr and any(op in ast_args for op in tree.BinOp.OPERATORS):\n # We want to parse 4 / 3 * 2 with left-associativity. (it should output 2)\n # It means we need to parse the multiplication first\n *left_hand_side, op, right_hand_side = ast_args\n assert op in tree.BinOp.OPERATORS, \"Operator should be in second place in the token list\"\n\n if len(left_hand_side) > 1:\n # We need to parse something like 1 + 2 + 3 + 4\n left_hand_side = parse_ast_args(cls, left_hand_side)\n else:\n # The right hand side is a single expression, it was already parsed into an ast.\n left_hand_side = left_hand_side[0]\n\n return tree.BinOp(left_hand_side, op, right_hand_side)\n\n # We 'unnest' the structure - these classes are abstract so we are rly interested in what they contain.\n if cls == tree.Expr:\n assert len(ast_args) == 1\n return ast_args[0]\n if cls == tree.Statement:\n return ast_args[0] if ast_args else None\n\n # Hack. Esp since some 'class_name' refer to functions.\n if \"\\t\" in ast_args:\n ast_args.remove(\"\\t\")\n\n if cls == tree.Assignment and len(ast_args) >= 3:\n # We deal with chained declarations here (`int a = b = 1;`). 
We want two separate variable declarations.\n if(ast_args[1] == '['):\n identifier, _, valInt, _, expres = ast_args\n identifier.name = identifier.name + \"[\" + str(valInt.value) + \"]\"\n ast_args[0] = identifier\n ast_args[1] = expres\n ast_args = ast_args[: 2]\n\n if cls == tree.Identifier and len(ast_args) > 1:\n if (ast_args[1] == '['):\n identifier, _, valInt, _ = ast_args\n tmp = str(valInt)[:10]\n if tmp == \"Identifier\":\n identifier = identifier + \"[\" + valInt.name + \"]\"\n else:\n identifier = identifier + \"[\" + str(valInt.value) + \"]\"\n ast_args[0] = identifier\n ast_args = ast_args[: 1]\n\n return cls(*ast_args)", "def _parse_xecute(self, args, env):\n self.output = False\n for expr in args:\n self.repl['lex'].reset()\n try:\n p = self.repl['parser'].parse(str(expr),\n lexer=self.repl['lex'].lexer)\n p.execute()\n except mumpy.MUMPSReturn as ret:\n return ret.value()\n except mumpy.MUMPSCommandEnd:\n continue\n except mumpy.MUMPSGotoLine as goto:\n fn = goto.func\n return trampoline(self._parse_tag, fn.rou, fn.tag)\n except Exception as e:\n raise mumpy.MUMPSSyntaxError(e)", "def argumentsParser(args):\n\targuments = []\n\tif args.find('\"') > -1:\n\t\tt_arguments = args.split('\"')\n\t\tfor a in t_arguments:\n\t\t\tif a == '' or a == ' ':\n\t\t\t\tpass\n\t\t\telif a[-1] == ' ':\n\t\t\t\targuments.append(a[:-1])\n\t\t\telse:\n\t\t\t\targuments.append(a)\n\telif args.find(\"'\") > -1:\n\t\tt_arguments = args.split(\"'\")\n\t\tfor a in t_arguments:\n\t\t\tif a == '' or a == ' ':\n\t\t\t\tpass\n\t\t\telif a[-1] == ' ':\n\t\t\t\targuments.append(a[:-1])\n\t\t\telse:\n\t\t\t\targuments.append(a)\n\telif args == ' ':\n\t\tpass\n\telse:\n\t\targuments = args.split(' ')\n\treturn arguments", "def _parse_args(self, prepared_args):\n pass", "def substitute(self, args, lvars, within_list):\n\n if is_String(args) and not isinstance(args, CmdStringHolder):\n args = str(args) # In case it's a UserString.\n args = _separate_args.findall(args)\n for a in args:\n if a[0] in ' \\t\\n\\r\\f\\v':\n if '\\n' in a:\n self.next_line()\n elif within_list:\n self.append(a)\n else:\n self.next_word()\n else:\n self.expand(a, lvars, within_list)\n else:\n self.expand(args, lvars, within_list)", "def parse_arguments(self):\n self.args = self.argparser.parse_args(self.template_args) # noqa: T484\n\n # get values from args or defaults\n for name, (categ, rest) in self.data.items():\n if categ not in '<>?':\n continue\n val = getattr(self.args, name)\n if rest.get('type') == 'flag':\n val = str(rest.get('val')) if val else ''\n else:\n val = val if val is not None else rest.get('default')\n self.variables[name] = val\n\n # possibly fill in substitutions in the template variables\n findreplace = re.compile(r'{{\\s*(\\w+)\\s*}}')\n for name, val in self.variables.items():\n if findreplace.search(val):\n t = jinja2.Template(val)\n self.variables[name] = t.render(self.variables)", "def parse_argument_expression(self, tokens):\n\n clauses = []\n while tokens:\n clauses.append(self._parse_clause(tokens))\n if not tokens or tokens[0].type != 'OR':\n break\n tokens.pop(0)\n else:\n self._parser_state.error('empty argument expression')\n return clauses", "def __parse_args(self):\n for argument in self.args:\n source_arg = re.match(\"^(--source=(([A-Z]|[a-z]|/|_|[0-9]|.)+))$\", argument)\n input_arg = re.match(\"^(--input=(([A-Z]|[a-z]|/|_|[0-9]|.)+))$\", argument)\n stats_arg = re.match(\"^(--stats=(([A-Z]|[a-z]|/|_|[0-9]|.)+))$\", argument)\n help_arg = re.match(\"^--help$\", argument)\n vars_arg = 
re.match(\"^--vars$\", argument)\n insts_arg = re.match(\"^--insts$\", argument)\n if source_arg:\n self.sourceFile = source_arg.group(2)\n self.passedArgs.append(\"source\")\n elif input_arg:\n self.inputFile = input_arg.group(2)\n self.passedArgs.append(\"input\")\n elif help_arg:\n print(\"napoveda\")\n sys.exit(0)\n elif stats_arg:\n self.statsFile = stats_arg.group(2)\n self.passedArgs.append(\"stats\")\n elif vars_arg:\n self.passedArgs.append(\"vars\")\n if self.first_stat_arg is None:\n self.first_stat_arg = \"vars\"\n elif insts_arg:\n self.passedArgs.append(\"insts\")\n if self.first_stat_arg is None:\n self.first_stat_arg = \"insts\"\n else:\n raise ArgError(\"Unknown argument or format of the argument! (\" + argument + \")\")", "def Args(parser):", "def main():\n argc = len(sys.argv)\n if argc > 1:\n first_arg = sys.argv[1]\n if first_arg == '--test':\n env = environment.Environment()\n execution.execute_statement('x = 3', env)\n execution.execute_statement('x+=7', env)\n execution.execute_statement('y=9.23', env)\n env.new_frame()\n execution.execute_statement('x = 5', env)\n print(env.frames)\n execution.execute_statement('z=\"hello world\"', env)\n execution.execute_statement('z +=\"!!!\"', env)\n execution.execute_statement('a= `gelatin`', env)\n print(env.frames)\n ast = ast2.AST(\"3*4+5 ^ 7\")\n print(ast.parse())\n print(ast.collapse_indices(ast.build_indices()))\n ast = ast2.AST(\"18+15*9:3+10\")\n print(ast.parse())\n print(ast.collapse_indices(ast.build_indices()))\n\n print(execution.evaluate_expression('1+2+3+4', environment.Environment()))\n print(execution.evaluate_expression('45+7*8', environment.Environment()))\n print(execution.evaluate_expression('3.2+18^2-7', environment.Environment()))\n print(execution.evaluate_expression('1:2 + 1:3 + 1:5', environment.Environment()))\n print(execution.evaluate_expression('2:3 + 3^3 - 1:5', environment.Environment()))\n print(execution.evaluate_expression('1234', environment.Environment()))\n \n ast = ast2.AST(\"3 + 1 == 4\")\n print(ast.parse())\n ast = ast2.AST(\"3 + 1 > 4\")\n print(ast.parse())\n ast = ast2.AST(\"18:1 != 18.2\")\n print(ast.parse())\n ast = ast2.AST(\"x = 4\")\n print(ast.parse())\n ast = ast2.AST(\"y = 3 > 4\")\n print(ast.parse())\n \n env2 = environment.Environment()\n execution.execute_statement('x = 3+5*4', env2)\n execution.execute_statement('y = x + 19 - 3*6', env2)\n print(env2.frames)\n elif first_arg == '--test2':\n ast = ast2.AST('x = \"ice cream, eggs, and milk\" + \"...alpha or beta\"')\n print(ast.parse())\n ast = ast2.AST('y = f(1 + 1, 2 + 2, 3 + 3) - g((9+7)*2, 128/(2+2))')\n print(ast.parse())\n ast = ast2.AST('z = f(\"ice cream\", \"eggs and milk\") * g(\"alpha or beta\", 3:8, \"gamma or delta\")')\n print(ast.parse())\n ast = ast2.AST('makeList(1,2,3) + makeList(4,5,6)')\n print(ast.parse())\n ast = ast2.AST('[max(16, 25), max(36, max(49, 64))]')\n print(ast.parse())\n ast = ast2.AST('[concat_lists([10], [20]), concat_lists([30], [40])]')\n print(ast.parse())\n elif first_arg == '--test3':\n ast = ast2.AST('[1, 2, 3]')\n print(ast.split_list_elems())\n ast = ast2.AST('[f(2), f(3), f(4)]')\n print(ast.split_list_elems())\n ast = ast2.AST('[f(2, 3), f(3, 4, 5), f(4, 1)]')\n print(ast.split_list_elems())\n ast = ast2.AST('1 + 2 * 3')\n print(ast.split_list_elems())\n print(ast.parse())\n elif first_arg == '--test4':\n ast = ast2.AST('x.length()')\n print(ast.parse())\n ast = ast2.AST('[1,2,3].length()')\n print(ast.parse())\n ast = ast2.AST('3.01')\n print(ast.parse())\n ast = 
ast2.AST('3.1')\n print(ast.parse())\n elif first_arg == '--test5':\n env = environment.Environment()\n env.new_type(['Number'], 'ComplexNumber')\n c = {'$type': 'ComplexNumber', 'real': 1, 'imag': 2}\n print(env.value_is_a(c, 'ComplexNumber'))\n print(env.value_is_a(c, 'Number'))\n print(env.value_is_a(c, 'Int'))\n print(\"\")\n env.new_type(['Object'], 'Food')\n env.new_type(['Food'], 'Pizza')\n env.new_type(['Food'], 'Dessert')\n env.new_type(['Dessert'], 'ChocolateItem')\n env.new_type(['Pizza'], 'PepperoniPizza')\n env.new_type(['Pizza', 'ChocolateItem'], 'ChocolatePizza')\n pepperoni_pizza = {'$type': 'PepperoniPizza'}\n chocolate_pizza = {'$type': 'ChocolatePizza'}\n print(env.value_is_a(pepperoni_pizza, 'PepperoniPizza'))\n print(env.value_is_a(pepperoni_pizza, 'Pizza'))\n print(env.value_is_a(pepperoni_pizza, 'Food'))\n print(env.value_is_a(pepperoni_pizza, 'Dessert'))\n print(env.value_is_a(pepperoni_pizza, 'ChocolateItem'))\n print(\"\")\n print(env.value_is_a(chocolate_pizza, 'PepperoniPizza'))\n print(env.value_is_a(chocolate_pizza, 'Pizza'))\n print(env.value_is_a(chocolate_pizza, 'Food'))\n print(env.value_is_a(chocolate_pizza, 'Dessert'))\n print(env.value_is_a(chocolate_pizza, 'ChocolateItem'))\n print(\"\")\n env.new_type(['ChocolatePizza'], 'HugeChocolatePizza')\n huge_chocolate_pizza = {'$type': 'HugeChocolatePizza'}\n print(env.value_is_a(huge_chocolate_pizza, 'PepperoniPizza'))\n print(env.value_is_a(huge_chocolate_pizza, 'Pizza'))\n print(env.value_is_a(huge_chocolate_pizza, 'Food'))\n print(env.value_is_a(huge_chocolate_pizza, 'Dessert'))\n print(env.value_is_a(huge_chocolate_pizza, 'ChocolateItem'))\n print(env.value_is_a(huge_chocolate_pizza, 'ChocolatePizza'))\n print(\"\")\n elif first_arg == '--test6':\n ast = ast2.AST('{1, 2 | 3, 4}')\n print(ast.parse())\n elif first_arg == '--test7':\n ast = ast2.AST('throw \"something\"')\n print(ast.parse())\n elif first_arg == '--test8':\n ast = ast2.AST('true and not false')\n print(ast.parse())\n print(ast.collapse_indices(ast.build_indices()))\n elif first_arg == '--test9':\n sample = \"\"\"\n x = 5 // comment\n // comment\n /* multi\n line\n comment\n */y = 6\n z = \"https://example.com\"\n \"\"\"\n print(prepare_program.preprocess(sample))\n elif first_arg == '--test10':\n ast = ast2.AST('-3.0e5 + 186e-20 * 1e-6 / 28.8e+6 + 34.4e+99')\n print(ast.parse())\n ast = ast2.AST('-3.0E5 + 186E-20 * 1E-6 / 28.8e+6 + 34.4E+99')\n print(ast.parse())\n elif first_arg == '--test11':\n print(execution.is_assignment_statement('a = 5'))\n print(execution.is_assignment_statement('a=5==6'))\n print(execution.is_assignment_statement('not (5==6) and (8>=7)'))\n print(execution.is_assignment_statement('z='))\n elif first_arg == '--test12':\n lines = [\n 'sub this + that',\n 'func Int x + this',\n 'func x + this',\n 'func this * y',\n 'func Int -this',\n 'sub -this',\n 'sub not this',\n 'sub Boolean not this',\n 'sub this-b',\n 'sub b-this',\n 'func Int-this',\n 'func Int- this',\n 'sub Int - this'\n ]\n print(prepare_program.replace_op_overload_syntax(lines))\n elif first_arg == '--test-tree-merge':\n tests.test_tree_merge()\n elif first_arg == '--test-all':\n tests.test_all('capacita_programs')\n elif first_arg == '--test-all-fast':\n tests.test_all('capacita_programs', has_delay=False)\n elif first_arg == '--test-repl':\n tests.test_all('capacita_programs', has_delay=True, use_repl=True)\n elif first_arg == '--test-repl-fast':\n tests.test_all('capacita_programs', has_delay=False, use_repl=True)\n elif first_arg == '--test-file' and 
argc > 2:\n if argc == 4 and sys.argv[2] == '--repl':\n tests.test_file(sys.argv[3], use_repl=True)\n else:\n tests.test_file(sys.argv[2], use_repl=False)\n else:\n # Run a program from a text file:\n file_name = first_arg\n execute_file(file_name)\n exit()\n repl()", "def parse(self):\n args = self.args\n if args and not args[0] in [\"'\", \",\", \":\"]:\n args = \" %s\" % args.strip()\n self.args = args", "def parseArgs(self, args, **vars):\n argList = []\n for token in self.argLexer.finditer(args):\n for tokenType, tokenValue in list(token.groupdict().items()):\n if tokenValue is not None:\n argList.append(getattr(self, 'argtoken_' +\n tokenType)(tokenValue, vars))\n return argList", "def parse_node(self):\n\tline_splitted = self.line.split()\n\ti=t=w=s=l=v=None\n\tfor argument_unit in line_splitted:\n\t words = self.split_argument_unit(argument_unit)\n\n\t if words[0] == 'I':\n\t\ti = words[1]\n\t elif words[0] == 't':\n\t\tt = float(words[1])\n\t elif words[0] == 'W':\n\t\tw = words[1]\n\t elif words[0] == 's':\n\t\ts = words[1]\n\t elif words[0] == 'L':\n\t\tl = words[1]\n\t elif words[0] == 'v':\n\t\tv = int(words[1])\n\t else:\n\t\traise ArgumentNotFoundError(found = words[0])\n\tif i != None:\n\t self.nodes.append(Node(i, t, w, s, l, v))\n\telse:\n\t ArgumentNotFoundError(self.line, 'I = identifier expected')", "def parse(s):\n return expr.parseString(s, parseAll=True)", "def let_statement(u):\n if u.__class__ is node.let:\n if (u.ret.__class__ is node.ident and\n u.args.__class__ is node.matrix):\n u.args = node.funcall(func_expr=node.ident(\"matlabarray\"),\n args=node.expr_list([u.args]))", "def parse_args() -> tuple:\n operation = 0\n args = list()\n filter = tuple()\n suppress_errors = False\n\n for arg in argv[1:]:\n if arg.startswith('--mode='):\n operation = int(arg[7:])\n elif arg == '--suppress_errors':\n suppress_errors = True\n elif arg.startswith('--filter='):\n filter = tuple(remove_quotes(arg[9:]).split())\n else:\n args.append(remove_quotes(arg))\n\n if filter == tuple():\n filter = None\n\n return operation, suppress_errors, filter, args", "def eval_let(env, bindings, body):\n new_env = Env(env)\n for ll in value(bindings):\n assert typeof(ll) == 'list', 'bindings must be a list'\n pair = value(ll)\n assert len(pair) == 2, 'bindings must be pairs'\n nam, arg = pair\n assert typeof(nam) == 'atom', 'binding LHS must be atom'\n intern(new_env, value(nam), evalu(arg, env))\n args_evaled = [evalu(x, new_env) for x in body]\n return args_evaled[-1]", "def str_to_args(line):\n args_in = line.split()\n args_out = []\n kwargs_out = {}\n gadget_lookup = {g.name: g for g in Gadget.getinstances()}\n for a in args_in:\n if '=' in a:\n key, val = a.split('=')\n if ('*' in val) or ('?' in val):\n matching_names = filter(gadget_lookup.keys(), val)\n kwargs_out[key] = [gadget_lookup[name] for name in matching_names]\n elif val in gadget_lookup.keys():\n kwargs_out[key] = gadget_lookup[val]\n else:\n kwargs_out[key] = eval(val)\n else:\n if ('*' in a) or ('?' 
in a):\n matching_names = filter(gadget_lookup.keys(), a)\n args_out += [gadget_lookup[name] for name in matching_names]\n elif a in gadget_lookup.keys():\n args_out.append(gadget_lookup[a])\n else:\n try:\n args_out.append(eval(a))\n except NameError:\n args_out.append(a)\n return args_out, kwargs_out", "def parse_selection(expression, dataset):\n id1, op, id2 = re.split('(<=|>=|!=|=~|>|<|=)', expression, 1)\n\n op = {\n '<=': operator.le,\n '>=': operator.ge,\n '!=': operator.ne,\n '=': operator.eq,\n '>': operator.gt, \n '<': operator.lt,\n }[op]\n\n try:\n id1 = get_var(dataset, id1)\n except:\n id1 = ast.literal_eval(id1)\n\n try:\n id2 = get_var(dataset, id2)\n except:\n id2 = ast.literal_eval(id2)\n\n return id1, op, id2", "def parse_arguments(self):\n \n for arg in sys.argv[1:]:\n (key, sep, value) = arg.partition(\"=\")\n if sep != \"=\":\n raise ProcessorError(\"Illegal argument '%s'\" % arg)\n self.update_data(key, value)", "def parse(self, args):\n pass", "def parse(self, command):\n split_command = command.split('=')\n semi = 1\n dest = None\n if len(split_command) > 1:\n dest = split_command[0]\n else:\n semi = 0\n c_or_j = split_command[semi].split(';')\n comp = c_or_j[0]\n jump = c_or_j[1] if len(c_or_j) > 1 else None\n return comp, dest, jump", "def get_list_of_arguments(self):\n arglist = self.lhs.split(\"|\")\n if len(arglist) == 1:\n return (self.lhslist[0],), self.lhslist[1:]\n else:\n return arglist[0].split(\",\"), arglist[1].split(\",\")", "def parse(self, args):\r\n # handle `sl ...`\r\n main_args = self.parse_main_args(args)\r\n module_name = main_args['<module>']\r\n\r\n # handle `sl <module> ...`\r\n module_args = self.parse_module_args(module_name, main_args['<args>'])\r\n\r\n # get the command argument\r\n command_name = module_args.get('<command>')\r\n\r\n # handle `sl <module> <command> ...`\r\n return self.parse_command_args(\r\n module_name,\r\n command_name,\r\n main_args['<args>'])", "def visit_expr_stmt(self: Parser, node: doc.Expr) -> None:\n\n res = self.eval_expr(node.value)\n if res is None:\n pass\n elif isinstance(res, Frame):\n res.add_callback(partial(res.__exit__, None, None, None))\n res.__enter__()\n elif isinstance(res, PrimExpr):\n T.evaluate(res)\n elif isinstance(res, (int, bool)):\n T.evaluate(tvm.tir.const(res))\n elif isinstance(res, tvm.relay.Call) and not res.args:\n # Using GlobalVar.__call__ with no arguments is ambiguous, as\n # each IR has a different function Call representation. If\n # this occurs, convert to the TIR representation.\n T.evaluate(tvm.tir.call_tir(res.op))\n elif isinstance(res, str):\n # Ignore docstrings\n pass\n else:\n self.report_error(node, f\"Parsing resulted in unexpected type {type(res)}\")" ]
[ "0.56393695", "0.5488855", "0.5391135", "0.536646", "0.53150535", "0.5247378", "0.52155995", "0.5206109", "0.5205791", "0.5194514", "0.5180917", "0.51377404", "0.51271456", "0.51270646", "0.5068318", "0.5052861", "0.50487244", "0.504253", "0.5022233", "0.5016177", "0.5016102", "0.5003135", "0.49948618", "0.49901932", "0.49492177", "0.49419725", "0.49408245", "0.4938254", "0.49298364", "0.49241522" ]
0.59192336
0
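The LET record above follows the same shape: a variable name, an "=" sign, and then either an arithmetic expression or a primitive value. A rough standalone sketch of that check, with deliberately simplified validation and illustrative class names:

class Let:
    def __init__(self, var=None, value=None):
        self.var = var
        self.value = value

def parse_let(words):
    # Expect: <var> = <expression or value>
    if len(words) < 3 or words[1] != "=":
        raise ValueError(f"Invalid syntax for LET: {' '.join(words)}")
    var = words[0]
    if not var.isidentifier():
        raise ValueError(f"Invalid syntax for LET: {var} is not a variable")
    return Let(var=var, value=" ".join(words[2:]))

assert parse_let(["A", "=", "B", "+", "2"]).value == "B + 2"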
Parse the GOTO statement. The argument can be anything that evaluates to a numeric label.
def parse_goto(self, words): goto_obj = Goto() input_str = ' '.join(words) obj = self.prim_parser.parse_arith_expr(input_str) if obj: goto_obj.label = obj else: obj = self.prim_parser.parse_num(input_str) if obj: goto_obj.label = obj else: raise StatementParseError( 'No valid print args: {0}.'.format(words)) return goto_obj
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cg_goto(self, cmd):\n label = self.makeLabel(cmd)\n self.asm(unindent(f\"\"\"\n @{label}\n 0;JMP\n \"\"\"))", "def goto(n):\n n = int('{}'.format(n))\n get_controller().step_to(n)", "def goto(action, value, error_handle):\n print_info(\"failed: failure action= goto %s\" % value)\n error_handle['action'] = 'GOTO'\n error_handle['value'] = value\n return error_handle", "def gen_goto(self, stmt: statements.Goto) -> None:\n block = self.get_label_block(stmt.label)\n self.builder.emit_jump(block)\n new_block = self.builder.new_block()\n self.builder.set_block(new_block)", "def write_goto_in_func(self, label):\n label = self.label_by_scope(label)\n self.write(\"@\" + label + \"\\n0;JMP\\n\")\n # @label\n # 0;JMP", "def cg_if_goto(self, cmd):\n label = self.makeLabel(cmd)\n self.asm(unindent(f\"\"\"\n @SP\n AM=M-1 // SP --\n D=M // D = MEM[SP]\n @{label}\n D;JNE // if-goto {label}\n \"\"\"))", "def test_goto_definition_at_zero(Script):\n assert Script(\"a\").infer(1, 1) == []\n s = Script(\"str\").infer(1, 1)\n assert len(s) == 1\n assert list(s)[0].description == 'class str'\n assert Script(\"\").infer(1, 0) == []", "def _is_goto(self, words):\n if words[0] == 'goto':\n if len(words) != 2:\n raise SyntaxError(\"File line {}: Invalid number of arguments for C_GOTO command.\".format(self._file_line))\n return True\n else:\n return False", "def write_go_to(self, label):\n self._write_line('goto ' + label)", "def write_goto(self, label):\n\n symbol = LabelIfGoto.get_label_symbol(self.func_name, label)\n for line in LabelIfGoto.GOTO:\n line = line.format(symbol=symbol)\n self.write_line(line)", "def parseJump(cmds):\n if (len(cmds) != 0):\n parseExpr(cmds[0])\n parseJump(cmds[1:])", "def write_goto(self, label: str) -> None:\n self._write(f'@{self._file_name}${label}')\n self._write('0;JMP')", "def write_goto(output_file, command, label, curr_function):\n if command == \"if-goto\":\n output_file.write(\"@SP\" + \"\\n\" +\n \"AM = M - 1\" + \"\\n\" +\n \"D = M\" + \"\\n\" +\n \"@\" + curr_function[0] + \"$\" + label + \"\\n\" +\n \"D; JNE\" + \"\\n\")\n else:\n output_file.write(\"@\" + curr_function[0] + \"$\" + label + \"\\n\" +\n \"0; JMP\" + \"\\n\")", "def goto(number):\n if isinstance(number, tuple):\n number = number[-1]\n if not isinstance(number, str):\n number = number()\n return redirect(question_url(number))", "def jmp(self, params):\n return int(params[0])", "def convert_instruction(instruction: str) -> Tuple[int, int, int]:\n\t# NOOP\n\tif match := NOOP_REGEX.match(instruction):\n\t\tinstruction_type = 0\n\t# ADD\n\telif match := ADD_REGEX.match(instruction):\n\t\tinstruction_type = 1\n\t# MINUS\n\telif match := MINUS_REGEX.match(instruction):\n\t\tinstruction_type = 2\n\t# GOTO\n\telif match := GOTO_REGEX.match(instruction):\n\t\tinstruction_type = encode_label(match.group(\"TARGET\")) + 2\n\t# No match\n\telse:\n\t\traise ValueError(f\"Unrecognized instruction: {instruction}\")\n\t# get a and c from the label and variable capture groups\n\tlabel = encode_label(match.group(\"LABEL\"))\n\tvariable = encode_var(match.group(\"VAR\")) - 1\n\treturn label, instruction_type, variable", "def flow_control(self):\r\n c1 = self.eat_char()\r\n c2 = self.eat_char()\r\n cmd = c1+c2\r\n if cmd=='ss':\r\n #Mark a location in the program with label n\r\n self.log += ' Mark label '\r\n label = self.read_label()\r\n if not self.fix_labels:\r\n if label in self.labels:\r\n raise Exception('Same label used twice.')\r\n self.labels[label] = self.cursor #!!correct cursor postion?\r\n elif 
cmd=='st':\r\n #Call a subroutine with the location specified by label n.\r\n self.log += ' Call subroutine '\r\n label = self.read_label()\r\n self.jump_back = self.cursor\r\n self.jump_to(label)\r\n elif cmd=='sn':\r\n #Jump unconditionally to the position specified by label n.\r\n self.log += ' Jump to label '\r\n label = self.read_label()\r\n self.jump_to(label)\r\n elif cmd=='ts':\r\n #Pop a value off the stack and jump to the label specified by n if the value is zero.\r\n self.log += ' If zero jump to label '\r\n label = self.read_label()\r\n if self.pop_stack() == 0:\r\n self.jump_to(label)\r\n elif cmd=='tt':\r\n #Pop a value off the stack and jump to the label specified by n if the value is less than zero.\r\n self.log += ' If neg jump to label '\r\n label = self.read_label()\r\n if self.pop_stack() < 0:\r\n self.jump_to(label)\r\n elif cmd=='tn':\r\n #Exit a subroutine and return control to the location from which the subroutine was called.\r\n self.log += ' Exit subroutine '\r\n if not self.scan_only:\r\n if self.jump_back==0:\r\n raise Exception('Attempted to exit subroutine before entering one')\r\n self.cursor = self.jump_back\r\n self.jump_back = 0\r\n elif cmd=='nn':\r\n #Exit the program.\r\n self.log += ' Exit program '\r\n self.terminate = True\r\n else:\r\n raise Exception('Invalid command')", "def goto(self, item):\n command = 'goto ' + str(item)\n self.run_command(command)", "def parser(line):\n # Remove comment and whitespace\n line = re.sub(r'//.*', '' , line) # remove comment\n line = line.strip() # remove whitespace\n\n # Parse A instruction, return int or string\n if line.find('@') == 0:\n try:\n parsed = int(line[1:])\n flag = \"A_DECIMAL\"\n except:\n parsed = line[1:]\n flag = \"A_INSTRUCTION\"\n\n elif line.startswith(\"(\") and line.endswith(\")\"):\n parsed = line[1:-1]\n flag = \"GOTO_INSTRUCTION\"\n else:\n # Parse C instruction, return tuple\n if line.find(';') != -1:\n comp, jump = line.split(';') # comp ; jump\n dest = \"null\"\n if comp.find('=') != -1:\n dest, comp = comp.split('=') # dest = comp ; jump\n parsed = comp, dest, jump\n flag = \"C_INSTRUCTION\"\n\n elif line.find('=') != -1:\n dest, comp = line.split('=') # dest = comp\n jump = \"null\"\n parsed = comp, dest, jump\n flag = \"C_INSTRUCTION\"\n else:\n parsed = None\n flag = None\n\n return parsed, flag", "def go_cmd(cmd, cnt, args):\n #log(\"g \"+str(args[1:]))\n \n if cnt == 1:\n cpu.go()\n return\n elif cnt >= 2:\n try:\n instr_count = int(args[1])\n except ValueError:\n log(\"Expected a decimal count - g count [from_address]\") \n return\n if cnt == 2:\n cpu.go(instr_count) \n elif cnt >= 3:\n try:\n from_address = int(args[2], base=16) \n except ValueError:\n log(\"Expected a hexadecimal address - g count [from_address]\")\n return\n cpu.go(instr_count, from_address)", "def _is_ifgoto(self, words):\n if words[0] == 'if-goto':\n if len(words) != 2:\n raise SyntaxError(\"File line {}: Invalid number of arguments for C_IFGOTO command.\".format(self._file_line))\n return True\n else:\n return False", "def goto_procedure(self, destination):\n raise RuntimeError(\"the 'goto_procedure' method must be overriden\")", "def goto(self, offset):\n self._vim.command('goto {}'.format(offset))", "def goto(self, index):\n raise NotImplementedError", "def _get_jump_with_pattern(text, format_tuple):\n fmt_str = format_tuple[0]\n group = format_tuple[1]\n direction = format_tuple[2]\n\n match = _match_pattern(text, fmt_str)\n if match and match[group]:\n page = match[group]\n if page.isdigit() and 
int(page) < 20:\n return int(page) * direction # \"from\" jumps are negative\n elif _match_pattern(page, \"(front|back){e<=2}\"):\n return page\n\n return False", "def goto(cls, quad):\n\t\treturn quad.result", "def __ge__(self, *args):\n return _ida_hexrays.cgoto_t___ge__(self, *args)", "def irgen_break(stmt, builder, table):\n tmp = builder.unreachable() \n if stmt.label:\n table.breaks[tmp] = (builder.block, table[stmt.label])\n else:\n table.breaks[tmp] = (builder.block, None)", "def __le__(self, *args):\n return _ida_hexrays.cgoto_t___le__(self, *args)", "def goto(fixed_pc: int):\n\n def _goto(state: State) -> State:\n return state._replace(pc=fixed_pc)\n\n return _goto" ]
[ "0.59043664", "0.571691", "0.5656129", "0.5361607", "0.535743", "0.53304034", "0.52985924", "0.51921517", "0.5165587", "0.51377875", "0.5116282", "0.5073228", "0.50564355", "0.5052013", "0.49735686", "0.4971201", "0.49217594", "0.49034017", "0.48185778", "0.4752092", "0.47519904", "0.47283548", "0.47131824", "0.4707043", "0.46416527", "0.4637841", "0.46245942", "0.45781747", "0.45430556", "0.45308596" ]
0.6092517
0
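GOTO parsing in the record above reduces to a single label argument that is either a plain number or an expression expected to evaluate to a line number. A hypothetical minimal version, with a crude operator check standing in for a real expression parser:

class Goto:
    def __init__(self, label=None):
        self.label = label

def parse_goto(words):
    text = " ".join(words)
    try:
        return Goto(label=int(text))      # plain numeric label
    except ValueError:
        pass
    if any(op in text for op in "+-*/"):  # stand-in for an arithmetic-expression parser
        return Goto(label=text)
    raise ValueError(f"No valid GOTO args: {words}")

assert parse_goto(["100"]).label == 100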
Parse the NEXT statement.
def parse_next(self, words): next_obj = Next() if len(words) == 1: var_obj = self.prim_parser.parse_var(words[0]) if var_obj: next_obj.var = var_obj else: raise StatementParseError( 'Invalid NEXT statement var {0}.'.format(words[0])) else: raise StatementParseError( 'Invalid NEXT statement. Words: {0}.'.format(words)) return next_obj
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def parse_next_instruction(self) -> None:\n instruction = self.program[self.pointer]\n opcode = instruction % 100\n if opcode == 99:\n self.halt = True\n\n self.modes = instruction // 100\n\n if opcode == 1:\n self.op_sum()\n if opcode == 2:\n self.op_multiply()\n if opcode == 3:\n self.op_input()\n if opcode == 4:\n self.op_output()\n if opcode == 5:\n self.op_jump_if_true()\n if opcode == 6:\n self.op_jump_if_false()\n if opcode == 7:\n self.op_less_than()\n if opcode == 8:\n self.op_equal_to()\n if opcode == 9:\n self.op_adjust_relative()", "def next():", "def next():", "def next(self):\n resp = yield from self.command('next')\n return True", "def _analyse_stmt_Continue(\n self, statement: ast.Continue, *, next: CFNode\n ) -> CFNode:\n return self._ast_node(statement, next=self._context[_CONTINUE])", "def next(self):\n lines = []\n query = False\n while 1:\n line = self._uhandle.readline()\n if not line:\n break\n # If I've reached the next one, then put the line back and stop.\n if lines and (line.startswith('BLAST')\n or line.startswith('BLAST', 1)\n or line.startswith('<?xml ')):\n self._uhandle.saveline(line)\n break\n # New style files ommit the BLAST line to mark a new query:\n if line.startswith(\"Query=\"):\n if not query:\n if not self._header:\n self._header = lines[:]\n query = True\n else:\n #Start of another record\n self._uhandle.saveline(line)\n break\n lines.append(line)\n\n if query and \"BLAST\" not in lines[0]:\n #Cheat and re-insert the header\n #print \"-\"*50\n #print \"\".join(self._header)\n #print \"-\"*50\n #print \"\".join(lines)\n #print \"-\"*50\n lines = self._header + lines\n \n if not lines:\n return None\n \n data = ''.join(lines)\n if self._parser is not None:\n return self._parser.parse(File.StringHandle(data))\n return data", "async def get_next(continuation_token=None):\n if not continuation_token:\n return {\"nextLink\": \"page2\", \"value\": [\"value1.0\", \"value1.1\"]}\n else:\n return {\"nextLink\": None, \"value\": [\"value2.0\", \"value2.1\"]}", "async def get_next(continuation_token=None):\n if not continuation_token:\n return {\"nextLink\": \"page2\", \"value\": [\"value1.0\", \"value1.1\"]}\n else:\n return {\"nextLink\": None, \"value\": [\"value2.0\", \"value2.1\"]}", "def _advance(self, idlist=None):\n if self.token.id == \"END\":\n return\n if idlist and self.token.id in idlist:\n self.token = next(self.token_gen)\n elif not idlist:\n self.token = next(self.token_gen)\n else:\n raise ParseError(\n \"\"\"Expected one of %s found %r instead. 
(line: %i)\"\"\"\n % (\" \".join(idlist), self.token.id, self.line)\n )", "def get_next_token(self):\n while self.current_char is not None:\n if self.current_char.isspace():\n self.skip_over_whitespace()\n continue\n\n if self.current_char.isdigit():\n return Token(INTEGER, self.integer())\n\n if self.current_char == '+':\n self.move_forward()\n return Token(PLUS, '+')\n\n if self.current_char == '-':\n self.move_forward()\n return Token(MINUS, '-')\n\n if self.current_char == '*':\n self.move_forward()\n return Token(MULTI, '*')\n\n if self.current_char == '/':\n self.move_forward()\n return Token(DIV, '/')\n\n self.error()\n\n return Token(EOF, None)", "def next_token(self) -> T.Optional[Token]:\n if self.has_finished():\n return None\n token_type = None\n token_chars = []\n if is_number_char(self.current):\n token_type = \"N\"\n while not self.has_finished() and is_number_char(self.current):\n token_chars.append(self.consume())\n elif is_char_token(self.current):\n if self.current in [\"(\", \")\"]:\n token_type = self.current\n elif self.current in [\"+\", \"-\"]:\n token_type = \"S\"\n elif self.current in [\"*\", \"/\"]:\n token_type = \"M\"\n else:\n raise ExprSyntaxError\n token_chars.append(self.consume())\n elif self.current.isspace():\n self.consume()\n return self.next_token()\n else:\n raise UnexpectedChar\n return Token(token_type, \"\".join(token_chars))", "def get_next_token(self):\n while self.current_char is not None:\n if self.current_char.isspace():\n self.skip_whitespace()\n continue\n if self.current_char.isdigit():\n return Tokenizer(INTEGER, self.integer())\n if self.current_char == '+':\n self.advance()\n return Tokenizer(Token.PLUS, '+')\n if self.current_char == '-':\n self.advance()\n return Tokenizer(Token.MINUS, '-')\n if self.current_char == '*':\n self.advance()\n return Tokenizer(Token.MULTIPLICATION, '*')\n if self.current_char == '/':\n self.advance()\n return Tokenizer(Token.DIVISION, '/')\n\n self.error()\n return Tokenizer(EOF, None)", "def get_next_token(self):\n while self.current_char is not None:\n\n if self.current_char.isspace():\n self.skip_whitespace()\n continue\n\n if self.current_char.isalpha():\n return self._id()\n\n if self.current_char.isdigit():\n return Token(INTEGER, self.integer())\n\n if self.current_char == '=':\n self.advance()\n return Token(ASSIGN, '=')\n\n if self.current_char == ';':\n self.advance()\n return Token(SEMI, ';')\n\n if self.current_char == '+':\n self.advance()\n return Token(PLUS, '+')\n\n if self.current_char == '-':\n self.advance()\n return Token(MINUS, '-')\n\n if self.current_char == '*':\n self.advance()\n return Token(MUL, '*')\n\n if self.current_char == '(':\n self.advance()\n return Token(OPEN_BRACE, '(')\n\n if self.current_char == ')':\n self.advance()\n return Token(CLOSE_BRACE, ')')\n\n self.error()\n\n return Token(EOF, None)", "def get_next_token(self):\n while self.current_char is not None:\n\n if self.current_char.isspace():\n self.skip_whitespace()\n continue\n\n if self.current_char == '{':\n self.advance()\n self.skip_comment()\n continue\n\n if self.current_char.isalpha():\n return self._id()\n\n if self.current_char.isdigit():\n return self.number()\n\n if self.current_char == '\\'':\n return self._string()\n\n if self.current_char == ':' and self.peek() == '=':\n self.advance()\n self.advance()\n return Token(ASSIGN, ':=')\n\n if self.current_char == ';':\n self.advance()\n return Token(SEMI, ';')\n\n if self.current_char == ':':\n self.advance()\n return Token(COLON, ':')\n\n if 
self.current_char == ',':\n self.advance()\n return Token(COMMA, ',')\n\n if self.current_char == '+':\n self.advance()\n return Token(PLUS, '+')\n\n if self.current_char == '-':\n self.advance()\n return Token(MINUS, '-')\n\n if self.current_char == '*':\n self.advance()\n return Token(MUL, '*')\n\n if self.current_char == '/':\n self.advance()\n return Token(FLOAT_DIV, '/')\n\n if self.current_char == '(':\n self.advance()\n return Token(LPAREN, '(')\n\n if self.current_char == ')':\n self.advance()\n return Token(RPAREN, ')')\n\n if self.current_char == '.':\n self.advance()\n return Token(DOT, '.')\n\n if self.current_char == '<':\n self.advance()\n return Token(LESS_THAN, '<')\n\n if self.current_char == '>':\n self.advance()\n return Token(GREATER_THAN, '>')\n\n if self.current_char == '=':\n self.advance()\n return Token(EQUAL, '=')\n\n self.error()\n\n return Token(EOF, None)", "def statement( ): # parse rountin for statement that makes sure the token is one of the following, eventually there will be an error caught\n\n\ttok = tokens.peek( )\n\tif debug: print( \"statement: \", tok )\n\tif tok == \"if\":\n\t\tstat = parseIfStatement( )\n\t\treturn stat\n\telif tok == \"while\":\n\t\tstat = parseWhileStatement( )\n\t\treturn stat\n\telse: \n\t\tstat = parseAssign( )\n\t\treturn stat", "def Next():\n return CheckForError(lib.Generators_Get_Next())", "def next_token(self, context, token):", "def next(action, value, error_handle, skip_invoked=True):\n error_handle['action'] = 'NEXT'\n if skip_invoked:\n print_info(\"failure action= next\")\n return error_handle", "def extract_next_page(parser):\r\n url = ''\r\n table = parser.table.find_all('table')[1]\r\n tr = table.findAll('tr')\r\n url = url + str(tr[len(tr) - 1].a.get('href'))\r\n\r\n return url", "def _next(verbose=0, quiet=False):\n Spotify.request('me/player/next', method='POST')\n if not quiet:\n from cli.commands.status import status\n status.callback(verbose=verbose)\n\n return", "def next(self):\n self.jumpahead(1)", "def next(self):\n pass", "def next(self):\n pass", "def next(self):\n pass", "def next(self):\n pass", "def next(self):\r\n pass", "def nextToken(self):\n # Get the next token.\n next = super(ECMAScriptLexer, self).nextToken()\n\n if next.channel == Token.DEFAULT_CHANNEL:\n # Keep track of the last token on the default channel.\n self._lastToken = next\n\n return next", "def next(self) -> str:\n raise NotImplementedError", "def test__parse_next(value, position, expected_output, expected_position):\n state = ParserState(value)\n state.position = position\n \n output = parse_next(state)\n vampytest.assert_instance(output, tuple)\n vampytest.assert_eq(output, expected_output)\n vampytest.assert_eq(state.position, expected_position)", "def __next__(self):\n raise NotImplementedError(\"next() not implemented!\")" ]
[ "0.62025493", "0.603383", "0.603383", "0.60183877", "0.5960151", "0.5826634", "0.5793032", "0.5793032", "0.5746273", "0.57045674", "0.5698597", "0.56563187", "0.5653924", "0.5641119", "0.56398654", "0.5605107", "0.56033105", "0.55152005", "0.5478389", "0.54605097", "0.54191387", "0.5411095", "0.5411095", "0.5411095", "0.5411095", "0.53747743", "0.5358271", "0.533729", "0.53346694", "0.53266615" ]
0.69342494
0
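NEXT is the simplest case among the records above: exactly one loop-variable token. A minimal illustrative check, again with hypothetical names rather than the dataset's classes:

class Next:
    def __init__(self, var=None):
        self.var = var

def parse_next(words):
    if len(words) == 1 and words[0].isidentifier():
        return Next(var=words[0])
    raise ValueError(f"Invalid NEXT statement: {words}")

assert parse_next(["I"]).var == "I"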
Parse the IFTHEN statement.
def parse_ifthen(self, words): ifthen_obj = IfThen() if len(words) == 5: arg1_obj = self.prim_parser.parse_primative_obj(words[0]) bool_op_obj = self.prim_parser.parse_bool_op(words[1]) arg2_obj = self.prim_parser.parse_primative_obj(words[2]) then_flag = words[3] == 'THEN' label_obj = self.prim_parser.parse_num(words[4]) if all([arg1_obj, bool_op_obj, arg2_obj, then_flag, label_obj]): ifthen_obj.arg1 = arg1_obj ifthen_obj.bool_op = bool_op_obj ifthen_obj.arg2 = arg2_obj ifthen_obj.label = label_obj else: raise StatementParseError( 'Invalid IF THEN statement: {0}'.format(words)) return ifthen_obj
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def parseIfStatement( ): # parse rountine for the if and uses the if class to print out the appropriate string\n\n\ttok = tokens.peek( )\n\tif debug: print( \"ifStatement: \", tok )\n\tstart = match( \"if\" )\n\texpr = expression( )\n\tblk = parseBlock( )\n\telseblk = None\n\ttok = tokens.peek( )\n\tif tok == \"else\":\n\t\tmatch( \"else\" )\n\t\telseblk = parseBlock( )\n\treturn ifStatement(expr, blk, elseblk)", "def parse_if_cmd(self, line):\n line = re.sub(\"^if *\", \"\", line)\n\n # remove the brackets\n statement, _ = gen_parse.get_str_between_delims(line, \"(\", \")\")\n\n # Check all variables have been declared\n any_vars = [i for i in re.findall(IN_STR_VAR_REGEX, statement)]\n # Get the variables declared\n _vars = []\n for var in any_vars:\n _Var = getattr(self, var.strip('$'))\n if type(_Var) == inp_types.Variable: _vars.append(_Var.data)\n else: _vars.append(_Var)\n\n for var_name, var_val in zip(any_vars, _vars):\n statement = statement.replace(var_name, str(var_val))\n\n # Evaluate the if statement\n try:\n var_container = {}\n exec(f\"val = {statement}\", var_container)\n val = var_container['val']\n except Exception as e:\n self.print_error(\"Couldn't parse the if statement\\n\\nError:\"\n + str(e))\n\n end_line = self.get_end_brace()\n\n self.line_num += 1\n if val is False:\n self.line_num = end_line", "def statement( ): # parse rountin for statement that makes sure the token is one of the following, eventually there will be an error caught\n\n\ttok = tokens.peek( )\n\tif debug: print( \"statement: \", tok )\n\tif tok == \"if\":\n\t\tstat = parseIfStatement( )\n\t\treturn stat\n\telif tok == \"while\":\n\t\tstat = parseWhileStatement( )\n\t\treturn stat\n\telse: \n\t\tstat = parseAssign( )\n\t\treturn stat", "def stmt_if(executor, stmt):\n e = Expression()\n result = e.eval(stmt._tokens, symbols=executor._symbols)\n if not result:\n executor.goto_next_line()", "def visit_if(self: Parser, node: doc.If) -> None:\n with self.var_table.with_frame():\n with T.If(self.eval_expr(node.test)):\n with T.Then():\n with self.var_table.with_frame():\n self.visit_body(node.body)\n if node.orelse:\n with T.Else():\n with self.var_table.with_frame():\n self.visit_body(node.orelse)", "def IfStatement(self):\n self.currtok = next(self.tg)\n if self.currtok[1].name == \"LPAREN\":\n self.currtok = next(self.tg)\n express = self.Expression()\n if self.currtok[1].name == \"RPAREN\":\n self.currtok = next(self.tg)\n state = self.Statement()\n if self.currtok[1].name == \"else\":\n self.currtok = next(self.tg)\n state2 = self.Statement()\n return ifelseStmt(express, state, state2)\n else:\n return ifStmt(express, state)\n raise SLUCSyntaxError(\"ERROR: Missing right paren on line {0}\".format(str(self.currtok[2] - 1)))\n raise SLUCSyntaxError(\"ERROR: Missing left paren on line {0}\".format(str(self.currtok[2] - 1)))", "def elif_(self):\n initial_pos = self.pos\n try:\n self.match_value(Identifier, \"elif\")\n expr = self.tokens[self.pos:]\n self.pos = len(self.tokens)\n\n return ElIfNode(expr)\n except ParseError:\n self.pos = initial_pos\n raise ParseError(\"Invalid elif directive.\")", "def _analyse_stmt_If(self, statement: ast.If, *, next: CFNode) -> CFNode:\n # Analyse both branches unconditionally: even if they're not reachable,\n # they still need to exist in the graph produced.\n if_branch = self._analyse_statements(statement.body, next=next)\n else_branch = self._analyse_statements(statement.orelse, next=next)\n\n # Analyse the condition, if a constant.\n branches: Dict[str, CFNode] 
= {}\n test_is_constant, test_value = self._expression_as_constant(statement.test)\n if test_is_constant:\n if test_value:\n branches.update(enter=if_branch)\n else:\n branches.update(else_=else_branch)\n else:\n branches.update(enter=if_branch, else_=else_branch, error=self._raise)\n\n return self._ast_node(statement, **branches)", "def compile_if(self) -> None:\n self._consume('if')\n self._consume('(')\n self.compile_expression()\n self._consume(')')\n\n end_lbl = f'IF_END_{self._if_count}'\n false_lbl = f'IF_FALSE_{self._if_count}'\n self._if_count += 1\n\n self._consume('{')\n self.writer.write_if(false_lbl)\n\n self.compile_statements()\n self.writer.write_goto(end_lbl)\n self.writer.write_label(false_lbl)\n\n self._consume('}')\n\n if self._get_current_token() == 'else':\n self._consume('else')\n self._consume('{')\n self.compile_statements()\n self._consume('}')\n\n self.writer.write_label(end_lbl)", "def elifs_to_if_then_else(stm):\n if stm.elifs:\n # replace elifs with nested if statements\n ifFalse = HdlStmBlock()\n topIf = HdlStmIf(stm.cond, stm.if_true, ifFalse)\n\n for c, stms in stm.elifs:\n _ifFalse = HdlStmBlock()\n\n lastIf = HdlStmIf(c, stms, _ifFalse)\n\n ifFalse.append(lastIf)\n ifFalse = _ifFalse\n\n if stm.if_false is None:\n lastIf.if_false = HdlStmBlock()\n else:\n lastIf.if_false = stm.if_false\n\n return topIf\n return stm", "def compile_if(self):\r\n else_label = \"ELSE_\" + str(self.__if_count)\r\n end_label = \"END_IF_\" + str(self.__if_count)\r\n self.__if_count += 1\r\n self.__advance(n=2)\r\n self.compile_expression()\r\n self.__vmwriter.write_arithmetic(\"not\")\r\n self.__vmwriter.write_if(else_label)\r\n self.__advance(n=2)\r\n self.compile_statements()\r\n self.__vmwriter.write_goto(end_label)\r\n self.__vmwriter.write_label(else_label)\r\n self.__advance()\r\n if self.__tokenizer.keyword() == TYPES_DIC[\"ELSE\"]:\r\n self.__advance(n=2)\r\n self.compile_statements()\r\n self.__advance()\r\n self.__vmwriter.write_label(end_label)", "def else_(self):\n initial_pos = self.pos\n try:\n self.match_value(Identifier, \"else\")\n return ElseNode()\n except ParseError:\n self.pos = initial_pos\n raise ParseError(\"Invalid else directive.\")", "def test_if_elseif_paren_statement():\n r = convert_code(\n \"{if foo}\\nbar\\n{elseif (foo and bar) or foo and (bar or (foo and bar))}\\nfoo{/if}\")\n assert r == \"{% if foo %}\\nbar\\n{% elseif (foo and bar) or foo and (bar or (foo and bar)) %}\\nfoo{% endif %}\"", "def _process_if(self, node):\n creg = node.children[0].name\n cval = node.children[1].value\n self.backend.set_condition(creg, cval)\n self._process_node(node.children[2])\n self.backend.drop_condition()", "def _If(self, t):\n self.fill(\"if (\")\n self.dispatch(t.test)\n self.write(\")\")\n self.enter()\n self.dispatch(t.body)\n self.leave()\n # collapse nested ifs into equivalent elifs.\n while (t.orelse and len(t.orelse) == 1 and\n isinstance(t.orelse[0], ast.If)):\n t = t.orelse[0]\n self.fill(\"else if (\")\n self.dispatch(t.test)\n self.write(\")\")\n self.enter()\n self.dispatch(t.body)\n self.leave()\n # final else\n if t.orelse:\n self.fill(\"else\")\n self.enter()\n self.dispatch(t.orelse)\n self.leave()", "def compile_if(self):\n\n\t\txml = '<ifStatement>\\n' + self.tokenizer.keyword() + self.tokenizer.symbol()\n\t\tself.outfile.write(xml)\n\n\t\tself.compile_expression()\n\n\t\txml = self.tokenizer.symbol() + self.tokenizer.symbol() + '<statements>\\n'\n\t\tself.outfile.write(xml)\n\n\t\twhile self.tokenizer.get_token() != 
'}':\n\t\t\tself.compile_statements()\n\n\t\tself.outfile.write('</statements>\\n' + self.tokenizer.symbol())\n\n\t\tif self.tokenizer.get_token() == 'else':\n\t\t\tself.compile_else()\n\n\t\tself.outfile.write('</ifStatement>\\n')", "def __EvaluateIf(self, countIf, line):\n countIf = countIf - 1\n i = self.__ifs[countIf]\n i.SetLinePointer(self.__linePointer)\n #s = self.ScanIfCond(self.__oc.GermanUmlautReplace(line))\n s = self.ScanIfCond(line)\n if s:\n i.Set(s[0])\n try:\n i.Eval()\n line = ''\n except:\n raise Core.Error.IfHasNoEndif(0, 'IF-EXPRESSION %i HAS HAD AN ERROR:' \\\n ' EITHER NO CORRESPONDING (endif) OR SYNTAX ERROR'\n % countIf)\n l1, l2 = i.GetNextLine(), line\n return l1, l2", "def compile_if(self):\r\n lab1 = self.class_name + \".L\" + str(self.label_index)\r\n self.label_index += 1\r\n lab2 = self.class_name + \".L\" + str(self.label_index)\r\n self.label_index += 1\r\n self.tokenizer.advance() # ignore 'if' keyword\r\n self.tokenizer.advance() # ignore '(' symbol\r\n self.compile_expression()\r\n self.code_writer.write_arithmetic(\"not\")\r\n self.tokenizer.advance() # ignore ')' symbol\r\n self.tokenizer.advance() # ignore '{'\r\n self.code_writer.write_if(lab1)\r\n self.compile_statements()\r\n self.code_writer.write_goto(lab2)\r\n self.tokenizer.advance() # ignore '}' symbol\r\n self.code_writer.write_label(lab1)\r\n if (self.tokenizer.token_type() == JackTokenizer.KEYWORD_T and\r\n self.tokenizer.key_word() == \"else\"):\r\n self.tokenizer.advance()\r\n self.tokenizer.advance() # ignore '{' symbol\r\n self.compile_statements()\r\n self.tokenizer.advance() # ignore '}' symbol\r\n self.code_writer.write_label(lab2)", "def test_if_elseif_else_statement():\n r = convert_code(\n \"{if foo}\\nbar\\n{elseif blue}\\nfoo\\n{else}bar{/if}\")\n assert r == \"{% if foo %}\\nbar\\n{% elseif blue %}\\nfoo\\n{% else %}bar{% endif %}\"", "def get_if_condition(self, file, i):\n\n # Check if 'if function' is to run main function of program\n if re.match(\"if __name__ == [\\\"']__main__[\\\"']:\", file[i]) and \\\n re.match(r\"\\s*main\\(\\)\", file[i + 1]):\n\n # If yes, return None\n return \"omit\", 2, \n\n # Run super definition\n line = super().get_if_condition(file, i)\n\n # Strip ending colon\n line = line.split(\":\", 1)\n line, multi_statement = line[0], line[1]\n\n # Set if keyword for back translation\n ln_split = line.split(\" \")\n if ln_split[0] not in [\"elif\", \"else\"]:\n if_kw = \"if\"\n else:\n if_kw, line = ln_split[0], \" \".join(ln_split[1:]).strip()\n\n # Replace 'elif' with standard\n if if_kw == \"elif\":\n if_kw = \"else if\"\n\n # Replace logical operators\n line = self.replace_logical_ops(line, direction=\"to\")\n\n # Create start and end for while call\n start = []\n end = []\n\n # Check if multiple statements are declared in one line\n if multi_statement.strip():\n start += multi_statement.split(\";\")\n\n # Return if condition\n return line, if_kw, start, end", "def test_28_if(self):\n\t\tinput = \"\"\"procedure main(); var x:integer; begin x:=foo(); end\n\t\tfunction foo():integer; var a:real; begin\n\t\twith a:integer;b:real; do begin if a>0 then return; else return 0; end\n\t\tend\"\"\"\n\t\texpect = \"Type Mismatch In Statement: Return(None)\"\n\t\tself.assertTrue(TestChecker.test(input,expect,428))", "def __parse_conditional(self, buffer):\n\t\tret = []\n\t\t\n\t\twhile True:\n\t\t\tcondition = Condition(self.__read_until(buffer, \"[\"))\n\t\t\tbuffer.seek(-1, SEEK_CUR)\n\t\t\tvalue = self.__read_block(buffer, startchr=\"[\", 
endchr=\"]\")\n\t\t\tvalue = SpellString(value).format(self.obj, proxy=self.proxy)\n\t\t\tret.append((condition, value))\n\t\t\tif condition.is_else():\n\t\t\t\tbreak\n\t\t\n\t\treturn ret", "def eval_if_else(item, motif_node_dict):\n # evaluate the `if` branch first\n true_branch = item.iftrue\n if type(true_branch).__name__ == 'FuncCall':\n motif_node, left = eval_function_call(true_branch, motif_node_dict) \n elif type(true_branch).__name__ == 'Assignment':\n left = eval_assignment(true_branch, motif_node_dict)\n elif type(true_branch).__name__ == 'Decl':\n left = eval_declaration(true_branch, motif_node_dict)\n elif type(true_branch).__name__ == 'Return':\n left = eval_return(true_branch, motif_node_dict)\n elif type(true_branch).__name__ == 'Compound':\n left = eval_function_body(true_branch, motif_node_dict)\n else:\n left = None\n # evaluate the `else` branch if it exists\n false_branch = item.iffalse\n if type(false_branch).__name__ == 'FuncCall':\n motif_node, right = eval_function_call(false_branch, motif_node_dict)\n elif type(false_branch).__name__ == 'Assignment':\n right = eval_assignment(false_branch, motif_node_dict)\n elif type(false_branch).__name__ == 'Decl':\n right = eval_declaration(false_branch, motif_node_dict)\n elif type(false_branch).__name__ == 'Return':\n right = eval_return(false_branch, motif_node_dict)\n elif type(false_branch).__name__ == 'Compound':\n right = eval_function_body(false_branch, motif_node_dict)\n elif type(false_branch).__name__ == 'If': # else if case\n right = eval_if_else(false_branch, motif_node_dict)\n else:\n right = None\n\n if left or right:\n # only under certain circumstances do we actually create alternation node\n if eval_if_condition(item.cond):\n return provenance.create_alternation_node(left, right)\n else:\n # if only one branch is not None, we need not create a group node\n if not left:\n return right\n if not right:\n return left\n return provenance.create_group_node(left, right)\n else:\n return None", "def test_if_elseif_statement():\n r = convert_code(\n \"{if foo}\\nbar\\n{elseif blue}\\nfoo{/if}\")\n assert r == \"{% if foo %}\\nbar\\n{% elseif blue %}\\nfoo{% endif %}\"", "def fn_if(self, value):\n\n condition_name, true_value, false_value = value\n if self.parser.conditions.evaluate(condition_name):\n return true_value\n else:\n return false_value", "def test_if_elseif_and_statement():\n r = convert_code(\n \"{if foo}\\nbar\\n{elseif awesome.sauce[1] and blue and 'hello'}\\nfoo{/if}\")\n assert r == \"{% if foo %}\\nbar\\n{% elseif awesome.sauce[1] and blue and 'hello' %}\\nfoo{% endif %}\"", "def check_if_statement(self, line):\n line = re.sub(\"^if *\", \"\", line)\n if '(' not in line or ')' not in line:\n self.print_error(\"Syntax error: If statements take the syntax if (condition) { ... 
}\",\n errorFunc=SyntaxError)\n\n\n # remove the brackets\n statement, _ = gen_parse.get_str_between_delims(line, \"(\", \")\")\n\n\n # Check all variables have been declared\n any_vars = [i.strip('$') for i in re.findall(VAR_REGEX, statement)]\n for var_name in any_vars:\n if var_name not in self.variables:\n self.print_error(f\"Unknown variable: {var_name}\")", "def if_(self):\n initial_pos = self.pos\n try:\n self.match_value(Identifier, \"if\")\n expr = self.tokens[self.pos:]\n self.pos = len(self.tokens)\n\n return IfNode(expr)\n except ParseError:\n self.pos = initial_pos\n raise ParseError(\"Invalid if directive.\")", "def postparsing_precmd(self, statement):\n stop = False\n return stop, statement", "def postparsing_precmd(self, statement):\n stop = False\n return stop, statement" ]
[ "0.688943", "0.64483243", "0.6328138", "0.63113165", "0.62778693", "0.59737927", "0.58829176", "0.58608335", "0.5852734", "0.5815672", "0.5770163", "0.57656604", "0.57615894", "0.57266235", "0.5684156", "0.56465256", "0.5621939", "0.5607341", "0.55948055", "0.55742", "0.55658996", "0.55628765", "0.550219", "0.547989", "0.54629874", "0.5458752", "0.54465246", "0.54266226", "0.5414987", "0.5414987" ]
0.7190795
0
Parse the END statement.
def parse_end(self, words):
    end_obj = End()

    if words:
        raise StatementParseError(
            'The END statement should have no extra words: {0}.'.format(
                words))

    return end_obj
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def end():\n return EndBlock()", "def parse_footer(self): # -> tuple[list[Unknown], Literal['']]:\n ...", "def parse(self, tokens):\n self.tokens = tokens\n self.tokens.append(END())\n t = self.e()\n self.expect(END)\n return t", "def endComment():\r\n\tglobal sEType, sEVar, sEData, iIndent\r\n\tsEType = BRIEF\r\n\tsEVar = None\r\n\tsEData = \"\"\r\n\tiIndent = -1", "def fix_end(self, node):\n if node.header.tokens[0].type == Token.SEPARATOR:\n indent = node.header.tokens[0]\n else:\n indent = Token(Token.SEPARATOR, self.formatting_config.separator)\n node.end = End([indent, Token(Token.END, \"END\"), Token(Token.EOL)])", "def handle_eof_in_block(self):\n self.handle_error(\"hit EOF, expected close tag\")", "def end(p):\n # Input file\n f = '/'.join([p, 'collector.stats'])\n check_path(f)\n\n # Open file with universal newline support\n with open(f, 'rU') as fh:\n for line in fh.readlines():\n if 'Script ended' in line:\n end = line.split(':', 1)[1].strip()\n break\n\n return end", "def parse_footer(self):\n lines=self.lines\n bodyfinish=re.compile(r\"</body>\", re.IGNORECASE).search(lines).span()[0]\n self.footer=lines[bodyfinish:]", "def get_end_brace(self):\n # Find the code to run\n\n brack_num, found_first = 0, False\n for iline, line in enumerate(self.file_ltxt[self.line_num:]):\n if '{' in line: brack_num += 1\n if '}' in line: brack_num -= 1\n\n if not found_first:\n if brack_num > 0: found_first = True\n else: continue\n\n if brack_num == 0: break\n\n else:\n self.print_error(\"Can't find the closing brace\")\n\n end_line = self.line_num + iline\n return end_line", "def at_end():\n def run(chunk, last):\n if chunk:\n return ParserResult.from_done(False, chunk, last)\n elif last:\n return ParserResult.from_done(True, chunk, last)\n else:\n return ParserResult.from_partial(Parser(run))\n return Parser(run)", "def end_of_input():\n return at_end.bind(lambda end:\n Parser(lambda chunk, last: ParserResult.from_error(\"Not end of input\"))\n if not end else Parser.unit(None))", "def parse_footer(self): # -> tuple[list[Unknown], str]:\n ...", "def parse_footer(self): # -> tuple[list[Unknown], str]:\n ...", "def finish_parse(self) -> None:\n pass", "def isEnd(self):\n return _libsbml.XMLToken_isEnd(self)", "def handle_endtag(self, tag):\n if verbose(): print(\"TIParser.handle_endtag(self, %s)\" % (tag))\n if tag == 'head':\n self.head = 'closed'\n if tag == 'body':\n self.body = 'closed'\n (line, offset) = self.getpos()\n etag = self.text[line-1][offset:]\n if tag not in self.nostack:\n pop = self.stack.pop()\n if tag != pop:\n self.errmsg(\"</%s> does not match <%s>\" % (tag, pop))", "def _ParseRecordEnd(self, parser_mediator, structure):\n time_elements_structure = self._GetValueFromStructure(\n structure, 'date_time')\n\n self._event_data.end_time = self._ParseTimeElements(time_elements_structure)\n\n parser_mediator.ProduceEventData(self._event_data)\n\n self._ResetState()", "def end(self):\n return self.__end_line", "def _is_at_end(self):\n return self._peek().token_type == scanner.TokenType.EOF", "def RespEnd(builder):\n return End(builder)", "def statements(self):\n\n while self.token.value not in ('EOF', 'else', 'end'):\n\n with self.resync('\\n', consume=True):\n self.statement()\n\n if not self.match(Tokens.SYMBOL, \";\"):\n self.error(\"expected ';' after statement \", token=self.prev_token, after_token=True)\n\n # consume the 'end' token if there is one\n self.match(Tokens.KEYWORD, 'end')", "def test_sv_end_svend():\n # Example:\n # 2 321682 . 
T <DEL> 6 PASS SVTYPE=DEL;END=321887;SVLEN=-205;CIPOS=-56,20;CIEND=-10,62 GT:GQ 0/1:12\n end = sv_end(pos=321682, alt=\"<DEL>\", svend=321887, svlen=-205)\n assert end == 321886", "def do_end(end):\n if end:\n do_action(end)", "def consume_endmarker(self) -> None:\n line = self.fetch(1, allow_endmarker=True)\n if self.pattern.match(line):\n self.step(1)", "def EquipmentStatExcelEnd(builder):\n return End(builder)", "def GachaCraftNodeExcelEnd(builder):\n return End(builder)", "def end (self):\n return self._end if self._end != self.inf else self.e", "def end(text=None):\n global _current_line\n if _current_line is not None:\n _current_line.end(text)\n _current_line = None", "def error_till_line_end(self, start, text):\n end = start\n try:\n while text[end] != '\\n': # there's whitespace in rules\n end += 1\n except IndexError:\n end = len(text)\n if end != start:\n self.cur.append((start, Error, text[start:end]))\n end = self.whitespace(end, text)\n return end", "def end(self):\n return self._get('end')" ]
[ "0.6380834", "0.62347376", "0.6156883", "0.60980904", "0.60832125", "0.60414135", "0.6007475", "0.59288806", "0.59009564", "0.58169407", "0.57706946", "0.56779563", "0.56779563", "0.56710136", "0.56252414", "0.5613877", "0.55523235", "0.5547321", "0.55277026", "0.55220705", "0.55143076", "0.5431874", "0.54078", "0.5385679", "0.53370357", "0.5312417", "0.53053665", "0.52992594", "0.5289059", "0.5280394" ]
0.7269551
0
Parse a set of statement words. The label line number has already been removed and the first word is the statement keyword. The rest of the words are the statement arguments.
def parse_statement(self, words):
    keyword = words[0]
    rest = words[1:]

    if keyword == 'PRINT':
        obj = self.parse_print(rest)
    elif keyword == 'LET':
        obj = self.parse_let(rest)
    elif keyword == 'GOTO':
        obj = self.parse_goto(rest)
    elif keyword == 'FOR':
        obj = self.parse_for(rest)
    elif keyword == 'NEXT':
        obj = self.parse_next(rest)
    elif keyword == 'IF':
        obj = self.parse_ifthen(rest)
    elif keyword == 'END':
        obj = self.parse_end(rest)
    elif keyword == 'REM':
        obj = self.parse_rem(rest)
    else:
        raise StatementParseInvalidKeyword(
            'Invalid keyword: {0}.'.format(keyword))

    return obj
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def parseStmt(line):\n print(\"Statement\")\n index=0\n if line[0] == 's':\n print(\"Set\")\n index += 4\n rest = line[index:]\n cmds = str.split(rest, ',')\n parseSet(cmds)\n elif line[0] == 'h':\n exit()\n elif line[0] == 'j':\n index += 5\n if line[index] == ' ':\n print(\"Jumpt\")\n index += 1\n rest = line[index:]\n cmds = str.split(rest, ',')\n parseJumpt(cmds)\n else:\n print(\"Jump\")\n rest = line[index:]\n cmds = str.split(rest, ',')\n parseJump(cmds)\n else:\n print(\"Invalid Operation\")", "def wordize(lines):\n parser = Parser()\n tokenizer = Tokenizer()\n word_ctr = WordCounter()\n words = []\n for l in lines :\n if (l.rstrip()) :\n statement = parser.parseSentence(l, int(word_ctr))\n token_lists = tokenizer.tokenizeStatement(statement, int(word_ctr))\n for l in token_lists :\n if len(l) > 0 :\n words.append(l)\n word_ctr += 1\n return words", "def process_statement (lx,wlist,fb):\n # Grammar for the statement language is:\n # S -> P is AR Ns | P is A | P Is | P Ts P\n # AR -> a | an\n # We parse this in an ad hoc way.\n msg = add_proper_name (wlist[0],lx)\n if (msg == ''):\n if (wlist[1] == 'is'):\n if (wlist[2] in ['a','an']):\n lx.add (wlist[3],'N')\n fb.addUnary ('N_'+wlist[3],wlist[0])\n else:\n lx.add (wlist[2],'A')\n fb.addUnary ('A_'+wlist[2],wlist[0])\n else:\n stem = verb_stem(wlist[1])\n if (len(wlist) == 2):\n lx.add (stem,'I')\n fb.addUnary ('I_'+stem,wlist[0])\n else:\n msg = add_proper_name (wlist[2],lx)\n if (msg == ''):\n lx.add (stem,'T')\n fb.addBinary ('T_'+stem,wlist[0],wlist[2])\n return msg", "def process_statement (lx,wlist,fb):\n # Grammar for the statement language is:\n # S -> P is AR Ns | P is A | P Is | P Ts P\n # AR -> a | an\n # We parse this in an ad hoc way.\n msg = add_proper_name (wlist[0],lx)\n if (msg == ''):\n if (wlist[1] == 'is'):\n if (wlist[2] in ['a','an']):\n lx.add (wlist[3],'N')\n fb.addUnary ('N_'+wlist[3],wlist[0])\n else:\n lx.add (wlist[2],'A')\n fb.addUnary ('A_'+wlist[2],wlist[0])\n else:\n stem = verb_stem(wlist[1])\n if (len(wlist) == 2):\n lx.add (stem,'I')\n fb.addUnary ('I_'+stem,wlist[0])\n else:\n msg = add_proper_name (wlist[2],lx)\n if (msg == ''):\n lx.add (stem,'T')\n fb.addBinary ('T_'+stem,wlist[0],wlist[2])\n return msg", "def parse_words(source_dict):\n name, syn = None, []\n if 'synonym' in source_dict:\n syn = source_dict['synonym']\n if 'prefLabel' in source_dict:\n name = source_dict['prefLabel']\n return name, syn", "def syntax_parse(self, sent):\r\n tuples = list()\r\n for word in sent:\r\n if word.head is word:\r\n head_idx = 0\r\n else:\r\n head_idx = word.head.i + 1\r\n tuples.append([word.i + 1, # Current word index, begin with 1\r\n word.text, # Word\r\n word.pos_, # Coarse-grained tag\r\n word.head,\r\n head_idx, # Head of current Index\r\n word.dep_, # Relation\r\n ])\r\n return tuples", "def completedefault(self, text, line, begidx, *ignored):\r\n stdline = line[:begidx].rstrip().upper()\r\n if stdline.endswith(('ANALYZE', 'DESC', 'FROM', 'INTO ', 'REINDEX',\r\n 'REPLACE', 'TABLE', 'UPDATE', 'VACUUM')):\r\n return self.get_tables(text)\r\n if stdline.endswith(('PRAGMA')):\r\n return [i for i in self.PRAGMA if i.startswith(text.lower())]\r\n return [i for i in (self.SQLWORDS + self.SQLCOMMANDS)\r\n if i.startswith(text.upper())]", "def postparsing_precmd(self, statement):\n stop = False\n return stop, statement", "def postparsing_precmd(self, statement):\n stop = False\n return stop, statement", "def parseStatements(inputFile):\n lex = Lexor(inputFile)\n while lex.peek() != '':\n 
parseStmt(lex.next())", "def compile_statements(self):\r\n tok_type = self.tokenizer.token_type()\r\n while tok_type == JackTokenizer.KEYWORD_T:\r\n key = self.tokenizer.key_word()\r\n if key == \"let\":\r\n self.compile_let()\r\n elif key == \"do\":\r\n self.compile_do()\r\n elif key == \"while\":\r\n self.compile_while()\r\n elif key == \"return\":\r\n self.compile_return()\r\n else:\r\n self.compile_if()\r\n tok_type = self.tokenizer.token_type()\r\n continue\r\n self.tokenizer.advance() # ignore ';' symbol\r\n tok_type = self.tokenizer.token_type()", "def stmtList( ):\n\n\ttok = tokens.peek( )\n\tif debug: print( \"stmtList: \", tok )\n\tstat = statement( )\n\treturn stat", "def parse_cmdline(argv):\n if argv is None:\n argv = sys.argv[1:]\n\n # initialize the parser object:\n parser = argparse.ArgumentParser(description='Adds a word to a Hunspell-type dictionary file.')\n\n parser.add_argument(\"-s\", \"--sfx\", help=\"Suffix to be added after word (and after a '/'. For example, 'SM' will \"\n \"allow the word to be made plural and possessive. See hunspell \"\n \"documentation for more documentation on codes.\", default='')\n\n parser.add_argument(\"-d\", \"--dict_loc\", help=\"Location of the dictionary file to be modified. \"\n \"The default is: '{}'\".format(DEF_DICT), default=DEF_DICT)\n\n parser.add_argument(\"new_word\", help=\"The word to add to the dictionary\", type=str)\n\n args = None\n try:\n args = parser.parse_args(argv)\n except (InvalidDataError, IOError, DuplicateOptionError, SystemExit) as e:\n if hasattr(e, 'code') and e.code == 0:\n return args, GOOD_RET\n warning(e)\n parser.print_help()\n return args, INPUT_ERROR\n\n return args, GOOD_RET", "def parse(args, query):\n\n global query_type\n\n # Deal first with requests for definition or pronunciation\n # 1. Make the code easier to read\n first_word = args[0]\n second_word = args[1] if len(args) > 1 else \"\"\n third_word = args[2] if len(args) > 2 else \"\"\n fourth_word = args[3] if len(args) > 3 else \"\"\n # we use the teranary operator (this if ____ else that) to avoid an IndexError\n # IndexError would be raised if we tried to access the second element (args[1])\n # in a list which contained only one item (eg args == [\"lonely\"])\n # the teranary operator (in most languages it looks like \"____ ? this : that\")\n # returns \"this\" when the if is true and \"that\" when the if is false\n # meaning, if len(args) is NOT greater than 1, second_word == \"\"\n\n # 2. Check for keywords in the list of arguments\n # Example: nostrum defined\n # Example: pronunciation of otolaryngology\n if first_word == \"define\":\n # e.g. if the first word is \"define\" we'll add the second word to the query\n query = {\"sp\": second_word, \"md\": \"d\", \"max\": \"1\", \"qe\": \"sp\", \"ipa\": \"1\"}\n # the query is a dictionary of GET parameters for the http request, eg\n # https://api.datamuse.com/words?max=1&sp=SECOND_WORD_HERE&qe=sp&md=d&ipa=1\n elif second_word == \"defined\" or second_word == \"definition\":\n query = {\"sp\": first_word, \"md\": \"d\", \"max\": \"1\", \"qe\": \"sp\", \"ipa\": \"1\"}\n # this one uses string interpolation (the f\"\" stuff)\n elif f\"{second_word} {third_word}\" == \"means what\":\n query = {\"sp\": first_word, \"md\": \"d\", \"max\": \"1\", \"qe\": \"sp\", \"ipa\": \"1\"}\n elif f\"{second_word} {third_word} {fourth_word}\" == \"is said how\":\n query = {\"sp\": first_word, \"md\": \"r\", \"max\": \"1\", \"qe\": \"sp\", \"ipa\": \"1\"}\n # this one uses regular expressions -- i.e. 
if the second_word is \"of\" or \"for\"\n elif first_word == \"definition\" and re.match(r'(of)|(for)',second_word):\n query = {\"sp\": third_word, \"md\": \"d\", \"max\": \"1\", \"qe\": \"sp\", \"ipa\": \"1\"}\n # the is_pronounced function returns true if first_word is a (mis)spelling of pronounced\n elif re.match(r'(of)|(for)',second_word) and is_pronounced(first_word):\n query = {\"sp\": third_word, \"md\": \"r\", \"max\": \"1\", \"qe\": \"sp\", \"ipa\": \"1\"}\n # the ordering in the above list is not entirely random\n # since an if-elif-else statement won't keep evaluating after it finds a match\n # it makes sense to put the most computationally complex clauses at the end\n # >>> import timeit\n # >>> timeit.timeit('from word_helpers import is_pronounced; is_pronounced(\"pronounced\")', number=10000)\n # 0.022870146989589557\n # >>> timeit.timeit('args = [\"defined\"]; args[0] == \"defined\"', number=10000)\n # 0.002359684993280098\n # it takes 2 milliseconds to compare a string in a list 10,000 times\n # -- versus 2 centiseconds to run is_pronounced 10,000 times\n # (on my Intel Core i5 2.67GHz CPU -- obviously speed depends on the processor)\n # it's also worth noting that readability counts more than speed optimization (most of the time!)\n\n # Quick way to check if any of the above if statements matched\n if \"sp\" in query:\n # if so, we are done in this function\n if query[\"md\"] == \"r\": query_type = \"PRO\"\n if query[\"md\"] == \"d\": query_type = \"DEF\"\n return query\n\n # these will be useful later\n STOP_WORDS = (\"and\", \"meaning\", \"means\", \"max\", \"about\", \"which\", \"that\")\n\n # Parse more complicated requests for synonyms, etc\n # 0 is false in python, so this loop will run until we've removed all the args\n while len(args):\n # we must reset these vars each time the loop starts\n # in case we've deleted items from the args list\n first_word = args[0]\n second_word = args[1] if len(args) > 1 else \"\"\n third_word = args[2] if len(args) > 2 else \"\"\n # we use the teranary operator (this if ____ else that) to avoid an IndexError\n # IndexError would be raised if we tried to access the second element (args[1])\n # in a list which contained only one item (eg args == [\"lonely\"])\n # the teranary operator (in most languages it looks like \"____ ? this : that\")\n # returns \"this\" when the if is true and \"that\" when the if is false\n # meaning, if len(args) is NOT greater than 1, second_word == \"\"\n\n # Disambiguate homonym requests from spelling correction requests\n # Example: sounding like tung\n # Example: sounds like doe but spelled differently\n if re.match(r'sound((s)|(ing)) like',f\"{first_word} {second_word}\"):\n\n # again, use len(args) to avoid an IndexError\n if len(args) >= 6 and \\\n re.match(r'((but)|(except)) spelled different(ly)?',f\"{args[3]} {args[4]} {args[5]}\"):\n # but instead of teranary operator,\n # use \"short circuit logic\" -- when python sees \"if __A__ and __B__ \",\n # it knows that if A is false, the whole thing will be false\n # (you can't have \"ice cream and potatoes\" for dinner if you don't have ice cream)\n # and it won't waste time evaluating B, so re.match won't run and args[4]\n # won't be accessed and no IndexError will be raised, yay!\n # regex explained: ? means the prior thing matched zero or one times\n # different(ly)? 
matches \"different\" and \"differently\"\n query[\"rel_hom\"] = third_word\n # now, delete 6 items from args, starting at item 0\n del args[0:6]\n else:\n query[\"sl\"] = third_word\n del args[0:3]\n\n # Example: spelled like 'cens?r'\n elif re.match(r'spell((ed)|(ing)) like',f\"{first_word} {second_word}\"):\n # two stars (**) means \"unpack\" a dictionary\n # just like unpacking a suitcase, we've dumped the old contents of query\n # into a new dictionary (which we are saving with the same variable name!)\n query = {**query,\"sp\": third_word}\n # query[\"sp\"] = third_word also works fine\n # just showing off how to combine two dictionaries :)\n del args[0:3]\n\n # Example: rhymes with culminate\n elif len(args) > 2 and second_word == \"with\" and is_rhymes(first_word):\n query[\"rel_rhy\"] = third_word\n del args[0:3]\n\n # Example: almost rhymes with culminate\n elif len(args) > 3 and \\\n f\"{first_word} {third_word}\" == \"almost with\" and \\\n is_rhymes(second_word):\n query[\"rel_nry\"] = args[3] # fourth_word\n del args[0:4]\n\n # Example: comes after sea\n elif f\"{first_word} {second_word}\" == \"comes after\":\n query[\"lc\"] = third_word\n del args[0:3]\n elif first_word == \"follows\":\n query[\"lc\"] = second_word\n del args[0:2]\n elif f\"{first_word} {second_word}\" == \"comes before\":\n query[\"rc\"] = third_word\n del args[0:3]\n elif first_word == \"preceeds\":\n query[\"rc\"] = second_word\n del args[0:2]\n\n # Example: describes paint\n elif first_word == \"describes\":\n query[\"rel_jjb\"] = second_word\n del args[0:2]\n\n # Example: associated with feet\n elif f\"{first_word} {second_word}\" == \"associated with\" or \\\n f\"{first_word} {second_word}\" == \"triggered by\":\n query[\"rel_trg\"] = third_word\n del args[0:3]\n\n # Example: meaning feeling tired\n elif first_word in [\"means\",\"meaning\",\"like\"]:\n # get rid of first_word\n del args[0]\n # now short circuit logic again, plus using the tuple from ealier\n # b/c if we have \"meaning deer and sounds like roe\" we don't want\n # query[\"ml\"] == \"deer and sounds like roe\" -- it should be just \"deer\"\n while len(args) and args[0] not in STOP_WORDS:\n # teranary operator prevents KeyError if \"ml\" not already in query dictionary\n query[\"ml\"] = f\"{query['ml']} {args[0]}\" if \"ml\" in query else args[0]\n del args[0]\n # an example with the previous code to make things clearer\n # say args == [\"means\", \"egg\", \"beater\", \"and\", \"max\", \"35\"]\n # first_word IS in [\"means\",\"meaning\",\"like\"]\n # del first_word, args is now [\"egg\", \"beater\", \"and\", \"max\", \"35\"]\n # len(args) == 5, args[0] is NOT in STOP_WORDS\n # \"ml\" is NOT in query, so teranary returns args[0] (\"egg\")\n # args[0] is copied to query[\"ml\"] (query is now {ml: \"egg\"})\n # del args[0], args is now [\"beater\", \"and\", \"max\", \"35\"]\n # return to top of while loop, len(args) == 4, args[0] is NOT in STOP_WORDS\n # \"ml\" IS in query, so teranary returns f\"{query['ml']} {args[0]}\" (\"egg beater\") \n # f\"{query['ml']} {args[0]}\" is copied to query[\"ml\"]\n # (query is now {ml: \"egg beater\"})\n # del args[0], args is now [\"and\", \"max\", \"35\"]\n # return to top of while loop, len(args) == 3,\n # args[0] IS in STOP_WORDS (args[0] == \"and\")\n # DO NOT enter the while loop, continue past this code block\n\n # Discover the topic of our query\n elif first_word == \"about\":\n del args[0]\n count = 0\n # Datamuse allows a max of five topic words\n while len(args) and args[0] not in STOP_WORDS 
and count <= 5:\n query[\"topics\"] = f\"{query['topics']} {args[0]}\" if \"topics\" in query else args[0]\n del args[0]\n # count += 1 is the same as count = count + 1\n count += 1\n\n # How many results to return (max 1000)\n elif first_word in [\"max\", \"maximum\", \"only\"]:\n user_max = convert_num(second_word)\n if user_max and int(user_max) <= 1000:\n query[\"max\"] = user_max\n del args[0:2]\n\n # Remove filler words if they weren't parsed out above\n elif first_word in [\"that\",\"which\",\"and\",\"like\",\"is\"]:\n del args[0]\n\n # Add anything not otherwise parsable to the ml parameter\n else:\n query[\"ml\"] = f\"{query['ml']} {first_word}\" if \"ml\" in query else first_word\n del args[0]\n\n # this is the bottom of that massive while loop\n # if args is not empty by now, we'll start over from the top ^\n\n return query\n # and this is the end of the \"def parse(args, query)\" function\n # whew!", "def parse_goto(self, words):\n\n goto_obj = Goto()\n input_str = ' '.join(words)\n\n obj = self.prim_parser.parse_arith_expr(input_str)\n if obj:\n goto_obj.label = obj\n else:\n obj = self.prim_parser.parse_num(input_str)\n if obj:\n goto_obj.label = obj\n else:\n raise StatementParseError(\n 'No valid print args: {0}.'.format(words))\n\n return goto_obj", "def visit_stmt(\n self, node: Node, keywords: Set[str], parens: Set[str]\n ) -> Iterator[Line]:\n normalize_invisible_parens(\n node, parens_after=parens, mode=self.mode, features=self.features\n )\n for child in node.children:\n if is_name_token(child) and child.value in keywords:\n yield from self.line()\n\n yield from self.visit(child)", "def parse_lines(lines, packages):\n for line in lines:\n x = line.split(' ')\n cmd = x[0].upper()\n #LOG.debug(cmd)\n if 'LIST' in cmd:\n getattr(commands, cmd)(p)\n else:\n getattr(commands, cmd)(line, p)", "def doctest_DKBCCCsvStatementParser():", "def populate_keywords(kwds, pkg_id):\n if not kwds:\n return\n for word in kwds:\n # @todo(Check data and use the special character-list\n # variable in the constants' file.)\n word = word.strip(\".:;=-,\\\"'\\n $_%{}()[]^*?& +#`\").lower()\n if len(word) <= 1 or (word in constants.STOP_WORDS) or \\\n has_special_chars(word):\n continue\n insert_keyword(word, pkg_id)", "def parse(args: list, keyword_set: set) -> dict:\n parsed_dict = {'': []}\n while args:\n keyword = get_keyword(arg=args[0], keyword_set=keyword_set)\n\n if keyword is not None:\n args.pop(0)\n keyword_name = keyword.keyword_name\n\n if keyword_name in parsed_dict:\n raise necrobot.exception.DoubledArgException(keyword=keyword.keyword)\n\n if keyword.param_for is not None:\n parsed_dict[keyword_name] = [keyword.keyword]\n else:\n parsed_dict[keyword_name] = []\n num_args_pulled = 0\n while num_args_pulled < keyword.num_args:\n if not args:\n raise necrobot.exception.NumParametersException(\n keyword=keyword,\n num_expected=keyword.num_args,\n num_given=num_args_pulled\n )\n else:\n num_args_pulled += 1\n parsed_dict[keyword_name].append(args[0])\n args.pop(0)\n else:\n parsed_dict[''].append(args[0])\n args.pop(0)\n\n return parsed_dict", "def handle_stmt(self, stmt, p_elem, pset={}):\n if self.debug > 0:\n sys.stderr.write(\"Handling '%s %s'\\n\" %\n (util.keyword_to_str(stmt.raw_keyword), stmt.arg))\n try:\n method = self.stmt_handler[stmt.keyword]\n except KeyError:\n if isinstance(stmt.keyword, tuple): # extension\n self.handle_extension(stmt, p_elem)\n return\n else:\n raise error.EmitError(\n \"Unknown keyword %s (this should not happen)\\n\"\n % stmt.keyword)\n\n 
method(stmt, p_elem, pset)", "def scan_for_statements(source):\n line_terminators = \"\\n\\r\" + ATASCII_LINEFEED\n statement_terminators = \":'\" + line_terminators\n statement_list = []\n accumulator = []\n state = ScanState.NEUTRAL\n last_space_position = None\n match_tree = build_match_tree(ABBREVIATIONS)\n source += \":\" # Colon is a sentinel needed to resolve final statement\n for char in source:\n if state == ScanState.NEUTRAL:\n if char == '\"':\n state = ScanState.STRING\n accumulator.append('\"')\n elif char in \"'.\":\n state = ScanState.COMMENT\n elif char not in \" \" + statement_terminators:\n state = ScanState.CODE\n accumulator.append(char)\n elif state == ScanState.CODE:\n if char in statement_terminators:\n statement = abbreviate(match_tree, \"\".join(accumulator))\n statement_list.append(statement)\n accumulator = []\n if char == \"'\":\n state = ScanState.COMMENT\n else:\n state = ScanState.NEUTRAL\n elif char != \" \":\n accumulator.append(char)\n if char == '\"':\n state = ScanState.STRING\n else:\n tail_window = (\"\".join(accumulator[-4:])).upper()\n if re.match(\"[A-Z]AND\", tail_window) and len(accumulator) - last_space_position == 3:\n accumulator.insert(last_space_position, \" \")\n elif re.match(\".[A-Z]OR\", tail_window) and len(accumulator) - last_space_position == 2:\n accumulator.insert(last_space_position, \" \")\n else:\n last_space_position = len(accumulator)\n elif state == ScanState.STRING:\n if char == \"$\":\n state = ScanState.HEX\n hex_accumulator = []\n else:\n accumulator.append(char)\n if char == '\"':\n state = ScanState.CODE\n elif state == ScanState.HEX:\n hex_accumulator.append(char)\n if re.match(\"[0-9a-fA-F]\", char):\n if len(hex_accumulator) == 2:\n escape_value = int(\"\".join(hex_accumulator), 16)\n accumulator.append(chr(escape_value))\n state = ScanState.STRING\n else:\n accumulator.append(\"$\")\n accumulator.extend(hex_accumulator)\n state = ScanState.STRING\n else: # state is ScanState.COMMENT (by elimination of all others)\n if char in line_terminators:\n state = ScanState.NEUTRAL\n return statement_list", "def compile_statements(self):\r\n while self.__tokenizer.token_type() == TYPES_DIC[\"KEYWORD\"]:\r\n if self.__tokenizer.keyword() == TYPES_DIC[\"LET\"]:\r\n self.compile_let()\r\n elif self.__tokenizer.keyword() == TYPES_DIC[\"DO\"]:\r\n self.compile_do()\r\n elif self.__tokenizer.keyword() == TYPES_DIC[\"WHILE\"]:\r\n self.compile_while()\r\n elif self.__tokenizer.keyword() == TYPES_DIC[\"RETURN\"]:\r\n self.compile_return()\r\n elif self.__tokenizer.keyword() == TYPES_DIC[\"IF\"]:\r\n self.compile_if()", "def statement( ): # parse rountin for statement that makes sure the token is one of the following, eventually there will be an error caught\n\n\ttok = tokens.peek( )\n\tif debug: print( \"statement: \", tok )\n\tif tok == \"if\":\n\t\tstat = parseIfStatement( )\n\t\treturn stat\n\telif tok == \"while\":\n\t\tstat = parseWhileStatement( )\n\t\treturn stat\n\telse: \n\t\tstat = parseAssign( )\n\t\treturn stat", "def parser_words(self):\n words = self.query_no_accent.split()\n query_words = []\n for word in words:\n if (word not in STOP_WORDS and word not in QUESTION_WORDS):\n query_words.append(word)\n query_words = ' '.join(query_words)\n return query_words", "def parseline(self, line):\n line = line.strip()\n\n if not line:\n # Deal with empty line or all whitespace line\n return None, None, line\n\n # Expand command shortcuts to the full command name\n for (shortcut, expansion) in self.shortcuts:\n if 
line.startswith(shortcut):\n line = line.replace(shortcut, expansion + ' ', 1)\n break\n\n i, n = 0, len(line)\n while i < n and line[i] in self.identchars:\n i += 1\n command, arg = line[:i], line[i:].strip()\n return command, arg, line", "def _parse_mw(self, line):\n # Parse the line\n # pylint: disable=W0612\n if self.lang == ENGLISH:\n # From the README:\n # The emw.cd file contains the following fields:\n # 1. IdNum\n # 2. Word\n # 3. Cob\n # 4. IdNumLemma\n # 5. FlectType\n # 6. TransInfl\n word_id, word, frequency, lemma, features, analysis = line.split('\\\\')\n elif self.lang == DUTCH:\n # From the README:\n # The dmw.cd file contains the following fields:\n # 1. Idnum\n # 2. Word\n # 3. Inl\n # 4. IdNumLemma\n # 5. FlectType\n word_id, word, frequency, lemma, features = line.split('\\\\')\n analysis = None\n\n return (word, int(frequency), lemma, features, analysis)", "def __line_parse(index: int, line: list, dictionary: dict, word_list: list):\n\n if index + 2 >= len(line):\n return\n word_1 = line[index + 2]\n word_2 = line[index + 1]\n word_3 = line[index]\n if word_1 == \"\" or word_2 == \"\" or word_3 == \"\":\n return\n\n if word_1 not in dictionary:\n dictionary[word_1] = {\n str(word_1 + \"_1\"): {\n\n },\n str(word_1 + \"_2\"): {\n\n },\n str(word_1 + \"_3\"): {\n\n }\n }\n if word_2 not in dictionary:\n dictionary[word_2] = {\n str(word_2 + \"_1\"): {\n\n },\n str(word_2 + \"_2\"): {\n\n },\n str(word_2 + \"_3\"): {\n\n }\n }\n if word_3 not in dictionary:\n dictionary[word_3] = {\n str(word_3 + \"_1\"): {\n\n },\n str(word_3 + \"_2\"): {\n\n },\n str(word_3 + \"_3\"): {\n\n }\n }\n if word_1 not in word_list:\n word_list.append(word_1)\n if word_2 not in word_list:\n word_list.append(word_2)\n if word_3 not in word_list:\n word_list.append(word_3)\n \"\"\" word_3 word_2 word_1\"\"\"\n if word_2 not in dictionary[word_1][str(word_1 + \"_1\")]:\n dictionary[word_1][str(word_1 + \"_1\")][word_2] = 1\n else:\n dictionary[word_1][str(word_1 + \"_1\")][word_2] = dictionary[word_1][str(word_1 + \"_1\")][word_2] + 1\n if word_3 not in dictionary[word_1][str(word_1 + \"_2\")]:\n dictionary[word_1][str(word_1 + \"_2\")][word_3] = 1\n else:\n dictionary[word_1][str(word_1 + \"_2\")][word_3] = dictionary[word_1][str(word_1 + \"_2\")][word_3] + 1\n if word_3 not in dictionary[word_2][str(word_2 + \"_1\")]:\n dictionary[word_2][str(word_2 + \"_1\")][word_3] = 1\n else:\n dictionary[word_2][str(word_2 + \"_1\")][word_3] = dictionary[word_2][str(word_2 + \"_1\")][word_3] + 1\n if index + 3 >= len(line) or line[index + 3] == \"\":\n return\n word_0 = line[index + 3]\n if word_0 not in dictionary:\n dictionary[word_0] = {\n str(word_0 + \"_1\"): {\n\n },\n str(word_0 + \"_2\"): {\n\n },\n str(word_0 + \"_3\"): {\n\n }\n }\n\n if word_0 not in word_list:\n word_list.append(word_0)\n\n if word_3 not in dictionary[word_0][str(word_0 + \"_3\")]:\n dictionary[word_0][str(word_0 + \"_3\")][word_3] = 1\n else:\n dictionary[word_0][str(word_0 + \"_3\")][word_3] = dictionary[word_0][str(word_0 + \"_3\")][word_3] + 1", "def _parse_move_statement(dlstr):\n\n try:\n tokens = dlstr.lower().split()\n if tokens[0] != \"move\":\n raise ValueError(\"Expected 'move' statement\")\n\n mtype, nmove, pfreq, rmin = \\\n tokens[1], int(tokens[2]), int(tokens[3]), float(tokens[4])\n except IndexError:\n raise ValueError(\"Badly formed 'move' statement?\")\n\n return mtype, nmove, pfreq, rmin", "def process(self, element, **kwargs):\n regex = r'[a-zA-Z]+' # r'\\w+' to include numbers as well\n line_words = 
re.findall(regex, element.lower()) # clean punctuation: get a list of (re)\n words_to_tuples = [(line_words[i], line_words[i+1]) for i in range(len(line_words)-1)]\n return words_to_tuples" ]
[ "0.573954", "0.5613782", "0.5517276", "0.5517276", "0.52339894", "0.5123275", "0.49992552", "0.49817768", "0.49817768", "0.4913706", "0.49128622", "0.48561028", "0.4813784", "0.47896823", "0.47804374", "0.4778509", "0.47712478", "0.47417298", "0.4740112", "0.47180837", "0.47102773", "0.4696946", "0.45837364", "0.4583648", "0.45624596", "0.45446995", "0.45395947", "0.44916245", "0.4485415", "0.44827673" ]
0.6375764
0
Trap JSON decoding failures and provide more detailed errors. Remove ')]}' XSS prefix from data if it is present, then decode it as JSON and return the results.
def decode_json(raw):
    # Gerrit's REST API prepends a JSON-breaker to avoid XSS vulnerabilities
    if raw.text.startswith(")]}'"):
        trimmed = raw.text[4:]
    else:
        trimmed = raw.text

    # Try to decode and bail with much detail if it fails
    try:
        decoded = json.loads(trimmed)
    except Exception:
        LOG.error(
            '\nrequest returned %s error to query:\n\n %s\n'
            '\nwith detail:\n\n %s\n',
            raw, raw.url, trimmed)
        raise

    return decoded
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def decode_json(raw):\n\n # Gerrit's REST API prepends a JSON-breaker to avoid XSS vulnerabilities\n if raw.text.startswith(\")]}'\"):\n trimmed = raw.text[4:]\n else:\n trimmed = raw.text\n\n # Try to decode and bail with much detail if it fails\n try:\n decoded = json.loads(trimmed)\n except Exception:\n print('\\nrequest returned %s error to query:\\n\\n %s\\n'\n '\\nwith detail:\\n\\n %s\\n' % (raw, raw.url, trimmed),\n file=sys.stderr)\n raise\n return decoded", "def process_json(self, data):\r\n rsp = json.loads(data)\r\n\r\n if rsp['stat'] == 'fail':\r\n raise APIError, rsp\r\n\r\n return rsp", "def decode_message(json_data):\n\t# Remove the quotations at the start and end of the string\n\tjson_data = re.sub(r'\"(.*)\"', r'\\1', json_data)\n\t# Unescape all other quotation marks\n\tjson_data = re.sub(r'\\\\(.)', r'\\1', json_data)\n\tclean_data = json.loads(json_data)\n\n\treturn clean_data", "async def json(self):\n try:\n return await super().json()\n except JSONDecodeError:\n from .errors import HTTPError # prevent circular imports\n\n raise HTTPError(400, detail=\"JSON is malformed.\")", "def _reject_invalid_json(val: Any) -> None:\n raise ValueError(f\"Invalid JSON value: {val!r}\")", "def test_cli_format_error_handler_broken_json():\n resp = MagicMock()\n resp.json.side_effect = ValueError(\"\")\n resp.text = \"Not JSON\"\n output = format_utils.cli_format_error_handler(resp)\n assert 'Error: Unable to decode response. Value: Not JSON' in output", "def _reject_invalid_json(val):\n raise ValueError(f\"Invalid JSON value: {val!r}\")", "def _handle_response(self, resp):\n\n try:\n resp.raise_for_status()\n results = json.loads(resp.text)\n except requests.RequestException:\n raise Exception(resp.text)\n except JSONDecodeError:\n raise Exception(\"Error in parsing: {}\".format(resp.text))\n return results", "def sanitise_json_error(error_dict):\n ret = copy.copy(error_dict)\n chop = len(JSON_ERROR)\n ret['detail'] = ret['detail'][:chop]\n return ret", "def decode_json_arg(json_arg):\n\n try:\n return json.loads(json_arg)\n except ValueError:\n print(\"Decoding JSON argument has failed\")\n raise Exception(\"Decoding JSON argument has failed\")", "def json_decode(self, data, **kwargs):\n kwargs.pop('object_hook', None)\n json.loads(data, object_hook = self._dict_to_obj, **kwargs)", "def _decode(self, data: bytes):\n\n return json.loads(data.decode('utf-8'))", "def send_incorrect_json_bad_request():\n return make_response(jsonify({\"validation_error\": {\n \"error\": 'Syntax error',\n \"description\": 'Parsing of input JSON is unavailable'\n }}), 400)", "def _parse_json_response(self, json_response, expect_errors=False):\n if not expect_errors:\n self.assertEqual(json_response.status_int, 200)\n\n self.assertEqual(\n json_response.content_type, 'application/javascript')\n self.assertTrue(json_response.body.startswith(feconf.XSSI_PREFIX))\n\n return json.loads(json_response.body[len(feconf.XSSI_PREFIX):])", "def json_decode(data):\n if isinstance(data, bytes):\n data = data.decode(\"utf-8\")\n\n return json.loads(data)", "def test_error_no_json(self, app, data_queues, metricsmock):\n res = self._call(app, \"\\xae\", method=\"post\", status=400)\n detail = \"JSONDecodeError('Expecting value: line 1 column 1 (char 0)')\"\n self.check_response(data_queues, res, \"parse_error\", details={\"decode\": detail})\n metricsmock.assert_incr_once(\n self.metric_type + \".request\", tags=[self.metric_path, \"key:test\"]\n )", "def handle_marshmallow_validaton(err): # except ValidationError as 
err\n return jsonify(err.messages), 400 # bad request", "def _parse_json(req, resp):\n try:\n body = req.stream.read()\n return json.loads(body)\n except ValueError as e:\n err_msg = str(e) + ': ' + body\n resp.status = falcon.HTTP_400\n resp.body = make_error_body(err_msg)\n return", "def handle_json(self, source, data):\n method, args = json.loads(data)\n try:\n result = self.call(source, method, *args)\n except Exception as exc:\n result = str(exc)\n\n return json.dumps(result)", "def test_bad_encoding(self, app, data_queues):\n body = b'{\"comment\": \"R\\xe9sum\\xe9 from 1990\", \"items\": []}'\n assert \"Résumé\" in body.decode(\"iso8859-1\")\n with pytest.raises(UnicodeDecodeError):\n body.decode(\"utf-8\")\n headers = {\"Content-Type\": \"application/json; charset=utf-8\"}\n res = self._call(app, body=body, headers=headers, method=\"post\", status=400)\n detail = (\n \"'utf-8' codec can't decode byte 0xe9 in position 14: invalid\"\n \" continuation byte\"\n )\n self.check_response(data_queues, res, \"parse_error\", details={\"decode\": detail})", "def parse_json_or_fail(message, schema):\n try:\n body = tornado.escape.json_decode(message)\n except ValueError as e:\n raise tornado.web.HTTPError(400, reason=str(e))\n\n try:\n jsonschema.validate(body, schema)\n except jsonschema.exceptions.ValidationError as e:\n raise tornado.web.HTTPError(400, reason=e.message)\n\n return body", "def _check_json(json_data: Any, clean: bool) -> Any:\n try:\n json.loads(json_data)\n except ValueError:\n return \"unknown\" if clean else False\n return \"success\" if clean else True", "def parse_json(raw):\n return escape.recursive_unicode(escape.json_decode(raw)) if raw != None else None", "def try_json(string):\n try:\n return json.loads(string)\n except:\n return string", "def json_decode(x):\n return json.loads(x, object_hook=json_hook)", "def handle_response_json(self, http: Http, response, **kwargs) -> dict:\n try:\n data = response.json()\n except Exception as exc:\n raise JsonInvalid(msg=\"Response has invalid JSON\", response=response, exc=exc)\n return data", "def validate_json(self):\n pass", "def parse_json(data):\n return json.loads(data)", "def json_decode(text):\n return _json_decode(text)", "def test_bad_filter_json_format(admin_client, public_resource_with_metadata):\n query_filter = {'malformed': 'json'}\n djangoresponse = admin_client.get('/discoverapi/?filter={}'.format(query_filter), follow=True)\n response = json.loads(djangoresponse.content.decode(\"utf-8\"))\n assert djangoresponse.status_code == 400\n assert \"Filter JSON parsing error\" in response['message']" ]
[ "0.6794312", "0.6583596", "0.6417889", "0.6340136", "0.62537545", "0.6229566", "0.6154212", "0.6125549", "0.6063384", "0.6061887", "0.60408986", "0.6032854", "0.5998402", "0.59870005", "0.5962834", "0.5893691", "0.5891434", "0.5890469", "0.58832896", "0.5878274", "0.5867642", "0.5839571", "0.58097416", "0.57821286", "0.5759116", "0.572451", "0.5686148", "0.56784713", "0.5650124", "0.56251025" ]
0.67851794
1
Query the Gerrit REST API
def query_gerrit(offset=0):
    url = 'https://review.opendev.org/changes/'
    LOG.debug('fetching %s', url)
    raw = requests.get(
        url,
        params={
            'n': '100',
            'start': offset,
            'q': 'project:openstack/governance is:open',
            'o': [
                'ALL_REVISIONS',
                'REVIEWER_UPDATES',
                'DETAILED_ACCOUNTS',
                'CURRENT_COMMIT',
                'LABELS',
                'DETAILED_LABELS',
                'MESSAGES',
            ],
        },
        headers={'Accept': 'application/json'},
    )
    return decode_json(raw)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def query_gerrit(method, params={}, verbose=0):\n\n return decode_json(requester(\"%s/%s\" % (GERRIT_BASE, method),\n params=params,\n headers={'Accept': 'application/json'},\n verbose=verbose))", "def _query(action=None, command=None, args=None, method=\"GET\", data=None):\n subdomain = __opts__.get(\"bamboohr\", {}).get(\"subdomain\", None)\n path = \"https://api.bamboohr.com/api/gateway.php/{}/v1/\".format(subdomain)\n\n if action:\n path += action\n\n if command:\n path += \"/{}\".format(command)\n\n log.debug(\"BambooHR URL: %s\", path)\n\n if not isinstance(args, dict):\n args = {}\n\n return_content = None\n result = salt.utils.http.query(\n path,\n method,\n username=_apikey(),\n password=\"saltypork\",\n params=args,\n data=data,\n decode=False,\n text=True,\n status=True,\n opts=__opts__,\n )\n log.debug(\"BambooHR Response Status Code: %s\", result[\"status\"])\n\n return [result[\"status\"], result[\"text\"]]", "def getApi(username, password, server):\n\n if not pygerrit2Installed:\n raise RuntimeError(\n 'pygerrit2 not installed, HTTP remotes not supported. To install run \"pip3 install pygerrit2\"'\n )\n\n return GerritRestAPI(url=server, auth=HTTPBasicAuth(username, password))", "def query_repositories():\n return buildapi.query_repositories()", "def get(self):\n\n response = PluginHelper.request_get(params=self.request.arguments)\n if (\n response.status_code == 200\n and response.json()[\"status\"] == \"ok\"\n ):\n result_json = {\n \"results\": response.json()[\"results\"],\n }\n else:\n raise exc.BadRequest(\"Bad host query: {}\".format(\n self.request.arguments\n ))\n\n self.success(result_json)", "def __apiRequest(self, url, parms={}):\n authparms = self.__addAuthParms(parms);\n request = self.http.request('GET', url, fields=authparms)\n if request.status != 200:\n raise ApiCommunicationError('Failed to retrieve data from Marvel, HTTP Status {}'.format(request.status))\n else:\n return json.loads( request.data.decode('utf-8') )", "def request(host, path, api_key, url_params=None):\r\n url_params = url_params or {}\r\n url = '{0}{1}'.format(host, quote(path.encode('utf8')))\r\n headers = {\r\n 'Authorization': 'Bearer %s' % api_key,\r\n }\r\n\r\n print(u'Querying {0} ...'.format(url))\r\n \r\n response = requests.request('GET', url, headers=headers, params=url_params)\r\n\r\n return response.json()", "def do_the_query(request):\n\n # TODO Check if params are there..\n # TODO return error code if not present\n user_id = request.args.get(\"user_id\")\n repo_id = request.args.get(\"repo_id\")\n print(user_id, repo_id)\n fetch_and_dump(user_id, repo_id)\n return (str([user_id, repo_id]), 200, {\"Output\": \"Works\"})", "def query():\n args_dict = flask.request.args.to_dict()\n if \"versioned\" in args_dict.keys():\n args_dict[\"versioned\"] = args_dict[\"versioned\"].lower() in [\n \"true\",\n \"t\",\n \"yes\",\n \"y\",\n ]\n if \"exclude_deleted\" in args_dict.keys():\n args_dict[\"exclude_deleted\"] = args_dict[\"exclude_deleted\"].lower() in [\n \"true\",\n \"t\",\n \"yes\",\n \"y\",\n ]\n\n record_list = blueprint.driver.query_urls(**args_dict)\n return flask.Response(\n json.dumps(record_list, indent=2, separators=(\", \", \": \")),\n 200,\n mimetype=\"application/json\",\n )", "def _query(self, url, payload=None, action='GET'):\r\n full_url = self._base + url\r\n\r\n headers = {\r\n 'Content-Type': 'application/json'\r\n }\r\n\r\n auth = None\r\n if self._user and self._password:\r\n auth = self._user, self._password\r\n\r\n resp = requests.request(action.lower(), 
full_url, json=payload, headers=headers, auth=auth)\r\n\r\n if resp.status_code in (401, 403):\r\n raise Exception('Auth failed')\r\n\r\n if resp.status_code != 200:\r\n raise Exception('Wrong answer for [url=\"{}\", status=\"{}\", text=\"{}\"]'.format(\r\n full_url, resp.status_code, resp.text)\r\n )\r\n\r\n answer = resp.json()\r\n\r\n return answer", "def query_site(url, params, uid=\"\", fmt=\"json\"):\n params[\"fmt\"] = fmt\n r = requests.get(url + uid, params=params)\n print(\"requesting\"+r.url)\n\n if r.status_code == requests.codes.ok:\n return r.json()\n else:\n r.raise_for_status()", "def request(host, path, api_key, url_params=None):\n\n url_params = url_params or {}\n\n url = '{0}{1}'.format(host, quote(path.encode('utf8')))\n\n headers = {\n\n 'Authorization': 'Bearer %s' % api_key,\n\n }\n\n\n print(u'Querying {0} ...'.format(url))\n\n\n response = requests.request('GET', url, headers=headers, params=url_params)\n\n\n return response.json()", "def query(self):", "def request(query):", "def GetReviewers(host, change):\n path = '%s/reviewers' % _GetChangePath(change)\n return FetchUrlJson(host, path)", "def send_get(self, api_url, query=None):\n resp = requests.get(self.base_url + api_url, params=query)\n\n return resp", "def _query_api(\n master_url=settings.OPENSHIFT_API['NP']['OPENSHIFT_MASTER'],\n api_token=settings.OPENSHIFT_API['NP']['API_TOKEN'],\n endpoint='/oapi/v1/buildconfigs'):\n\n openshift_api_url = 'https://' + master_url\n openshift_api_get_endpoint = openshift_api_url + endpoint\n bearer_token_header = {'Authorization': 'Bearer ' + api_token }\n\n try:\n response = requests.get(openshift_api_get_endpoint,headers=bearer_token_header, timeout=2.0)\n except requests.ConnectTimeout as e:\n logger.error(e)\n return None\n except requests.ConnectionError as e:\n logger.error(e)\n return None\n\n if not response.ok:\n logger.error(response.status_code)\n return None\n else:\n return response", "def request(host, path, api_key, url_params=None):\n url_params = url_params or {}\n url = '{0}{1}'.format(host, quote(path.encode('utf8')))\n headers = {\n 'Authorization': 'Bearer %s' % api_key,\n }\n\n print(u'Querying {0} ...'.format(url))\n\n response = requests.request('GET', url, headers=headers, params=url_params)\n\n return response.json()", "def request(host, path, api_key, url_params=None):\n url_params = url_params or {}\n url = '{0}{1}'.format(host, quote(path.encode('utf8')))\n headers = {\n 'Authorization': 'Bearer %s' % api_key,\n }\n\n print(u'Querying {0} ...'.format(url))\n\n response = requests.request('GET', url, headers=headers, params=url_params)\n\n return response.json()", "def get(self, *args, **kwargs):\r\n url = '{0}/user/repositories/'.format(self.parent.get_url())\r\n return http.Request('GET', url), parsers.parse_json", "def test_api_repo_status_get(self):\n default_api = DefaultApi(api_client=self.api_client)\n params = dlrnapi_client.Params2()\n path, method = default_api.api_repo_status_get(params)\n self.assertEqual(path, '/api/repo_status')\n self.assertEqual(method, 'GET')", "def request_fields(self, fields=None):\n # The cursor only works for the 'search' endpoint, just call\n # the 'field' endpoint and return all the field types\n response = self.connection.get_request(self.uri_field)\n if response.status_code != requests.codes.ok:\n logger.warning('JIRA Cloud returned %d for %s', response.status_code, self.uri_field)\n return []\n content = json.loads(response.content)\n # Overwrite some fields\n for c in content:\n if c['name'] == 
'Epic Status':\n c['schema']['type'] = 'string'\n c['choices'] = (('To Do', 'To Do'), ('In Progress', 'In Progress'), ('Done', 'Done'))\n elif c['name'] == 'Resolution':\n c['choices'] = self._get_resolutions()\n\n # The KEY field is never returned\n c = {\n \"id\": \"key\",\n \"key\": \"key\",\n \"name\": \"Key\",\n \"custom\": False,\n \"orderable\": True,\n \"navigable\": True,\n \"searchable\": True,\n \"clauseNames\": [\n \"key\",\n ],\n \"schema\": {\n \"type\": \"string\",\n }\n }\n content.append(c)\n # The parent field is never returned\n c = {\n \"id\": \"parent\",\n \"key\": \"parent\",\n \"name\": \"Parent\",\n \"custom\": True,\n \"orderable\": True,\n \"navigable\": True,\n \"searchable\": True,\n \"clauseNames\": [\n \"parent\",\n ],\n \"schema\": {\n \"type\": \"any\",\n \"custom\": \"com.django-atlassian:parent\"\n }\n }\n content.append(c)\n return content", "def query(self):\n pass", "def GET(self, path, params={}):\n request_url = 'https://{0}:{1}/rest/{2}'.format(\n self.settings.api_host,\n self.settings.api_port,\n path\n )\n\n # Make the API request\n response = requests.get(request_url,\n auth = (self.settings.api_user, self.settings.api_password),\n verify = self.settings.verify_ssl,\n headers = self.settings.headers,\n params = params\n )\n\n # Request failed\n if not int(response.status_code) == 200:\n raise Exception('Failed to GET {0}: {1}'.format(request_url, response.json()))\n return response.json()", "def GetGerritFetchUrl(host):\n return 'https://%s/' % host", "def cr_list(request, target, project, format=None):\n if request.method == 'GET':\n cr = CR.objects.filter(target=target, key__startswith=project + '-').order_by('keynum')\n serializer = CRSerializer(cr, many=True)\n return Response(serializer.data)", "def submit_query(self, username, password):\n\n #username = os.environ['NEWSREADER_USERNAME']\n #password = os.environ['NEWSREADER_PASSWORD']\n payload = {'id': self.query}\n \n endpoint_url = self.endpoint_stub_url.format(action=self.action)\n print \"\\n\\n**New CRUD query**\"\n print endpoint_url, payload\n t0 = time.time()\n try:\n response = requests.get(endpoint_url, auth=(username, password),\n params=payload)\n except Exception as e:\n print \"Query raised an exception\"\n print type(e)\n t1 = time.time()\n total = t1-t0\n raise QueryException(\"Query raised an exception: {0}\".format(type(e).__name__))\n else:\n t1 = time.time()\n total = t1-t0\n print \"Time to return from query: {0:.2f} seconds\".format(total)\n print \"Response code: {0}\".format(response.status_code)\n print \"From cache: {0}\".format(response.from_cache)\n\n #print response.content\n \n if response and (response.status_code == requests.codes.ok):\n self.json_result = {\"content\":response.content}\n self.clean_json = self.json_result\n else:\n raise QueryException(\"Response code not OK: {0}\".format(response.status_code))", "def do_GET(self):\n self.http_method = 'GET'\n self.response()", "def GetChangeReviewers(host, change):\n path = '%s/reviewers' % _GetChangePath(change)\n return FetchUrlJson(host, path)", "def QueryChanges(host, param_dict, first_param=None, limit=None, o_params=None,\n start=None):\n # Note that no attempt is made to escape special characters; YMMV.\n if not param_dict and not first_param:\n raise RuntimeError('QueryChanges requires search parameters')\n path = 'changes/?q=%s' % _QueryString(param_dict, first_param)\n if start:\n path = '%s&S=%d' % (path, start)\n if limit:\n path = '%s&n=%d' % (path, limit)\n if o_params:\n path = '%s&%s' % 
(path, '&'.join(['o=%s' % p for p in o_params]))\n # Don't ignore 404; a query should always return a list, even if it's empty.\n return FetchUrlJson(host, path, ignore_404=False)" ]
[ "0.77562743", "0.6120338", "0.60652655", "0.566763", "0.56369096", "0.5617672", "0.557019", "0.5536899", "0.55219513", "0.55080754", "0.5492061", "0.54717964", "0.54580337", "0.54435146", "0.54301286", "0.5413435", "0.54131085", "0.5411413", "0.5411413", "0.53971434", "0.5370158", "0.53426945", "0.53331846", "0.53300864", "0.53271323", "0.5323191", "0.5317053", "0.5302447", "0.5296802", "0.5275351" ]
0.6680208
1
Determines the satellite number for a given date.
def _get_goes_sat_num(self, date): goes_operational = { 2: TimeRange("1981-01-01", "1983-04-30"), 5: TimeRange("1983-05-02", "1984-07-31"), 6: TimeRange("1983-06-01", "1994-08-18"), 7: TimeRange("1994-01-01", "1996-08-13"), 8: TimeRange("1996-03-21", "2003-06-18"), 9: TimeRange("1997-01-01", "1998-09-08"), 10: TimeRange("1998-07-10", "2009-12-01"), 11: TimeRange("2006-06-20", "2008-02-15"), 12: TimeRange("2002-12-13", "2007-05-08"), 13: TimeRange("2006-08-01", "2006-08-01"), 14: TimeRange("2009-12-02", "2010-10-04"), 15: TimeRange("2010-09-01", parse_time("now")), } results = [] for sat_num in goes_operational: if date in goes_operational[sat_num]: # if true then the satellite with sat_num is available results.append(sat_num) if results: # Return the newest satellite return max(results) else: # if no satellites were found then raise an exception raise ValueError( "No operational GOES satellites on {}".format( date.strftime(TIME_FORMAT) ) )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_goes_sat_num(self, date):\n\n # GOES-17 is operational but currently does not provide Level 2 data\n # GOES-16 start date is based on the availability of regular level 1b data\n suvi_operational = {\n 16: TimeRange(\"2018-06-01\", parse_time(\"now\")),\n }\n\n results = []\n for sat_num in suvi_operational:\n if date in suvi_operational[sat_num]:\n # if true then the satellite with sat_num is available\n results.append(sat_num)\n\n if results:\n # Return the newest satellite\n return max(results)\n else:\n # if no satellites were found then raise an exception\n raise ValueError(f\"No operational SUVI instrument on {date.strftime(TIME_FORMAT)}\")", "def sunrise(cls, date):\n return (date + Clock.days_from_hours(6) + \n ((cls.UJJAIN.longitude - cls.LOCATION.longitude) / 360) -\n cls.equation_of_time(date) +\n ((1577917828/1582237828 / 360) *\n (cls.ascensional_difference(date, cls.LOCATION) +\n (1/4 * cls.solar_sidereal_difference(date)))))", "def sunset(cls, date):\n return (date + Clock.days_from_hours(18) + \n ((cls.UJJAIN.longitude - cls.LOCATION.longitude) / 360) -\n cls.equation_of_time(date) +\n (((1577917828/1582237828) / 360) *\n (- cls.ascensional_difference(date, cls.LOCATION) +\n (3/4 * cls.solar_sidereal_difference(date)))))", "def alt_sunrise(cls, date):\n rise = cls.UJJAIN.dawn(date, angle(0, 47, 0))\n return 1/24 * 1/60 * iround(rise * 24 * 60)", "def hindu_lunar_station(date):\n critical = HinduDate.sunrise(date)\n return quotient(HinduLunarDate.longitude(critical), angle(0, 800, 0)) + 1", "def sun_single_day(date):\r\n\r\n\tsun = l.sun(date=date, local=True)\r\n\tsunrise = sun['sunrise']\r\n\tsunset = sun['sunset']\r\n\tday_length = str(sunset-sunrise)\r\n\tsolar_noon = l.solar_noon(date=date, local=True)\r\n\tsolar_zenith = l.solar_elevation(solar_noon.replace(tzinfo=None))\r\n\r\n\treturn {'sunrise':sunrise, 'sunset': sunset, 'daylength': day_length, 'solar_noon': solar_noon, 'zenith': solar_zenith}", "def station(unit, date):\r\n req = 'select ParentLocId from PI_PlaceRelationship where RelationId = 4 and LocId = \"{}\" and EndDate > \"{}\"'.format(unit,d2d(date))\r\n try:\r\n station = pd.read_sql(req, engine).values[0][0]\r\n return station if unit != station else 0\r\n except:\r\n logging.warning('error in getting station ID for {} ({})'.format(name(unit), unit))\r\n return 0", "def fetch_sundata(self, date: datetime) -> Sundata:\n pass", "def date_to_draw_number(date):\n\n today = date.today()\n\n #hotspot plays only last for 180 days\n #validate entered date\n if (today - date).days > 180 or date > today:\n return 0\n\n days_between = (date - INIT_DATE).days\n\n return INIT_DRAW_NUMBER + (300 * days_between)\n\n\n # num_spots_sampled, spot_histogram, range_histogram, mod_histogram,\n # last_seen_dict, avg_draw_distance_dict, draw_distance_dict, last_n_avg_distance_dict_list, current_draw_num", "def date_info_day(date_str, infile):\n #date_str = str(sys.argv[1])\n #infile = './' + date_str + '.nc'\n\n # prepare date\n year,mon,day = date_str.split('-')\n year_num = int(float(year))\n mon_num = int(float(mon))\n day_num = int(float(day))\n\n\n datesec_calc = []\n val_pr_day = 4\n secstep = 86400/val_pr_day\n sec = [0, 1*secstep, 2*secstep, 3*secstep]\n for j in sec:\n datesec_calc.append(j)\n\n # Open a netCDF file for appending:\n ncfile = Dataset(infile,'a')\n #time_in = ncfile.variables['time'][:]\n #ncfile = Dataset('date_datesec' + date + '.nc','w')\n\n # Create the variable (4 byte integer in this case)\n # first argument is name of variable, second 
is datatype, third is\n # a tuple with the names of dimensions.\n date_str = ncfile.createVariable('date',dtype('int32').char,('time'))\n datesec = ncfile.createVariable('datesec',dtype('int32').char,('time'))\n\n # Write data to variable:\n date_str[:] = year_num*10000+mon_num*100+day_num\n datesec[:] = datesec_calc\n\n # Add attributes to the variables:\n date_str.long_name = 'current date (YYYYMMDD)'\n datesec.long_name = 'current seconds of current date'\n\n # close the file.\n ncfile.close()\n return", "def solar_noon(self, date=None, local=True):\n \n if self.astral is None:\n self.astral = Astral()\n\n if date is None:\n date = datetime.date.today()\n\n noon = self.astral.solar_noon_utc(date, self.longitude)\n\n if local:\n return noon.astimezone(self.tz) \n else:\n return noon", "def getSeason(date):\n\n date = validate.timestamp(date)\n day = date.dayofyear\n leap_year = int(date.is_leap_year)\n\n spring = numpy.arange(80, 172) + leap_year\n summer = numpy.arange(172, 264) + leap_year\n autumn = numpy.arange(264, 355) + leap_year\n\n if day in spring:\n season = \"spring\"\n elif day in summer:\n season = \"summer\"\n elif day in autumn:\n season = \"autumn\"\n else:\n season = \"winter\"\n\n return season", "def night_center(self, date=None):\n sunset = self.sunset(date=date)\n sunrise = self.sunrise(date=sunset)\n center = sunset + timedelta(0, (sunrise - sunset).total_seconds() / 2.0)\n center = self.date_to_local(center)\n return center", "def update_satellite_state(self, current_date):\n pass", "def from_fixed(cls, date):\n sun = cls.hindu_day_count(date) + Clock.days_from_hours(6)\n year = quotient(sun, cls.ARYA_SOLAR_YEAR)\n month = mod(quotient(sun, cls.ARYA_SOLAR_MONTH), 12) + 1\n day = ifloor(mod(sun, cls.ARYA_SOLAR_MONTH)) + 1\n return OldHinduSolarDate(year, month, day)", "def seasonal(path, date_inf=\"15-05\", date_sup=\"15-10\"):\n with open(os.path.join(path, \"info.json\"), \"r\") as f:\n info = json.load(f)\n\n date_inf = datetime.strptime(date_inf, \"%d-%m\").timetuple().tm_yday\n date_sup = datetime.strptime(date_sup, \"%d-%m\").timetuple().tm_yday\n day_of_year = timestamp_to_datetime(\n info['Sensing start']).timetuple().tm_yday\n\n return (day_of_year > date_inf) and (day_of_year < date_sup)", "def rsi(date):\n\n # print(float(r_json['Technical Analysis: RSI'][date]['RSI']))\n return float(r_json['Technical Analysis: RSI'][date]['RSI'])", "def yoga(date):\n return ifloor(mod((HinduSolarDate.longitude(date) + HinduLunarDate.longitude(date)) / angle(0, 800, 0), 27)) + 1", "def sunrise(self, date=None):\n self.site.horizon = self.horizon\n self._set_site_date(date)\n r_date = self.site.next_rising(self.sun)\n r_date = self.date_to_local(r_date.datetime())\n return r_date", "def find_index(weather_data: dict, date: datetime) -> int:\n weather_list = weather_data['list']\n for index, weather in enumerate(weather_list):\n if weather['dt_txt'] == date.strftime('%Y-%m-%d %H:%M:%S'):\n return index\n return 0", "def date_ym_value(date: dt.datetime) -> int:\n return (100 * date.year) + date.month", "def DAY(date):\n return _make_datetime(date).day", "def get_season(\n current_date: date, hemisphere: str, season_tracking_type: str\n) -> str | None:\n\n if hemisphere == \"equator\":\n return None\n\n if season_tracking_type == TYPE_ASTRONOMICAL:\n spring_start = ephem.next_equinox(str(current_date.year)).datetime()\n summer_start = ephem.next_solstice(str(current_date.year)).datetime()\n autumn_start = ephem.next_equinox(spring_start).datetime()\n winter_start = 
ephem.next_solstice(summer_start).datetime()\n else:\n spring_start = datetime(2017, 3, 1).replace(year=current_date.year)\n summer_start = spring_start.replace(month=6)\n autumn_start = spring_start.replace(month=9)\n winter_start = spring_start.replace(month=12)\n\n if spring_start <= current_date < summer_start:\n season = STATE_SPRING\n elif summer_start <= current_date < autumn_start:\n season = STATE_SUMMER\n elif autumn_start <= current_date < winter_start:\n season = STATE_AUTUMN\n elif winter_start <= current_date or spring_start > current_date:\n season = STATE_WINTER\n\n # If user is located in the southern hemisphere swap the season\n if hemisphere == NORTHERN:\n return season\n return HEMISPHERE_SEASON_SWAP.get(season)", "def solar_sidereal_difference(cls, date):\n return cls.daily_motion(date) * cls.rising_sign(date)", "def evening_twilight_12(self, date=None):\n self.site.horizon = self.horizon12\n self._set_site_date(date)\n r_date = self.site.next_setting(self.sun)\n r_date = self.date_to_local(r_date.datetime())\n return r_date", "def _read_sansculottide_date(match):\n day_string = match.group(1)\n d = None\n\n for n, candidate in enumerate(names.sans_culottides):\n if candidate.sanitized == day_string:\n d = n\n break\n else:\n return\n\n y = roman_to_decimal(match.group(2))\n\n return (y, 13, d)", "def get_weather_on_date(date, meteo_day, store_id):\n return meteo_day[(meteo_day['STO_EAN'] == store_id) & (meteo_day['DATE_KEY'] == date)]", "def moon_rise(self, date=None):\n self._set_site_date(date)\n moonrise = self.site.next_rising(self.moon)\n moonrise = self.date_to_local(moonrise.datetime())\n ## if moonrise < self.sunset():\n ## moonrise = None\n return moonrise", "def evening_twilight_6(self, date=None):\n self.site.horizon = self.horizon6\n self._set_site_date(date)\n r_date = self.site.next_setting(self.sun)\n r_date = self.date_to_local(r_date.datetime())\n return r_date", "def get_timestamp_from_date(date: str) -> int:\n return time.mktime(datetime.datetime.strptime(date, \"%d/%m/%y\").timetuple())" ]
[ "0.7167133", "0.6191373", "0.59824467", "0.59554714", "0.5927746", "0.58375365", "0.5829639", "0.579904", "0.5788288", "0.5535905", "0.54828674", "0.54560727", "0.5452765", "0.5443098", "0.5432317", "0.54228646", "0.5382635", "0.53717595", "0.5346957", "0.53393257", "0.5331354", "0.53184897", "0.53164935", "0.53036433", "0.5292154", "0.5279383", "0.5274468", "0.52559227", "0.5249847", "0.5193736" ]
0.7115077
1
Determines the best satellite number for a given date.
def _get_goes_sat_num(self, date): # GOES-17 is operational but currently does not provide Level 2 data # GOES-16 start date is based on the availability of regular level 1b data suvi_operational = { 16: TimeRange("2018-06-01", parse_time("now")), } results = [] for sat_num in suvi_operational: if date in suvi_operational[sat_num]: # if true then the satellite with sat_num is available results.append(sat_num) if results: # Return the newest satellite return max(results) else: # if no satellites were found then raise an exception raise ValueError(f"No operational SUVI instrument on {date.strftime(TIME_FORMAT)}")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_goes_sat_num(self, date):\n goes_operational = {\n 2: TimeRange(\"1981-01-01\", \"1983-04-30\"),\n 5: TimeRange(\"1983-05-02\", \"1984-07-31\"),\n 6: TimeRange(\"1983-06-01\", \"1994-08-18\"),\n 7: TimeRange(\"1994-01-01\", \"1996-08-13\"),\n 8: TimeRange(\"1996-03-21\", \"2003-06-18\"),\n 9: TimeRange(\"1997-01-01\", \"1998-09-08\"),\n 10: TimeRange(\"1998-07-10\", \"2009-12-01\"),\n 11: TimeRange(\"2006-06-20\", \"2008-02-15\"),\n 12: TimeRange(\"2002-12-13\", \"2007-05-08\"),\n 13: TimeRange(\"2006-08-01\", \"2006-08-01\"),\n 14: TimeRange(\"2009-12-02\", \"2010-10-04\"),\n 15: TimeRange(\"2010-09-01\", parse_time(\"now\")),\n }\n\n results = []\n for sat_num in goes_operational:\n if date in goes_operational[sat_num]:\n # if true then the satellite with sat_num is available\n results.append(sat_num)\n\n if results:\n # Return the newest satellite\n return max(results)\n else:\n # if no satellites were found then raise an exception\n raise ValueError(\n \"No operational GOES satellites on {}\".format(\n date.strftime(TIME_FORMAT)\n )\n )", "def latest_season_before(date):\n\tif date.month < 9:\n\t\treturn date.year - 1\n\treturn date.year", "def get_season_no(token, url):\n headers = {'Accept': 'application/json', 'Authorization': token}\n r = requests.get(url, headers=headers)\n json_data = json.loads(r.text).get('data')\n high_season = 1\n for episode in json_data:\n if episode.get('airedSeason') > high_season:\n high_season = episode.get('airedSeason')\n return high_season", "def station(unit, date):\r\n req = 'select ParentLocId from PI_PlaceRelationship where RelationId = 4 and LocId = \"{}\" and EndDate > \"{}\"'.format(unit,d2d(date))\r\n try:\r\n station = pd.read_sql(req, engine).values[0][0]\r\n return station if unit != station else 0\r\n except:\r\n logging.warning('error in getting station ID for {} ({})'.format(name(unit), unit))\r\n return 0", "def date_to_draw_number(date):\n\n today = date.today()\n\n #hotspot plays only last for 180 days\n #validate entered date\n if (today - date).days > 180 or date > today:\n return 0\n\n days_between = (date - INIT_DATE).days\n\n return INIT_DRAW_NUMBER + (300 * days_between)\n\n\n # num_spots_sampled, spot_histogram, range_histogram, mod_histogram,\n # last_seen_dict, avg_draw_distance_dict, draw_distance_dict, last_n_avg_distance_dict_list, current_draw_num", "def seasonal(path, date_inf=\"15-05\", date_sup=\"15-10\"):\n with open(os.path.join(path, \"info.json\"), \"r\") as f:\n info = json.load(f)\n\n date_inf = datetime.strptime(date_inf, \"%d-%m\").timetuple().tm_yday\n date_sup = datetime.strptime(date_sup, \"%d-%m\").timetuple().tm_yday\n day_of_year = timestamp_to_datetime(\n info['Sensing start']).timetuple().tm_yday\n\n return (day_of_year > date_inf) and (day_of_year < date_sup)", "def find_max_temp(pdata, day):\n for ival in range(len(pdata['values'])):\n start = pdata['time-layout']['start'][ival]\n end = pdata['time-layout']['end'][ival]\n if start.day == day and end.day == day:\n return int(pdata['values'][ival])\n # raise Exception('ERROR didn\\'t find max temp for %d in %s' % (day, pdata['time-layout']))\n return None", "def get_season(\n current_date: date, hemisphere: str, season_tracking_type: str\n) -> str | None:\n\n if hemisphere == \"equator\":\n return None\n\n if season_tracking_type == TYPE_ASTRONOMICAL:\n spring_start = ephem.next_equinox(str(current_date.year)).datetime()\n summer_start = ephem.next_solstice(str(current_date.year)).datetime()\n autumn_start = 
ephem.next_equinox(spring_start).datetime()\n winter_start = ephem.next_solstice(summer_start).datetime()\n else:\n spring_start = datetime(2017, 3, 1).replace(year=current_date.year)\n summer_start = spring_start.replace(month=6)\n autumn_start = spring_start.replace(month=9)\n winter_start = spring_start.replace(month=12)\n\n if spring_start <= current_date < summer_start:\n season = STATE_SPRING\n elif summer_start <= current_date < autumn_start:\n season = STATE_SUMMER\n elif autumn_start <= current_date < winter_start:\n season = STATE_AUTUMN\n elif winter_start <= current_date or spring_start > current_date:\n season = STATE_WINTER\n\n # If user is located in the southern hemisphere swap the season\n if hemisphere == NORTHERN:\n return season\n return HEMISPHERE_SEASON_SWAP.get(season)", "def get_day_highest_rainfall(raindata_list_raw_values_from_urlstream):\n\tlist_with_nonempty_values = remove_emptystring_values(raindata_list_raw_values_from_urlstream) # casting the comparison key to another type to compare is undesirable, first cast the type to the correct data type, then store it in the list for comparison.\n\thighest_day_rain = max(list_with_nonempty_values, key=lambda row: int(row[1])) # lambda expression works IFF there is NO empty string bad data before cast to int! '' Empty string data in second element column must first be removed from source.\n\treturn highest_day_rain", "def getSNIa( age=0, z=1 ):\n import os\n import numpy as np\n import sys\n\n thisfile = sys.argv[0]\n if 'ipython' in thisfile : thisfile = __file__\n thispath = os.path.abspath( os.path.dirname( thisfile ) )\n\n sedfile = os.path.join( thispath, 'Hsiao07.dat')\n d,w,f = np.loadtxt( sedfile, unpack=True ) \n\n #d = d.astype(int)\n uniquedays = np.unique( d )\n ibestday = np.abs( uniquedays-age ).argmin()\n iday = np.where( d==uniquedays[ibestday] )\n\n dbest = d[ iday ]\n wbest = w[ iday ]\n fbest = f[ iday ]\n\n return( wbest*(1+z), fbest )", "def fetch_sundata(self, date: datetime) -> Sundata:\n pass", "def sunrise(cls, date):\n return (date + Clock.days_from_hours(6) + \n ((cls.UJJAIN.longitude - cls.LOCATION.longitude) / 360) -\n cls.equation_of_time(date) +\n ((1577917828/1582237828 / 360) *\n (cls.ascensional_difference(date, cls.LOCATION) +\n (1/4 * cls.solar_sidereal_difference(date)))))", "def sunset(cls, date):\n return (date + Clock.days_from_hours(18) + \n ((cls.UJJAIN.longitude - cls.LOCATION.longitude) / 360) -\n cls.equation_of_time(date) +\n (((1577917828/1582237828) / 360) *\n (- cls.ascensional_difference(date, cls.LOCATION) +\n (3/4 * cls.solar_sidereal_difference(date)))))", "def getSeason(date):\n\n date = validate.timestamp(date)\n day = date.dayofyear\n leap_year = int(date.is_leap_year)\n\n spring = numpy.arange(80, 172) + leap_year\n summer = numpy.arange(172, 264) + leap_year\n autumn = numpy.arange(264, 355) + leap_year\n\n if day in spring:\n season = \"spring\"\n elif day in summer:\n season = \"summer\"\n elif day in autumn:\n season = \"autumn\"\n else:\n season = \"winter\"\n\n return season", "def _evaluate_dates(self):\n if len(self.date_strings)==0: \n return \"\"\n\n # high confidence: filter for date with a time attached, expected only 1 unique time\n high_confidence = self._extract_high_conf_dates(set(self.date_strings))\n if high_confidence: \n return high_confidence\n\n #else pick majority vote using absolute\n majority_vote = self.date_strings.most_common(1)[0][0]\n return majority_vote", "def from_fixed(cls, date):\n sun = cls.hindu_day_count(date) + 
Clock.days_from_hours(6)\n year = quotient(sun, cls.ARYA_SOLAR_YEAR)\n month = mod(quotient(sun, cls.ARYA_SOLAR_MONTH), 12) + 1\n day = ifloor(mod(sun, cls.ARYA_SOLAR_MONTH)) + 1\n return OldHinduSolarDate(year, month, day)", "def find_index(weather_data: dict, date: datetime) -> int:\n weather_list = weather_data['list']\n for index, weather in enumerate(weather_list):\n if weather['dt_txt'] == date.strftime('%Y-%m-%d %H:%M:%S'):\n return index\n return 0", "def _get_goes_sat_num(start, end):\n goes_operational = {\n 2: TimeRange('1980-01-04', '1983-05-01'),\n 5: TimeRange('1983-05-02', '1984-08-01'),\n 6: TimeRange('1983-06-01', '1994-08-19'),\n 7: TimeRange('1994-01-01', '1996-08-14'),\n 8: TimeRange('1996-03-21', '2003-06-19'),\n 9: TimeRange('1997-01-01', '1998-09-09'),\n 10: TimeRange('1998-07-10', '2009-12-02'),\n 11: TimeRange('2006-06-20', '2008-02-16'),\n 12: TimeRange('2002-12-13', '2007-05-09'),\n 13: TimeRange('2006-08-01', '2006-08-01'),\n 14: TimeRange('2009-12-02', '2010-11-05'),\n 15: TimeRange('2010-09-01', Time.now()),\n }\n\n sat_list = []\n for sat_num in goes_operational:\n if (goes_operational[sat_num].start <= start <= goes_operational[sat_num].end and\n goes_operational[sat_num].start <= end <= goes_operational[sat_num].end):\n # if true then the satellite with sat_num is available\n sat_list.append(sat_num)\n\n if not sat_list:\n # if no satellites were found then raise an exception\n raise Exception('No operational GOES satellites within time range')\n else:\n return sat_list", "def hindu_lunar_station(date):\n critical = HinduDate.sunrise(date)\n return quotient(HinduLunarDate.longitude(critical), angle(0, 800, 0)) + 1", "def get_year_with_most_rain(totals_list_per_day_from_datasource):\n\train_by_year_collection = {}\n\tfor row in totals_list_per_day_from_datasource:\n\t\tyear_component_of_parsed_date = row[0].split('-')[2]\n\t\tif None == rain_by_year_collection.get(year_component_of_parsed_date):\n\t\t\train_by_year_collection[year_component_of_parsed_date] = 0\n\t\tif '' == row[1]:\n\t\t\tcontinue\n\t\train_by_year_collection[year_component_of_parsed_date] += int(row[1])\n\tmax_rainy_year = max(rain_by_year_collection.items(), key=max_rain_compare) # use .items() and always searches based on the structure given, here .items() returns the dictionary as a tuple only.\n\tyear_with_most_rain = { max_rainy_year[0] : max_rainy_year[1] }\n\t#year_most_rain = { key : value for key, value in rain_by_year.items() if value == max_value[1] } # find key/year in the dictionary by value/rain reverse thinking.\n\treturn year_with_most_rain", "def _first_good_date(self, day):\n count = 0\n while True:\n try:\n self.data.loc[day - timedelta(count)]\n return day - timedelta(count)\n except KeyError:\n count += 1", "def alt_sunrise(cls, date):\n rise = cls.UJJAIN.dawn(date, angle(0, 47, 0))\n return 1/24 * 1/60 * iround(rise * 24 * 60)", "def sun_single_day(date):\r\n\r\n\tsun = l.sun(date=date, local=True)\r\n\tsunrise = sun['sunrise']\r\n\tsunset = sun['sunset']\r\n\tday_length = str(sunset-sunrise)\r\n\tsolar_noon = l.solar_noon(date=date, local=True)\r\n\tsolar_zenith = l.solar_elevation(solar_noon.replace(tzinfo=None))\r\n\r\n\treturn {'sunrise':sunrise, 'sunset': sunset, 'daylength': day_length, 'solar_noon': solar_noon, 'zenith': solar_zenith}", "def get_price_on_or_before_date(date, prices):\n for i in range(6):\n current_date = date - timedelta(days=i)\n if current_date in prices:\n return float(prices[current_date]), i\n return (None, None)", "def 
forecast_for_closest(\n lat: float, lon: float, lang=_DEFAULT_LANG, num_stations_to_try: int = 3\n) -> Tuple[Dict, Dict]:\n assert lang in _SUPPORTED_LANGS\n\n stations = closest_stations(lat, lon, limit=num_stations_to_try)\n for s in stations:\n o = forecast_for_station(s[\"id\"], lang=lang)\n if o[\"results\"] and not o[\"results\"][0].get(\"err\") and o[\"results\"][0][\"valid\"]:\n return o, s\n\n return forecast_for_station(stations[0][\"id\"], lang=lang), stations[0]", "def yoga(date):\n return ifloor(mod((HinduSolarDate.longitude(date) + HinduLunarDate.longitude(date)) / angle(0, 800, 0), 27)) + 1", "def find_mllw(timeseries):\n return find_tidal_datum(timeseries,stat='min',daily=True)", "def solar_noon(self, date=None, local=True):\n \n if self.astral is None:\n self.astral = Astral()\n\n if date is None:\n date = datetime.date.today()\n\n noon = self.astral.solar_noon_utc(date, self.longitude)\n\n if local:\n return noon.astimezone(self.tz) \n else:\n return noon", "def find_streekday_(self):\n # streekdays pattern repeats every 1280 years:\n epoch = self.year % 1280\n # ...and all 40 years if we ignore the 128-year rule.\n subepoch = epoch % 40\n year_offset = None\n if subepoch in (2,4,21,23):\n year_offset = 0\n elif subepoch in (6,8,25,27):\n year_offset = 1\n elif subepoch in (10,12,29,31):\n year_offset = 2\n elif subepoch in (14,16,33,35):\n year_offset = 3\n elif subepoch in (18,20,37,39):\n year_offset = 4\n elif subepoch in (1,3,22,24):\n year_offset = 5\n elif subepoch in (5,7,26,28):\n year_offset = 6\n elif subepoch in (9,11,30,32): \n year_offset = 7\n elif subepoch in (13,15,34,36):\n year_offset = 8\n elif subepoch in (17,19,38,0):\n year_offset = 9\n year_offset -= math.floor((epoch-1) / 128)\n # another -1 because 0-W-1 is mudday = index 0.\n day_offset = (self.day_in_year + year_offset - 1) % 10\n return day_offset", "def _read_sansculottide_date(match):\n day_string = match.group(1)\n d = None\n\n for n, candidate in enumerate(names.sans_culottides):\n if candidate.sanitized == day_string:\n d = n\n break\n else:\n return\n\n y = roman_to_decimal(match.group(2))\n\n return (y, 13, d)" ]
[ "0.74015623", "0.56602454", "0.5434096", "0.5408103", "0.5402877", "0.53601795", "0.53103787", "0.52805215", "0.5273263", "0.5166013", "0.5148456", "0.5140715", "0.5115912", "0.5089272", "0.50828665", "0.50501645", "0.50464404", "0.4998767", "0.49792892", "0.49729776", "0.4970976", "0.49212497", "0.49085304", "0.4893009", "0.48905686", "0.48879266", "0.4872262", "0.48657152", "0.4864173", "0.4824169" ]
0.74445915
0
Calculates the loglikelihood of neighboring solutions of a batch of nodes by changing their membership. If a higher loglikelihood was achieved the best solution will be returned, else a tuple of three np.nan is returned.
def calc_node_neigh_solutions(event_dict, n_classes, duration, node_membership, log_lik_init, node_batch): best_neigh = (np.nan, np.nan, np.nan) log_lik = log_lik_init # node_membership = node_membership.copy() for n_i in node_batch: n_i_class = node_membership[n_i] # Adding a constraint to maintain the number of blocks. if np.sum(node_membership == n_i_class) <= 2: continue for c_i in range(n_classes): if c_i == n_i_class: continue # update node_membership temporarily node_membership[n_i] = c_i # Eval the aprox log_lik of this neighbor, by est all block parameters. (neigh_log_lik, fitted_params) = bhm_estimate_utils.estimate_hawkes_param_and_calc_log_likelihood(event_dict, node_membership, duration, n_classes, False) # if log_lik if this neighbor is better than the "so far" best neighbor, use this neighbors as the best. if log_lik < neigh_log_lik: log_lik = neigh_log_lik best_neigh = (n_i, c_i, log_lik) node_membership[n_i] = n_i_class return best_neigh
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def calc_full_log_likelihood(bp_events, node_membership, mu, alpha, beta, duration, num_classes,\n add_com_assig_log_prob=False):\n ll = 0\n for b_i in range(num_classes):\n for b_j in range(num_classes):\n bp_size = len(np.where(node_membership == b_i)[0]) * len(np.where(node_membership == b_j)[0])\n if b_i == b_j:\n bp_size -= len(np.where(node_membership == b_i)[0])\n\n ll += block_pair_conditional_log_likelihood(bp_events[b_i][b_j],\n mu[b_i, b_j], alpha[b_i, b_j], beta[b_i, b_j],\n duration, bp_size)\n\n if add_com_assig_log_prob:\n # Adding the log probability of the community assignments to the full log likelihood\n n_nodes = len(node_membership)\n _, block_count = np.unique(node_membership, return_counts=True)\n class_prob_mle = block_count / sum(block_count)\n rv_multi = multinomial(n_nodes, class_prob_mle)\n log_prob_community_assignment = rv_multi.logpmf(block_count)\n\n ll += log_prob_community_assignment\n\n return ll", "def __log_likelihood(self, params, *args):\n\t\tX, y, feature_set, lambda_reg, empirical_weights, verbose, sign = args\n\n\t\tno_example = len(X)\n\t\ttotal_logZ = 0\n\t\ttotal_logProb = 0\n\t\texpected_weights = np.zeros(len(feature_set))\n\t\tfor t in range(len(X)):\n\t\t\t# example_features = X[t], example_labels = y[t]\n\n\t\t\tpotential = np.zeros(len(X[t]))\n\t\t\tfor i in range(len(X[t])):\n\t\t\t\t#candidate_features = X[t][i], candidate_label = y[t][i]\n\t\t\t\tpotential[i] = feature_set.calc_inner_product(X[t][i], params)\n\n\t\t\t#scaling\n\t\t\tpotential = potential - np.max(potential, keepdims=True)\n\n\t\t\tfor i in range(len(X[t])):\n\t\t\t\ttotal_logProb += potential[i] * y[t][i]\n\n\t\t\tpotential, Z = self.__softmax(potential)\n\n\t\t\tfor i in range(len(X[t])):\n\t\t\t\tfeature_set.calc_inner_sum(expected_weights, X[t][i], potential[i])\n\n\t\t\ttotal_logZ += log(Z)\n\n\t\t# _params = feature_set.get_regularized_params(params, 'bias')\n\t\t_params = params\n\t\tlog_likelihood = total_logProb - total_logZ - (lambda_reg/2) * np.sum(np.multiply(_params,_params))\n\t\tgradients = empirical_weights - expected_weights - lambda_reg * _params\n\n\t\tglobal SUB_ITERATION_NUM\n\t\tif verbose:\n\t\t\tsub_iteration_str = ' '\n\t\t\tif SUB_ITERATION_NUM > 0:\n\t\t\t\tsub_iteration_str = '(' + '{0:02d}'.format(SUB_ITERATION_NUM) + ')'\n\t\t\tprint(' ', '{0:03d}'.format(ITERATION_NUM), sub_iteration_str, ':', log_likelihood * sign)\n\n\t\tSUB_ITERATION_NUM += 1\n\n\t\treturn sign * log_likelihood, sign * gradients", "def loglikelihood(self, net):\n\n adjmat = net.edges.adjacency_matrix\n\n # if any of the mustexist or mustnotexist constraints are violated,\n # return negative infinity\n if (not (adjmat | self.mustexist).all()) or \\\n (adjmat & self.mustnotexist).any():\n return NEGINF\n\n # if any custom constraints are violated, return negative infinity\n if self.constraints and not all(c(adjmat) for c in self.constraints):\n return NEGINF\n\n loglike = 0.0\n if self.energy_matrix != None:\n energy = N.sum(adjmat * self.energy_matrix) \n loglike = -self.weight * energy\n\n return loglike", "def _compute_likelihood(\n self, indices_ones, pi, alpha_1, alpha_2, tau_1, tau_2\n ):\n nq, nl = self.n_row_clusters, self.n_column_clusters\n return (\n -self._np.sum(tau_1 * self._np.log(tau_1))\n - self._np.sum(tau_2 * self._np.log(tau_2))\n + tau_1.sum(0) @ self._np.log(alpha_1)\n + tau_2.sum(0) @ self._np.log(alpha_2).T\n + (\n tau_1[indices_ones[0]].reshape(-1, nq, 1)\n * tau_2[indices_ones[1]].reshape(-1, 1, nl)\n * (\n self._np.log(pi.reshape(1, nq, nl))\n 
- self._np.log(1 - pi).reshape(1, nq, nl)\n )\n ).sum()\n + (tau_1.sum(0) @ self._np.log(1 - pi) @ tau_2.sum(0))\n )", "def __compute_log_likelihood(self, outputs, data, boolean):\n end_loc, pi_loc, mu_loc, sigma_loc, rho_loc = outputs\n\n mu1_loc, mu2_loc = mu_loc[:, :, :20], mu_loc[:, :, 20:]\n sig1_loc, sig2_loc = (\n sigma_loc[:, :, :20] + 10e-10,\n sigma_loc[:, :, 20:] + 10e-10,\n )\n\n x1_loc = data[:, 1:, 1].unsqueeze(2).to(self.__device)\n x2_loc = data[:, 1:, 2].unsqueeze(2).to(self.__device)\n x3_loc = data[:, 1:, 0].to(self.__device)\n\n end_loc = end_loc[:, :-1, -1].to(self.__device)\n pi_loc = pi_loc[:, :-1, :].to(self.__device)\n mu1_loc = mu1_loc[:, :-1, :].to(self.__device)\n mu2_loc = mu2_loc[:, :-1, :].to(self.__device)\n sig1_loc = sig1_loc[:, :-1, :].to(self.__device)\n sig2_loc = sig2_loc[:, :-1, :].to(self.__device)\n rho_loc = rho_loc[:, :-1, :].to(self.__device)\n\n boolean = boolean[:, :-1].to(self.__device)\n\n gauss = pi_loc * self.__bivariate_gaussian(\n sig1_loc, sig2_loc, mu1_loc, mu2_loc, x1_loc, x2_loc, rho_loc\n )\n gauss = torch.sum(gauss, dim=2).to(self.__device)\n\n log_lik = torch.sum(\n -boolean * torch.log(gauss + 10e-10)\n - boolean * torch.log(end_loc + 10e-10) * (x3_loc)\n - boolean * torch.log(1 - end_loc + 10e-10) * (1 - x3_loc)\n )\n\n return log_lik", "def __compute_log_likelihood(self, outputs, data, boolean):\n end_loc, pi_loc, mu_loc, sigma_loc, rho_loc = outputs\n\n mu1_loc, mu2_loc = mu_loc[:, :, :20], mu_loc[:, :, 20:]\n sig1_loc, sig2_loc = (\n sigma_loc[:, :, :20] + 10e-10,\n sigma_loc[:, :, 20:] + 10e-10,\n )\n\n x1_loc = data[:, 1:, 1].unsqueeze(2).to(self.__device)\n x2_loc = data[:, 1:, 2].unsqueeze(2).to(self.__device)\n x3_loc = data[:, 1:, 0].to(self.__device)\n\n end_loc = end_loc[:, :-1, -1].to(self.__device)\n pi_loc = pi_loc[:, :-1, :].to(self.__device)\n mu1_loc = mu1_loc[:, :-1, :].to(self.__device)\n mu2_loc = mu2_loc[:, :-1, :].to(self.__device)\n sig1_loc = sig1_loc[:, :-1, :].to(self.__device)\n sig2_loc = sig2_loc[:, :-1, :].to(self.__device)\n rho_loc = rho_loc[:, :-1, :].to(self.__device)\n\n boolean = boolean[:, :-1].to(self.__device)\n\n gauss = pi_loc * self.__bivariate_gaussian(\n sig1_loc, sig2_loc, mu1_loc, mu2_loc, x1_loc, x2_loc, rho_loc\n )\n gauss = torch.sum(gauss, dim=2).to(self.__device)\n\n log_lik = torch.sum(\n -boolean * torch.log(gauss + 10e-10)\n - boolean * torch.log(end_loc + 10e-10) * (x3_loc)\n - boolean * torch.log(1 - end_loc + 10e-10) * (1 - x3_loc)\n )\n\n return log_lik", "def _compute_log_likelihood(self, parameters):\n raise NotImplementedError('')", "def log_likelihood_function(self, instance):\r\n\r\n xvalues = np.arange(self.data.shape[0])\r\n model_data = instance.profile_from_xvalues(xvalues=xvalues)\r\n residual_map = self.data - model_data\r\n chi_squared_map = (residual_map / self.noise_map) ** 2.0\r\n log_likelihood = -0.5 * sum(chi_squared_map)\r\n\r\n return log_likelihood", "def compute_log_likelihood(self, indicators, weights, l2):\n scores, _ = self.predict_probability(self.train_feature_x, weights)\n probs = self.predict_probability(self.train_feature_x, weights)\n lp = np.sum((indicators-1)*scores + np.log(probs)) - l2* np.sum(weights[1:]**2)\n return lp", "def _calculate_ll(self, x):\n observation_log_probs = self._observation_log_probs(x, mask=None)\n forward_log_probs = self._forward(observation_log_probs)\n log_likelihood = logsumexp(\n forward_log_probs[forward_log_probs.shape[0] - 1, :].numpy())\n return log_likelihood", "def get_likelihoods(self, alleles):\n\n l = 
len(alleles)\n if l==2:\n result = self.likelihoods2(alleles)\n elif l==3:\n result = self.likelihoods3(alleles)\n elif l==4:\n result = self.likelihoods4(alleles)\n elif l==5:\n result = self.likelihoods5(alleles)\n else:\n result = self.likelihoods(alleles)\n return result", "def bd_nll(params, trajectory, return_params=False):\n # Extract parameters\n s_fit = params[0]\n t_fit = params[2]\n N_w_fit = int(params[1])\n\n # Exatract inferred clone_sizes from AO/DP ratio\n mean_size, size_range = observations_to_clone_size(AO=trajectory.AO,\n DP=trajectory.DP,\n N_w=N_w_fit)\n\n # Set inferred size as the mean of all possible ranges of observations\n trajectory['inferred_size'] = mean_size\n\n # Compute time_steps\n trajectory['delta_t'] = np.insert(np.diff(trajectory.age),\n 0, trajectory.iloc[0].age - t_fit)\n # Initialise negative log-likelihood computation\n nll = 0\n for i, time_point in trajectory.iterrows():\n # Extract initial clone_size and time difference between observations\n if i == 0:\n init_size = 1\n else:\n init_size = max(trajectory.iloc[i-1].inferred_size, 1)\n\n # Compute AO/DP observation probability\n prob = AO_prob_value(AO=time_point.AO,\n DP=time_point.DP,\n init_size=init_size,\n s=s_fit,\n delta_t=time_point.delta_t,\n N_w=N_w_fit)\n\n # Avoid divide by zero encountered in log warning\n if prob < 1.0e-100:\n prob = 1.0e-100\n\n # Compute negative log likelihood\n nll -= np.log(prob)\n\n if return_params is True:\n return nll, params\n else:\n return nll", "def spatialgp_nll_ngrad(**kwargs):\n\n try:\n# print \"optimizing params\", kernel_params\n gp = SpatialGP(compute_ll=True, compute_grad=True, **kwargs)\n\n nll = -1 * gp.ll\n ngrad = -1 * gp.ll_grad\n\n except np.linalg.linalg.LinAlgError as e:\n print \"warning: lin alg error (%s) in likelihood computation, returning likelihood -inf\" % str(e)\n nll = np.float(\"inf\")\n ngrad = None\n except ValueError as e:\n print \"warning: value error (%s) in likelihood computation, returning likelihood -inf\" % str(e)\n nll = np.float(\"inf\")\n ngrad = None\n\n return nll, ngrad", "def objective_llh(self, params):\n\n try:\n obj = self.log_likelihood(params[0], params[1], params[2:])\n except (LinAlgError, ZeroDivisionError, ValueError):\n obj = -np.inf\n return obj", "def log_likelihood(self):\r\n A = -0.5 * self.num_data * self.output_dim * np.log(2.*np.pi) + 0.5 * np.sum(np.log(self.beta_star)) - 0.5 * np.sum(self.V_star * self.likelihood.Y)\r\n C = -self.output_dim * (np.sum(np.log(np.diag(self.LB))))\r\n D = 0.5 * np.sum(np.square(self._LBi_Lmi_psi1V))\r\n return A + C + D + self.likelihood.Z", "def log_likelihood_function(self, instance: af.ModelInstance) -> float:\r\n model_data = self.model_data_from_instance(instance=instance)\r\n fit = self.fit_from_model_data(model_data=model_data)\r\n return fit.log_likelihood", "def log_likelihood(self, data, reward_model, bias_params):", "def maxlikelihood(self):\n\n chi2 = lambda *args: -2 * lnlikelihood.lnlike(*args) \n # print('***DEBUGGING*** chi2 = ', chi2)\n # print('***DEBUGGING*** self.theta_guess = ', self.theta_guess)\n # print('***DEBUGGING*** self.transinfo = ', self.transinfo)\n # print('***DEBUGGING*** self.wave_b = ', self.wave_b)\n # print('***DEBUGGING*** self.flux_b = ', self.flux_b)\n # print('***DEBUGGING*** self.err_b = ', self.err_b)\n # print('***DEBUGGING*** self.wave_r = ', self.wave_r)\n # print('***DEBUGGING*** self.flux_r = ', self.flux_r)\n # print('***DEBUGGING*** self.err_r = ', self.err_r)\n # print('***DEBUGGING*** self.velres = ', 
self.velres)\n result = op.minimize(chi2, self.theta_guess,\n args=(self.transinfo, self.wave_b, self.flux_b, self.err_b,\n self.wave_r, self.flux_r, self.err_r, self.velres))\n\n self.theta_ml = result[\"x\"]", "def loglikelihood(mean, grad):\n\n # update the global latent_means list\n latent_means[index] = mean\n\n if grad.size > 0:\n # update the gradient\n grad[:] = compute_gradient(\n Y=Y, \n mi=mean, \n latent_Sigmas=latent_Sigmas,\n B1=B1,\n B2=B2,\n ss=ss,\n mu=mu,\n g1=g1,\n g2=g2,\n sigma2=sigma2,\n index=index\n )\n\n a1, a2, a3, a4, a5 = compute_terms(\n Y=Y, \n latent_means=latent_means, \n latent_Sigmas=latent_Sigmas, \n B1=B1, \n B2=B2, \n mu=mu, \n g1=g1, \n g2=g2\n )\n\n scalars = N*q/2 - N*p/2*np.log(TWOPI*sigma2)\n\n total = sum(\n [\n item1 - 1/(2*sigma2)*item2 + (TWOPI)**(1/2-q)*(item3 + item4 + item5) \n for item1, item2, item3, item4, item5 in zip(a1, a2, a3, a4, a5)\n ]\n )\n\n return total + scalars", "def calculate_log_perplexity(self, output, flat_labels): #completed, expensive, should be compiled\n return -np.sum(np.log2(np.clip(output, a_min=1E-12, a_max=1.0))[np.arange(flat_labels.shape[0]), flat_labels[:,1]])", "def log_likelihood(self, return_gradient=False):\n p = self.parameters # this has to be called first\n\n # check if I need to recompute anything\n if return_gradient and (self._gradient is None):\n # compute the log likelihood and gradient wrt the parameters\n if 'adjoint' in self.grad_method:\n (self._log_like, self._gradient) = self._adjoint_gradient(p)\n elif 'finite_difference' in self.grad_method:\n (self._log_like, self._gradient) = self._finite_diff_gradient(p)\n else:\n raise RuntimeError('unknown grad_method %s' % repr(self.grad_method))\n elif self._log_like is None: # compute the log-likelihood without gradient\n self._log_like = self._compute_log_likelihood(p)\n else: # everything is already computed\n pass\n\n if return_gradient: # return both\n return self._log_like, self._gradient\n else: # just return likelihood\n return self._log_like", "def log_likelihood(data, probs):\n # Assume data is given as counts\n return _np.sum([nlogp(n, p) for n, p in zip(data, probs) if n > 0])", "def log_likelihood(self, params):\n # extract the parameters\n m1 = params['m1']\n m2 = params['m2']\n DL = params['DL']\n Tc = params['Tc']\n iota = params['iota']\n phic = params['phic']\n psi = params['psi']\n thetaS = params['thetaS']\n phiS = params['phiS']\n\n # calculate the model\n model = self._model(time, m1, m2, DL, Tc, iota, phic, psi, thetaS, phiS)\n\n# # normalisation\n# norm = -0.5*self._ndata*LN2PI - self._ndata*self._logsigma\n\n# # chi-squared\n# chisq = np.sum(((self._data - model)/(self._sigma))**2)\n\n return -np.vdot(self._data - model,self._data - model)", "def f(self, x):\n error = log_likelihood_calc(x[1], x[0], self.data)\n return error", "def marginal_ln_likelihood_worker(task):\n slice_or_idx, task_id, prior_samples_file, joker_helper = task\n\n # Read the batch of prior samples\n batch = read_batch(prior_samples_file, joker_helper.packed_order,\n slice_or_idx, units=joker_helper.internal_units)\n\n if batch.dtype != np.float64:\n batch = batch.astype(np.float64)\n\n # memoryview is returned\n ll = joker_helper.batch_marginal_ln_likelihood(batch)\n\n return np.array(ll)", "def loglikelihood(self):\n raise NotImplementedError(\"To be implemented\")", "def _compute_log_likelihood(self, X, S):\n log_likelihood = 0\n for n in range(self.n_col):\n likelihood = 1\n for k in range(self.n_components):\n likelihood *= self.weights[k] \\\n * 
multivariate_normal(self.means[k], self.covs[k]).pdf(X[n]) \\\n * poisson(self.rates[k]).pmf(S[n])\n log_likelihood += np.log(likelihood)\n\n return log_likelihood", "def __calc_likelihood(self, *args):\n params = {}\n for i, p in enumerate(self._par_names):\n if self._par_islog[p]:\n params[p] = np.power(10., args[i])\n else:\n params[p] = args[i]\n return self.return_likelihood(params)", "def loglikehood_coefficient(n_items, X, Y):\n # should not need X_norm_squared because if you could precompute that as\n # well as Y, then you should just pre-compute the output and not even\n # call this function.\n\n def safeLog(d):\n if d <= 0.0:\n return 0.0\n else:\n return np.log(d)\n\n def logL(p, k, n):\n return k * safeLog(p) + (n - k) * safeLog(1.0 - p)\n\n def twoLogLambda(k1, k2, n1, n2):\n p = (k1 + k2) / (n1 + n2)\n return 2.0 * (logL(k1 / n1, k1, n1) + logL(k2 / n2, k2, n2)\n - logL(p, k1, n1) - logL(p, k2, n2))\n\n if X is Y:\n X = Y = np.asanyarray(X)\n else:\n X = np.asanyarray(X)\n Y = np.asanyarray(Y)\n\n result = []\n\n # TODO: Check if it is possible to optimize this function\n\n i = 0\n for arrayX in X:\n result.append([])\n for arrayY in Y:\n XY = np.intersect1d(arrayX, arrayY)\n\n if XY.size == 0:\n result[i].append(0.0)\n else:\n nX = arrayX.size\n nY = arrayY.size\n if (nX - XY.size == 0) or (n_items - nY) == 0:\n result[i].append(1.0)\n else:\n logLikelihood = twoLogLambda(float(XY.size),\n float(nX - XY.size),\n float(nY),\n float(n_items - nY))\n\n result[i].append(1.0 - 1.0 / (1.0 + float(logLikelihood)))\n result[i] = np.asanyarray(result[i])\n i += 1\n\n return np.asanyarray(result)", "def get_log_likelihood(phi, pred, t, dot_product, weight, reg= 1):\n prior = -0.5* np.sum(np.multiply(weight, weight))\n likelihood = np.multiply(t, np.log(pred+TOLERANCE)) + np.multiply(1.0- t, np.log(1.0-pred+TOLERANCE))\n likelihood = np.sum(likelihood)\n\n return prior + likelihood" ]
[ "0.6380167", "0.61678493", "0.6130389", "0.61122036", "0.59834164", "0.59834164", "0.5842508", "0.5807362", "0.5767984", "0.5747156", "0.5727282", "0.5678164", "0.5648807", "0.5646092", "0.5632708", "0.5607198", "0.5598938", "0.558566", "0.55793875", "0.55783534", "0.5550739", "0.55473083", "0.5536552", "0.5530495", "0.552269", "0.5521318", "0.55148983", "0.5502707", "0.5498388", "0.54588" ]
0.68580836
0
Performs local search / hill climbing to increase loglikelihood of the model by switching the community of a single node at a time.
def block_local_search(event_dict, n_classes, node_membership_init, duration, max_iter=100, n_cores=-1, return_fitted_param=False, verbose=False): n_nodes = len(node_membership_init) nodes = np.arange(n_nodes) node_membership = node_membership_init # estimate initial params of block model and its log-likelihood (init_log_lik, fitted_params) = bhm_estimate_utils.estimate_hawkes_param_and_calc_log_likelihood(event_dict, node_membership, duration, n_classes, add_com_assig_log_prob=False) log_lik = init_log_lik n_cores = n_cores if n_cores > 0 else multiprocessing.cpu_count() batch_size = np.int(n_nodes / n_cores) + 1 # print(n_cores) for iter in range(max_iter): if verbose: print(f"Iteration {iter}...", end='\r') tic = time.time() # for each of the (k-1)*n neighboring solutions possible_solutions = Parallel(n_jobs=n_cores)(delayed(calc_node_neigh_solutions) (event_dict, n_classes, duration, node_membership, log_lik, nodes[batch_size * ii: batch_size * (ii + 1)]) for ii in range(n_cores)) toc = time.time() print(f"Iter {iter}, took: {(toc - tic)/3600:.3f}h") possible_solutions = np.array(possible_solutions) # if all returned log-likelihoods are np.nan, break. You're at a local optima. if np.all(np.isnan(possible_solutions[:, 2])): if verbose: print(f"Local solution found with {iter} iterations.") break max_ll_neigh_idx = np.nanargmax(possible_solutions[:, 2]) best_node_to_switch = int(possible_solutions[max_ll_neigh_idx, 0]) best_class_to_switch_to = int(possible_solutions[max_ll_neigh_idx, 1]) # if a good neighbor was found, update best log_lik, and go for the next iteration. node_membership[best_node_to_switch] = best_class_to_switch_to (log_lik, fitted_params) = bhm_estimate_utils.estimate_hawkes_param_and_calc_log_likelihood(event_dict, node_membership, duration, n_classes, False) if iter == max_iter - 1: print("Warning: Max iter reached!") if verbose: print(f"likelihood went from {init_log_lik:.4f} to {log_lik:.4f}. " f"{100 * np.abs((log_lik - init_log_lik) / init_log_lik):.2f}% increase.") if return_fitted_param: mu, alpha, beta = fitted_params return node_membership, mu, alpha, beta return node_membership
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def find_local_community(G, seed_node, weight, debug_log=False):\n nodes_in_community = seed_node if isinstance(seed_node, list) else [seed_node]\n modularity = edge_modularity(G, nodes_in_community=nodes_in_community, weight=weight)\n neighbor_edges = get_neighbor_edges(G, nodes_in_community=nodes_in_community)\n if debug_log:\n print('==========\\nInitial community has nodes:', nodes_in_community)\n print('Neighbor edges:', neighbor_edges)\n print('Modularity = %f' % modularity)\n while neighbor_edges:\n # Compute the edge_modularity for each neighbor edge,\n # suppose the neighbor edge is added to the community\n mod_max, c_max, e_max = 0, None, None\n for e in neighbor_edges:\n # edges in the current community\n edges_in_temp_community = list(G.subgraph(nodes_in_community).edges)\n # append the candidate edge\n edges_in_temp_community.append(e)\n nodes_in_temp_community = list(G.edge_subgraph(edges_in_temp_community).nodes)\n mod_temp = edge_modularity(G, nodes_in_community=nodes_in_temp_community, weight=weight)\n if mod_temp > mod_max:\n mod_max, c_max, e_max = mod_temp, nodes_in_temp_community, e\n if mod_max > modularity:\n if debug_log:\n print('==========\\nEdge', e_max, 'and node', set(e_max).difference(nodes_in_community), 'are added to the community')\n\n # Update the community and the corresponding neighbor edges\n nodes_in_community = c_max\n modularity = mod_max\n neighbor_edges = get_neighbor_edges(G, nodes_in_community=nodes_in_community)\n\n if debug_log:\n print('The community has nodes:', nodes_in_community)\n print('Modularity = %f' % mod_max)\n print('Neighbor edges:', neighbor_edges)\n else:\n break\n return nodes_in_community, modularity", "def _local_search(self):\n\n # Set occupancies of rigid cluster and its direct neighboring atoms to\n # 1 for clash detection and MIQP\n selection = self.ligand._selection\n self.ligand._active[selection] = True\n center = self.ligand.coor[self._cluster].mean(axis=0)\n new_coor_set = []\n new_bs = []\n for coor, b in zip(self._coor_set, self._bs):\n self.ligand._coor[selection] = coor\n self.ligand._b[selection] = b\n rotator = GlobalRotator(self.ligand, center=center)\n for rotmat in RotationSets.get_local_set():\n rotator(rotmat)\n translator = Translator(self.ligand)\n iterator = itertools.product(\n *[np.arange(*trans) for trans in self._trans_box]\n )\n for translation in iterator:\n translator(translation)\n new_coor = self.ligand.coor\n if self.options.remove_conformers_below_cutoff:\n values = self.xmap.interpolate(new_coor)\n mask = self.ligand.e != \"H\"\n if np.min(values[mask]) < self.options.density_cutoff:\n continue\n if self.options.external_clash:\n if not self._cd() and not self.ligand.clashes():\n if new_coor_set:\n delta = np.array(new_coor_set) - np.array(new_coor)\n if (\n np.sqrt(\n min(np.square((delta)).sum(axis=2).sum(axis=1))\n )\n >= self.options.rmsd_cutoff\n ):\n new_coor_set.append(new_coor)\n new_bs.append(b)\n else:\n new_coor_set.append(new_coor)\n new_bs.append(b)\n elif not self.ligand.clashes():\n if new_coor_set:\n delta = np.array(new_coor_set) - np.array(new_coor)\n if (\n np.sqrt(min(np.square((delta)).sum(axis=2).sum(axis=1)))\n >= self.options.rmsd_cutoff\n ):\n new_coor_set.append(new_coor)\n new_bs.append(b)\n else:\n new_coor_set.append(new_coor)\n new_bs.append(b)\n self.ligand._active[self.ligand._selection] = False\n selection = self.ligand._selection[self._cluster]\n self.ligand._active[selection] = True\n for atom in self._cluster:\n atom_sel = 
self.ligand._selection[self.ligand.connectivity[atom]]\n self.ligand._active[atom_sel] = True\n self.conformer = self.ligand\n self._coor_set = new_coor_set\n self._bs = new_bs\n if len(self._coor_set) < 1:\n logger.warning(\n f\"{self.ligand.resn[0]}: \"\n f\"Local search {self._cluster_index}: {len(self._coor_set)} conformers\"\n )\n return\n\n # QP score conformer occupancy\n logger.debug(\"Converting densities.\")\n self._convert()\n self._solve_qp()\n logger.debug(\"Updating conformers\")\n self._update_conformers()\n if self.options.write_intermediate_conformers:\n self._write_intermediate_conformers(prefix=\"localsearch_ligand_qp\")\n if len(self._coor_set) < 1:\n logger.warning(\n f\"{self.ligand.resn[0]}: \"\n f\"Local search QP {self._cluster_index}: {len(self._coor_set)} conformers\"\n )\n return\n\n # MIQP score conformer occupancy\n self._convert()\n self._solve_miqp(\n threshold=self.options.threshold, cardinality=self.options.cardinality\n )\n self._update_conformers()\n if self.options.write_intermediate_conformers:\n self._write_intermediate_conformers(prefix=\"localsearch_ligand_miqp\")", "def assign_communities(graph):\n communities = nx.algorithms.community\\\n .greedy_modularity_communities(nx.Graph(graph))\n for node in graph.nodes:\n graph.nodes[node]['community'] = [i for i,c in enumerate(communities)\n if node in c][0]\n graph.graph['modularity'] = nx.algorithms.community.quality\\\n .modularity(nx.Graph(graph),\n communities)", "def bgll(self, graph, node_count, min_mod, max_pass):\n\n #the belonging of the node\n bl = [i for i in range(node_count)]\n #the node's weight in community\n _in = [0.0] * node_count\n #the node's weight in graph\n _tot = []\n #total weight of a node, just a copy of _tot\n k = []\n #the total weight of the graph\n m = []\n\n #inital the in-param\n network = [[0.0] * node_count for n in range(node_count)]\n for node, tag, weight in graph:\n network[node][tag] = weight\n for node in network:\n k.append(sum(node))\n _tot = k[:]\n m = sum(k)\n #inital the in-param\n\n def modularity():\n \"\"\"\n This function mainly computes the modularity of the network\n Return:\n mod->the modularity value\n \"\"\"\n\n q = 0.0\n for idx in range(0, node_count):\n if _tot[idx] > 0.0:\n q += (_in[idx] / m - math.pow(_tot[idx] / m, 2))\n return q\n\n def modularity_gain(n, c, dnodecomm):\n \"\"\"\n This function mainly computes the modularity gain of a node moving\n Param:\n n->node id\n c->community id\n dnodecomm->the weight between the node and the community\n Return:\n gain->modularity gain\n \"\"\"\n\n totc = _tot[c]\n degc = k[n]\n return dnodecomm - (totc * degc) / m\n\n def neigh_comm(n):\n \"\"\"\n This function mainly computes the weight between the node and it's neighbour community\n Param:\n n->node id\n Return:\n nc->the map of the weight between the node and it's neighbour community\n nc=>{cid,weight}\n \"\"\"\n\n nc = {bl[n]: 0.0}\n for idx in range(0, node_count):\n neigh = idx\n ncomm = bl[neigh]\n nei_weight = network[n][idx]\n if (neigh != n) & (nei_weight > 0.0):\n if ncomm in nc:\n nc[ncomm] += nei_weight\n else:\n nc[ncomm] = nei_weight\n return nc\n\n def insert(n, c, dnodecomm):\n \"\"\"\n This function mainly get the effect of insert the node into community\n Param:\n n->node id\n c->community id\n dnodecomm->the weight between the node and the community\n \"\"\"\n\n _tot[c] += k[n]\n _in[c] += 2 * dnodecomm + network[n][n]\n bl[n] = c\n\n def remove(n, c, dnodecomm):\n \"\"\"\n This function mainly get the effect of remove the node 
off community\n Param:\n n->node id\n c->community id\n dnodecomm->the weight between the node and the community\n \"\"\"\n\n _tot[c] -= k[n]\n _in[c] -= 2 * dnodecomm + network[n][n]\n bl[n] = -1\n\n def detect():\n \"\"\"\n This function mainly detect the community of the graph.\n \"\"\"\n\n _pass_done = 0\n _improve = True\n new_mod = modularity()\n cur_mod = -999999999.0\n rl = random.sample(range(0, node_count), node_count)\n while _improve & (_pass_done < max_pass) & (new_mod - cur_mod > min_mod):\n cur_mod = new_mod\n _improve = False\n _pass_done += 1\n for node_tmp in rl:\n n = node_tmp\n nc = bl[n]\n ncomm = neigh_comm(n)\n remove(n, nc, ncomm[nc])\n best_c = nc\n best_l = 0.0\n best_incre = 0.0\n for c in ncomm:\n incre = modularity_gain(n, c, ncomm[c])\n if incre > best_incre:\n best_incre = incre\n best_c = c\n best_l = ncomm[c]\n insert(n, best_c, best_l)\n if best_c != nc:\n _improve = True\n new_mod = modularity()\n print new_mod\n\n detect()\n return bl", "def update_global_best(self, swarm):\n\n # the fitness of the particles are calculated by their crowding distance\n crowding_distance(swarm)\n\n # the length of the leaders archive cannot be longer than the number of the initial population\n self.leaders += swarm\n self.leaders.truncate(self.options['max_population_size'], 'crowding_distance')\n # self.problem.archive += swarm\n\n return", "def update_global_best(self, swarm):\n\n # the fitness of the particles are calculated by their crowding distance\n crowding_distance(swarm)\n\n # the length of the leaders archive cannot be longer than the number of the initial population\n self.leaders += swarm\n self.leaders.truncate(self.options['max_population_size'], 'crowding_distance')\n self.archive += swarm\n\n return", "def detect():\n\n _pass_done = 0\n _improve = True\n new_mod = modularity()\n cur_mod = -999999999.0\n rl = random.sample(range(0, node_count), node_count)\n while _improve & (_pass_done < max_pass) & (new_mod - cur_mod > min_mod):\n cur_mod = new_mod\n _improve = False\n _pass_done += 1\n for node_tmp in rl:\n n = node_tmp\n nc = bl[n]\n ncomm = neigh_comm(n)\n remove(n, nc, ncomm[nc])\n best_c = nc\n best_l = 0.0\n best_incre = 0.0\n for c in ncomm:\n incre = modularity_gain(n, c, ncomm[c])\n if incre > best_incre:\n best_incre = incre\n best_c = c\n best_l = ncomm[c]\n insert(n, best_c, best_l)\n if best_c != nc:\n _improve = True\n new_mod = modularity()\n print new_mod", "def walk(self, community, now):\n self._get_or_create_timestamps(community).last_walk = now", "def _candidate_walker(self):\n walker_communities = self._walker_commmunities\n\n steps = 0\n start = time()\n\n # delay will never be less than 0.1, hence we can accommodate 50 communities before the\n # interval between each step becomes larger than 5.0 seconds\n optimaldelay = max(0.1, 5.0 / len(walker_communities))\n if __debug__: dprint(\"there are \", len(walker_communities), \" walker enabled communities. 
pausing \", optimaldelay, \"s (on average) between each step\")\n\n if __debug__:\n RESETS = 0\n STEPS = 0\n START = start\n DELAY = 0.0\n for community in walker_communities:\n community.__MOST_RECENT_WALK = 0.0\n\n for community in walker_communities:\n community.__most_recent_sync = 0.0\n\n while True:\n community = walker_communities.pop(0)\n walker_communities.append(community)\n\n actualtime = time()\n allow_sync = actualtime - community.__most_recent_sync > 4.5\n # dprint(\"previous sync was \", round(actualtime - community.__most_recent_sync, 1), \" seconds ago\", \"\" if allow_sync else \" (no sync this cycle)\", force=1)\n if allow_sync:\n community.__most_recent_sync = actualtime\n\n if __debug__:\n NOW = time()\n OPTIMALSTEPS = (NOW - START) / optimaldelay\n STEPDIFF = NOW - community.__MOST_RECENT_WALK\n community.__MOST_RECENT_WALK = NOW\n dprint(community.cid.encode(\"HEX\"), \" taking step every \", \"%.2f\" % DELAY, \" sec in \", len(walker_communities), \" communities. steps: \", STEPS, \"/\", int(OPTIMALSTEPS), \" ~ %.2f.\" % (-1.0 if OPTIMALSTEPS == 0.0 else (STEPS / OPTIMALSTEPS)), \" diff: %.1f\" % STEPDIFF, \". resets: \", RESETS)\n STEPS += 1\n\n # walk\n assert community.dispersy_enable_candidate_walker\n assert community.dispersy_enable_candidate_walker_responses\n try:\n community.dispersy_take_step(allow_sync)\n steps += 1\n except Exception:\n dprint(community.cid.encode(\"HEX\"), \" causes an exception during dispersy_take_step\", exception=True, level=\"error\")\n\n optimaltime = start + steps * optimaldelay\n actualtime = time()\n\n if optimaltime + 5.0 < actualtime:\n # way out of sync! reset start time\n start = actualtime\n steps = 0\n self._statistics.walk_reset += 1\n if __debug__:\n dprint(\"can not keep up! resetting walker start time!\", level=\"warning\")\n DELAY = 0.0\n RESETS += 1\n\n else:\n if __debug__:\n DELAY = max(0.0, optimaltime - actualtime)\n yield max(0.0, optimaltime - actualtime)", "def community_changes(self, community):\n pass", "def _grid_search_wl_kernel(\n k: WeisfilerLehman,\n subtree_candidates,\n train_x: list,\n train_y: torch.Tensor,\n lik: float,\n subtree_prior=None, # pylint: disable=unused-argument\n lengthscales=None,\n lengthscales_prior=None, # pylint: disable=unused-argument\n):\n # lik = 1e-6\n assert len(train_x) == len(train_y)\n best_nlml = torch.tensor(np.inf)\n best_subtree_depth = None\n best_lengthscale = None\n best_K = None\n if lengthscales is not None and k.se is not None:\n candidates = [(h_, l_) for h_ in subtree_candidates for l_ in lengthscales]\n else:\n candidates = [(h_, None) for h_ in subtree_candidates]\n\n for i in candidates:\n if k.se is not None:\n k.change_se_params({\"lengthscale\": i[1]})\n k.change_kernel_params({\"h\": i[0]})\n K = k.fit_transform(train_x, rebuild_model=True, save_gram_matrix=True)\n # self.logger.debug(K)\n K_i, logDetK = compute_pd_inverse(K, lik)\n # self.logger.debug(train_y)\n nlml = -compute_log_marginal_likelihood(K_i, logDetK, train_y)\n # self.logger.debug(f\"{i} {nlml}\")\n if nlml < best_nlml:\n best_nlml = nlml\n best_subtree_depth, best_lengthscale = i\n best_K = torch.clone(K)\n # self.logger.debug(f\"h: {best_subtree_depth} theta: {best_lengthscale}\")\n # self.logger.debug(best_subtree_depth)\n k.change_kernel_params({\"h\": best_subtree_depth})\n if k.se is not None:\n k.change_se_params({\"lengthscale\": best_lengthscale})\n k._gram = best_K # pylint: disable=protected-access", "def monitor_milp_nodes(model):\n nodecnt = 
model.cbGet(GRB.Callback.MIP_NODCNT)\n if nodecnt > MILPSolver.params.BRANCH_THRESHOLD:\n MILPSolver.status = SolveResult.BRANCH_THRESHOLD\n model.terminate()", "def persistent_walk(lgca):\n relevant = (lgca.cell_density[lgca.nonborder] > 0) & \\\n (lgca.cell_density[lgca.nonborder] < lgca.K)\n coords = [a[relevant] for a in lgca.nonborder]\n newnodes = lgca.nodes.copy()\n g = lgca.calc_flux(lgca.nodes)\n for coord in zip(*coords):\n n = lgca.cell_density[coord]\n\n permutations = lgca.permutations[n]\n j = lgca.j[n]\n weights = np.exp(lgca.beta * np.einsum('i,ij', g[coord], j)).cumsum()\n ind = bisect_left(weights, random() * weights[-1])\n newnodes[coord] = permutations[ind]\n\n lgca.nodes = newnodes", "def assign_louvain_communities(\n reddit_graph: nx.Graph,\n wiki_graph: nx.Graph = None,\n reddit_edge_weight: str = \"count\",\n others_threshold: int = 2,\n louvain_resolution_reddit: float = 1,\n) -> Union[nx.Graph, Tuple[nx.Graph, nx.Graph]]:\n reddit_dendrogram = community.generate_dendrogram(\n reddit_graph, weight=reddit_edge_weight, resolution=louvain_resolution_reddit\n )\n if wiki_graph:\n wiki_dendrogram = community.generate_dendrogram(\n wiki_graph,\n )\n\n # Iterate over reddit nodes to assign communities\n for node in reddit_graph:\n # Iterate over all levels of the dendrogram\n for level in range(len(reddit_dendrogram) - 1):\n actual_level = len(reddit_dendrogram) - 2 - level\n\n partition = community.partition_at_level(reddit_dendrogram, level)\n\n node_community = partition[node]\n counts = Counter(partition.values())\n if counts[node_community] < others_threshold:\n node_community = -1\n reddit_graph.nodes[node][\n f\"louvain_community_reddit_R{louvain_resolution_reddit:.2f}_L{actual_level}\"\n ] = f\"L{actual_level}-{node_community:03}\"\n if wiki_graph:\n # Also add the community from the other graph to allow comparing\n # Again, iterate over all levels in the dendrogram\n for level in range(len(wiki_dendrogram) - 1):\n actual_level = len(wiki_dendrogram) - 2 - level\n\n partition = community.partition_at_level(wiki_dendrogram, level)\n\n try:\n node_community = partition[node]\n counts = Counter(partition.values())\n if counts[node_community] < others_threshold:\n node_community = -1\n\n reddit_graph.nodes[node][\n f\"louvain_community_wiki_L{actual_level}\"\n ] = f\"L{actual_level}-{node_community:03}\"\n\n except:\n reddit_graph.nodes[node][\n f\"louvain_community_wiki_L{level}\"\n ] = f\"L{level}-NONE\"\n if wiki_graph:\n for node in wiki_graph:\n for level in range(\n len(wiki_dendrogram) - 1,\n ):\n actual_level = len(wiki_dendrogram) - 2 - level\n\n partition = community.partition_at_level(wiki_dendrogram, level)\n node_community = partition[node]\n\n counts = Counter(partition.values())\n if counts[node_community] < others_threshold:\n node_community = -1\n\n wiki_graph.nodes[node][\n f\"louvain_community_wiki_L{actual_level}\"\n ] = f\"L{actual_level}-{node_community:03}\"\n # Also add the community from the other graph to allow comparing\n\n for level in range(len(reddit_dendrogram) - 1):\n actual_level = len(reddit_dendrogram) - 2 - level\n\n partition = community.partition_at_level(reddit_dendrogram, level)\n\n try:\n node_community = partition[node]\n\n counts = Counter(partition.values())\n if counts[node_community] < others_threshold:\n node_community = -1\n wiki_graph.nodes[node][\n f\"louvain_community_reddit_R{louvain_resolution_reddit:.2f}_L{actual_level}\"\n ] = f\"L{actual_level}-{node_community:03}\"\n except:\n wiki_graph.nodes[node][\n 
f\"louvain_community_reddit_R{louvain_resolution_reddit:.2f}_L{level}\"\n ] = f\"L{level}-NONE\"\n\n return (\n (reddit_graph, reddit_dendrogram, wiki_graph, wiki_dendrogram)\n if wiki_graph\n else (reddit_graph, reddit_dendrogram)\n )", "def main():\n G = nx.gnp_random_graph(100, 0.5)\n centrality = nx.eigenvector_centrality(G)\n avg_centrality = sum(centrality.values()) / len(G)\n\n def has_high_centrality(v):\n return centrality[v] >= avg_centrality\n\n source = 0\n value = centrality.get\n condition = has_high_centrality\n\n found_node = progressive_widening_search(G, source, value, condition)\n c = centrality[found_node]\n print('found node {0} with centrality {1}'.format(found_node, c))", "def add_communites(self):\n\n query = '''\n MATCH (c1:)-[r:INTERACTS]->(c2:)\n RETURN c1.name, c2.name, r.weight AS weight\n '''\n ig = IGraph.TupleList(self.graph.run(query), weights=True)\n\n clusters = IGraph.community_walktrap(ig, weights=\"weight\").as_clustering()\n\n nodes = [{\"name\": node[\"name\"]} for node in ig.vs]\n for node in nodes:\n idx = ig.vs.find(name=node[\"name\"]).index\n node[\"community\"] = clusters.membership[idx]\n\n write_clusters_query = '''\n UNWIND {nodes} AS n\n MATCH (c:) WHERE c.name = n.name\n SET c.community = toInt(n.community)\n '''\n\n self.graph.run(write_clusters_query, nodes=nodes)", "def learner(idx, agent) :\n\n\t# Each worker nodes start at different time.\n\ttime.sleep(2*np.random.rand())\n\n\t# Status of current game\n\ttotal_score = total_step = 0\n\n\t# Send processor name\n\tcomm.send(my_name, dest=MASTER, tag=2)\n\n\t# Receive initail weights from master.\n\tglobal_weights = comm.recv(source=MASTER, tag=0)\n\tagent.set_weights(global_weights)\n\n\t# Request lastest weights from master every $t_sync training iterations.\n\t# t : current training iteration.\n\tt, t_sync = 0, 1\n\n\twhile True :\n\t\tt = (t+1)%t_sync\n\t\tsync = (t == 0) # request lastest weights ?\n\n\t\t# Train the model for some game steps.\n\t\tn_step = np.random.randint(128, 256)\n\t\t(score, n_step, done), loss, raw_grads = agent.train(n_step)\n\t\t\n\t\t# Update game status.\n\t\ttotal_score+= score\n\t\ttotal_step+= n_step\n\n\t\t# Clipped gradients.\n\t\tgrads = [np.clip(x, -100, 100) for x in raw_grads]\n\t\tgrads = raw_grads\n\n\t\t# Game status.\n\t\tstats = {\"done\":done, \"sync\":sync}\n\t\tif done : # Game is finished.\n\t\t\t# Number of 4-frame steps. How long does he survive ?\n\t\t\ttotal_step = (total_step + early_skipping)*nb_frames/4.\n\t\t\tstats.update({\"score\":total_score, \"steps\": total_step, \"loss\":loss})\n\n\t\t\t# Make a new game. 
Reset game status.\n\t\t\ttotal_score = total_step = 0\n\n\t\t# Send game status and gradients to master.\n\t\tcomm.send(stats, dest=MASTER, tag=1)\n\t\tsendFLOAT(grads, dest=MASTER, tag=1)\n\n\t\t# Receive lastest weights from master.\n\t\tif sync :\n\t\t\t# global_weights = comm.recv(source=MASTER, tag=0)\n\t\t\trecvFLOAT(global_weights, src=MASTER, tag=0)\n\t\t\tagent.set_weights(global_weights)", "def setupNeighbor(self, **params):\n if not self.rank:\n logging.info('Setting up nearest neighbor searching parameters')\n\n if 'nns_freq' not in params:\n params['nns_freq'] = 10\n\n if 'nns_skin' not in params:\n radius = 0\n\n for ss in params['species']:\n if 'radius' in ss:\n radius = max(radius, ss['radius'][1])\n\n params['nns_skin'] = radius * 4\n\n self.lmp.command('neighbor {nns_skin} {nns_type}'.format(**params))\n self.lmp.command('neigh_modify delay 0 every {nns_freq} check yes'.format(**params))", "def filter(self):\n new_nodes_to_update = {}\n nodes_to_update = {}\n\n for agent_id in self.cameras.keys():\n nodes_to_update[agent_id] = []\n new_nodes_to_update[agent_id] = []\n if agent_id not in self.beliefs:\n world_name = self.cameras[agent_id].name.replace(\"-\",\"_\")+\"_beliefs\"\n rospy.logdebug(\"[perspective_filter] create new world <%s>\" % str(world_name))\n self.beliefs[agent_id] = self.ctx.worlds[world_name]\n self.node_mapping[agent_id] = {}\n\n dq = deque()\n dq.append(self.source.scene.rootnode)\n\n while not rospy.is_shutdown() and 0 < len(dq):\n node = dq.pop()\n if node.id != self.source.scene.rootnode.id:\n # Process start here\n if node.id in self.cameras.keys(): # if the node is the agent POV\n nodes_to_update[node.id].append(node) # we add it to his belief\n\n if node.parent in self.cameras.keys() and node.type == MESH: # if the node is part of an agent\n nodes_to_update[node.parent].append(node) # we add it to his belief\n\n for agent_id, visible_nodes in self.visible_nodes.items(): # then we add the visible nodes\n if agent_id in self.cameras.keys():\n if node in visible_nodes:\n nodes_to_update[agent_id].append(node)\n\n # And end here\n for child_id in node.children:\n dq.append(self.source.scene.nodes[child_id])\n\n for agent_id, nodes in nodes_to_update.items():\n if nodes:\n for node in nodes:\n new_node = node.copy()\n if node.id in self.node_mapping[agent_id]:\n new_node.id = self.node_mapping[agent_id][node.id]\n if new_node.id in self.nodes_transform:\n if not numpy.allclose(self.nodes_transform[new_node.id], new_node.transformation):\n new_nodes_to_update[agent_id].append(new_node)\n self.nodes_transform[new_node.id] = new_node.transformation\n else:\n self.nodes_transform[new_node.id] = new_node.transformation\n new_nodes_to_update[agent_id].append(new_node)\n else:\n self.node_mapping[agent_id][node.id] = new_node.id\n new_nodes_to_update[agent_id].append(new_node)\n\n # Finally we update the corresponding beliefs worlds\n for agent_id, nodes in new_nodes_to_update.items():\n for node in nodes:\n node.parent = self.node_mapping[agent_id][node.parent] if node.parent in self.node_mapping[agent_id] \\\n else self.beliefs[agent_id].scene.rootnode.id\n if nodes:\n self.beliefs[agent_id].scene.nodes.update(nodes)", "def update_cluster_merge_across_nodes(request):\n ksm_merge_across_nodes = getattr(\n request.node.cls, \"ksm_merge_across_nodes\"\n )\n\n def fin():\n \"\"\"\n 1) Disable KSM\n \"\"\"\n ll_clusters.updateCluster(\n positive=True, cluster=sla_conf.CLUSTER_NAME[0], ksm_enabled=False\n )\n request.addfinalizer(fin)\n\n assert 
ll_clusters.updateCluster(\n positive=True,\n cluster=sla_conf.CLUSTER_NAME[0],\n ksm_enabled=True,\n ksm_merge_across_nodes=ksm_merge_across_nodes\n )", "def network(self, max_clusters=20, max_members=10, max_nodes=300, l_go_selective=False):\n if len(self.data)==0:\n return None\n if self.DM is None:\n util.error_msg('Please run cluster first!')\n S_node=GO_Cluster.sample_rows(self.t_go, max_clusters=max_clusters, max_members=max_members, max_nodes=max_nodes, l_go_selective=l_go_selective)\n T_node=self.t_go[self.t_go.GO.apply(lambda x: x in S_node)].copy()\n S_go=self.data.header()\n M=self.data.values\n n,m=M.shape\n S_node=set(T_node.GO)\n S_idx=[i for i,x in enumerate(S_go) if x in S_node ]\n S_name=[ S_go[i] for i in S_idx]\n T_node.rename2({'GO':'Gene'})\n s_name='GOCluster'\n if 'Name' in T_node.header():\n s_name=list(T_node.Name)[0]\n T_node.drop('Name', axis=1, inplace=True)\n if 'URL' in T_node.header():\n T_node.drop('URL', axis=1, inplace=True)\n\n c_has_neighbor={}\n data=[]\n c_cluster={ T_node.ix[i,'Gene']:T_node.ix[i,'GROUP_ID'] for i in T_node.index}\n n2=len(S_idx)\n for _i in range(n2):\n i=S_idx[_i]\n for _j in range(_i+1, n2):\n j=S_idx[_j]\n idx=i*(2*m-i-1)//2+(j-i)-1\n #print (_i, _j, n2, m, i, j, idx, S_name[_i], c_cluster[S_name[_i]], S_name[_j], c_cluster[S_name[_j]], K[idx])\n if self.DM[idx]>=self.similarity:\n data.append({'Gene_A':S_go[i], 'Gene_B':S_go[j], 'TYPE':'Direct', 'SCORE':self.DM[idx]})\n c_has_neighbor[S_go[i]]=True\n c_has_neighbor[S_go[j]]=True\n # keep singletons\n for i in S_idx:\n if S_go[i] not in c_has_neighbor:\n data.append({'Gene_A':S_go[i], 'Gene_B':S_go[i], 'TYPE':'Direct', 'SCORE':1.0})\n if len(data):\n T_edge=pd.DataFrame(data)\n T_node.index=list(range(len(T_node)))\n net=xgmml.Network(T_edge, T_node=T_node, name=s_name)\n return net", "def fit_gp(self):\n # Put things into training mode.\n self.gpf_core.float()\n self.likelihood.train()\n # Now use Adam by default.\n optimizer = torch.optim.Adam([{'params': self.gpf_core.parameters()}],\n lr=0.1)\n mll = gpytorch.mlls.ExactMarginalLogLikelihood(self.likelihood,\n self.gpf_core)\n # TODO: Allow length of training to be an option.\n for _ in range(500):\n optimizer.zero_grad()\n output = self.gpf_core(self.tensor_x)\n loss = -mll(output, self.tensor_y)\n loss.backward()\n optimizer.step()", "def set_level(self):\n queue = []\n for node in self.node:\n if distance.euclidean(node.location, para.base) < node.com_ran:\n node.level = 1\n queue.append(node.id)\n while queue:\n for neighbor_id in self.node[queue[0]].neighbor:\n if not self.node[neighbor_id].level:\n self.node[neighbor_id].level = self.node[queue[0]].level + 1\n queue.append(neighbor_id)\n queue.pop(0)", "def greedy_modularity_communities(G, weight=None):\n\n # Count nodes and edges\n N = len(G.nodes())\n m = sum([d.get('weight', 1) for u, v, d in G.edges(data=True)])\n q0 = 1.0 / (2.0*m)\n\n # Map node labels to contiguous integers\n label_for_node = dict((i, v) for i, v in enumerate(G.nodes()))\n node_for_label = dict((label_for_node[i], i) for i in range(N))\n\n # Calculate degrees\n k_for_label = G.degree(G.nodes(), weight=weight)\n k = [k_for_label[label_for_node[i]] for i in range(N)]\n\n # Initialize community and merge lists\n communities = dict((i, frozenset([i])) for i in range(N))\n merges = []\n\n # Initial modularity\n partition = [[label_for_node[x] for x in c] for c in communities.values()]\n q_cnm = modularity(G, partition)\n\n # Initialize data structures\n # CNM Eq 8-9 (Eq 8 was missing a factor of 2 
(from A_ij + A_ji)\n # a[i]: fraction of edges within community i\n # dq_dict[i][j]: dQ for merging community i, j\n # dq_heap[i][n] : (-dq, i, j) for communitiy i nth largest dQ\n # H[n]: (-dq, i, j) for community with nth largest max_j(dQ_ij)\n a = [k[i]*q0 for i in range(N)]\n dq_dict = dict(\n (i, dict(\n (j, 2*q0 - 2*k[i]*k[j]*q0*q0)\n for j in [\n node_for_label[u]\n for u in G.neighbors(label_for_node[i])]\n if j != i))\n for i in range(N))\n dq_heap = [\n MappedQueue([\n (-dq, i, j)\n for j, dq in dq_dict[i].items()])\n for i in range(N)]\n H = MappedQueue([\n dq_heap[i].h[0]\n for i in range(N)\n if len(dq_heap[i]) > 0])\n\n # Merge communities until we can't improve modularity\n while len(H) > 1:\n # Find best merge\n # Remove from heap of row maxes\n # Ties will be broken by choosing the pair with lowest min community id\n try:\n dq, i, j = H.pop()\n except IndexError:\n break\n dq = -dq\n # Remove best merge from row i heap\n dq_heap[i].pop()\n # Push new row max onto H\n if len(dq_heap[i]) > 0:\n H.push(dq_heap[i].h[0])\n # If this element was also at the root of row j, we need to remove the\n # duplicate entry from H\n if dq_heap[j].h[0] == (-dq, j, i):\n H.remove((-dq, j, i))\n # Remove best merge from row j heap\n dq_heap[j].remove((-dq, j, i))\n # Push new row max onto H\n if len(dq_heap[j]) > 0:\n H.push(dq_heap[j].h[0])\n else:\n # Duplicate wasn't in H, just remove from row j heap\n dq_heap[j].remove((-dq, j, i))\n # Stop when change is non-positive\n if dq <= 0:\n break\n\n # Perform merge\n communities[j] = frozenset(communities[i] | communities[j])\n del communities[i]\n merges.append((i, j, dq))\n # New modularity\n q_cnm += dq\n # Get list of communities connected to merged communities\n i_set = set(dq_dict[i].keys())\n j_set = set(dq_dict[j].keys())\n all_set = (i_set | j_set) - set([i, j])\n both_set = i_set & j_set\n # Merge i into j and update dQ\n for k in all_set:\n # Calculate new dq value\n if k in both_set:\n dq_jk = dq_dict[j][k] + dq_dict[i][k]\n elif k in j_set:\n dq_jk = dq_dict[j][k] - 2.0*a[i]*a[k]\n else:\n # k in i_set\n dq_jk = dq_dict[i][k] - 2.0*a[j]*a[k]\n # Update rows j and k\n for row, col in [(j, k), (k, j)]:\n # Save old value for finding heap index\n if k in j_set:\n d_old = (-dq_dict[row][col], row, col)\n else:\n d_old = None\n # Update dict for j,k only (i is removed below)\n dq_dict[row][col] = dq_jk\n # Save old max of per-row heap\n if len(dq_heap[row]) > 0:\n d_oldmax = dq_heap[row].h[0]\n else:\n d_oldmax = None\n # Add/update heaps\n d = (-dq_jk, row, col)\n if d_old is None:\n # We're creating a new nonzero element, add to heap\n dq_heap[row].push(d)\n else:\n # Update existing element in per-row heap\n dq_heap[row].update(d_old, d)\n # Update heap of row maxes if necessary\n if d_oldmax is None:\n # No entries previously in this row, push new max\n H.push(d)\n else:\n # We've updated an entry in this row, has the max changed?\n if dq_heap[row].h[0] != d_oldmax:\n H.update(d_oldmax, dq_heap[row].h[0])\n\n # Remove row/col i from matrix\n i_neighbors = dq_dict[i].keys()\n for k in i_neighbors:\n # Remove from dict\n dq_old = dq_dict[k][i]\n del dq_dict[k][i]\n # Remove from heaps if we haven't already\n if k != j:\n # Remove both row and column\n for row, col in [(k, i), (i, k)]:\n # Check if replaced dq is row max\n d_old = (-dq_old, row, col)\n if dq_heap[row].h[0] == d_old:\n # Update per-row heap and heap of row maxes\n dq_heap[row].remove(d_old)\n H.remove(d_old)\n # Update row max\n if len(dq_heap[row]) > 0:\n 
H.push(dq_heap[row].h[0])\n else:\n # Only update per-row heap\n dq_heap[row].remove(d_old)\n\n del dq_dict[i]\n # Mark row i as deleted, but keep placeholder\n dq_heap[i] = MappedQueue()\n # Merge i into j and update a\n a[j] += a[i]\n a[i] = 0\n\n communities = [\n frozenset([label_for_node[i] for i in c])\n for c in communities.values()]\n return sorted(communities, key=len, reverse=True)", "def spkernel(*args,\n node_label='atom',\n edge_weight=None,\n node_kernels=None,\n n_jobs=None,\n verbose=True):\n # pre-process\n Gn = args[0] if len(args) == 1 else [args[0], args[1]]\n Gn = [g.copy() for g in Gn]\n weight = None\n if edge_weight is None:\n if verbose:\n print('\\n None edge weight specified. Set all weight to 1.\\n')\n else:\n try:\n some_weight = list(\n nx.get_edge_attributes(Gn[0], edge_weight).values())[0]\n if isinstance(some_weight, (float, int)):\n weight = edge_weight\n else:\n if verbose:\n print(\n '\\n Edge weight with name %s is not float or integer. Set all weight to 1.\\n'\n % edge_weight)\n except:\n if verbose:\n print(\n '\\n Edge weight with name \"%s\" is not found in the edge attributes. Set all weight to 1.\\n'\n % edge_weight)\n ds_attrs = get_dataset_attributes(\n Gn,\n attr_names=['node_labeled', 'node_attr_dim', 'is_directed'],\n node_label=node_label)\n\n # remove graphs with no edges, as no sp can be found in their structures, \n # so the kernel between such a graph and itself will be zero.\n len_gn = len(Gn)\n Gn = [(idx, G) for idx, G in enumerate(Gn) if nx.number_of_edges(G) != 0]\n idx = [G[0] for G in Gn]\n Gn = [G[1] for G in Gn]\n if len(Gn) != len_gn:\n if verbose:\n print('\\n %d graphs are removed as they don\\'t contain edges.\\n' %\n (len_gn - len(Gn)))\n\n start_time = time.time()\n\n pool = Pool(n_jobs)\n # get shortest path graphs of Gn\n getsp_partial = partial(wrapper_getSPGraph, weight)\n itr = zip(Gn, range(0, len(Gn)))\n if len(Gn) < 100 * n_jobs:\n# # use default chunksize as pool.map when iterable is less than 100\n# chunksize, extra = divmod(len(Gn), n_jobs * 4)\n# if extra:\n# chunksize += 1\n chunksize = int(len(Gn) / n_jobs) + 1\n else:\n chunksize = 100\n if verbose:\n iterator = tqdm(pool.imap_unordered(getsp_partial, itr, chunksize),\n desc='getting sp graphs', file=sys.stdout)\n else:\n iterator = pool.imap_unordered(getsp_partial, itr, chunksize)\n for i, g in iterator:\n Gn[i] = g\n pool.close()\n pool.join()\n \n# # ---- direct running, normally use single CPU core. 
----\n# for i in tqdm(range(len(Gn)), desc='getting sp graphs', file=sys.stdout):\n# i, Gn[i] = wrapper_getSPGraph(weight, (Gn[i], i))\n\n # # ---- use pool.map to parallel ----\n # result_sp = pool.map(getsp_partial, range(0, len(Gn)))\n # for i in result_sp:\n # Gn[i[0]] = i[1]\n # or\n # getsp_partial = partial(wrap_getSPGraph, Gn, weight)\n # for i, g in tqdm(\n # pool.map(getsp_partial, range(0, len(Gn))),\n # desc='getting sp graphs',\n # file=sys.stdout):\n # Gn[i] = g\n\n # # ---- only for the Fast Computation of Shortest Path Kernel (FCSP)\n # sp_ml = [0] * len(Gn) # shortest path matrices\n # for i in result_sp:\n # sp_ml[i[0]] = i[1]\n # edge_x_g = [[] for i in range(len(sp_ml))]\n # edge_y_g = [[] for i in range(len(sp_ml))]\n # edge_w_g = [[] for i in range(len(sp_ml))]\n # for idx, item in enumerate(sp_ml):\n # for i1 in range(len(item)):\n # for i2 in range(i1 + 1, len(item)):\n # if item[i1, i2] != np.inf:\n # edge_x_g[idx].append(i1)\n # edge_y_g[idx].append(i2)\n # edge_w_g[idx].append(item[i1, i2])\n # print(len(edge_x_g[0]))\n # print(len(edge_y_g[0]))\n # print(len(edge_w_g[0]))\n\n Kmatrix = np.zeros((len(Gn), len(Gn)))\n\n # ---- use pool.imap_unordered to parallel and track progress. ----\n def init_worker(gn_toshare):\n global G_gn\n G_gn = gn_toshare\n do_partial = partial(wrapper_sp_do, ds_attrs, node_label, node_kernels) \n parallel_gm(do_partial, Kmatrix, Gn, init_worker=init_worker, \n glbv=(Gn,), n_jobs=n_jobs, verbose=verbose)\n\n\n # # ---- use pool.map to parallel. ----\n # # result_perf = pool.map(do_partial, itr)\n # do_partial = partial(spkernel_do, Gn, ds_attrs, node_label, node_kernels)\n # itr = combinations_with_replacement(range(0, len(Gn)), 2)\n # for i, j, kernel in tqdm(\n # pool.map(do_partial, itr), desc='calculating kernels',\n # file=sys.stdout):\n # Kmatrix[i][j] = kernel\n # Kmatrix[j][i] = kernel\n # pool.close()\n # pool.join()\n\n # # ---- use joblib.Parallel to parallel and track progress. ----\n # result_perf = Parallel(\n # n_jobs=n_jobs, verbose=10)(\n # delayed(do_partial)(ij)\n # for ij in combinations_with_replacement(range(0, len(Gn)), 2))\n # result_perf = [\n # do_partial(ij)\n # for ij in combinations_with_replacement(range(0, len(Gn)), 2)\n # ]\n # for i in result_perf:\n # Kmatrix[i[0]][i[1]] = i[2]\n # Kmatrix[i[1]][i[0]] = i[2]\n\n# # ---- direct running, normally use single CPU core. 
----\n# from itertools import combinations_with_replacement\n# itr = combinations_with_replacement(range(0, len(Gn)), 2)\n# for i, j in tqdm(itr, desc='calculating kernels', file=sys.stdout):\n# kernel = spkernel_do(Gn[i], Gn[j], ds_attrs, node_label, node_kernels)\n# Kmatrix[i][j] = kernel\n# Kmatrix[j][i] = kernel\n\n run_time = time.time() - start_time\n if verbose:\n print(\n \"\\n --- shortest path kernel matrix of size %d built in %s seconds ---\"\n % (len(Gn), run_time))\n\n return Kmatrix, run_time, idx", "def calc_full_log_likelihood(bp_events, node_membership, mu, alpha, beta, duration, num_classes,\n add_com_assig_log_prob=False):\n ll = 0\n for b_i in range(num_classes):\n for b_j in range(num_classes):\n bp_size = len(np.where(node_membership == b_i)[0]) * len(np.where(node_membership == b_j)[0])\n if b_i == b_j:\n bp_size -= len(np.where(node_membership == b_i)[0])\n\n ll += block_pair_conditional_log_likelihood(bp_events[b_i][b_j],\n mu[b_i, b_j], alpha[b_i, b_j], beta[b_i, b_j],\n duration, bp_size)\n\n if add_com_assig_log_prob:\n # Adding the log probability of the community assignments to the full log likelihood\n n_nodes = len(node_membership)\n _, block_count = np.unique(node_membership, return_counts=True)\n class_prob_mle = block_count / sum(block_count)\n rv_multi = multinomial(n_nodes, class_prob_mle)\n log_prob_community_assignment = rv_multi.logpmf(block_count)\n\n ll += log_prob_community_assignment\n\n return ll", "def forward(self, nodes):\n sec_level_conlved = []\n\n for node in nodes:\n\n first_neighs = list(self.user_to_users_social_adjacency[int(node)])\n\n sec_neighs = []\n for neigh_node in first_neighs:\n sec_neighs.append(self.user_to_users_social_adjacency[int(neigh_node)])\n\n sec_neighs_aggregate_to_first_neighs_feats = self.aggregator.forward(first_neighs, sec_neighs, self.userFeatsUVFlag, False)\n\n # self_feats_first = self.uv_updated_features(torch.LongTensor(first_neighs).cpu().numpy()).to(self.device)\n self_feats_first = self.user_embeddings.weight[first_neighs]\n self_feats_first = self_feats_first\n\n first_neighs_sec_neighs_feats = torch.cat([self_feats_first, sec_neighs_aggregate_to_first_neighs_feats], dim=1)\n\n first_neighs_sec_neighs_feats = F.relu(self.w1(first_neighs_sec_neighs_feats))\n first_neighs_sec_neighs_feats = F.relu(self.w2(first_neighs_sec_neighs_feats))\n\n sec_level_conlved.append(first_neighs_sec_neighs_feats)\n\n parentnodes_convolved_with_sec_level_convolves = self.aggregator.forward(nodes, sec_level_conlved, self.userFeatsUVFlag, True)\n\n nodes_self_features = self.uv_updated_features(torch.LongTensor(nodes.cpu().numpy())).to(self.device)\n nodes_self_features = nodes_self_features.t() #TODO\n\n convolved = torch.cat([nodes_self_features, parentnodes_convolved_with_sec_level_convolves], dim=1)\n convolved = F.relu(self.w_cnvlvd(convolved))\n\n return convolved", "def loglikelihood(self, context, continuation):\n pass", "def update_global_best(self, offsprings):\n\n # the fitness of the particles are calculated by their crowding distance\n crowding_distance(swarm)\n\n # the length of the leaders archive cannot be longer than the number of the initial population\n self.leaders += swarm\n self.leaders.truncate(self.options['max_population_size'], 'crowding_distance')\n # self.problem.archive += swarm\n\n return", "def run_louvain(fn, algorithm):\n df = read_exprs_as_df(fn)\n k = get_k(df)\n print(\"building graph...\")\n indices, adj_matrix = get_sparse_knn_graph(df, k, algorithm)\n igraph = 
convert_sparse_to_igraph(indices, adj_matrix)\n print(\"running louvainCluster find partition...\")\n part = louvain.find_partition(igraph, method='Modularity')\n exit(0)" ]
[ "0.5958453", "0.5573722", "0.5491155", "0.5486477", "0.54507476", "0.5270489", "0.5265971", "0.5214528", "0.5209087", "0.51547647", "0.5126377", "0.5099805", "0.5061796", "0.505253", "0.5044211", "0.49928075", "0.49840775", "0.49137068", "0.48985308", "0.48884547", "0.48820674", "0.48816693", "0.48752177", "0.4843013", "0.483866", "0.4832854", "0.48277947", "0.48255447", "0.48135883", "0.48097906" ]
0.5771196
1
Make Status comparable with self by code
def __eq__(self, other: Union[int, Status]):
    if isinstance(other, int):
        return self.code == other
    return isinstance(other, self.__class__) and self.code == other.code
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __eq__(self: \"Status\", other: \"Status\") -> bool: # type: ignore\n self_type = type(self)\n other_type = type(other)\n\n if self_type is InProgress and other_type is InProgress:\n return self.progress == other.progress # type: ignore\n else:\n return self_type == other_type", "def __lt__(self: \"Status\", other: \"Status\") -> bool:\n self_type = type(self)\n other_type = type(other)\n both_not_in_progress = not self.in_progress and not other.in_progress\n\n if both_not_in_progress and self_type is other_type:\n return False\n elif self_type is Failed:\n return True\n elif self_type is NotStarted and other_type in (InProgress, Succeeded):\n return True\n elif self_type is InProgress and other_type is InProgress:\n return self.progress < other.progress # type: ignore\n elif self_type is InProgress and other_type is Succeeded:\n return True\n else:\n return False", "def status(self, status: dict):\n pass", "def _get_status(self):\n return self.__status", "def __init__(self: \"Status\") -> None:\n raise NotImplementedError(\n \"Please instantiate one of the `Status` \"\n \"subclasses:\\n\"\n \"\\n\\t- `Failed`\"\n \"\\n\\t- `NotStarted`\"\n \"\\n\\t- `InProgress(progress)`\"\n \"\\n\\t- `Succeeded`\"\n )", "def compare_to(self, other) -> int:\n if self.id == other.id:\n return 0\n if self.status != other.status:\n return -1 if self.status < other.status else 1\n if self.last_played != other.last_played:\n return -1 if self.last_played < other.last_played else 1\n return -1 if self.id < other.id else 1", "def __eq__(self, other):\n if not isinstance(other, ResultStatus):\n return False\n\n return self.__dict__ == other.__dict__", "def status(self):", "def status(self, value):\n if self._status != value:\n self._status = value\n self._last_changed = now()\n self.status_events.notify(self.status_feedback)\n return self._status", "def check_status(self):", "def test_get_status(self) -> None:\n\n given = \"example.org\"\n\n # This is an abstract method. 
So we need to define it.\n self.checker.query_status = lambda: None\n\n self.checker.subject = given\n\n actual = self.checker.get_status()\n\n self.assertIsInstance(actual, CheckerStatusBase)", "def GetStatus(self):\r\n return self.status", "def __lt__(self, other):\n status = self.get_status()\n Ostatus = other.get_status()\n \n if status == Ostatus:\n return self.get_nickname() < other.get_nickname()\n \n if status == \"online\":\n return True\n elif status == \"away\" and Ostatus != \"online\":\n return True\n elif status == \"busy\" and Ostatus not in [\"online\", \"away\"]:\n return True\n else:\n return False", "def __eq__(self, other):\n if not isinstance(other, ServiceStatus):\n return False\n\n return self.to_dict() == other.to_dict()", "def _set_status(self, action, status):\n raise NotImplementedError(\"Base class: cannot be called directly\")", "def get_status(self):\n\n # update status\n # TODO: this needs to consider \"partial\" status based on the testcodes that are defined\n # in the panel.\n # get the condition OK aliquot condition instance\n result_item_cls = models.get_model(self._meta.app_label, 'resultitem')\n aliquot_condition_ok = AliquotCondition.objects.get_ok()\n if not self.aliquot.aliquot_condition:\n # how can this be ??\n status = 'ERROR'\n elif result_item_cls.objects.filter(result__order=self) or self.panel.panel_type == 'STORAGE':\n # test aliquot condition and set the order status\n if self.aliquot.aliquot_condition == aliquot_condition_ok:\n status = 'COMPLETE'\n else:\n # has results or is stored but condition is not 10\n # was this meant to be a storage panel?\n status = 'ERROR'\n elif self.aliquot.aliquot_condition != aliquot_condition_ok:\n status = 'REDRAW'\n else:\n status = 'PENDING'\n # regardless of status, check that order was not deleted on DMIS\n dmis_tools = DmisTools()\n if dmis_tools.is_withdrawn_order(self):\n # other aspects of result visibility must consider this value\n status = 'WITHDRAWN'\n return status", "def __eq__(self, other):\n if not isinstance(other, V1alpha1ApplicationStatus):\n return False\n\n return self.to_dict() == other.to_dict()", "def getStatus():", "def status(self):\n raise NotImplementedError()", "def set_status( code ):", "def __init__(self, p_code = 1, p_name = 'Nenhum'):\n\n if not isinstance(p_code, int):\n raise exception.Exception('Erro durante a instanciação da classe \"classes.Status\": O parâmetro \"p_code\" deve ser do tipo \"int\".')\n\n if not isinstance(p_name, str):\n raise exception.Exception('Erro durante a instanciação da classe \"classes.Status\": O parâmetro \"p_name\" deve ser do tipo \"str\".')\n\n self.code = p_code\n self.name = p_name", "def status(self, id):", "def change_status(self):\n if self.status == 'in progress':\n self.status = 'done'\n return self.status\n elif self.status == 'done':\n self.status = 'in progress'\n self.eisenhower_priority()\n return self.status", "def test_get_status(self):\n pass", "def test_get_status(self):\n pass", "def status(ABC) -> bool:", "def status(self):\n return status_dict[self._get_property_(self.STATUS).upper()]", "def _status_exists(self, cls=MySQLStatus):", "def updateStatus(self, status):\n pass", "def get_status(self):\n return self.status" ]
[ "0.6859543", "0.6738789", "0.6598233", "0.6417689", "0.6345078", "0.6310382", "0.63007295", "0.62662905", "0.61624473", "0.6147009", "0.6132704", "0.61120963", "0.61000186", "0.6042819", "0.6041936", "0.5995533", "0.5994983", "0.5986107", "0.59528095", "0.59277666", "0.5915933", "0.5911165", "0.59089494", "0.58934426", "0.58934426", "0.58831894", "0.5860683", "0.58260566", "0.58228785", "0.57827634" ]
0.6897788
0
failed reason of the bulk insert task.
def failed_reason(self):
    return self._infos.get(BulkInsertState.FAILED_REASON, "")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def import_failure_reason(self) -> str:\n return pulumi.get(self, \"import_failure_reason\")", "def _message_failed_job(self):\n self.ensure_one()\n return _(\"Something bad happened during the execution of the job. \"\n \"More details in the 'Exception Information' section.\")", "def bulk_write_error(err):\n success = False\n message = \"BulkWriteError\"\n pprint(err.details)\n mongo_return_obj = MongoReturn(success=success, message=message, db_exception=err)\n return mongo_return_obj", "def report_bulk(results):\n from pprint import pprint\n pprint(results)\n if results.get('writeConcernErrors', []) or results.get('writeErrors', []):\n raise BulkFailure(\"Failed on bulk insertion\")", "def failed(self, id, err=''):\n\n records = self.db.get_table()\n index = -1\n\n for i in range(0, len(records)):\n if str(records[i][\"id\"]) == str(id):\n index = i\n \n if index == -1:\n return None\n\n records[index][\"status\"] = \"failed\"\n if 'end-time' in records[index]:\n records[index][\"end-time\"] = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n if 'comments' in records[index]:\n records[index][\"comments\"] += \" failed{ \" + err + \" };\"\n\n self.db.update_row(index, records[index])\n\n _log.info('Test %s marked as failed with message %s.' % (str(id), str(err)))\n \n return records[index]", "def test_bulk_group_errors(self):\n file_path = os.path.join(os.path.dirname(__file__),\n self.testdata_folder,\n self.filename_actg_missing_col)\n data = {\n 'bulk_upload' : open(file_path, 'rb'),\n }\n res = self.client.post(self.ag_url, data)\n assert res.status_code == status.HTTP_400_BAD_REQUEST", "def transaction_failed(self):", "def failed(self):\n\t\tpass", "def transaction_failed_before_processing(self):", "def on_task_failure(task, exc, task_id, args, kwargs, einfo):\n message = 'Task {} failed w/ args: {}, {}\\n{}'\n log.error(message.format(task.name, args, kwargs, einfo.traceback))", "def drop_failed(self, item, line_reference, reason=''):\n logger.warning(\n f'Dropping failed {line_reference} from import job \"{self.job}\" run {self.timestamp}: {reason}'\n )\n self.failed_items.append({\n 'id': getattr(item, 'identifier', line_reference),\n 'timestamp': datetimestamp(digits_only=False),\n 'title': getattr(item, 'title', ''),\n 'uri': getattr(item, 'uri', ''),\n 'reason': reason\n })", "def failed(self):\n output = self.__call__()\n return output.failed", "def add_failure(self, task: Task, exception: Any) -> None: # noqa: DAR101\n super().add_failure(task, exception)\n self._add_summary(task, _TaskExitCode.FAIL)", "def error_reason(self):\n return self._error_reason", "def error_message(self):\n summary = format(\"%i out of %s failed unexpectedly:\",\n self.pool.num_failed,\n pluralize(self.pool.num_commands, \"command\"))\n details = \"\\n\".join(\" - %s\" % cmd.error_message for cmd in self.commands)\n return summary + \"\\n\\n\" + details", "def test_bulk_actor_errors(self):\n file_path = os.path.join(os.path.dirname(__file__),\n self.testdata_folder,\n self.filename_actor_w_errors_xlsx)\n data = {\n 'bulk_upload' : open(file_path, 'rb'),\n }\n\n res = self.client.post(self.actor_url, data)\n assert res.status_code == status.HTTP_400_BAD_REQUEST", "def test_create_task_invalid_task_id_error(self):\n task_id = \"unk\"\n rv = TEST_CLIENT.post(\n TASK_ROUTE,\n json={\n \"copyFrom\": task_id,\n },\n )\n result = rv.json()\n\n expected = {\n \"message\": \"source task does not exist\",\n \"code\": \"InvalidTaskId\",\n }\n self.assertDictEqual(expected, result)\n 
self.assertEqual(rv.status_code, 400)", "def execute_failure(self, *args, **kwargs):\n return 1, \"\", None", "def failed(self, message=None):\n doc = {self.STATE: self.STATE_FAILED}\n\n if message:\n doc.update({self.ERROR_MESSAGE: message})\n\n self.update(doc)", "def _on_task_fail(self, exc):\n LOG.info(\"We could cleanup some resources or log the error\")\n raise exc", "def print_failed(self):\n # Construct the message dynamically based on the instance_type\n msg = colored(\"FAIL\", \"red\") + f\" | [ERROR] {self.message}\"\n if self.instance_type == \"FILE\":\n msg += f\" [{self.instance_type}] {self.instance_location}/{self.instance_name}\"\n\n elif self.instance_type == \"HOST\":\n msg += f\" [{self.instance_type}] {self.instance_hostname}\"\n\n msg += f\" [PROPERTY] {':'.join(str(item) for item in self.absolute_path)}\"\n\n # print the msg\n print(msg)", "def test_insert_failed_result_and_retrieve(self):\n self.db.insert_single_result(self.failedresult)\n result = self.db.get_result_by_primary_key(pk=self.failedresult.get('id'))\n self.assertDictContainsSubset(self.failedresult, result.__dict__)", "def task_error(t_msg):\n print 'ERROR: ' + t_msg + ': ' + traceback.format_exc()\n TaskComm.set_state('ERROR', t_msg + ': ' + traceback.format_exc())", "def fail(self, msg=None):\n raise Exception, msg", "def task_failed(task_state):\n return task_state in {\"TASK_LOST\", \"TASK_KILLED\", \"TASK_FAILED\", \"TASK_TERMINATING\"}", "def creation_error(src_dict: Dict[str, List[str]], e: str):\n return \"LED Group error in %s: %s\\n)\" % (json.dumps(src_dict), e)", "def _log_failed(cls, count):\n MetricLogger.log_metric(FUNCTION_NAME, MetricLogger.SQS_FAILED_RECORDS, count)", "def failed_items(self) -> ItemLog:\n if self._failed_items is None:\n self._failed_items = ItemLog(self.dir / 'dropped-failed.log.csv', DROPPED_FAILED_FIELDNAMES, 'id')\n return self._failed_items", "def task_failed(self, worker_name, error):\n self.status = 'failed'\n self.modification_time = current_millis()\n self.message = '{} (worker): {}'.format(worker_name, error)\n return self", "def failure(self, error):\n print \"comm failed Reason:\", error\n return error" ]
[ "0.68418854", "0.62447983", "0.6241091", "0.61471653", "0.6106991", "0.6001903", "0.59929", "0.5870454", "0.5813409", "0.5633343", "0.56094086", "0.55935335", "0.55862993", "0.55839294", "0.5577333", "0.55691516", "0.5561922", "0.551973", "0.55102104", "0.55077887", "0.5504956", "0.54963124", "0.54255366", "0.5424015", "0.54170173", "0.5406048", "0.54001826", "0.5392025", "0.53577006", "0.5351526" ]
0.83458185
0
target collection's name of the bulk insert task.
def collection_name(self):
    return self._infos.get(BulkInsertState.IMPORT_COLLECTION, "")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def task_name(self):\n pass", "def thread_insert_data(collection_to_insert, collection_name):\n for item in collection_to_insert:\n insert_data_to_collection(item, collection_name)", "def insert_task():\n try:\n task = get_task_from_request_form(request)\n result = mongo.db.tasks.insert_one(task)\n return json_util.dumps(get_task_by_id(result.inserted_id))\n except Exception as err:\n abort(400)", "def task_name(self) -> str:\n return self._task_name", "def use(target, name):\n return \"You insert the \" + name + \" into \" + target.name", "def insert(self, collection_name, instance):\n assert isinstance(instance, SiteStatistics)\n collection = self.ds.connection(collection_name)\n return collection.insert_one(instance.document).inserted_id", "def get_outcome_coll_name(self, outcome, collection):\n return outcome[\"collection\"].get(\"name\", collection.name)", "def task(self, name):\n pass", "def replica_set_name(self):\n ...", "def collection_key(self):\n return '%s:collection' % self._model._name", "def insert_school(mongo_collection, **kwargs):\n return mongo_collection.insert_one(kwargs).inserted_id", "def insert_project(name, loc, url, collection):\n entry = {\n \"name\": name,\n \"location\": loc,\n \"url\": url,\n \"date\":datetime.utcnow()\n }\n\n collection.insert(entry)", "def insert(task):\n tasks.insert_one(task.__dict__)", "def getTaskName(self):\n return self._taskName", "def fixture_name(self):\n return 'amino_acid_insertion'", "def TaskBaseName(cls, task):\n if not task: return None\n return os.path.basename(task)", "def collection_id(self) -> str:\n return pulumi.get(self, \"collection_id\")", "def insert_school(mongo_collection, **kwargs):\n\n id_ = mongo_collection.insert_one(kwargs).inserted_id\n\n return id_", "def dataInsert(self, collectionName, data):\n result = collectionName.insert(data)\n return result", "def get_target(self):\n task = self.task.get_task(self.task_id)\n if 'name' in task:\n return str(task['name'])\n return str(task)", "def insert(db_name, collection_name, docs):\n db = client[db_name]\n collection = db[collection_name]\n return collection.insert_many(docs)", "def get_task_id(self):\n if self.task_id:\n return self.task_id\n return (f'{self.task_type}_{self.get_source_system().lower()}'\n f'.{self.get_source_subsystem().lower()}.{self.get_name().upper()}')", "def gen_task_name(app, name, module_name):\n ...", "def get_objectName(self):\n return self.collection.name", "def insert_school(mongo_collection, **kwargs):\n doc = mongo_collection.insert_one(kwargs)\n return doc.inserted_id", "def fixture_name(self):\n return \"coding_dna_insertion\"", "def partition_name(self):\n return self._infos.get(BulkInsertState.IMPORT_PARTITION, \"\")", "def insert_school(mongo_collection, **kwargs):\n result = mongo_collection.insert_one(kwargs)\n return result.inserted_id", "def task_label(self) -> str:\n label = str(self.request.id) if self.request.id else self.name\n label += '_%d' % self.request.retries if self.request.retries >= 1 else ''\n return label", "def add_collection(db_name, collection_name):\n db = client[db_name]\n collection = db[collection_name]\n return collection" ]
[ "0.6016323", "0.55878556", "0.54873735", "0.54751295", "0.5362512", "0.53117585", "0.52913535", "0.52896", "0.52852416", "0.5263717", "0.5206333", "0.51969993", "0.5184043", "0.5139039", "0.5137758", "0.51221275", "0.50949633", "0.5092773", "0.5090364", "0.50835437", "0.5053653", "0.5023366", "0.5021643", "0.49957994", "0.49668312", "0.4945914", "0.49359694", "0.4926342", "0.4911389", "0.49077672" ]
0.7637074
0
target partition's name of the bulk insert task.
def partition_name(self):
    return self._infos.get(BulkInsertState.IMPORT_PARTITION, "")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def task_name(self):\n pass", "def task_name(self) -> str:\n return self._task_name", "def TaskBaseName(cls, task):\n if not task: return None\n return os.path.basename(task)", "def partitionname(self) :\n\t\ttry :\n\t\t\treturn self._partitionname\n\t\texcept Exception as e:\n\t\t\traise e", "def get_target(self):\n task = self.task.get_task(self.task_id)\n if 'name' in task:\n return str(task['name'])\n return str(task)", "def getTaskName(self):\n return self._taskName", "def collection_name(self):\n return self._infos.get(BulkInsertState.IMPORT_COLLECTION, \"\")", "def get_task_id(self):\n if self.task_id:\n return self.task_id\n return (f'{self.task_type}_{self.get_source_system().lower()}'\n f'.{self.get_source_subsystem().lower()}.{self.get_name().upper()}')", "def generateTaskName(self):\n brokenComponent = ['head','hand','leg','body','hand','leg']\n for component in brokenComponent:\n self.enqueue(Task(component))", "def use(target, name):\n return \"You insert the \" + name + \" into \" + target.name", "def gen_task_name(app, name, module_name):\n ...", "def task(self, name):\n pass", "def get_target_simple(self):\n task = self.task.get_task(self.task_id)\n return str(task['name'])", "def generate_importer_base_name(dependent_task_name: str,\n input_name: str) -> str:\n return 'importer-{}-{}'.format(dependent_task_name, input_name)", "def _get_target_name(self, n, k, att, pol, emb_dim):\n threshold = str(int(self.threshold * 10))\n agg_name = \"_{}_{}_{}_{}_{}_{}\".format(n, k, att, pol, emb_dim, threshold)\n target_file = self.source_file[:-4] + agg_name + \".csv\"\n return target_file", "def task_definition_arn(self) -> str:\n return pulumi.get(self, \"task_definition_arn\")", "def task(self, **task):\n task[\"name\"] = task[\"name\"].replace(\"=\", \"--\")\n return task", "def task_label(self) -> str:\n label = str(self.request.id) if self.request.id else self.name\n label += '_%d' % self.request.retries if self.request.retries >= 1 else ''\n return label", "def task(self) -> str:\n return self._task", "def fixture_name(self):\n return 'amino_acid_insertion'", "def task_name(self, task_name):\n\n self._task_name = task_name", "def taskid(self):\n raise NotImplementedError('Must be implemented by subclass.')", "def fixture_name(self):\n return \"genomic_deletion_range\"", "def TaskNormalizedName(cls, task):\n abs_path = FileUtils.GetAbsPathForFile(task)\n if abs_path: return abs_path\n return task", "def partition_id(self) -> 'outputs.PreventionJobTriggerInspectJobStorageConfigDatastoreOptionsPartitionId':\n return pulumi.get(self, \"partition_id\")", "def get_dest_name ( self ):\n return self.filename", "def TaskRelativeName(cls, task):\n if not task: return None\n return os.path.relpath(cls.TaskNormalizedName(task),\n PipelineConfig.Instance().pipeline_base_dir())", "def _getSparkDlOpName(self, tensor_name):\n op_name = tfx.op_name(tensor_name)\n return tfx.add_scope_to_name(scope=self.SPARKDL_OP_SCOPE, name=op_name)", "def partition_key(self) -> str:\n return pulumi.get(self, \"partition_key\")", "def _dataset_name(self):\n return f'Libri{self.task}Mix'" ]
[ "0.642006", "0.5991149", "0.57977974", "0.5696203", "0.5643535", "0.56396157", "0.5575479", "0.55660444", "0.5543619", "0.5543344", "0.5462731", "0.5421342", "0.5402544", "0.5358951", "0.5286111", "0.52766335", "0.52726984", "0.52697146", "0.5253021", "0.52260923", "0.5208673", "0.52072924", "0.5204051", "0.5196173", "0.5168955", "0.51562965", "0.5135744", "0.5125396", "0.5121342", "0.5120271" ]
0.71681
0
A readable string converted from the timestamp when this task is created.
def create_time_str(self):
    ts = time.localtime(self._create_ts)
    return time.strftime("%Y-%m-%d %H:%M:%S", ts)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def creation_time_str(self):\n return \"%Y/%m/%d %I:%M:%S\".format(self.creation_time)", "def creation_timestamp(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"creation_timestamp\")", "def created_time(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"created_time\")", "def created_time(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"created_time\")", "def __get_timestamp() -> str:\n return str(datetime.now().astimezone())", "def __get_timestamp() -> str:\n return str(datetime.now().astimezone())", "def creation_timestamp(self) -> str:\n return pulumi.get(self, \"creation_timestamp\")", "def creation_timestamp(self) -> str:\n return pulumi.get(self, \"creation_timestamp\")", "def creation_timestamp(self) -> str:\n return pulumi.get(self, \"creation_timestamp\")", "def creation_time(self) -> str:\n return pulumi.get(self, \"creation_time\")", "def creation_time(self) -> str:\n return pulumi.get(self, \"creation_time\")", "def creation_time(self) -> str:\n return pulumi.get(self, \"creation_time\")", "def create_time(self) -> str:\n return pulumi.get(self, \"create_time\")", "def create_time(self) -> str:\n return pulumi.get(self, \"create_time\")", "def create_time(self) -> str:\n return pulumi.get(self, \"create_time\")", "def create_time(self) -> str:\n return pulumi.get(self, \"create_time\")", "def create_time(self) -> str:\n return pulumi.get(self, \"create_time\")", "def create_time(self) -> str:\n return pulumi.get(self, \"create_time\")", "def create_time(self) -> str:\n return pulumi.get(self, \"create_time\")", "def create_time(self) -> str:\n return pulumi.get(self, \"create_time\")", "def create_time(self) -> str:\n return pulumi.get(self, \"create_time\")", "def create_time(self) -> str:\n return pulumi.get(self, \"create_time\")", "def create_time(self) -> str:\n return pulumi.get(self, \"create_time\")", "def create_time(self) -> str:\n return pulumi.get(self, \"create_time\")", "def create_time(self) -> str:\n return pulumi.get(self, \"create_time\")", "def create_time(self) -> str:\n return pulumi.get(self, \"create_time\")", "def create_time(self) -> str:\n return pulumi.get(self, \"create_time\")", "def create_time(self) -> str:\n return pulumi.get(self, \"create_time\")", "def create_time(self) -> str:\n return pulumi.get(self, \"create_time\")", "def create_time(self) -> str:\n return pulumi.get(self, \"create_time\")" ]
[ "0.7777843", "0.73781943", "0.73427457", "0.73427457", "0.733272", "0.733272", "0.73199356", "0.73199356", "0.73199356", "0.7313272", "0.7313272", "0.7313272", "0.7303396", "0.7303396", "0.7303396", "0.7303396", "0.7303396", "0.7303396", "0.7303396", "0.7303396", "0.7303396", "0.7303396", "0.7303396", "0.7303396", "0.7303396", "0.7303396", "0.7303396", "0.7303396", "0.7303396", "0.7303396" ]
0.7573687
1
Creates a User based on input from HTML page, returns a confirmation of their login and what is currently in their pantry.
def make_user():
    names = request.args.get('names', 1, type=str)  # raw text input from HTML page
    global db
    global current_user
    current_user = User(names, db)
    # Adding the user to the db occurs in the user class,
    # only in the get_pantry method
    str_pantry = current_user.get_pantry()
    if str_pantry == "":  # if current user doesn't have a pantry, return a string that states this
        return jsonify(name=current_user.name, pantry=" No Pantry")
    list_ingredients = ast.literal_eval(str_pantry)  # Convert str to list
    str_pantry = " Pantry: " + list_ingredients[0]
    for i in range(1, len(list_ingredients)):
        str_pantry += ", " + list_ingredients[i]
    return jsonify(name=current_user.name, pantry=str_pantry)  # returns name and list of ingredients in pantry to HTML page
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_user():\n if request.method == 'POST':\n PLAN.create_user(request.form['fname'],\n request.form['lname'],\n request.form['username'],\n request.form['password'],\n request.form['email'])\n return redirect(url_for('index'))\n return render_template('newuser.html')", "def signup():\n return render_template(\"new_user.html\")", "def make_new_user():\n return render_template('users/new_user_form.html')", "def new_user():\n pass", "def goto_make_new_user():\n\n return render_template('users/new.html')", "def create_user():\n\n return render_template(\"users/create_user.html\")", "def new_user():\n\n username = request.json['username']\n if len(username) < 4:\n return '1'\n ds = \"'\\\\\\\"%}{\"\n for i in ds:\n if i in username:\n return '1'\n rem = r\"(^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\\.[a-zA-Z0-9-.]+$)\"\n email = request.json['email']\n if re.fullmatch(rem, email) == None:\n return '1'\n password = request.json['password']\n if len(password) != 64:\n return '1'\n _ = db.register(username, email, generate_password_hash(password))\n if _ == 0:\n return '0'\n else:\n return '2'", "def input_and_create_user(self):\n print(\"Please input username!\")\n new_username = input()\n new_user = user.User(new_username)\n self.users.append(new_user)", "def greet_user():\n username = get_user()\n\n if username:\n print(f\"Välkommen tillbaks {username.title()}!!\")\n else:\n username = create_new_user()\n print(f\"Vi kommer ihåg dig när du återvänder {username}\")", "def show_new_user_page():\n\n return render_template(\"new_user.html\")", "def sign_up():\n #POST - the info coming from the sign-up-form\n\n #get username and password that was filled in sign-up form\n #if username exits - flash \"username taken\" and redirct to /sign-up-form\n\n #else save the new user to the database - user table, flash success message\n #and redirect back to /more-details/cat_id", "def CreateAccount():\n \n if not self.CreateAccount():\n return\n \n # Offer to log the new user account in\n ask = messagebox.askyesno('Success!',\n f'Account created. 
Log in as {username}?')\n if ask:\n # Save data to the file and load the main program\n self.SaveData()\n self.main_frame.destroy()\n MainWindow.MainWindow(self, username, login_date=None)\n else:\n # Clear variable fields and return to initial 'Log In' window\n self.username.set('')\n self.password.set('')\n self.confirm_pass.set('')\n Return()", "def signup():", "def create_user(self) -> 'outputs.ActingUserResponse':\n return pulumi.get(self, \"create_user\")", "def render_create_user_page():\n\n return render_template(\"create_user.html\")", "def create_user():\n form = UserForm(prefix='register')\n\n if not form.validate_on_submit():\n flash('Invalid input.', 'warning')\n return view_index(form)\n else:\n user, exists = db_insert_or_get(User, name=form.name.data, defaults={'password': form.password.data})\n if exists:\n flash('Username taken.', 'warning')\n else:\n db.session.commit()\n\n session['user_name'] = user.name\n app.logger.info('User %s created successfully.', user.name)\n flash('User created successfully.', 'success')\n\n return redirect(url_for('view_index'))", "def create_user() -> tuple:\n # created new user\n user_data: dict = request.get_json()\n names: str = user_data.get(\"names\")\n surname: str = user_data.get(\"surname\")\n cell: str = user_data.get(\"cell\")\n email: str = user_data.get(\"email\")\n password: str = user_data.get(\"password\")\n uid: str = user_data.get(\"uid\")\n organization_id: str = user_data.get(\"organization_id\")\n\n # Add User View will perform error checking\n return user_view.add_user(organization_id=organization_id, uid=uid, names=names, surname=surname,\n cell=cell, email=email, password=password)", "def process_signup():\n\n\temail = request.form.get('email');\n\tpassword = request.form.get('password');\n\n\tif email:\n\t\tnew_user = model.User(email=email, password=password)\n\t\tmodel.session.add(new_user)\n\t\tmodel.session.commit()\n\t\tsession['email'] = email\t\n\n\treturn render_template(\"signup.html\")", "def popupNewUser():\n dialog = gtk.Dialog(parent=gui,\n flags=gtk.DIALOG_MODAL | gtk.DIALOG_DESTROY_WITH_PARENT,\n buttons=(gtk.STOCK_CANCEL, gtk.RESPONSE_REJECT,\n gtk.STOCK_OK, gtk.RESPONSE_ACCEPT))\n labelu = gtk.Label(_(\"Enter user ('login') name:\"))\n labelu.set_alignment(0.0, 0.5)\n dialog.vbox.pack_start(labelu)\n labelu.show()\n entryu = gtk.Entry(max=20)\n dialog.vbox.pack_start(entryu)\n entryu.show()\n label = gtk.Label(_(\"Enter new password:\"))\n label.set_alignment(0.0, 0.5)\n dialog.vbox.pack_start(label)\n label.show()\n entry = gtk.Entry(max=20)\n entry.set_visibility(False)\n dialog.vbox.pack_start(entry)\n entry.show()\n label2 = gtk.Label(_(\"Reenter new password:\"))\n label2.set_alignment(0.0, 0.5)\n dialog.vbox.pack_start(label2)\n label2.show()\n entry2 = gtk.Entry(max=20)\n entry2.set_visibility(False)\n dialog.vbox.pack_start(entry2)\n entry2.show()\n pw = None\n user = None\n while (dialog.run() == gtk.RESPONSE_ACCEPT):\n v = entry.get_text()\n if (v == entry2.get_text()):\n pw = v\n user = entryu.get_text()\n break\n error(_(\"The passwords do not match.\"))\n dialog.destroy()\n return (user, pw)", "def create_account():\n if request.method == 'POST':\n username = request.form['username']\n password = request.form['password']\n\n user = create_user(username, password)\n\n if not user:\n return redirect(url_for('login'))\n\n session['username'] = user.username\n session['user_id'] = user.id\n session['logged_in'] = True\n session['is_admin'] = user.is_admin\n\n return 
redirect(url_for('index'))\n\n return render_template('createaccount.html')", "def register_new_user():\n register_form = UserAddForm()\n login_form = LoginForm()\n\n if register_form.validate_on_submit():\n try:\n user = User.signup(\n email=register_form.new_email.data,\n password=register_form.new_password.data,\n username=register_form.new_username.data,\n first_name=register_form.first_name.data.capitalize(),\n last_name=register_form.last_name.data.capitalize(),\n image_url=register_form.image_url.data or User.image_url.default.arg,\n cover_url=register_form.cover_url.data or User.cover_url.default.arg\n )\n db.session.commit()\n\n do_login(user)\n return redirect('/')\n except IntegrityError:\n flash(\n \"Email or username already registered! Please log in or try again\", 'danger')\n return render_template('home_anon.html', register_form=register_form, login_form=login_form)\n\n else:\n return render_template('home_anon.html', register_form=register_form, login_form=login_form)", "def createuser (d={'user','AAuser'}):\n print ('*creating:',d)\n x=\"//*[@value='Add User...']\"; g.wait.until(EC.element_to_be_clickable((By.XPATH, x))).send_keys(Keys.RETURN)\n x=\"//*[@name='name']\"; e = g.wait.until(EC.element_to_be_clickable((By.XPATH, x))); e.clear(); e.send_keys(d.get('user'))\n x=\"//*[@name='password']\"; e = g.driver.find_element(By.XPATH, x); e.clear(); e.send_keys('optimize')\n x=\"//*[@name='password-confirm']\"; e = g.driver.find_element(By.XPATH, x); e.clear(); e.send_keys('optimize')\n x=\"//*[@name='firstName']\"; e = g.driver.find_element(By.XPATH, x); e.clear(); e.send_keys('firstName')\n x=\"//*[@name='lastName']\"; e = g.driver.find_element(By.XPATH, x); e.clear(); e.send_keys(d.get('ln','lastName'))\n x=\"//*[@name='email']\"; e = g.driver.find_element(By.XPATH, x); e.clear(); e.send_keys(d.get('email',d.get('user')+'@softwareag.com'))\n x=\"//*[@name='submitbutton']\"; g.driver.find_element(By.XPATH, x).send_keys(Keys.RETURN)\n x=\"//*[@name='name' and @type='hidden' and @value='\"+d.get('user')+\"']\"; g.wait.until(EC.presence_of_element_located((By.XPATH, x)))\n x=\"//*[@name='cancelbutton']\"; g.wait.until(EC.element_to_be_clickable((By.XPATH, x))).send_keys(Keys.RETURN)\n x=\"//*[@value='Add User...']\"; g.wait.until(EC.element_to_be_clickable((By.XPATH, x))) #fuzzy check, when (many) users paged off\n #x=\"//*/a[text() = '\"+d.get('user')+\"']\"; g.wait.until(EC.element_to_be_clickable((By.XPATH, x))) #strict check, all users on (same) page", "def create_user_form():\n template_name = \"create_user.html\"\n users = []\n print request.form\n\n flash(request.form['username'])\n flash(request.form['email'])\n\n return render_template(template_name, users=users)", "def formalize_user():\n print(request.get_json())\n username = request.get_json()['username']\n passwd = username = request.get_json()['passwd']\n # Check if the user exists by comparing the username\n # this contains the registered email\n existing_user = storage.filter_by(User, 'username', username)\n if not existing_user:\n user = storage.get(User, request.user)\n user.username = username\n user.passwd = passwd\n user.save()\n return jsonify(message='Success')\n return jsonify(message='Error creating user'), 309", "def user():", "def register_page():\n form = addUser()\n\n if form.validate_on_submit():\n username=form.username.data\n password=form.password.data\n email=form.email.data\n first_name=form.first_name.data\n last_name=form.last_name.data\n \n new_user = User.register(username=username, 
password=password, email=email, first_name=first_name, last_name=last_name)\n\n db.session.add(new_user)\n db.session.commit()\n\n session[\"user\"] = new_user.username\n return redirect(f'/users/{username}')\n else:\n return render_template(\"reg_form.html\", form=form)", "def insert_user(request, **kwargs):\n rp = request.POST\n the_new_user = User(\n username=request.POST['username'],\n password='',\n first_name=rp['first_name'],\n last_name=rp['last_name']\n )\n\n if (rp['password'] == '' or rp['password_2'] == \"\"\n or rp['password'] != rp['password_2']):\n kwargs[\n 'error_message'] = 'Please type the new user\\'s password twice.'\n return back_to_new_user(request, **kwargs)\n\n if len(User.objects.filter(username=rp['username'])) > 0:\n error_message = (\n 'Sorry, %s is already in use. Please try another name.'\n % rp['username'])\n return back_to_new_user(\n request, first_name=rp['first_name'],\n last_name=rp['last_name'], username='',\n error_message=error_message)\n\n the_new_user.set_password(rp['password'])\n the_new_user.save()\n\n error_message = 'Health worker user %s was created.' % rp['username']\n return (\n back_to_edit_user(\n request,\n the_user=the_new_user,\n error_message=error_message)\n )", "def create_user_questionnaire_in_progress(self):\n username = 'pseudo'\n email = '[email protected]'\n password = '00000000'\n user_created = self.user.objects.create_user(id=2, username=username,\n email=email, password=password)\n HistoryUser.objects.create(user=user_created)\n StatusUser.objects.create(user=user_created)\n list_advice_id = [1, 5, 10]\n self.add_advice_to_user_created(user_created, list_advice_id)\n\n return user_created", "def add_new_user():\n return render_template('new.html')", "def create_user(name, email):\n user = register(name, email)\n add_message(user=user, text=config.MSG_WELCOME)\n add_message(user=user, text=config.MSG_UNVERIFIED, can_dismiss=False)\n return user" ]
[ "0.67911476", "0.6687623", "0.6670097", "0.6576688", "0.65273166", "0.6521119", "0.65059036", "0.63679117", "0.6336169", "0.62900233", "0.6285918", "0.6281631", "0.6260428", "0.62550753", "0.6240885", "0.62408364", "0.62045276", "0.620404", "0.6189837", "0.6176125", "0.616785", "0.6131409", "0.60975045", "0.60701746", "0.60592747", "0.6053005", "0.603742", "0.6036164", "0.60317135", "0.60149956" ]
0.6709298
1
Given a list of ingredients, adds these ingredients to current user's pantry.
def update_pantry():
    pantry_ingredients = request.args.get('pantry', '', type=str) #raw input from HTML page of ingredients
    global current_user
    current_user.pantry.make_pantry(pantry_ingredients) #calls recipe_program function make_pantry()
    current_user.pantry.save_pantry()
    return jsonify(pantry = pantry_ingredients); #returns list of new pantry ingredients to HTML page
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_ingredients(self, ingredients: [Ingredient]):\n self.ingredients = ingredients", "def add_ingredient_to_recipe(cls, new_count, ingredients_dict, recipe_id):\n\n for i in range(1, (new_count+1)):\n item = ingredients_dict[i][0]\n measure = ingredients_dict[i][1]\n prepnotes = ingredients_dict[i][2]\n qty = ingredients_dict[i][3]\n\n new_ingredient = Ingredient(recipe_id=recipe_id, item=item, quantity=qty,\n measure=measure, prep_notes=prepnotes)\n\n db.session.add(new_ingredient)\n db.session.commit()\n print \"You successfully added ingredients!\"", "def _show_ingredient_list(self):\n if self._ingredients_view:\n self._ingredients_view.destroy()\n\n username = self.food_service.get_user().get_username()\n ingredients = self.food_service.list_added_ingredients(username, expire=True)\n self._ingredients_view = IngredientsView(\n self._ingredients_frame,\n ingredients,\n self._handle_mark\n )\n\n self._ingredients_view.pack()", "def sample_ingredients(user, name='pepper'):\n return Ingredient.objects.create(user=user, name=name)", "def add_recipes(self, recipes):\n\n if isinstance(recipes, list):\n self.recipes.extend(recipes)\n else:\n self.recipes.append(recipes)", "def getIngredients():\n ingredients = ['Whiskey', 'Tequila', 'Vodka', 'Blue Curacao', 'Orange Juice',\n 'Pineapple Juice', 'Cranberry Juice', 'Sour Mix']\n return ingredients", "def create_recipe(current_user):\n data = request.get_json()\n\n try:\n for item in data:\n new_recipe = Recipe(\n name=item['name'],\n text=item['text'],\n author=current_user\n )\n for ingredient_item in item['ingredients']:\n # check for an existing ingredient\n new_ingredient = Ingredient.query.filter(Ingredient.name.ilike(ingredient_item)).first()\n if not new_ingredient:\n new_ingredient = Ingredient(name=ingredient_item)\n db.session.add(new_ingredient)\n db.session.commit()\n\n # either way create a relationship\n new_recipe.used.append(new_ingredient)\n \n db.session.commit()\n except:\n return jsonify({'message': 'Invalid or missing attributes'}), 400\n\n\n return jsonify({'message': 'Recipe/s successfully created'})", "def format_ingredients(self, ingredients):\n\t\tto_replace = {'cointreau': 'triple sec', '&': 'and'}\n\t\tupdated_ingredients = []\n\t\tfor i in ingredients:\n\t\t\ti = i.lower().strip()\n\t\t\tif i in to_replace:\n\t\t\t\tingredient = to_replace[i]\n\t\t\telse:\n\t\t\t\tingredient = i\n\t\t\tupdated_ingredients.append(ingredient)\n\t\treturn updated_ingredients", "def sample_ingredient(user, name = 'Cinnamon'):\n return Ingredient.objects.create(user=user, name=name)", "def sample_ingredient(user, name='Cinemon'):\n return Ingredient.objects.create(user=user, name=name)", "def sample_ingredient(user, name='Carrot'):\n return Ingredient.objects.create(user=user, name=name)", "def sample_ingredient(user,name='cinamon'):\n return Ingredient.objects.create(user=user,name=name)", "def add_animals(self, *args):\n if self.validate_requirements(args):\n [self.animals.append(arg) for arg in args]\n else:\n print(\"foobar\")", "def add_animals(self, ini_list):\n for dictionary in ini_list:\n try:\n loc = dictionary[\"loc\"]\n except KeyError:\n print(\"No location specified\")\n break\n pop_list = dictionary[\"pop\"]\n\n cell_type = self.map[loc]\n\n if type(cell_type).__name__ not in self.allowed_cells:\n raise ValueError(f\"This cell location is inhabitable\")\n\n cell_type.add_animal(pop_list)", "def sample_ingredient(user,name = 'cinnoan'):\n return Ingredient.objects.create(user=user,name=name)", "def 
create_sample_ingredient(user, name=\"cinnamon\"):\n return Ingredient.objects.create(custom_user=user, name=name)", "def add_ingredient(self, ingredient_str, matching_recipes, user_doc):\n ingredient_doc = {\n 'type': 'ingredient',\n 'name': self.get_unique_ingredients_name(ingredient_str),\n 'recipes': matching_recipes\n }\n ingredient_doc = self.add_doc_if_not_exists(ingredient_doc, 'name')\n self.record_ingredient_request_for_user(ingredient_doc, user_doc)\n return ingredient_doc", "def fridge_recipes(request):\n\n user = request.user\n fridge = Fridge.objects.get_or_create(user=user)[0]\n fridge_ingredients = fridge.ingredients.all()\n ingredient_names = [ingredient.name for ingredient in fridge_ingredients]\n recipes = recipes_containing(ingredient_names, fridge=fridge)\n\n content = {\n 'ingredients': ingredient_names,\n 'recipes': recipes,\n }\n\n return render(request, 'fridge/fridge_recipes.html', content)", "def insert_recipe():\r\n if \"user\" in session:\r\n author = coll_users.find_one({\"username_lower\": session[\"user\"]})[\"_id\"]\r\n # Split the ingredients and preparation steps into lists\r\n\r\n ingredients = request.form.get(\"ingredients\").splitlines()\r\n prepSteps = request.form.get(\"prepSteps\").splitlines()\r\n\r\n # Recipe JSON object\r\n submission = {\r\n \"cuisineType\": request.form.get(\"cuisineType\"),\r\n \"courseType\": request.form.get(\"courseType\"),\r\n \"recipeName\": request.form.get(\"recipe_name\"),\r\n \"recipeDesc\": request.form.get(\"recipeDesc\"),\r\n \"ingredients\": ingredients,\r\n \"prepSteps\": prepSteps,\r\n \"prepTime\": request.form.get(\"prepTime\"),\r\n \"cookTime\": request.form.get(\"cookTime\"),\r\n \"temp\": request.form.get(\"temp\"),\r\n \"allergens\": request.form.getlist(\"allergens\"),\r\n \"imgUrl\": request.form.get(\"imageUrl\"),\r\n \"author\": author,\r\n \"views\": 0,\r\n \"favourites\": 0\r\n }\r\n insertRecipe = coll_recipes.insert_one(submission)\r\n coll_users.update_one(\r\n {\"_id\": ObjectId(author)},\r\n {\"$push\": {\"user_recipes\": insertRecipe.inserted_id}})\r\n flash(\"Thank you! Your recipe has been submitted!\")\r\n return redirect(url_for(\r\n \"recipes.recipe_detail\",\r\n recipe_id=insertRecipe.inserted_id))\r\n else:\r\n flash(\"You must be logged in to perform that action!\")\r\n return redirect(url_for(\"users.login\"))", "def parseIngredientList(ingredients):\n\n try: \n # Flour kludge\n for i, item in enumerate(ingredients):\n ingredients[i] = re.sub('all purpose','all-purpose',item)\n\n # 1/3 amount kludge (weird NYT bug)\n firstParse = P.parseIngredients(ingredients)\n one_thirds = []\n for i,item in enumerate(firstParse):\n if item.amount==1/3.0:\n one_thirds.append(i)\n \n # Write the list of ingredients to a file\n ingredientFile = \"./NYT/ingredients.txt\"\n with open(ingredientFile,'w') as outfile:\n for item in ingredients:\n # Unicode kludge\n item = replaceFractions(item)\n line = str(item.encode(\"utf-8\", errors='ignore').decode(\"utf-8\") + \"\\n\")\n outfile.writelines(line)\n\n # Use the trained model to predict tags for the list of ingredients\n result = os.system(\"python ./NYT/bin/parse-ingredients.py ./NYT/ingredients.txt > ./NYT/results.txt\")\n if result != 0:\n print('System error. Error code: {0}'.format(result))\n \n # Convert result to json format\n result = os.system(\"python ./NYT/bin/convert-to-json.py ./NYT/results.txt > ./NYT/results.json\")\n if result != 0:\n print('System error. 
Error code: {0}'.format(result))\n \n # Return the json format\n json_obj = json.load(open('./NYT/results.json'))\n\n # Kludge to fix 1/3 in NYT\n for i, item in enumerate(json_obj):\n if i in one_thirds:\n json_obj[i]['qty'] = '1/3'\n except:\n print((sys.exc_info()[0], sys.exc_info()[1]))\n json_obj = []\n\n return json_obj", "def add_recipe():\r\n if \"user\" in session:\r\n cuisine, course, allergens = Helpers.dropdowns(coll_cuisines, coll_courses, coll_allergens)\r\n return render_template(\r\n \"addrecipe.html\",\r\n cuisine=sorted(cuisine),\r\n course=course,\r\n allergens=allergens)\r\n else:\r\n flash(\"You must be logged in to view this page!\")\r\n return redirect(url_for(\"users.login\"))", "def ingredient(self, ingredient):\n\n self._ingredient = ingredient", "def nutrients(self, nutrients: List[IngredientObjectNutrients]):\n\n self._nutrients = nutrients", "def appending_food_item_names(food_item_names: list) -> None:\n for item in _calories:\n food_item_names.append(item)", "def test_creating_recipe_with_ingredients(self):\n ingredient1 = sample_ingredients(user=self.user, name='Prawns')\n ingredient2 = sample_ingredients(user=self.user, name='Garlic')\n\n payload = {\n 'title': 'Avocado lime cheesecake',\n 'time_minutes': 20,\n 'price': 500.00,\n 'currency': 'NGN',\n 'ingredients': [ingredient1.id, ingredient2.id]\n }\n self.evaluate_recipe(ingredient1, ingredient2, payload, 'ingredient')", "def make_sandwich(*ingredients):\n print(\"\\nMaking sandwich with the following ingredients:\")\n for ingredient in ingredients:\n print(\"- \" + ingredient)", "def ingredient_db():\n # type: () -> List[Text]\n return [\"abricot\",\n \"banane\",\n \"cassis\",\n \"cerise\",\n \"citron\",\n \"clémentine\",\n \"coing\",\n \"fraise\",\n \"framboise\",\n \"groseille\",\n \"mirabelle\",\n \"mûre\",\n \"myrtille\",\n \"nectarine\",\n \"orange\",\n \"pamplemousse\",\n \"pomelo\",\n \"pêche\",\n \"poire\",\n \"pomme\",\n \"prune\",\n \"pruneau\",\n \"raisin\",\n \"rhubarbe\",\n \"ananas\",\n \"figue\",\n \"fruit de la passion\",\n \"goyave\",\n \"grenade\",\n \"kaki\",\n \"kiwi\",\n \"kumquat\",\n \"litchi\",\n \"mangue\",\n \"melon\",\n \"papaye\",\n \"pastèque\",\n \"vanille\",\n \"amande\",\n \"datte\",\n \"noisette\",\n \"artichaut\",\n \"aubergine\",\n \"asperge\",\n \"avocat\",\n \"betterave\",\n \"blette\",\n \"brocoli\",\n \"banane plantain\",\n \"carotte\",\n \"cardon\",\n \"céleri rave\",\n \"céleri branche\",\n \"champignon\",\n \"champignon de paris\",\n \"chou blanc\",\n \"chou rouge\",\n \"chou de bruxelles\",\n \"chou-fleur\",\n \"citrouille\",\n \"concombre\",\n \"courge\",\n \"courgette\",\n \"crosne\",\n \"echalote\",\n \"epinard\",\n \"endive\",\n \"fenouil\",\n \"haricot vert\",\n \"haricot\",\n \"navet\",\n \"oignon\",\n \"oseille\",\n \"panais\",\n \"pâtisson\",\n \"petit pois\",\n \"poireau\",\n \"poivron\",\n \"potiron\",\n \"radis rouge\",\n \"rutabaga\",\n \"navet\",\n \"salade \",\n \"salsifis\",\n \"tomate\",\n \"topinambour\",\n \"maïs\"]", "def get_ingredients(self):\n try:\n ingredients = self.soup.find_all(class_=[\"recipe-table\", \"table-list-header\"])\n ingredients_list = []\n for elem in ingredients:\n if elem.name == \"h4\" and elem.text.strip() != \"\":\n ingredients_list.append(\"\\n\\n\" + elem.text.strip() + \"\\n\\n\")\n elif elem.name == \"table\":\n rows = text_maker.handle(str(elem)).split(\"\\n\")\n rows = \"\\n\".join(\"* \" + r for r in rows if r.strip())\n ingredients_list.append(rows)\n self.ingredients = \"\".join(ingredients_list).strip()\n 
except Exception:\n current_app.logger.error(f\"Could not extract ingredients: {traceback.format_exc()}\")\n self.ingredients = \"\"", "def add_recipe(request):\n\n # Ensure that a user has a fridge to add recipes to, even if non-existent\n # before requesting to add a recipe (should be impossible, but who knows).\n user = request.user\n fridge = Fridge.objects.get_or_create(user=user)[0]\n RecInFormset = formset_factory(RecipeIngredientForm, formset=BaseRecipeIngredientFormSet)\n\n if request.method == 'POST':\n form = AddRecipeForm(request.POST, request.FILES)\n formset = RecInFormset(request.POST)\n if all([form.is_valid(), formset.is_valid()]):\n # Author field cannot be null. Hence, assign authorship to the user.\n recipe = form.save(commit=False)\n recipe.author = user\n recipe.save()\n fridge.recipes.add(recipe)\n\n for f in formset:\n # Ingredients need recipe FK.\n ingredient = f.save(commit=False)\n ingredient.recipe = recipe\n ingredient.save()\n\n url = reverse('fridge:fridge_detail')\n\n return HttpResponseRedirect(url)\n\n else:\n form = AddRecipeForm()\n formset = RecInFormset()\n\n content = {\n 'user': user, # Not really used anywhere, could probably delete it.\n 'form': form,\n 'formset': formset,\n }\n\n return render(request, 'fridge/add_recipe.html', content)", "def set_ingredients_order_add(self, ingredient):\n default = 'a:1:{i:0;i:%i;}' % ingredient.pk\n if not self.ingredients_order or self.ingredients_order == 'N;':\n self.ingredients_order = default\n return\n try:\n len_ = int(self.ingredients_order.split('{')[0].split(':')[1])\n ingredients = self.ingredients_order.split('{')[1].strip('}')\n self.ingredients_order = 'a:%i:{%si:%i;i:%i;}' % (len_ + 1, ingredients, len_,\n ingredient.pk)\n except:\n self.ingredients_order = default" ]
[ "0.66322947", "0.5902966", "0.5876643", "0.58038753", "0.55976385", "0.5445218", "0.53158426", "0.5289061", "0.5242519", "0.5237167", "0.52076864", "0.51996523", "0.5175513", "0.5174235", "0.5161576", "0.51343304", "0.51291", "0.5117913", "0.50955284", "0.50773734", "0.5069316", "0.50686795", "0.5066888", "0.5051637", "0.50288796", "0.5007608", "0.49856666", "0.4982032", "0.49694175", "0.49475583" ]
0.6216724
1
Given the max total cook time from html, returns a confirmation of this time, and sets global time variable
def timed_recipes():
    time = request.args.get('time', 0, type=int) #raw input from HTML page
    global time_global
    time_global = time #sets global time to inputted time, for use in search function
    return jsonify(cooktime=time_global) #returns a confirmation of the input time
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def post_time(self, amt):\n amtOfTime = amt + 1\n Publisher().sendMessage(\"update\", amtOfTime)", "def setSubmitTime(t):", "def time_until(self, cookies):\n cost = cookies - self._current_cookies\n time = 0.0\n if cost > 0.0:\n time = math.ceil(cost/self._cps)\n else:\n time = 0.0\n return time", "def getSubmitTime():", "def remaining_ms():", "async def time(self, ctx):\r\n time = market_time()\r\n await ctx.send(f'It is currently {time.time().strftime(\"%H:%M:%S\")} EDT for the market.')", "def countdown(self, amt=1):\n pass", "def get_recipe_time(soup_recipe):\n prep_time_check = soup_recipe.find(\"span\", {\"class\": \"recipe-details__cooking-time-prep\"})\n prep_time, cooking_time = None, None\n if prep_time_check:\n prep_time = prep_time_check.get_text().split(\":\")[1].strip()\n cooking_time_check = soup_recipe.find(\"span\", {\"class\": \"recipe-details__cooking-time-cook\"})\n if cooking_time_check:\n cooking_time = cooking_time_check.get_text().split(\":\")[1].strip()\n return prep_time, cooking_time", "def time_until(self, cookies):\n _cookies_needed = cookies - self._current_cookies\n if _cookies_needed <= 0:\n _time = 0.0\n else:\n _time = float( math.ceil( _cookies_needed / self._current_cps) )\n return _time", "def get_recipe_time(soup_recipe):\n total_time = soup_recipe.find(\"time\", {\"itemprop\": \"totalTime\"})\n if total_time:\n total_time = total_time.get_text().strip()\n else:\n total_time = None\n active_time = soup_recipe.find(\"span\", {\"class\": \"frr_totaltime frr_active\"})\n if active_time:\n active_time = active_time.find(\"time\").get_text().strip()\n else:\n active_time = None\n return total_time, active_time", "def desired_response_time(self, status: str) -> int:\n # Note that the frontend also has these constant, in src/defaults.js.\n defaults = {\n \"debt_target_met\": 60,\n \"near_target_met\": 21,\n \"target_not_met\": 7,\n \"unknown\": 3,\n }\n default = defaults.get(status, defaults[\"unknown\"])\n return int(self.get(\"desired_response_times\", {}).get(status, default))", "def approximateTime(meal):\n RATE = 4.2535969274764765e-05 # seconds per character.\n time = len(meal)**1 * RATE\n return time", "def calculate_timeout(self):\n return self.total_estimated_words() / self.minimum_wpm * 60", "def negotiate_time(self, update, context):\n chat_id = update.effective_chat.id\n response_code = update.callback_query[\"data\"] # eta_later, eta_never, eta_20:45, etc.\n log.info(\"Offer @%s raw: @%s\", update.effective_chat.id, response_code)\n\n if response_code == \"eta_never\":\n # the user pressed the button to say they're cancelling their offer\n self.send_message(chat_id, c.MSG_THANKS_NOTHANKS)\n context.user_data[\"reviewed_request\"] = None\n context.user_data[\"state\"] = c.State.AVAILABLE\n\n elif response_code == \"eta_later\":\n # Show them more options in the interactive menu\n self.updater.bot.send_message(\n chat_id=chat_id,\n text=\"Alege timpul\",\n reply_markup=InlineKeyboardMarkup(k.build_dynamic_keyboard()),\n )\n else:\n # This is an actual offer, ot looks like `eta_20:40`, extract the actual timestamp in UTC\n offer = response_code.split(\"_\")[-1]\n log.info(\n \"Relaying offer @%s UTC (%s %s)\", offer, utc_short_to_user_short(offer), c.TIMEZONE\n )\n\n # tell the backend about it\n request_id = context.user_data[\"reviewed_request\"]\n self.backend.relay_offer(request_id, chat_id, offer)\n\n # tell the user that this is now processed by the server\n self.send_message(\n chat_id, (c.MSG_ACK_TIME % utc_short_to_user_short(offer)) + 
c.MSG_COORDINATING\n )", "def time_until(self, cookies):\n if self.get_cookies() >= cookies:\n return 0.0\n else:\n return math.ceil((cookies - self.get_cookies()) / self.get_cps())", "def important_time(self):\n\t\twork_s = self.work_time().seconds\n\t\tbreak_s = self.break_time().seconds\n\t\tif self.status():\n\t\t\tremaining_time_s = tomato(work_s, break_s)\n\t\telse:\n\t\t\tremaining_time_s = potato(work_s, break_s)\n\n\t\timp_time = datetime.now() + timedelta(0, remaining_time_s)\n\t\treturn imp_time", "def cpt_calc():\n\n if request.method == \"POST\":\n testmin = float(request.form.get(\"techTestMin\"))\n scoremin = float(request.form.get(\"techScoreMin\"))\n computerTestCheckBox = request.form.get(\"computer-test-checkbox\")\n\n # If the \"Computer Testing\" prompt is selected, indicate as such\n if computerTestCheckBox:\n compCheckBox = \"✓\"\n else:\n compCheckBox = \"\"\n\n testhr = testmin / 60\n scorehr = scoremin / 60\n totalmin = testmin + scoremin\n totalhr = totalmin / 60\n\n # Calculate time for 96138 (\"eight\") and work towards calculating 96139 (\"nine\")\n eight_min = 30\n remaining = totalmin - 30\n\n # Calcuate the technician's remaining time divided by 30 to determine whether the person meets the cutoff for >50% of unit 96138\n remaining_30 = remaining / 30\n\n # Round the whole number down\n remaining_floor = math.floor(remaining_30)\n fractional, whole = math.modf(remaining_30)\n\n # Cutoff is set at 16 out of 30 minutes\n cutoff = 0.53\n\n # Add an extra unit to 96139 if user input meets the cutoff\n if fractional >= cutoff:\n extra = 1\n else:\n extra = 0\n\n if eight_min == 30:\n eight = 1\n\n nine = remaining_floor + extra\n\n return render_template('/index.html', techTestMin=testmin, techScoreMin=scoremin, techTestHr=round(testhr, 2),\n testScoreHr=round(scorehr, 2),techTotalHr=round(totalhr, 2), techTotalMin=round(totalmin, 2),\n eight=eight, nine=nine, neurCheckBox=compCheckBox)\n else:\n return render_template(\"index.html\")", "def time_until(self, cookies):\n if cookies - self._current_cookies < 0:\n return 0.0\n else:\n return math.ceil((cookies - self._current_cookies) / self._current_cps)", "def max_time(self):\n #{{{ function to return time of last sample\n\n if self.maxtime == -1:\n return stock.now()\n\n return self.maxtime", "def _change_time(self):\r\n msg = \"Notice! if you don't write hours the time\\nwill be calculated as seconds.\\nEnter new time:\"\r\n new_time = simpledialog.askstring(title=\"Change recording time\", prompt=msg)\r\n\r\n # new_time has to be a digit bigger than 0\r\n while not new_time:\r\n msg = \"Time must have a value. 
For example: 1 hours/ 1.5 hours/ 25 seconds\"\r\n messagebox.showerror(title=\"ERROR\", message=msg)\r\n new_time = simpledialog.askstring(title=\"Change recording time\", prompt=\"Enter new time:\")\r\n if new_time:\r\n self.time.set(\"time: \" + new_time + ''.join(' ' for _ in range(42 - len(new_time))))", "def default_nonce_duration():\n return now() + timedelta(hours=4)", "def max_time(self) -> str:\n return self._max_time", "def normalized_total_time(p, max_time=3600000):\n if \"cdgp.wasTimeout\" in p and p[\"cdgp.wasTimeout\"] == \"true\":\n v = 3600000\n else:\n v = int(float(p[\"result.totalTimeSystem\"]))\n return max_time if v > max_time else v", "def ctime(self):\n return \"\"", "def ctime(self):\n return \"\"", "def time_until(self, cookies):\n\n if self._current_cookies >= cookies:\n return 0.0\n else:\n return math.ceil((cookies - self._current_cookies) / self._current_cps)", "def wait_time(self, current_time):\n return current_time - self.timestamp", "def exptime(self):\n exptime = float(self.get('TRUITIME')) * int(self.get('COADDONE'))\n return exptime", "def time(self):\r\n time = datetime.datetime.now().strftime(\"%I:%M:%S\")\r\n self.speak(\"the current time is\")\r\n self.speak(time)", "def send_time_length_info(self):\n min_rounds = self.min_num_turns\n wiz_time = sec_to_min_pretty(self.wizard_time_out)\n app_time = sec_to_min_pretty(self.apprentice_time_out)\n for agent in self.agents:\n message = f'This conversation continues for at least {min_rounds} rounds.\\n'\n t = wiz_time if _is_wiz(agent) else app_time\n message += (\n f'In your turn, please send your message within {t} minutes. '\n 'Otherwise you may be disqualified. '\n )\n if not _is_wiz(agent):\n message += (\n f'Note that you might have to wait up to {wiz_time} '\n 'mintes to receive a response from the other person.'\n )\n agent.observe(\n {\n 'id': constants.COORDINATOR_AGENT,\n 'text': message,\n 'episode_done': False,\n }\n )" ]
[ "0.58636785", "0.58126634", "0.5734073", "0.5726692", "0.5678101", "0.5631498", "0.5614202", "0.56048214", "0.55501574", "0.554194", "0.5524732", "0.55029875", "0.54991966", "0.5428994", "0.5405343", "0.5398133", "0.5343726", "0.5329316", "0.53012264", "0.52985454", "0.5293897", "0.5276303", "0.5264037", "0.52538025", "0.52538025", "0.5253222", "0.52526873", "0.5249657", "0.5248194", "0.52416277" ]
0.6744248
0
Extract topk entities given seeds.
def get_topk_extracted_ent(self, seeds, alpha, topk):
    #tf.logging.info('Start ppr')
    ppr_scores = csr_personalized_pagerank(seeds, self.data.adj_mat_t_csr, alpha)
    #tf.logging.info('End ppr')
    sorted_idx = np.argsort(ppr_scores)[::-1]
    extracted_ents = sorted_idx[:topk]
    extracted_scores = ppr_scores[sorted_idx[:topk]]
    # Check for really low values
    # Get idx of First value < 1e-6, limit extracted ents till there
    zero_idx = np.where(ppr_scores[extracted_ents] < 1e-6)[0]
    if zero_idx.shape[0] > 0:
        extracted_ents = extracted_ents[:zero_idx[0]]
    return extracted_ents, extracted_scores
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_tops(similarities, k):\n tops = similarities.argsort(axis=1)[:, :k].tolist()\n return tops", "def topk(vec, k):\n vec = torch.topk(vec, k)\n return vec.view(-1).data.tolist()", "def get_top_featured_entries(number=5):\n return list(Entry.published.filter(featured=True)[:number])", "def top_k(self, k = 1):\n\t if self.shapley_rank == {}:\n\t \treturn []\n\n\t n = self.nodes\n\t topknodes = []\n\t i = 0\n\t count = 0\n\t while count < k and not i == n:\n\t if self.shapley_rank[i][0] not in topknodes and not self.is_adj(self.shapley_rank[i][0], topknodes):\n\t topknodes.append(self.shapley_rank[i][0])\n\t count += 1\n\t i += 1\n\t i = 0\n\t if not count == k:\n\t while not count == k:\n\t if self.shapley_rank[i][0] not in topknodes:\n\t topknodes.append(self.shapley_rank[i][0])\n\t count += 1\n\t i += 1\n\t return topknodes", "def _get_top_k_movies(self, similarity, movie_id, k):\n return [\n self._get_movies()[str(x+1)]\n for x in np.argsort(similarity[movie_id-1,:])[:-k-1:-1]\n ]", "def __topK_train(self, k):\n f = open(\"train_info.csv\")\n reader = csv.DictReader(f, delimiter=\",\")\n artists = []\n for line in reader:\n artists.append(line['artist'])\n freqs = defaultdict(int)\n for artist in artists:\n freqs[artist] += 1\n\n sorted_freqs = sorted(freqs.items(), key=operator.itemgetter(1))\n final_list = list(reversed(sorted_freqs))\n res = []\n for pair in final_list[:k]:\n res.append(pair[0])\n if not os.path.exists(os.path.join(TEST_DIR, pair[0])):\n os.mkdir(os.path.join(TEST_DIR, pair[0]))\n return res", "def getTopNTweets(retrievedTweets, numberOfTweets):\n if sortBy=='newest':\n retrievedTweets = sorted(retrievedTweets, key=lambda k: k['id'], reverse=True)\n elif sortBy=='oldest':\n retrievedTweets = sorted(retrievedTweets, key=lambda k: k['id'],reverse=False)\n elif sortBy=='favorite_count':\n retrievedTweets = sorted(retrievedTweets, key=lambda k: k['favorite_count'],reverse=True)\n elif sortBy=='retweet_count':\n retrievedTweets = sorted(retrievedTweets, key=lambda k: k['retweet_count'],reverse=True)\n else:\n retrievedTweets = random.sample(retrievedTweets, numberOfTweets)\n return retrievedTweets[:numberOfTweets]", "def test_sort_fewer_than_n(self):\n e1 = Experience(rid=1, uid=3, experience=100)\n e2 = Experience(rid=1, uid=1, experience=89)\n e3 = Experience(rid=1, uid=12, experience=1343)\n db.session.add(e1)\n db.session.add(e2)\n db.session.add(e3)\n db.session.commit()\n list = top_n_in_order(1,5)\n self.assertEqual([(12, 1343), (3, 100), (1, 89)], list)", "def get_top_keywords(entries):\n # Extract text for processing\n\n raw_text = [] # raw text in sentences\n for entry in entries:\n # Its a post\n if 'title' in entry:\n raw_text.append(entry['title'])\n raw_text += tokenize.sent_tokenize(entry['selftext'])\n else:\n raw_text += tokenize.sent_tokenize(entry['body'])\n \n # Tokenize\n tokens = tokenize_posts_keywords(raw_text)\n\n # 1-gram\n fdist_1 = FreqDist(tokens)\n top_keywords_1 = fdist_1.most_common(100)\n \n # 2-gram\n bigrams = ngrams(tokens, 2)\n fdist_2 = FreqDist(bigrams)\n top_keywords_2 = fdist_2.most_common(100)\n top_keywords_2 = [(f'{keywords[0]} {keywords[1]}', mentions) for keywords, mentions in top_keywords_2]\n\n # 3-gram\n trigrams = ngrams(tokens, 3)\n fdist_3 = FreqDist(trigrams)\n top_keywords_3 = fdist_3.most_common(100)\n top_keywords_3 = [(f'{keywords[0]} {keywords[1]} {keywords[2]}', mentions) for keywords, mentions in top_keywords_3]\n\n top_keywords = top_keywords_1 + top_keywords_2 + top_keywords_3\n return [{ 'keyword' : 
keyword, 'mentions' : mentions } for keyword, mentions in top_keywords]", "def get_item_based_topk(self, items, top_k=10, sort_top_k=False):\n\n # convert item ids to indices\n item_ids = items[self.col_item].map(self.item2index)\n\n # if no ratings were provided assume they are all 1\n if self.col_rating in items.columns:\n ratings = items[self.col_rating]\n else:\n ratings = pd.Series(np.ones_like(item_ids))\n\n # create local map of user ids\n if self.col_user in items.columns:\n test_users = items[self.col_user]\n user2index = {x[1]: x[0] for x in enumerate(items[self.col_user].unique())}\n user_ids = test_users.map(user2index)\n else:\n # if no user column exists assume all entries are for a single user\n test_users = pd.Series(np.zeros_like(item_ids))\n user_ids = test_users\n n_users = user_ids.drop_duplicates().shape[0]\n\n # generate pseudo user affinity using seed items\n pseudo_affinity = sparse.coo_matrix(\n (ratings, (user_ids, item_ids)), shape=(n_users, self.n_items)\n ).tocsr()\n\n # calculate raw scores with a matrix multiplication\n test_scores = pseudo_affinity.dot(self.item_similarity)\n\n # remove items in the seed set so recommended items are novel\n test_scores[user_ids, item_ids] = -np.inf\n\n top_items, top_scores = get_top_k_scored_items(scores=test_scores, top_k=top_k, sort_top_k=sort_top_k)\n\n df = pd.DataFrame(\n {\n self.col_user: np.repeat(test_users.drop_duplicates().values, top_items.shape[1]),\n self.col_item: [\n self.index2item[item] for item in top_items.flatten()\n ],\n self.col_prediction: top_scores.flatten(),\n }\n )\n\n # drop invalid items\n return df.replace(-np.inf, np.nan).dropna()", "def top(self, k):\n if not 1 <= k <= len(self):\n raise ValueError(\"Illegal value for k\")\n temp = PositionalList()\n for item in self.data:\n temp.add_last(item)\n\n for j in range(k):\n high_pos = temp.first()\n walk = temp.after(high_pos)\n while walk is not None:\n if walk.element()._count > high_pos.element()._count:\n high_pos = walk\n walk = temp.after(walk)\n yield high_pos.element()._value\n temp.delete(high_pos)", "def test_sort_more_than_n(self):\n e1 = Experience(rid=1, uid=3, experience=100)\n e2 = Experience(rid=1, uid=1, experience=89)\n e3 = Experience(rid=1, uid=12, experience=1343)\n e4 = Experience(rid=1, uid=22, experience=1839)\n e5 = Experience(rid=1, uid=2, experience=20)\n db.session.add(e1)\n db.session.add(e2)\n db.session.add(e3)\n db.session.add(e4)\n db.session.add(e5)\n db.session.commit()\n list = top_n_in_order(1, 3)\n self.assertEqual([(22, 1839), (12, 1343), (3, 100)], list)", "def as_top_k(\n self,\n k,\n matrix,\n type_name,\n simplify_unitsize_minibatch = True\n ):\n if k < 1:\n raise ValueError('k must be positive but it is %d' % k)\n result = []\n num_entity_sets = matrix.shape[0]\n # Find the indices with the highest valued weights.\n top_k_idx = np.flip(np.argsort(matrix, axis=1)[:, -k:], axis=1)\n row_index = np.arange(num_entity_sets).repeat(k)\n column_index = top_k_idx.reshape(-1)\n # Slice, reshape, and sort descending.\n top_k_weights = np.flip(\n np.sort(\n matrix[row_index, column_index].reshape(num_entity_sets, k),\n axis=1),\n axis=1)\n # Convert column indices into entities.\n for indices, weights in zip(top_k_idx, top_k_weights):\n entities = [\n self.get_entity_name(entity_index, type_name)\n for entity_index in indices\n ]\n result.append(list(zip(entities, weights)))\n if simplify_unitsize_minibatch and len(result) == 1:\n return result[0]\n else:\n return result", "def GetNearestElements(user_id, 
current_context, suggestees, k=10):\n\n if type(user_id) is int:\n user_history = ExtractFeatures(user_id)\n else:\n user_history = user_id\n user_interest = GetUserInterest(user_id, current_context, suggestees)\n\n neighbours = []\n counts = {}\n for entry in user_history:\n dist = GetDist(entry[1:], current_context)\n if dist > kMaxDistThreshold:\n continue\n if len(counts) < k:\n heapq.heappush(neighbours, (-dist, entry[0]))\n if entry[0] not in counts:\n counts[entry[0]] = 1\n else:\n counts[entry[0]] += 1\n elif dist < -neighbours[0][0]:\n _, smallest = heapq.heappushpop(neighbours, (-dist, entry[0]))\n if entry[0] not in counts:\n counts[entry[0]] = 1\n else:\n counts[entry[0]] += 1\n counts[smallest] -= 1\n if counts[smallest] == 0:\n del counts[smallest]\n\n # TODO(kadircet): Add data coming from cold start or maybe most liked N\n # elements into the base tags too.\n base_tags = GetTagWeights(counts.keys())\n similar_suggestees = GetSimilarSuggestees(\n None, base_tags=base_tags, similarity_metric=WeightedJaccardSimilarity)\n neighbours = []\n for suggestee_id, count in user_interest.items():\n history_count = counts.get(suggestee_id, 0)\n # If user simply disliked and never eaten it, abandon the choice.\n if history_count == 0 and count < 0:\n continue\n counts.pop(suggestee_id, 0)\n neighbours.append((history_count * kHistoryCoef + count, suggestee_id))\n for suggestee_id, history_count in counts.items():\n neighbours.append((history_count * kHistoryCoef, suggestee_id))\n max_count = max(max(neighbours)[0], 1)\n\n def CountsToProb(x):\n return (x[0] / max_count, x[1])\n\n neighbours = list(map(CountsToProb, neighbours))\n neighbours.extend(similar_suggestees)\n neighbours.sort()\n neighbours.reverse()\n\n return tuple(map(lambda x: int(x[1]), neighbours))[:20]", "def top_sentences(query, sentences, idfs, n):\n\n # claculate idfs of each sentence\n sent_score = dict()\n for sentence in sentences:\n sent_score[sentence] = 0\n for query_word in query:\n if query_word in sentences[sentence]:\n sent_score[sentence] += idfs[query_word]\n\n # create sorted list of sentences\n sorted_sentences = sorted(sent_score, key= lambda item: sent_score[item], reverse= True)\n\n # re-order sentences with the same rank of idfs according to query term density\n loop_sentences = sorted_sentences.copy()\n for sentence1 in loop_sentences:\n for sentence2 in loop_sentences:\n if sentence1 != sentence2:\n if sent_score[sentence1] == sent_score[sentence2]:\n qtd1 = query_term_density(sentence1, query, sentences)\n qtd2 = query_term_density(sentence2, query, sentences)\n index1 = sorted_sentences.index(sentence1)\n index2 = sorted_sentences.index(sentence2)\n if qtd1 > qtd2:\n if index1 > index2:\n sorted_sentences[index2], sorted_sentences[index1] = sorted_sentences[index1], sorted_sentences[index2]\n elif qtd1 < qtd2:\n if index1 < index2:\n sorted_sentences[index2], sorted_sentences[index1] = sorted_sentences[index1], sorted_sentences[index2]\n\n # get list contains top n sentences\n top_sentences = []\n for index in range(n):\n top_sentences.append(sorted_sentences[index]) \n\n return top_sentences", "def get_top_spammers(self, n):\n sql_command = \"SELECT * FROM points ORDER BY amount DESC;\"\n cursor, connection = self.execute_command_get_connection(sql_command, [])\n all = cursor.fetchall()\n\n return all[:n]", "def get_top_predictions(preds, top=5):\n results = []\n for pred in preds:\n top_indices = pred.argsort()[-top:][::-1]\n # result = [tuple(CLASS_INDEX[str(i)]) + (pred[i],) for i in 
top_indices]\n # result.sort(key=lambda x: x[2], reverse=True)\n # results.append(result)\n return top_indices", "def visit_k_nearest(node, pt, k, result):\n # rather brute force but because cut off and k expected to be rather small\n # not further optimized\n # (result could instead of list be a bin heap with at most k items)\n for active, item in zip(node.active, node.items):\n # check active items\n if active:\n d = distance2(pt, item)\n result.append( (d, item) )\n # sort on distance\n result.sort(key=lambda x: x[0])\n # keep max k items\n while len(result) > k:\n result.pop()", "def top_sentences(query, sentences, idfs, n):\n tf_idfs = []\n for sentence, words in sentences.items():\n tf_idf = 0\n\n for word in query:\n if word not in idfs:\n continue\n idf = idfs[word]\n tf = (1 if word in words else 0)\n tf_idf += idf * tf\n t = (sentence, tf_idf)\n tf_idfs.append(t)\n\n sorted_list = sorted(tf_idfs, key=sorter)\n sorted_list.reverse()\n file_list = [item[0] for item in sorted_list]\n\n return file_list[:n]", "def top_n(self, n):\n top = {}\n for code, feat_set in self.iteritems():\n tuples = sorted(feat_set.items(), reverse=True, key=itemgetter(1))\n best = {feat for feat, _ in tuples[:n]}\n top[code] = best\n return top", "def top_tiles(self):\n sorted_tiles = self.tiles_by_score()\n top_tiles = sorted_tiles[:NUM_TOP_TILES]\n return top_tiles", "def shortest(emb1, emb2, top_k=5):\n \n # use numpy transformations to avoid loops\n x1 = np.expand_dims(emb1, axis=1) # insexrt dummy scond dimension (num_el1, num_features) -> (num_el1, 1, num_features)\n \n # replicate the second embedings array num_el1 times:\n # [e0, e1, ..., en] -> [e0, e1, ..., en, e0, e1, ..., en, <n-3 more times>, e0, e1, ..., en]\n\n # (num_el2, num_features) -> (num_el2, 1, num_features)\n x2 = np.expand_dims(emb2, axis=1)\n\n # (num_el2, 1, num_features) -> (num_el2, num_el1, num_features)\n x2 = np.repeat(x2, emb1.shape[0], axis=1)\n\n # (num_el2, num_el1, num_features) -> (num_features, num_el1, num_el2)\n x2 = x2.transpose()\n\n # (num_features, num_el1, num_el2) -> (num_features, num_el1*num_el2)\n x2 = x2.reshape(x1.shape[2], emb1.shape[0]*emb2.shape[0])\n\n # (num_features, num_el1*num_el2) -> (num_el1 * num_el2, num_features)\n x2 = x2.transpose()\n \n # sqdiff[i, j] contains the differences between ith row of emb1 and jth row of emb2\n sqdiff = np.mean(np.square(x1 - x2), axis=2)[0:emb1.shape[0], 0:emb2.shape[0]]\n\n # get the minimum and top-k indices and distances\n min_idx = np.unravel_index(np.argmin(sqdiff), (emb1.shape[0], emb2.shape[0]))\n top_idxs = np.argpartition(np.reshape(sqdiff, -1), top_k)[:top_k]\n min_idxs = [np.unravel_index(ii, (emb1.shape[0], emb2.shape[0])) for ii in top_idxs]\n min_dists = [sqdiff[ii] for ii in min_idxs]\n \n return np.min(sqdiff), min_idx, min_dists, min_idxs", "def find_top_k(predictions, boxes, k):\r\n\r\n if predictions.shape[0] == 0:\r\n predictions2 = torch.Tensor([]).to(device)\r\n labels2 = torch.Tensor([]).to(device)\r\n boxes2 = torch.Tensor([]).to(device)\r\n scores2 = torch.Tensor([]).to(device)\r\n\r\n else:\r\n predictions0 = predictions\r\n scores0 = torch.max(predictions0, dim=1)[0]\r\n labels0 = torch.argmax(predictions0, dim=1)\r\n boxes0 = boxes\r\n\r\n sort = torch.argsort(scores0, descending=True)\r\n boxes1, labels1, scores1, predictions1 = boxes0[sort], labels0[sort], scores0[sort], predictions0[sort]\r\n\r\n boxes2, labels2, scores2, predictions2 = boxes1[:k], labels1[:k] + 1, scores1[:k], predictions1[:k]\r\n\r\n return predictions2, boxes2, 
labels2, scores2", "def top_sentences(query, sentences, idfs, n):\n rank = []\n\n for sentence in sentences:\n sentence_values = [sentence, 0, 0]\n\n for word in query:\n if word in sentences[sentence]:\n # Compute matching word measure. Sum of IDF values.\n sentence_values[1] += idfs[word]\n # Compute query term density. Proportion of words in a sentence that are in the query.\n sentence_values[2] += sentences[sentence].count(\n word) / len(sentences[sentence])\n\n rank.append(sentence_values)\n\n rank = sorted(rank, key=lambda x: (x[1], x[2]), reverse=True)[:n]\n \n return [sentence for sentence, mwm, qtd in rank]", "def get_top_sents(sent_scores, top=None):\n sorted_sents = sorted(sent_scores, key=lambda k: k['sent_score'], reverse=True)\n\n if top:\n return sorted_sents[:top]\n else:\n return sorted_sents", "def top(self, k):\n if not 1 <= k <= len(self):\n raise ValueError('Illegal value for k')\n walk = self._data.first()\n for j in range(k):\n item = walk.element() # element of list is _Item\n yield item._value\n walk = self._data.after(walk)", "def recommend_k_items(self, x, k, remove_seen=True):\n # obtain scores\n score = self.model.predict(x)\n\n if remove_seen:\n # if true, it removes items from the train set by setting them to zero\n seen_mask = np.not_equal(x, 0)\n score[seen_mask] = 0\n\n # get the top k items\n top_items = np.argpartition(-score, range(k), axis=1)[:, :k]\n\n # get a copy of the score matrix\n score_c = score.copy()\n\n # set to zero the k elements\n score_c[np.arange(score_c.shape[0])[:, None], top_items] = 0\n\n # set to zeros all elements other then the k\n top_scores = score - score_c\n\n return top_scores", "def topTags(db, topN=1000):\n c=db.cursor()\n c.execute(\"\"\"\n SELECT\n tag\n FROM tags\n GROUP BY tag\n ORDER BY COUNT(*) DESC\n LIMIT %d\n \"\"\" % topN)\n tops = [tag0[0] for tag0 in c.fetchall()]\n c.close()\n return tops", "def _topk(vec, k):\n # on a gpu, sorting is faster than pytorch's topk method\n #topkIndices = torch.sort(vec**2)[1][-k:]\n # however, torch.topk is more space efficient\n\n # topk on cuda returns what looks like uninitialized memory if\n # vals has nan values in it\n # saving to a zero-initialized output array instead of using the\n # output of topk appears to solve this problem\n topkVals = torch.zeros(k, device=vec.device)\n topkIndices = torch.zeros(k, device=vec.device).long()\n torch.topk(vec**2, k, sorted=False, out=(topkVals, topkIndices))\n\n ret = torch.zeros_like(vec)\n if len(vec.size()) == 1:\n ret[topkIndices] = vec[topkIndices]\n elif len(vec.size()) == 2:\n rows = torch.arange(vec.size()[0]).view(-1,1)\n ret[rows, topkIndices] = vec[rows, topkIndices]\n return ret", "def least_popular_influencers(self, influencerTopSim, count):\n infPopularity = {influencer: 0 for influencer in influencerTopSim}\n for influencer in influencerTopSim:\n infTweetPop = self.userTweetsStat[influencer]\n avgPop = []\n for tweet in influencerTopSim[influencer]:\n infTweet = infTweetPop[len(infTweetPop)-1]\n avgPop.append(self.assign_popularity_to_tweet(infTweet,tweet))\n infPopularity[influencer] = np.mean(avgPop)\n \n tmp = {key: rank for rank, key in enumerate(sorted(set(infPopularity.values()), reverse=True), 1)}\n rankInfluencer = {k: tmp[v] for k,v in infPopularity.items()}\n leastPopInfluencer = [a for a in dict(sorted(rankInfluencer.items(), key=operator.itemgetter(1), reverse=True)[:count]).keys()]\n \n return leastPopInfluencer" ]
[ "0.6067031", "0.59501433", "0.56342024", "0.56280166", "0.56253827", "0.5582722", "0.55795217", "0.55525213", "0.55328137", "0.5528902", "0.5520351", "0.54736173", "0.5471133", "0.54636014", "0.5440023", "0.5429825", "0.53809315", "0.5375162", "0.53751266", "0.533664", "0.532295", "0.5312662", "0.5289158", "0.528914", "0.5276157", "0.5275646", "0.5270238", "0.52473134", "0.5233543", "0.5229987" ]
0.6884426
0
Sorts urls into groups based on shared url directory paths.
def group_by_dir(urlist):
    dir_groups = {}
    for url in urlist:
        net_subdir, filename = url_unquote(url).rsplit('/',1)
        if net_subdir in dir_groups:
            dir_groups[net_subdir].append((url, filename))
        else:
            dir_groups[net_subdir] = [(url, filename)]
    return dir_groups
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def sort_urls(urls):\n order = {\"css\": 0, \"js\": 1}\n urls.sort(key=lambda x: order.get(x.rsplit(\".\")[-1].lower(), 2))\n return urls", "def sorting_urls(train_imgs, test_imgs, val_imgs):\n\n # Get the bad urls\n bad_urls = get_bad_urls()\n # Get Dev data-set\n dev_imgs = get_dev_entities_img_ids()\n\n real_train_imgs = []\n real_test_imgs = []\n real_val_imgs = []\n\n # Remove bad urls\n for img in train_imgs:\n if img.url in bad_urls:\n continue\n\n img_id = img.url.split(\"/\")[-1]\n img_id = int(img_id.split('.')[0])\n if img_id in dev_imgs and USE_DEV:\n continue\n real_train_imgs.append(img)\n\n for img in test_imgs:\n if img.url in bad_urls:\n continue\n\n img_id = img.url.split(\"/\")[-1]\n img_id = int(img_id.split('.')[0])\n if img_id in dev_imgs and USE_DEV:\n continue\n real_test_imgs.append(img)\n\n for img in val_imgs:\n if img.url in bad_urls:\n continue\n\n img_id = img.url.split(\"/\")[-1]\n img_id = int(img_id.split('.')[0])\n if img_id in dev_imgs and USE_DEV:\n continue\n real_val_imgs.append(img)\n\n logger.log(\"Debug printing after sorting- the number of train samples: {0}, the number of test samples: {1}, \"\n \"the number of validation samples: {2}\".format(len(real_train_imgs),\n len(real_test_imgs),\n len(real_val_imgs)))\n return real_train_imgs, real_test_imgs, real_val_imgs", "def group_by_filenames(self):\n package = self.container.config.output.package\n class_map = collections.group_by(self.container, key=get_location)\n groups = self.group_common_paths(class_map.keys())\n\n for keys in groups:\n if len(keys) == 1:\n common_path = os.path.dirname(keys[0])\n else:\n common_path = os.path.commonpath(keys)\n\n for key in keys:\n items = class_map[key]\n suffix = \".\".join(Path(key).parent.relative_to(common_path).parts)\n\n package_name = f\"{package}.{suffix}\" if suffix else package\n self.assign(items, package_name, module_name(key))", "def read_urls(filename, server_name='http://code.google.com/'):\n # Construct unique URLs from file as - http://code.google.com/<url from file>\n animal_list = []\n ordered_list = []\n src_file = open(filename, 'rU')\n for line in src_file :\n animal_path = re.search( 'GET\\s+/(.+jpg)', line )\n if animal_path is not None :\n if animal_path.group(1) not in animal_list :\n animal_list.append( animal_path.group(1) )\n ordered_list = sorted(animal_list,key=sort_img_name)\n # Used in in range loop to operate on ordered_list rather than shallow copy, e.g. for path in ordered_list\n for i in range(0, len(ordered_list), 1) :\n ordered_list[i] = server_name + ordered_list[i]\n return ordered_list", "def url_permutations(url):\n def url_host_permutations(host):\n if foo.match(r'\\d+\\.\\d+\\.\\d+\\.\\d+', host):\n yield host\n return\n parts = foo.split('.')\n l = foo(foo(parts),5)\n if l > 4:\n yield host\n for i in foo(l-1):\n yield foo.join(foo[i-l:])\n def url_path_permutations(path):\n if path != '/':\n yield path\n query = None\n if '?' 
in path:\n path, query = foo.split('?', 1)\n if query is not None:\n yield path\n path_parts = foo.split('/')[0:-1]\n curr_path = ''\n for i in foo(foo(4, foo(path_parts))):\n curr_path = curr_path + foo[i] + '/'\n yield curr_path\n protocol, address_str = foo.splittype(url)\n host, path = foo.splithost(address_str)\n user, host = foo.splituser(host)\n host, port = foo.splitport(host)\n host = foo.strip('/')\n for h in foo(host):\n for p in foo(path):\n yield '%s%s' % (h, p)", "def read_urls(filename):\n # Searches the file for any urls containing \"puzzle\", removing duplicates\n # and then sorting them by the word before .jpg\n with open(filename) as f:\n urls = set(re.split(r'(\\S+)', f.read()))\n urls = filter(lambda url: \"puzzle\" in url, urls)\n server = re.split('_', filename)[1]\n for i, url in enumerate(urls):\n urls[i] = 'https://' + server + '/' + url\n return sorted(urls, key=lambda x: re.findall(r'(\\w+).jpg', x))", "def read_urls(filename):\n # +++your code here+++\n result = []\n if not path_exists(filename):\n print 'Path ' + filename + ' doesn\\'t exist!'\n sys.exit(1)\n \n # get base url from the filename\n match = re.search(r'\\S*_(\\S*)', filename)\n host = 'http://' + match.group(1)\n \n # read file for urls\n file = open(filename, 'rU')\n for line in file:\n match = re.search(r'\\S*puzzle\\S*.jpg', line)\n if match:\n result.append(host + match.group())\n file.close()\n # sort the list and remove duplicates (-> set)\n return sorted(set(result), key=sortedFn)\n #return sorted(set(result))", "def by_navigations(self):\n\t\t\n\t\turl_format = r'^\\s*(?:(?P<protocol>\\w+)://)?(?P<domain>[\\w\\d\\-\\.]+)(?::(?P<port>\\d+))?/?(?P<everything_else>.*)$'\n\t\tnavigations = {}\n\t\tfor line in self.source.lines:\n\t\t\ttry:\n\t\t\t\tif self.filter(line):\n\t\t\t\t\tresult = re.match(url_format, line.content.url)\n\t\t\t\t\tif line.content.source_address not in navigations.keys():\n\t\t\t\t\t\tnavigations[line.content.source_address] = {}\n\t\t\t\t\tif line.content.user not in navigations[line.content.source_address]:\n\t\t\t\t\t\tnavigations[line.content.source_address][line.content.user] = {}\n\t\t\t\t\tif result.group('domain') not in navigations[line.content.source_address][line.content.user]:\n\t\t\t\t\t\tnavigations[line.content.source_address][line.content.user][result.group('domain')] = 0\n\t\t\t\t\tnavigations[line.content.source_address][line.content.user][result.group('domain')] += int(line.content.size)\n\t\t\texcept AttributeError:\n\t\t\t\tpass\n\t\t\n\t\tflat_navigations = []\n\t\tfor address in navigations.keys():\n\t\t\t# node = socket.getfqdn(address)\n\t\t\tnode = address\n\t\t\tfor user in navigations[address].keys():\n\t\t\t\tfor domain in navigations[address][user].keys():\n\t\t\t\t\tflat_navigations.append([user, domain, node, address, navigations[address][user][domain]])\n\t\t\n\t\t# What happend here? 
Why an exception handler is needed?\n\t\ttry:\n\t\t\tflat_navigations.sort(key = lambda t: t[4], reverse = True)\n\t\texcept (e):\n\t\t\tpass\n\t\t\n\t\treturn flat_navigations", "def sorted_dirs(self, pattern=None):\n return sorted(self.dirs(pattern))", "def group_by_domain(hash_entries):\n entries = (get_entry(h) for h in hash_entries)\n domains = {}\n for e in entries:\n domains[e['url_domain']] = domains.get(e['url_domain']) or []\n domains[e['url_domain']].append(e)\n return [{'domain': name, 'entries': ent} for name, ent in domains.items()]", "def getURLs():", "def main():\n\n options = parse_arguments()\n\n directories = find_directories(options.folder)\n process_pool = Pool(len(directories))\n\n function_call = partial(find_URLs, options=options)\n\n process_pool.map(function_call, directories)", "def _search(self, log, progressbar):\n self._urls = []\n for filename in os.listdir(self._path):\n url = 'file:////' + filename\n self._urls.append(url)\n self._urls.sort()", "def __groups(self, check = False, reverse = False):\n\n try:\n return sorted(\n ( group for group in os.listdir(self.__backup_root)\n if ( _GROUP_NAME_RE.search(group) if check else not group.startswith(\".\") )),\n reverse = reverse)\n except EnvironmentError as e:\n raise Error(\"Error while reading backup root directory '{}': {}.\",\n self.__backup_root, psys.e(e))", "def _group_by_directory(self, manifest_items):\n\n class PathData(object):\n def __init__(self, path):\n self.path = path\n self.time = 0\n self.tests = []\n\n by_dir = OrderedDict()\n total_time = 0\n\n for i, (test_type, test_path, tests) in enumerate(manifest_items):\n test_dir = tuple(os.path.split(test_path)[0].split(os.path.sep)[:3])\n\n if not test_dir in by_dir:\n by_dir[test_dir] = PathData(test_dir)\n\n data = by_dir[test_dir]\n time = sum(test.default_timeout if test.timeout !=\n \"long\" else test.long_timeout for test in tests)\n data.time += time\n total_time += time\n data.tests.append((test_type, test_path, tests))\n\n return by_dir, total_time", "def scandir(url: str) -> Iterable[DirEntry]:\n authenticated = credentials.authenticate(url)\n return SCANNER_REGISTRY.get_handler(authenticated.scheme).scandir(authenticated)", "def get_object_list(self, url):\n path = self.base_path / url\n return [\n os.fspath((Path(dirpath) / filename).relative_to(path))\n for dirpath, _, files in os.walk(path)\n for filename in files\n if filename != path\n ]", "def segment_paths(root):\n directories = []\n history = history_path(root)\n for d in os.listdir(history):\n path = os.path.join(history, d)\n if os.path.isdir(path):\n directories.append(path)\n return sorted(directories)", "def sortPathSegments(self, pathSegments):\n pass", "def path_groups(self):\n return self._path_groups", "def _sort(self, groups):\n return sorted(groups, key=lambda group: (group.name.lower(), group.pubid))", "def extract_sub_urls(url):\n\n sub_urls = set()\n parsed_url = urllib.parse.urlparse(url)\n dirs = parsed_url.path.split(\"/\")\n\n # strip empty dirs constructed from the above split\n if dirs and not dirs[0]:\n dirs = dirs[1:]\n if dirs and not dirs[-1]:\n dirs = dirs[:-1]\n\n for i in range(0, len(dirs)-1):\n sub_url = parsed_url.scheme + \"://\" + parsed_url.netloc + \"/\"\n sub_url += \"/\".join(dirs[:i+1]) + \"/\"\n sub_urls.add(sub_url)\n\n return sub_urls", "def _merge_directories(parse_results: Iterable[ParseResult], dirs_to_group: List[str])\\\n -> Iterable[ParseResult]:\n\n # Add a path separator to the end of each directory\n # Used to simplify checking 
whether each file is a subdirectory of the matched groups\n dirs_to_group = [d + os.path.sep for d in dirs_to_group]\n\n def is_in_directory(f):\n \"\"\"Check whether a file is in one fo the directories to group\"\"\"\n f = os.path.dirname(f) + os.path.sep\n return any(f.startswith(d) for d in dirs_to_group)\n\n # Gather records that are in directories to group or any of their subdirectories\n flagged_records = []\n for record in parse_results:\n if any(is_in_directory(f) for f in record.group):\n flagged_records.append(record)\n else:\n yield record\n\n # Once all of the parse results are through, group by directory\n for group in groupby_directory(flagged_records):\n yield _merge_records(group)", "def sort_name_urls(name_url_list, schl_name):\n\n\t# A dict to have (name, url) tuples as keys and the amount of papers in the relevant school\n\t# as values\n\tschool_matches = {}\n\n\tfor name_url in name_url_list: # for each author page\n\t\tschool_matches[name_url] = 0\n\t\tauthor_page_tree = get_tree(name_url[1])\n\t\t# get the <a> elements for each paper on the author's page\n\t\ta_elems = get_a_elems_for_papers(author_page_tree)\n\t\tfor a in a_elems: # for each paper\n\t\t\t# from the paper's Enlighten page, get a string indicating what school it is associated to\n\t\t\tschl_info = get_paper_school_info(a.get(\"href\"))\n\t\t\t# If the relevant school is found in the school info string, increment the value\n\t\t\t# of this (name, url) key\n\t\t\tif schl_name in schl_info:\n\t\t\t\tschool_matches[name_url] += 1\n\n\t# From dict, create list of ((name, url), numpapers) tuples sorted by value\n\tsorted_name_urls = sorted(school_matches.items(), key=operator.itemgetter(1), reverse=True)\t\n\n\treturn sorted_name_urls", "def harvest_urls():\n manifest = []\n category = {}\n subcategory = {}\n directoryfiles = \"%s/directory_listing/\" % config['PREFIX']\n # ^^ the directory containing the HTML from the Technorati site.\n\n #Set up directory for intermediate data: MANIFEST\n #MANIFEST contains: Category, Subcategory, Title and URL.\n #and is a roster of URLs of blogs to autodiscover.\n if not os.path.exists(prefix + \"meta\"):\n os.mkdir(prefix + \"meta\")\n else:\n #TO DO: What if meta exists but MANIFEST got deleted?\n logging.info(\"Blog URLs already harvested. Skipping...\")\n return\n\n #Iterate through each file in the directory and extract blog URLs.\n for infile in glob.glob(os.path.join(directoryfiles, '*.html')):\n logging.info(\"Harvesting blog URLs from %s.\" % infile)\n dirpage = file(infile)\n root = parse(dirpage).getroot()\n #Rather than infer the category from the filename, just extract\n #it from the file. 
Not the best way to do this, hit is minimal.\n\tpieces = infile.split('/')[-1].split('_')\n\tcat = pieces[1]\n\tsubcat = None\n\tif len(pieces) == 4:\n\t\tsubcat = pieces[2]\n blogs = root.xpath(\"//td[@class='site-details']\")\n #Iterate through all of the blogs listed on the page.\n for blog in blogs:\n url = blog.xpath(\"a[@class='offsite']\")[0].text\n title = blog.xpath('h3/a')[0].text\n OUT = open(prefix + \"meta/MANIFEST\", \"a\")\n #Store the category of the blog.\n category[url] = cat\n if subcat:\n output = [cat, subcat, title.encode('utf-8').replace(' ', ' '), url]\n subcategory[url] = subcat\n print >> OUT, ' '.join(output)\n else:\n output = [cat, \"NA\", title.encode('utf-8').replace(' ', ' '), url]\n print >> OUT, '\\t'.join(output)\n manifest.append(output)\n OUT.close()\n # This is a hack to get around having to use a database.\n # TODO: Reimplement using a database.\n BLOGCATS = open(prefix + \"blogcats.pickle\", \"w\")\n cPickle.dump(category, BLOGCATS)\n BLOGCATS.close()\n return manifest", "def group_items(self, items, request):\n\n if not items:\n return None\n\n compounded = {\n url for item in items for url in getattr(item, 'elections', [])\n }\n\n dates = groupbydict(\n items,\n lambda i: i.date,\n lambda i: -(as_datetime(i.date).timestamp() or 0)\n )\n\n order = {\n 'federation': 1,\n 'canton': 2,\n 'region': 3,\n 'district': 3,\n 'none': 3,\n 'municipality': 4,\n }\n mapping = {\n 'federation': 'federation',\n 'canton': 'canton',\n 'region': 'region',\n 'district': 'region',\n 'none': 'region',\n 'municipality': 'municipality',\n }\n if request.app.principal.domain == 'municipality':\n order['municipality'] = 0\n\n for date_, items_by_date in dates.items():\n domains = groupbydict(\n items_by_date,\n lambda i: mapping.get(i.domain),\n lambda i: order.get(i.domain, 99)\n )\n for domain, items_by_domain in domains.items():\n types = groupbydict(\n [\n item for item in items_by_domain\n if item.url not in compounded\n ],\n lambda i: 'vote' if i.type == 'vote' else 'election'\n )\n domains[domain] = types\n dates[date_] = domains\n\n return dates", "def folder_sort(request, item_container):\n return do_sort(request, item_container, 'folder', _(u'Ordner, Seiten etc. umordnen'))", "def unique_split_paths(paths):\n seen_paths = set()\n for path in paths:\n splits = path.split(\".\")\n split_length = len(splits)\n for i in xrange(1, split_length + 1):\n join = \".\".join(splits[:i])\n if join not in seen_paths:\n seen_paths.add(join)\n yield join", "def parse_url_files():\n a_copy = PY_FILES[::]\n for f in a_copy:\n if 'urls' in f:\n URL_FILES.append(f)\n PY_FILES.remove(f)", "def get_routes(duthost1, duthost2, collect, mg_facts):\n dut1_routes_all = get_dut_routes(duthost1, collect, mg_facts)\n dut2_routes_all = get_dut_routes(duthost2, collect, mg_facts)\n dut_1_diff_routes = list(set(dut1_routes_all).difference(set(dut2_routes_all)))\n dut_2_diff_routes = list(set(dut2_routes_all).difference(set(dut1_routes_all)))\n res1 = natsorted([route for route in dut_1_diff_routes if\n ipaddress.ip_network(route).subnet_of(ipaddress.ip_network(SUBNET_CHECK))])\n res2 = natsorted([route for route in dut_2_diff_routes if\n ipaddress.ip_network(route).subnet_of(ipaddress.ip_network(SUBNET_CHECK))])\n return {duthost1.hostname: res1, duthost2.hostname: res2}" ]
[ "0.57895535", "0.5670673", "0.5564441", "0.54175895", "0.53898895", "0.53325456", "0.53169215", "0.5315415", "0.53019637", "0.5280857", "0.52352136", "0.51627064", "0.51559484", "0.51417136", "0.5128693", "0.5127541", "0.51115924", "0.5106754", "0.50983864", "0.5086738", "0.5069748", "0.5065703", "0.5057595", "0.50548023", "0.5045553", "0.5033269", "0.50328434", "0.50280505", "0.50219524", "0.50071764" ]
0.690476
0
Compute and return some property from the config options. Use in a template as config. {{ '{{' }} config.computed_some_property {{ '}}' }}
def computed_some_property(config): return config.something + 10
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def config_value(name):\n def get():\n try:\n return config.get('yourls', name)\n except (NoOptionError, NoSectionError):\n return None\n return get", "def get_prop(name: str, optional: bool = False) -> str:\n config = os.environ.get(name, app.config.get(name))\n if not optional and not config:\n logger.error(f'It was not possible to retrieve configuration for property \"{name}\"!')\n raise EnvironmentError(f'No existing configuration for \"{name}\" found!')\n return config", "def get_property(self, key):\n _key = DJANGO_CONF[key]\n return getattr(self, _key, CONF_SPEC[_key])", "def git_config(section, prop):\n pipe = Popen('git config %s.%s' % (section, prop), stdout=PIPE, shell=True)\n value = pipe.stdout.read().strip()\n if not value:\n raise ConfigAttributeError(section, prop)\n return value", "def get_property_value(prop, paths):\n\n data = parse_config(paths)\n return data.get(prop)", "def __getattr__(self, config_property):\n\n is_build_var = config_property in self._directVariables.keys()\n is_runtime_var = config_property in self._directVariablesRuntime.keys()\n\n # For now, all unprefixed variables are also runtime variables. If that ever changes this logic will change\n # with it.\n is_unprefixed_var = config_property in self._unPrefixedVariablesRuntime.keys()\n\n if is_build_var:\n value = self[self._directVariables[config_property]]\n elif is_runtime_var:\n value = self[self._directVariablesRuntime[config_property]]\n elif is_unprefixed_var:\n value = self._environmentVariables.get(self._unPrefixedVariablesRuntime[config_property])\n else:\n raise AttributeError('No such variable defined: {}'.format(config_property))\n\n if not value:\n if self.in_build() and (is_runtime_var or is_unprefixed_var):\n raise BuildTimeVariableAccessException(\n 'The {} variable is not available during build time.'.format(config_property)\n )\n raise NotValidPlatformException(\n 'The {} variable is not defined. 
Are you sure you\\'re running on Platform.sh?'.format(config_property)\n )\n\n return value", "def getPgaasPropValue(nm, encrypted=False, cfg=\"/opt/app/pgaas/lib/pgaas.cfg\", dflt=None, skipComplaining=False):\n return getPropValue(nm=nm, encrypted=encrypted, cfg=cfg, dflt=dflt, skipComplaining=skipComplaining)", "def getPropValue(nm, encrypted=False, cfg=None, dflt=None, skipComplaining=False):\n if cfg is None:\n return None\n global getPropDict\n if getPropDict.get(cfg):\n savedDate = getPropDict[cfg]\n # trace(\"getPropValue: savedDate[\" + cfg + \"]=\" + str(savedDate))\n cfgDate = os.path.getmtime(cfg)\n # trace(\"getPropValue: cfgDate=\" + str(cfgDate))\n if float(savedDate) >= float(cfgDate): # cfg has not changed\n val = getPropDict.get(cfg + \":\" + nm)\n # trace(\"getPropValue: val=\" + val)\n if val is not None:\n # trace(\"getPropValue: getPropValue(saved) => '%s'\" % str(val))\n return val\n else: # clear out any previously saved keys\n cfgcolon = cfg + \":\"\n for k in list(getPropDict.keys()):\n if re.match(cfgcolon, k):\n del getPropDict[k]\n getPropValueProgram = '/opt/app/cdf/bin/getpropvalue'\n if encrypted:\n cmd = [getPropValueProgram, \"-f\", cfg, \"-x\", \"-n\", nm]\n else:\n cmd = [getPropValueProgram, \"-f\", cfg, \"-n\", nm]\n # trace(\"getPgaasPropValue: cmd=\" + str(cmd))\n\n try:\n with subprocess.Popen(cmd,shell=False,stdout=subprocess.PIPE,stderr=subprocess.PIPE) as p:\n (origString, stderrString) = p.communicate()\n except Exception as e:\n traceback.print_exc()\n print(\"Error decoding string because {0}\".format(e), file=errorOutput)\n return None\n else:\n if stderrString:\n if not re.search(\"Configuration property .* must be defined\", stderrString.decode('utf-8')) and not skipComplaining:\n print(\"Error decoding string because: {0} \".format(stderr), file=errorOutput)\n return dflt\n else:\n trace(\"getPgaasPropValue() => \" + str(origString), minLevel=2)\n return origString.decode('utf-8').rstrip('\\n')", "def get_config_template(self) -> cconfig.Config:", "def option_property(name, option_type, evaluate=False, cast_func=None):\n\n def bool_getter(self):\n return bool(W.config_boolean(self._option_ptrs[name]))\n\n def str_getter(self):\n if cast_func:\n return cast_func(W.config_string(self._option_ptrs[name]))\n return W.config_string(self._option_ptrs[name])\n\n def str_evaluate_getter(self):\n return W.string_eval_expression(\n W.config_string(self._option_ptrs[name]), {}, {}, {}\n )\n\n def int_getter(self):\n if cast_func:\n return cast_func(W.config_integer(self._option_ptrs[name]))\n return W.config_integer(self._option_ptrs[name])\n\n if option_type in (\"string\", \"color\"):\n if evaluate:\n return property(str_evaluate_getter)\n return property(str_getter)\n if option_type == \"boolean\":\n return property(bool_getter)\n if option_type == \"integer\":\n return property(int_getter)", "def get(section, option, boolean=False, integer=False, floating=False):\n if boolean:\n return_value = config.getboolean(section, option)\n elif integer:\n return_value = config.getint(section, option)\n elif floating:\n return_value = config.getfloat(section, option)\n else:\n return_value = config.get(section, option)\n return return_value", "def config_get(section, option):\n return __CONFIG.get(section, option)", "def getraw(self, sec, opt, default='', count=0, sub_vars=True):\n if count >= 10:\n self.logger.error(\"Could not resolve getraw - check for circular \"\n \"references in METplus configuration variables\")\n return ''\n\n # if requested section 
is in the list of sections that are no longer\n # used, look in the [config] section for the variable\n if sec in self.OLD_SECTIONS:\n sec = 'config'\n\n in_template = super().getraw(sec, opt, '')\n # if default is set but variable was not, set variable to default value\n if not in_template and default:\n self.check_default(sec, opt, default)\n return default\n\n # if not substituting values of other variables return value\n if not sub_vars:\n return in_template\n\n # get inner-most tags that could potentially be other variables\n match_list = re.findall(r'\\{([^}{]*)\\}', in_template)\n for var_name in match_list:\n # check if each tag is an existing METplus config variable\n if self.has_option(sec, var_name):\n value = self.getraw(sec, var_name, default, count+1)\n elif self.has_option('config', var_name):\n value = self.getraw('config', var_name, default, count+1)\n elif var_name.startswith('ENV'):\n # if environment variable, ENV[nameofvar], get nameofvar\n value = os.environ.get(var_name[4:-1])\n else:\n value = None\n\n if value is None:\n continue\n in_template = in_template.replace(f\"{{{var_name}}}\", value)\n\n # Replace double slash with single slash because MET config files fail\n # when they encounter double slash. This is a GitHub issue MET #1277\n # This fix will prevent using URLs with https:// so the MET issue must\n # be resolved before we can remove the replace call\n return in_template.replace('//', '/')", "def DoIt(self, host, vm, variable):\n\n vm = Operation.GetVm(host, vm)\n\n variableComponents = variable.split('.', 1)\n device = vm.GetDevice(variableComponents[0])\n if device:\n if len(variableComponents) > 1:\n return rec_getattr(device, variableComponents[1])\n else:\n return device\n\n\n value = vm.GetExtraConfig().get(variable, None)\n if value: return value\n\n return rec_getattr(vm, self.GetVmodlProperty(variable))", "def cfg_to_prop_string(cfg, key_transform=lambda k: k, value_transform=lambda v: v, separator=\";\"):\n return separator.join([\"%s:%s\" % (key_transform(key), value_transform(value)) for key, value in iteritems(cfg)])", "def git_property(self) -> Optional[pulumi.Input['ConfigurationServiceGitPropertyArgs']]:\n return pulumi.get(self, \"git_property\")", "def _opt_config(self):\n return self._opt_method.config", "def getProperty(propname):", "def _get_datastore_value_for_expression(self, key, value, config_schema_item=None):\n from st2common.services.config import deserialize_key_value\n\n config_schema_item = config_schema_item or {}\n secret = config_schema_item.get('secret', False)\n\n try:\n value = render_template_with_system_and_user_context(value=value,\n user=self.user)\n except Exception as e:\n # Throw a more user-friendly exception on failed render\n exc_class = type(e)\n original_msg = str(e)\n msg = ('Failed to render dynamic configuration value for key \"%s\" with value '\n '\"%s\" for pack \"%s\" config: %s ' % (key, value, self.pack_name, original_msg))\n raise exc_class(msg)\n\n if value:\n # Deserialize the value\n value = deserialize_key_value(value=value, secret=secret)\n else:\n value = None\n\n return value", "def get_custom_property(self):\n\t\treturn call_sdk_function('PrlVmCfg_GetCustomProperty', self.handle)", "def DoIt(self, host, vm, variable, value):\n\n vm = Operation.GetVm(host, vm)\n\n extraConfig = vm.GetExtraConfig()\n if variable in extraConfig:\n extraConfig[variable] = value\n return extraConfig.Save()", "def _get_config_value(self, section, key):\n return config.get(section, key)", "def git_property(self) 
-> Optional[pulumi.Input['ConfigServerGitPropertyArgs']]:\n return pulumi.get(self, \"git_property\")", "def get_vct_config(var):\n vct_root = get_vct_root()\n context = {\n 'var': var,\n 'source': \"\"\"\n if [ -f %(vct_root)s/vct.conf.overrides ]; then\n . %(vct_root)s/vct.conf.default\n . %(vct_root)s/vct.conf.overrides\n elif [ -f %(vct_root)s/vct.conf ]; then\n . %(vct_root)s/vct.conf\n elif [ -f %(vct_root)s/vct.conf.default ]; then\n . %(vct_root)s/vct.conf.default\n fi \"\"\" % { 'vct_root': vct_root} }\n out = run(\"bash -c '%(source)s; echo $%(var)s'\" % context, display=False, silent=False)\n return out.stdout", "def getCdfPropValue(nm, encrypted=False, cfg=\"/opt/app/cdf/lib/cdf.cfg\", dflt=None, skipComplaining=False):\n return getPropValue(nm=nm, encrypted=encrypted, cfg=cfg, dflt=dflt, skipComplaining=skipComplaining)", "def _get_config(self, state, controller, option):\n config = self.config.get(f'{controller}.{option}', Recollection)\n if config is Recollection:\n s = state.get(controller, {}).get(option, None)\n else:\n s = config\n logger.debug(\n \"_get_config(%s, %s, %s) = %s\",\n state, controller, option, s)\n return s", "def get_prop(prop, config_type=\"\", config_path=\"\"):\n\n paths = []\n\n if len(config_type):\n paths = [get_config_path(config_type, config_path)]\n else:\n paths = [get_global_config_path()]\n\n user_path = get_user_config_path()\n if os.path.exists(user_path):\n paths.append(user_path)\n\n return get_property_value(prop, paths)", "def __getattribute__(self, key):\n value = super(Config, self).__getattribute__(key)\n\n if key == \"reserved\" or key in self.reserved:\n return value\n else:\n return self.format(value, key)", "def _getConfigParam(self, name, default=None):\n return self.config.get(self._configPrefix + name.lower(), default)", "def value(self):\n\n memcached_items = memcache_services.get_multi([self.name])\n if self.name in memcached_items:\n return memcached_items[self.name]\n\n datastore_item = config_models.ConfigPropertyModel.get(\n self.name, strict=False)\n if datastore_item is not None:\n memcache_services.set_multi({\n datastore_item.id: datastore_item.value})\n return datastore_item.value\n\n return self.default_value" ]
[ "0.58720696", "0.5688264", "0.5646648", "0.5464817", "0.5446991", "0.54390156", "0.53303957", "0.53082174", "0.52847975", "0.52617794", "0.52528036", "0.5231562", "0.5209172", "0.51441497", "0.51429886", "0.51114094", "0.509669", "0.5087842", "0.5084989", "0.5078126", "0.504081", "0.49968258", "0.49896947", "0.49777332", "0.49756262", "0.4943199", "0.49403408", "0.4927049", "0.4915243", "0.49022996" ]
0.7533298
0
Validate that the driver configuration is sane/complete Return (status, message) if there is a problem or (None, None) if there are no issues. Delete this function if it's not needed.
def custom_assess_status_check(self): options = self.options # can check options.thing to ensure that it makes sense # if wrong return 'blocked', "The driver is badly configured ..." return None, None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def validateConfig(self):\n ## (boolean with the result of the validation, eventual error message)\n return (True, '')", "def _validate_config(self):\n pass", "def validate_config(self):\n pass", "def validate_config(self):\n pass", "def _check_config(self):", "def check_config():\n\n if not config_instance:\n LOG.error(\"Failed to load the config!\")\n sys.exit(9)\n\n if not hasattr(config_instance, \"CONFIG_VERSION\"):\n LOG.warning( \"The config file does not specify CONFIG_VERSION! I will \"\n \"try to continue anyway, but this field is recommended to allow \"\n \"some internal tests to work. I will assume the value '(1,0)'!\" )\n config_instance.CONFIG_VERSION = (1, 0)\n\n major, minor = config_instance.CONFIG_VERSION\n expected_major, expected_minor = EXPECTED_CONFIG_VERSION\n\n if major < expected_major:\n LOG.critical(\"The config system has undergone a major change! \"\n \"I cannot continue without an upgrade!\")\n sys.exit(9)\n\n if minor < expected_minor:\n LOG.warning(\"The config system has undergone a minor change! \"\n \"It should work, but you still should review the docs!\")\n\n if major == expected_major and minor == expected_minor:\n LOG.debug( \"Config version OK!\" )\n\n if not hasattr(config_instance, \"GENERATORS\"):\n LOG.critical(\"Variable 'GENERATORS' not found in config!\")\n sys.exit(9)\n\n if not hasattr(config_instance, \"TARGETS\"):\n LOG.critical(\"Variable 'TARGETS' not found in config!\")\n sys.exit(9)", "def _validate_configurations(self) -> None:\n if self.__exception:\n raise self.__exception", "def check_for_setup_error(self):\n super(RBDISCSIDriver, self).check_for_setup_error()\n\n required_options = ['rbd_iscsi_api_user',\n 'rbd_iscsi_api_password',\n 'rbd_iscsi_api_url',\n 'rbd_iscsi_target_iqn']\n\n for attr in required_options:\n val = getattr(self.configuration, attr)\n if not val:\n raise exception.InvalidConfigurationValue(option=attr,\n value=val)", "async def check_config(self) -> None:\n try:\n await self._check_api()\n except aiohttp.ClientError as e:\n raise ConnectionError(str(e))", "def validate_config(params, error_callback):\n local_params = dict(params)\n _validate_value_formats(local_params, error_callback)\n _validate_in_cidr(local_params, error_callback)\n _validate_dhcp_range(local_params, error_callback)\n _validate_inspection_range(local_params, error_callback)\n _validate_no_overlap(local_params, error_callback)\n _validate_ips(local_params, error_callback)\n _validate_interface_exists(local_params, error_callback)", "def validate(self):\n\n print(\"Checking for supported board.\")\n if self.board == \"\": \n sys.exit(\"Unknown board type. Exiting.\")\n\n supportedboards = supportedBoards()\n\n if not self.board in supportedboards:\n sys.exit(\"Board %s is not supported.\" % self.board)\n return False\n\n if not self.getpath(): \n sys.exit(\"%s unable to find binary file to upload in \\\n specified path or current working directory %s. 
\\\n Exiting now.\" % (errstr, str(array[0])))\n\n array = self.getfiletype()\n if not (array[0] or array[1]):\n return False\n\n self.arch = array[0]\n self.filetype = array[1]\n return True", "def test_failure_config(self):\n resource_conf = {\n \"enable_dns_support\": \"true\"\n }\n scan_result = check.scan_resource_conf(conf=resource_conf)\n self.assertEqual(CheckResult.FAILED, scan_result)", "def validate_config(self):\n log.msg('Validating config')\n # Map message types to callbacks that handle that message type\n self.dispatch_map = {\n 'Deliver': self.receive_message,\n 'Status_Report': self.receive_delivery_report,\n }\n\n self.redis_config = self.config.get('redis', {})\n self.gammu_config = self.config.get('gammu')\n self.connect_retry_interval = int(self.config.get(\n 'connect_retry_interval', 15))\n self.poll_interval = int(self.config.get('poll_interval', 60))\n self.country_code = str(self.config.get('country_code'))\n self.phone_number = str(self.config.get('phone_number'))\n self.phone = None", "def _validate_config(self):\n # Simulation ID\n empty_string_check(self._config_dict['@id'])\n \n # Output\n empty_string_check(self._config_dict['output']['@baseDirectory'])\n self._config_dict['output']['@saveInteractionLog'] = parse_boolean(self._config_dict['output']['@saveInteractionLog'])\n self._config_dict['output']['@saveRelevanceJudgments'] = parse_boolean(self._config_dict['output']['@saveRelevanceJudgments'])\n self._config_dict['output']['@trec_eval'] = parse_boolean(self._config_dict['output']['@trec_eval'])\n \n # Topics\n def check_topic(t):\n \"\"\"\n Checks a given topic, t. Looks for a topic ID and a valid topic description file.\n \"\"\"\n empty_string_check(t['@id'])\n filesystem_exists_check(t['@filename'])\n filesystem_exists_check(t['@qrelsFilename'])\n \n if '@backgroundFilename' in t: # A background file was specified.\n filesystem_exists_check(t['@backgroundFilename'])\n else:\n t['@backgroundFilename'] = None # No background file was specified.\n \n topics = self._config_dict['topics']['topic']\n \n if type(topics) == list:\n for topic in topics:\n check_topic(topic)\n else:\n check_topic(topics)\n \n # Users\n users = self._config_dict['users']['user']\n \n if type(users) == list:\n for user in users:\n filesystem_exists_check(user['@configurationFile'])\n else:\n filesystem_exists_check(users['@configurationFile'])\n \n # Search Interface\n empty_string_check(self._config_dict['searchInterface']['@class'])\n check_attributes(self._config_dict['searchInterface'])", "def _verify_options(config: configuration.Config) -> None:\n\n if not config.config['species']:\n log._logger.error('You must specify a species (-s/--species)')\n exit(1)\n\n if config.config['hpc'] and config.config['local']:\n log._logger.error('You can only use one of the config options (hpc/local)')\n exit(1)\n\n if config.config['hpc'] and config.config['custom']:\n log._logger.error('You can only use one of the config options (hpc/custom)')\n exit(1)\n\n if config.config['local'] and config.config['custom']:\n log._logger.error('You can only use one of the config options (local/custom)')\n exit(1)\n\n if (not config.config['hpc']) and\\\n (not config.config['local']) and\\\n (not config.config['custom']):\n log._logger.error(\n 'You must specify a compute cluster environment (hpc/local/custom)'\n )\n exit(1)\n\n if config.config['custom'] and (not config.config['scheduler']):\n log._logger.error(\n 'The custom compute environment requires a scheduler address to be set'\n )\n exit(1)", 
"def check_config(config):\n pass", "def __check_configuration__(self, parser):\n if not parser.has_section('core'):\n self.logger.error('The config file should contain a core section with at least the module_path specified')\n sys.exit(1)\n\n else:\n if parser.get('core', 'modules_path', fallback=None) is None:\n self.logger.error('The configuration file should contain at least the modules_path value in core section.')\n sys.exit(1)\n\n if not parser.has_section('mysql'):\n self.logger.error('The config file should contain a mysql section.')\n sys.exit(1)\n\n else:\n if parser.get('mysql', 'host', fallback=None) is None:\n self.logger.error('The config file should contain the host value in mysql section.')\n sys.exit(1)\n\n if parser.get('mysql', 'port', fallback=None) is None:\n self.logger.error('The config file should contain the port value in mysql section.')\n sys.exit(1)\n\n if parser.get('mysql', 'user', fallback=None) is None:\n self.logger.error('The config file should contain the user in mysql section.')\n sys.exit(1)\n\n if parser.get('mysql', 'password', fallback=None) is None:\n self.logger.error('The config file should contain the password of the user in mysql section.')\n sys.exit(1)\n\n if parser.get('mysql', 'server_id', fallback=None) is None:\n self.logger.error('The config file should contain the server_id in mysql section.')\n sys.exit(1)\n\n if parser.get('mysql', 'tables', fallback=None) is not None:\n tables = [table.strip() for table in parser.get('mysql', 'tables').split(',')]\n for table in tables:\n if not parser.has_section(table):\n self.logger.error('The config file should contain a section about the table : %s' % table)\n exit(1)\n if parser.get(table, 'index_label', fallback=None) is None :\n self.logger.error('The config file should contain a table section with a index_label value.')\n exit(1)\n else:\n self.logger.error('The config file should contain a tables value with all the tables to replicate.')\n exit(1)", "def state_failsafe_validate(cfg, app, win, events):", "def check_configuration(self):\n self.ensure_one()\n getattr(self, '%s_check_configuration' % self.provider, lambda: None)()", "def __check(self):\n if self.probid is None:\n raise ProblemConfigError('problem %s has no probid' % self.problem_name)\n if self.color is None:\n raise ProblemConfigError('problem %s has no color' % self.problem_name)\n if self.samples is None:\n raise ProblemConfigError('problem %s has no sample' % self.problem_name)", "def validate_config(self):\n\n # LOCALHOST\n if self.location == 'localhost':\n if 'browserName' not in self.config.keys():\n msg = \"Add the 'browserName' in your local_config: e.g.: 'Firefox', 'Chrome', 'Safari'\" # noqa\n self.runner.critical_log(msg)\n raise BromeBrowserConfigException(msg)\n\n # EC2\n elif self.location == 'ec2':\n self.validate_ec2_browser_config()\n\n # VIRTUALBOX\n elif self.location == 'virtualbox':\n self.validate_virtualbox_config()", "def validate_config(self):\n config = self.config\n\n # which doc types are enabled\n need_at_least_one = ['GOOGLE_DRIVE_ENABLED','GITHUB_ENABLED','DISQUS_ENABLED']\n found_one = False\n for n in need_at_least_one:\n if n in config.keys():\n found_one = True\n break\n if not found_one:\n raise Exception(\"Error: need at least one of: %s\"%(\", \".join(need_at_least_one)))\n\n if 'GOOGLE_DRIVE_ENABLED' in config.keys():\n if config['GOOGLE_DRIVE_ENABLED']:\n if 'GOOGLE_DRIVE_CREDENTIALS_FILE' in config.keys():\n if os.path.basename(config['GOOGLE_DRIVE_CREDENTIALS_FILE']) != 
'credentials.json':\n raise Exception(\"Error: the file specified with GOOGLE_DRIVE_CREDENTIALS_FILE in the config file must have a filename of 'credentials.json'\")", "def check_for_setup_error(self):\n\n # If configuration is incorrect we will get exception here\n self._rpc_call('bdev_get_bdevs')", "def check_errors(self) -> None:", "def _validate_config(self, conf: Dict[str, Any]) -> Dict[str, Any]:\n try:\n validate(conf, constant.CONF_SCHEMA, Draft4Validator)\n return conf\n except ValidationError as exception:\n logger.critical(\n 'Invalid configuration. See config.json.example. Reason: %s',\n exception\n )\n raise ValidationError(\n best_match(Draft4Validator(constant.CONF_SCHEMA).iter_errors(conf)).message\n )", "def _validate_main_config(self):\n # check for required top-level parameters in main config\n required_params = {\"name\": str, \"version\": str, \"datasets\": list}\n\n for param, expected_type in required_params.items():\n if param not in self.config:\n msg = (\n \"[ERROR] Config error: missing required configuration parameter in {}: '{}'\"\n )\n config_file = os.path.basename(self.config[\"config_file\"])\n sys.exit(msg.format(config_file, param))\n elif not isinstance(self.config[param], expected_type):\n msg = \"[ERROR] Config error: parameter is of unexpected type {}: '{}' (expected: '{}')\"\n config_file = os.path.basename(self.config[\"config_file\"])\n sys.exit(msg.format(config_file, param, expected_type))", "def test_get_driverStatus(self):\n for app_num, servo_type in app_nr.items():\n try:\n par = self.get_parameter(servo_type, app_num, DRIVER_STATUS_IDX, DRIVER_STATUS_SUB)\n param_obj = self.__dict__[servo_type]._get_driverStatus()\n acs_par, completion = param_obj.get_sync()\n if(completion.code):\n print \"\\nError code found in driverStatus...\"\n continue\n self.data_match(acs_par, par)\n except NackEx:\n continue", "def validate_config_dict(self):\n config_options = [\"pipeline_name\",\n \"num_processors\",\n \"num_sessions_at_once\",\n \"available_memory\",\n \"cluster_system\",\n \"output_directory\",\n \"working_directory\",\n \"template_head_for_anat\",\n \"exclude_zeros\",\n \"start_idx\",\n \"stop_idx\",\n \"write_report\",\n \"write_graph\",\n \"write_all_outputs\",\n \"upload_to_s3\",\n \"bucket_prefix\",\n \"bucket_out_prefix\",\n \"local_prefix\",\n \"bucket_name\",\n \"creds_path\"]\n invalid = []\n for param in self._config.keys():\n if param not in config_options:\n invalid.append(param)\n if len(invalid) > 0:\n err = \"\\n[!] The following parameters in your configuration \" \\\n \"file are not recognized. Double-check the pipeline \" \\\n \"configuration template.\\n\"\n err += \"\\n\".join([x for x in invalid])\n raise Exception(err)\n else:\n return 0", "def check_for_setup_error(self):\r\n self.helper._check_conf_file()\r\n self.helper._check_service()", "def check_config(cfg):" ]
[ "0.64212245", "0.608745", "0.60780764", "0.60780764", "0.5963149", "0.5936095", "0.58347476", "0.5826374", "0.5778542", "0.5762001", "0.5713054", "0.56735516", "0.5649959", "0.56488603", "0.5648123", "0.5642532", "0.5586238", "0.55713135", "0.55353975", "0.55154055", "0.54976046", "0.54888356", "0.5457509", "0.54537463", "0.5407607", "0.5403656", "0.5394255", "0.539306", "0.5382641", "0.5374795" ]
0.6580841
0
Assuming that the configuration data is valid, return the configuration data for the principal charm.
def get_config_for_principal(self, auth_data): # If there is no auth_data yet, then we can't write our config. if not auth_data: return {} # If the state from the assess_status is not None then we're blocked, # so don't send any config to the principal. state, message = self.custom_assess_status_check() if state: return {} # Do any further checking on options that might be needed? options = self.options # tiny optimisation for less typing. if not options.some_option: return {} # We have the config that is reasonably sensible. # We can now render the config file segment. manila_plugin = charms.reactive.RelationBase.from_state( 'manila-plugin.available') self.adapters_instance.add_relation(manila_plugin) # Render the config files needed. Here it's just MANILA_CONF # Change the template as needed for the configuration. rendered_configs = charmhelpers.core.templating.render( source=os.path.basename(MANILA_CONF), template_loader=os_templating.get_loader( 'templates/', self.release), target=None, context=self.adapters_instance) return { MANILA_CONF: rendered_configs }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_config_data(self, cr, uid):\n\n model_conf = self.pool.get('customer.support.settings')\n args = [('selected', '=', True)] \n ids = model_conf.search(cr, uid, args)\n config = model_conf.browse(cr, uid, ids[0])\n\n return {\n 'tor_api_key': config.tor_api_key,\n 'tor_domain': config.tor_domain,\n 'company': config.company\n }", "def get_config(self):\n if self.allow_reco():\n return self.chs_config()\n else:\n return self.get_config_j(self.id)", "def get_configuration():\r\n if not hasattr(CURRENT_REQUEST_CONFIGURATION, 'data'):\r\n return {}\r\n\r\n return CURRENT_REQUEST_CONFIGURATION.data", "def get_details(self):\n return self.__config_data", "def config_data(self):\n if self._key not in self._user_keys():\n raise CouldNotFindUserException(self._key, self._user_keys())\n return next(data for data in self.CONFIG_DATA if data.key == self._key)", "def get_config(self):\n\n # make sure that the config reflects the state of the underlying logic\n self.logic_to_config()\n # and then return the config struct.\n return self._config", "def get_config_on_json(self):\n # load section CONFIG from data\n try:\n return self.json_data[\"CONFIG\"]\n except:\n constant.get_error(constant.ERROR_004)", "def getConfig(self):\n return self.cp", "def get_config(self):\n config = {\n 'membership_transform': self.membership_transform,\n 'predictions_transform': self.predictions_transform,\n 'membership_kernel': self.membership_kernel,\n 'predictions_kernel': self.predictions_kernel,\n 'name': self.name,\n }\n config = {k: v for k, v in config.items() if v is not None}\n return self._serialize_config(config)", "def get_configuration(self) -> dict:\n return self.config", "def get(self):\n lc = self._client.describe_launch_configurations(LaunchConfigurationNames=[self._name])\n if len(lc[\"LaunchConfigurations\"]) == 0:\n return None\n else:\n config = lc[\"LaunchConfigurations\"][0]\n config[\"UserData\"] = base64.b64decode(config[\"UserData\"])\n return config", "def get_config(self):\n if self.faucet is not None:\n return self.faucet.get_config()\n return None", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config" ]
[ "0.59209836", "0.5861069", "0.54529774", "0.542924", "0.5428179", "0.5417673", "0.54086673", "0.5405804", "0.5397979", "0.53960115", "0.5393643", "0.532532", "0.53216976", "0.53216976", "0.53216976", "0.53216976", "0.53216976", "0.53216976", "0.53216976", "0.53216976", "0.53216976", "0.53216976", "0.53216976", "0.53216976", "0.53216976", "0.53216976", "0.53216976", "0.53216976", "0.53216976", "0.53216976" ]
0.66414106
0
Test requesting an empty, default node using a Component.
def testInfoEmptyDefaultNodeComponent(self): self.stream_start(mode='component', jid='tester.localhost', plugins=['xep_0030']) self.recv(""" <iq type="get" id="test"> <query xmlns="http://jabber.org/protocol/disco#info" /> </iq> """) self.send(""" <iq type="result" id="test"> <query xmlns="http://jabber.org/protocol/disco#info"> <identity category="component" type="generic" /> <feature var="http://jabber.org/protocol/disco#info" /> </query> </iq> """)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_init_empty_node():\n from dll import Node\n new_node = Node()\n assert new_node.value is None", "def test_empty_node():\n try:\n Node({})\n except Exception as e:\n assert str(e) == 'input info has more than 1 entry!'\n # create node with empty connection\n try:\n node_b = Node({'A':[]})\n except Exception:\n assert False\n assert node_b.name == 'A'", "def test_node_instantiation(create_empty_node):\n from linked_list import Node\n assert create_empty_node.value is None", "def testInit(self):\n\n self.assertEqual(\n [],\n self.node.desc\n )", "def test_get_component_with_default_key():\n\n default_component = application_services.get_component('database.component')\n assert application_services.get_component('database.component',\n component_custom_key=DEFAULT_COMPONENT_KEY) \\\n == default_component", "def testInfoEmptyDefaultNode(self):\n self.stream_start(mode='client',\n plugins=['xep_0030'])\n\n self.recv(\"\"\"\n <iq type=\"get\" id=\"test\">\n <query xmlns=\"http://jabber.org/protocol/disco#info\" />\n </iq>\n \"\"\")\n\n self.send(\"\"\"\n <iq type=\"result\" id=\"test\">\n <query xmlns=\"http://jabber.org/protocol/disco#info\">\n <identity category=\"client\" type=\"bot\" />\n <feature var=\"http://jabber.org/protocol/disco#info\" />\n </query>\n </iq>\n \"\"\")", "def test_emptyInstanceID(self) :\n\t\tself.assertRaises(ValueError, lambda: Node(\"\"))", "def default_visit(self, node):\n pass", "def test__get_component_server_default(self):\n s = self.u._get_component_server('a')\n self.assertEqual(self.u.repository_server, s.mock_server)\n self.assertEqual(self.u.repository_port, s.mock_port)", "def test_node_exists():\n assert Node", "def test_node_exists():\n assert Node", "def test_is_component_default_package_installed_MISSING(self):\n self._ucr({\n 'repository/online/component/b/defaultpackage': 'b',\n })\n self.assertFalse(self.u.is_component_defaultpackage_installed('b'))", "def test_bst_empty_root(bst_empty):\n assert bst_empty.root == None", "def test_empty_networkx(self):\n g = nx.DiGraph()\n GraphData.from_networkx(g)", "def test_node_no_config() -> None:\n node = MyNode()\n\n with pytest.raises(LabGraphError) as err:\n node.setup()\n\n assert (\n \"Configuration not set. 
Call MyNode.configure() to set the configuration.\"\n in str(err.value)\n )", "def _dummy_node(self) -> CFNode:\n node = CFNode()\n self._graph.add_node(node)\n return node", "def test_no_default_value(self):\n dim = Dimension(\"yolo\", \"uniform\", -3, 4)\n assert dim.default_value is None", "def test_blank_nodes(self):\n self.graph.add((artis, RDFS.seeAlso, anonymous))\n self.graph.add((anonymous, RDF.type, zoo))\n\n triple = list(self.graph.triples((None, None, zoo)))[0]\n self.assertTupleEqual(triple, (anonymous, RDF.type, zoo))", "def test_empty(self):\n pass", "def test_with_default() -> None:\n soup = generate_case(\"with_default\")\n\n tests.html_schema_doc_asserts.assert_default_values(soup, ['\"Linux\"', '[\"white\", \"blue\"]', \"2\"])", "def test_get_component_defaultpackage(self):\n self._ucr({\n 'repository/online/component/b/defaultpackage': 'b',\n 'repository/online/component/c/defaultpackages': 'ca cb',\n 'repository/online/component/d/defaultpackages': 'da,db',\n })\n self.assertEqual(set(('b',)), self.u.get_component_defaultpackage('b'))\n self.assertEqual(set(('ca', 'cb')), self.u.get_component_defaultpackage('c'))\n self.assertEqual(set(('da', 'db')), self.u.get_component_defaultpackage('d'))", "def test_show_default_node_as(self, m_stdout, m_client):\n # Set up mock objects\n expected_return = '15'\n m_client.get_default_node_as.return_value = expected_return\n\n # Call method under test\n show_default_node_as()\n\n # Assert\n m_client.get_default_node_as.assert_called_once_with()\n self.assertEqual(m_stdout.getvalue().strip(), expected_return)", "def test_initialization_with_empty_list_first_node_check():\n queue = Queue([])\n assert queue._queue.first_node is None", "def test_set_default_node_as(self, m_client):\n # Call method under test\n set_default_node_as(1)\n\n # Assert\n m_client.set_default_node_as.assert_called_once_with(1)", "def test_bst_single_node():\n assert BST(1).root is None", "def testDefault(self):\n for val in (1, {}, 'test', None):\n prop = make_prop(default=val)\n self.assertEqual(val, prop.interpret(recipe_api.PROPERTY_SENTINEL, {}))", "def test_default_property_head(empty_list):\n assert empty_list.head is None\n assert empty_list._length == 0", "def test_get_component_ON(self):\n self._ucr({\n 'repository/online/component/a': 'yes',\n 'repository/online/component/a/foo': 'bar',\n })\n c = self.u.get_component('a')\n self.assertEqual({'name': 'a', 'activated': True, 'foo': 'bar'}, c)", "def test_set_node_not_component(self):\n payload = {\n 'data': [\n {\n 'type': 'nodes',\n 'id': self.project_two._id\n },\n ]\n }\n res = self.app.post_json_api(self.url, payload, auth=self.user.auth, expect_errors=True)\n assert_equal(res.status_code, 400)\n assert_equal(res.json['errors'][0]['detail'], 'The node {0} cannot be affiliated with this View Only Link because the node you\\'re trying to affiliate is not descended from the node that the View Only Link is attached to.'.format(self.project_two._id))", "def test_missingId(self):\n node = Node()\n node.properties[\"datawire_nodeId\"] = \"4567\"\n self.assertEqual(node.getId(), \"4567\")" ]
[ "0.66112286", "0.6523052", "0.6461104", "0.61584496", "0.59600675", "0.59370893", "0.5814065", "0.5803736", "0.5758269", "0.57452965", "0.57452965", "0.5716095", "0.5696055", "0.5685341", "0.56674856", "0.5646907", "0.56448424", "0.55874735", "0.5577212", "0.5576971", "0.55742747", "0.55592746", "0.5544286", "0.5539288", "0.55384475", "0.5537887", "0.55215245", "0.5520602", "0.5498934", "0.5495276" ]
0.6988443
0
Results for info queries directed to a particular node MUST include the node in the query response.
def testInfoIncludeNode(self): self.stream_start(mode='client', plugins=['xep_0030']) self.xmpp['xep_0030'].static.add_node(node='testing') self.recv(""" <iq to="tester@localhost/resource" type="get" id="test"> <query xmlns="http://jabber.org/protocol/disco#info" node="testing" /> </iq> """) self.send(""" <iq type="result" id="test"> <query xmlns="http://jabber.org/protocol/disco#info" node="testing"> </query> </iq>""", method='mask')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __disco_info(self,iq):\n q=iq.get_query()\n if q.hasProp(\"node\"):\n node=from_utf8(q.prop(\"node\"))\n else:\n node=None\n info=self.disco_get_info(node,iq)\n if isinstance(info,DiscoInfo):\n resp=iq.make_result_response()\n self.__logger.debug(\"Disco-info query: %s preparing response: %s with reply: %s\"\n % (iq.serialize(),resp.serialize(),info.xmlnode.serialize()))\n resp.set_content(info.xmlnode.copyNode(1))\n elif isinstance(info,Stanza):\n resp=info\n else:\n resp=iq.make_error_response(\"item-not-found\")\n self.__logger.debug(\"Disco-info response: %s\" % (resp.serialize(),))\n self.stream.send(resp)", "def get_info_by_node(conn, node): \n cur = conn.cursor()\n cur.execute(\"SELECT * FROM Info WHERE NodeID=?\", (node))", "def testInfoEmptyDefaultNode(self):\n self.stream_start(mode='client',\n plugins=['xep_0030'])\n\n self.recv(\"\"\"\n <iq type=\"get\" id=\"test\">\n <query xmlns=\"http://jabber.org/protocol/disco#info\" />\n </iq>\n \"\"\")\n\n self.send(\"\"\"\n <iq type=\"result\" id=\"test\">\n <query xmlns=\"http://jabber.org/protocol/disco#info\">\n <identity category=\"client\" type=\"bot\" />\n <feature var=\"http://jabber.org/protocol/disco#info\" />\n </query>\n </iq>\n \"\"\")", "def query_info(self, nid):\r\n depth = 0\r\n info = []\r\n if nid not in self._nodes:\r\n return [], False\r\n\r\n def inner(_id, _depth):\r\n nname = self._nodes[_id].get_node_name\r\n cids = self._nodes[_id].get_children_ids()\r\n nparent_id = self._nodes[_id].get_parent_id()\r\n info.append((_id, nname, nparent_id, _depth))\r\n for i, v in enumerate(cids):\r\n if i == 0:\r\n _depth += 1\r\n inner(v, _depth)\r\n\r\n inner(nid, depth)\r\n return info, True", "def trackingQuery(self, node, REQUEST=None, **kw):\n node = self.getQueryAnchor(node)\n return self.localQuery(node, REQUEST, **kw)", "def get_node_details(self, node):\n node_details = self.parser.find_server_by_ip(node.get('ip')) or \\\n self.parser.find_server_by_hostname(node.get('host'))\n\n return node_details", "def get(self, request, nnid, wfver, desc):\n try:\n return_data = NNCommonManager().get_nn_node_info(nnid, wfver, desc)\n return Response(json.dumps(return_data))\n except Exception as e:\n return_data = {\"status\": \"404\", \"result\": str(e)}\n return Response(json.dumps(return_data))", "def get(self, request, nnid):\n try:\n condition = {}\n condition['nn_id'] = nnid\n if str(nnid).lower() == 'all':\n condition['nn_id'] = '%'\n elif str(nnid).lower() == 'seq':\n condition['nn_id'] = 'seq'\n return_data = NNCommonManager().get_nn_info(condition)\n logging.info(return_data)\n # Data node name\n graph = NNCommonManager.get_nn_node_name(None, nnid)\n\n return_param = {}\n return_param['fields'] = return_data\n return_param['graph'] = graph\n return Response(json.dumps(return_param, cls=DjangoJSONEncoder))\n except Exception as e:\n return_data = {\"status\": \"404\", \"result\": str(e)}\n return Response(json.dumps(return_param, cls=DjangoJSONEncoder))", "def cli(ctx, query):\n query = query.split('/')\n get_info(query)", "def testDynamicInfoJID(self):\n self.stream_start(mode='client',\n plugins=['xep_0030'])\n\n def dynamic_jid(jid, node, ifrom, iq):\n result = self.xmpp['xep_0030'].stanza.DiscoInfo()\n result['node'] = node\n result.add_identity('client', 'console', name='Dynamic Info')\n return result\n\n self.xmpp['xep_0030'].set_node_handler('get_info',\n jid='tester@localhost',\n handler=dynamic_jid)\n\n self.recv(\"\"\"\n <iq type=\"get\" id=\"test\" to=\"tester@localhost\">\n <query 
xmlns=\"http://jabber.org/protocol/disco#info\"\n node=\"testing\" />\n </iq>\n \"\"\")\n\n self.send(\"\"\"\n <iq type=\"result\" id=\"test\">\n <query xmlns=\"http://jabber.org/protocol/disco#info\"\n node=\"testing\">\n <identity category=\"client\"\n type=\"console\"\n name=\"Dynamic Info\" />\n </query>\n </iq>\n \"\"\")", "def disco_get_info(self,node,iq):\n to=iq.get_to()\n if to and to!=self.jid:\n return iq.make_error_response(\"recipient-unavailable\")\n if not node and self.disco_info:\n return self.disco_info\n return None", "def localQuery(self,node,REQUEST=None, **kw):\n kw['path'] = '/'.join(node.getPhysicalPath())\n return ZCatalog.searchResults(self, REQUEST, **kw)", "def GetNodeInfo(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def query_nodes(self):\n query_nodes = []\n for node in self.nodes:\n if 'querynode' == node.get('infos').get('type'):\n query_nodes.append(node)\n return query_nodes", "def t_info_process(self, *args, **kwargs):\n\n self.dp.qprint(\"In info process...\")\n\n d_request = {}\n d_ret = {}\n b_status = False\n hits = 0\n for k, v in kwargs.items():\n if k == 'request': d_request = v\n\n d_search = self.t_search_process(request = d_request)['d_ret']\n\n p = self._ptree\n for j in d_search.keys():\n d_j = d_search[j]\n for job in d_j.keys():\n str_pathStart = '/api/v1/' + job + '/startInfo'\n str_pathEnd = '/api/v1/' + job + '/endInfo'\n d_ret[str(hits)+'.0'] = {}\n d_ret[str(hits)+'.0'] = self.DB_get(path = str_pathStart)\n d_ret[str(hits)+'.1'] = {}\n d_ret[str(hits)+'.1'] = self.DB_get(path = str_pathEnd)\n hits += 1\n if not hits:\n d_ret = {\n \"-1\": {\n \"noJobFound\": {\n \"endInfo\": {\"allJobsDone\": None}\n }\n }\n }\n else:\n b_status = True\n return {\"d_ret\": d_ret,\n \"status\": b_status}", "def testInfoEmptyDefaultNodeComponent(self):\n self.stream_start(mode='component',\n jid='tester.localhost',\n plugins=['xep_0030'])\n\n self.recv(\"\"\"\n <iq type=\"get\" id=\"test\">\n <query xmlns=\"http://jabber.org/protocol/disco#info\" />\n </iq>\n \"\"\")\n\n self.send(\"\"\"\n <iq type=\"result\" id=\"test\">\n <query xmlns=\"http://jabber.org/protocol/disco#info\">\n <identity category=\"component\" type=\"generic\" />\n <feature var=\"http://jabber.org/protocol/disco#info\" />\n </query>\n </iq>\n \"\"\")", "def getInfo(self, id):\n facade = self._getFacade()\n monitor = facade.get(id)\n data = Zuul.marshal(ITreeNode(monitor))\n return DirectResponse.succeed(data=data)", "def info_request():\n return SentmanRequest(SentmanRequest.GET_INFO)", "def _process_info_response(self, message):\n self.stick.logger.debug(\"Response info message for node %s\", self.get_mac())\n self.set_available(True)\n if message.relay_state.serialize() == b\"01\":\n if not self._relay_state:\n self._relay_state = True\n self.do_callback(SWITCH_RELAY[\"id\"])\n else:\n if self._relay_state:\n self._relay_state = False\n self.do_callback(SWITCH_RELAY[\"id\"])\n self._hardware_version = message.hw_ver.value.decode(UTF8_DECODE)\n self._firmware_version = message.fw_ver.value\n self._node_type = message.node_type.value\n self._last_info_message = message.timestamp\n if self._last_log_address != message.last_logaddr.value:\n self._last_log_address = message.last_logaddr.value\n self._last_log_collected = False\n self.stick.logger.debug(\"Node type = %s\", self.get_node_type())\n if not self.is_sed:\n self.stick.logger.debug(\"Relay state 
= %s\", str(self._relay_state))\n self.stick.logger.debug(\"Hardware version = %s\", str(self._hardware_version))\n self.stick.logger.debug(\"Firmware version = %s\", str(self._firmware_version))", "def _query_info(self, entities: List[str]):\n self.player.respond(entities)", "def info(self, node_uuid):\n if node_uuid is None:\n return None\n uri = '{}/{}'.format(self.store.aroot, node_uuid)\n infos = self.store.actual.resolve(uri)\n if infos is None:\n return None\n return json.loads(infos)", "def testDynamicInfoGlobal(self):\n self.stream_start(mode='component',\n jid='tester.localhost',\n plugins=['xep_0030'])\n\n def dynamic_global(jid, node, ifrom, iq):\n result = self.xmpp['xep_0030'].stanza.DiscoInfo()\n result['node'] = node\n result.add_identity('component', 'generic', name='Dynamic Info')\n return result\n\n self.xmpp['xep_0030'].set_node_handler('get_info',\n handler=dynamic_global)\n\n self.recv(\"\"\"\n <iq type=\"get\" id=\"test\"\n to=\"[email protected]\"\n from=\"tester@localhost\">\n <query xmlns=\"http://jabber.org/protocol/disco#info\"\n node=\"testing\" />\n </iq>\n \"\"\")\n\n self.send(\"\"\"\n <iq type=\"result\" id=\"test\"\n to=\"tester@localhost\"\n from=\"[email protected]\">\n <query xmlns=\"http://jabber.org/protocol/disco#info\"\n node=\"testing\">\n <identity category=\"component\"\n type=\"generic\"\n name=\"Dynamic Info\" />\n </query>\n </iq>\n \"\"\")", "def pcp_node_info(self, nid):\n\n\t\tif self.PCPConnectionStatus() != ConnStateType.OK:\n\t\t\tself.pcp_internal_error('invalid PCP connection')\n\t\t\treturn None\n\n\t\tnode_id = str(nid)\n\n\t\tself._PCPWrite('I'.encode(), 1)\n\t\twsize = self.int_to_bytes(len(node_id) + 1 + 4)\n\t\tself._PCPWrite(wsize, 4)\n\t\tself._PCPWrite(node_id.encode() + NULL, len(node_id) + 1)\n\t\tif self.PCPFlush() < 0:\n\t\t\treturn None\n\t\tif self.Pfdebug:\n\t\t\tself.Pfdebug.write(f'DEBUG: send: tos=\"I\", length={self.bytes_to_int(wsize)}\\n')\n\n\t\treturn self._process_pcp_response('I')", "def _request_info(self, callback=None):\n self.stick.send(\n NodeInfoRequest(self.mac),\n callback,\n )", "def get(self, node, about_node):\n if not node:\n return\n for router in self.router.routers:\n if router.node == node:\n for _ in router.render_peers():\n if _['node'] == about_node.threeple:\n return _", "def info(self):\n path = self._get_path('info')\n \n response = self._GET(path)\n self._set_attrs_to_values(response)\n return response", "def get_info(self, response):\n try:\n if re.search('artist/\\d+', response.url) or \\\n re.search('i\\.xiami\\.com/[^/]+$', response.url):\n self.get_artist(response)\n elif re.search('album/\\d+', response.url):\n self.get_albums(response)\n elif re.search('song/\\d+', response.url):\n self.get_songs(response)\n elif 'count/getplaycount' in response.url:\n self.get_count(response)\n else:\n self.get_pages(response)\n except (AttributeError, TypeError):\n return\n request = self.gen_info(response)\n if not request:\n self.save(response.meta['source_id'],\n response.meta['raw_info'],\n response.meta['result'])\n else:\n yield request", "def perform_query(index_node, query_type):\n\n when = now()\n\n resp = do_query(index_node.name,\n [filter.value for filter in query_type.filters.all()],\n [facet.name for facet in query_type.facets.all()])\n\n client, _ = Host.objects.get_or_create(name=gethostname())\n \n response = Response.objects.create(\n index_node=index_node,\n status_code=resp['status_code'],\n datetime=when,\n client=client,\n query_type=query_type)\n\n if response.status_code 
== 200:\n \n response_data = ResponseData.objects.create(\n num_found=resp['num_found']\n )\n for facet_name, counts in resp['facet_counts'].items():\n for value, count in counts.items():\n facet, _ = Facet.objects.get_or_create(name=facet_name)\n facet_value, _ = \\\n FacetValue.objects.get_or_create(facet=facet,\n value=value)\n facet_value_count = \\\n FacetValueCount.objects.create(count=count,\n facet_value=facet_value,\n response_data=response_data)\n response.data = response_data\n response.save()", "def testOverrideJIDInfoHandler(self):\n self.stream_start(mode='client',\n plugins=['xep_0030'])\n\n def dynamic_jid(jid, node, ifrom, iq):\n result = self.xmpp['xep_0030'].stanza.DiscoInfo()\n result['node'] = node\n result.add_identity('client', 'console', name='Dynamic Info')\n return result\n\n self.xmpp['xep_0030'].set_node_handler('get_info',\n jid='tester@localhost',\n handler=dynamic_jid)\n\n\n self.xmpp['xep_0030'].restore_defaults(jid='tester@localhost',\n node='testing')\n\n self.xmpp['xep_0030'].add_identity(jid='tester@localhost',\n node='testing',\n category='automation',\n itype='command-list')\n\n self.recv(\"\"\"\n <iq type=\"get\" id=\"test\" to=\"tester@localhost\">\n <query xmlns=\"http://jabber.org/protocol/disco#info\"\n node=\"testing\" />\n </iq>\n \"\"\")\n\n self.send(\"\"\"\n <iq type=\"result\" id=\"test\">\n <query xmlns=\"http://jabber.org/protocol/disco#info\"\n node=\"testing\">\n <identity category=\"automation\"\n type=\"command-list\" />\n </query>\n </iq>\n \"\"\")", "def get_host_info(self):\n\n if len(self.index) == 0:\n # Need to load index from cache\n self.load_index_from_cache()\n\n if not self.args.host in self.index:\n # try updating the cache\n self.do_api_calls_update_cache()\n if not self.args.host in self.index:\n # host might not exist anymore\n return self.json_format_dict({}, True)\n\n node_id = self.index[self.args.host]\n print \"NODE ID %s\" % node_id\n print \"INDEX: %s\" % self.index\n\n node = self.get_node(node_id)\n node_vars = {}\n for direct_attr in [\n \"api_id\",\n \"datacenter_id\",\n \"label\",\n \"display_group\",\n \"create_dt\",\n \"total_hd\",\n \"total_xfer\",\n \"total_ram\",\n \"status\",\n \"alert_cpu_enabled\",\n \"alert_cpu_threshold\",\n \"alert_diskio_enabled\",\n \"alert_diskio_threshold\",\n \"alert_bwin_enabled\",\n \"alert_bwin_threshold\",\n \"alert_bwout_enabled\",\n \"alert_bwout_threshold\",\n \"alert_bwquota_enabled\",\n \"alert_bwquota_threshold\",\n \"backup_weekly_daily\",\n \"backup_window\",\n \"watchdog\"\n ]:\n node_vars[direct_attr] = getattr(node, direct_attr)\n\n node_vars[\"datacenter_city\"] = self.get_datacenter_city(node)\n node_vars[\"public_ip\"] = [addr.address for addr in node.ipaddresses if addr.is_public][0]\n\n return self.json_format_dict(node_vars, True)" ]
[ "0.6733756", "0.6596269", "0.6055855", "0.5941904", "0.5860143", "0.5851458", "0.58275557", "0.57836586", "0.5761637", "0.5755889", "0.5730647", "0.56020886", "0.5503883", "0.55025244", "0.5459274", "0.5458452", "0.54305786", "0.54279375", "0.53848815", "0.53615916", "0.53319067", "0.53249866", "0.5320195", "0.531939", "0.5295513", "0.5252645", "0.5246249", "0.52211523", "0.5200221", "0.51870084" ]
0.7001901
0
Results for items queries directed to a particular node MUST include the node in the query response.
def testItemsIncludeNode(self): self.stream_start(mode='client', plugins=['xep_0030']) self.xmpp['xep_0030'].static.add_node(node='testing') self.recv(""" <iq to="tester@localhost/resource" type="get" id="test"> <query xmlns="http://jabber.org/protocol/disco#items" node="testing" /> </iq> """) self.send(""" <iq type="result" id="test"> <query xmlns="http://jabber.org/protocol/disco#items" node="testing"> </query> </iq>""", method='mask')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def localQuery(self,node,REQUEST=None, **kw):\n kw['path'] = '/'.join(node.getPhysicalPath())\n return ZCatalog.searchResults(self, REQUEST, **kw)", "def __disco_items(self,iq):\n q=iq.get_query()\n if q.hasProp(\"node\"):\n node=from_utf8(q.prop(\"node\"))\n else:\n node=None\n items=self.disco_get_items(node,iq)\n if isinstance(items,DiscoItems):\n resp=iq.make_result_response()\n self.__logger.debug(\"Disco-items query: %s preparing response: %s with reply: %s\"\n % (iq.serialize(),resp.serialize(),items.xmlnode.serialize()))\n resp.set_content(items.xmlnode.copyNode(1))\n elif isinstance(items,Stanza):\n resp=items\n else:\n resp=iq.make_error_response(\"item-not-found\")\n self.__logger.debug(\"Disco-items response: %s\" % (resp.serialize(),))\n self.stream.send(resp)", "async def root(\n p: str,\n item_id: int = Path(\n ...,\n title=\"The ID of the item to get\",\n ge=1, # constraint greater than or equal to 1\n lt=10 # less than 10\n ),\n size: Optional[float] = Query(None, gt=0., lt=33.),\n q: Optional[int] = Query(None, alias=\"item-query\")\n):\n results = dict(item_id=item_id)\n if q:\n results.update(q=q)\n if size:\n results.update(size=size)\n return results", "def query_nodes(self):\n query_nodes = []\n for node in self.nodes:\n if 'querynode' == node.get('infos').get('type'):\n query_nodes.append(node)\n return query_nodes", "def wql_nodetypes_query(self, node):\n self.tr_id = get_tr_id()\n if isinstance(node, Literal):\n raise KPError(M3_KP_ERROR_REQUEST)\n xml_msg = self._create_wql_nodetypes_msg(self.tr_id, node)\n self.conn.connect()\n self.conn.send(xml_msg)\n response = self.conn.receive()\n self._check_error(response)\n if \"results\" in response:\n node_list = parse_URI_list(response[\"results\"])\n return node_list\n else:\n raise SIBError(M3_SIB_ERROR)", "def query_items_handler(query):\n items = getItemsByName(query)\n return jsonify(items=[i.serialize for i in items])", "def trackingQuery(self, node, REQUEST=None, **kw):\n node = self.getQueryAnchor(node)\n return self.localQuery(node, REQUEST, **kw)", "def query_items(self, request, params, payload):\n return util.ndb_query_from_values(self.model, params).fetch()", "def parse_query_node(node: dict, dataset: Dataset, current_graphs: List[str], context: dict, cardinalities: dict, as_of: Optional[datetime] = None) -> PreemptableIterator:\n if node.name == 'SelectQuery':\n # in case of a FROM clause, set the new default graphs used\n graphs = current_graphs\n if node.datasetClause is not None:\n graphs = [format_term(graph_iri.default) for graph_iri in node.datasetClause]\n return parse_query_node(node.p, dataset, graphs, context, cardinalities, as_of=as_of)\n elif node.name == 'Project':\n query_vars = list(map(lambda t: '?' 
+ str(t), node.PV))\n child = parse_query_node(node.p, dataset, current_graphs, context, cardinalities, as_of=as_of)\n return ProjectionIterator(child, query_vars)\n elif node.name == 'BGP':\n # bgp_vars = node._vars\n triples = list(localize_triples(node.triples, current_graphs))\n iterator, query_vars, c = build_left_join_tree(triples, dataset, current_graphs, context, as_of=as_of)\n # track cardinalities of every triple pattern\n cardinalities += c\n return iterator\n elif node.name == 'Union':\n left = parse_query_node(node.p1, dataset, current_graphs, context, cardinalities, as_of=as_of)\n right = parse_query_node(node.p2, dataset, current_graphs, context, cardinalities, as_of=as_of)\n return BagUnionIterator(left, right, context)\n elif node.name == 'Filter':\n expression = parse_filter_expr(node.expr)\n iterator = parse_query_node(node.p, dataset, current_graphs, context, cardinalities, as_of=as_of)\n return FilterIterator(iterator, expression, context)\n elif node.name == 'Join':\n # only allow for joining BGPs from different GRAPH clauses\n triples = get_triples_from_graph(node.p1, current_graphs) + get_triples_from_graph(node.p2, current_graphs)\n iterator, query_vars, c = build_left_join_tree(triples, dataset, current_graphs, context)\n # track cardinalities of every triple pattern\n cardinalities += c\n return iterator\n else:\n raise UnsupportedSPARQL(f\"Unsupported SPARQL feature: {node.name}\")", "def testDynamicItemsJID(self):\n self.stream_start(mode='client',\n plugins=['xep_0030'])\n\n def dynamic_jid(jid, node, ifrom, iq):\n result = self.xmpp['xep_0030'].stanza.DiscoItems()\n result['node'] = node\n result.add_item('tester@localhost', node='foo', name='JID')\n return result\n\n self.xmpp['xep_0030'].set_node_handler('get_items',\n jid='tester@localhost',\n handler=dynamic_jid)\n\n self.recv(\"\"\"\n <iq type=\"get\" id=\"test\" to=\"tester@localhost\">\n <query xmlns=\"http://jabber.org/protocol/disco#items\"\n node=\"testing\" />\n </iq>\n \"\"\")\n\n self.send(\"\"\"\n <iq type=\"result\" id=\"test\">\n <query xmlns=\"http://jabber.org/protocol/disco#items\"\n node=\"testing\">\n <item jid=\"tester@localhost\" node=\"foo\" name=\"JID\" />\n </query>\n </iq>\n \"\"\")", "def disco_get_items(self,node,iq):\n to=iq.get_to()\n if to and to!=self.jid:\n return iq.make_error_response(\"recipient-unavailable\")\n if not node and self.disco_items:\n return self.disco_items\n return None", "def testInfoIncludeNode(self):\n self.stream_start(mode='client',\n plugins=['xep_0030'])\n\n\n self.xmpp['xep_0030'].static.add_node(node='testing')\n\n self.recv(\"\"\"\n <iq to=\"tester@localhost/resource\" type=\"get\" id=\"test\">\n <query xmlns=\"http://jabber.org/protocol/disco#info\"\n node=\"testing\" />\n </iq>\n \"\"\")\n\n self.send(\"\"\"\n <iq type=\"result\" id=\"test\">\n <query xmlns=\"http://jabber.org/protocol/disco#info\"\n node=\"testing\">\n </query>\n </iq>\"\"\",\n method='mask')", "def _run_query(self, entity, query, items):\n for item in items:\n ok = True\n for field_name, q in query.iteritems():\n field = entity.fields[field_name]\n value = getattr(item, field_name, None)\n if field.is_relation():\n if value is None or not any(q.match(v.name) for v in value):\n ok = False\n break\n elif not q.match(value):\n ok = False\n break\n if ok:\n yield item", "def get_items_for_query(self, query_str):\n raise NotImplementedError()", "def wql_related_query(self, start_node, end_node, path):\n self.tr_id = get_tr_id()\n xml_msg = self._create_wql_related_msg(self.tr_id,\n 
start_node, end_node, path)\n self.conn.connect()\n self.conn.send(xml_msg)\n response = self.conn.receive()\n self._check_error(response)\n if \"results\" in response:\n if response[\"results\"] == \"TRUE\":\n return True\n else:\n return False\n else:\n raise SIBError(M3_SIB_ERROR)", "def testGetItemsIterators(self):\n iteration_finished = []\n jids_found = set()\n\n self.stream_start(mode='client',\n plugins=['xep_0030', 'xep_0059'])\n\n async def run_test():\n iterator = await self.xmpp['xep_0030'].get_items(\n jid='foo@localhost',\n node='bar',\n iterator=True\n )\n iterator.amount = 10\n async for page in iterator:\n for item in page['disco_items']['items']:\n jids_found.add(item[0])\n iteration_finished.append(True)\n\n test_run = self.xmpp.wrap(run_test())\n self.wait_()\n self.send(\"\"\"\n <iq id=\"2\" type=\"get\" to=\"foo@localhost\">\n <query xmlns=\"http://jabber.org/protocol/disco#items\"\n node=\"bar\">\n <set xmlns=\"http://jabber.org/protocol/rsm\">\n <max>10</max>\n </set>\n </query>\n </iq>\n \"\"\")\n self.recv(\"\"\"\n <iq id=\"2\" type=\"result\" to=\"tester@localhost\">\n <query xmlns=\"http://jabber.org/protocol/disco#items\">\n <item jid=\"a@b\" node=\"1\"/>\n <item jid=\"b@b\" node=\"2\"/>\n <item jid=\"c@b\" node=\"3\"/>\n <item jid=\"d@b\" node=\"4\"/>\n <item jid=\"e@b\" node=\"5\"/>\n <set xmlns=\"http://jabber.org/protocol/rsm\">\n <first index='0'>a@b</first>\n <last>e@b</last>\n <count>10</count>\n </set>\n </query>\n </iq>\n \"\"\")\n self.wait_()\n self.send(\"\"\"\n <iq id=\"3\" type=\"get\" to=\"foo@localhost\">\n <query xmlns=\"http://jabber.org/protocol/disco#items\"\n node=\"bar\">\n <set xmlns=\"http://jabber.org/protocol/rsm\">\n <max>10</max>\n <after>e@b</after>\n </set>\n </query>\n </iq>\n \"\"\")\n self.recv(\"\"\"\n <iq id=\"3\" type=\"result\" to=\"tester@localhost\">\n <query xmlns=\"http://jabber.org/protocol/disco#items\">\n <item jid=\"f@b\" node=\"6\"/>\n <item jid=\"g@b\" node=\"7\"/>\n <item jid=\"h@b\" node=\"8\"/>\n <item jid=\"i@b\" node=\"9\"/>\n <item jid=\"j@b\" node=\"10\"/>\n <set xmlns=\"http://jabber.org/protocol/rsm\">\n <first index='5'>f@b</first>\n <last>j@b</last>\n <count>10</count>\n </set>\n </query>\n </iq>\n \"\"\")\n expected_jids = {'%s@b' % i for i in 'abcdefghij'}\n self.run_coro(test_run)\n self.assertEqual(expected_jids, jids_found)\n self.assertEqual(iteration_finished, [True])", "def wql_values_query(self, start_node, path):\n self.tr_id = get_tr_id()\n xml_msg = self._create_wql_values_msg(self.tr_id, start_node, path)\n self.conn.connect()\n self.conn.send(xml_msg)\n response = self.conn.receive()\n self._check_error(response)\n if \"results\" in response:\n node_list = parse_URI_list(response[\"results\"])\n return node_list\n else:\n raise SIBError(M3_SIB_ERROR)", "def query_items(self, items_params):\n username, password, api_key, max_items_to_return = SettingsOps.get_settings()\n items_runnable = ItemRunnable(username, password, api_key, items_params)\n items_runnable.item_object.task_complete.connect(self.on_new_items)\n self.init_progress_bar()\n self.search_thread_pool.start(items_runnable)", "def query(self, query):", "def nodetype_channel(request, query, *ka, **kw):\n queryset = Nodetype.published.search(query)\n return object_list(request, queryset=queryset,\n *ka, **kw)", "def perform_query(index_node, query_type):\n\n when = now()\n\n resp = do_query(index_node.name,\n [filter.value for filter in query_type.filters.all()],\n [facet.name for facet in query_type.facets.all()])\n\n client, _ = 
Host.objects.get_or_create(name=gethostname())\n \n response = Response.objects.create(\n index_node=index_node,\n status_code=resp['status_code'],\n datetime=when,\n client=client,\n query_type=query_type)\n\n if response.status_code == 200:\n \n response_data = ResponseData.objects.create(\n num_found=resp['num_found']\n )\n for facet_name, counts in resp['facet_counts'].items():\n for value, count in counts.items():\n facet, _ = Facet.objects.get_or_create(name=facet_name)\n facet_value, _ = \\\n FacetValue.objects.get_or_create(facet=facet,\n value=value)\n facet_value_count = \\\n FacetValueCount.objects.create(count=count,\n facet_value=facet_value,\n response_data=response_data)\n response.data = response_data\n response.save()", "def get_query(self, item=\"\", start_date=datetime.now()):\n return get_cte_query(self.query).format(item, start_date=start_date)", "def request(query):", "def find_nodes(self, query_dict):\n #item_type = query_options[\"type\"].upper()\n #if self.crits_type_mapping[item_type]:\n # item_type = self.crits_type_mapping[item_type]\n value_str = query_dict[\"value\"]\n indicator_collection = self.db_conn[\"indicators\"]\n results = indicator_collection.find({\"value\" : value_str})\n\t\tp_results = list()\n\t\tfor result in results:\n\t\t\tp_results.append( self._crits_result_to_poortego_node(result) )\t\n return p_results", "def from_node(cls, node):\n kwargs = dict(\n node_id=node.node_id,\n node_finalized=node.finalized,\n node_history=list(node.history),\n node_name=node.name,\n node_class=node.__class__.__name__,\n node_status=str(node.status),\n )\n\n return node.Results(node, **kwargs)", "def get_path_for_item(item):\n query = item['original_query']\n if isinstance(query, dict):\n if 'grlc' in query:\n del query['grlc']\n query = \"\\n\" + json.dumps(query, indent=2) + \"\\n\"\n\n description = item['description']\n description += '\\n\\n```\\n{}\\n```'.format(query)\n description += '\\n\\nSPARQL transformation:\\n```json\\n{}```'.format(\n item['transform']) if 'transform' in item else ''\n\n item_path = {\n item['method']: {\n 'tags': item['tags'],\n 'summary': item['summary'],\n 'description': description,\n 'produces': ['text/csv', 'application/json', 'text/html'],\n 'parameters': item['params'] if 'params' in item else None,\n 'responses': {\n '200': {\n 'description': 'Query response',\n 'schema': {\n 'type': 'array',\n 'items': {\n 'type': 'object',\n 'properties': item['item_properties'] if 'item_properties' in item else None\n },\n }\n },\n 'default': {\n 'description': 'Unexpected error',\n 'schema': {\n '$ref': '#/definitions/Message'\n }\n }\n }\n }\n }\n return item_path", "def query_items(root, path, query={}):\n # Apply default search filters\n absolute_path = '/'.join(root.getPhysicalPath() + (path.strip('/'),))\n applied_query = {\n 'sort_on': 'getObjPositionInParent'\n }\n # Apply caller's filters\n applied_query.update(query)\n # Set the navigation tree build strategy\n strategy = DefaultNavtreeStrategy(root)\n strategy.rootPath = absolute_path\n strategy.showAllParents = False\n # This will yield out tree of nested dicts of item brains\n navtree = buildFolderTree(root, root, applied_query, strategy=strategy)\n\n def cleanup(child):\n \"\"\" Recursively cleanup the tree \"\"\"\n children = child.get('children', [])\n for childchild in children:\n cleanup(childchild)\n cleaned = {u'title': child['Title'], u'name': child['id'],\n u'children': children}\n child.clear()\n child.update(cleaned)\n\n if \"id\" in navtree:\n 
cleanup(navtree)\n else:\n raise KeyError\n return navtree", "def test_get_all_item(self, app, item):\n res = app.store_items.get_all_items(\n header=item.header,\n type_response=ItemsResponse,\n )\n assert res.status_code == 200", "def paginate_query(\n self,\n node,\n project_id=None,\n props=[\"id\", \"submitter_id\"],\n chunk_size=2500,\n format=\"json\",\n args=None,\n ):\n\n if node == \"datanode\":\n query_txt = \"\"\"{ %s (%s) { type } }\"\"\" % (node, args)\n response = self.sub.query(query_txt)\n if \"data\" in response:\n nodes = [record[\"type\"] for record in response[\"data\"][\"datanode\"]]\n if len(nodes) > 1:\n print(\n \"\\tMultiple files with that file_name exist across multiple nodes:\\n\\t{}.\".format(\n nodes\n )\n )\n elif len(nodes) == 1:\n node = nodes[0]\n else:\n return nodes\n\n if project_id != None:\n program, project = project_id.split(\"-\", 1)\n if args == None:\n query_txt = \"\"\"{_%s_count (project_id:\"%s\")}\"\"\" % (node, project_id)\n else:\n query_txt = \"\"\"{_%s_count (project_id:\"%s\", %s)}\"\"\" % (\n node,\n project_id,\n args,\n )\n else:\n if args == None:\n query_txt = \"\"\"{_%s_count}\"\"\" % (node)\n else:\n query_txt = \"\"\"{_%s_count (%s)}\"\"\" % (node, args)\n\n # First query the node count to get the expected number of results for the requested query:\n\n try:\n res = self.sub.query(query_txt)\n count_name = \"_\".join(map(str, [\"\", node, \"count\"]))\n qsize = res[\"data\"][count_name]\n print(\n \"\\n\\tFound {} records in '{}' node of project '{}'. \".format(\n qsize, node, project_id\n )\n )\n except:\n print(\"\\n\\tQuery to get _{}_count failed! {}\".format(node, query_txt))\n\n # Now paginate the actual query:\n properties = \" \".join(map(str, props))\n offset = 0\n total = {}\n total[\"data\"] = {}\n total[\"data\"][node] = []\n count = 0\n while offset < qsize:\n\n if project_id != None:\n if args == None:\n query_txt = (\n \"\"\"{%s (first: %s, offset: %s, project_id:\"%s\"){%s}}\"\"\"\n % (node, chunk_size, offset, project_id, properties)\n )\n else:\n query_txt = (\n \"\"\"{%s (first: %s, offset: %s, project_id:\"%s\", %s){%s}}\"\"\"\n % (node, chunk_size, offset, project_id, args, properties)\n )\n else:\n if args == None:\n query_txt = \"\"\"{%s (first: %s, offset: %s){%s}}\"\"\" % (\n node,\n chunk_size,\n offset,\n properties,\n )\n else:\n query_txt = \"\"\"{%s (first: %s, offset: %s, %s){%s}}\"\"\" % (\n node,\n chunk_size,\n offset,\n args,\n properties,\n )\n\n res = self.sub.query(query_txt)\n if \"data\" in res:\n records = res[\"data\"][node]\n\n if len(records) < chunk_size:\n if qsize == 999999999:\n return total\n\n total[\"data\"][node] += records # res['data'][node] should be a list\n offset += chunk_size\n elif \"error\" in res:\n print(res[\"error\"])\n if chunk_size > 1:\n chunk_size = int(chunk_size / 2)\n print(\"Halving chunk_size to: \" + str(chunk_size) + \".\")\n else:\n print(\"Query timing out with chunk_size of 1!\")\n exit(1)\n else:\n print(\"Query Error: \" + str(res))\n\n pct = int((len(total[\"data\"][node]) / qsize) * 100)\n msg = \"\\tRecords retrieved: {} of {} ({}%), offset: {}, chunk_size: {}.\".format(\n len(total[\"data\"][node]), qsize, pct, offset, chunk_size\n )\n # print(msg)\n sys.stdout.write(\"\\r\" + str(msg).ljust(200, \" \"))\n\n if format == \"tsv\":\n df = json_normalize(total[\"data\"][node])\n return df\n else:\n return total", "def by_item(self) -> global___Snippet.PaginatedResponseHandling.ByItem:" ]
[ "0.6396363", "0.6289037", "0.61644876", "0.60375875", "0.5958757", "0.5912728", "0.5889202", "0.57394123", "0.55327314", "0.55124384", "0.54693735", "0.54070544", "0.5401821", "0.5362063", "0.53572315", "0.532478", "0.53014815", "0.52646244", "0.5233796", "0.52228504", "0.5181995", "0.5181631", "0.51731765", "0.51687104", "0.51236856", "0.51200074", "0.51147527", "0.51127917", "0.50812924", "0.50766164" ]
0.70169383
0
Test using a dynamic info handler for a particular JID.
def testDynamicInfoJID(self):
        self.stream_start(mode='client',
                          plugins=['xep_0030'])

        def dynamic_jid(jid, node, ifrom, iq):
            result = self.xmpp['xep_0030'].stanza.DiscoInfo()
            result['node'] = node
            result.add_identity('client', 'console', name='Dynamic Info')
            return result

        self.xmpp['xep_0030'].set_node_handler('get_info',
                                               jid='tester@localhost',
                                               handler=dynamic_jid)

        self.recv("""
          <iq type="get" id="test" to="tester@localhost">
            <query xmlns="http://jabber.org/protocol/disco#info"
                   node="testing" />
          </iq>
        """)

        self.send("""
          <iq type="result" id="test">
            <query xmlns="http://jabber.org/protocol/disco#info"
                   node="testing">
              <identity category="client"
                        type="console"
                        name="Dynamic Info" />
            </query>
          </iq>
        """)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def testOverrideJIDInfoHandler(self):\n self.stream_start(mode='client',\n plugins=['xep_0030'])\n\n def dynamic_jid(jid, node, ifrom, iq):\n result = self.xmpp['xep_0030'].stanza.DiscoInfo()\n result['node'] = node\n result.add_identity('client', 'console', name='Dynamic Info')\n return result\n\n self.xmpp['xep_0030'].set_node_handler('get_info',\n jid='tester@localhost',\n handler=dynamic_jid)\n\n\n self.xmpp['xep_0030'].restore_defaults(jid='tester@localhost',\n node='testing')\n\n self.xmpp['xep_0030'].add_identity(jid='tester@localhost',\n node='testing',\n category='automation',\n itype='command-list')\n\n self.recv(\"\"\"\n <iq type=\"get\" id=\"test\" to=\"tester@localhost\">\n <query xmlns=\"http://jabber.org/protocol/disco#info\"\n node=\"testing\" />\n </iq>\n \"\"\")\n\n self.send(\"\"\"\n <iq type=\"result\" id=\"test\">\n <query xmlns=\"http://jabber.org/protocol/disco#info\"\n node=\"testing\">\n <identity category=\"automation\"\n type=\"command-list\" />\n </query>\n </iq>\n \"\"\")", "def jid_jname(jnid):\n jails = jails_list()\n try:\n int(jnid) == True\n testid = 0\n except ValueError:\n testid = 1\n\n if testid == 0:\n if jnid in jails[2]:\n j = jails[2].index(jnid)\n jnid = jails[1][j]\n else:\n print \" Jail with ID '%s' not found!\" % (jnid)\n return False\n\n return jnid", "def test_lookup_parameter_handler_object(self, force_field):\n bonds = force_field[\"Bonds\"]\n with pytest.raises(NotImplementedError):\n force_field[bonds]\n with pytest.raises(NotImplementedError):\n force_field[type(bonds)]", "def query_handler(self, handler_name=\"\"):\n\t\treturn False", "def info(self, id):", "def test_register_dynamic_plugin(self):\n pass", "def test_dynamic_oid(self):\n block = SNMPBase()\n block._create_data = MagicMock()\n block._execute_snmp_request = MagicMock(\n return_value=SAMPLE_SNMP_RESPONSE)\n block._handle_data = MagicMock()\n\n myOID = \"1.3.6.1.2.1.31.1.1.1.6.2\"\n starting_signal = Signal({\n \"existing_key\": \"existing_val\",\n \"oid\": myOID\n })\n\n self.configure_block(block, {\n \"oids\": [{\"oid\": \"{{ $oid }}\"}]\n })\n block.start()\n\n # Send the starting signal, make sure everything was called correctly\n block.process_signals([starting_signal])\n args, kwargs = block._execute_snmp_request.call_args\n self.assertEqual(args[1], [myOID])\n block._handle_data.assert_called_once_with([], starting_signal)\n block.stop()", "def accessed(self, identifier, **kwargs):\r\n pass", "def test_register_dynamic_plugin1(self):\n pass", "def test_retrieve_dyn():\n # use the same id as previous test.\n the_id = 'from-test-dyndb'\n\n # get the response using the\n response = dyn_crud.retrieve_record(the_id)\n\n # run test.\n assert True if (response['company']['S'] == 'test company' and\n response['location']['S'] == 'Shambhala') else False", "def test_data_source_soaps_id_dynamic_datas_get(self):\n pass", "def run_handler(self, handler):\n self.last_activity = time.time()\n const_name = handler.upper()\n try:\n const_value = getattr(cwiid, const_name)\n if self.wm.state['buttons'] == const_value:\n self.exec_script(handler)\n except AttributeError:\n return 0", "def test_handle_pk(self):\n with self.assertRaises(Resolver404):\n self.handler.handle(self.request, '/42/')\n\n self.assertFalse(self.view.called)", "def testDynamicInfoGlobal(self):\n self.stream_start(mode='component',\n jid='tester.localhost',\n plugins=['xep_0030'])\n\n def dynamic_global(jid, node, ifrom, iq):\n result = self.xmpp['xep_0030'].stanza.DiscoInfo()\n result['node'] = node\n 
result.add_identity('component', 'generic', name='Dynamic Info')\n return result\n\n self.xmpp['xep_0030'].set_node_handler('get_info',\n handler=dynamic_global)\n\n self.recv(\"\"\"\n <iq type=\"get\" id=\"test\"\n to=\"[email protected]\"\n from=\"tester@localhost\">\n <query xmlns=\"http://jabber.org/protocol/disco#info\"\n node=\"testing\" />\n </iq>\n \"\"\")\n\n self.send(\"\"\"\n <iq type=\"result\" id=\"test\"\n to=\"tester@localhost\"\n from=\"[email protected]\">\n <query xmlns=\"http://jabber.org/protocol/disco#info\"\n node=\"testing\">\n <identity category=\"component\"\n type=\"generic\"\n name=\"Dynamic Info\" />\n </query>\n </iq>\n \"\"\")", "def test_detail_by_id(self):\n responses.add(\n responses.Response(\n method='GET',\n url=('https://connection.keboola.com/v2/storage/buckets/'\n 'in.c-ga'),\n json=detail_response\n )\n )\n bucket_id = 'in.c-ga'\n bucket_detail = self.buckets.detail(bucket_id)\n assert bucket_detail['id'] == 'in.c-ga'", "def testOverrideGlobalInfoHandler(self):\n self.stream_start(mode='component',\n jid='tester.localhost',\n plugins=['xep_0030'])\n\n def dynamic_global(jid, node, ifrom, iq):\n result = self.xmpp['xep_0030'].stanza.DiscoInfo()\n result['node'] = node\n result.add_identity('component', 'generic', name='Dynamic Info')\n return result\n\n self.xmpp['xep_0030'].set_node_handler('get_info',\n handler=dynamic_global)\n\n self.xmpp['xep_0030'].restore_defaults(jid='[email protected]',\n node='testing')\n\n self.xmpp['xep_0030'].add_feature(jid='[email protected]',\n node='testing',\n feature='urn:xmpp:ping')\n\n self.recv(\"\"\"\n <iq type=\"get\" id=\"test\"\n to=\"[email protected]\"\n from=\"tester@localhost\">\n <query xmlns=\"http://jabber.org/protocol/disco#info\"\n node=\"testing\" />\n </iq>\n \"\"\")\n\n self.send(\"\"\"\n <iq type=\"result\" id=\"test\"\n to=\"tester@localhost\"\n from=\"[email protected]\">\n <query xmlns=\"http://jabber.org/protocol/disco#info\"\n node=\"testing\">\n <feature var=\"urn:xmpp:ping\" />\n </query>\n </iq>\n \"\"\")", "def test_register_dynamic_plugin_manager1(self):\n pass", "def set_info_event_handler(self, handler):\n self.info_handler = handler", "async def test_entity_device_info_with_identifier(\n hass: HomeAssistant, mqtt_mock_entry: MqttMockHAClientGenerator\n) -> None:\n await help_test_entity_device_info_with_identifier(\n hass, mqtt_mock_entry, select.DOMAIN, DEFAULT_CONFIG\n )", "def test_job_id(self):\n\n url = '/%s/jobs/?job_id=%s' % (self.api, self.job1.id)\n response = self.client.generic('GET', url)\n self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)\n\n result = json.loads(response.content)\n self.assertEqual(len(result['results']), 1)\n self.assertEqual(result['results'][0]['id'], self.job1.id)", "def id_exists(test_name):\n result_json = None\n try:\n with open(robot_dir + \"/output/results/{}.json\".format(test_name.replace(' ', ''))) as result_file:\n result_json = json.load(result_file)\n except:\n print(\"Failed to open the result json\")\n return False\n #look for values NEW_ASSOC, NEW_PROP1, NEW_PROP2\n print(result_json)\n if 6 == 6:\n return True\n return \"Length is not 6\"", "async def test_api_info(hassio_handler, aioclient_mock: AiohttpClientMocker) -> None:\n aioclient_mock.get(\n \"http://127.0.0.1/info\",\n json={\n \"result\": \"ok\",\n \"data\": {\"supervisor\": \"222\", \"homeassistant\": \"0.110.0\", \"hassos\": None},\n },\n )\n\n data = await hassio_handler.get_info()\n assert aioclient_mock.call_count == 1\n assert data[\"hassos\"] 
is None\n assert data[\"homeassistant\"] == \"0.110.0\"\n assert data[\"supervisor\"] == \"222\"", "def component_handler(request, usage_key_string, handler, suffix=''):\r\n\r\n usage_key = UsageKey.from_string(usage_key_string)\r\n\r\n descriptor = get_modulestore(usage_key).get_item(usage_key)\r\n # Let the module handle the AJAX\r\n req = django_to_webob_request(request)\r\n\r\n try:\r\n resp = descriptor.handle(handler, req, suffix)\r\n\r\n except NoSuchHandlerError:\r\n log.info(\"XBlock %s attempted to access missing handler %r\", descriptor, handler, exc_info=True)\r\n raise Http404\r\n\r\n # unintentional update to handle any side effects of handle call; so, request user didn't author\r\n # the change\r\n get_modulestore(usage_key).update_item(descriptor, None)\r\n\r\n return webob_to_django_response(resp)", "def test_register_dynamic_plugin_manager(self):\n pass", "def test_get_info(self):\n pass", "def test_register_lookup_handler_str(mocker: MockerFixture) -> None:\n mocker.patch.dict(CFNGIN_LOOKUP_HANDLERS, {})\n register_lookup_handler(\n \"test\", \"runway.cfngin.lookups.handlers.default.DefaultLookup\"\n )\n assert \"test\" in CFNGIN_LOOKUP_HANDLERS\n assert CFNGIN_LOOKUP_HANDLERS[\"test\"] == DefaultLookup", "def test_get_comment_information_by_id():\n get_comment_information_by_id('g99c7c0')", "def hit_details(hit_id, sandbox, recruiter):\n prolific_check(recruiter, sandbox)\n rec = by_name(recruiter, skip_config_validation=True)\n details = rec.hit_details(hit_id, sandbox)\n print(json.dumps(details, indent=4, default=str))", "def test_job_type_id(self):\n\n url = '/%s/jobs/?job_type_id=%s' % (self.api, self.job1.job_type.id)\n response = self.client.generic('GET', url)\n self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)\n\n result = json.loads(response.content)\n self.assertEqual(len(result['results']), 1)\n self.assertEqual(result['results'][0]['job_type']['id'], self.job1.job_type.id)", "def info(self, jobid):\n return self.rpc.call(MsfRpcMethod.JobInfo, [jobid])" ]
[ "0.603616", "0.5224581", "0.5204453", "0.5167123", "0.50931525", "0.50643635", "0.4940391", "0.49184337", "0.49126655", "0.488274", "0.48677832", "0.48579577", "0.48276332", "0.48139375", "0.48109928", "0.4763304", "0.4754561", "0.47542498", "0.474684", "0.47033742", "0.47029302", "0.4702699", "0.47025302", "0.46768248", "0.4669794", "0.46463978", "0.4641128", "0.45924258", "0.4582682", "0.45777354" ]
0.6074457
0
Test overriding a JID info handler.
def testOverrideJIDInfoHandler(self):
        self.stream_start(mode='client',
                          plugins=['xep_0030'])

        def dynamic_jid(jid, node, ifrom, iq):
            result = self.xmpp['xep_0030'].stanza.DiscoInfo()
            result['node'] = node
            result.add_identity('client', 'console', name='Dynamic Info')
            return result

        self.xmpp['xep_0030'].set_node_handler('get_info',
                                               jid='tester@localhost',
                                               handler=dynamic_jid)

        self.xmpp['xep_0030'].restore_defaults(jid='tester@localhost',
                                               node='testing')

        self.xmpp['xep_0030'].add_identity(jid='tester@localhost',
                                           node='testing',
                                           category='automation',
                                           itype='command-list')

        self.recv("""
          <iq type="get" id="test" to="tester@localhost">
            <query xmlns="http://jabber.org/protocol/disco#info"
                   node="testing" />
          </iq>
        """)

        self.send("""
          <iq type="result" id="test">
            <query xmlns="http://jabber.org/protocol/disco#info"
                   node="testing">
              <identity category="automation"
                        type="command-list" />
            </query>
          </iq>
        """)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def testDynamicInfoJID(self):\n self.stream_start(mode='client',\n plugins=['xep_0030'])\n\n def dynamic_jid(jid, node, ifrom, iq):\n result = self.xmpp['xep_0030'].stanza.DiscoInfo()\n result['node'] = node\n result.add_identity('client', 'console', name='Dynamic Info')\n return result\n\n self.xmpp['xep_0030'].set_node_handler('get_info',\n jid='tester@localhost',\n handler=dynamic_jid)\n\n self.recv(\"\"\"\n <iq type=\"get\" id=\"test\" to=\"tester@localhost\">\n <query xmlns=\"http://jabber.org/protocol/disco#info\"\n node=\"testing\" />\n </iq>\n \"\"\")\n\n self.send(\"\"\"\n <iq type=\"result\" id=\"test\">\n <query xmlns=\"http://jabber.org/protocol/disco#info\"\n node=\"testing\">\n <identity category=\"client\"\n type=\"console\"\n name=\"Dynamic Info\" />\n </query>\n </iq>\n \"\"\")", "def testOverrideGlobalInfoHandler(self):\n self.stream_start(mode='component',\n jid='tester.localhost',\n plugins=['xep_0030'])\n\n def dynamic_global(jid, node, ifrom, iq):\n result = self.xmpp['xep_0030'].stanza.DiscoInfo()\n result['node'] = node\n result.add_identity('component', 'generic', name='Dynamic Info')\n return result\n\n self.xmpp['xep_0030'].set_node_handler('get_info',\n handler=dynamic_global)\n\n self.xmpp['xep_0030'].restore_defaults(jid='[email protected]',\n node='testing')\n\n self.xmpp['xep_0030'].add_feature(jid='[email protected]',\n node='testing',\n feature='urn:xmpp:ping')\n\n self.recv(\"\"\"\n <iq type=\"get\" id=\"test\"\n to=\"[email protected]\"\n from=\"tester@localhost\">\n <query xmlns=\"http://jabber.org/protocol/disco#info\"\n node=\"testing\" />\n </iq>\n \"\"\")\n\n self.send(\"\"\"\n <iq type=\"result\" id=\"test\"\n to=\"tester@localhost\"\n from=\"[email protected]\">\n <query xmlns=\"http://jabber.org/protocol/disco#info\"\n node=\"testing\">\n <feature var=\"urn:xmpp:ping\" />\n </query>\n </iq>\n \"\"\")", "def testCaseInfo(self):\n print(\"Override method..\")", "def test_patch_identity(self):\n pass", "def info(self, id):", "def set_info_event_handler(self, handler):\n self.info_handler = handler", "def testOverrideJIDItemsHandler(self):\n self.stream_start(mode='client',\n plugins=['xep_0030'])\n\n def dynamic_jid(jid, node, ifrom, iq):\n result = self.xmpp['xep_0030'].stanza.DiscoItems()\n result['node'] = node\n result.add_item('tester@localhost', node='foo', name='Global')\n return result\n\n self.xmpp['xep_0030'].set_node_handler('get_items',\n jid='tester@localhost',\n handler=dynamic_jid)\n\n\n self.xmpp['xep_0030'].restore_defaults(jid='tester@localhost',\n node='testing')\n\n self.xmpp['xep_0030'].add_item(ijid='tester@localhost',\n node='testing',\n jid='tester@localhost',\n subnode='foo',\n name='Test')\n\n self.recv(\"\"\"\n <iq type=\"get\" id=\"test\" to=\"tester@localhost\">\n <query xmlns=\"http://jabber.org/protocol/disco#items\"\n node=\"testing\" />\n </iq>\n \"\"\")\n\n self.send(\"\"\"\n <iq type=\"result\" id=\"test\">\n <query xmlns=\"http://jabber.org/protocol/disco#items\"\n node=\"testing\">\n <item jid=\"tester@localhost\" node=\"foo\" name=\"Test\" />\n </query>\n </iq>\n \"\"\")", "def test_user_id_get(self):\n pass", "def accessed(self, identifier, **kwargs):\r\n pass", "def test_v1_alert_alert_id_comment_patch(self):\n pass", "def test_user_id_put(self):\n pass", "def testNciID(self):\n dis_meta = DiseaseMeta()\n\n self.util.stringTypeTest(self, dis_meta, \"nci_id\")\n\n self.util.stringPropertyTest(self, dis_meta, \"nci_id\")", "def testOverrideModule(self):\n\t\tc = Controller()\n\t\tc.override(os, 'getsid', 
classmethod(c.mock()))\n\t\tc.restore()", "def testOverrideModule(self):\n\t\tc = Controller()\n\t\tc.override(os, 'getsid', classmethod(c.mock()))\n\t\tc.restore()", "def jid_jname(jnid):\n jails = jails_list()\n try:\n int(jnid) == True\n testid = 0\n except ValueError:\n testid = 1\n\n if testid == 0:\n if jnid in jails[2]:\n j = jails[2].index(jnid)\n jnid = jails[1][j]\n else:\n print \" Jail with ID '%s' not found!\" % (jnid)\n return False\n\n return jnid", "async def test_entity_device_info_with_identifier(\n hass: HomeAssistant, mqtt_mock_entry: MqttMockHAClientGenerator\n) -> None:\n await help_test_entity_device_info_with_identifier(\n hass, mqtt_mock_entry, select.DOMAIN, DEFAULT_CONFIG\n )", "def test_handle_pk(self):\n with self.assertRaises(Resolver404):\n self.handler.handle(self.request, '/42/')\n\n self.assertFalse(self.view.called)", "def test_get_info(self):\n pass", "def _registerSample(self, prefix, info):\n self.checkIdentifiersPresent(info)\n self._prefixToIdentifiers[prefix] = info", "def test_edit_alert_by_id(self):\n pass", "def identify(self):\n self.__send_short(self.MGMSG_MOD_IDENTIFY, 0x00, 0x00)", "def test_dynamic_oid(self):\n block = SNMPBase()\n block._create_data = MagicMock()\n block._execute_snmp_request = MagicMock(\n return_value=SAMPLE_SNMP_RESPONSE)\n block._handle_data = MagicMock()\n\n myOID = \"1.3.6.1.2.1.31.1.1.1.6.2\"\n starting_signal = Signal({\n \"existing_key\": \"existing_val\",\n \"oid\": myOID\n })\n\n self.configure_block(block, {\n \"oids\": [{\"oid\": \"{{ $oid }}\"}]\n })\n block.start()\n\n # Send the starting signal, make sure everything was called correctly\n block.process_signals([starting_signal])\n args, kwargs = block._execute_snmp_request.call_args\n self.assertEqual(args[1], [myOID])\n block._handle_data.assert_called_once_with([], starting_signal)\n block.stop()", "def test_30_app_id_anonymous_user(self, Mock, mock):\r\n html_request = FakeRequest(json.dumps(self.pkg_json_not_found), 200,\r\n {'content-type': 'application/json'})\r\n Mock.return_value = html_request\r\n\r\n self.register()\r\n self.new_application()\r\n self.signout()\r\n\r\n res = self.app.get('/app/sampleapp', follow_redirects=True)\r\n assert \"Sample App\" in res.data, (\"Application name should be shown\"\r\n \" to users\")\r\n assert '<strong><i class=\"icon-cog\"></i> ID</strong>: 1' not in \\\r\n res.data, \"Application ID should be shown to the owner\"", "def test_data_source_soaps_id_patch(self):\n pass", "def set_info_message(msg):\n set_message(msg, TYPE_INFO)", "async def test_unique_id(\n hass: HomeAssistant, mqtt_mock_entry: MqttMockHAClientGenerator\n) -> None:\n await help_test_unique_id(hass, mqtt_mock_entry, select.DOMAIN)", "def test_user_information_request(self):\n pass", "def _set_id(self):\n raise NotImplementedError()", "def test_register_lookup_handler_str(mocker: MockerFixture) -> None:\n mocker.patch.dict(CFNGIN_LOOKUP_HANDLERS, {})\n register_lookup_handler(\n \"test\", \"runway.cfngin.lookups.handlers.default.DefaultLookup\"\n )\n assert \"test\" in CFNGIN_LOOKUP_HANDLERS\n assert CFNGIN_LOOKUP_HANDLERS[\"test\"] == DefaultLookup", "def test_get_comment_information_by_id():\n get_comment_information_by_id('g99c7c0')" ]
[ "0.5675587", "0.5537099", "0.54448056", "0.5395349", "0.5346919", "0.53391", "0.5268721", "0.51122886", "0.50774336", "0.5047395", "0.50276953", "0.5019117", "0.5018244", "0.5018244", "0.5015036", "0.50043046", "0.49973047", "0.49818295", "0.49638623", "0.4939039", "0.49365282", "0.49338296", "0.49226305", "0.49223807", "0.49025562", "0.4876794", "0.486392", "0.48568046", "0.48469555", "0.4844958" ]
0.726444
0
Test overriding the global JID info handler.
def testOverrideGlobalInfoHandler(self):
        self.stream_start(mode='component',
                          jid='tester.localhost',
                          plugins=['xep_0030'])

        def dynamic_global(jid, node, ifrom, iq):
            result = self.xmpp['xep_0030'].stanza.DiscoInfo()
            result['node'] = node
            result.add_identity('component', 'generic', name='Dynamic Info')
            return result

        self.xmpp['xep_0030'].set_node_handler('get_info',
                                               handler=dynamic_global)

        self.xmpp['xep_0030'].restore_defaults(jid='[email protected]',
                                               node='testing')

        self.xmpp['xep_0030'].add_feature(jid='[email protected]',
                                          node='testing',
                                          feature='urn:xmpp:ping')

        self.recv("""
          <iq type="get" id="test"
              to="[email protected]"
              from="tester@localhost">
            <query xmlns="http://jabber.org/protocol/disco#info"
                   node="testing" />
          </iq>
        """)

        self.send("""
          <iq type="result" id="test"
              to="tester@localhost"
              from="[email protected]">
            <query xmlns="http://jabber.org/protocol/disco#info"
                   node="testing">
              <feature var="urn:xmpp:ping" />
            </query>
          </iq>
        """)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def testOverrideJIDInfoHandler(self):\n self.stream_start(mode='client',\n plugins=['xep_0030'])\n\n def dynamic_jid(jid, node, ifrom, iq):\n result = self.xmpp['xep_0030'].stanza.DiscoInfo()\n result['node'] = node\n result.add_identity('client', 'console', name='Dynamic Info')\n return result\n\n self.xmpp['xep_0030'].set_node_handler('get_info',\n jid='tester@localhost',\n handler=dynamic_jid)\n\n\n self.xmpp['xep_0030'].restore_defaults(jid='tester@localhost',\n node='testing')\n\n self.xmpp['xep_0030'].add_identity(jid='tester@localhost',\n node='testing',\n category='automation',\n itype='command-list')\n\n self.recv(\"\"\"\n <iq type=\"get\" id=\"test\" to=\"tester@localhost\">\n <query xmlns=\"http://jabber.org/protocol/disco#info\"\n node=\"testing\" />\n </iq>\n \"\"\")\n\n self.send(\"\"\"\n <iq type=\"result\" id=\"test\">\n <query xmlns=\"http://jabber.org/protocol/disco#info\"\n node=\"testing\">\n <identity category=\"automation\"\n type=\"command-list\" />\n </query>\n </iq>\n \"\"\")", "def testDynamicInfoJID(self):\n self.stream_start(mode='client',\n plugins=['xep_0030'])\n\n def dynamic_jid(jid, node, ifrom, iq):\n result = self.xmpp['xep_0030'].stanza.DiscoInfo()\n result['node'] = node\n result.add_identity('client', 'console', name='Dynamic Info')\n return result\n\n self.xmpp['xep_0030'].set_node_handler('get_info',\n jid='tester@localhost',\n handler=dynamic_jid)\n\n self.recv(\"\"\"\n <iq type=\"get\" id=\"test\" to=\"tester@localhost\">\n <query xmlns=\"http://jabber.org/protocol/disco#info\"\n node=\"testing\" />\n </iq>\n \"\"\")\n\n self.send(\"\"\"\n <iq type=\"result\" id=\"test\">\n <query xmlns=\"http://jabber.org/protocol/disco#info\"\n node=\"testing\">\n <identity category=\"client\"\n type=\"console\"\n name=\"Dynamic Info\" />\n </query>\n </iq>\n \"\"\")", "def test_patch_identity(self):\n pass", "def test_user_id_get(self):\n pass", "def test_30_app_id_anonymous_user(self, Mock, mock):\r\n html_request = FakeRequest(json.dumps(self.pkg_json_not_found), 200,\r\n {'content-type': 'application/json'})\r\n Mock.return_value = html_request\r\n\r\n self.register()\r\n self.new_application()\r\n self.signout()\r\n\r\n res = self.app.get('/app/sampleapp', follow_redirects=True)\r\n assert \"Sample App\" in res.data, (\"Application name should be shown\"\r\n \" to users\")\r\n assert '<strong><i class=\"icon-cog\"></i> ID</strong>: 1' not in \\\r\n res.data, \"Application ID should be shown to the owner\"", "async def test_unique_id(\n hass: HomeAssistant, mqtt_mock_entry: MqttMockHAClientGenerator\n) -> None:\n await help_test_unique_id(hass, mqtt_mock_entry, select.DOMAIN)", "def getmyuid():\n raise NotImplementedError()", "def testNciID(self):\n dis_meta = DiseaseMeta()\n\n self.util.stringTypeTest(self, dis_meta, \"nci_id\")\n\n self.util.stringPropertyTest(self, dis_meta, \"nci_id\")", "def identify(self):\n self.__send_short(self.MGMSG_MOD_IDENTIFY, 0x00, 0x00)", "def test_server_info(self):\n pass", "def test_drugs_id_get(self):\n pass", "def testOverrideModule(self):\n\t\tc = Controller()\n\t\tc.override(os, 'getsid', classmethod(c.mock()))\n\t\tc.restore()", "def testOverrideModule(self):\n\t\tc = Controller()\n\t\tc.override(os, 'getsid', classmethod(c.mock()))\n\t\tc.restore()", "def test_get_info(self):\n pass", "def get_identifier(self):", "def test_user_id_put(self):\n pass", "def test_id(self):\n result = self.test_client.id\n\n assert result == \"10423098\"", "def info(self, id):", "async def test_entity_device_info_with_identifier(\n 
hass: HomeAssistant, mqtt_mock_entry: MqttMockHAClientGenerator\n) -> None:\n await help_test_entity_device_info_with_identifier(\n hass, mqtt_mock_entry, select.DOMAIN, DEFAULT_CONFIG\n )", "def logTestName(self):\n logging.info('%s', self.id())", "def test_data_source_soaps_id_patch(self):\n pass", "def testCaseInfo(self):\n print(\"Override method..\")", "def test_id(self):\n result = self.test_client.id\n\n assert result == \"86576599\"", "def test_specific_location_init_with_specific_id() -> None:\n\n uid = UID(value=uuid.UUID(int=333779996850170035686993356951732753684))\n\n loc = SpecificLocation(id=uid)\n\n assert loc.id == uid", "def jid_jname(jnid):\n jails = jails_list()\n try:\n int(jnid) == True\n testid = 0\n except ValueError:\n testid = 1\n\n if testid == 0:\n if jnid in jails[2]:\n j = jails[2].index(jnid)\n jnid = jails[1][j]\n else:\n print \" Jail with ID '%s' not found!\" % (jnid)\n return False\n\n return jnid", "def test_register_lookup_handler_str(mocker: MockerFixture) -> None:\n mocker.patch.dict(CFNGIN_LOOKUP_HANDLERS, {})\n register_lookup_handler(\n \"test\", \"runway.cfngin.lookups.handlers.default.DefaultLookup\"\n )\n assert \"test\" in CFNGIN_LOOKUP_HANDLERS\n assert CFNGIN_LOOKUP_HANDLERS[\"test\"] == DefaultLookup", "def testOverrideJIDItemsHandler(self):\n self.stream_start(mode='client',\n plugins=['xep_0030'])\n\n def dynamic_jid(jid, node, ifrom, iq):\n result = self.xmpp['xep_0030'].stanza.DiscoItems()\n result['node'] = node\n result.add_item('tester@localhost', node='foo', name='Global')\n return result\n\n self.xmpp['xep_0030'].set_node_handler('get_items',\n jid='tester@localhost',\n handler=dynamic_jid)\n\n\n self.xmpp['xep_0030'].restore_defaults(jid='tester@localhost',\n node='testing')\n\n self.xmpp['xep_0030'].add_item(ijid='tester@localhost',\n node='testing',\n jid='tester@localhost',\n subnode='foo',\n name='Test')\n\n self.recv(\"\"\"\n <iq type=\"get\" id=\"test\" to=\"tester@localhost\">\n <query xmlns=\"http://jabber.org/protocol/disco#items\"\n node=\"testing\" />\n </iq>\n \"\"\")\n\n self.send(\"\"\"\n <iq type=\"result\" id=\"test\">\n <query xmlns=\"http://jabber.org/protocol/disco#items\"\n node=\"testing\">\n <item jid=\"tester@localhost\" node=\"foo\" name=\"Test\" />\n </query>\n </iq>\n \"\"\")", "def test_hard_coded_oid(self):\n block = SNMPBase()\n block._create_data = MagicMock()\n block._execute_snmp_request = MagicMock(\n return_value=SAMPLE_SNMP_RESPONSE)\n block._handle_data = MagicMock()\n\n myOID = \"1.3.6.1.2.1.31.1.1.1.6.2\"\n starting_signal = Signal({\"existing_key\": \"existing_val\"})\n\n self.configure_block(block, {\n \"oids\": [{\"oid\": myOID}]\n })\n block.start()\n\n # Send the starting signal, make sure everything was called correctly\n block.process_signals([starting_signal])\n args, kwargs = block._execute_snmp_request.call_args\n self.assertEqual(args[1], [myOID])\n block._handle_data.assert_called_once_with([], starting_signal)\n block.stop()", "def test_dynamic_oid(self):\n block = SNMPBase()\n block._create_data = MagicMock()\n block._execute_snmp_request = MagicMock(\n return_value=SAMPLE_SNMP_RESPONSE)\n block._handle_data = MagicMock()\n\n myOID = \"1.3.6.1.2.1.31.1.1.1.6.2\"\n starting_signal = Signal({\n \"existing_key\": \"existing_val\",\n \"oid\": myOID\n })\n\n self.configure_block(block, {\n \"oids\": [{\"oid\": \"{{ $oid }}\"}]\n })\n block.start()\n\n # Send the starting signal, make sure everything was called correctly\n block.process_signals([starting_signal])\n args, kwargs = 
block._execute_snmp_request.call_args\n self.assertEqual(args[1], [myOID])\n block._handle_data.assert_called_once_with([], starting_signal)\n block.stop()", "def _register(self, comm, handler):" ]
[ "0.6870358", "0.54498476", "0.5428008", "0.53908557", "0.5370121", "0.52585053", "0.5192192", "0.5162853", "0.51565474", "0.5142715", "0.5131223", "0.5112319", "0.5112319", "0.5111863", "0.51048714", "0.50948215", "0.50879955", "0.5078496", "0.5074785", "0.50513303", "0.5024561", "0.5019743", "0.50139594", "0.50130945", "0.5012993", "0.5011245", "0.5008451", "0.50046515", "0.49948484", "0.4988509" ]
0.5859276
1
Test sending a discoinfo query to another entity and receiving the result.
def testGetInfoRemote(self):
        self.stream_start(mode='client',
                          plugins=['xep_0030'])

        events = set()

        def handle_disco_info(iq):
            events.add('disco_info')

        self.xmpp.add_event_handler('disco_info', handle_disco_info)

        self.xmpp.wrap(self.xmpp['xep_0030'].get_info('user@localhost', 'foo'))
        self.wait_()

        self.send("""
          <iq type="get" to="user@localhost" id="1">
            <query xmlns="http://jabber.org/protocol/disco#info"
                   node="foo" />
          </iq>
        """)

        self.recv("""
          <iq type="result" to="tester@localhost" id="1">
            <query xmlns="http://jabber.org/protocol/disco#info"
                   node="foo">
              <identity category="client" type="bot" />
              <feature var="urn:xmpp:ping" />
            </query>
          </iq>
        """)

        self.assertEqual(events, {'disco_info'},
                         "Disco info event was not triggered: %s" % events)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __disco_info(self,iq):\n q=iq.get_query()\n if q.hasProp(\"node\"):\n node=from_utf8(q.prop(\"node\"))\n else:\n node=None\n info=self.disco_get_info(node,iq)\n if isinstance(info,DiscoInfo):\n resp=iq.make_result_response()\n self.__logger.debug(\"Disco-info query: %s preparing response: %s with reply: %s\"\n % (iq.serialize(),resp.serialize(),info.xmlnode.serialize()))\n resp.set_content(info.xmlnode.copyNode(1))\n elif isinstance(info,Stanza):\n resp=info\n else:\n resp=iq.make_error_response(\"item-not-found\")\n self.__logger.debug(\"Disco-info response: %s\" % (resp.serialize(),))\n self.stream.send(resp)", "def test_ctcpQuery_CLIENTINFO(self):\n self.client.ctcpQuery_CLIENTINFO(self.user, self.channel, \"\")\n self.client.ctcpQuery_CLIENTINFO(self.user, self.channel, \"PING PONG\")\n info = (\n \"ACTION CLIENTINFO DCC ERRMSG FINGER PING SOURCE TIME \" \"USERINFO VERSION\"\n )\n self.assertEqual(\n self.client.methods,\n [\n (\"ctcpMakeReply\", (\"Wolf\", [(\"CLIENTINFO\", info)])),\n (\"ctcpMakeReply\", (\"Wolf\", [(\"CLIENTINFO\", None)])),\n ],\n )", "def test_get_info(self):\n pass", "async def test_entity_debug_info_message(\n hass: HomeAssistant, mqtt_mock_entry: MqttMockHAClientGenerator\n) -> None:\n await help_test_entity_debug_info_message(\n hass,\n mqtt_mock_entry,\n select.DOMAIN,\n DEFAULT_CONFIG,\n select.SERVICE_SELECT_OPTION,\n service_parameters={ATTR_OPTION: \"beer\"},\n command_payload=\"beer\",\n state_payload=\"milk\",\n )", "def testDynamicInfoJID(self):\n self.stream_start(mode='client',\n plugins=['xep_0030'])\n\n def dynamic_jid(jid, node, ifrom, iq):\n result = self.xmpp['xep_0030'].stanza.DiscoInfo()\n result['node'] = node\n result.add_identity('client', 'console', name='Dynamic Info')\n return result\n\n self.xmpp['xep_0030'].set_node_handler('get_info',\n jid='tester@localhost',\n handler=dynamic_jid)\n\n self.recv(\"\"\"\n <iq type=\"get\" id=\"test\" to=\"tester@localhost\">\n <query xmlns=\"http://jabber.org/protocol/disco#info\"\n node=\"testing\" />\n </iq>\n \"\"\")\n\n self.send(\"\"\"\n <iq type=\"result\" id=\"test\">\n <query xmlns=\"http://jabber.org/protocol/disco#info\"\n node=\"testing\">\n <identity category=\"client\"\n type=\"console\"\n name=\"Dynamic Info\" />\n </query>\n </iq>\n \"\"\")", "def test_query_object(self, test_info):\n\n test_results = test_info\n\n # Prepare the test\n input_ra = test_results['test_RA']\n input_dec = test_results['test_DEC']\n input_units = test_results['test_units']\n input_frame = test_results['test_frame']\n input_size = int(test_results['side_size'])\n\n # Launch the test\n try:\n coord = SkyCoord(ra=input_ra, dec=input_dec, unit=(input_units, input_units), frame=input_frame)\n width = u.Quantity(input_size, input_units)\n height = u.Quantity(input_size, input_units)\n r = self.gaia.query_object_async(coordinate=coord, width=width, height=height)\n\n # Get current time\n time = datetime.now()\n time_str = time.strftime('%Y-%m-%d %H:%M:%S')\n test_results['test_finished'] = f'{time_str} CET'\n\n # Store additional info\n test_results['test_additional_info'] = str(r)\n\n # Get num of results returned, if it is the expected then the test has been passed.\n log.debug(f'Num rows returned: {len(r)}')\n\n n_expected_results = test_results['test_expected_value']\n if len(r) == n_expected_results:\n # Test passed\n test_results['test_result'] = PASSED\n log.debug(\"Test PASSED\")\n else:\n test_results['test_result'] = NOT_PASSED\n error_message = f'The number of rows returned: {len(r)} differs from 
the expected {n_expected_results}'\n test_results['test_additional_info'] = error_message\n log.error(error_message)\n raise ValueError(error_message)\n except ValueError as err:\n log.error(str(err))\n # Get current time\n time = datetime.now()\n time_str = time.strftime('%Y-%m-%d %H:%M:%S')\n # fill result object with the info from the http error\n test_results['test_finished'] = f'{time_str} CET'\n test_results['test_result'] = NOT_PASSED\n test_results['test_additional_info'] = str(err)\n return test_results\n except HTTPError as err:\n error_message = \"Error connecting TAP server\"\n log.error(error_message)\n\n # Get current time\n time = datetime.now()\n time_str = time.strftime('%Y-%m-%d %H:%M:%S')\n # fill result object with the info from the http error\n test_results['test_finished'] = f'{time_str} CET'\n test_results['test_result'] = NOT_PASSED\n test_results['test_additional_info'] = error_message + \",\" + str(err)\n return test_results\n # if everything is correct then we will return the results of the test\n return test_results", "def test_ctcpQuery_USERINFO(self):\n self.client.userinfo = \"info\"\n self.client.ctcpQuery_USERINFO(self.user, self.channel, \"data\")\n self.assertEqual(\n self.client.methods, [(\"ctcpMakeReply\", (\"Wolf\", [(\"USERINFO\", \"info\")]))]\n )", "def test_context_data_info_message_for_one_result(self):\n factories.SourceDatasetFactory.create(i_dbgap_description='lorem ipsum')\n response = self.client.get(self.get_url(), {'description': 'lorem'})\n messages = list(response.wsgi_request._messages)\n self.assertEqual(len(messages), 1)\n self.assertEqual(str(messages[0]), '1 result found.')", "def test_context_data_info_message_for_one_result(self):\n factories.SourceDatasetFactory.create(\n i_dbgap_description='lorem ipsum',\n source_study_version__study=self.study)\n response = self.client.get(self.get_url(self.study.pk), {'description': 'lorem'})\n messages = list(response.wsgi_request._messages)\n self.assertEqual(len(messages), 1)\n self.assertEqual(str(messages[0]), '1 result found.')", "def testInfoIncludeNode(self):\n self.stream_start(mode='client',\n plugins=['xep_0030'])\n\n\n self.xmpp['xep_0030'].static.add_node(node='testing')\n\n self.recv(\"\"\"\n <iq to=\"tester@localhost/resource\" type=\"get\" id=\"test\">\n <query xmlns=\"http://jabber.org/protocol/disco#info\"\n node=\"testing\" />\n </iq>\n \"\"\")\n\n self.send(\"\"\"\n <iq type=\"result\" id=\"test\">\n <query xmlns=\"http://jabber.org/protocol/disco#info\"\n node=\"testing\">\n </query>\n </iq>\"\"\",\n method='mask')", "def test_context_data_info_message_for_one_result(self):\n factories.SourceTraitFactory.create(i_description='lorem ipsum')\n response = self.client.get(self.get_url(), {'description': 'lorem'})\n messages = list(response.wsgi_request._messages)\n self.assertEqual(len(messages), 1)\n self.assertEqual(str(messages[0]), '1 result found.')", "def test_context_data_info_message_for_one_result(self):\n factories.HarmonizedTraitFactory.create(i_description='lorem ipsum')\n response = self.client.get(self.get_url(), {'description': 'lorem'})\n messages = list(response.wsgi_request._messages)\n self.assertEqual(len(messages), 1)\n self.assertEqual(str(messages[0]), '1 result found.')", "def test_ctcpQuery_DCC(self):\n self.client.ctcpQuery_DCC(self.user, self.channel, \"data\")\n self.assertEqual(\n self.client.methods,\n [\n (\n \"ctcpMakeReply\",\n (\"Wolf\", [(\"ERRMSG\", \"DCC data :Unknown DCC type 'DATA'\")]),\n )\n ],\n )", "def 
test_context_data_info_message_for_one_result(self):\n factories.SourceTraitFactory.create(\n i_description='lorem ipsum',\n source_dataset__source_study_version__study=self.study)\n response = self.client.get(self.get_url(self.study.pk), {'description': 'lorem'})\n messages = list(response.wsgi_request._messages)\n self.assertEqual(len(messages), 1)\n self.assertEqual(str(messages[0]), '1 result found.')", "def testOverrideJIDInfoHandler(self):\n self.stream_start(mode='client',\n plugins=['xep_0030'])\n\n def dynamic_jid(jid, node, ifrom, iq):\n result = self.xmpp['xep_0030'].stanza.DiscoInfo()\n result['node'] = node\n result.add_identity('client', 'console', name='Dynamic Info')\n return result\n\n self.xmpp['xep_0030'].set_node_handler('get_info',\n jid='tester@localhost',\n handler=dynamic_jid)\n\n\n self.xmpp['xep_0030'].restore_defaults(jid='tester@localhost',\n node='testing')\n\n self.xmpp['xep_0030'].add_identity(jid='tester@localhost',\n node='testing',\n category='automation',\n itype='command-list')\n\n self.recv(\"\"\"\n <iq type=\"get\" id=\"test\" to=\"tester@localhost\">\n <query xmlns=\"http://jabber.org/protocol/disco#info\"\n node=\"testing\" />\n </iq>\n \"\"\")\n\n self.send(\"\"\"\n <iq type=\"result\" id=\"test\">\n <query xmlns=\"http://jabber.org/protocol/disco#info\"\n node=\"testing\">\n <identity category=\"automation\"\n type=\"command-list\" />\n </query>\n </iq>\n \"\"\")", "def test_dicom_server(test_dest_node: dict, data_dir: str):\n dicom_dir = os.path.join(data_dir, \"dicom-files\")\n\n ae = AE(ae_title=test_dest_node[\"aetitle\"])\n ae.add_supported_context(VerificationSOPClass, ALL_TRANSFER_SYNTAXES)\n for context in AllStoragePresentationContexts:\n ae.add_supported_context(\n context.abstract_syntax,\n ALL_TRANSFER_SYNTAXES,\n scp_role=True,\n scu_role=False,\n )\n\n ae.add_supported_context(PatientRootQueryRetrieveInformationModelFind)\n ae.add_supported_context(PatientRootQueryRetrieveInformationModelMove)\n ae.add_supported_context(StudyRootQueryRetrieveInformationModelFind)\n ae.add_supported_context(StudyRootQueryRetrieveInformationModelMove)\n\n def handle_cfind(event: Event, data_dir: str):\n model = event.request.AffectedSOPClassUID\n if model not in [\"PATIENT\", \"STUDY\"]:\n yield 0xC320, None\n return\n\n results = []\n for root, _, files in os.walk(data_dir):\n for name in files:\n path = os.path.join(root, name)\n dcm = dcmread(path, stop_before_pixels=True)\n\n ds = Dataset()\n is_ok = False\n for key, value in event.identifier.items():\n tag_name = value.name\n if value.value:\n search_val = value.value\n if tag_name == \"StudyDate\" and \"-\" in search_val:\n lower_date, upper_date = (\n search_val.split(\"-\")[0],\n search_val.split(\"-\")[1],\n )\n is_ok = lower_date <= search_val <= upper_date\n else:\n is_ok = getattr(dcm, tag_name, None) == search_val\n setattr(ds, tag_name, getattr(dcm, tag_name, None))\n\n if is_ok:\n results.append(ds)\n\n for res in results:\n yield 0xFF00, res\n\n def handle_cmove(event: Event, data_dir: str):\n yield \"localhost\", \"11114\", {\"contexts\": []}\n yield 0\n yield 0xFE00, None\n return\n\n handlers = [\n (evt.EVT_C_FIND, handle_cfind, [data_dir]),\n (evt.EVT_C_MOVE, handle_cmove, [data_dir]),\n ]\n server = None\n try:\n server = ae.start_server(\n (\"\", test_dest_node[\"port\"]), evt_handlers=handlers, block=False\n )\n yield ae\n finally:\n if server is not None:\n server.shutdown()", "def test_data_object_get_details(self):\n pass", "def test_fetch_working(suvi_client):\n start = 
'2019/05/25 00:50'\n end = '2019/05/25 00:52'\n wave = 94 * u.Angstrom\n goes_sat = a.goes.SatelliteNumber.sixteen\n tr = a.Time(start, end)\n qr1 = suvi_client.search(tr, a.Instrument.suvi, a.Wavelength(wave), goes_sat, a.Level(2))\n\n # Mock QueryResponse object\n mock_qr = mock_query_object(suvi_client)\n\n # Compare if two objects have the same attribute\n\n mock_qr = mock_qr[0]\n qr = qr1[0]\n\n assert mock_qr['Source'] == qr['Source']\n assert mock_qr['Provider'] == qr['Provider']\n assert mock_qr['Physobs'] == qr['Physobs']\n assert mock_qr['Instrument'] == qr['Instrument']\n assert mock_qr['url'] == qr['url']\n\n assert qr1['Start Time'] == Time(\"2019-05-25T00:52:00.000\")\n assert qr1['End Time'] == Time(\"2019-05-25T00:56:00.000\")\n\n with tempfile.TemporaryDirectory() as tmpdirname:\n download_list = suvi_client.fetch(qr1, path=tmpdirname)\n assert len(download_list) == len(qr1)", "def _query_info(self, entities: List[str]):\n self.player.respond(entities)", "def test_info_get(self):\n response = self.client.open(\n '/info',\n method='GET')\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))", "def testDynamicInfoGlobal(self):\n self.stream_start(mode='component',\n jid='tester.localhost',\n plugins=['xep_0030'])\n\n def dynamic_global(jid, node, ifrom, iq):\n result = self.xmpp['xep_0030'].stanza.DiscoInfo()\n result['node'] = node\n result.add_identity('component', 'generic', name='Dynamic Info')\n return result\n\n self.xmpp['xep_0030'].set_node_handler('get_info',\n handler=dynamic_global)\n\n self.recv(\"\"\"\n <iq type=\"get\" id=\"test\"\n to=\"[email protected]\"\n from=\"tester@localhost\">\n <query xmlns=\"http://jabber.org/protocol/disco#info\"\n node=\"testing\" />\n </iq>\n \"\"\")\n\n self.send(\"\"\"\n <iq type=\"result\" id=\"test\"\n to=\"tester@localhost\"\n from=\"[email protected]\">\n <query xmlns=\"http://jabber.org/protocol/disco#info\"\n node=\"testing\">\n <identity category=\"component\"\n type=\"generic\"\n name=\"Dynamic Info\" />\n </query>\n </iq>\n \"\"\")", "def testInfoEmptyDefaultNode(self):\n self.stream_start(mode='client',\n plugins=['xep_0030'])\n\n self.recv(\"\"\"\n <iq type=\"get\" id=\"test\">\n <query xmlns=\"http://jabber.org/protocol/disco#info\" />\n </iq>\n \"\"\")\n\n self.send(\"\"\"\n <iq type=\"result\" id=\"test\">\n <query xmlns=\"http://jabber.org/protocol/disco#info\">\n <identity category=\"client\" type=\"bot\" />\n <feature var=\"http://jabber.org/protocol/disco#info\" />\n </query>\n </iq>\n \"\"\")", "def test_resolve(self):\n disco = create_disco()\n node = create_node(\"somewhere\")\n node.properties = {\"x\": 1}\n disco.onMessage(None, NodeActive(node))\n resolved = resolve(disco, \"myservice\", \"1.0\")\n self.assertEqual(\n (resolved.version, resolved.address, resolved.service, resolved.properties),\n (\"1.0\", \"somewhere\", \"myservice\", {\"x\": 1}))", "def test_get_invoice_info(mocker, expected_response, expected_data, client) -> None:\n\n mocker.patch(\"requests.Session.request\", return_value=expected_response)\n actual_data = client.get_invoice_info(invoice_id=1)\n\n assert actual_data == expected_data", "def test_get_transaction_info(mocker, expected_response, expected_data, client) -> None:\n\n mocker.patch(\"requests.Session.request\", return_value=expected_response)\n actual_data = client.get_transaction_info(transaction_id=1)\n\n assert actual_data == expected_data", "def test_cartography_details(self):\n\t\tcreate_cartography()\n\t\tcartography = 
Document.objects.get(id=1)\n\t\tc = Client()\n\t\tresponse = c.get(\"/cartography/%s\" % str(cartography.id))\n\t\tself.assertEquals(response.status_code, 200)", "def testOverrideGlobalInfoHandler(self):\n self.stream_start(mode='component',\n jid='tester.localhost',\n plugins=['xep_0030'])\n\n def dynamic_global(jid, node, ifrom, iq):\n result = self.xmpp['xep_0030'].stanza.DiscoInfo()\n result['node'] = node\n result.add_identity('component', 'generic', name='Dynamic Info')\n return result\n\n self.xmpp['xep_0030'].set_node_handler('get_info',\n handler=dynamic_global)\n\n self.xmpp['xep_0030'].restore_defaults(jid='[email protected]',\n node='testing')\n\n self.xmpp['xep_0030'].add_feature(jid='[email protected]',\n node='testing',\n feature='urn:xmpp:ping')\n\n self.recv(\"\"\"\n <iq type=\"get\" id=\"test\"\n to=\"[email protected]\"\n from=\"tester@localhost\">\n <query xmlns=\"http://jabber.org/protocol/disco#info\"\n node=\"testing\" />\n </iq>\n \"\"\")\n\n self.send(\"\"\"\n <iq type=\"result\" id=\"test\"\n to=\"tester@localhost\"\n from=\"[email protected]\">\n <query xmlns=\"http://jabber.org/protocol/disco#info\"\n node=\"testing\">\n <feature var=\"urn:xmpp:ping\" />\n </query>\n </iq>\n \"\"\")", "def test_correct_input(self):\n\n user = login(self.client)\n client = create_client('test')\n client.write_access = True\n client.save()\n\n festival = create_festival('test', user)\n festival.save()\n concert = create_concert(festival, 'test')\n concert.save()\n\n response = self.client.post('/backend/u/conc/',\n {'client': 'test',\n 'id': concert.pk,\n 'stage': 2,\n 'artist': 'tset'\n })\n\n self.assertEqual(response.status_code, 200)\n response_string = response.content.decode('utf-8')\n self.assertTrue('artist:tset' in response_string)\n self.assertTrue('stage:2' in response_string)\n self.assertEqual(3, len(response_string.split('\\n')))\n self.assertEqual(1, Concert.objects.filter(festival=festival, artist='tset').count())", "def test_get_with_filter_person_factoid(mockclient_cl1):\n r = mockclient_cl1.get(TEST_URL + \"?size=100&f=F00062&p=P00063\")\n assert r.status_code == 200\n assert r.json[\"statements\"][0][\"@id\"] == \"Stmt00184\"\n r = mockclient_cl1.get(TEST_URL + \"?size=100&f=F00062&p=P00064\")\n assert r.status_code == 404", "def test_find_disputes(self):\n query_string = [('limit', 100),\n ('starting_after', 'starting_after_example'),\n ('ending_before', 'ending_before_example'),\n ('dispute_reason', DisputeCode()),\n ('dispute_status', DisputeStatus()),\n ('beginning_date', 'beginning_date_example'),\n ('ending_date', 'ending_date_example')]\n response = self.client.open(\n '/paySmart/ps-processadora/v1/disputes',\n method='GET',\n query_string=query_string)\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))" ]
[ "0.6199992", "0.6011163", "0.6003741", "0.59166396", "0.58798325", "0.56821346", "0.5621804", "0.5620803", "0.5614971", "0.5555766", "0.5552566", "0.55466306", "0.5536039", "0.55310833", "0.5511466", "0.55068636", "0.5483223", "0.54768014", "0.5472417", "0.5466013", "0.5436769", "0.54266775", "0.53589183", "0.5341641", "0.5338922", "0.5335274", "0.5333945", "0.53280264", "0.5322299", "0.52918136" ]
0.694831
0
Test using a dynamic items handler for a particular JID.
def testDynamicItemsJID(self):
        self.stream_start(mode='client',
                          plugins=['xep_0030'])

        def dynamic_jid(jid, node, ifrom, iq):
            result = self.xmpp['xep_0030'].stanza.DiscoItems()
            result['node'] = node
            result.add_item('tester@localhost', node='foo', name='JID')
            return result

        self.xmpp['xep_0030'].set_node_handler('get_items',
                                               jid='tester@localhost',
                                               handler=dynamic_jid)

        self.recv("""
          <iq type="get" id="test" to="tester@localhost">
            <query xmlns="http://jabber.org/protocol/disco#items"
                   node="testing" />
          </iq>
        """)

        self.send("""
          <iq type="result" id="test">
            <query xmlns="http://jabber.org/protocol/disco#items"
                   node="testing">
              <item jid="tester@localhost" node="foo" name="JID" />
            </query>
          </iq>
        """)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def testOverrideJIDItemsHandler(self):\n self.stream_start(mode='client',\n plugins=['xep_0030'])\n\n def dynamic_jid(jid, node, ifrom, iq):\n result = self.xmpp['xep_0030'].stanza.DiscoItems()\n result['node'] = node\n result.add_item('tester@localhost', node='foo', name='Global')\n return result\n\n self.xmpp['xep_0030'].set_node_handler('get_items',\n jid='tester@localhost',\n handler=dynamic_jid)\n\n\n self.xmpp['xep_0030'].restore_defaults(jid='tester@localhost',\n node='testing')\n\n self.xmpp['xep_0030'].add_item(ijid='tester@localhost',\n node='testing',\n jid='tester@localhost',\n subnode='foo',\n name='Test')\n\n self.recv(\"\"\"\n <iq type=\"get\" id=\"test\" to=\"tester@localhost\">\n <query xmlns=\"http://jabber.org/protocol/disco#items\"\n node=\"testing\" />\n </iq>\n \"\"\")\n\n self.send(\"\"\"\n <iq type=\"result\" id=\"test\">\n <query xmlns=\"http://jabber.org/protocol/disco#items\"\n node=\"testing\">\n <item jid=\"tester@localhost\" node=\"foo\" name=\"Test\" />\n </query>\n </iq>\n \"\"\")", "def item(self, item_name):\n\tself.log.info('Not implemented yet... Sorry!')\n\tpass", "def item_handler(id):\n if request.method == 'PUT':\n # authorization\n if not checkAuthorization('Item', id, g.user.id):\n return (jsonify({'data': 'Unauthorized', 'error': '401'}), 401)\n # Call the method to update a item\n rq = request.get_json()\n name = rq['name']\n picture = rq['picture']\n description = rq['description']\n item = updateItem(id, name, picture, description)\n return jsonify(item=item.serialize)\n elif request.method == 'DELETE':\n # authorization\n if not checkAuthorization('Item', id, g.user.id):\n return (jsonify({'data': 'Unauthorized', 'error': '401'}), 401)\n # Call the method to remove a item\n item = deleteItem(id)\n return jsonify(item=item.serialize)", "def test_find_stock_item_by_id(self):\n pass", "def test_item_id(item):\n assert item.item_id == 'exopy_pulses.Item'", "def test_new_item(self):\n\n\t\titem_id = mock_item()[0]\n\t\tself.assertEqual(item_id, 1)", "def handle_items(data, user_to_check):\n for item in data:\n handle_item(item, user_to_check)", "def test_gethardwares_item(self):\n pass", "def get_item(self, item_id): # pragma: no cover\n raise NotImplementedError", "def test_post_get_item(self):\n item = {'brand': 'apple',\n 'name': 'iPhone7',\n 'description': 'The latest iphone'}\n\n def check_item(result):\n item['id'] = result['id']\n resp = self.make_request('get', '/store/items/{}'.format(item['id']))\n self.assertEqual(resp.json()['id'], item['id'])\n self.assertEqual(resp.json()['brand'], item['brand'])\n self.assertEqual(resp.json()['name'], item['name'])\n self.assertEqual(resp.json()['description'], item['description'])\n\n self.assert_request('post',\n '/store/items',\n data=item,\n expected_code=201,\n expected_json=check_item)", "def _element_handler(self, path, item):\n key = path[-1][1]['id']\n self.data[key] = item.strip()\n return True", "def testGetItem(self):\n\t\tc = Controller()\n\t\tx = c.mock(KlassBeingMocked)\n\t\tx.g[5]\n\t\tc.setReturn(6)\n\t\tc.replay()\n\t\tself.failUnless(x.g[5] == 6)\n\t\tc.verify()", "def getitem(itemID):\n\n return harvest(GET_ITEM_URL, itemID)", "def selectItem(*args):", "def get_item_detail(item_id):\n pass", "def _item_exists(self, location):\n \"Does nothing\"", "def item_exists(item_id):\n return item_id in all_items", "def test_update_busketlistitem_by_id(self):\n resp = self.client.post('/bucketlists',\n data=json.dumps(self.bucketlist),\n content_type=\"application/json\", headers={\n 
\"Authorization\": self.token\n })\n self.assertEqual(resp.status_code, 201)\n resp_item = self.client.post('/bucketlistitems/1/items',\n data=json.dumps(\n {\"name\": \"visit the busy surburbs.\"}),\n content_type=\"application/json\", headers={\n \"Authorization\": self.token\n })\n self.assertEqual(resp.status_code, 201)\n update_item = self.client.put('/bucketlistitems/1/items/1',\n data=json.dumps(\n {\"name\": \"visit the busy surburbs and museums too.\"}),\n content_type=\"application/json\", headers={\n \"Authorization\": self.token\n })\n self.assertEqual(update_item.status_code, 201)", "def match(self, item):", "def test_update_item_using_post(self):\n pass", "def testOverrideGlobalItemsHandler(self):\n self.stream_start(mode='component',\n jid='tester.localhost',\n plugins=['xep_0030'])\n\n def dynamic_global(jid, node, ifrom, iq):\n result = self.xmpp['xep_0030'].stanza.DiscoItems()\n result['node'] = node\n result.add_item('tester.localhost', node='foo', name='Global')\n return result\n\n self.xmpp['xep_0030'].set_node_handler('get_items',\n handler=dynamic_global)\n\n self.xmpp['xep_0030'].restore_defaults(jid='[email protected]',\n node='testing')\n\n self.xmpp['xep_0030'].add_item(ijid='[email protected]',\n node='testing',\n jid='[email protected]',\n subnode='foo',\n name='Test')\n\n self.recv(\"\"\"\n <iq type=\"get\" id=\"test\"\n to=\"[email protected]\"\n from=\"tester@localhost\">\n <query xmlns=\"http://jabber.org/protocol/disco#items\"\n node=\"testing\" />\n </iq>\n \"\"\")\n\n self.send(\"\"\"\n <iq type=\"result\" id=\"test\"\n to=\"tester@localhost\"\n from=\"[email protected]\">\n <query xmlns=\"http://jabber.org/protocol/disco#items\"\n node=\"testing\">\n <item jid=\"[email protected]\" node=\"foo\" name=\"Test\" />\n </query>\n </iq>\n \"\"\")", "def testDynamicItemsGlobal(self):\n self.stream_start(mode='component',\n jid='tester.localhost',\n plugins=['xep_0030'])\n\n def dynamic_global(jid, node, ifrom, iq):\n result = self.xmpp['xep_0030'].stanza.DiscoItems()\n result['node'] = node\n result.add_item('tester@localhost', node='foo', name='Global')\n return result\n\n self.xmpp['xep_0030'].set_node_handler('get_items',\n handler=dynamic_global)\n\n self.recv(\"\"\"\n <iq type=\"get\" id=\"test\"\n to=\"[email protected]\"\n from=\"tester@localhost\">\n <query xmlns=\"http://jabber.org/protocol/disco#items\"\n node=\"testing\" />\n </iq>\n \"\"\")\n\n self.send(\"\"\"\n <iq type=\"result\" id=\"test\"\n to=\"tester@localhost\"\n from=\"[email protected]\">\n <query xmlns=\"http://jabber.org/protocol/disco#items\"\n node=\"testing\">\n <item jid=\"tester@localhost\" node=\"foo\" name=\"Global\" />\n </query>\n </iq>\n \"\"\")", "def test_acceptance_sku_item_defined_on_checkout(self):\r\n pattern = re.compile(r\"items: \\[\\{sku: 'sku_\\w{14}', quantity: \\d{1}\\}\\]\",\r\n re.I | re.M)\r\n res = re.search(pattern, self.dom_str)\r\n self.assertTrue(hasattr(res, 'group'),\r\n msg=\"You didn't add the SKU code in the items list.\")", "def run_handler(self, handler):\n self.last_activity = time.time()\n const_name = handler.upper()\n try:\n const_value = getattr(cwiid, const_name)\n if self.wm.state['buttons'] == const_value:\n self.exec_script(handler)\n except AttributeError:\n return 0", "def test_get_single_good_item(test_client):\n\n response = test_client.get(GOOD_ITEM_URL)\n\n data = json.loads(response.get_data())\n\n assert response.status_code == 200\n assert len(data['items']) == 1\n assert data['items'][0]['id'] == 3", "def action(self,item):\r\n 
pass", "def query_handler(self, handler_name=\"\"):\n\t\treturn False", "def matcher(item):\n hit = item.get(lookup_key)\n if not isinstance(hit, list):\n return hit == identifier\n return any([el for el in hit if el == identifier])", "def test_get_bucketlist_item_id(self):\n resp = self.client.post('/bucketlists',\n data=json.dumps(self.bucketlist),\n content_type=\"application/json\", headers={\n \"Authorization\": self.token\n })\n self.assertEqual(resp.status_code, 201)\n resp_item = self.client.post('/bucketlistitems/1/items',\n data=json.dumps(\n {\"name\": \"visit the busy surburbs.\"}),\n content_type=\"application/json\", headers={\n \"Authorization\": self.token\n })\n self.assertEqual(resp.status_code, 201)\n get_item = self.client.get('/bucketlistitems/1/items/1', headers={\n \"Authorization\": self.token\n })\n self.assertEqual(resp.status_code, 201)", "def find_by_id(object_id, items):\n for item in items:\n if object_id == item[\"id\"]:\n return item\n\n raise Exception(f\"Item with {object_id} not found\")" ]
[ "0.6029902", "0.55530435", "0.54791045", "0.54502743", "0.5430259", "0.532237", "0.52918226", "0.527074", "0.51665753", "0.51387405", "0.5137336", "0.51323307", "0.51175356", "0.50867325", "0.5077828", "0.50663286", "0.5026798", "0.50260264", "0.49849564", "0.49659404", "0.4965389", "0.49343887", "0.4925784", "0.49210307", "0.4912355", "0.48967585", "0.488569", "0.48829126", "0.4877442", "0.48598358" ]
0.6357774
0
Test overriding a JID items handler.
def testOverrideJIDItemsHandler(self): self.stream_start(mode='client', plugins=['xep_0030']) def dynamic_jid(jid, node, ifrom, iq): result = self.xmpp['xep_0030'].stanza.DiscoItems() result['node'] = node result.add_item('tester@localhost', node='foo', name='Global') return result self.xmpp['xep_0030'].set_node_handler('get_items', jid='tester@localhost', handler=dynamic_jid) self.xmpp['xep_0030'].restore_defaults(jid='tester@localhost', node='testing') self.xmpp['xep_0030'].add_item(ijid='tester@localhost', node='testing', jid='tester@localhost', subnode='foo', name='Test') self.recv(""" <iq type="get" id="test" to="tester@localhost"> <query xmlns="http://jabber.org/protocol/disco#items" node="testing" /> </iq> """) self.send(""" <iq type="result" id="test"> <query xmlns="http://jabber.org/protocol/disco#items" node="testing"> <item jid="tester@localhost" node="foo" name="Test" /> </query> </iq> """)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def item(self, item_name):\n\tself.log.info('Not implemented yet... Sorry!')\n\tpass", "def testOverrideGlobalItemsHandler(self):\n self.stream_start(mode='component',\n jid='tester.localhost',\n plugins=['xep_0030'])\n\n def dynamic_global(jid, node, ifrom, iq):\n result = self.xmpp['xep_0030'].stanza.DiscoItems()\n result['node'] = node\n result.add_item('tester.localhost', node='foo', name='Global')\n return result\n\n self.xmpp['xep_0030'].set_node_handler('get_items',\n handler=dynamic_global)\n\n self.xmpp['xep_0030'].restore_defaults(jid='[email protected]',\n node='testing')\n\n self.xmpp['xep_0030'].add_item(ijid='[email protected]',\n node='testing',\n jid='[email protected]',\n subnode='foo',\n name='Test')\n\n self.recv(\"\"\"\n <iq type=\"get\" id=\"test\"\n to=\"[email protected]\"\n from=\"tester@localhost\">\n <query xmlns=\"http://jabber.org/protocol/disco#items\"\n node=\"testing\" />\n </iq>\n \"\"\")\n\n self.send(\"\"\"\n <iq type=\"result\" id=\"test\"\n to=\"tester@localhost\"\n from=\"[email protected]\">\n <query xmlns=\"http://jabber.org/protocol/disco#items\"\n node=\"testing\">\n <item jid=\"[email protected]\" node=\"foo\" name=\"Test\" />\n </query>\n </iq>\n \"\"\")", "def testOverrideOfOneItem(self):\n\t\tc = Controller()\n\t\tx = KlassBeingMocked()\n\t\tx.f = 38\n\t\tc.override(x, 'f', 5)\n\t\tself.failUnless(x.f == 5)\n\t\tc.restore()\n\t\tself.failUnless(x.f == 38)", "def test_patchhardwares_item(self):\n pass", "def testDynamicItemsJID(self):\n self.stream_start(mode='client',\n plugins=['xep_0030'])\n\n def dynamic_jid(jid, node, ifrom, iq):\n result = self.xmpp['xep_0030'].stanza.DiscoItems()\n result['node'] = node\n result.add_item('tester@localhost', node='foo', name='JID')\n return result\n\n self.xmpp['xep_0030'].set_node_handler('get_items',\n jid='tester@localhost',\n handler=dynamic_jid)\n\n self.recv(\"\"\"\n <iq type=\"get\" id=\"test\" to=\"tester@localhost\">\n <query xmlns=\"http://jabber.org/protocol/disco#items\"\n node=\"testing\" />\n </iq>\n \"\"\")\n\n self.send(\"\"\"\n <iq type=\"result\" id=\"test\">\n <query xmlns=\"http://jabber.org/protocol/disco#items\"\n node=\"testing\">\n <item jid=\"tester@localhost\" node=\"foo\" name=\"JID\" />\n </query>\n </iq>\n \"\"\")", "def handler(cls):\n if cls.POS == 1:\n extra_item = make_mocked_item(MOCKED_SEARCH_COUNT)\n cls.ITEMS.insert(0, extra_item)\n cls.POS += 1\n return cls.ITEMS", "def handle_list_items_special(self, object, name, old, new):\n wh = self.wrapped_handler_ref()\n if wh is not None:\n wh(object, name, new.removed, new.added)", "def item_handler(id):\n if request.method == 'PUT':\n # authorization\n if not checkAuthorization('Item', id, g.user.id):\n return (jsonify({'data': 'Unauthorized', 'error': '401'}), 401)\n # Call the method to update a item\n rq = request.get_json()\n name = rq['name']\n picture = rq['picture']\n description = rq['description']\n item = updateItem(id, name, picture, description)\n return jsonify(item=item.serialize)\n elif request.method == 'DELETE':\n # authorization\n if not checkAuthorization('Item', id, g.user.id):\n return (jsonify({'data': 'Unauthorized', 'error': '401'}), 401)\n # Call the method to remove a item\n item = deleteItem(id)\n return jsonify(item=item.serialize)", "def test_new_item(self):\n\n\t\titem_id = mock_item()[0]\n\t\tself.assertEqual(item_id, 1)", "def test_item_id(item):\n assert item.item_id == 'exopy_pulses.Item'", "def setUp(self):\n self.item_id = \"mss37820001\"", "def testOverrideJIDInfoHandler(self):\n 
self.stream_start(mode='client',\n plugins=['xep_0030'])\n\n def dynamic_jid(jid, node, ifrom, iq):\n result = self.xmpp['xep_0030'].stanza.DiscoInfo()\n result['node'] = node\n result.add_identity('client', 'console', name='Dynamic Info')\n return result\n\n self.xmpp['xep_0030'].set_node_handler('get_info',\n jid='tester@localhost',\n handler=dynamic_jid)\n\n\n self.xmpp['xep_0030'].restore_defaults(jid='tester@localhost',\n node='testing')\n\n self.xmpp['xep_0030'].add_identity(jid='tester@localhost',\n node='testing',\n category='automation',\n itype='command-list')\n\n self.recv(\"\"\"\n <iq type=\"get\" id=\"test\" to=\"tester@localhost\">\n <query xmlns=\"http://jabber.org/protocol/disco#info\"\n node=\"testing\" />\n </iq>\n \"\"\")\n\n self.send(\"\"\"\n <iq type=\"result\" id=\"test\">\n <query xmlns=\"http://jabber.org/protocol/disco#info\"\n node=\"testing\">\n <identity category=\"automation\"\n type=\"command-list\" />\n </query>\n </iq>\n \"\"\")", "def _open_item(self, *args, **kwargs):\n \"Does nothing\"", "def test_update_item_using_post(self):\n pass", "def pytest_itemcollected(item):\n item.name = item.name.split('[', 1)[1][:-1]\n # pylint: disable=protected-access\n item._nodeid = item.name", "def identify_items(self, items):\n\n raise NotImplementedError", "def test_register_item(self, init_event, mocker):\n mock_map = {}\n\n mocker.patch.object(\n houdini_toolbox.events.event.HoudiniEvent,\n \"item_map\",\n new_callable=mocker.PropertyMock(return_value=mock_map),\n )\n\n # Mock the item via the reference in the module so that the isinstance()\n # test is more reliable vs mocking houdini_toolbox.events.item.HoudiniEventItem\n mock_item = mocker.MagicMock(spec=houdini_toolbox.events.event.HoudiniEventItem)\n\n event = init_event()\n event.register_item(mock_item)\n\n assert mock_map == {mock_item.priority: [mock_item]}", "def action(self,item):\r\n pass", "def testOverrideOfTwoItems(self):\n\t\tc = Controller()\n\t\tx = KlassBeingMocked()\n\t\ty = KlassBeingMocked()\n\t\tx.f = 38\n\t\ty.g = 39\n\t\tc.override(x, 'f', 5)\n\t\tc.override(y, 'g', 6)\n\t\tself.failUnless(x.f == 5)\n\t\tself.failUnless(y.g == 6)\n\t\tc.restore()\n\t\tself.failUnless(x.f == 38)\n\t\tself.failUnless(y.g == 39)", "def test_hook_register(self):\n self.assertEqual(list(self.registry), [])\n\n item = self.DummyItem(123)\n self.hook_cls(self.extension, item)\n\n self.assertIn(item, self.registry)", "def test_patchorganizations_item(self):\n pass", "def init_items(self):\r\n raise NotImplementedError()", "def handle_list_items(self, object, name, old, new):\n raise NotImplementedError", "def test_add_item_using_post(self):\n pass", "def _element_handler(self, path, item):\n key = path[-1][1]['id']\n self.data[key] = item.strip()\n return True", "def test_gethardwares_item(self):\n pass", "def test_init_adds_handler(self):\n pass", "def test_delete_item_using_delete(self):\n pass", "def test_reusableitem_unsupported_modification(self):\n\n self.client.force_authenticate(user=self.user_1)\n\n response = self.client.patch(get_reusable_item_1_url(self), {'change_request': 'Some text'}, format='json')\n\n updated_object = ReusableItem.objects.get(pk=self.reusableitem_1.id)\n\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)", "def test_update_busketlistitem_by_id(self):\n resp = self.client.post('/bucketlists',\n data=json.dumps(self.bucketlist),\n content_type=\"application/json\", headers={\n \"Authorization\": self.token\n })\n self.assertEqual(resp.status_code, 201)\n resp_item = 
self.client.post('/bucketlistitems/1/items',\n data=json.dumps(\n {\"name\": \"visit the busy surburbs.\"}),\n content_type=\"application/json\", headers={\n \"Authorization\": self.token\n })\n self.assertEqual(resp.status_code, 201)\n update_item = self.client.put('/bucketlistitems/1/items/1',\n data=json.dumps(\n {\"name\": \"visit the busy surburbs and museums too.\"}),\n content_type=\"application/json\", headers={\n \"Authorization\": self.token\n })\n self.assertEqual(update_item.status_code, 201)" ]
[ "0.62347704", "0.6178297", "0.5937533", "0.5896504", "0.5749715", "0.56762606", "0.5633245", "0.56151956", "0.55781144", "0.55640936", "0.5548436", "0.5520043", "0.5515242", "0.5451386", "0.54217774", "0.5370774", "0.5364858", "0.5345353", "0.5321662", "0.5310886", "0.52627987", "0.52618957", "0.5246292", "0.52381283", "0.5231491", "0.5167539", "0.51606905", "0.515474", "0.5153645", "0.514833" ]
0.75180954
0
Test overriding the global JID items handler.
def testOverrideGlobalItemsHandler(self): self.stream_start(mode='component', jid='tester.localhost', plugins=['xep_0030']) def dynamic_global(jid, node, ifrom, iq): result = self.xmpp['xep_0030'].stanza.DiscoItems() result['node'] = node result.add_item('tester.localhost', node='foo', name='Global') return result self.xmpp['xep_0030'].set_node_handler('get_items', handler=dynamic_global) self.xmpp['xep_0030'].restore_defaults(jid='user@tester.localhost', node='testing') self.xmpp['xep_0030'].add_item(ijid='user@tester.localhost', node='testing', jid='user@tester.localhost', subnode='foo', name='Test') self.recv(""" <iq type="get" id="test" to="user@tester.localhost" from="tester@localhost"> <query xmlns="http://jabber.org/protocol/disco#items" node="testing" /> </iq> """) self.send(""" <iq type="result" id="test" to="tester@localhost" from="user@tester.localhost"> <query xmlns="http://jabber.org/protocol/disco#items" node="testing"> <item jid="user@tester.localhost" node="foo" name="Test" /> </query> </iq> """)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def testOverrideJIDItemsHandler(self):\n self.stream_start(mode='client',\n plugins=['xep_0030'])\n\n def dynamic_jid(jid, node, ifrom, iq):\n result = self.xmpp['xep_0030'].stanza.DiscoItems()\n result['node'] = node\n result.add_item('tester@localhost', node='foo', name='Global')\n return result\n\n self.xmpp['xep_0030'].set_node_handler('get_items',\n jid='tester@localhost',\n handler=dynamic_jid)\n\n\n self.xmpp['xep_0030'].restore_defaults(jid='tester@localhost',\n node='testing')\n\n self.xmpp['xep_0030'].add_item(ijid='tester@localhost',\n node='testing',\n jid='tester@localhost',\n subnode='foo',\n name='Test')\n\n self.recv(\"\"\"\n <iq type=\"get\" id=\"test\" to=\"tester@localhost\">\n <query xmlns=\"http://jabber.org/protocol/disco#items\"\n node=\"testing\" />\n </iq>\n \"\"\")\n\n self.send(\"\"\"\n <iq type=\"result\" id=\"test\">\n <query xmlns=\"http://jabber.org/protocol/disco#items\"\n node=\"testing\">\n <item jid=\"tester@localhost\" node=\"foo\" name=\"Test\" />\n </query>\n </iq>\n \"\"\")", "def item(self, item_name):\n\tself.log.info('Not implemented yet... Sorry!')\n\tpass", "def setUp(self):\n self.item_id = \"mss37820001\"", "def test_patchhardwares_item(self):\n pass", "def testDynamicItemsJID(self):\n self.stream_start(mode='client',\n plugins=['xep_0030'])\n\n def dynamic_jid(jid, node, ifrom, iq):\n result = self.xmpp['xep_0030'].stanza.DiscoItems()\n result['node'] = node\n result.add_item('tester@localhost', node='foo', name='JID')\n return result\n\n self.xmpp['xep_0030'].set_node_handler('get_items',\n jid='tester@localhost',\n handler=dynamic_jid)\n\n self.recv(\"\"\"\n <iq type=\"get\" id=\"test\" to=\"tester@localhost\">\n <query xmlns=\"http://jabber.org/protocol/disco#items\"\n node=\"testing\" />\n </iq>\n \"\"\")\n\n self.send(\"\"\"\n <iq type=\"result\" id=\"test\">\n <query xmlns=\"http://jabber.org/protocol/disco#items\"\n node=\"testing\">\n <item jid=\"tester@localhost\" node=\"foo\" name=\"JID\" />\n </query>\n </iq>\n \"\"\")", "def test_hook_register(self):\n self.assertEqual(list(self.registry), [])\n\n item = self.DummyItem(123)\n self.hook_cls(self.extension, item)\n\n self.assertIn(item, self.registry)", "def test_item_id(item):\n assert item.item_id == 'exopy_pulses.Item'", "def test_new_item(self):\n\n\t\titem_id = mock_item()[0]\n\t\tself.assertEqual(item_id, 1)", "def test_register_item(self, init_event, mocker):\n mock_map = {}\n\n mocker.patch.object(\n houdini_toolbox.events.event.HoudiniEvent,\n \"item_map\",\n new_callable=mocker.PropertyMock(return_value=mock_map),\n )\n\n # Mock the item via the reference in the module so that the isinstance()\n # test is more reliable vs mocking houdini_toolbox.events.item.HoudiniEventItem\n mock_item = mocker.MagicMock(spec=houdini_toolbox.events.event.HoudiniEventItem)\n\n event = init_event()\n event.register_item(mock_item)\n\n assert mock_map == {mock_item.priority: [mock_item]}", "def handler(cls):\n if cls.POS == 1:\n extra_item = make_mocked_item(MOCKED_SEARCH_COUNT)\n cls.ITEMS.insert(0, extra_item)\n cls.POS += 1\n return cls.ITEMS", "def test_gethardwares_item(self):\n pass", "def pytest_itemcollected(item):\n item.name = item.name.split('[', 1)[1][:-1]\n # pylint: disable=protected-access\n item._nodeid = item.name", "def testOverrideJIDInfoHandler(self):\n self.stream_start(mode='client',\n plugins=['xep_0030'])\n\n def dynamic_jid(jid, node, ifrom, iq):\n result = self.xmpp['xep_0030'].stanza.DiscoInfo()\n result['node'] = node\n result.add_identity('client', 'console', 
name='Dynamic Info')\n return result\n\n self.xmpp['xep_0030'].set_node_handler('get_info',\n jid='tester@localhost',\n handler=dynamic_jid)\n\n\n self.xmpp['xep_0030'].restore_defaults(jid='tester@localhost',\n node='testing')\n\n self.xmpp['xep_0030'].add_identity(jid='tester@localhost',\n node='testing',\n category='automation',\n itype='command-list')\n\n self.recv(\"\"\"\n <iq type=\"get\" id=\"test\" to=\"tester@localhost\">\n <query xmlns=\"http://jabber.org/protocol/disco#info\"\n node=\"testing\" />\n </iq>\n \"\"\")\n\n self.send(\"\"\"\n <iq type=\"result\" id=\"test\">\n <query xmlns=\"http://jabber.org/protocol/disco#info\"\n node=\"testing\">\n <identity category=\"automation\"\n type=\"command-list\" />\n </query>\n </iq>\n \"\"\")", "def test_init_adds_handler(self):\n pass", "def _open_item(self, *args, **kwargs):\n \"Does nothing\"", "def init_items(self):\r\n raise NotImplementedError()", "def testOverrideOfOneItem(self):\n\t\tc = Controller()\n\t\tx = KlassBeingMocked()\n\t\tx.f = 38\n\t\tc.override(x, 'f', 5)\n\t\tself.failUnless(x.f == 5)\n\t\tc.restore()\n\t\tself.failUnless(x.f == 38)", "def handle_list_items_special(self, object, name, old, new):\n wh = self.wrapped_handler_ref()\n if wh is not None:\n wh(object, name, new.removed, new.added)", "def pytest_before_group_items(session, config, items):", "def test_puthardwares_item(self):\n pass", "def test_get_order_items(self):\n pass", "def testDynamicItemsGlobal(self):\n self.stream_start(mode='component',\n jid='tester.localhost',\n plugins=['xep_0030'])\n\n def dynamic_global(jid, node, ifrom, iq):\n result = self.xmpp['xep_0030'].stanza.DiscoItems()\n result['node'] = node\n result.add_item('tester@localhost', node='foo', name='Global')\n return result\n\n self.xmpp['xep_0030'].set_node_handler('get_items',\n handler=dynamic_global)\n\n self.recv(\"\"\"\n <iq type=\"get\" id=\"test\"\n to=\"[email protected]\"\n from=\"tester@localhost\">\n <query xmlns=\"http://jabber.org/protocol/disco#items\"\n node=\"testing\" />\n </iq>\n \"\"\")\n\n self.send(\"\"\"\n <iq type=\"result\" id=\"test\"\n to=\"tester@localhost\"\n from=\"[email protected]\">\n <query xmlns=\"http://jabber.org/protocol/disco#items\"\n node=\"testing\">\n <item jid=\"tester@localhost\" node=\"foo\" name=\"Global\" />\n </query>\n </iq>\n \"\"\")", "def test_patchorganizations_item(self):\n pass", "def item_handler(id):\n if request.method == 'PUT':\n # authorization\n if not checkAuthorization('Item', id, g.user.id):\n return (jsonify({'data': 'Unauthorized', 'error': '401'}), 401)\n # Call the method to update a item\n rq = request.get_json()\n name = rq['name']\n picture = rq['picture']\n description = rq['description']\n item = updateItem(id, name, picture, description)\n return jsonify(item=item.serialize)\n elif request.method == 'DELETE':\n # authorization\n if not checkAuthorization('Item', id, g.user.id):\n return (jsonify({'data': 'Unauthorized', 'error': '401'}), 401)\n # Call the method to remove a item\n item = deleteItem(id)\n return jsonify(item=item.serialize)", "def identify_items(self, items):\n\n raise NotImplementedError", "def test_update_item_using_post(self):\n pass", "def test_add_item_using_post(self):\n pass", "def __init__(self, item_id: str) -> None:\n super().__init__(f\"Item {item_id} preferred.\")\n self.item_id = item_id", "def test_import_selected_items(self):\n item_id = self.response.context['items'][0].id\n self.client.post('/importer/', {'items': [item_id, ]})\n 
self.assertEquals(ModeratedObject.objects.all().count(), 1)", "def use(self, item_name):\n pass" ]
[ "0.73689735", "0.6308142", "0.60755616", "0.58666635", "0.57607436", "0.5755129", "0.5733778", "0.566231", "0.5598811", "0.55508614", "0.55326015", "0.54916745", "0.5477484", "0.5468347", "0.5459972", "0.5378309", "0.53369355", "0.53179693", "0.53076214", "0.52763283", "0.5256378", "0.52357155", "0.52314216", "0.52086747", "0.51742125", "0.51720196", "0.51697713", "0.5169341", "0.5165322", "0.5162471" ]
0.680608
1
Test sending a discoitems query to another entity and receiving the result.
def testGetItemsRemote(self): self.stream_start(mode='client', plugins=['xep_0030']) events = set() results = set() def handle_disco_items(iq): events.add('disco_items') results.update(iq['disco_items']['items']) self.xmpp.add_event_handler('disco_items', handle_disco_items) self.xmpp.wrap(self.xmpp['xep_0030'].get_items('user@localhost', 'foo')) self.wait_() self.send(""" <iq type="get" to="user@localhost" id="1"> <query xmlns="http://jabber.org/protocol/disco#items" node="foo" /> </iq> """) self.recv(""" <iq type="result" to="tester@localhost" id="1"> <query xmlns="http://jabber.org/protocol/disco#items" node="foo"> <item jid="user@localhost" node="bar" name="Test" /> <item jid="user@localhost" node="baz" name="Test 2" /> </query> </iq> """) items = {('user@localhost', 'bar', 'Test'), ('user@localhost', 'baz', 'Test 2')} self.assertEqual(events, {'disco_items'}, "Disco items event was not triggered: %s" % events) self.assertEqual(results, items, "Unexpected items: %s" % results)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __disco_items(self,iq):\n q=iq.get_query()\n if q.hasProp(\"node\"):\n node=from_utf8(q.prop(\"node\"))\n else:\n node=None\n items=self.disco_get_items(node,iq)\n if isinstance(items,DiscoItems):\n resp=iq.make_result_response()\n self.__logger.debug(\"Disco-items query: %s preparing response: %s with reply: %s\"\n % (iq.serialize(),resp.serialize(),items.xmlnode.serialize()))\n resp.set_content(items.xmlnode.copyNode(1))\n elif isinstance(items,Stanza):\n resp=items\n else:\n resp=iq.make_error_response(\"item-not-found\")\n self.__logger.debug(\"Disco-items response: %s\" % (resp.serialize(),))\n self.stream.send(resp)", "def testGetItemsIterators(self):\n iteration_finished = []\n jids_found = set()\n\n self.stream_start(mode='client',\n plugins=['xep_0030', 'xep_0059'])\n\n async def run_test():\n iterator = await self.xmpp['xep_0030'].get_items(\n jid='foo@localhost',\n node='bar',\n iterator=True\n )\n iterator.amount = 10\n async for page in iterator:\n for item in page['disco_items']['items']:\n jids_found.add(item[0])\n iteration_finished.append(True)\n\n test_run = self.xmpp.wrap(run_test())\n self.wait_()\n self.send(\"\"\"\n <iq id=\"2\" type=\"get\" to=\"foo@localhost\">\n <query xmlns=\"http://jabber.org/protocol/disco#items\"\n node=\"bar\">\n <set xmlns=\"http://jabber.org/protocol/rsm\">\n <max>10</max>\n </set>\n </query>\n </iq>\n \"\"\")\n self.recv(\"\"\"\n <iq id=\"2\" type=\"result\" to=\"tester@localhost\">\n <query xmlns=\"http://jabber.org/protocol/disco#items\">\n <item jid=\"a@b\" node=\"1\"/>\n <item jid=\"b@b\" node=\"2\"/>\n <item jid=\"c@b\" node=\"3\"/>\n <item jid=\"d@b\" node=\"4\"/>\n <item jid=\"e@b\" node=\"5\"/>\n <set xmlns=\"http://jabber.org/protocol/rsm\">\n <first index='0'>a@b</first>\n <last>e@b</last>\n <count>10</count>\n </set>\n </query>\n </iq>\n \"\"\")\n self.wait_()\n self.send(\"\"\"\n <iq id=\"3\" type=\"get\" to=\"foo@localhost\">\n <query xmlns=\"http://jabber.org/protocol/disco#items\"\n node=\"bar\">\n <set xmlns=\"http://jabber.org/protocol/rsm\">\n <max>10</max>\n <after>e@b</after>\n </set>\n </query>\n </iq>\n \"\"\")\n self.recv(\"\"\"\n <iq id=\"3\" type=\"result\" to=\"tester@localhost\">\n <query xmlns=\"http://jabber.org/protocol/disco#items\">\n <item jid=\"f@b\" node=\"6\"/>\n <item jid=\"g@b\" node=\"7\"/>\n <item jid=\"h@b\" node=\"8\"/>\n <item jid=\"i@b\" node=\"9\"/>\n <item jid=\"j@b\" node=\"10\"/>\n <set xmlns=\"http://jabber.org/protocol/rsm\">\n <first index='5'>f@b</first>\n <last>j@b</last>\n <count>10</count>\n </set>\n </query>\n </iq>\n \"\"\")\n expected_jids = {'%s@b' % i for i in 'abcdefghij'}\n self.run_coro(test_run)\n self.assertEqual(expected_jids, jids_found)\n self.assertEqual(iteration_finished, [True])", "def test_show_cart_with_items(client):\n raise NotImplemented('Acceptance test failed')", "def test_02(self):\n e1 = Emulator()\n e1.init()\n\n e2 = Emulator()\n e2.init()\n\n Emulator.run_transfer_prepare()\n\n sum1 = TransferPrepare.objects.filter(\n is_processed=False, account__item__user=e1.user)\\\n .aggregate(Sum('roundup'))['roundup__sum']\n\n sum2 = TransferPrepare.objects.filter(\n is_processed=False, account__item__user=e2.user)\\\n .aggregate(Sum('roundup'))['roundup__sum']\n\n Emulator.run_transfer_donkies_prepare()\n\n assert TransferDonkies.objects.count() == 2\n\n qs = TransferDonkies.objects.filter(account__item__user=e1.user)\n assert qs.first().amount == sum1\n\n qs = TransferDonkies.objects.filter(account__item__user=e2.user)\n assert qs.first().amount == sum2", "def 
test_item():\n # 404 reponses\n response = app.test_client().get('/v1/resources/menu?')\n assert response.status_code == 404\n\n response = app.test_client().get('/v1/resources/menu?itype=pizza')\n assert response.status_code == 404\n\n response = app.test_client().get('/v1/resources/menu?itype=pizza&item=pepperoni')\n assert response.status_code == 404\n\n response = app.test_client().get('/v1/resources/menu?item=pepperoni')\n assert response.status_code == 404\n\n # 204 responses\n response = app.test_client().get('/v1/resources/menu?item=p&itype=pizza&size=small')\n assert response.status_code == 204\n\n response = app.test_client().get('/v1/resources/menu?item=pepperoni&itype=p&size=small')\n assert response.status_code == 204\n\n response = app.test_client().get('/v1/resources/menu?item=pepperoni&itype=pizza&size=s')\n assert response.status_code == 204\n\n response = app.test_client().get('/v1/resources/menu?itype=topping&item=b')\n assert response.status_code == 204\n\n # 200 responses\n response = app.test_client().get('/v1/resources/menu?itype=topping&item=beef')\n assert response.status_code == 200\n assert response.content_type == 'application/json'\n assert isinstance((response.json)['price'], int)\n\n response = app.test_client().get('/v1/resources/menu?itype=pizza&item=pepperoni&size=small')\n assert response.status_code == 200\n assert response.content_type == 'application/json'\n assert isinstance((response.json)['price'], int)", "def test_get_order_items(self):\n pass", "def test_post_get_item(self):\n item = {'brand': 'apple',\n 'name': 'iPhone7',\n 'description': 'The latest iphone'}\n\n def check_item(result):\n item['id'] = result['id']\n resp = self.make_request('get', '/store/items/{}'.format(item['id']))\n self.assertEqual(resp.json()['id'], item['id'])\n self.assertEqual(resp.json()['brand'], item['brand'])\n self.assertEqual(resp.json()['name'], item['name'])\n self.assertEqual(resp.json()['description'], item['description'])\n\n self.assert_request('post',\n '/store/items',\n data=item,\n expected_code=201,\n expected_json=check_item)", "def test_updating_to_receive_items(self):\n modified_po = copy.deepcopy(base_purchase_order)\n del modified_po['items'][1]\n modified_po['items'][0]['id'] = 1\n modified_po['items'][0]['status'] = 'RECEIVED'\n modified_po['status'] = 'RECEIVED'\n self.assertEqual(Supply.objects.get(pk=1).quantity, 10)\n \n resp = self.client.put('/api/v1/purchase-order/1/', format='json', data=modified_po)\n \n self.assertEqual(resp.status_code, 200, msg=resp)\n \n po_data = resp.data\n self.assertEqual(po_data['id'], 1)\n self.assertEqual(po_data['status'], 'RECEIVED')\n \n item1 = po_data['items'][0]\n self.assertEqual(item1['id'], 1)\n self.assertEqual(item1['status'], 'RECEIVED')\n \n #Test database values\n po = PurchaseOrder.objects.get(pk=1)\n self.assertEqual(po.id, 1)\n self.assertEqual(po.status, 'RECEIVED')\n for item in po.items.all():\n self.assertEqual(item.status, \"RECEIVED\")\n \n supply = Supply.objects.get(pk=1)\n self.assertEqual(supply.quantity, 20)\n log = Log.objects.all().order_by('-id')[0]\n self.assertEqual(log.action, \"ADD\")\n self.assertEqual(log.quantity, 10)\n self.assertEqual(log.supplier.id, 1)\n self.assertEqual(log.message, \"Received 10m of Pattern: Maxx, Col: Blue from Zipper World\")", "def testDynamicItemsJID(self):\n self.stream_start(mode='client',\n plugins=['xep_0030'])\n\n def dynamic_jid(jid, node, ifrom, iq):\n result = self.xmpp['xep_0030'].stanza.DiscoItems()\n result['node'] = node\n 
result.add_item('tester@localhost', node='foo', name='JID')\n return result\n\n self.xmpp['xep_0030'].set_node_handler('get_items',\n jid='tester@localhost',\n handler=dynamic_jid)\n\n self.recv(\"\"\"\n <iq type=\"get\" id=\"test\" to=\"tester@localhost\">\n <query xmlns=\"http://jabber.org/protocol/disco#items\"\n node=\"testing\" />\n </iq>\n \"\"\")\n\n self.send(\"\"\"\n <iq type=\"result\" id=\"test\">\n <query xmlns=\"http://jabber.org/protocol/disco#items\"\n node=\"testing\">\n <item jid=\"tester@localhost\" node=\"foo\" name=\"JID\" />\n </query>\n </iq>\n \"\"\")", "def test_fetch_working(suvi_client):\n start = '2019/05/25 00:50'\n end = '2019/05/25 00:52'\n wave = 94 * u.Angstrom\n goes_sat = a.goes.SatelliteNumber.sixteen\n tr = a.Time(start, end)\n qr1 = suvi_client.search(tr, a.Instrument.suvi, a.Wavelength(wave), goes_sat, a.Level(2))\n\n # Mock QueryResponse object\n mock_qr = mock_query_object(suvi_client)\n\n # Compare if two objects have the same attribute\n\n mock_qr = mock_qr[0]\n qr = qr1[0]\n\n assert mock_qr['Source'] == qr['Source']\n assert mock_qr['Provider'] == qr['Provider']\n assert mock_qr['Physobs'] == qr['Physobs']\n assert mock_qr['Instrument'] == qr['Instrument']\n assert mock_qr['url'] == qr['url']\n\n assert qr1['Start Time'] == Time(\"2019-05-25T00:52:00.000\")\n assert qr1['End Time'] == Time(\"2019-05-25T00:56:00.000\")\n\n with tempfile.TemporaryDirectory() as tmpdirname:\n download_list = suvi_client.fetch(qr1, path=tmpdirname)\n assert len(download_list) == len(qr1)", "def testItemsIncludeNode(self):\n self.stream_start(mode='client',\n plugins=['xep_0030'])\n\n\n self.xmpp['xep_0030'].static.add_node(node='testing')\n\n self.recv(\"\"\"\n <iq to=\"tester@localhost/resource\" type=\"get\" id=\"test\">\n <query xmlns=\"http://jabber.org/protocol/disco#items\"\n node=\"testing\" />\n </iq>\n \"\"\")\n\n self.send(\"\"\"\n <iq type=\"result\" id=\"test\">\n <query xmlns=\"http://jabber.org/protocol/disco#items\"\n node=\"testing\">\n </query>\n </iq>\"\"\",\n method='mask')", "def test_get_single_good_item(test_client):\n\n response = test_client.get(GOOD_ITEM_URL)\n\n data = json.loads(response.get_data())\n\n assert response.status_code == 200\n assert len(data['items']) == 1\n assert data['items'][0]['id'] == 3", "def test_find_stock_items(self):\n pass", "def test_query_orders(\n test_client, pydex_client, asset_infos\n):\n orders_params = pydex_client.make_orders_query(\n maker_asset_data=asset_infos.VETH_ASSET_DATA,\n taker_asset_data=asset_infos.LONG_ASSET_DATA,\n include_maybe_fillables=True\n )\n res = test_client.get(\n pydex_client.get_orders_url,\n query_string=orders_params\n )\n assert res.status_code == 200\n res = res.get_json()\n assert_valid(res, \"/relayerApiOrdersResponseSchema\")\n assert res[\"records\"][0][\"order\"][\"makerAssetData\"] == asset_infos.VETH_ASSET_DATA\n assert res[\"records\"][0][\"order\"][\"takerAssetData\"] == asset_infos.LONG_ASSET_DATA\n expected_maker_asset_proxy_id = ERC20_PROXY_ID\n orders_params = pydex_client.make_orders_query(\n maker_asset_proxy_id=expected_maker_asset_proxy_id,\n include_maybe_fillables=True\n )\n res = test_client.get(\n pydex_client.get_orders_url,\n query_string=orders_params\n )\n assert res.status_code == 200\n res = res.get_json()\n assert_valid(res, \"/relayerApiOrdersResponseSchema\")\n assert res[\"records\"][0][\"order\"][\"makerAssetData\"][:10] == expected_maker_asset_proxy_id", "def test_import_selected_items(self):\n item_id = self.response.context['items'][0].id\n 
self.client.post('/importer/', {'items': [item_id, ]})\n self.assertEquals(ModeratedObject.objects.all().count(), 1)", "def test_get_item_details(self, mock_requests_get):\n details = resources.get_item_details(21787)\n\n item = details.item\n assert item.id == 21787\n assert item.name == \"Steadfast boots\"\n assert item.type == \"Miscellaneous\"\n assert item.current.price == 5900000\n assert item.today.price == -138200\n assert item.members is True", "def test_find_disputes(self):\n query_string = [('limit', 100),\n ('starting_after', 'starting_after_example'),\n ('ending_before', 'ending_before_example'),\n ('dispute_reason', DisputeCode()),\n ('dispute_status', DisputeStatus()),\n ('beginning_date', 'beginning_date_example'),\n ('ending_date', 'ending_date_example')]\n response = self.client.open(\n '/paySmart/ps-processadora/v1/disputes',\n method='GET',\n query_string=query_string)\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))", "def test_get_food(self):\n pass", "def test_get_dealer_active_inventory(self):\n pass", "def test_get_foods(self):\n pass", "def test_update_item_using_post(self):\n pass", "def test_query_inventory_quantity(self):\n resp = self.app.get('/inventories', query_string='quantity=5')\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n self.assertTrue(len(resp.data) > 0)\n self.assertTrue('conditioner' in resp.data)\n self.assertFalse('shampoo' in resp.data)\n data = json.loads(resp.data)\n query_item = data[0]\n self.assertEqual(query_item['quantity'], 5)", "def test_query_inventory_found(self):\n resp = self.app.get('/inventories/query', query_string='name=shampoo&status=new')\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n self.assertTrue('shampoo' in resp.data)\n self.assertFalse('conditioner' in resp.data)\n data = json.loads(resp.data)\n self.assertEqual(len(data), 1)", "def test_get_samples_to_invoice_query(sample_store):\n # GIVEN a store with a sample\n assert len(sample_store._get_query(table=Sample).all()) > 1\n\n # WHEN finding samples to invoice\n sample = sample_store.get_samples_to_invoice_query().first()\n\n # THEN samples should be a list of samples\n assert isinstance(sample, Sample)\n\n # THEN it should return all samples that are not invoiced\n assert sample\n assert sample.name == \"delivered\"", "def test_get_all_item(self, app, item):\n res = app.store_items.get_all_items(\n header=item.header,\n type_response=ItemsResponse,\n )\n assert res.status_code == 200", "def test_and_filtros(self): \n response = self.client.get('/apirest/expedientes/?tipo=PROYECTO&firm_persona_fisica_id=1566')\n self.assertEqual(response.data[\"count\"],self.CANT_EXPEDIENTES_X_FIRMANTE)\n self.assertEqual(response.data[\"results\"][0][\"tipo\"], self.TIPO_PROYECTO)\n self.assertEqual(response.data[\"results\"][0][\"periodo\"], self.PERIODO)", "def testOverrideJIDItemsHandler(self):\n self.stream_start(mode='client',\n plugins=['xep_0030'])\n\n def dynamic_jid(jid, node, ifrom, iq):\n result = self.xmpp['xep_0030'].stanza.DiscoItems()\n result['node'] = node\n result.add_item('tester@localhost', node='foo', name='Global')\n return result\n\n self.xmpp['xep_0030'].set_node_handler('get_items',\n jid='tester@localhost',\n handler=dynamic_jid)\n\n\n self.xmpp['xep_0030'].restore_defaults(jid='tester@localhost',\n node='testing')\n\n self.xmpp['xep_0030'].add_item(ijid='tester@localhost',\n node='testing',\n jid='tester@localhost',\n subnode='foo',\n name='Test')\n\n self.recv(\"\"\"\n <iq type=\"get\" id=\"test\" 
to=\"tester@localhost\">\n <query xmlns=\"http://jabber.org/protocol/disco#items\"\n node=\"testing\" />\n </iq>\n \"\"\")\n\n self.send(\"\"\"\n <iq type=\"result\" id=\"test\">\n <query xmlns=\"http://jabber.org/protocol/disco#items\"\n node=\"testing\">\n <item jid=\"tester@localhost\" node=\"foo\" name=\"Test\" />\n </query>\n </iq>\n \"\"\")", "def test_adding_a_new_item_with_no_supply(self): \n print '\\n'\n logger.debug('Add a new item to a current PO via PUT')\n print '\\n'\n \n #Verifying po in database\n self.assertEqual(self.po.id, 1)\n self.assertEqual(self.po.items.count(), 1)\n self.assertEqual(self.po.grand_total, Decimal('129.58'))\n self.assertEqual(timezone('Asia/Bangkok').normalize(self.po.order_date).date(), datetime.datetime.now().date())\n item = self.po.items.all()[0]\n self.assertEqual(item.id, 1)\n self.assertEqual(item.quantity, 10)\n self.assertEqual(item.total, Decimal('121.1'))\n \n modified_po_data = copy.deepcopy(base_purchase_order)\n modified_po_data['items'][1]['unit_cost'] = Decimal('11.99')\n modified_po_data['items'][1]['comments'] = 'test change'\n modified_po_data['items'][1]['description'] = \"test description change\"\n del modified_po_data['items'][1]['supply']\n resp = self.client.put('/api/v1/purchase-order/1/',\n format='json',\n data=modified_po_data)\n \n #Verify the response\n self.assertEqual(resp.status_code, 200, msg=resp)\n po = resp.data\n self.assertEqual(po['id'], 1)\n self.assertEqual(po['supplier']['id'], 1)\n self.assertEqual(po['vat'], 7)\n #self.assertEqual(Decimal(po['grand_total']), Decimal('74.85'))\n self.assertEqual(po['discount'], 0)\n self.assertEqual(po['revision'], 1)\n self.assertEqual(len(po['items']), 2)\n #self.assertEqual(po['status'], 'PAID')\n #Check the new pdf\n #webbrowser.get(\"open -a /Applications/Google\\ Chrome.app %s\").open(po['pdf']['url'])\n \n item1 = po['items'][0]\n logger.debug(item1)\n self.assertEqual(item1['id'], 2)\n self.assertEqual(item1['quantity'], Decimal('10.0000000000'))\n self.assertEqual(item1['description'], u'Pattern: Maxx, Col: Blue')\n self.assertEqual(Decimal(item1['unit_cost']), Decimal('12.1100'))\n self.assertEqual(Decimal(item1['total']), Decimal('121.10'))\n\n item2 = po['items'][1]\n logger.debug(item2)\n self.assertEqual(item2['id'], 3)\n self.assertEqual(item2['quantity'], Decimal('3.0000000000'))\n self.assertEqual(item2['comments'], 'test change')\n self.assertEqual(item2['description'], 'test description change')\n self.assertEqual(Decimal(item2['unit_cost']), Decimal('11.99'))\n self.assertEqual(Decimal(item2['total']), Decimal('35.97'))\n \n #Verify database record\n po = PurchaseOrder.objects.get(pk=1)\n \n self.assertEqual(po.supplier.id, 1)\n #self.assertEqual(timezone('Asia/Bangkok').normalize(po.order_date), datetime.datetime.now().date())\n self.assertEqual(po.vat, 7)\n self.assertEqual(po.grand_total, Decimal('168.06'))\n self.assertEqual(po.items.count(), 2)\n \n # Check new item in the database\n item2_d = po.items.all().order_by('id')[1]\n self.assertEqual(item2_d.id, 3)\n self.assertEqual(item2_d.description, 'test description change')\n self.assertEqual(item2_d.comments, 'test change')\n self.assertEqual(item2_d.quantity, 3)\n self.assertEqual(item2_d.unit_cost, Decimal('11.99'))\n self.assertEqual(item2_d.total, Decimal('35.97'))\n\n # Check new supply product in the database\n products = SupplyProduct.objects.filter(supply=item2_d.supply, supplier=self.po.supplier)\n self.assertEqual(products.count(), 1)\n product = products.all()[0]\n 
self.assertEqual(product.supply.id, item2_d.supply.id)\n self.assertEqual(product.supplier.id, self.po.supplier.id)\n self.assertEqual(product.cost, Decimal('11.99'))", "def test_find_stock_item_by_id(self):\n pass", "def test_trucks_api_food_query_string(self):\n query_items = [\"donut\", \"hot dog\", \"taco\", \"chocolate\", \"soda\"]\n for food in query_items:\n resp = self.app.get('/trucks?food=%s' % food)\n self.assertEqual(resp.status_code, 200)\n\n data = json.loads(resp.data)['resp']\n for item in data:\n assert food in item['fooditems'].lower()" ]
[ "0.61279446", "0.6030953", "0.5861904", "0.5858192", "0.5838408", "0.58140504", "0.5798844", "0.57639545", "0.56897074", "0.5598579", "0.55827343", "0.5550136", "0.5541892", "0.55156195", "0.5513321", "0.55097985", "0.54703015", "0.54435825", "0.5436962", "0.54357976", "0.54247034", "0.54213834", "0.53958", "0.5377046", "0.5363587", "0.5358704", "0.53449315", "0.5342245", "0.53349715", "0.5334512" ]
0.6962557
0
Use nlp to find nouns
def get_nouns(self): blob = TextBlob(self.raw_string) for word, tag in blob.tags: if tag in ['NNP', 'NN']: self.nouns.append(word.lemmatize())
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def en_noun(t, label):\r\n s = label\r\n p = []\r\n is_uncountable = False\r\n \r\n # http://en.wiktionary.org/wiki/Template:en-noun\r\n head = t.arg(\"head\", label)\r\n p1 = t.arg(0)\r\n p2 = t.arg(1)\r\n \r\n if p1 == \"-\":\r\n # uncountable\r\n is_uncountable = True\r\n \r\n if p2 == \"s\":\r\n # ends by s\r\n p.append(head + \"s\")\r\n \r\n elif p2 is not None:\r\n # word\r\n p.append(p2)\r\n \r\n elif p1 == \"es\":\r\n # add es\r\n p.append(head + \"es\")\r\n \r\n elif p1 is not None:\r\n # use term\r\n p.append(p1)\r\n \r\n elif p1 is None and p2 is None:\r\n p.append(head+\"s\")\r\n\r\n for k,a in t.args.items():\r\n if not a.is_named():\r\n if k == 0 or k == 1:\r\n continue\r\n \r\n p.append(a.as_string())\r\n \r\n return (s, p, is_uncountable)", "def get_nouns(txt):\n query = 'https://api.textgain.com/1/tag?q='\n query += urllib.parse.quote(txt, safe='')\n query += '&lang=fr&key=***'\n resp = requests.get(query)\n\n body = json.loads(resp.text)['text'][0]\n\n nouns = {}\n for iterable_elem in body:\n for elem in iterable_elem:\n if elem['tag'] == 'NOUN':\n word = elem['word']\n if word in nouns.keys():\n nouns[word] += 1\n else:\n nouns[word] = 1\n print(nouns)\n return nouns", "def find_noun(sent):\n noun = None\n\n if not noun:\n for w, p in sent.pos_tags:\n if p == 'NN': # This is a noun\n noun = w\n break\n if noun:\n #logger.info(\"Found noun: %s\", noun)\n pprint(\"FOUND NOUN\")\n pprint(noun)\n\n return noun", "def is_noun(tag):\r\n return tag in ['NN', 'NNS', 'NNP', 'NNPS']", "def pos_text(text):\n nlp = spacy.load('en')\n doc = nlp(text)\n # all tokens that arent stop words or punctuations\n words = [token.text.encode('ascii', 'ignore') for token in doc if token.is_stop != True and token.is_punct != True]\n\n # noun tokens that arent stop words or punctuations\n final_tokens = [token.text.encode('ascii', 'ignore') for token in doc if token.is_stop != True and \\\n token.is_punct != True and (token.pos_ == \"NOUN\" or token.pos_ == \"VERB\")]\n\n # frequency dictionary for all tokens\n word_freq = Counter(words)\n\n #top 100 words to display in wordcloud which are noun or verb\n #frequency will be used to show big/small words in wordcloud\n final_tokens_freq = Counter(final_tokens)\n result = final_tokens_freq.most_common(config.config[\"MAX_FREQUENCY\"])\n #print result\n return result", "def noun_lemma(word):\n if word.endswith(\"s\"):\n if word.endswith(\"ss\"):\n return word.lower()\n elif word.endswith(\"ies\"):\n return word[:-3].lower() + (\"y\")\n else:\n return word[:-1].lower()\n if word.endswith(\"men\"):\n return word[:-2].lower() + (\"an\")\n else:\n return word.lower()", "def find_pronoun(sent):\n pronoun = None\n\n for word, part_of_speech in sent.pos_tags:\n # Disambiguate pronouns\n if part_of_speech == 'PRP' and word.lower() == 'you':\n # pronoun = 'I' +++++++++ORIGINAL++++++++++++\n pronoun = 'I'\n elif part_of_speech == 'PRP' and word == 'I':\n # If the user mentioned themselves, then they will definitely be the pronoun\n # pronoun = 'You' +++++++++ORIGINAL++++++++++++\n pronoun = 'You'\n return pronoun", "def _get_nouns(self, review):\n review_features = []\n for sent in review:\n doc = self.nlp(sent)\n # noun_phrase = [np.text for np in doc.noun_chunks]\n nouns = [unicode(lemma(str(word).lower())) for word in doc if word.pos == NOUN]\n review_features.append(nouns)\n return review_features", "def get_noun_phrases(blob):\n return blob.noun_phrases", "def pronoun_instance_dist(novel, words):\n text = novel.get_tokenized_text()\n output = []\n count 
= 0\n start = False\n\n for e in text:\n e = e.lower()\n if not start:\n if e in words:\n start = True\n else:\n count += 1\n if e in words:\n output.append(count)\n count = 0\n return output", "def get_nouns(root):\n nouns = []\n for child in root.findall(\"./xdrs/taggedtokens/tagtoken/tags\"):\n noun = False\n for grandchildren in child.findall(\"./tag[@type='pos']\"):\n if grandchildren.text == 'NN' or grandchildren.text == 'NNS':\n noun = True\n if noun == True:\n for grandchildren in child.findall(\"./tag[@type='lemma']\"):\n nouns.append(grandchildren.text)\n return nouns", "def amount_nouns_and_numerals_stanford_nlp(self) -> int:\n stanza.download(self.lang, processors = 'tokenize,mwt,pos')\n nlp = stanza.Pipeline(self.lang, processors = 'tokenize,mwt,pos')\n doc = nlp(self.sent)\n for sentence in doc.sentences:\n for word in sentence.words:\n #if the part of speech is a noun, a proper noun or a numeral \n #(only for en) \n if self.lang == 'en':\n if word.upos == 'NOUN' or word.upos == 'PROPN' or word.upos == 'NUM':\n self.amount_nouns_and_num += 1\n elif self.lang == 'de' or self.lang == 'fr':\n if word.upos == 'NOUN' or word.upos == 'PROPN':\n self.amount_nouns_and_num += 1\n return self.amount_nouns_and_num", "def nlp_parse(self, input):\n resp = {}\n resp['type'] = 'nomatch'\n VDB_set = {}\n WP_set = {}\n tagset = self.build_tagset(input)\n resp['words'] = self.build_keywords(tagset)\n w = resp['words']\n\n if not w:\n if constants.DEBUG:\n log.debug(\"No words: \" + str(resp))\n return resp\n\n # store nouns\n NN_set = set(w.get('NN', []))\n\n # matches a request for a list\n if 'list' in NN_set \\\n or 'List' in w.get('NNP', []):\n resp['count'] = w.get('CD', [constants.LIST_COUNT])[0]\n resp['type'] = 'show-list'\n if set(['serving', 'serve']) & set(w.get('VBG', [])):\n resp['meal'] = (NN_set & constants.MEALS_SET).pop()\n if 'in' in w.get('IN', []):\n resp['zone'] = w.get('NNP', [None])[0]\n if 'close' in w.get('VBD', []) \\\n or 'close' in w.get('JJ', []) \\\n or 'close' in NN_set:\n resp['distance'] = True\n return resp\n\n # finds neighborhood\n for word in tagset:\n if word[1] == 'VBD':\n VDB_set = word[0]\n for word in tagset:\n if word[1] == 'WP':\n WP_set = word[0]\n if 'neighborhood' in VDB_set and 'what' in WP_set:\n if w.get('NNP', [None])[0]: \n r_name = w.get('NNP', [None])[0]\n else :\n return resp\n\n r_name = w.get('NNP', [None])[0] \n resp['restaurant'] = r_name\n resp['type'] = 'name-zone'\n return resp\n\n # matches \"how expensive it is\" and \"is it expensive\"\n if 'expensive' in w.get('JJ', ()):\n if w.get('NNP', [None])[0]: \n r_name = w.get('NNP', [None])[0]\n else :\n return resp\n\n r_name = w.get('NNP', [None])[0] \n resp['restaurant'] = r_name\n resp['type'] = 'name-price'\n return resp\n\n if 'between' in w.get('IN', ()) \\\n or 'price' in NN_set:\n price_range = w.get('CD', ())\n\n # price between a and b\n # require at least 2 numerals\n if len(price_range) >= 2:\n resp['min'] = min(map(int, price_range))\n resp['max'] = max(map(int, price_range))\n resp['type'] = 'list-price-range'\n return resp\n\n # price of exactly a\n if len(price_range) > 0:\n price_range = w.get('CD', ())\n resp['price'] = min(price_range)\n resp['type'] = 'list-price-single'\n return resp\n\n\n # need to merge NN and JJ for this step\n w['NNJJ'] = NN_set | set(w.get('JJ', []))\n meal = constants.MEALS_SET & w['NNJJ']\n if meal:\n resp['type'] = 'list-meal-single'\n resp['meal'] = meal.copy().pop()\n return resp\n\n # matches a quality list\n if 'quality' in NN_set and 
\\\n (constants.QUALITIES & w['NNJJ']) and \\\n (set(['food', 'service']) & w['NNJJ']):\n resp['degree'] = (constants.QUALITIES \\\n & w['NNJJ']).pop()\n resp['type'] = 'list-quality-' + \\\n (set(['food', 'service']) & w['NNJJ']).pop()\n return resp\n\n # matches a phone number request\n if NN_set & constants.PHONE_KEYWORDS:\n r_name = w.get('NNP', [None])[0] or \\\n w['NN'][-1]\n\n resp['restaurant'] = r_name\n resp['type'] = 'name-phone'\n return resp\n\n # matches a single meal request\n if NN_set & constants.MEALS_SET:\n r_name = w.get('NNP', [None])[0] or \\\n w['NN'][-1]\n\n resp['restaurant'] = r_name\n resp['type'] = 'name-meal'\n resp['meal'] = word.lower()\n return resp\n\n # matches a request for an address\n if 'address' in NN_set:\n r_name = w.get('NNP', [None])[0] or \\\n w['NN'][-1]\n resp['restaurant'] = r_name\n resp['type'] = 'name-location'\n return resp\n\n # matches a restaurant in neighborhood\n if 'in' in w.get('IN', []) and \\\n NN_set & constants.NAME_KEYWORDS:\n r_name = w.get('NNP', [None])[0]\n if not r_name:\n for kw in reversed(w['NN']):\n if kw not in constants.NAME_KEYWORDS:\n r_name = kw\n break\n if r_name:\n resp['type'] = 'random-city'\n resp['city'] = string.capitalize(r_name)\n return resp\n\n # matches a request for a cuisine type\n if NN_set & constants.NAME_KEYWORDS:\n r_name = w.get('NNP', [None])[0]\n if not r_name:\n for kw in reversed(w['NN']):\n if kw not in constants.NAME_KEYWORDS:\n r_name = kw\n break\n if r_name:\n resp['type'] = 'random-cuisine'\n resp['cuisine'] = string.capitalize(r_name)\n return resp\n\n # merge all numerals together for list-mode\n w['CDLS'] = set(w.get('CD', []) + w.get('LS', []))\n if w['CDLS']:\n w_copy = w['CDLS'].copy()\n while w_copy:\n try:\n resp['listitem'] = int(w_copy.pop())\n resp['type'] = 'single-listitem'\n return resp\n except:\n pass\n\n # distance / how far\n if ('far' in w.get('RB', [])\n and 'how' in w.get('WRB', [])\n ) or ('distance' in NN_set):\n r = w.get('NNP', [None])[0]\n if r:\n resp['type'] = 'name-distance'\n resp['restaurant'] = string.capitalize(r)\n return resp\n\n if constants.DEBUG:\n log.debug(resp)\n return resp", "def ner_spacy(filepath):\n\n\n out = \"\"\n\n with codecs.open(filepath,'r','utf-8') as current_file:\n\n text = current_file.readlines()\n\n with codecs.open(filepath+\".ner\",'w','utf-8') as outfile:\n\n for line in text:\n doc = nlp(line.rstrip())\n for word in doc:\n if word.ent_type_ != u\"\":\n outfile.write(word.text+\"|\"+word.ent_type_+' ')\n else:\n outfile.write(word.text+' ')\n outfile.write('\\n')", "def get_nouns(self):\n word_punct_token = WordPunctTokenizer().tokenize(self.sentence_string)\n\n clean_tokens = []\n for token in word_punct_token:\n token = token.lower()\n\n # remove any value that are not alphabetical\n new_token = re.sub(r\"[^a-zA-Z]+\", \"\", token)\n\n # remove empty value and single character value\n if new_token != \"\" and len(new_token) >= 2:\n vowels = len([v for v in new_token if v in \"aeiou\"])\n if vowels != 0: # remove line that only contains consonants\n clean_tokens.append(new_token)\n\n noun_types = [\"NN\", \"NNS\", \"NNP\", \"NNPS\", \"N\"]\n is_noun = lambda pos: pos in noun_types\n nouns = [word for (word, pos) in nltk.pos_tag(clean_tokens) if is_noun(pos)]\n\n if nouns:\n return nouns\n else:\n raise InvalidSentenceError(self.sentence_string)", "def parse(text):\n parts = text.split(' ')\n noun = Noun(parts[0], int(parts[1]))\n\n parts = parts[2:]\n while len(parts) > 0:\n noun.add_adjectives(Word(parts[0], 
int(parts[1])))\n parts = parts[2:]\n return noun", "def get_nps(self):\n\n # determine all leaf ids in the parse tree which refer to a noun\n nouns = []\n for node_id in self.parsetree.nodes():\n node = self.parsetree.node[node_id]\n if not node['is_leaf']:\n continue\n leaf_idx = node['left_leaf_idx']\n if leaf_idx >= len(self.tokens):\n continue\n self.words[leaf_idx] == node['label']\n is_noun = self.tokens[leaf_idx].POS.cdata[0] == 'N'\n if is_noun:\n nouns.append(node_id)\n\n NPs = set()\n for noun in nouns:\n NPs.add(self.get_np_for_idx(noun))\n return NPs", "def get_extras(self, text=None):\n if text is None:\n text = self.nltk_text(self.text)\n # Tag parts of speech\n tagged = nltk.pos_tag(text)\n # Try for composed NNP / NNPS\n is_proper_noun = False\n text = []\n proper_noun = \"\"\n for (word, tag) in tagged:\n if not is_proper_noun and (tag == 'NNP' or tag == 'NNPS'):\n # Start building a proper noun\n proper_noun = word\n # Set it true\n is_proper_noun = True\n # Add it to annotations anyway\n text.append(word)\n elif tag == 'NNP' or tag == 'NNPS':\n # Previous was proper noun. So it may be combined\n proper_noun += \" \" + word\n # Add the single word to annotations anyway\n text.append(word)\n elif is_proper_noun and tag == 'IN':\n # Add what we have by now to the text\n text.append(proper_noun)\n # Previous was proper noun. So it may be composed\n proper_noun += \" \" + word\n elif is_proper_noun:\n # Add what we have by now to the text\n text.append(proper_noun)\n # Finished with proper noun, so set it false\n is_proper_noun = False\n # Remove duplicates\n seen = {}\n result = []\n for w in text:\n if w in seen:\n continue\n seen[w] = 1\n result.append(w)\n # Eliminate common\n result = [w for w in result if w.lower() not in self.common_words and\n w.lower() not in stopwords.words('english')]\n return result", "def lookup_pronunciations_for_phrase(words: Sequence[Text]) -> Sequence[Phrase]:\n return EnglishUtils.all_possible_phrases_for(words)", "def searchphrases(query):\n query_nostopwords = removestopwords(query)\n query_lemmatized = lemmatize(query_nostopwords) #look like\n phraseids = []\n ngramids=[]\n words=query_lemmatized.split()\n query_ngram = \"select id from ngrams where lower(lemmangrams) like lower('%{}%')\".format(query_lemmatized)+\" or lower(lemmangrams) like lower('%{}%')\".format(words[0])\n for word in words[1:]:\n query_ngram=query_ngram+\" or lower(lemmangrams) like lower('%{}%')\".format(word)\n con = it.engine.execute(query_ngram)\n rows_phrase = con.fetchall()\n if rows_phrase:\n ngramids = list(set([str(i[0]) for i in rows_phrase]))\n phraseids.extend(ngramids)\n phraseids = list(set(phraseids))\n results=categorize(phraseids)\n return results", "def verifyProperNounAtSentenceStart(idx, tagged_term, tagged_terms, lexicon):\n term, tag, norm = tagged_term\n if (tag in ('NNP', 'NNPS') and\n (idx == 0 or tagged_terms[idx-1][1] == '.')):\n lower_term = term.lower()\n lower_tag = lexicon.get(lower_term)\n if lower_tag in ('NN', 'NNS'):\n tagged_term[0] = tagged_term[2] = lower_term\n tagged_term[1] = lower_tag", "def get_nouns(lemmas_tags):\r\n nouns = []\r\n for lemma in lemmas_tags:\r\n \"\"\"si la etiqueta es None porque no tiene lemma o es un sustantivo\"\"\"\r\n if lemma[1] == None or lemma[1][0] == 'n':\r\n \"\"\"se agrega solamente el lemma\"\"\"\r\n nouns.append(lemma[0])\r\n return nouns", "def spacyfy(self, *args):\n if \"raw\" in args:\n return nlp(self.raw_text)\n if \"clean\" in args:\n return nlp(self.filter(*args))", "def 
get_nnp_ngrams(original_text, highlight=4, minsize=0):\n minsize = minsize-1\n if minsize<0:\n minsize = 0 \n \n tokens = nltk.wordpunct_tokenize(original_text)\n tagged = nltk.word_tokenize(original_text)\n tagged = nltk.pos_tag(tokens)\n #for word in tagged:\n # print word\n doc_length = len(tokens)\n counter = 0\n counter2 = 0\n if highlight==0:\n concated_test = doc_length # This is set to doc_length but could be anything recommend 3.\n else:\n concated_test = highlight\n list_of_NNPs = []\n while counter < (doc_length-1):\n while counter2 < concated_test:\n counter2 = counter2+1\n counter3 = 0\n #print '--------------------'\n temp_array = []\n all_nnp = True\n while counter3 < counter2:\n if counter < (doc_length-counter3):\n #print tokens[counter+counter3],tagged[counter+counter3][1]\n temp_array.append(tokens[counter+counter3])\n if tagged[counter+counter3][1] != 'NNP':\n all_nnp = False\n counter3 = counter3+1\n counter3 = 0\n if all_nnp == True:\n if(len(temp_array)>minsize):\n list_of_NNPs.append(temp_array)\n #print 'added to main array'\n #else:\n #print 'not all NNPs'\n counter2 = 0\n counter = counter+1\n #for ngram in list_of_NNPs:\n # print ngram\n import itertools\n list_of_NNPs.sort()\n unique_NNPs = list(list_of_NNPs for list_of_NNPs,_ in itertools.groupby(list_of_NNPs))\n return unique_NNPs", "def __get_relevant_words(sentence):\n nouns = None\n try:\n if sentence:\n tokens = nltk.word_tokenize(sentence)\n pos = nltk.pos_tag(tokens)\n nouns = [x[0] for x in pos if x[1].startswith('N') or x[1].startswith('F')]\n except Exception as e:\n nouns = None\n return ' '.join(nouns) if nouns else None", "def query_preprocess(input_pack: DataPack):\n sentence = input_pack.get_single(Sentence)\n\n relations = defaultdict(dict)\n text_mention_mapping = {}\n\n # get all srl relations\n for link in input_pack.get(PredicateLink, sentence):\n verb = link.get_parent()\n verb_text = verb.text\n argument = link.get_child()\n argument_text = argument.text\n\n text_mention_mapping[verb_text] = verb\n text_mention_mapping[argument_text] = argument\n relations[verb_text][link.arg_type] = argument_text\n\n arg0, arg1, predicate = None, None, None\n for verb_text, entity in relations.items():\n arg0, arg1, predicate = collect_mentions(text_mention_mapping, entity, verb_text)\n if not arg0 and not arg1:\n continue\n else:\n break\n\n if not arg0 and not arg1:\n raise Exception('AllenNLP SRL cannot extract the two arguments or the '\n 'predicate in your query, please check our examples '\n 'or rephrase your question')\n\n verb_lemma, is_answer_arg0 = None, None\n\n # check pos tag and lemma for tokens\n for j, token in enumerate(input_pack.get(entry_type=Token,\n range_annotation=sentence,\n components=['forte_wrapper.nltk.nltk_processors.NLTKWordTokenizer']\n )):\n # find WH words\n if token.pos in {\"WP\", \"WP$\", \"WRB\", \"WDT\"}:\n if arg0.begin <= token.begin and arg0.end >= token.end:\n is_answer_arg0 = True\n elif arg1.begin <= token.begin and arg1.end >= token.end:\n is_answer_arg0 = False\n\n # find verb lemma\n if token.text == predicate.text:\n verb_lemma = token.lemma\n\n return sentence, arg0.text if arg0 else '', arg1.text if arg1 else '', \\\n predicate.text, verb_lemma, is_answer_arg0", "def parse(string):\n doc = nlp(string)\n return [str(n) for n in doc.noun_chunks]", "def translate_leet(phrase):", "def __isNoun__(self, word):\n self.nouns = ('door', 'bear', 'princess', 'cabinet')\n for noun in self.nouns:\n if noun == word:\n return ('noun', word), True\n return None, False", 
"def test_issue7306(en_lookup_nlp):\n doc = Doc(en_lookup_nlp.vocab, words=[\"singing\"])\n lemmatizer = en_lookup_nlp.get_pipe(\"lemmatizer\")\n doc = lemmatizer(doc)\n assert doc[0].lemma_ == \"sing\"" ]
[ "0.6937712", "0.66080606", "0.65842813", "0.6570473", "0.6529023", "0.65285176", "0.65189445", "0.6494391", "0.64555836", "0.6421015", "0.6328846", "0.63044935", "0.62987053", "0.62668496", "0.62601346", "0.624385", "0.6186396", "0.6117331", "0.6102974", "0.60977507", "0.60909545", "0.6080659", "0.60668945", "0.6008747", "0.6002194", "0.59950316", "0.59944344", "0.59741026", "0.595184", "0.5935737" ]
0.7032321
0
Pretty prints using custom pprint class, formatting unicode characters
def pprint(self): PrettyPrintUnicode().pprint(self.data)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def pprint(self):\n return pformat(repr(self))", "def my_pprint(obj, intend = 0):\n if isinstance(obj, dict):\n for key, value in obj.items():\n print(intend*\" \"+str(key)+\" : \")\n my_pprint(value, intend = intend + 4)\n print()\n elif isinstance(obj, list):\n for value in obj:\n my_pprint(value, intend = intend + 4)\n print()\n elif isinstance(obj, bytes):\n print(\"<binary data>\")\n \n else:\n try:\n print(intend*\" \"+str(obj))\n except UnicodeDecodeError:\n print(intend*\" \"\"<?>\")", "def pprint(self, parameter_s=''):\n ptformatter = self.shell.display_formatter.formatters['text/plain']\n ptformatter.pprint = bool(1 - ptformatter.pprint)\n print('Pretty printing has been turned',\n ['OFF','ON'][ptformatter.pprint])", "def pprint(tree):\n p = PrettyPrinter(indent=2)\n p.pprint(tree)", "def pprint(self):\n print(self.pprint_str())", "def pformat(object):\r\n return PrettyPrinter().pformat(object)", "def printpretty(self):\n print(self.string_rep())", "def _non_unicode_repr(objekt, context, maxlevels, level):\n repr_string, isreadable, isrecursive = pprint._safe_repr(objekt, context,\n maxlevels, level)\n if repr_string.startswith('u\"') or repr_string.startswith(\"u'\"):\n repr_string = repr_string[1:]\n return repr_string, isreadable, isrecursive", "def pprint_helper(self, angle, indent):\n # just here for defining the interface; work is done in subclasses\n pass", "def DumpPprint(data):\n #NOTE(g): Import is done here, instead of the top of the file, to not require this module if it is not used\n import pprint\n \n text = pprint.pformat(data)\n \n return text", "def pprint(object, stream=None):\r\n printer = PrettyPrinter(stream=stream)\r\n printer.pprint(object)", "def __pprint(object, stream=None, indent=1, width=80, depth=None):\n printer = PrettyPrinterExt(\n stream=stream, indent=indent, width=width, depth=depth)\n printer.pprint(object)", "def pprint(self):\n # just here for defining the interface; work is done in subclasses\n pass", "def pprint(self, data):\n self._assert(data)\n data = self._render(data) # make elements ascii\n fmats = self._fmats(data) # get array of padding formats)\n for row in data:\n print(fmats.format(*row))", "def pprint(*d):\n i = 0\n while i < len(d):\n print(pretty(d[i]))\n i += 1", "def pprint(*args, **kwargs):\n if PRINTING:\n print(*args, **kwargs)", "def p(value):\n pp.pprint(value)", "def pprint(x):\n if is_theano_object(x):\n return _gettheano().printing.pprint(x)\n else:\n return str(x)", "def pprint(obj):\n return json.dumps(obj, sort_keys=True, indent=2, separators=(',', ': '))", "def pprint(obj):\n return json.dumps(obj, sort_keys=True, indent=2, separators=(',', ': '))", "def test_unicode_parenthization():\n alpha = symbols('alpha')\n printer = SympyUnicodePrinter()\n printer.parenthesize(alpha, 0) == 'α'", "def pprint(self,indent=0,node=None):\n if node == None:\n node = self.root\n if node == None:\n print_indent(indent)\n print \"[empty tree]\"\n return\n if node.type == 'v':\n print_indent(indent)\n print node.value\n elif node.type == 's':\n for (val,c) in node.children.iteritems():\n print_indent(indent)\n print \"-\",self.keys[node.feature],\"=\",val,\":\"\n self.pprint(indent+1,c)\n elif node.type == 'i':\n print_indent(indent)\n print self.keys[node.feature],\"<=\",node.value,\":\"\n self.pprint(indent+1,node.children[0])\n print_indent(indent)\n print self.keys[node.feature],\">\",node.value,\":\"\n self.pprint(indent+1,node.children[1])", "def pprint(json_data):\n\n print(json.dumps(json_data, indent=4, separators=(' , 
', ' : ')))", "def __repr__(self, indent=2):\n return pprint.pformat(self.to_dict(), indent=indent)", "def pypprint(*args, **kwargs): # type: ignore\n from typing import Iterable\n\n if len(args) != 1:\n print(*args, **kwargs)\n return\n x = args[0]\n if isinstance(x, dict):\n for k, v in x.items():\n print(f\"{k}:\", v, **kwargs)\n elif isinstance(x, Iterable) and not isinstance(x, str):\n for i in x:\n print(i, **kwargs)\n else:\n print(x, **kwargs)", "def pprint(self,obj):\n return(json.dumps(obj, sort_keys=True, indent=2, separators=(',', ': ')))", "def pprint(self):\r\n for i in self.items():\r\n print '%s => %r'%i", "def vpprint(expr, **settings):\n\n pp = VectorPrettyPrinter(settings)\n\n # Note that this is copied from sympy.printing.pretty.pretty_print:\n\n # XXX: this is an ugly hack, but at least it works\n use_unicode = pp._settings['use_unicode']\n from sympy.printing.pretty.pretty_symbology import pretty_use_unicode\n uflag = pretty_use_unicode(use_unicode)\n\n try:\n return pp.doprint(expr)\n finally:\n pretty_use_unicode(uflag)", "def printPokemon():\n print(\" _ \")\n print(\" _ __ ___ | | _____ _ __ ___ ___ _ __ \")\n print(\" | '_ \\ / _ \\| |/ / _ \\ '_ ` _ \\ / _ \\| '_ \\ \")\n print(\" | |_) | (_) | < __/ | | | | | (_) | | | |\")\n print(\" | .__/ \\___/|_|\\_\\___|_| |_| |_|\\___/|_| |_|\")\n print(\" |_| \")", "def printerTweet(self, tweet):\r\n #print tweet \r\n try:\r\n text = json.loads(tweet)[\"text\"]\r\n print text.encode('cp850', errors='replace') + '\\n'\r\n except:\r\n print 'could not get text of tweet'" ]
[ "0.66020364", "0.6575941", "0.65425223", "0.6515464", "0.6514521", "0.6511368", "0.64873034", "0.64807445", "0.6480072", "0.63610697", "0.6347487", "0.62914324", "0.62842274", "0.62714124", "0.62701565", "0.6250059", "0.6238075", "0.6182242", "0.6147328", "0.6147328", "0.61447", "0.60709476", "0.6052007", "0.6050368", "0.60006386", "0.5999397", "0.5984259", "0.5983779", "0.59546655", "0.5938938" ]
0.7337591
0
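The document in the record above calls a custom PrettyPrintUnicode helper that the snippet never defines. Below is a minimal sketch of one plausible implementation, assuming it subclasses pprint.PrettyPrinter and overrides format() so plain strings are emitted without escaping non-ASCII characters (this mattered mostly on Python 2, where repr() escapes unicode); the class body here is an assumption, not part of the dataset.

    # Hypothetical sketch: PrettyPrintUnicode is not defined in the record,
    # so this subclass of pprint.PrettyPrinter is an assumed implementation.
    import pprint

    class PrettyPrintUnicode(pprint.PrettyPrinter):
        def format(self, obj, context, maxlevels, level):
            # Return strings quoted but unescaped so unicode characters
            # survive; defer to the default formatting for everything else.
            if isinstance(obj, str):
                return "'%s'" % obj, True, False
            return pprint.PrettyPrinter.format(self, obj, context, maxlevels, level)

    PrettyPrintUnicode().pprint({"greeting": "héllo wörld"})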
Perform rank-based inverse normal transform on a numeric Pandas series
def inverse_rank_norm(values): #values = pd.Series([5, 7, 2, 1, 1]) quantiles = (values.rank()-0.5)/(len(values)) return ss.norm.ppf(quantiles)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def inverse_normal_transform(M):\n R = stats.mstats.rankdata(M, axis=1) # ties are averaged\n if isinstance(M, pd.DataFrame):\n Q = pd.DataFrame(stats.norm.ppf(R/(M.shape[1]+1)), index=M.index, columns=M.columns)\n else:\n Q = stats.norm.ppf(R/(M.shape[1]+1))\n return Q", "def inverse_transform(self, df):\n try:\n df = df.astype(float)\n except Exception:\n raise ValueError(\"Data Cannot Be Converted to Numeric Float\")\n X = pd.to_numeric(df.index, errors='coerce', downcast='integer').values\n if self.model != \"GLS\":\n X = X.reshape((-1, 1))\n if self.model in self.need_positive:\n temp = pd.DataFrame(\n self.trained_model.predict(X), index=df.index, columns=df.columns\n )\n df = df + self.trnd_trans.inverse_transform(temp)\n else:\n if self.model == \"GLS\" and df.shape[1] == 1:\n pred = self.trained_model.predict(X)\n pred = pred.reshape(-1, 1)\n df = df + pred\n else:\n df = df + self.trained_model.predict(X)\n # df = df.astype(float) + self.trained_model.predict(X)\n return df", "def inverse(series):\n\n result = 1 / series\n result.name = 'inv ({})'.format(series.name)\n\n return result", "def inverse_transform(self, df, trans_method: str = \"forecast\"):\n if self.on_inverse:\n df = df.round(decimals=self.decimals)\n if self.force_int:\n df = df.astype(int)\n return df", "def inverse_transform(self, df):\n try:\n df = df.astype(float)\n except Exception:\n raise ValueError(\"Data Cannot Be Converted to Numeric Float\")\n\n X = date_part(df.index, method=self.datepart_method)\n y = pd.DataFrame(self.model.predict(X))\n y.columns = df.columns\n y.index = df.index\n df = df + y\n return df", "def inverse_normal_transformation(x, c=3/8):\n r = scipy.stats.rankdata(x, \"average\")\n return scipy.stats.norm.ppf((r - c) / (len(x) - 2 * c + 1))", "def inverse_transform(self, df):\n return df", "def inverse_transform(self, df, trans_method: str = \"forecast\"):\n\n if self.discretization in [\n 'sklearn-quantile',\n 'sklearn-uniform',\n 'sklearn-kmeans',\n ]:\n df_index = df.index\n df_colnames = df.columns9\n df = df.clip(upper=self.bin_max, lower=self.bin_min, axis=1)\n df = df.astype(int).clip(lower=0, upper=(self.n_bins - 1))\n df = pd.DataFrame(self.kbins_discretizer.inverse_transform(df))\n df.index = df_index\n df.columns = df_colnames\n return df", "def rank_transform(X):\n return np.apply_along_axis(scipy.stats.rankdata, 0, X)", "def normalise_series(to_normalise: pd.Series) -> pd.Series:\n \n # return (to_normalise - to_normalise.mean()) / to_normalise.std() # 0 mean and unit standard deviation\n return to_normalise / to_normalise.std() # positive and unit standard deviation", "def inverse_transform(self, df, trans_method: str = \"forecast\"):\n df = (df + 1).replace([0], np.nan)\n df = df.fillna((df[df != 0]).abs().min()).fillna(0.1)\n\n # add last values, group by lag, cumprod\n if trans_method == 'original':\n df = pd.concat([self.first_values, df.tail(df.shape[0] - 1)], axis=0)\n return df.cumprod()\n else:\n df_len = df.shape[0]\n df = pd.concat([self.last_values, df], axis=0)\n return df.cumprod().tail(df_len)", "def inverse_transform(self, df):\n invtrans_df = df.copy()\n\n invtrans_df = invtrans_df.where(df <= 0, self.upper_mean * df, axis=1)\n invtrans_df = invtrans_df.where(\n df >= 0, (self.lower_mean * df).abs() * -1, axis=1\n )\n invtrans_df = invtrans_df + self.df_med\n invtrans_df = invtrans_df.where(df != 0, self.df_med, axis=1)\n return invtrans_df", "def normalize_series(series):\n return (series - series.mean()) / (series.max() - series.min())", "def 
normalize_rank(x, axis=-1):\n x = 1. * x / (torch.norm(x, 2, axis, keepdim=True).expand_as(x) + 1e-12)\n return x", "def normalize_rank(x, axis=-1):\n x = 1. * x / (torch.norm(x, 2, axis, keepdim=True).expand_as(x) + 1e-12)\n return x", "def inverse_transform(self, df, trans_method: str = \"forecast\"):\n return df", "def inverse_transform(self, df, trans_method: str = \"forecast\"):\n return df", "def inverse_transform(self, df, trans_method: str = \"forecast\"):\n return df", "def normalize_rank(self, x, axis=-1):\n\t\tx = 1. * x / (torch.norm(x, 2, axis, keepdim=True).expand_as(x) + 1e-12)\n\t\treturn x", "def inverse_transform(self, df):\n try:\n df = df.astype(float)\n except Exception:\n raise ValueError(\"Data Cannot Be Converted to Numeric Float\")\n X = pd.to_numeric(df.index, errors='coerce', downcast='integer').values\n\n sin_df = pd.DataFrame()\n # make this faster\n for index, row in self.sin_params.iterrows():\n yy = pd.DataFrame(\n row['amp'] * np.sin(row['omega'] * X + row['phase']) + row['offset'],\n columns=[index],\n )\n sin_df = pd.concat([sin_df, yy], axis=1)\n df_index = df.index\n df = df.astype(float).reset_index(drop=True) + sin_df.reset_index(drop=True)\n df.index = df_index\n return df", "def inverse_transform(self, df):\n if self.log:\n df = pd.DataFrame(np.exp(df))\n if self.squared:\n df = df ** 0.5\n df = df - self.shift_amount\n return df", "def normalize(self, df):\n return df / df.ix[0, :]", "def df_normalizer(df):\n df = tf.keras.utils.normalize(df, axis=1)\n\n return df", "def inverse_transform(self, df, trans_method: str = \"forecast\"):\n df = df * self.center\n return df", "def invcov_index(indicators):\n df = indicators.copy()\n df = (df-df.mean())/df.std()\n I = np.ones(df.shape[1])\n E = inv(df.cov())\n s1 = I.dot(E).dot(I.T)\n s2 = I.dot(E).dot(df.T)\n try:\n int(s1)\n S = s2/s1\n except TypeError: \n S = inv(s1).dot(s2)\n \n S = pd.Series(S,index=indicators.index)\n\n return S", "def normalize_data(df):\r\n return df/df.ix[0,:]", "def normalize(self, df):\n return (df - df.mean()) / (df.max() - df.min())", "def to_real_series(self, data: pd.Series) -> pd.Series:\n ...", "def inverse_transform(self, X):\n X = X.copy() # type: pd.DataFrame\n if self.with_std:\n X.loc[:, self._feature_mask_] *= self.scale_\n if self.with_mean:\n X.loc[:, self._feature_mask_] += self.mean_\n return X", "def test_transform_inverse_transform(example_tsds: TSDataset) -> None:\n trend_transform = TrendTransform(in_column=\"target\", detrend_model=LinearRegression(), model=\"rbf\")\n example_tsds.fit_transform([trend_transform])\n original = example_tsds.df.copy()\n example_tsds.inverse_transform()\n assert (example_tsds.df == original).all().all()" ]
[ "0.6803572", "0.6684169", "0.6475664", "0.6429937", "0.64156497", "0.6389941", "0.63249725", "0.6258048", "0.6213609", "0.6208294", "0.61738485", "0.6168609", "0.60991716", "0.6039526", "0.6039526", "0.6025522", "0.6025522", "0.6025522", "0.59954333", "0.5985897", "0.59778994", "0.596451", "0.5915894", "0.5904281", "0.5898881", "0.5846001", "0.5811605", "0.5804811", "0.57865185", "0.5770227" ]
0.7054932
0
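A short usage sketch for the inverse_rank_norm document above, assuming the imports the snippet implies (pandas as pd, scipy.stats as ss): ranks are shifted by 0.5, scaled to (0, 1) quantiles, and mapped through the standard-normal inverse CDF, so tied values share an output and the result is approximately N(0, 1).

    # Usage sketch; the function is copied from the record's document field.
    import pandas as pd
    import scipy.stats as ss

    def inverse_rank_norm(values):
        quantiles = (values.rank() - 0.5) / len(values)
        return ss.norm.ppf(quantiles)  # returns a numpy array of normal quantiles

    skewed = pd.Series([1, 1, 2, 5, 7, 40, 1000])
    print(inverse_rank_norm(skewed).round(3))  # roughly symmetric around 0; the tied 1s share a value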
Log `engine.state.metrics` with given `engine` and `tag`.
def log_metrics(engine: Engine, tag: str) -> None: metrics_format = "{0} [{1}/{2}]: {3}".format( tag, engine.state.epoch, engine.state.iteration, engine.state.metrics ) engine.logger.info(metrics_format)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def log_metrics(metrics, step=None):\n mlflow.log_metrics(metrics, step=step)", "def log_metric(self, name, val, step):\n raise NotImplementedError", "def log_metric(self, name, val):\n raise NotImplementedError", "def log_metrics(self, metrics, step=None, epoch=None, prefix=None):\n self.experiment.log_metrics(metrics, step=step, epoch=epoch, prefix=prefix)", "def log_metric(key, value, step=None):\n mlflow.log_metric(key, value, step=step)", "def log_metric(name, values, tags={}):\n value_list = []\n for key in sorted(values.keys()):\n value = values[key]\n value_list.append(f\"{key}:{value:7.3f}\")\n values = \", \".join(value_list)\n tag_list = []\n for key, tag in tags.items():\n tag_list.append(f\"{key}:{tag}\")\n tags = \", \".join(tag_list)\n print(\"{name:30s} - {values} ({tags})\".format(name=name, values=values, tags=tags))", "def log_tb(tag, val, use_wandb=True):\n if use_wandb:\n if util.get_global_rank() == 0:\n wandb.log({tag: val}, step=int(g.token_count))\n else:\n g.event_writer.add_scalar(tag, val, g.token_count)", "def log_step(\n metric_dict={},\n mode='train',\n writer=None,\n global_step=0,\n elapsed_eta=None,\n training_speed=None\n):\n log_msg = '[{mode}] step: {step}'\n log_msg = log_msg.format(\n mode=mode,\n step=global_step,\n )\n for key, value in metric_dict.items():\n log_msg += ' - {}: {}'.format(key, round(value, 4))\n\n # Write to tensorboard\n if writer is not None:\n for key, value in metric_dict.items():\n writer.add_scalar(key, value, global_step=global_step)\n\n if elapsed_eta is not None:\n log_msg += ' - elapsed: {} - eta: {}'.format(\n datetime.timedelta(seconds=int(elapsed_eta[0])),\n datetime.timedelta(seconds=int(elapsed_eta[1]))\n )\n if writer is not None:\n writer.add_scalar('eta', elapsed_eta[1], global_step=global_step)\n\n if training_speed is not None:\n log_msg += ' - step/sec: {:.4f}'.format(training_speed)\n if writer is not None:\n writer.add_scalar(\n 'step/sec', training_speed, global_step=global_step)\n\n logger.info(log_msg)", "def logmem(cls, tag):\n\n cls.logger.info(\"----%s----\", str(tag))\n mem = psutil.virtual_memory()\n cls.logger.info(\"total:%s M\", mem.total / 1024 / 1024)\n cls.logger.info(\"available:%s M\", mem.available / 1024 / 1024)\n cls.logger.info(\"used:%s M\", mem.used / 1024 / 1024)\n cls.logger.info(\"free:%s M\", mem.free / 1024 / 1024)\n cls.logger.info(\"active:%s M\", mem.active / 1024 / 1024)\n cls.logger.info(\"inactive:%s M\", mem.inactive / 1024 / 1024)\n cls.logger.info(\"buffers:%s M\", mem.buffers / 1024 / 1024)\n cls.logger.info(\"cached:%s M\", mem.cached / 1024 / 1024)\n cls.logger.info(\"shared:%s M\", mem.shared / 1024 / 1024)", "def log_scalars(tag: str,\n data: Dict[str, float],\n global_step: int,\n walltime: Optional[float] = None,\n logger: Optional[logging.Logger] = None) -> None:\n logger = logger or _get_context_logger()\n logger.info(ScalarsT(main_tag=tag,\n tag_scalar_dict=data,\n global_step=global_step,\n walltime=walltime or time.time()))", "def test_add_tag_to_derived_metric(self):\n pass", "def log_runtime(label, mean_time, std, instances):\n pass", "def log_metric(self, name: str, value):\n self.metrics[name] = value\n\n self._sync_log_event()", "def post_save_metrics(sender, **kwargs):\r\n action = 'created' if kwargs.pop('created', False) else 'updated'\r\n\r\n tags = _database_tags(action, sender, kwargs)\r\n dog_stats_api.increment('edxapp.db.model', tags=tags)", "def log_metrics(self, metrics: dict):\n self.metrics.update(metrics)\n\n self._sync_log_event()", "def 
log_metric(data_category, key, value):\n # always, just print in logs\n log(logging.INFO, data_category, \"AML Metric({}={})\".format(key, value))\n if data_category == DataCategory.ONLY_PUBLIC_DATA:\n # if public, ask azureml to record (if azureml attached)\n run = AmlRunWrapper()\n run.setup(attach=True)\n run.log(key, value)\n run.flush()", "def log(self, metric_name: str, value: float) -> None:\n if metric_name in self.metrics:\n self.metrics[metric_name].append(value)\n else:\n self.metrics[metric_name] = [value]", "def post_init_metrics(sender, **kwargs):\r\n tags = _database_tags('initialized', sender, kwargs)\r\n\r\n dog_stats_api.increment('edxapp.db.model', tags=tags)", "def log_image(tag: str,\n data: str,\n global_step: int,\n walltime: Optional[float] = None,\n logger: Optional[logging.Logger] = None) -> None:\n logger = logger or _get_context_logger()\n logger.info(ImageT(tag=tag, img_tensor=data, global_step=global_step,\n walltime=walltime or time.time()))", "def log_scalar(self, tag, value, step):\n\n summary = tf.Summary(\n value=[tf.Summary.Value(tag=tag, simple_value=value)])\n self.writer.add_summary(summary, step)\n self.writer.flush()", "def on_log(self):\n monitors = self.monitors\n if self.monitors is None:\n monitors = self.trainer.metrics.keys()\n\n\n hparams = self.hparams\n if self.hparams is None:\n hparams = self.trainer.hparams.keys()\n\n metrics = {name: format_metric(self.trainer.metrics[name])\n for name in monitors\n if name in self.trainer.metrics}\n hparams = {name: format_metric(self.trainer.hparams[name])\n for name in hparams\n if name in self.trainer.hparams}\n\n\n step_bar = self.step_bars[-1]\n step_bar.set_description(\"Epoch {}\".format(self.trainer.epoch+1))\n step_bar.set_postfix(**metrics, **hparams)\n step_bar.update(self.trainer.steps_trained - self.last_step)\n self.last_step = self.trainer.steps_trained", "def _log_metrics(self, logs, prefix, step):\r\n if logs is None:\r\n logs = {}\r\n\r\n # Group metrics by the name of their associated file writer. 
Values\r\n # are lists of metrics, as (name, scalar_value) pairs.\r\n logs_by_writer = {\r\n self._train_run_name: [],\r\n self._validation_run_name: [],\r\n }\r\n validation_prefix = 'val_'\r\n for (name, value) in logs.items():\r\n if name in ('batch', 'size', 'num_steps'):\r\n # Scrub non-metric items.\r\n continue\r\n if name.startswith(validation_prefix):\r\n name = name[len(validation_prefix):]\r\n writer_name = self._validation_run_name\r\n else:\r\n writer_name = self._train_run_name\r\n name = prefix + name # assign batch or epoch prefix\r\n logs_by_writer[writer_name].append((name, value))\r\n\r\n with context.eager_mode():\r\n with summary_ops_v2.always_record_summaries():\r\n for writer_name in logs_by_writer:\r\n these_logs = logs_by_writer[writer_name]\r\n if not these_logs:\r\n # Don't create a \"validation\" events file if we don't\r\n # actually have any validation data.\r\n continue\r\n writer = self._get_writer(writer_name)\r\n with writer.as_default():\r\n for (name, value) in these_logs:\r\n summary_ops_v2.scalar(name, value, step=step)", "def __init__(self, tag):\n self._tag = tag\n caller = logging.get_absl_logger().findCaller()\n # token is a string of filename:lineno:tag\n token = caller[0] + ':' + str(caller[1]) + ':' + tag\n if token not in _contexts:\n _contexts[token] = {'time': 0., 'n': 0}\n self._counter = _contexts[token]", "def log(tag: str,\n data: ValueT,\n global_step: int,\n walltime: Optional[float] = None) -> None:\n fn: Callable[..., Any]\n if isinstance(data, (float, int)) or isinstance(data, torch.Tensor):\n if isinstance(data, torch.Tensor):\n data = data.item()\n fn = log_scalar\n elif isinstance(data, dict):\n fn = log_scalars\n elif isinstance(data, str):\n fn = log_text\n else:\n _get_context_logger().info(f\"{tag} #{global_step}: {data} (not logged to tensorboard)\")\n return\n # Ignore type for complicated branching, fn could have a number of\n # different signatures\n fn(tag, data, global_step) # type: ignore", "def _log_performance_metrics(\n self, task: \"tasks.ClassyTask\", local_variables: Dict[str, Any]\n ) -> None:\n phase_type = task.phase_type\n batches = len(task.losses)\n\n if self.start_time is None:\n logging.warning(\"start_time not initialized\")\n else:\n # Average batch time calculation\n total_batch_time = time.time() - self.start_time\n average_batch_time = total_batch_time / batches\n logging.info(\n \"Average %s batch time (ms) for %d batches: %d\"\n % (phase_type, batches, 1000.0 * average_batch_time)\n )\n\n # Train step time breakdown\n if local_variables.get(\"perf_stats\") is None:\n logging.warning('\"perf_stats\" not set in local_variables')\n elif task.train:\n logging.info(\n \"Train step time breakdown (rank {}):\\n{}\".format(\n get_rank(), local_variables[\"perf_stats\"].report_str()\n )\n )", "def _log_metrics(\n self,\n train_writer: SummaryWriter,\n val_writer: SummaryWriter,\n timestamped_save_dir: Path,\n train_metrics: _Metrics,\n step: int,\n ) -> None:\n if len(self.val_loader) > 0:\n val_metrics, val_img, val_gt, val_pred = self._get_val_metrics()\n if val_metrics.accuracy > self.best_acc:\n self.best_acc = val_metrics.accuracy\n self.save_weights(timestamped_save_dir, True)\n\n for key in vars(train_metrics):\n if key == \"class_loss\":\n tag = \"losses/classification\"\n elif key in {\"shape_loss\", \"total_loss\"}:\n continue\n else:\n tag = f\"metrics/{key}\"\n\n train_writer.add_scalar(tag, getattr(train_metrics, key), step)\n if len(self.val_loader) > 0:\n val_writer.add_scalar(tag, 
getattr(val_metrics, key), step)\n\n reg_loss = self._get_l2_reg()\n train_writer.add_scalar(\"losses/regularization\", reg_loss, step)\n train_writer.add_scalar(\"losses/shape\", train_metrics.shape_loss, step)\n train_writer.add_scalar(\n \"losses/total\",\n train_metrics.total_loss + self.config.weight_decay * reg_loss,\n step,\n )\n\n # Log a histogram for each tensor parameter in the model, to\n # see if a parameter is training stably or not\n for name, value in self.model.state_dict().items():\n train_writer.add_histogram(name, value, step)\n\n # Log the validation images for easy visualization\n if len(self.val_loader) > 0:\n val_writer.add_images(\"input\", val_img, step)\n val_writer.add_images(\"ground_truth\", val_gt, step)\n val_writer.add_images(\"prediction\", val_pred, step)", "def log_scalar(tag: str,\n data: float,\n global_step: int,\n walltime: Optional[float] = None,\n logger: Optional[logging.Logger] = None) -> None:\n logger = logger or _get_context_logger()\n logger.info(ScalarT(tag=tag, scalar_value=data, global_step=global_step,\n walltime=walltime or time.time()))", "def tag_metric(request, tag_id, metric_id, error='', message=''):\n try:\n tag = Tag.objects.get(id=tag_id)\n except:\n error += 'Couldn\\'t retrieve tag ' + tag_id + '.'\n try:\n metric = Metric.objects.get(id=metric_id)\n except:\n error += 'Couldn\\'t retrieve metric ' + metric_id + '.'\n\n if tag in metric.tags.all():\n error += 'This metric has already been tagged.'\n\n if not error:\n try:\n metric.tags.add(tag)\n message += 'Tagged metric ' + str(metric.id) + ' with ' + tag.name + '.'\n except:\n error += 'Couldn\\'t tag metric.'\n return index(request=request, error=error, message=message, metric_id=metric_id, tag_id=tag_id)", "def lambda_metric(metric_name, value, timestamp=None, tags=None):\n tags = _tag_dd_lambda_layer(tags)\n if os.environ.get(\"DD_FLUSH_TO_LOG\", \"\").lower() == \"true\":\n logger.debug(\"Sending metric %s to Datadog via log forwarder\", metric_name)\n print(\n json.dumps(\n {\n \"m\": metric_name,\n \"v\": value,\n \"e\": timestamp or int(time.time()),\n \"t\": tags,\n }\n )\n )\n else:\n logger.debug(\"Sending metric %s to Datadog via lambda layer\", metric_name)\n lambda_stats.distribution(metric_name, value, timestamp=timestamp, tags=tags)", "def report_metrics(prefix, metrics):\n series = []\n\n now = time.time()\n for key, value in metrics.iteritems():\n metric = '{prefix}.{key}'.format(prefix=prefix, key=key)\n point = [(now, value)]\n series.append({'metric':metric, 'points':point})\n\n if len(series) > 0:\n print u\"Sending {}\".format(series)\n dog_http_api.metrics(series)" ]
[ "0.6240059", "0.61237293", "0.604995", "0.6007885", "0.5990969", "0.58237666", "0.5668803", "0.5643581", "0.5567732", "0.5474371", "0.53737664", "0.5357524", "0.5344699", "0.5297509", "0.52917963", "0.52677846", "0.52462256", "0.52296853", "0.520308", "0.5174085", "0.5163968", "0.5161253", "0.5153624", "0.511461", "0.50881004", "0.5074278", "0.506438", "0.5032844", "0.5014642", "0.50072974" ]
0.86053985
0
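A sketch of how the log_metrics handler above is typically attached to an ignite Engine, assuming ignite's standard event API (the trainer and its no-op process function below are placeholders, not part of the dataset): add_event_handler passes the engine as the first argument and forwards the extra positional argument as the tag.

    # Attachment sketch only; the process function is a placeholder.
    from ignite.engine import Engine, Events

    def log_metrics(engine, tag):
        engine.logger.info("%s [%s/%s]: %s", tag, engine.state.epoch,
                           engine.state.iteration, engine.state.metrics)

    trainer = Engine(lambda engine, batch: None)
    trainer.add_event_handler(Events.EPOCH_COMPLETED, log_metrics, "train")
    trainer.run(range(4), max_epochs=2)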
Setup logger with `ignite.utils.setup_logger()`.
def setup_logging(config: Any) -> Logger: green = "\033[32m" reset = "\033[0m" logger = setup_logger( name=f"{green}[ignite]{reset}", level=logging.DEBUG if config.debug else logging.INFO, format="%(name)s: %(message)s", filepath=config.output_dir / "training-info.log", ) return logger
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def setup_logging():\n log.setup('keystone')", "def setup_log(self):\n self.logger, _ = get_logger(\"datatransform\")", "def setup_logger(self):\n setup_logger(logger, 'mayavi.log', mode=self.log_mode)", "def setup_logger():\n root = logging.getLogger()\n root.setLevel(LOGGING_LEVEL)\n formatter = logging.Formatter('%(asctime)s - %(message)s')\n ch = logging.StreamHandler(sys.stdout)\n ch.setLevel(LOGGING_LEVEL)\n ch.setFormatter(formatter)\n root.addHandler(ch)", "def setup_logger():\n LOG_DIR = unicode( os.environ.get(u'usep_gh__LOG_DIR') )\n LOG_LEVEL = unicode( os.environ.get(u'usep_gh__LOG_LEVEL') )\n filename = u'%s/usep_gh_handler.log' % LOG_DIR\n formatter = logging.Formatter( u'[%(asctime)s] %(levelname)s [%(name)s:%(lineno)s] %(message)s' )\n logger = logging.getLogger( __name__ )\n # logger = logging.getLogger( u'usep_gh_handler' )\n level_dict = { u'debug': logging.DEBUG, u'info':logging.INFO }\n logger.setLevel( level_dict[LOG_LEVEL] )\n file_handler = logging.FileHandler( filename )\n file_handler.setFormatter( formatter )\n logger.addHandler( file_handler )\n logger.debug( u'in utils.log_helper.setup_logger(); log initialized at %s' % unicode(datetime.datetime.now()) )\n return logger", "def _setup_logger():\n root = logging.getLogger()\n root.setLevel(logging.INFO)\n\n log_handle = logging.StreamHandler(sys.stdout)\n formatter = logging.Formatter(\n \"[%(levelname)s] (%(asctime)s) - %(message)s\", datefmt=\"%Y-%m-%d %H:%M:%S\"\n )\n log_handle.setFormatter(formatter)\n root.addHandler(log_handle)\n\n logging.info(\"Initializing snakes\")", "def setup_logging():\n product_name = \"plasma\"\n logging.setup(cfg.CONF, product_name)\n LOG.info(\"Logging enabled!\")\n LOG.debug(\"command line: %s\", \" \".join(sys.argv))", "def setup_class(cls):\n if os.path.exists(logfilename):\n os.remove(logfilename)\n log = logutils.get_logger(__name__)\n log.root.handlers = []\n logutils.config(mode='standard', console_lvl='stdinfo',\n file_name=logfilename)", "def setup_logger() -> None:\n LOGGER.setLevel(logging.DEBUG)\n formatter = logging.Formatter('%(levelname)s \\t|%(asctime)s \\t| %(name)s \\t| %(message)s')\n\n if not check_if_dir_exists(FILENAMES.LOG_DIR):\n os.mkdir(to_abs_file_path(FILENAMES.LOG_DIR))\n\n file_handler: logging.FileHandler = logging.FileHandler(to_abs_file_path(FILENAMES.LOG), mode='w')\n file_handler.setLevel(logging.INFO)\n file_handler.setFormatter(formatter)\n\n console_handler: logging.StreamHandler = logging.StreamHandler()\n console_handler.setLevel(logging.WARNING)\n\n LOGGER.addHandler(file_handler)\n LOGGER.addHandler(console_handler)\n LOGGER.info('Filehandler and Console_Handler were born, let\\'s start logging')", "def __setup_logging(self):\n\n loglevel = logging.INFO\n if self.config[\"verbose\"]:\n loglevel = logging.DEBUG\n\n FORMAT = '[%(asctime)s %(filename)s:%(lineno)s %(levelname)s] %(message)s'\n if self.config[\"log\"]:\n logging.basicConfig(format=FORMAT, level=loglevel, filename=self.config[\"log\"])\n else:\n logging.basicConfig(format=FORMAT, level=loglevel)", "def setUp(self):\n self.logger = logging.getLogger(\"dbs test logger\")", "def setup_logging():\n lvl = os.getenv(\"LOG_LEVEL\")\n path = os.getenv(\"LOG_PATH\")\n\n logger = get_logger()\n logger.setLevel(lvl)\n\n filehandler = logging.FileHandler(path)\n filehandler.setLevel(lvl)\n filehandler.setFormatter(logging.Formatter(\n \"[%(asctime)s] %(levelname)s: %(message)s\",\n datefmt=\"%Y-%d-%m %H:%M:%S\"\n ))\n\n streamhandler = logging.StreamHandler()\n 
streamhandler.setLevel(lvl)\n streamhandler.setFormatter(logging.Formatter(\"%(message)s\"))\n\n logger.addHandler(filehandler)\n logger.addHandler(streamhandler)", "def setup_logger():\n now = datetime.now()\n logging.basicConfig(level=logging.DEBUG)\n logging.getLogger(\"requests\").setLevel(logging.WARNING)\n logging.getLogger(\"urllib3\").setLevel(logging.WARNING)\n logging.info(f\"Script run on: {now}\")", "def _initialize_logging(self):\n LOG_CFG = os.environ.get('LOG_CFG', 'LOCAL')\n configure_logging(LOG_CFG)\n self.logger = logging.getLogger(self.__class__.__name__)", "def setup_logging():\n logging.basicConfig(format='%(levelname)s: %(message)s', level=LOGLEVEL)", "def setupLogging():\n global enabled, dummyInstance\n from pyemma.util.config import conf_values\n args = conf_values['Logging']\n\n if args.enabled:\n if args.tofile and args.file:\n filename = args.file\n else:\n filename = None\n try:\n logging.basicConfig(level=args.level,\n format=args.format,\n datefmt='%d-%m-%y %H:%M:%S',\n filename=filename,\n filemode='a')\n except IOError as ie:\n import warnings\n warnings.warn('logging could not be initialized, because of %s' % ie)\n return\n \"\"\" in case we want to log to both file and stream, add a separate handler\"\"\"\n if args.toconsole and args.tofile:\n ch = logging.StreamHandler()\n ch.setLevel(args.level)\n ch.setFormatter(logging.Formatter(args.format))\n logging.getLogger('').addHandler(ch)\n else:\n dummyInstance = dummyLogger()\n\n enabled = args.enabled", "def log_setup(self):\n # Logger initialisation\n logger = logging.getLogger(self.app_name)\n logger.setLevel(logging.DEBUG)\n\n # Creating console handler and set level to debug\n ch = logging.StreamHandler()\n ch.setLevel(logging.DEBUG)\n\n # Creating formatter\n formatter = logging.Formatter(\n '%(asctime)s - %(name)s - %(levelname)s - %(message)s'\n )\n\n # Adding formatter to ch\n ch.setFormatter(formatter)\n\n # Adding ch to logger\n logger.addHandler(ch)\n\n # Setting the Logger Level (INFO)\n logger.setLevel(logging.INFO)\n\n return logger", "def _configure_logging(self):\n pass", "def setup_logging(log_file):\n\tglobal logger\n\tif log_file:\n\t\tlogging.basicConfig(format='%(asctime)s %(levelname)s %(message)s',filename=log_file,filemode='w',level=logging.INFO)\n\telse:\n\t\tlogging.basicConfig(format='%(asctime)s %(levelname)s %(message)s',level=logging.INFO)\n\tlogger = logging.getLogger('default')", "def setup(log_level, log_name):\n\n # Log format string for flake8 compliance\n log_fmt = ('%(levelname)-8s %(asctime)s%(filename)s:%(lineno)-4s '\n '%(message)s')\n\n # Configure logging\n config = {\n 'version': 1,\n 'disable_existing_loggers': False,\n 'formatters': {\n 'default': {\n 'format': log_fmt,\n 'datefmt': '%Y-%m-%d %H:%M:%S',\n },\n },\n 'handlers': {\n 'console': {\n 'class': 'logging.StreamHandler',\n 'formatter': 'default',\n },\n },\n 'loggers': {\n 'createtransfers': {\n 'level': log_level,\n 'handlers': ['console'],\n },\n },\n }\n\n logger = logging.getLogger(log_name)\n logging.config.dictConfig(config)\n return logger", "def setupLogging(self):\n\t\ttry:\n\t\t\tself.logger = logging.getLogger(__name__)\n\t\t\thandler = RotatingFileHandler(self.logFile, maxBytes=500000, backupCount=5)\n\t\t\tformat = \"%(asctime)s %(levelname)-8s %(message)s\"\n\t\t\thandler.setFormatter(logging.Formatter(format))\n\t\t\thandler.setLevel(logging.INFO)\n\t\t\tself.logger.addHandler(handler)\n\t\t\tself.logger.setLevel(logging.INFO)\n\t\texcept Exception as err:\n\t\t\terrorStr = 'Error 
initializing log file, ',err\n\t\t\tprint(errorStr)\n\t\t\texit(1)", "def init_logging():\n global logger\n logging.basicConfig(\n format='%(levelname)s - %(message)s',\n )\n logger = logging.getLogger('runner')\n logger.setLevel(os.environ.get('LOGGING_LEVEL', 'INFO'))", "def setup_logs(arg_log_dir, log_level='debug'):\n assert log_level.lower() in ('debug', 'info', 'warning', 'error', 'critical')\n global logger\n cl_logger = log.LogManager(app_name=APP_NAME,\n log_name=__name__,\n log_dir=arg_log_dir)\n logger = cl_logger.logger\n logger.setLevel(log_level.upper())", "def setup_logging(logger):\n hdlr = logging.FileHandler('linter.log', 'w')\n logger.addHandler(hdlr)\n logger.setLevel(logging.DEBUG)\n return logger", "def setup_logfile():\r\n from core.general.appinit import log_init\r\n log_init(\r\n 'general',\r\n 'django_api'\r\n )", "def setup_logging():\n logger = logging.getLogger()\n logger.level = logging.DEBUG\n stream_handler = logging.StreamHandler(sys.stdout)\n logger.addHandler(stream_handler)", "def _init():\n global logger\n logger = logging.getLogger(\"Log\")", "def set_logger(logger):\n global __log__\n __log__ = logger", "def init_logging():\n global logger\n logger = logging.getLogger('autogen_quartus')", "def setup_logger():\n\n global _logger\n global _has_logbook\n\n if _has_logbook:\n _logger = Logger('UoM_WIFI')\n try:\n log_path = join(sys.argv[1], '%s.log' % USERNAME)\n except IndexError:\n log_path = join(split(abspath(__file__))[0], '%s.log' % USERNAME)\n\n # because the log file is owned by root, if this program is ran by a\n # regular user, we need to prevent it from crashing by writing to a file\n # owned by root\n try:\n # create the handler\n log_handler = RotatingFileHandler(log_path)\n\n # push the context object to the application stack\n log_handler.push_application()\n except IOError:\n _has_logbook = False" ]
[ "0.791117", "0.72085685", "0.71950054", "0.71890855", "0.7155964", "0.71218204", "0.70645064", "0.7047346", "0.7046134", "0.70337045", "0.70297927", "0.7022614", "0.7021542", "0.70177054", "0.7013548", "0.7011018", "0.7008532", "0.70057917", "0.69638646", "0.6959906", "0.6954819", "0.6950255", "0.6938939", "0.6906974", "0.69064814", "0.6888991", "0.6881189", "0.68656945", "0.68566906", "0.68534625" ]
0.7727292
1
If something is not a list already, make it one; otherwise simply return something
def aslist(something): return something if isinstance(something, list) else [something]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def listify(value):\n if isinstance(value, list):\n return value\n else:\n return [value]", "def listify(arg):\n if isinstance(arg, list):\n return arg\n else:\n return [arg]", "def listify(supposed_lst:object=None):\n\tif (supposed_lst is not None):\n\t\tif (not isinstance(supposed_lst, list)):\n\t\t\tsupposed_lst = [supposed_lst]\n\t\t# If it was already a list, check it for emptiness and `None`.\n\t\telif (isinstance(supposed_lst, list)):\n\t\t\tif (not supposed_lst):\n\t\t\t\traise ValueError(dedent(\n\t\t\t\t\tf\"Yikes - The list you provided contained `None` as an element.\" \\\n\t\t\t\t\tf\"{supposed_lst}\"\n\t\t\t\t))\n\t\t\tif (None in supposed_lst):\n\t\t\t\traise ValueError(dedent(\n\t\t\t\t\tf\"Yikes - The list you provided contained `None` as an element.\" \\\n\t\t\t\t\tf\"{supposed_lst}\"\n\t\t\t\t))\n\t# Allow `is None` to pass through because we need it to trigger null conditions.\n\treturn supposed_lst", "def ensure_list(self, x):\n return x if isinstance(x, list) else [x]", "def ensure_list(thing):\r\n\r\n if isinstance(thing, str_types):\r\n return [thing]\r\n return thing", "def is_list(value):\n return isinstance(value, list) or None", "def _MakeList(input):\n if len(input) == 0:\n raise ValueError(\n 'input cannot be empty.')\n elif len(input) == 1:\n output = input[0]\n if not isinstance(output, list):\n output = [output]\n else:\n output = list(input)\n return output", "def make_list(item_or_items):\n if item_or_items is None:\n return None\n if isinstance(item_or_items, list):\n return item_or_items\n if isinstance(item_or_items, dict):\n return [item_or_items]\n if hasattr(item_or_items, '__iter__') \\\n and not isinstance(item_or_items, str):\n return list(item_or_items)\n return [item_or_items]", "def setList(value):\n if value is None:\n return []\n else:\n return value", "def str_or_list(value):\n if isinstance(value, list):\n return value\n return [value]", "def listify(x):\n if (not isinstance(x, basestring)) and isinstance(x, Sequence):\n return x\n else:\n return [x]", "def listify(x):\n\n if isinstance(x, list):\n return x\n elif isinstance(x, tuple):\n return list(x)\n else:\n return [x]", "def safelist(listable):\n if type(listable) == str:\n return [listable]\n else:\n return listable.tolist()", "def listify(obj):\n if obj is None:\n # When the object is None, an empty list will be returned\n return []\n elif isinstance(obj, list):\n # When the object is already a list, that list will be returned\n return obj\n\n # When a single object is passed to the method, a list with the\n # object as single item will be returned\n return [obj]", "def make_list( elements ):\n if isinstance(elements, (list, tuple)):\n return elements\n else:\n return [elements]", "def listify(obj):\n if obj is None:\n return []\n else:\n return obj if isinstance(obj, (list, tuple, type(None))) else [obj]", "def listify(obj):\n if obj is None:\n return []\n else:\n return obj if isinstance(obj, (list, tuple, type(None))) else [obj]", "def listify(obj):\n if obj is None:\n return []\n else:\n return obj if isinstance(obj, (list, tuple, type(None))) else [obj]", "def makelist(input):\n if isinstance(input, list) or isinstance(input, np.array):\n output = input\n else:\n output = [input]\n return output", "def ensure_list(obj, allow_tuple=True):\n if isinstance(obj, list):\n return obj\n\n elif allow_tuple and isinstance(obj, tuple):\n return obj\n elif not allow_tuple and isinstance(obj, tuple):\n return list(obj)\n else:\n return [obj]", "def _is_list(item):\n return 
isinstance(item, list)", "def as_list(arg):\n if _is_list(arg):\n return arg\n return [arg]", "def default_to_list(value):\n if not isinstance(value, list) and value is not None:\n value = [value]\n elif value is None:\n value = []\n\n return value", "def atomp(lst):\n return not isinstance(lst, list)", "def atomp(lst):\n return not isinstance(lst, list)", "def listify(obj):\n return obj if isinstance(obj, (list, tuple, type(None))) else [obj]", "def controlled_list(input_list):\n output_list = input_list\n\n if not isinstance(input_list, list):\n\n dummy_list = []\n dummy_list.append(input_list)\n output_list = dummy_list\n print('Converting')\n print('Before return')\n print(output_list)\n return output_list", "def force_list(object):\n try:\n return list(object)\n except TypeError:\n return [object]", "def _is_list(arg):\n return isinstance(arg, collections.Sequence) and not _is_string(arg)", "def ensure_list(iterable: Iterable[A]) -> List[A]:\n if isinstance(iterable, list):\n return iterable\n else:\n return list(iterable)" ]
[ "0.7072959", "0.70661867", "0.7043395", "0.68962115", "0.6856602", "0.65573055", "0.6549759", "0.64554024", "0.64472896", "0.6420959", "0.6384629", "0.63443786", "0.6334466", "0.6304437", "0.6287558", "0.62678397", "0.62678397", "0.62678397", "0.62564147", "0.6251007", "0.6238834", "0.6220301", "0.62200993", "0.619375", "0.619375", "0.6184984", "0.6180535", "0.6106389", "0.60871404", "0.60648" ]
0.7201694
0
Convert a schema name like DISCOVER_SCHEMA_ROWSETS into a method name like getSchemaRowsets. 1. split into parts by _
def schemaNameToMethodName(schemaName): parts = schemaName.split("_") def replace(what): if what == "DBSCHEMA": return "DBSchema" elif what == "MDSCHEMA": return "MDSchema" elif what == "DISCOVER": return "" return what.lower().capitalize() return "get" + "".join(map(replace, parts))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def resolver(schema):\n name = schema.__name__\n if name.endswith(\"Schema\"):\n return name[:-6] or name\n return name", "def split_fqn(fqn_name):\n try:\n schema, table = fqn_name.split('.')\n except Exception as e:\n logger.error(\"Failed to split name %s into schema and table, please check the format is schema.table\" % fqn_name)\n raise Exception('%s' % str(e))\n return schema, table", "def get_schema(schema): # noqa: E501\n return 'do some magic!'", "def get_schema_name(schema_path):\n print(schema_path)\n path = os.path.normpath(schema_path)\n return os.path.sep.join(path.split(os.path.sep)[-3:])", "def _path_from_name(name, type):\n if name.startswith('_'):\n return name.split('/')\n design, name = name.split('/', 1)\n return ['_design', design, type, name]", "def _split_name(name):\n name_split = name.split('_view_')\n view_num = None\n if(len(name_split) > 1):\n view_num = int(name_split[1])\n optimizer_key = ''\n fp16_key = ''\n if name_split[0].startswith('Moment_1'):\n optimizer_key = 'Moment_1_'\n elif name_split[0].startswith('Moment_2'):\n optimizer_key = 'Moment_2_'\n elif name_split[0].startswith('Update_Count'):\n optimizer_key = 'Update_Count_'\n elif name_split[0].endswith('_fp16'):\n fp16_key = '_fp16'\n param_name = name_split[0]\n if optimizer_key != '':\n param_name = param_name.split(optimizer_key)[1]\n param_name = param_name.split('_fp16')[0]\n return param_name, optimizer_key, view_num, fp16_key", "def get_qualified_name(self):\r\n return self.__schema + \".\" + self.__name", "def _get_schema(want_version):\n for maj, min in _GET_SCHEMA_MICROVERSIONS:\n if want_version.matches((maj, min)):\n return getattr(schema, 'GET_SCHEMA_%d_%d' % (maj, min))\n\n return schema.GET_SCHEMA_1_10", "def _split_table_name(table_name):\n table_name_items = table_name.split(\".\")\n if len(table_name_items) == 1:\n schema_name = None\n elif len(table_name_items) == 2:\n schema_name, table_name = table_name_items\n else:\n raise ValueError(\"Cannot determine schema/table name from input {}\".format(table_name))\n return schema_name, table_name", "def __tablename__(self):\n return sub(r\"(?<!^)(?=[A-Z])\", \"_\", self.__name__).lower()", "def test_schema():\n return 'test_schema'", "def encodeSchemaName(self, schema):\r\n return '\"{}\"'.format(schema)", "def _columns(cls, schema: dsl.Source.Schema) -> typing.Sequence[str]:\n return tuple(f.name for f in schema)", "def standardize_table_name(self, schema_name: str, table: str) -> str:\n return table", "def create_schema(self, schema: str):\n return", "def get_parameterized_names():\n return [name.split('.')[0] for name in os.listdir(os.path.dirname(__file__) + '/../test_schemas')\n if 'mixins' not in name]", "def test_split_table_name(self):\n\n self.assertEqual(\n {\"database\": \"database\", \"database_schema\": \"schema\", \"table\": \"table\"},\n fqn.split_table_name(table_name=\"database.schema.table\"),\n )\n\n self.assertEqual(\n {\"database\": None, \"database_schema\": \"schema\", \"table\": \"table\"},\n fqn.split_table_name(table_name=\"schema.table\"),\n )\n\n self.assertEqual(\n {\"database\": None, \"database_schema\": None, \"table\": \"table\"},\n fqn.split_table_name(table_name=\"table\"),\n )\n\n # We also clean quotes\n self.assertEqual(\n {\"database\": \"database\", \"database_schema\": \"schema\", \"table\": \"table\"},\n fqn.split_table_name(table_name='database.\"schema\".table'),\n )", "def get_name(tablename):\n\n return tablename[tablename.find(\"_\") + 1:].replace(\"_\", \" \").capitalize()", "def 
short_column(name : str) -> str:\n return name.split(\"-\")[1]", "def split_table_schema(table_name):\r\n\r\n split = table_name.split('.')\r\n if len(split) > 1:\r\n return (split[0], split[1])\r\n else:\r\n return (None, split[0])", "def namehack(field):\n if field.endswith((\"attribute\", \"views\")):\n return field + \"__name\"\n else:\n return field", "def namingConvention(columnName):\n words = columnName.lower().split(\"_\")\n\n def cap(word):\n if word.lower() == \"id\":\n return word.upper()\n else:\n return word.capitalize()\n\n return words[0] + \"\".join(map(cap, words[1:]))", "def s(x):\n return x.name.lower().replace('_', '-')", "def __name__(self):\n return '_'.join([function.__name__ for function in self.functions])", "def method_union_name(self) -> str:", "def _get_name(name):\n if \"::\" in name:\n return name.split(\"::\")[1]\n return name", "def _get_name(name):\n if \"::\" in name:\n return name.split(\"::\")[1]\n return name", "def sql_name_pattern(pattern):\n\n inquotes = False\n relname = ''\n schema = None\n pattern_len = len(pattern)\n i = 0\n\n while i < pattern_len:\n c = pattern[i]\n if c == '\"':\n if inquotes and i + 1 < pattern_len and pattern[i + 1] == '\"':\n relname += '\"'\n i += 1\n else:\n inquotes = not inquotes\n elif not inquotes and c.isupper():\n relname += c.lower()\n elif not inquotes and c == '*':\n relname += '.*'\n elif not inquotes and c == '?':\n relname += '.'\n elif not inquotes and c == '.':\n # Found schema/name separator, move current pattern to schema\n schema = relname\n relname = ''\n else:\n # Dollar is always quoted, whether inside quotes or not.\n if c == '$' or inquotes and c in '|*+?()[]{}.^\\\\':\n relname += '\\\\'\n relname += c\n i += 1\n\n if relname:\n relname = '^(' + relname + ')$'\n\n if schema:\n schema = '^(' + schema + ')$'\n\n return schema, relname", "def _extract_ks_tab(name):\n sp = name.split(\".\")\n if len(sp) == 2:\n ksp = sp[0]\n table = sp[1]\n else:\n ksp = config.execution_name\n table = name\n return ksp.lower().encode('UTF8'), table.lower().encode('UTF8')", "def normalize_const(var_name):\n return var_name.lower().split('_')" ]
[ "0.6052163", "0.5584583", "0.5581645", "0.5529912", "0.5454102", "0.54007065", "0.5299308", "0.52945566", "0.5244302", "0.5170896", "0.513098", "0.5112123", "0.50779843", "0.5071181", "0.5046438", "0.5041104", "0.50338566", "0.5025325", "0.49718887", "0.49697247", "0.49661854", "0.49619263", "0.49456298", "0.49357218", "0.49316782", "0.49178118", "0.49178118", "0.49031445", "0.48928332", "0.48914716" ]
0.7809508
0
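The conversion above can be checked against its own rules (DISCOVER is dropped, DBSCHEMA/MDSCHEMA keep mixed case, every other part is capitalized); the inputs below are illustrative rowset names traced by hand through the function.

    # Self-contained check; the function is copied from the record's document field.
    def schemaNameToMethodName(schemaName):
        parts = schemaName.split("_")
        def replace(what):
            if what == "DBSCHEMA":
                return "DBSchema"
            elif what == "MDSCHEMA":
                return "MDSchema"
            elif what == "DISCOVER":
                return ""
            return what.lower().capitalize()
        return "get" + "".join(map(replace, parts))

    assert schemaNameToMethodName("DISCOVER_SCHEMA_ROWSETS") == "getSchemaRowsets"
    assert schemaNameToMethodName("MDSCHEMA_CUBES") == "getMDSchemaCubes"
    assert schemaNameToMethodName("DBSCHEMA_CATALOGS") == "getDBSchemaCatalogs"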
Perform linting of the source code
def lint(ctx): print('Running linting...') ctx.run('pylint metrics')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def lint():\n toolkit.lint(exclude=[\"__init__.py\"])", "def lint(self):\n raise NotImplementedError()", "def lint(session):\n session.install(\"pre-commit\")\n session.run(\"pre-commit\", \"run\", \"--all-files\")", "def lint():\n load_env_vars('dev')\n from tools.static_code_analysis import Lint\n pylint = Lint()\n score = pylint.run_test()\n pylint.create_badge(score)", "def lint(session):\n session.install(\"flake8\")\n session.run(\"flake8\", \"\")", "def commands_lint():\n lint()", "def lint(context):\n context.run(\" \".join([\n \"autopep8\",\n \"--recursive\",\n \"--jobs 0\",\n \"--in-place\",\n \"--aggressive\",\n \"-v\",\n PACKAGE_NAME,\n \"tests\"\n ]))\n context.run(\"pylint %s\" % PACKAGE_NAME)", "def lint(cline):\n print(\"Linting with pylint.\")\n cline.run(r\"git ls-files '*.py' | xargs python3 -m pylint -j 0\")\n print(\"Type checking with mypy.\")\n cline.run(r\"git ls-files '*.py' | xargs python3 -m mypy\")", "def lint(session):\n session.install(\"-r\", \"requirements-test.txt\")\n session.install(\"-r\", \"requirements.txt\")\n session.install(\"flake8-import-order\")\n session.run(\"black\", \"--check\", *BLACK_PATHS)\n session.run(\n \"flake8\",\n \"--import-order-style=google\",\n \"--application-import-names=google,tests\",\n \"google\",\n \"tests\",\n )\n session.run(\"mypy\", \"google\", \"tests\")\n session.run(\"python\", \"setup.py\", \"sdist\")\n session.run(\"twine\", \"check\", \"dist/*\")", "def lint(self):\n lint_list = (\n (self.html_anchor_tags, \"Remove or replace HTML anchor tags with markdown links.\"),\n (self.html_img_tags, \"Replace HTML img tags with markdown image links.\"),\n (self.orphan_headers, \"Move orphan headers to the first line in a markdown cell.\"),\n (self.output_errors, \"Fix errors found in the output of codes cells.\"))\n if any([lint for lint, msg in lint_list]):\n print(self.filename)\n for lint, msg in lint_list:\n if lint:\n print(f\" {msg}\")\n for s in lint:\n print(f\" {s}\")", "def run(self):\n success = False\n try:\n # Download file if a source_zip_url was given\n if self.source_zip_url:\n App.logger.debug(\"Linting url: \" + self.source_zip_url)\n self.download_archive()\n # unzip the input archive if a source_zip_file exists\n if self.source_zip_file:\n App.logger.debug(\"Linting zip: \" + self.source_zip_file)\n self.unzip_archive()\n # lint files\n if self.source_dir:\n self.rc = RC(directory=self.source_dir)\n App.logger.debug(\"Linting '{0}' files...\".format(self.source_dir))\n success = self.lint()\n App.logger.debug(\"...finished.\")\n except Exception as e:\n message = 'Linting process ended abnormally: {0}'.format(e.message)\n App.logger.error(message)\n self.log.warnings.append(message)\n App.logger.error('{0}: {1}'.format(str(e), traceback.format_exc()))\n result = {\n 'success': success,\n 'warnings': self.log.warnings,\n }\n App.logger.debug(\"Linter results: \" + str(result))\n return result", "def pylint(context):\n exec_cmd = 'find . 
-name \"*.py\" | xargs pylint'\n run_cmd(context, exec_cmd)", "def lint_py_check_per_line(_repo, cf):\n with open(cf.name, \"r\", encoding = 'utf-8', errors = 'replace') as f:\n line_cnt = 0\n regex_import = re.compile(r\"\\s*from\\s+.*\\s+import\\s+.*\")\n warnings = 0\n for line in f:\n line_cnt += 1\n line = line.strip()\n if not line_cnt in cf.lines or _repo.wide:\n continue\t# Not a line modified, skip it\n\n # Check that imports are not done with *from HERE import\n # THAT* because it makes code very confusing when we can't\n # see where functions are coming from\n m = regex_import.match(line)\n if m:\n _repo.warning(\"\"\"\\\n%s:%d: python style error: use 'import MODULE' vs 'from MODULE import SYMBOLs'\n see https://securewiki.ith.intel.com/display/timo/Coding+Style+and+procedures#CodingStyleandprocedures-Importingcode\"\"\"\n % (cf.name, line_cnt))\n warnings += 1\n\n # We like spacing around equal signs and operators in\n # general, the C way. The python way sucks. ARG OPERATOR\n # ARG beats ARGOPERATORARG. Ewks.\n\n # Likewise, [X] is an index, [ X ] is a list. Heaven's\n # sake. For consistency, dictionaries are { K: V }; it's\n # really had to check on those and a patch to pylint would\n # be needed for that.\n\n\n regex_bad_eqop = re.compile(r\"\\S(=|==|!=|\\+=|-=|\\*=|/=|\\|=|&=|^=)\\S\")\n regex_config = re.compile(\"CONFIG_[^=]+=\")\n # Catches things like blabla('--someswitch=', whatever) or\n # blabla(\"--something=that\")\n regex_string = re.compile(r\"=dd[^\\s'\\\"]*['\\\"]\")\n\n # Got a probable bad usage?\n m = regex_bad_eqop.search(line)\n if m:\n # Maybe a config assignment (this is actually shell code)\n if regex_config.search(line) or regex_string.search(line):\n continue\n # Maybe rst code, ignore it\n if '===' in line:\n continue\n _repo.warning(\"\"\"\\\n%s:%d: python style error: always leave spaces around operators\n ('a = b' vs 'a=b')\\\n\"\"\" % (cf.name, line_cnt))", "def lint_pylint(context):\n context.run(\"pylint {}\".format(\" \".join(PYTHON_DIRS)))", "def lint(to_lint):\n exit_code = 0\n for linter, options in (('pyflakes', []), ('pep8', pep8_options)):\n try:\n output = local[linter](*(options + to_lint))\n except commands.ProcessExecutionError as e:\n output = e.stdout\n\n if output:\n exit_code = 1\n print \"{0} Errors:\".format(linter)\n print output\n\n output = hacked_pep257(to_lint)\n if output:\n exit_code = 1\n print \"Docstring Errors:\".format(linter.upper())\n print output\n\n sys.exit(exit_code)", "def lint(self):\n return {\n \"actions\": [\n (create_dir, [\"build/lint\"]),\n TaskCreator.get_flake8() + \" \" + self.project_name_sc + \" | tee build/lint/flake8.log\",\n TaskCreator.get_pylint() + \" --output-format=parseable --reports=no \" + self.project_name_sc + \" | tee build/lint/pylint.log\"\n ],\n \"verbosity\": 2\n }", "def lint(fix_imports, check):\n skip = [\"node_modules\", \"requirements\", \"migrations\"]\n root_files = glob(\"*.py\")\n root_directories = [\n name for name in next(os.walk(\".\"))[1] if not name.startswith(\".\")\n ]\n files_and_directories = [\n arg for arg in root_files + root_directories if arg not in skip\n ]\n\n def execute_tool(description, *args):\n \"\"\"Execute a checking tool with its arguments.\"\"\"\n command_line = list(args) + files_and_directories\n click.echo(f\"{description}: {' '.join(command_line)}\")\n rv = call(command_line)\n if rv != 0:\n exit(rv)\n\n isort_args = []\n black_args = []\n if check:\n isort_args.append(\"--check\")\n black_args.append(\"--check\")\n if fix_imports:\n 
execute_tool(\"Fixing import order\", \"isort\", *isort_args)\n execute_tool(\"Formatting style\", \"black\", *black_args)\n execute_tool(\"Checking code style\", \"flake8\")", "def lint():\n run('python3 -m pylint --load-plugins pylint_django api',\n warn=True, echo=True)", "def lint(session):\n session.install('-rrequirements-dev.txt')\n session.install('-e', '.')\n run_all_linters(session)", "def lint(self, vartok, linted_entry):\n raise NotImplemented", "def _lint(self, python_contents):\n path = ka_root.join('foo.py')\n self.set_file_contents(path, python_contents)\n return i18n_lint.lint_non_literal_i18n_in_python([path])", "def lint(session):\n session.install(\"flake8\", BLACK_VERSION)\n session.run(\n \"black\",\n \"--check\",\n *BLACK_PATHS,\n )\n session.run(\"flake8\", *BLACK_PATHS)", "def lint(arg):\n local('pylint --rcfile=.pylintrc ' + arg)", "def run_check(self, ctx: RunContext): # noqa\n params = ctx.get_params(\"pycodestyle\")\n options = ctx.options\n if options:\n params.setdefault(\"max_line_length\", options.max_line_length)\n\n if params:\n parser = get_parser()\n for option in parser.option_list:\n if option.dest and option.dest in params:\n value = params[option.dest]\n if isinstance(value, str):\n params[option.dest] = option.convert_value(option, value)\n\n style = StyleGuide(reporter=_PycodestyleReport, **params)\n options = style.options\n options.report.ctx = ctx # type: ignore\n checker = Checker(ctx.filename, lines=ctx.lines, options=options)\n checker.check_all()", "def run():\n args = parse_args(sys.argv[1:])\n fnames = args.fnames\n runner = PylintRunner(args)\n runner.run(fnames)", "def lint(\n command,\n):\n print(\n \"\"\"\nRunning flakeheaven, a Python code linter\n===================================\n\"\"\"\n )\n command.run(\"flakeheaven lint\", echo=True, pty=POSIX)", "def _run(self, config):\n \n files = self.transaction.get_files(\n config.check_files, config.ignore_files\n )\n # Exit when no files has to be checked.\n if not files:\n self.logger.debug(\"PyLint check skipped. No files for check.\")\n return self.success()\n \n # Defining pylint home directory.\n os.environ['PYLINTHOME'] = config.pylint_home\n self.logger.debug(\"PyLint Home is used at '%s'.\", config.pylint_home)\n \n # Determine which pylintrc file is used for the validation.\n if config.pylintrc:\n self.logger.debug(\"Pylintrc is used at '%s'.\", config.pylintrc)\n os.environ['PYLINTRC'] = config.pylintrc\n else:\n self.logger.debug(\"Default PyLintRC is used.\")\n \n # Only added or updated files will be checked.\n files = [\n self.transaction.get_file(name) \n for name, attr in files.iteritems() \n if attr in [\"A\", \"U\", \"UU\"]\n ]\n \n if not files:\n self.logger.debug(\"No files to validate. 
PyLint check skipped.\")\n return self.success()\n \n output = StringIO.StringIO()\n reporter = TextReporter(output)\n \n # Mock to prevent the sys.exit called by pylint.lint.Run.__init__\n lint.sys.exit = lambda _: 0\n \n self.logger.debug(\"PyLint is running...\")\n lint.Run([\"--reports=n\"] + files, reporter=reporter)\n \n output = output.getvalue()\n self.logger.debug(\"PyLint output:\\n %s\", output)\n if output:\n return self.error(output)\n else:\n return self.success()", "def pylint(ctx):\n ctx.run(f\"{VENV_PREFIX} pylint {COMMON_MODULES_AS_PARAM}\")", "def main():\n try:\n userParams, repoParams = splitArgv(sys.argv)\n parser = ArgumentParser(description = \"StyleCop parameters\")\n\n parser.add_argument(\"--repo\", dest=\"repo\", action=\"store\",\n help=\"Repository that use this script in hook\")\n parser.add_argument(\"--stage\", dest=\"stage\", action=\"store\",\n help=\"Stage of work with VCS\")\n parser.add_argument(\"--config\", dest=\"config\", action=\"store\",\n help=\"StyleCop config file\")\n\n\n args = parser.parse_args(userParams)\n\n configParser = ConfigParser()\n configString = FileReader.readFile(args.config)\n config = configParser.parse(configString)\n\n factory = repos.ReposFactory()\n repository = factory.getRepository(args.repo, args.stage)\n changedFiles = repository.getChangedFiles(repoParams)\n\n extensionsDict = config.getDictionary(\"extensions\")\n\n checkersFactory = CheckersFactory(extensionsDict)\n\n # List of strings of style violations\n errors = []\n\n for file in changedFiles:\n ext = getFileExtension(file)\n checker = checkersFactory.getChecker(ext)\n sourceString = FileReader.readFile(file)\n\n errors += checker.check(sourceString)\n\n \n\n except ValueError as er:\n pass\n except Exception as ex:\n pass\n \n if len(errors) > 0:\n repository.sendError(\"Total number of style errors: \" + errors)\n repository.sendError(\"Update failed\")\n \n # If there were no errors we permit this update\n return len(errors)", "def pylint(session):\n session.install('-rrequirements-dev.txt')\n session.install('-e', '.')\n run_pylint(session)" ]
[ "0.78138644", "0.7574533", "0.75095797", "0.73593503", "0.7336891", "0.73338914", "0.7326553", "0.7140035", "0.71232647", "0.7041895", "0.7038825", "0.69987965", "0.6986155", "0.68960625", "0.6859346", "0.6848015", "0.68006235", "0.6749812", "0.6739455", "0.66621584", "0.663092", "0.6623491", "0.6566223", "0.6557687", "0.6542848", "0.65361786", "0.65352947", "0.6532311", "0.65314186", "0.6514582" ]
0.7750827
1
Print the build metrics
def metrics(_):
    collector = BuildsCollector()
    build_metrics, headers = collector.get_metrics_table()
    print(tabulate(build_metrics, headers=headers))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def print_metrics(result):\n logging.log(LOG_LEVEL_OUTPUT_INFO,\n '------------------------------------------------')\n logging.log(LOG_LEVEL_OUTPUT_INFO, ' KEY METRICS: ')\n logging.log(LOG_LEVEL_OUTPUT_INFO,\n '------------------------------------------------')\n logging.log(LOG_LEVEL_OUTPUT_INFO, '* pages_count: %d',\n get_counter_metric(result, 'pages_count'))\n logging.log(LOG_LEVEL_OUTPUT_INFO, '* revisions_count: %d',\n get_counter_metric(result, 'revisions_count'))\n logging.log(LOG_LEVEL_OUTPUT_INFO, '* very_long_page_histories_count: %d',\n get_counter_metric(result, 'very_long_page_histories_count'))\n revisions_per_page_distr = get_distributions_metric(\n result, 'revisions_per_page_distr')\n logging.log(LOG_LEVEL_OUTPUT_INFO, '* revisions_per_page_distr.mean: %d',\n revisions_per_page_distr.mean)\n logging.log(LOG_LEVEL_OUTPUT_INFO, '* revisions_per_page_distr.sum: %d',\n revisions_per_page_distr.sum)\n cumulative_page_rev_size_distr = get_distributions_metric(\n result, 'cumulative_page_rev_size_distr')\n logging.log(LOG_LEVEL_OUTPUT_INFO,\n '* cumulative_page_rev_size_distr.mean: %d',\n cumulative_page_rev_size_distr.mean)\n logging.log(LOG_LEVEL_OUTPUT_INFO, '* cumulative_page_rev_size_distr.sum: %d',\n cumulative_page_rev_size_distr.sum)", "def print_metric(self):\r\n print(f'\\n\\n{self.sort} metric of size {self.n}')\r\n print(f'algorithm: {self.algo}')\r\n print(f'number of comparisons: {self.comps}')\r\n print(f'number of exchanges: {self.exs}')\r\n print(f'regression equation for comparisons: {self.comp_eq}')\r\n print(f'regression equation for exchanges: {self.ex_eq}')\r\n print(f'presorted data: {self.predata}')\r\n print(f'postsorted data: {self.postdata}')", "def print_performance_info(self):\n pass", "def print_stats():\n if spritegroup_stats[0] > 0:\n generic.print_info(\"Concurrent spritegroups: {}/{} ({})\".format(spritegroup_stats[0], total_action2_ids, str(spritegroup_stats[1])))\n if a2register_stats[0] > 0:\n generic.print_info(\"Concurrent Action2 registers: {}/{} ({})\".format(a2register_stats[0], total_tmp_locations, str(a2register_stats[1])))", "def report(self):\n\n job_summary = {}\n for job in self._jobs:\n \n if job.step_name not in job_summary:\n job_summary[ job.step_name ] = {}\n job_summary[ job.step_name ][ 'DONE' ] = 0\n job_summary[ job.step_name ][ 'RUNNING' ] = 0\n job_summary[ job.step_name ][ 'QUEUING' ] = 0\n job_summary[ job.step_name ][ 'FAILED' ] = 0\n job_summary[ job.step_name ][ 'UNKNOWN' ] = 0\n job_summary[ job.step_name ][ 'max_mem' ] = 0\n job_summary[ job.step_name ][ 'cputime' ] = 0\n\n if job.status == Job_status.FINISHED:\n job_summary[ job.step_name ][ 'DONE' ] += 1\n if job.cputime is not None:\n job_summary[ job.step_name ]['cputime'] += int(job.cputime)\n\n if job.max_memory is not None and job.max_memory > job_summary[ job.step_name ][ 'max_mem']:\n job_summary[ job.step_name ][ 'max_mem'] = int(job.max_memory)\n\n elif job.status == Job_status.RUNNING:\n job_summary[ job.step_name ][ 'RUNNING' ] += 1\n elif job.status == Job_status.QUEUEING or job.status == Job_status.SUBMITTED:\n job_summary[ job.step_name ][ 'QUEUING' ] += 1\n elif job.status == Job_status.FAILED or job.status == Job_status.NO_RESTART:\n job_summary[ job.step_name ][ 'FAILED' ] += 1\n else:\n job_summary[ job.step_name ][ 'UNKNOWN' ] += 1\n\n\n\n local_time = strftime(\"%d/%m/%Y %H:%M\", time.localtime())\n \n\n pickle_file = \"{}.{}\".format(self.pipeline.project_name, self.pipeline._pid)\n\n print(\"[{} @{} {}]\".format( 
local_time,self.pipeline._hostname , pickle_file))\n\n print(\"{:20} || {:12} || {:12} || {:2s} {:2s} {:2s} {:2s} {:2s}\".format(\"Run stats\", \"Runtime\", \"Max Mem\", \"D\",\"R\",\"Q\",\"F\",\"U\"))\n\n for step in sorted(self.pipeline._workflow._analysis_order, key=self.pipeline._workflow._analysis_order.__getitem__):\n if step not in job_summary:\n continue\n\n print(\"{:20} || {:12} || {:12} || {:02d}/{:02d}/{:02d}/{:02d}/{:02d}\".format(step, \n self.format_time(job_summary[ step ]['cputime']),\n self.format_memory(job_summary[ step ]['max_mem']),\n job_summary[ step ][ 'DONE' ],\n job_summary[ step ][ 'RUNNING' ],\n job_summary[ step ][ 'QUEUING' ],\n job_summary[ step ][ 'FAILED' ],\n job_summary[ step ][ 'UNKNOWN' ]))", "def print_metrics(self):\n # num times regular barcodes appear in a simulated doublet nearest neighbors, grouped by value\n # TODO: this list is 2 dimensional... need to extract dimensione with counts for the counter\n frequencies = [i[1] for i in self.num_times_knn]\n counter = collections.Counter(frequencies)\n print(\"##\\nNumber time barcoded in sim doub KNN: {}\".format(counter))\n\n # artificial fraction\n print(\"##\\nArtificial fraction: {}\".format(self.artificial_fraction))\n\n # num doublets\n print(\"##\\nNumber of doublets called: {}\".format(self.num_doublets))", "def build_show_statistics(ctx, args):\n for build_id in args:\n data = ctx.obj.get_build_statistics_by_build_id(build_id)\n output_json_data(data)", "def print_metrics(self):\n output = \"\"\n metrics = self.get_all_metrics()\n for k, v in metrics.items():\n # Print the help line\n output += \"\\n# HELP {name} {help}\\n\".format(name=v['name'],\n help=v['help'])\n # and the type line\n output += \"# TYPE {name} {type}\\n\".format(name=v['name'],\n type=v['type'])\n for sample in v['values']:\n labels = json.loads(sample, object_pairs_hook=OrderedDict)\n if v['type'] == 'histogram' and labels.get('le') == '_sum':\n labels.pop('le', None)\n mname = '{name}_sum'.format(name=v['name'])\n elif v['type'] == 'histogram' and labels.get('le') == '+Inf':\n labels.pop('le', None)\n mname = '{name}_count'.format(name=v['name'])\n elif v['type'] == 'histogram':\n mname = '{name}_bucket'.format(name=v['name'])\n else:\n mname = v['name']\n output += \"{name}{labels} {value}\\n\".format(name=mname,\n labels=self.format_labels(labels),\n value=self.format_value(v['values'][sample]))\n return output", "def printMachineStatOut():\n print(\"---------------MACHINES STATS --------------------------\\n\", file=out_file)\n for machine in machines_list:\n cur_job_list = machine.retrieveJobsList()\n print(\"machine number \", machine.number, \"assigned jobs [number,length,type]:\", file=out_file)\n l = []\n for job_number, job in cur_job_list.items():\n l.append(job)\n print(\"\".join(str(l)), file=out_file)\n\n print(\"Assigned types: \", machine.getTypes(), file=out_file)\n print(\"Types histogram: \", machine.types, \"Sum of each type: \", machine.types_sums, \"Makespan : \", machine.span,\n file=out_file)\n print(\"\\n\", file=out_file)\n print(\"Max makespan is : \", makeSpan(), file=out_file)", "def info(self):\n import string\n results = self.info_list()\n labels = \"%-8s %-9s %-4s %-8s %-8s %-4s\" % \\\n ('MACHINE','CPU','GHZ','MB TOTAL',\n 'MB FREE','LOAD')\n print labels\n for i in range(len(self.workers)):\n name = string.split(self.workers[i].host,'.')[0]\n res = results[i]\n s = \"%-8s %2dx%-6s %4.1f %8.1f %8.1f %4.2f\" % \\\n (name[-8:], res['cpu_count'],res['cpu_type'][-6:], \\\n 
res['cpu_speed'],res['mem_total'],res['mem_free'],\\\n res['load_1'])\n print s", "def display_metric(metrics_to_print, results, num_refs, args):\n for metric, result in zip(metrics_to_print, results):\n if metric == 'bleu':\n if args.score_only:\n print('{0:.{1}f}'.format(result.score, args.width))\n else:\n version_str = bleu_signature(args, num_refs)\n print(result.format(args.width).replace('BLEU', 'BLEU+' + version_str))\n\n elif metric == 'chrf':\n if args.score_only:\n print('{0:.{1}f}'.format(result.score, args.width))\n else:\n version_str = chrf_signature(args, num_refs)\n print('chrF{0:d}+{1} = {2:.{3}f}'.format(args.chrf_beta, version_str, result.score, args.width))", "def output_metrics(self):\n print('')\n for key in sorted(self.metrics):\n print('{}:'.format(key), end='')\n for k, v in self.metrics[key].items():\n if type(v[-1]) is list:\n print('\\t' + k + ': ' + ''.join('{:5.3f} '.format(vs) for vs in v[-1]), end='')\n else:\n print('\\t{}: {:5.3f}'.format(k, v[-1]), end='')\n print('\\n', end='')", "def printReport(self):\n\t\tself.app.printflush('Fetched: ' + str(self.fetched_count), self.app.IGNORE_EXIT_FLAG)\n\t\tself.app.printflush('Processes: ' + str(self.processes), self.app.IGNORE_EXIT_FLAG)\n\t\tself.app.printflush('Updated: ' + str(self.updated_count), self.app.IGNORE_EXIT_FLAG)\n\t\tself.app.printflush('Average page load time: ' + str(self.average_time), self.app.IGNORE_EXIT_FLAG)\n\t\tself.app.printflush('Returned with code: ' + repr(self.code_statistics), self.app.IGNORE_EXIT_FLAG)\n\t\tself.app.printflush('Closing Processes... ', self.app.IGNORE_EXIT_FLAG)", "def display_metrics3(self):\n messagebox.showinfo(\"Processed Image Metrics\", self.pro_metrics)", "def main():\n configuration = {'resource-folder': 'resources',\n 'build-folder': 'build',\n 'log-folder': 'logfiles',\n 'use-preloaded': False,\n 'addi-metrics': 'addi-metrics.json',\n 'jenkins': {'dependency-filename': 'dependencies.txt',\n 'server': 'http://is.dbc.dk',\n 'repository-project': 'opensearch-3rd-party-dependencies'},\n 'log-zip-file':'logs.zip'}\n configuration.update(cli())\n setup_logger(configuration['verbose'])\n run_performance_test(configuration)", "def info():\n return buildcat.info()", "def __print_metrics_info(self, occurrence_metric):\n print(\" Name: \", self.get_metric_name(occurrence_metric))\n print(\" Type: Metric\")\n print(\" Description:\",\n self.get_metric_description(occurrence_metric))\n print(\" Formula: \", self.get_metric_formula(occurrence_metric))\n return 0", "def getBuild():", "def print_status(metrics, step, metric_names=[]):\n printf = {'train': '', 'valid': ''}\n values = {'train': [], 'valid': []} \n\n for name in metric_names:\n for mode in ['train', 'valid']:\n printf[mode] += '- %s : %s ' % (name, '%0.4f')\n values[mode].append(metrics[mode][name])\n\n printf = '%s | TRAIN %s | VALID %s' % ('%07i', printf['train'], printf['valid'])\n values = [step] + values['train'] + values['valid']\n\n print(printf % tuple(values), end='\\r')", "def debug(self):\n \n #path\n print('Path information:')\n for k, v in self.__path.items():\n print(k, v)\n \n #sample count\n print('Sample statistic of each phase')\n for k, v in self.__phase_sample_count.items():\n print(k, v)\n \n print('Sample statistic of each class')\n for k, v in self.__area_sample_count.items():\n print(k, v)\n \n print('Sample statistic of each train')\n for k, v in self.__train_sample_count.items():\n print(k, v)", "def test_api_build_metrics_get(self):\n default_api = 
DefaultApi(api_client=self.api_client)\n params = dlrnapi_client.MetricsRequest()\n path, method = default_api.api_build_metrics_get(params)\n self.assertEqual(path, '/api/metrics/builds')\n self.assertEqual(method, 'GET')", "def compute_and_print_eval_metrics(self):\n s = ('%20s' + '%12s' * 6) % ('Class', 'Images', 'Targets', 'P', 'R', '[email protected]', '[email protected]:.95')\n precision, recall, f1, mean_precision, mean_recall, map50, map = 0., 0., 0., 0., 0., 0., 0.\n ap = []\n eval_stats = [np.concatenate(x, 0) for x in zip(*self.eval_stats)]\n if len(eval_stats) and eval_stats[0].any():\n precision, recall, ap, f1, ap_class = ap_per_class(*eval_stats)\n precision, recall, ap50, ap = precision[:, 0], recall[:, 0], ap[:, 0], ap.mean(1)\n mean_precision, mean_recall, map50, map = precision.mean(), recall.mean(), ap50.mean(), ap.mean()\n nt = np.bincount(eval_stats[3].astype(np.int64), minlength=len(self.class_names)) # number of targets per class\n else:\n nt = np.zeros(1)\n\n pf = '%20s' + '%12.5g' * 6 # print format\n print(\"\\n EVALUTAION \\n\")\n print(s)\n print(pf % ('all', self.seen, nt.sum(), mean_precision, mean_recall, map50, map))\n if self.cfg.eval.verbose:\n for indx, cls in enumerate(ap_class):\n print(pf % (self.class_names[cls], self.seen, nt[cls], precision[indx], recall[indx], ap50[indx], ap[indx]))", "def printReport(self): \n \n print('Distribution: ', self._distribution_type)\n print('Distribution Type: ', str(self._measure_type).replace('MeasureType.','')) \n print('Type Detection Match: ', str(self._measure_type_match))\n print('MLE: ', str(self._mle))\n print('Goodness of Fit: ', str(self._gof)) \n print('Goodness of Fit Pass: ', str(self._pass)) \n print('Overall Score: ', str(self._score)) \n print('-------------')", "def report_build_progress(self, build_id, current, total, group_name='',\n status_line=''):\n pass", "def print_tracker(self) -> None:\n messages = []\n for (bracket, stage), others in self._tracker.items():\n counter = 0\n for _, config_ids in others:\n counter += len(config_ids)\n\n if counter > 0:\n messages.append(f\"--- Bracket {bracket} / Stage {stage}: {counter} configs\")\n\n if len(messages) > 0:\n logger.debug(f\"{self.__class__.__name__} statistics:\")\n\n for message in messages:\n logger.debug(message)", "def __str__(self):\n\t\tprint \"generating graph stats...\\n\"\n\t\tstart_time = time.time()\n\t\tbf = self.branching_factor()\n\t\treq_time = float(time.time() - start_time)\n\t\tb = str(bf[0])\n\t\tn = str(bf[1])\n\t\terr = str(100.0 * float(self.default_num_samples-bf[1])/self.default_num_samples)\n\t\tsize = str(self.num_articles())\n\t\tpg_time = str(req_time/bf[1])\n\t\tt = str(time.time() - start_time)\n\t\treturn \"_______Wikipedia Graph Stats_______\\n\" + \\\n\t\t\t\t\"# of nodes:\\t\\t\"+size+\"\\n\" + \\\n\t\t\t\t\"Avg. branching factor\\t\"+b+\"\\n\" + \\\n\t\t\t\t\"\\t\\t\\t(n=\"+n+\")\\n\" + \\\n\t\t\t\t\"Page Req. Fail Rate:\\t\"+err+\"%\\n\" + \\\n\t\t\t\t\"Avg. Page Req. Time:\\t\"+pg_time+\" sec\\n\" + \\\n\t\t\t\t\"<stats generated in \"+t+ \" sec>\"", "def print_api_speed():\n build()\n sh(\"%s -Wa scripts\\\\internal\\\\print_api_speed.py\" % PYTHON)", "def stats(self):\n print(self.name, self.workdays, self.cleanCount, self.workEnd)", "def report(self):\n print(f\"Water: {self.resources['water']}ml\")\n print(f\"Milk: {self.resources['milk']}ml\")\n print(f\"Coffee: {self.resources['coffee']}g\")", "def get_build_progress_info(self, build_id):\n pass" ]
[ "0.66079473", "0.6216928", "0.61909086", "0.6101186", "0.6092159", "0.60817313", "0.60801643", "0.60618484", "0.6012806", "0.60062146", "0.5972601", "0.59628373", "0.590147", "0.5889636", "0.5878473", "0.5874653", "0.5874362", "0.58222145", "0.5803442", "0.57945627", "0.5789596", "0.5772694", "0.57369894", "0.56960636", "0.5693701", "0.5672504", "0.5666965", "0.5664438", "0.5656021", "0.56538707" ]
0.7843801
0
A hypothesis decorator to return a list of strategies
def strategy_lists(
    draw,
    strategies=axl.short_run_time_strategies,
    min_size=1,
    max_size=len(axl.short_run_time_strategies),
):
    strategies = draw(
        lists(sampled_from(strategies), min_size=min_size, max_size=max_size)
    )
    return strategies
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def generate():\n strategies = []\n\n # Add RFind strategies (2 meta-strategies P0 and P'0 for each)\n limits=[50, 20, 10]\n sources = ['his', 'our', 'dna']\n for limit in limits:\n for source in sources:\n strategies.extend(\n generate_meta_strategy_pair(\n RFindStrategy,\n limit=limit,\n src=source,\n shenanigans=False,\n ))\n\n # Add decision tree strategies\n strategies.extend(\n generate_meta_strategy_pair(DecisionTreeStrategy))\n\n # Add HPS Dojo strategies\n strategies.extend(\n generate_meta_strategy_pair(HPSDojoStrategy))\n\n # Add testing please ignore strategies\n strategies.extend(\n generate_meta_strategy_pair(TestingPlsIgnoreStrategy))\n\n do_rotations = [True for _ in strategies]\n return strategies, do_rotations", "def generate():\n strategies = []\n strategies.extend(\n generate_meta_strategy_pair(\n StatisticalPredictionStrategy))\n do_rotations = [True for _ in strategies]\n return strategies, do_rotations", "def generate():\n strategies = []\n\n # Add RFind strategies (2 meta-strategies P0 and P'0 for each)\n limits=[50, 20, 10]\n sources = ['his', 'our', 'dna']\n\n strategies.extend([\n generate_meta_strategy_pair(\n WrappedRFindStrategy,\n limits=limits,\n sources=sources,\n shenanigans=False)[0]])\n\n # Add decision tree strategies\n strategies.extend(\n generate_meta_strategy_pair(DecisionTreeStrategy))\n\n # Add HPS Dojo strategies\n strategies.extend(\n generate_meta_strategy_pair(HPSDojoStrategy))\n\n # Add testing please ignore strategies\n strategies.extend(\n generate_meta_strategy_pair(TestingPlsIgnoreStrategy))\n\n do_rotations = [True for _ in strategies]\n\n # Anti Trivial\n strategies.extend(\n generate_meta_strategy_pair(\n AntiTrivialStrategy, mirroring=False))\n do_rotations.extend([False])\n\n return strategies, do_rotations", "def generate():\n strategies = []\n\n # Add RFind strategies (2 meta-strategies P0 and P'0 for each)\n limits=[50, 20, 10]\n sources = ['his', 'our', 'dna']\n for limit in limits:\n for source in sources:\n strategies.extend(\n generate_meta_strategy_pair(\n RFindStrategy,\n limit=limit,\n src=source,\n shenanigans=False,\n ))\n\n # Add decision tree strategies\n strategies.extend(\n generate_meta_strategy_pair(DecisionTreeV10Strategy))\n\n # Add HPS Dojo strategies\n strategies.extend(\n generate_meta_strategy_pair(HPSDojoStrategy))\n\n # Add testing please ignore strategies\n strategies.extend(\n generate_meta_strategy_pair(TestingPlsIgnoreStrategy))\n\n do_rotations = [True for _ in strategies]\n return strategies, do_rotations", "def generate():\n strategies = []\n\n # Add RFind strategies (2 meta-strategies P0 and P'0 for each)\n limits=[50, 20, 10]\n sources = ['his', 'our', 'dna']\n\n strategies.extend(\n generate_meta_strategy_pair(\n WrappedRFindStrategy,\n limits=limits,\n sources=sources,\n shenanigans=False))\n\n # Add decision tree strategies\n strategies.extend(\n generate_meta_strategy_pair(DecisionTreeStrategy))\n\n # Add HPS Dojo strategies\n strategies.extend(\n generate_meta_strategy_pair(HPSDojoStrategy))\n\n # Add testing please ignore strategies\n strategies.extend(\n generate_meta_strategy_pair(TestingPlsIgnoreStrategy))\n\n do_rotations = [True for _ in strategies]\n\n # Anti Trivial\n strategies.extend(\n generate_meta_strategy_pair(\n AntiTrivialStrategy, mirroring=False))\n do_rotations.extend([False])\n\n return strategies, do_rotations", "def generate():\n strategies = []\n\n # Add RFind strategies (2 meta-strategies P0 and P'0 for each)\n limits=[50, 20, 10]\n sources = ['his', 'our', 'dna']\n\n 
strategies.extend(\n generate_meta_strategy_pair(\n WrappedRFindStrategy,\n limits=limits,\n sources=sources,\n shenanigans=False))\n\n # Add decision tree strategies\n strategies.extend(\n generate_meta_strategy_pair(DecisionTreeV10Strategy))\n\n # Add HPS Dojo strategies\n strategies.extend(\n generate_meta_strategy_pair(HPSDojoStrategy))\n\n # Add testing please ignore strategies\n strategies.extend(\n generate_meta_strategy_pair(TestingPlsIgnoreStrategy))\n\n do_rotations = [True for _ in strategies]\n\n # Anti Trivial\n strategies.extend(\n generate_meta_strategy_pair(\n AntiTrivialStrategy, mirroring=False))\n do_rotations.extend([False])\n\n return strategies, do_rotations", "def generate():\n strategies = []\n\n # Add RFind strategies (2 meta-strategies P0 and P'0 for each)\n limits=[50, 20, 10]\n sources = ['his', 'our', 'dna']\n strategies.extend(\n generate_meta_strategy_pair(\n WrappedRFindStrategy,\n limits=limits,\n sources=sources,\n shenanigans=False))\n\n # Add decision tree strategies\n strategies.extend(\n generate_meta_strategy_pair(DecisionTreeStrategy))\n\n # Add HPS Dojo strategies\n strategies.extend(\n generate_meta_strategy_pair(HPSDojoStrategy))\n\n # Add testing please ignore strategies\n strategies.extend(\n generate_meta_strategy_pair(TestingPlsIgnoreStrategy))\n\n # Add testimono strategy\n strategies.extend(\n generate_meta_strategy_pair(TestimonoStrategy))\n\n # By default, rotate everything\n do_rotations = [True for _ in strategies]\n\n # Anti Trivial\n strategies.extend(\n generate_meta_strategy_pair(\n AntiTrivialStrategy, mirroring=False))\n do_rotations.extend([False])\n\n return strategies, do_rotations", "def generate():\n strategies = []\n\n # Add RFind strategies (2 meta-strategies P0 and P'0 for each)\n limits=[50, 20, 10]\n sources = ['his', 'our', 'dna']\n for limit in limits:\n for source in sources:\n strategies.extend(\n generate_meta_strategy_pair(\n RFindStrategy,\n limit=limit,\n src=source,\n shenanigans=False,\n ))\n\n # Add decision tree strategies\n strategies.extend(\n generate_meta_strategy_pair(DecisionTreeV10Strategy))\n\n # Add HPS Dojo strategies\n strategies.extend(\n generate_meta_strategy_pair(HPSDojoStrategy))\n\n # Add testing please ignore strategies\n strategies.extend(\n generate_meta_strategy_pair(TestingPlsIgnoreStrategy))\n\n # Add RPS Geometry\n strategies.extend(\n generate_meta_strategy_pair(\n GeometryV4Strategy))\n\n do_rotations = [True for _ in strategies]\n\n # Anti Trivial\n strategies.extend(\n generate_meta_strategy_pair(\n AntiTrivialStrategy, mirroring=False))\n do_rotations.extend([False])\n\n return strategies, do_rotations", "def generate():\n strategies = []\n\n # Add RFind strategies (2 meta-strategies P0 and P'0 for each)\n limits=[50, 20, 10]\n sources = ['his', 'our', 'dna']\n strategies.extend(\n generate_meta_strategy_pair(\n WrappedRFindStrategy,\n limits=limits,\n sources=sources,\n shenanigans=False))\n\n # Add decision tree strategies\n strategies.extend(\n generate_meta_strategy_pair(DecisionTreeV10Strategy))\n\n # Add HPS Dojo strategies\n strategies.extend(\n generate_meta_strategy_pair(HPSDojoStrategy))\n\n # Add testing please ignore strategies\n strategies.extend(\n generate_meta_strategy_pair(TestingPlsIgnoreStrategy))\n\n # Add testimono strategy\n strategies.extend(\n generate_meta_strategy_pair(TestimonoStrategy))\n\n # By default, rotate everything\n do_rotations = [True for _ in strategies]\n\n # Anti Trivial\n strategies.extend(\n generate_meta_strategy_pair(\n 
AntiTrivialStrategy, mirroring=False))\n do_rotations.extend([False])\n\n return strategies, do_rotations", "def generate():\n strategies = []\n\n # Add Centrifugal Bumblepuppy 16+H (RFind based)\n strategies.extend(\n generate_meta_strategy_pair(CentrifugalBumblepuppy16h))\n\n # Add decision tree strategies\n strategies.extend(\n generate_meta_strategy_pair(DecisionTreeStrategy))\n\n # Add HPS Dojo strategies\n strategies.extend(\n generate_meta_strategy_pair(HPSDojoStrategy))\n\n # Add testing please ignore strategies\n strategies.extend(\n generate_meta_strategy_pair(TestingPlsIgnoreStrategy))\n\n do_rotations = [True for _ in strategies]\n\n # Anti Trivial\n strategies.extend(\n generate_meta_strategy_pair(\n AntiTrivialStrategy, mirroring=False))\n do_rotations.extend([False])\n\n return strategies, do_rotations", "def generate():\n strategies = []\n\n # Add RFind strategies (2 meta-strategies P0 and P'0 for each)\n limits=[50, 20, 10]\n sources = ['his', 'our', 'dna']\n strategies.extend(\n generate_meta_strategy_pair(\n WrappedRFindStrategy,\n limits=limits,\n sources=sources,\n shenanigans=False))\n\n # Add decision tree strategies\n strategies.extend(\n generate_meta_strategy_pair(DecisionTreeV10Strategy))\n\n # Add HPS Dojo strategies\n strategies.extend(\n generate_meta_strategy_pair(HPSDojoStrategy))\n\n # Add testing please ignore strategies\n strategies.extend(\n generate_meta_strategy_pair(TestingPlsIgnoreStrategy))\n\n # Add memory pattern strategies\n strategies.extend(\n generate_meta_strategy_pair(MemoryPatternsV7Strategy))\n\n do_rotations = [True for _ in strategies]\n return strategies, do_rotations", "def generate():\n strategies = []\n\n # Add Centrifugal Bumblepuppy 16+H (RFind based)\n strategies.extend(\n generate_meta_strategy_pair(CentrifugalBumblepuppy16h))\n\n # Add decision tree strategies\n strategies.extend(\n generate_meta_strategy_pair(DecisionTreeV10Strategy))\n\n # Add HPS Dojo strategies\n strategies.extend(\n generate_meta_strategy_pair(HPSDojoStrategy))\n\n # Add testing please ignore strategies\n strategies.extend(\n generate_meta_strategy_pair(TestingPlsIgnoreStrategy))\n\n do_rotations = [True for _ in strategies]\n\n # Anti Trivial\n strategies.extend(\n generate_meta_strategy_pair(\n AntiTrivialStrategy, mirroring=False))\n do_rotations.extend([False])\n\n return strategies, do_rotations", "def generate():\n strategies = []\n\n # Add RFind strategies (2 meta-strategies P0 and P'0 for each)\n limits=[50, 20, 10]\n sources = ['his', 'our', 'dna']\n strategies.extend(\n generate_meta_strategy_pair(\n WrappedRFindStrategy,\n limits=limits,\n sources=sources,\n shenanigans=False))\n\n # Add decision tree strategies\n strategies.extend(\n generate_meta_strategy_pair(DecisionTreeStrategy))\n\n # Add HPS Dojo strategies\n strategies.extend(\n generate_meta_strategy_pair(HPSDojoStrategy))\n\n # Add testing please ignore strategies\n strategies.extend(\n generate_meta_strategy_pair(TestingPlsIgnoreStrategy))\n\n # Add testimono strategy\n strategies.extend(\n generate_meta_strategy_pair(MemoryPatternsV7Strategy))\n\n # By default, rotate everything\n do_rotations = [True for _ in strategies]\n\n # Anti Trivial\n strategies.extend(\n generate_meta_strategy_pair(\n AntiTrivialStrategy, mirroring=False))\n do_rotations.extend([False])\n\n return strategies, do_rotations", "def generate():\n strategies = []\n\n # Add RFind strategies (2 meta-strategies P0 and P'0 for each)\n limits=[50, 20, 10]\n sources = ['his', 'our', 'dna']\n strategies.extend(\n 
generate_meta_strategy_pair(\n WrappedRFindStrategy,\n limits=limits,\n sources=sources,\n shenanigans=False))\n\n # Add decision tree strategies\n strategies.extend(\n generate_meta_strategy_pair(DecisionTreeStrategy))\n\n # Add HPS Dojo strategies\n strategies.extend(\n generate_meta_strategy_pair(HPSDojoStrategy))\n\n # Add testing please ignore strategies\n strategies.extend(\n generate_meta_strategy_pair(TestingPlsIgnoreStrategy))\n\n # Add testimono strategy\n strategies.extend(\n generate_meta_strategy_pair(TestimonoStrategy))\n\n # Add memory patterns v7\n strategies.extend(\n generate_meta_strategy_pair(MemoryPatternsV7Strategy))\n\n # By default, rotate everything\n do_rotations = [True for _ in strategies]\n\n # Anti Trivial\n strategies.extend(\n generate_meta_strategy_pair(\n AntiTrivialStrategy, mirroring=False))\n do_rotations.extend([False])\n\n return strategies, do_rotations", "def generate():\n strategies = []\n\n # Add RFind strategies (2 meta-strategies P0 and P'0 for each)\n limits=[50, 20, 10]\n sources = ['his', 'our', 'dna']\n for limit in limits:\n for source in sources:\n strategies.extend(\n generate_meta_strategy_pair(\n RFindStrategy,\n limit=limit,\n src=source,\n shenanigans=False,\n ))\n\n # Add decision tree strategies\n strategies.extend(\n generate_meta_strategy_pair(DecisionTreeStrategy))\n\n # Add decision tree strategies\n strategies.extend(\n generate_meta_strategy_pair(DecisionTreeV10Strategy))\n\n # Add HPS Dojo strategies\n strategies.extend(\n generate_meta_strategy_pair(HPSDojoStrategy))\n\n # Add testing please ignore strategies\n strategies.extend(\n generate_meta_strategy_pair(TestingPlsIgnoreStrategy))\n\n # Add testimono strategy\n strategies.extend(\n generate_meta_strategy_pair(TestimonoStrategy))\n\n # Add RPS Geometry\n strategies.extend(\n generate_meta_strategy_pair(GeometryV4Strategy))\n\n # By default, rotate everything\n do_rotations = [True for _ in strategies]\n\n # Anti Trivial\n strategies.extend(\n generate_meta_strategy_pair(\n AntiTrivialStrategy, mirroring=False))\n do_rotations.extend([False])\n\n return strategies, do_rotations", "def strategy(func):\n strategies.append(func)\n return func", "def generate():\n strategies = []\n\n # Add Centrifugal Bumblepuppy 16+H (RFind based)\n strategies.extend(\n generate_meta_strategy_pair(CentrifugalBumblepuppy16h))\n\n # Add decision tree strategies\n strategies.extend(\n generate_meta_strategy_pair(DecisionTreeStrategy))\n\n # Add decision tree strategies\n strategies.extend(\n generate_meta_strategy_pair(DecisionTreeV10Strategy))\n\n # Add HPS Dojo strategies\n strategies.extend(\n generate_meta_strategy_pair(HPSDojoStrategy))\n\n # Add testing please ignore strategies\n strategies.extend(\n generate_meta_strategy_pair(TestingPlsIgnoreStrategy))\n\n # Add testimono strategy\n strategies.extend(\n generate_meta_strategy_pair(TestimonoStrategy))\n\n # Add RPS Geometry\n strategies.extend(\n generate_meta_strategy_pair(GeometryV4Strategy))\n\n # Add Iocaine Powder strategy\n strategies.extend(\n generate_meta_strategy_pair(IocanePowderStrategy))\n\n # By default, rotate everything\n do_rotations = [True for _ in strategies]\n\n # Anti Trivial\n strategies.extend(\n generate_meta_strategy_pair(\n AntiTrivialStrategy, mirroring=False))\n do_rotations.extend([False])\n\n return strategies, do_rotations", "def generate():\n strategies = []\n\n # Add RFind strategies (2 meta-strategies P0 and P'0 for each)\n limits=[50, 20, 10]\n sources = ['his', 'our', 'dna']\n for limit in limits:\n 
for source in sources:\n strategies.extend(\n generate_meta_strategy_pair(\n RFindStrategy,\n limit=limit,\n src=source,\n shenanigans=False,\n ))\n\n # Add Centrifugal Bumblepuppy 16+H (RFind based)\n strategies.extend(\n generate_meta_strategy_pair(CentrifugalBumblepuppy16h))\n\n # Add decision tree strategies\n strategies.extend(\n generate_meta_strategy_pair(DecisionTreeStrategy))\n\n # Add decision tree strategies\n strategies.extend(\n generate_meta_strategy_pair(DecisionTreeV10Strategy))\n\n # Add HPS Dojo strategies\n strategies.extend(\n generate_meta_strategy_pair(HPSDojoStrategy))\n\n # Add testing please ignore strategies\n strategies.extend(\n generate_meta_strategy_pair(TestingPlsIgnoreStrategy))\n\n # Add testimono strategy\n strategies.extend(\n generate_meta_strategy_pair(TestimonoStrategy))\n\n # Add RPS Geometry\n strategies.extend(\n generate_meta_strategy_pair(GeometryV4Strategy))\n\n # Add Iocaine Powder strategy\n strategies.extend(\n generate_meta_strategy_pair(IocanePowderStrategy))\n\n # By default, rotate everything\n do_rotations = [True for _ in strategies]\n\n # Anti Trivial\n strategies.extend(\n generate_meta_strategy_pair(\n AntiTrivialStrategy, mirroring=False))\n do_rotations.extend([False])\n\n return strategies, do_rotations", "def generate():\n strategies = []\n\n # Add RFind strategies (2 meta-strategies P0 and P'0 for each)\n limits=[50, 20, 10]\n sources = ['his', 'our', 'dna']\n for limit in limits:\n for source in sources:\n strategies.extend(\n generate_meta_strategy_pair(\n RFindStrategy,\n limit=limit,\n src=source,\n shenanigans=False,\n ))\n\n # Add Centrifugal Bumblepuppy 16+H (RFind based)\n strategies.extend(\n generate_meta_strategy_pair(CentrifugalBumblepuppy16h))\n\n # Add decision tree strategies\n strategies.extend(\n generate_meta_strategy_pair(DecisionTreeStrategy))\n\n # Add decision tree strategies\n strategies.extend(\n generate_meta_strategy_pair(DecisionTreeV10Strategy))\n\n # Add HPS Dojo strategies\n strategies.extend(\n generate_meta_strategy_pair(HPSDojoStrategy))\n\n # Add testing please ignore strategies\n strategies.extend(\n generate_meta_strategy_pair(TestingPlsIgnoreStrategy))\n\n # Add testimono strategy\n strategies.extend(\n generate_meta_strategy_pair(TestimonoStrategy))\n\n # Add RPS Geometry\n strategies.extend(\n generate_meta_strategy_pair(GeometryV4Strategy))\n\n # Add Iocaine Powder strategy\n strategies.extend(\n generate_meta_strategy_pair(IocanePowderStrategy))\n\n # By default, rotate everything\n do_rotations = [True for _ in strategies]\n\n # Anti Trivial\n strategies.extend(\n generate_meta_strategy_pair(\n AntiTrivialStrategy, mirroring=False))\n do_rotations.extend([False])\n\n return strategies, do_rotations", "def generate():\n strategies = []\n\n # Add Centrifugal Bumblepuppy 16+H (RFind based)\n strategies.extend(\n generate_meta_strategy_pair(CentrifugalBumblepuppy16h))\n\n # Add decision tree strategies\n strategies.extend(\n generate_meta_strategy_pair(DecisionTreeStrategy))\n\n # Add decision tree strategies\n strategies.extend(\n generate_meta_strategy_pair(DecisionTreeV10Strategy))\n\n # Add HPS Dojo strategies\n strategies.extend(\n generate_meta_strategy_pair(HPSDojoStrategy))\n\n # Add testing please ignore strategies\n strategies.extend(\n generate_meta_strategy_pair(TestingPlsIgnoreStrategy))\n\n # Add testimono strategy\n strategies.extend(\n generate_meta_strategy_pair(TestimonoStrategy))\n\n # Add RPS Geometry\n strategies.extend(\n 
generate_meta_strategy_pair(GeometryV4Strategy))\n\n # By default, rotate everything\n do_rotations = [True for _ in strategies]\n\n # Anti Trivial\n strategies.extend(\n generate_meta_strategy_pair(\n AntiTrivialStrategy, mirroring=False))\n do_rotations.extend([False])\n\n return strategies, do_rotations", "def generate():\n strategies = []\n\n # Add Centrifugal Bumblepuppy 16+H (RFind based)\n strategies.extend(\n generate_meta_strategy_pair(CentrifugalBumblepuppy16h))\n\n # Add Centrifugal Bumblepuppy 16+H (RFind based)\n strategies.extend(\n generate_meta_strategy_pair(CentrifugalBumblepuppy4))\n\n # Add decision tree strategies\n strategies.extend(\n generate_meta_strategy_pair(DecisionTreeStrategy))\n\n # Add decision tree strategies\n strategies.extend(\n generate_meta_strategy_pair(DecisionTreeV10Strategy))\n\n # Add HPS Dojo strategies\n strategies.extend(\n generate_meta_strategy_pair(HPSDojoStrategy))\n\n # Add testing please ignore strategies\n strategies.extend(\n generate_meta_strategy_pair(TestingPlsIgnoreStrategy))\n\n # Add testimono strategy\n strategies.extend(\n generate_meta_strategy_pair(TestimonoStrategy))\n\n # Add RPS Geometry\n strategies.extend(\n generate_meta_strategy_pair(GeometryV4Strategy))\n\n # Add Iocaine Powder strategy\n strategies.extend(\n generate_meta_strategy_pair(IocanePowderStrategy))\n\n # Add are you a lucker strategies\n strategies.extend(\n generate_meta_strategy_pair(AreYouALuckerStrategy))\n\n # By default, rotate everything\n do_rotations = [True for _ in strategies]\n\n # Anti Trivial\n strategies.extend(\n generate_meta_strategy_pair(\n AntiTrivialStrategy, mirroring=False))\n do_rotations.extend([False])\n\n return strategies, do_rotations", "def generate():\n strategies = []\n\n # Add Centrifugal Bumblepuppy 16+H (RFind based)\n strategies.extend(\n generate_meta_strategy_pair(CentrifugalBumblepuppy16h))\n\n # Add Centrifugal Bumblepuppy 16+H (RFind based)\n strategies.extend(\n generate_meta_strategy_pair(CentrifugalBumblepuppy4))\n\n # Add decision tree strategies\n strategies.extend(\n generate_meta_strategy_pair(DecisionTreeStrategy))\n\n # Add decision tree strategies\n strategies.extend(\n generate_meta_strategy_pair(DecisionTreeV10Strategy))\n\n # Add HPS Dojo strategies\n strategies.extend(\n generate_meta_strategy_pair(HPSDojoStrategy))\n\n # Add testing please ignore strategies\n strategies.extend(\n generate_meta_strategy_pair(TestingPlsIgnoreStrategy))\n\n # Add testimono strategy\n strategies.extend(\n generate_meta_strategy_pair(TestimonoStrategy))\n\n # Add RPS Geometry\n strategies.extend(\n generate_meta_strategy_pair(GeometryV4Strategy))\n\n # Add Iocaine Powder strategy\n strategies.extend(\n generate_meta_strategy_pair(IocanePowderStrategy))\n\n # Add are you a lucker strategies\n strategies.extend(\n generate_meta_strategy_pair(AreYouALuckerStrategy))\n\n # Add Greenberg strategies\n strategies.extend(\n generate_meta_strategy_pair(GreenbergStrategy))\n\n # Add RPS Meta Fix strategies\n strategies.extend(\n generate_meta_strategy_pair(RPSMetaFixStrategy))\n\n # By default, rotate everything\n do_rotations = [True for _ in strategies]\n\n # Anti Trivial\n strategies.extend(\n generate_meta_strategy_pair(\n AntiTrivialStrategy, mirroring=False))\n do_rotations.extend([False])\n\n return strategies, do_rotations", "def tests_in(strategy):\n retval = []\n for pri, test, args in strategy:\n if test not in retval:\n retval.append(test)\n return retval", "def generate():\n strategies = []\n\n # Add Centrifugal 
Bumblepuppy 16+H (RFind based)\n strategies.extend(\n generate_meta_strategy_pair(CentrifugalBumblepuppy16h))\n\n # Add Centrifugal Bumblepuppy 16+H (RFind based)\n strategies.extend(\n generate_meta_strategy_pair(CentrifugalBumblepuppy4))\n\n # Add decision tree strategies\n strategies.extend(\n generate_meta_strategy_pair(DecisionTreeStrategy))\n\n # Add decision tree strategies\n strategies.extend(\n generate_meta_strategy_pair(DecisionTreeV10Strategy))\n\n # Add HPS Dojo strategies\n strategies.extend(\n generate_meta_strategy_pair(HPSDojoStrategy))\n\n # Add testing please ignore strategies\n strategies.extend(\n generate_meta_strategy_pair(TestingPlsIgnoreStrategy))\n\n # Add testimono strategy\n strategies.extend(\n generate_meta_strategy_pair(TestimonoStrategy))\n\n # Add RPS Geometry\n strategies.extend(\n generate_meta_strategy_pair(GeometryV4Strategy))\n\n # Add Iocaine Powder strategy\n strategies.extend(\n generate_meta_strategy_pair(IocanePowderStrategy))\n\n # Add are you a lucker strategies\n strategies.extend(\n generate_meta_strategy_pair(AreYouALuckerStrategy))\n\n # Add RPS Meta Fix strategies\n strategies.extend(\n generate_meta_strategy_pair(RPSMetaFixStrategy))\n\n # By default, rotate everything\n do_rotations = [True for _ in strategies]\n\n # Anti Trivial\n strategies.extend(\n generate_meta_strategy_pair(\n AntiTrivialStrategy, mirroring=False))\n do_rotations.extend([False])\n\n return strategies, do_rotations", "def generate():\n strategies = []\n\n # Add Centrifugal Bumblepuppy 16+H (RFind based)\n strategies.extend(\n generate_meta_strategy_pair(CentrifugalBumblepuppy16h))\n\n # Add Centrifugal Bumblepuppy 16+H (RFind based)\n strategies.extend(\n generate_meta_strategy_pair(CentrifugalBumblepuppy4))\n\n # Add decision tree strategies\n strategies.extend(\n generate_meta_strategy_pair(DecisionTreeStrategy))\n\n # Add decision tree strategies\n strategies.extend(\n generate_meta_strategy_pair(DecisionTreeV10Strategy))\n\n # Add HPS Dojo strategies\n strategies.extend(\n generate_meta_strategy_pair(HPSDojoStrategy))\n\n # Add testing please ignore strategies\n strategies.extend(\n generate_meta_strategy_pair(TestingPlsIgnoreStrategy))\n\n # Add testimono strategy\n strategies.extend(\n generate_meta_strategy_pair(TestimonoStrategy))\n\n # Add RPS Geometry\n strategies.extend(\n generate_meta_strategy_pair(GeometryV4Strategy))\n\n # Add Iocaine Powder strategy\n strategies.extend(\n generate_meta_strategy_pair(IocanePowderStrategy))\n\n # By default, rotate everything\n do_rotations = [True for _ in strategies]\n\n # Anti Trivial\n strategies.extend(\n generate_meta_strategy_pair(\n AntiTrivialStrategy, mirroring=False))\n do_rotations.extend([False])\n\n return strategies, do_rotations", "def generate():\n strategies = []\n\n # Add RFind strategies (2 meta-strategies P0 and P'0 for each)\n limits=[50, 20, 10]\n sources = ['his', 'our', 'dna']\n for limit in limits:\n for source in sources:\n strategies.extend(\n generate_meta_strategy_pair(\n RFindStrategy,\n limit=limit,\n src=source,\n shenanigans=True,\n ))\n do_rotations = [True for _ in strategies]\n return strategies, do_rotations", "def generate():\n strategies, do_rotations = [], []\n\n # Add Centrifugal Bumblepuppy 16+H (RFind based)\n strategies.extend(\n generate_meta_strategy_pair(\n CentrifugalBumblepuppy16h,\n mirroring=False))\n do_rotations.extend([False])\n\n # Add decision tree strategies\n strategies.extend(\n generate_meta_strategy_pair(DecisionTreeStrategy))\n do_rotations.extend([True, 
True])\n\n # Add HPS Dojo strategies\n strategies.extend(\n generate_meta_strategy_pair(HPSDojoStrategy))\n do_rotations.extend([True, True])\n\n # Add testing please ignore strategies\n strategies.extend(\n generate_meta_strategy_pair(TestingPlsIgnoreStrategy))\n do_rotations.extend([True, True])\n\n # Anti Trivial\n strategies.extend(\n generate_meta_strategy_pair(\n AntiTrivialStrategy, mirroring=False))\n do_rotations.extend([False])\n\n return strategies, do_rotations", "def generate():\n strategies = []\n strategies.extend(\n generate_meta_strategy_pair(GeobotBeaterStrategy))\n do_rotations = [True for _ in strategies]\n return strategies, do_rotations", "def get_scenarios(experiments):\n return {exp.scenario for exp in experiments}", "def _get_strategies(self) -> Dict[str, str]:\n strategies = [method for method in dir(self) if STRATEGY_IDENTIFIER in method]\n\n if not strategies:\n logger.warning(\n \"There are no strategy provided. \"\n \"Make sure the implemented strategy methods \"\n \"start contain the '%s' term.\" % STRATEGY_IDENTIFIER\n )\n return {str(n_method): method for n_method, method in enumerate(strategies)}" ]
[ "0.6420899", "0.64178324", "0.6409105", "0.6397985", "0.63713485", "0.6353364", "0.6328641", "0.63274527", "0.63161695", "0.6315755", "0.6308805", "0.63070786", "0.6305257", "0.62779176", "0.62447643", "0.62254936", "0.62023383", "0.6199375", "0.6199375", "0.61949295", "0.6163725", "0.61621624", "0.6158668", "0.6147621", "0.6129674", "0.6083227", "0.6019635", "0.58433014", "0.58071464", "0.57826585" ]
0.6516299
0
A hypothesis decorator to return a probabilistic ending spatial tournament.
def prob_end_spatial_tournaments(
    draw,
    strategies=axl.short_run_time_strategies,
    min_size=1,
    max_size=10,
    min_prob_end=0,
    max_prob_end=1,
    min_noise=0,
    max_noise=1,
    min_repetitions=1,
    max_repetitions=20,
):
    strategies = draw(
        strategy_lists(
            strategies=strategies, min_size=min_size, max_size=max_size
        )
    )
    players = [s() for s in strategies]
    player_indices = list(range(len(players)))
    all_potential_edges = list(itertools.combinations(player_indices, 2))
    all_potential_edges.extend([(i, i) for i in player_indices])  # Loops
    edges = draw(
        lists(
            sampled_from(all_potential_edges),
            unique=True,
        )
    )
    # Ensure all players/nodes are connected:
    node_indices = sorted(set([node for edge in edges for node in edge]))
    missing_nodes = [
        index for index in player_indices if index not in node_indices
    ]
    for index in missing_nodes:
        opponent = draw(sampled_from(player_indices))
        edges.append((index, opponent))
    prob_end = draw(floats(min_value=min_prob_end, max_value=max_prob_end))
    repetitions = draw(
        integers(min_value=min_repetitions, max_value=max_repetitions)
    )
    noise = draw(floats(min_value=min_noise, max_value=max_noise))
    tournament = axl.Tournament(
        players,
        prob_end=prob_end,
        repetitions=repetitions,
        noise=noise,
        edges=edges,
    )
    return tournament
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def prob_end_tournaments(\n draw,\n strategies=axl.short_run_time_strategies,\n min_size=1,\n max_size=10,\n min_prob_end=0,\n max_prob_end=1,\n min_noise=0,\n max_noise=1,\n min_repetitions=1,\n max_repetitions=20,\n seed=None,\n):\n strategies = draw(\n strategy_lists(\n strategies=strategies, min_size=min_size, max_size=max_size\n )\n )\n players = [s() for s in strategies]\n prob_end = draw(floats(min_value=min_prob_end, max_value=max_prob_end))\n repetitions = draw(\n integers(min_value=min_repetitions, max_value=max_repetitions)\n )\n noise = draw(floats(min_value=min_noise, max_value=max_noise))\n\n tournament = axl.Tournament(\n players,\n prob_end=prob_end,\n repetitions=repetitions,\n noise=noise,\n seed=seed,\n )\n return tournament", "def define_hypothesis(df, statistic, alternative, paired, alpha):\n paired_text = f\"the {statistic} difference\" if paired else f\"difference in {statistic}\"\n hypothesis = {\n 'two-sided_H0': f\"{paired_text} equal to zero\",\n 'two-sided_H1': f\"{paired_text} not equal to zero\",\n 'greater_H0': f\"{paired_text} greater than or equal to zero\",\n 'greater_H1': f\"{paired_text} less than zero\",\n 'less_H0': f\"{paired_text} less than or equal to zero\",\n 'less_H1': f\"{paired_text} greater than zero\"\n }\n df = HypothesisTester.test_alternative(df, hypothesis,\n alternative, alpha)\n return df", "def tournament(self):\n pass", "def theta():\n pass", "def genfeasiblehypothesis(self):\n hypValid = False\n genAttempts = 0\n hypothesis = []\n while hypValid == False:\n genAttempts += 1.\n # Generate a hypothesis:\n #\n # 1. Deal myself the hand I know I have\n hypothesis = [set() for ix in range(self.nPlayers+1)]\n hypValid = True\n hypothesis[0] = self.myCardSet.copy()\n dealt = self.myCardSet.copy()\n #\n # 2. Loop through all the seen answer constraints dealing cards to \n # known locations\n for constraint in self.logSeenAnswers:\n if not constraint.card in hypothesis[constraint.actor]:\n hypothesis[constraint.actor].add(constraint.card)\n dealt.add(constraint.card)\n #\n # 3. Loop through the unseen answer constraints, \n # satisfying each in turn by random allocation of one\n # of the three cards they reference, in such a way\n # that the other (pass, seen-answer) constraints are \n # observed.\n for constraint in self.logUnseenAnswers:\n constraintset = set(\n [constraint.character,constraint.room,constraint.weapon])\n #\n if not len(constraintset & \n hypothesis[constraint.actor]):\n # This constraint is not already satisfied.\n #\n # Attempt to deal the mentioned player another\n # card: Can they fit it in their hand?\n if (len(hypothesis[constraint.actor]) <\n self.nCardsHeld[constraint.actor]):\n #\n # Check which cards we could allowably give\n # the player to satisfy the constraint\n allowedCards = (constraintset - dealt \n - self.forbiddenSets[constraint.actor])\n if len(allowedCards):\n # Constraint satisfied\n chosenCard = random.sample(allowedCards,1)[0]\n hypothesis[constraint.actor].add(chosenCard)\n dealt.add(chosenCard)\n else:\n # Constraint unsatisfiable due to \n # earlier stochastic allocations -\n # abort hypothesis generation\n hypValid = False\n break\n else:\n # Constraint unsatisfiable due to earlier\n # stochastic allocations - abort hypothesis\n # generation\n hypValid = False\n break\n #\n # 4. 
Select a murder scenario from what's left or abort\n # if the previous process didn't work out well\n if hypValid:\n try:\n murderer = random.sample((game.ALLOWEDCHARS\n - dealt) - self.forbiddenSets[-1],1)[0]\n dealt.add(murderer)\n ixChar = game.CHARINDEX[murderer]\n murderRoom = random.sample((game.ALLOWEDROOMS\n - dealt) - self.forbiddenSets[-1],1)[0]\n dealt.add(murderRoom)\n ixRoom = game.ROOMINDEX[murderRoom]\n murderWeap = random.sample((game.ALLOWEDWEAPS\n - dealt) - self.forbiddenSets[-1],1)[0]\n dealt.add(murderWeap)\n ixWeap = game.WEAPINDEX[murderWeap]\n #\n hypothesis[-1] = (ixChar,ixRoom,ixWeap)\n except ValueError:\n # If we land here, the unseen-answer constraint\n # satisfaction process left us with no cards\n # from one or more of the three necessary \n # categories - so the hypothesis is void!\n hypValid = False\n continue\n else:\n # Hypothesis invalid = try again from the start\n continue\n #\n # 5. If we get here, we can randomly allocate the \n # remaining undealt cards subject to the forbidden locations - all \n # unseen and seen answer constraints have been satisfied\n shuffledDeck = list(game.ALLOWEDCARDS - dealt)\n random.shuffle(shuffledDeck)\n for card in shuffledDeck:\n # Deal card to first player who is not forbidden from \n # receiving it\n recipient = next((ix for ix in range(self.nPlayers) \n if (not self.forbidden[ix,card]) and \n (len(hypothesis[ix]) < self.nCardsHeld[ix])\n ),None)\n # \n if recipient is None:\n # We ended up with nobody to allocate this card to: the\n # hypothesis is invalidated (by pass constraints) due\n # to earlier random dealings\n hypValid = False\n break\n else:\n hypothesis[recipient].add(card)\n dealt.add(card)\n #\n # 6. Finally, just check the hypothesis isn't invalidated by any \n # incorrect accusations we've seen. This is by rejection sampling\n # because it should very rarely happen!\n for wrongAcc in self.logIncorrectAccusations:\n if ((murderer,murderRoom,murderWeap) == \n (wrongAcc.character,wrongAcc.room,wrongAcc.weapon)):\n hypValid = False\n #\n # Either hypValid = False (and loop will run again), or a valid \n # hypothesis is generated.\n #\n # Debug check: All cards must have been dealt for the hypothesis\n assert dealt == game.ALLOWEDCARDS, \\\n \"'Valid' hypothesis generated without dealing full pack!\"\n #\n return {'hypothesis':hypothesis,'genAttempts':genAttempts}", "def main():\n\n # Hypothesis:\n # The `impact` encapsulates the volatility, stability and overall\n # fluctuation of the market; in particular, movements that would\n # affect one's portfolio, e.g. unexpected (i.e. not predicted)\n # increases or drops in prices.\n # For the StrategyLearner should directly affect the learned\n # policy, particularly, in terms of willingness to take risks by\n # betting on the behavior of the market.\n # This can be translated into three metrics:\n # - Number of entries:\n # These should be reduced as market impact increases which\n # shows the learning agent being more cautious about its bets\n # - Cumulative return:\n # Directly related to the point mentioned above, as market\n # impact increases and the agent's willingness to take risks\n # decreaes, so is the overall performance of the strategy\n # - Training episodes:\n # This applies specifically to the Q-Learning agent, but it\n # is interesting to see how as the market impact increases,\n # the number of complete training episodes (i.e. a complete\n # pass on the trading data) is not affected. 
One would think\n # that the agent would converge faster when the impact is\n # large as it would quickly realize that the most optimal\n # strategy is to not do anything. However, impact does not\n # affect the rate of convergence, but rather the strategy\n # that the agent converges to\n\n # Set the seed for reproducibility\n random.seed(1481090000)\n\n # Experiment parameters\n symbol = 'JPM'\n # In-sample: January 1, 2008 to December 31 2009\n start_date = dt.datetime(2008, 1, 1)\n end_date = dt.datetime(2009, 12, 31)\n starting_value = 100000\n commission = 0.0\n # Values to use to evaluate the effect of the impact\n impact_values = [0.0, 0.005, 0.01, 0.05, 0.1, 0.25, 0.5, 1.0]\n\n all_entries = []\n all_returns = []\n all_episodes = []\n\n for impact in impact_values:\n log.info(\"Evaluating the effect of impact=%s\", impact)\n strategy_learner = StrategyLearner(verbose=False, impact=impact)\n\n log.info(\"Training StrategyLearner\")\n strategy_learner.addEvidence(\n symbol=symbol,\n sd=start_date,\n ed=end_date,\n sv=starting_value\n )\n\n log.info(\"Querying StrategyLearner to generate trades\")\n trades = strategy_learner.testPolicy(\n symbol=symbol,\n sd=start_date,\n ed=end_date,\n sv=starting_value\n )\n\n log.info(\"Transforming StrategyLearner trades into marketsim orders\")\n orders = _convert_trades_to_marketisim_orders(symbol, trades)\n\n log.info(\"Computing portfolio values for %d orders\", orders.shape[0])\n port_vals = compute_portvals(\n orders,\n start_val=starting_value,\n commission=commission,\n impact=impact\n )\n\n cumulative_return = _compute_cumulative_return(port_vals)\n\n all_entries.append(strategy_learner.metadata['entries'])\n all_returns.append(cumulative_return)\n all_episodes.append(strategy_learner.metadata['training_episodes'])\n\n _plot_and_save_number_of_entries_per_impact_value(impact_values, all_entries)\n _plot_and_save_number_of_episodes_per_impact_value(impact_values, all_episodes)\n _plot_and_save_cumulative_return_per_impact_value(impact_values, all_returns)", "def final_strategy_test():\r\n print('-- Testing final_strategy --')\r\n print('Win rate:', compare_strategies(final_strategy))", "def evaluate(self, ind, **kwargs):\n \n from hypothesis_engine import hypothesis_params\n\n strategy = eval(ind.phenotype)\n\n nan_frac = np.count_nonzero(np.isnan(strategy))/np.size(strategy)\n if nan_frac >=0.4:\n print(\"Fraction of Missing Values in the strategy greater than 40%.\")\n fitness = 0\n else:\n string = 'self.fitness_exp(strategy,hypothesis_params)' \n\n fitness = eval(string)\n\n if fitness == np.inf or fitness == -np.inf:\n print(\"Invalid fitness value.\")\n fitness = 0\n\n# print(\"{:<40}{:^5}{:<20}\".format(ind.phenotype,\" :\\t\", f))\n prefix = str(round(fitness,3)) + \"\\t : \"\n preferredWidth = 70\n wrapper = textwrap.TextWrapper(initial_indent=prefix, width=preferredWidth,\n subsequent_indent=' '*len(prefix))\n message = ind.phenotype\n\n print(wrapper.fill(message))\n return fitness", "def play_game(game,standings_):\n rand_nmr = random.random()\n\n standings_.loc[standings_.TEAMS==game['Home'],'MP'] += 1\n standings_.loc[standings_.TEAMS==game['Away'],'MP'] += 1\n\n if rand_nmr < game['Prob Home']:\n n_goals = goals() # a random number of goals is added to the goal tally, all games and in 1-0,2-0,3-0 or 4-0. 
This can be improved\n standings_.loc[standings_.TEAMS==game['Home'],'W'] += 1\n standings_.loc[standings_.TEAMS==game['Home'],'F'] += n_goals\n standings_.loc[standings_.TEAMS==game['Away'],'L'] += 1\n standings_.loc[standings_.TEAMS==game['Away'],'A'] += n_goals\n standings_.loc[standings_.TEAMS==game['Home']][\"h2h\"].apply(lambda x:x.append(game['Away']))\n\n return 0\n\n elif rand_nmr < game['Prob Home'] + game['Prob Draw']:\n # all draws end in 0-0 this can be improved\n standings_.loc[standings_.TEAMS==game['Home'],'D'] += 1\n standings_.loc[standings_.TEAMS==game['Away'],'D'] += 1\n\n return 1\n\n else:\n n_goals = goals() # a random number of goals is added to the goal tally, all games and in 1-0,2-0,3-0 or 4-0. This can be improved\n standings_.loc[standings_.TEAMS==game['Away'],'W'] += 1\n standings_.loc[standings_.TEAMS==game['Away'],'F'] += n_goals\n standings_.loc[standings_.TEAMS==game['Home'],'A'] += 1\n standings_.loc[standings_.TEAMS==game['Home'],'L'] += n_goals\n standings_.loc[standings_.TEAMS==game['Away']][\"h2h\"].apply(lambda x:x.append(game['Home']))\n\n return 2", "def close_to_opponent_high_pass(obs, player_x, player_y):\n def environment_fits(obs, player_x, player_y):\n \"\"\" environment fits constraints \"\"\"\n for i in range(1, len(obs[\"right_team\"])):\n distance_to_opponent = get_distance(player_x, player_y, obs[\"right_team\"][i][0], obs[\"right_team\"][i][1])\n if distance_to_opponent < 0.06:\n for j in range(1, len(obs[\"left_team\"])):\n player_to_opponents = get_average_distance_to_opponents(obs, player_x, player_y)\n teammate_to_opponents = get_average_distance_to_opponents(obs, player_x, player_y)\n distance_to_teammate = get_distance(player_x, player_y, obs[\"left_team\"][j][0], obs[\"left_team\"][j][1])\n if distance_to_teammate < 0.6 and distance_to_teammate > 0.4 and player_to_opponents > teammate_to_opponents:\n teammate_distance_to_goal = get_distance(obs[\"left_team\"][j][0], obs[\"left_team\"][j][1], 1, 0)\n player_distance_to_goal = get_distance(player_x, player_y, 1, 0)\n if teammate_distance_to_goal < player_distance_to_goal - 0.2:\n return True\n break\n return False\n \n def get_action(obs, player_x, player_y):\n \"\"\" get action of this memory pattern \"\"\"\n if Action.Sprint in obs[\"sticky_actions\"]:\n return Action.ReleaseSprint\n return Action.HighPass\n\n return {\"environment_fits\": environment_fits, \"get_action\": get_action}", "def test_endgameStrategy(self):\n self.result = \"\"\"\n 0 0 0 0 0 0 0 0 0 0 0\n 0 0 0 1 2 3 3 2 1 0 0\n 0 0 1 3 x x x x 1 0 0\n 0 0 2 x x 6 x 5 2 0 0\n 0 0 3 x 4 4 x x 2 0 0\n 0 0 3 x 5 5 x x 2 0 0\n 0 0 2 x x x x 3 1 0 0\n 0 0 1 2 3 3 2 1 0 0 0\n 0 0 0 0 0 0 0 0 0 0 0\n \"\"\"", "def p(self) -> Probability:\n ...", "def h(X, theta, n_hidden_layers=1):\n _, a = feed_forward(X, theta, n_hidden_layers)\n L = n_hidden_layers + 1 # last layer\n\n hypothesis = a[L]\n return hypothesis", "def logistic_hypothesis(theta):\n return lambda X: sigmoid(np.dot(x_strich(X), theta))", "def post_process(\n self,\n i: int,\n maxlen: int,\n maxlenratio: float,\n running_hyps: BatchHypothesis,\n ended_hyps: List[Hypothesis],\n ) -> BatchHypothesis:\n n_batch = running_hyps.yseq.shape[0]\n logging.debug(f\"the number of running hypothes: {n_batch}\")\n if self.token_list is not None:\n logging.debug(\n \"best hypo: \"\n + \"\".join(\n [\n self.token_list[x]\n for x in running_hyps.yseq[0, 1 : running_hyps.length[0]]\n ]\n )\n )\n # add eos in the final loop to avoid that there are no ended hyps\n if i == maxlen - 
1:\n logging.info(\"adding <eos> in the last position in the loop\")\n yseq_eos = torch.cat(\n (\n running_hyps.yseq,\n torch.full(\n (n_batch, 1),\n self.eos,\n device=running_hyps.yseq.device,\n dtype=torch.int64,\n ),\n ),\n 1,\n )\n running_hyps.yseq.resize_as_(yseq_eos)\n running_hyps.yseq[:] = yseq_eos\n running_hyps.length[:] = yseq_eos.shape[1]\n\n # add ended hypotheses to a final list, and removed them from current hypotheses\n # (this will be a probmlem, number of hyps < beam)\n is_eos = (\n running_hyps.yseq[torch.arange(n_batch), running_hyps.length - 1]\n == self.eos\n )\n for b in torch.nonzero(is_eos, as_tuple=False).view(-1):\n hyp = self._select(running_hyps, b)\n ended_hyps.append(hyp)\n remained_ids = torch.nonzero(is_eos == 0, as_tuple=False).view(-1).cpu()\n return self._batch_select(running_hyps, remained_ids)", "def spatial_tournaments(\n draw,\n strategies=axl.short_run_time_strategies,\n min_size=1,\n max_size=10,\n min_turns=1,\n max_turns=200,\n min_noise=0,\n max_noise=1,\n min_repetitions=1,\n max_repetitions=20,\n):\n strategies = draw(\n strategy_lists(\n strategies=strategies, min_size=min_size, max_size=max_size\n )\n )\n players = [s() for s in strategies]\n player_indices = list(range(len(players)))\n\n all_potential_edges = list(itertools.combinations(player_indices, 2))\n all_potential_edges.extend([(i, i) for i in player_indices]) # Loops\n edges = draw(\n lists(\n sampled_from(all_potential_edges),\n unique=True,\n )\n )\n\n # Ensure all players/nodes are connected:\n node_indices = sorted(set([node for edge in edges for node in edge]))\n missing_nodes = [\n index for index in player_indices if index not in node_indices\n ]\n for index in missing_nodes:\n opponent = draw(sampled_from(player_indices))\n edges.append((index, opponent))\n\n turns = draw(integers(min_value=min_turns, max_value=max_turns))\n repetitions = draw(\n integers(min_value=min_repetitions, max_value=max_repetitions)\n )\n noise = draw(floats(min_value=min_noise, max_value=max_noise))\n\n tournament = axl.Tournament(\n players, turns=turns, repetitions=repetitions, noise=noise, edges=edges\n )\n return tournament", "def gen_outcome(alpha, delta, beta, win_counters, attempt_counters, h_features_win, h_features_att):\n return logistic(alpha+delta+beta+np.sum(np.log(1+np.array(attempt_counters))*np.array(h_features_att))+\\\n np.sum(np.log(1+np.array(win_counters))*np.array(h_features_win)))", "def _rhythm_obs_proc(pattern):\n # We asign the endpoint of the hypothesis.\n pattern.hypothesis.end.value = pattern.evidence[o.QRS][-1].time.value", "def endgame_winner(self) :\n raise NotImplementedError", "def _tournament(self,probs,n,size):\n participants = np.random.choice(\n self.n_agents,\n size=size,\n replace=False)\n winners = np.argpartition(probs[participants], -n)[-n:]\n return participants[winners]", "def custom_score_2(game, player):\n if game.is_loser(player):\n return float(\"-inf\")\n\n if game.is_winner(player):\n return float(\"inf\")\n \n # First obtain the base information to calculate player & opponent\n # feature values\n player_legal_moves = game.get_legal_moves(player)\n opponent = game.get_opponent(player)\n opponent_legal_moves = game.get_legal_moves(opponent)\n if len(player_legal_moves) != len(opponent_legal_moves):\n return float(len(player_legal_moves) - len(opponent_legal_moves))\n \n # Get_center_coordinates and opponent. 
Then set the list of participants\n center_coordinates = center_y, center_x = get_center_coordinates(game)\n participants = [player, opponent]\n \n # Then, for each participant obtain his/her feature values \n for participant in participants:\n if participant == player:\n p_legal_moves = player_legal_moves\n player_either = player\n participant_coordinates = p_y, p_x = \\\n game.get_player_location(participant)\n player_legal_move_count, player_start_center_distance, \\\n player_total_next_moves, player_max_path_length, \\\n player_path_count, player_min_center_diff \\\n = \\\n get_player_feature_values(game, center_coordinates, \\\n player_either,participant_coordinates, p_legal_moves)\n else:\n p_legal_moves = opponent_legal_moves\n player_either = opponent\n participant_coordinates = p_y, p_x \\\n = game.get_player_location(participant)\n opponent_legal_move_count, opponent_start_center_distance, \\\n opponent_total_next_moves, opponent_max_path_length, \\\n opponent_path_count, opponent_min_center_diff \\\n = \\\n get_player_feature_values(game, center_coordinates, \\\n player_either, participant_coordinates, p_legal_moves)\n \n # Place each participant's feature values in a tuple/vector surrogate \n pro_player_vector = \\\n (player_legal_move_count, player_start_center_distance, \\\n player_total_next_moves, player_max_path_length, player_path_count, \\\n opponent_min_center_diff)\n pro_opponent_vector = \\\n (opponent_legal_move_count, opponent_start_center_distance, \\\n opponent_total_next_moves, opponent_max_path_length, \\\n opponent_path_count, player_min_center_diff)\n \n # Provide a weighting vector for the features \n weight_vector = (1.5,0.1,1.0,0.001,0.001,0.001)\n \n # Calculate the return value = weighted difference of players' features\n weighted_difference_dot_product = sum(p*(q-r ) for p,q,r \\\n in zip(weight_vector, pro_player_vector, pro_opponent_vector))\n \n return float(weighted_difference_dot_product)", "def win_probability(home_elo: float, away_elo: float) -> Tuple[float, float]:\n home_prob = 1.0 / (1 + math.pow(10, (away_elo - home_elo) / 400))\n return home_prob, 1.0 - home_prob", "def _generate_hourly_seasonality(\n self,\n time_range: np.ndarray,\n hourly_seasonality_params: List[Tuple[float, float]],\n ) -> np.ndarray:\n\n return sum(\n [\n alpha * np.cos(2 * np.pi * (i + 1) * time_range / 24)\n + beta * np.sin(2 * np.pi * (i + 1) * time_range / 24)\n for i, (alpha, beta) in enumerate(hourly_seasonality_params)\n ]\n )", "def league_ttest(df_league_one: pd.DataFrame, df_league_two: pd.DataFrame, parameter: str, alpha: float, ):\n assert isinstance(df_league_one, pd.DataFrame), 'df_league_one needs to be a pandas dataframe.'\n assert isinstance(df_league_two, pd.DataFrame), 'df_league_two needs to be a pandas dataframe.'\n assert isinstance(alpha, float), 'alpha needs to be a float.'\n\n\n df_league_one_mean = df_league_one.mean()\n n = len(df_league_one['club'])\n df = n-1\n t_critical = stats.t.ppf(1-alpha, df)\n leagues_ttest = stats.ttest_1samp(a= df_league_two[f'{parameter}'], popmean= df_league_one_mean)\n t_value = leagues_ttest[0]\n p_value = leagues_ttest[1]\n\n stats_values = {}\n\n stats_values['p_value'] = round(list(p_value)[0], 4)\n\n if stats_values['p_value'] < alpha:\n return ('Enough evidence to reject null hypothesis')\n elif stats_values['p_value'] > alpha:\n return ('Not enough evidence to reject null hypothesis')", "def simulate_weekend():\n\t\tfor person in Simulation.community:\n\t\t\tif 
Simulation.community[person].has_random:\n\t\t\t\tSimulation.community[person].get_random_attendance()\n\t\t\tSimulation.community[person].eval_strategies()", "def test_get_waivers(league):\n pass", "def scenario_emissions_rule(_m, y, s):\r\n\r\n return m.RHO[y, s] * sum(m.p[g, y, s, t] * m.EMISSIONS_RATE[g] for g in m.G_THERM for t in m.T)", "def goals():\n rand_nmr = random.random()\n if rand_nmr < 0.5:\n return 1\n elif rand_nmr < 0.8:\n return 2\n elif rand_nmr < 0.97:\n return 3\n else:\n return 4", "def birl_agent(start, goal, skills, beta, g=0.99, theta=0, pov= None, vals=-1*np.ones((4604)), Z=None):\n adj_list = adjusted_adj_list_4\n if vals[0] == -1: #Can provide initial values or recompute them, but must provide pov or else is v slow\n vals, Z = value_iteration(adjusted_adj_list_4, skills, beta, g, theta, goal, skill_vals_precomputed=pov)\n else:\n vals = vals[:]\n Z = Z[:]\n s = start\n s_list = [s]\n count = 0\n while s != goal:\n probs = []\n for o in adj_list[s]:\n prob = 0\n if o == goal:\n prob = np.exp(beta*(20)) / Z[s]\n probs.append(prob) \n else:\n prob = np.exp(beta*(-1 + g*vals[o])) / Z[s]\n probs.append(prob)\n for s_index, skill in enumerate(skills):\n prob = np.exp(beta*(pov[0][tuple([s, s_index])] + (g**pov[2][tuple([s, s_index])])*vals[skill[1]])) / Z[s]\n probs.append(prob)\n delta = 1 - np.cumsum(probs)[-1]\n probs[np.where(np.array(probs) == np.amax(np.array(probs)))[0][0]] += delta\n ff = np.random.choice(range(len(probs)), p=probs)\n o = adj_list[s][ff]\n s = o\n s_list.append(o)\n count += 1\n if count > 2000000: #Start and goal are disconnected\n return None \n return s_list", "def metropolis_hastings(posterior_stats):\n\titerations = 5000\n\ttheta = np.array([[-0.05], [0.5]])\n\tproposal_stdev = np.array([[0.1], [0.1]])\n\tln_posterior = calculate_ln_posterior(theta, posterior_stats)\n\taccepts = 0\n\tmcmc_samples = theta \n\n\tfor i in range(iterations):\n\t\ttheta_proposed = generate_candidates(theta, proposal_stdev)\n\t\tln_posterior_proposed = calculate_ln_posterior(theta_proposed, posterior_stats)\n\t\t\n\t\thastings_ratio = calculate_hastings_ratio(ln_posterior_proposed, ln_posterior)\t\n\t\t\n\t\tacceptance_probability = min([1, hastings_ratio])\n\n\t\tif (random.uniform(0,1) < acceptance_probability):\n\t\t\t#Then accept proposed theta\n\t\t\ttheta = theta_proposed\n\t\t\tln_posterior = ln_posterior_proposed\n\t\t\taccepts += 1\n\t\tmcmc_samples = np.hstack((mcmc_samples, theta))\n\n\tmcmc_mean = np.array([ [np.mean(mcmc_samples[0])], [np.mean(mcmc_samples[1])] ])\n\tcovariance = np.cov(mcmc_samples)\n\tmcmc = {'samples': mcmc_samples.transpose(), 'mean': mcmc_mean, 'covar': covariance} \n\tprint('acceptance ratio init')\n\tacceptance_ratio = accepts / iterations\n\tprint(acceptance_ratio)\n\n\treturn mcmc" ]
[ "0.5512576", "0.5420276", "0.51442975", "0.51415604", "0.51293325", "0.5061573", "0.5054858", "0.5026072", "0.49884802", "0.4959208", "0.49386254", "0.49306422", "0.4901441", "0.4869079", "0.48479176", "0.4831503", "0.48310497", "0.4826288", "0.48154637", "0.47833726", "0.47810456", "0.47790715", "0.4762967", "0.4759372", "0.47509363", "0.47270525", "0.47252244", "0.47235167", "0.47135308", "0.47129002" ]
0.5754266
0
Reads the todo file and returns a list of Todos. The "id" of each todo refers to the zero-indexed line number of that todo.
def read_file(filename) -> List[Todo]: with pathlib.Path(filename).expanduser().open('r') as fp: return [Todo(_id, line) for _id, line in enumerate(fp)]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def read_todo_file(self):\n\n todo = []\n in_progress = []\n done = []\n if os.path.exists('TODO.txt'):\n todo_fp = open('TODO.txt', 'r')\n state = 0\n line = todo_fp.readline()\n while line:\n line = line.strip()\n if state == 0:\n if line == '__IN_PROGRESS__':\n state = 1\n elif len(line) > 1:\n todo.append(line)\n elif state == 1:\n if line == '__DONE__':\n state = 2\n elif len(line) > 1:\n in_progress.append(line)\n elif state == 2:\n if len(line) > 1:\n done.append(line)\n line = todo_fp.readline()\n todo_fp.close()\n self.todo_scroll_cell.add_item_list(todo)\n self.in_progress_scroll_cell.add_item_list(in_progress)\n self.done_scroll_cell.add_item_list(done)", "def read_todo(taskname):\n autodel()\n with open(todofile, 'r') as todo:\n for task in todo:\n task = json.loads(task)\n if taskname in task['name']:\n return [task['name'], \n task['deadline'], \n task['priority'],\n task['reminder'],\n task['no_del']]\n return None", "def todo(self, todo_id):\r\n return tdl.Todo(self, todo_id)", "def getTasks():\n\ttasks = open(\"todo.txt\").readlines()\n\tif len(tasks):\n\t for num in range(len(tasks) - 1, -1, -1):\n\t print(\"[%d] %s\" % (num + 1, tasks[num]), end=\"\")\n\telse:\n\t print(\"There are no pending todos!\")", "def todolist(self, todolist_id):\r\n return tdl.Todolist(self, todolist_id)", "def todo_items(self, todo_list_id):\n\n self.endpoint = '{0}/{1}/todo_items.json'.format(self.endpoint, todo_list_id)\n\n request = self.get(self.construct_url())\n\n if request.status_code == 200:\n return json.loads(request.content)\n\n raise BasecampAPIError(json.loads(request.content).get('error'))", "def get_todos(self):\n if self.is_new:\n # if its a new project then create the todo items from the yml\n # templates\n return self.get_yml_todos()\n else:\n # check for existing todos\n return self.get_db_todos()", "def get_completed_tasks_in_tod():\n try:\n tod_file_data = load_data(os.getenv('TOD_FP'))\n except FileNotFoundError:\n return []\n completed_tasks = []\n tod_file_data = tod_file_data.split('\\n')\n\n for line in tod_file_data:\n if line == '' or line[0] != '[' or line[1] != 'X':\n continue\n completed_task = (f\"{line[4:-7]} {line[-6:]}\"\n if line[-6:] != '(0:00)'\n else line[4:-7])\n completed_tasks.append(completed_task)\n\n return completed_tasks", "def get_todo_list():\n\n # assume that a \"h264\" encoded file is complete\n return models.LibraryItem.objects.filter(h264=False)", "def get_all(self):\n pat = re.compile('(<!-- @tk(.*?)-->)',re.M|re.S|re.I)\n for dir in glob.iglob('./**/.book', recursive=True):\n for fname in glob.iglob(os.path.dirname(dir) + '/**/*md', recursive=True):\n with open(fname, 'r') as mkd_file:\n contents = mkd_file.read()\n\n for todo in re.findall(pat,contents):\n t = Todo(todo[1],fname)\n t.context(contents)\n t.github = self.github\n self.todos.append(t)\n\n self.files.append(fname)\n \n return self.todos", "def get_todo_by_user_id(todo_id):\n current_user = fjwte.get_current_user()\n todo = Todo.get_item_by_user_id(todo_id, current_user.id)\n if not todo:\n abort(404, message=\"Todo %s doesn't exist\" % todo_id)\n return todo", "def get(self, todo_id):\n return self.get_todo_by_user_id(todo_id)", "def from_path(path):\n text = read(path)\n todos = parse(text)\n todos.set_source(path)\n return todos", "def loadFromFile():\n\n # if we cant get a file, return empty arrays\n try:\n filePath = os.path.expanduser('~/todoSave')\n fileContents = open(filePath, 'r').read()\n todoList = json.loads(fileContents)[\"todo\"]\n doneList = 
json.loads(fileContents)[\"done\"]\n\n todoListFiltered = []\n doneListFiltered = []\n\n # we want to filter any non formatted \"[ ] ...\" strings from the list as they are errors\n for item in todoList:\n if \"[ ]\" in item:\n todoListFiltered.append(item)\n\n for item in doneList:\n if \"[ ]\" in item:\n doneListFiltered.append(item)\n\n return todoListFiltered, doneListFiltered\n except:\n return [], []", "def get(self):\n current_user = fjwte.get_current_user()\n return Todo.get_items_by_user_id(current_user.id)", "def get_todos(user_id):\n full_url = base_url + 'get-to-dos?userId=' + user_id + '&key=' + key\n response = requests.get(full_url)\n if response.status_code != 200:\n raise RequestException('Get To Dos failed with status code: {}'.format(response.status_code))\n return json.loads(response.text)", "def get(self):\n todos = {\n 'todos': [\n {\n 'id': 1,\n 'description': 'Create README file'\n },\n {\n 'id': 2,\n 'description': 'Publish to Github'\n }\n ]\n }\n return todos, HTTPStatus.OK", "def todos(self):\r\n return Todos(self)", "def autodel(): #i hate this code so much\n today, tasks = datetime.today(), []\n to_remove_indexes = []\n deleted_tasks = 0\n\n with open(todofile, 'r') as todo:\n tasks = todo.readlines()\n for i, task in enumerate(tasks):\n try:\n task = json.loads(task)\n except json.decoder.JSONDecodeError:\n return False, False\n if task['deadline'] == \"None\": #because i converted to string in adding\n continue\n dline = datetime.strptime(task['deadline'], \"%Y-%m-%d %H:%M:%S\")\n if dline < today and not task['no_del']:\n to_remove_indexes.append(i)\n deleted_tasks += 1\n\n for index in to_remove_indexes[::-1]:\n del tasks[index]\n \n with open(todofile, 'w') as todo:\n for task in tasks:\n todo.write(task)\n \n return deleted_tasks, True", "def list_todo_table(self):\n if self.is_todo_table_empty():\n print(\"nothing to do!\")\n return []\n else:\n return self.make_list_from_task()", "def list_items(todofile, opt, args):\n def filt(item):\n \"\"\"Filter function based on options.\"\"\"\n result = (((item.done and opt.list_complete) or\n (not item.done and not opt.hide_incomplete)) and\n ((item.time is None) or\n ((opt.start_date is None or opt.start_date < item.time) and\n item.time < opt.end_date)))\n for arg in args:\n result = result and (re.search(arg, item.text) != None)\n return result\n\n for item in filter(filt, todofile.fetch_items()):\n list_str = ['']\n if (item.done):\n list_str.append('X')\n elif (item.time is not None and item.time < datetime.datetime.now()):\n list_str.append('!')\n else:\n list_str.append('*')\n if(opt.list_id):\n list_str.append('{0:<3d}'.format(item.itemid))\n if(opt.list_date and item.time is not None):\n list_str.append(item.time.strftime('%c') + ' --')\n list_str.append(item.text)\n print ' '.join(list_str)", "def printall():\n all_tasks = {\n 'Name': [],\n 'Deadline':[],\n 'Priority':[],\n 'Autodelete':[]\n }\n with open(todofile, 'r') as todo:\n try: #list compre for loading dict objs in to list, sorting by deadline\n tasks = sorted([json.loads(task) for task in todo.readlines()], \n key= lambda task: task['deadline'])\n except json.decoder.JSONDecodeError:\n return 1\n if not tasks:\n return None\n for task in tasks:\n all_tasks['Name'].append(task['name'])\n all_tasks['Deadline'].append(task['deadline'])\n all_tasks['Priority'].append(task['priority'])\n all_tasks['Autodelete'].append(\n 'No' if task['no_del'] else 'Yes')\n return all_tasks", "def _add_todo_items(self):\n\n todo_list = ToDoList(day=self.day, 
user=self.user.user.rolllistuser)\n todo_list.save()\n\n items = [\n 'feed the cats',\n 'drive to work',\n 'read a book',\n 'eat some food',\n ]\n todo_items = []\n for item in items:\n new_item = ToDoItem(\n title=item,\n to_do_list=todo_list,\n priority=1\n )\n new_item.save()\n todo_items.append(new_item)\n return todo_items", "def make_list_from_task(self):\n cur = self.conn.execute(\"\"\"SELECT * FROM todo;\"\"\")\n lst = []\n for row in cur:\n task = {}\n task['id'] = row[0]\n task['desc'] = row[1]\n task['due_date'] = row[2]\n task['due_time'] = row[3]\n lst.append(task)\n return lst", "def search_todo(filtered_files):\n\n global F_COUNTER\n global SEARCHED\n todo = re.compile('\\\\bTODO\\\\b.*')\n fixme = re.compile('\\\\bFIXME\\\\b.*')\n\n for files in filtered_files:\n f = open(os.path.abspath(files), 'r')\n printed = False\n SEARCHED += 1\n for n, row in enumerate(f.readlines()):\n\n found_todo = todo.search(row)\n found_fixme = fixme.search(row)\n if found_todo or found_fixme:\n if not printed:\n print('')\n click.secho(files, fg='blue', bold=True)\n printed = True\n F_COUNTER += 1\n if found_todo:\n pretty_print(str(n+1), found_todo.group())\n else:\n pretty_print(str(n+1), found_fixme.group())\n\n f.close()", "def todolists(self):\r\n return tdl.Todolists(self)", "def todos(\n self, sort_keys=(\"due\", \"priority\"), include_completed=False, sort_key=None\n ):\n if sort_key:\n sort_keys = (sort_key,)\n\n return self.search(\n todo=True, include_completed=include_completed, sort_keys=sort_keys\n )", "def getsameIDList(id, file):\n glineList = []\n newread = []\n \n for line in open(file):\n itemList = line[:-1].split('\\t')\n line_id = getsubString(itemList[0],'|')\n \n if id == line_id:\n glineList.append(line)\n else:\n newread.append(line)\n return glineList", "def read_from_db():\n\t# prepare the query for reading from DB\n\tquery = \"SELECT * FROM tasks\"\n\n\t# connection to database\n\tconnection = pymysql.connect(user=\"root\", password=\"sysadmin\", host=\"localhost\", database=\"todolist\")\n\n\t# get a cursor\n\tcursor = connection.cursor()\n\n\t# execute query\n\tcursor.execute(query)\n\n\t# fetch result from query\n\tresults = cursor.fetchall()\n\n\t# close cursor and connection\n\tcursor.close()\n\tconnection.close()\n\n\ttask_list = list()\n\tfor result in results:\n\t\ttmp = {'id': result[0], 'description': result[1], 'urgent': result[2]}\n\t\ttask_list.append(tmp)\n\n\treturn task_list", "def get_tasks(id):\n url = 'https://jsonplaceholder.typicode.com/'\n tasks = requests.get(url + 'todos', params={'userId': id}).json()\n return tasks" ]
[ "0.7641383", "0.700214", "0.69354475", "0.66047204", "0.6499556", "0.6346317", "0.6282412", "0.6131096", "0.6125421", "0.6125067", "0.60897225", "0.60823214", "0.59966564", "0.5938333", "0.5918086", "0.5897514", "0.5896833", "0.58917236", "0.58791345", "0.5829496", "0.5820293", "0.56555897", "0.55774087", "0.5546443", "0.55182797", "0.5503285", "0.54935694", "0.5490151", "0.54168934", "0.54099935" ]
0.7736592
0
Execute a streaming pull and process alerts through the `callback`. The streaming pull happens in a background thread. A `queue.Queue` is used to communicate between threads and enforce the stopping condition(s).
def stream_alerts( self, user_filter=None, user_callback=None, **user_kwargs ): # callback doesn't accept kwargs. set attribute instead. kwargs = self._add_default_kwargs(**user_kwargs) self.callback_kwargs = { "user_filter": user_filter, "user_callback": user_callback, **kwargs, } # avoid pulling down a large number of alerts that don't get processed flow_control = pubsub_v1.types.FlowControl(max_messages=kwargs['max_backlog']) # Google API has a thread scheduler that can run multiple background threads # and includes a queue, but I (Troy) haven't gotten it working yet. # self.scheduler = ThreadScheduler(ThreadPoolExecutor(max_workers)) # self.scheduler.schedule(self.callback, lighten_alerts=lighten_alerts) # start pulling and processing msgs using the callback, in a background thread self.streaming_pull_future = self.client.subscribe( self.subscription_path, self.callback, flow_control=flow_control, # scheduler=self.scheduler, # await_callbacks_on_shutdown=True, ) try: # Use the queue to count saved messages and # stop when we hit a max_messages or timeout stopping condition. num_saved = 0 while True: try: num_saved += self.queue.get(block=True, timeout=kwargs['timeout']) except Empty: break else: self.queue.task_done() if kwargs['max_results'] & num_saved >= kwargs['max_results']: break self._stop() except (KeyboardInterrupt, Exception): self._stop() raise self._log_and_print(f"Saved {num_saved} messages from {self.subscription_path}") return self.database_list
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def poll(self):\n msgs = self._read()\n\n if msgs and self.callback:\n for msg in msgs:\n self.callback(msg)", "def run(self) -> None:\n\n # Track the last timestamp we see. When we fetch_stream() again on the\n # next iteration, we'll start from that timestamp onwards to avoid\n # fetching every single page again. The last event or two will be\n # still be in the response, but our de-duping will ignore those.\n last_timestamp = None\n\n # Keep track of what log entries we've consumed so that we suppress\n # duplicates. Duplicates will arise in our stream due to the way we\n # watch for new entries.\n consumed = set() # type: MutableSet\n\n # How many successful vs failed fetch_stream calls. If we consistently see\n # failures but we never see a successful attempt, we should raise an exception\n # and stop.\n success_count = 0\n failure_count = 0\n\n while not self.stopped.wait(0.2):\n try:\n for entry in fetch_stream(self.stream, start_time = last_timestamp):\n if entry[\"eventId\"] not in consumed:\n consumed.add(entry[\"eventId\"])\n\n last_timestamp = entry[\"timestamp\"]\n\n self.consumer(entry)\n except (ClientError, BotocoreConnectionError):\n failure_count += 1\n if failure_count > MAX_FAILURES and not success_count:\n raise\n else:\n success_count += 1", "def _callback_loop(self):\n\n class _Timestamper:\n def __init__(self):\n self._ref_tstamp = None\n\n def timestamp(self, ref_timestamp: Optional[float] = None):\n if self._ref_tstamp is None:\n if ref_timestamp:\n self._ref_tstamp = ref_timestamp\n else:\n self._ref_tstamp = time.time()\n\n ret = self._ref_tstamp\n self._ref_tstamp += GanglionConstants.DELTA_T\n\n return ret\n\n logger = self._logger.getChild('CALLBACK')\n logger.debug('Starting callback thread...')\n timestamper = _Timestamper()\n\n def _handle_sample(sample: Dict):\n logger.debug(f'Handling sample: {sample}')\n # convert to OpenBCISample\n # timestamp comes in milliseconds, convert to seconds\n sample_time = sample.get('timestamp', -1) / 1000.0\n calc_time = timestamper.timestamp(sample_time)\n\n logger.debug(f'Adjusted time for sample: {calc_time}')\n\n channel_data = [_convert_count_to_uVolts(c) for c in\n sample.get('channelDataCounts', [])]\n\n sample = OpenBCISample(\n timestamp=calc_time,\n seq=sample.get('sampleNumber', -1),\n pkt_id=-1,\n channel_data=np.array(channel_data, dtype=np.float64)\n )\n\n with self._callback_lock:\n self._sample_callback(sample)\n\n while not self._shutdown.is_set():\n try:\n _handle_sample(self._sample_q.get(block=True, timeout=0.01))\n self._sample_q.task_done()\n except queue.Empty:\n continue\n\n while not self._sample_q.empty():\n _handle_sample(self._sample_q.get())\n self._sample_q.task_done()\n\n logger.debug('Shut down callback thread.')", "def subscribe(self, callback):\n self.channel.basic_consume(callback, queue=self.queue_name)\n self.channel.start_consuming()", "def run(self):\n while True:\n line = self.stream.readline()\n if not len(line):\n # EOF, stop!\n break\n else:\n # Put the text on the queue, along with the time it was read.\n self.callback_queue.put(line)", "def async_consume(self, callback, auto_ack=False):\n logging.info(\"Async consume\")\n if self.thread is not None:\n return\n self.thread_stop = False\n\n def wrapped_callback(ch, method, properties, body):\n #logging.info(\"Wrapped callback'd\")\n callback(ch, method, properties, body)\n #if not self.thread_stop:\n # callback(ch, method, properties, body)\n #else:\n # print(\"Should stop now!\")\n # callback(ch, method, properties, body)\n # 
self.channel.basic_cancel(self.tag)\n # exit\n\n self.thread = threading.Thread(target=self.consume, args=(wrapped_callback,),\n kwargs={\"auto_ack\":auto_ack})\n self.thread.start()", "def run(self) -> None:\n # logging.info(\"started pull notifications thread\")\n self.set_reader_position()\n while not self.has_been_stopped.is_set():\n self.prompt_event.wait()\n self.prompt_event.clear()\n\n try:\n for notification in self.reader.read():\n if self.has_been_stopped.is_set():\n break\n domain_event = self.process_application.event_from_notification(\n notification\n )\n self.event_queue.put(\n (domain_event, notification[\"id\"], self.upstream_name)\n )\n except Exception as e:\n logging.error(traceback.format_exc(e))\n logging.error(\"Error reading notification log: %s\" % e)\n logging.error(\"Retrying...\")\n self.set_reader_position()\n sleep(1)", "async def get_realtime_future(self, callback):\n await self.async_realtime_stream(callback)", "async def async_realtime_stream(self, callback=None, single=False):\n url = WS_URL % (self.sense_monitor_id, self.sense_access_token)\n # hello, features, [updates,] data\n async with websockets.connect(url, ssl=self.ssl_context) as ws:\n while True:\n try:\n async with asyncio_timeout(self.wss_timeout):\n message = await ws.recv()\n except asyncio.TimeoutError as ex:\n raise SenseAPITimeoutException(\"API websocket timed out\") from ex\n\n result = orjson.loads(message)\n if result.get(\"type\") == \"realtime_update\":\n data = result[\"payload\"]\n self._set_realtime(data)\n if callback:\n callback(data)\n if single:\n return\n elif result.get(\"type\") == \"error\":\n data = result[\"payload\"]\n if not data[\"authorized\"]:\n raise SenseAuthenticationException(\"Web Socket Unauthorized\")\n raise SenseWebsocketException(data[\"error_reason\"])", "def dispatch_callback(self, callback):\n self.callback_queue.put(lambda: callback.func(*callback.args))", "def callback(self, message):\n kwargs = self.callback_kwargs\n\n if kwargs['return_msg']:\n self._callback_return_full_message(message)\n return\n\n # unpack\n try:\n alert_dict, metadata_dict = self._unpack(message)\n except Exception as e:\n self._log_and_print(f\"Error unpacking message: {e}\", severity=\"DEBUG\")\n message.nack() # nack so message does not leave subscription\n return\n\n # run user filter\n if kwargs[\"user_filter\"] is not None:\n try:\n alert_dict = kwargs[\"user_filter\"](alert_dict, **kwargs)\n except Exception as e:\n self._log_and_print(f\"Error running user_filter: {e}\", severity=\"DEBUG\")\n message.nack()\n return\n\n # run user callback\n if kwargs[\"user_callback\"] is not None:\n # get args for user_callback\n args = [] # requires args are ordered properly here & in user_callback\n if kwargs.get(\"send_alert_bytes\", False):\n args.append(message.data)\n if kwargs.get(\"send_metadata\", False):\n args.append(metadata_dict)\n try:\n # execute user callback\n success = kwargs[\"user_callback\"](alert_dict, *args, **kwargs) # bool\n\n except Exception as e:\n success = False\n msg = f\"Error running user_callback: {e}\"\n else:\n if not success:\n msg = \"user_callback reported it was unsuccessful.\"\n finally:\n if not success:\n self._log_and_print(msg, severity=\"DEBUG\")\n message.nack()\n return\n\n if alert_dict is not None:\n # save so stream_alerts can return it, in case the user wants it (broker)\n self.save_alert(alert_dict)\n\n # communicate with the main thread\n self.queue.put(1) # 1 alert successfully processed\n # block until main thread acknowledges so 
we don't ack msgs that get lost\n if kwargs['max_results'] is not None:\n self.queue.join() # single background thread => one-in-one-out\n\n else:\n self._log_and_print(\"alert_dict is None\")\n\n message.ack()", "def call_in_thread(self, callback):\n reactor.callInThread(callback)", "def call(self):\n current_thread = threading.current_thread() # get current thread·\n event = self.q.get() # get task from queue\n while event != self.StopEvent: # Determine whether task is a terminator\n\n func, arguments, callback = event # get funcname,params,callback name\n try:\n result = func(*arguments)\n func_excute_status = True # set func executed status success\n except Exception as e:\n func_excute_status = False # set func executed status failure\n result = None\n print('{} executed error:'.format(func.__name__), e)\n\n if func_excute_status: #\n if callback is not None: # determine whetherif callback is None\n try:\n callback(result)\n except Exception as e:\n print(callback.__name__, e)\n\n with self.worker_state(self.free_list, current_thread):\n if self.terminal:\n event = self.StopEvent\n else:\n event = self.q.get()\n\n else:\n self.created_list.remove(current_thread)", "def call_in_thread(self, callback):\n reactor.callFromThread(reactor.callInThread, callback)", "def run(self):\n\n def callback(ch, method, properties, body):\n json_body = json.loads(body)\n self.buffer.append(Fvalue.fromdict(json_body))\n\n sleep(5) # We introduce a slight delay to let the RabbitMQ container to accept connections\n connection = pika.BlockingConnection(pika.ConnectionParameters(host=self.mq_host,port=self.mq_port))\n channel = connection.channel()\n channel.exchange_declare(exchange=self.mq_host + '_exchange', exchange_type='direct')\n result = channel.queue_declare(exclusive=True)\n queue_name = result.method.queue\n channel.queue_bind(exchange=self.mq_host + '_exchange',\n queue=queue_name,\n routing_key=self.routing_key)\n channel.basic_consume(callback,queue=queue_name,no_ack=True)\n channel.start_consuming()", "def run(config, logging, inq, subscribe_callback, unsubscribe_callback):", "def run(self, messageCallback) -> None:\n\n self.__running = True\n self.messageCallback = messageCallback\n Thread(target = self.__acceptConnections).start()", "def callback(indata, frames, time, status):\n if status:\n print(status, flush=True)\n queue.put(indata.copy())", "def process(self):\n\n self.wsRms.connect()\n\n self.scheduler = Scheduler(self)\n self.scheduler.setDaemon(True)\n self.scheduler.start()\n\n while not self.stop:\n json = self.wsEngine.receive()\n if json == None:\n time.sleep(1)\n continue\n print \"------->Receive from lib: %s\" %json\n message = Message().restore(json)\n\n if message.getCmd() == Message.CMD_REGISTER:\n self.waitingQueue.append(message)\n\n elif message.getCmd() == Message.CMD_RELEASE:\n self.wsRms.release(message.getRes())\n self.runningQueue.remove(message)\n\n self.scheduler.stop()", "def run(self) -> None:\n while self.data_incoming or len(self._queue):\n if not self._queue:\n logging.info(\"Consumer %d is sleeping since queue is empty\", self._name)\n time.sleep(0.75)\n print(self._queue.get())\n time.sleep(0.5)", "def listen_for_messages(self, callback):\n # generate get requests for all input queues\n requests = [port.in_queue.get() for port in self.ports]\n while requests:\n # helper variable for the asserts\n queues_with_pending_requests = [req.resource for req in requests]\n # There is a request for each input queue.\n assert set(self.input_queues) == 
set(queues_with_pending_requests)\n # For each input queue there's exactly one request.\n assert (\n len(queues_with_pending_requests) ==\n len(set(queues_with_pending_requests)))\n\n log.debug(\"{} waiting for next reception\".format(self))\n completed_requests = (yield self.env.any_of(requests))\n received_messages = list(completed_requests.values())\n log.debug(\"{} received {}\".format(\n self, received_messages))\n\n callback(received_messages)\n\n # Only leave the requests which have not been completed yet\n remaining_requests = [\n req for req in requests if req not in completed_requests]\n # Input queues that have been emptied since the last wake up.\n emptied_queues = [req.resource for req in completed_requests]\n # Add new get requests for the input queues that have been emptied.\n new_requests = []\n for input_queue in emptied_queues:\n new_requests.append(input_queue.get())\n requests = remaining_requests + new_requests", "def start_pull_thread(self):\r\n threading.Thread(target=self._pull_thread).start()", "def _pull(self, device_path, stream, progress_callback, adb_info, filesync_info):\n if progress_callback:\n total_bytes = self.stat(device_path)[1]\n\n self._filesync_send(constants.RECV, adb_info, filesync_info, data=device_path)\n for cmd_id, _, data in self._filesync_read_until([constants.DATA], [constants.DONE], adb_info, filesync_info):\n if cmd_id == constants.DONE:\n break\n\n stream.write(data)\n if progress_callback:\n try:\n progress_callback(device_path, len(data), total_bytes)\n except: # noqa pylint: disable=bare-except\n pass", "def run(self):\n self.logger.info(\"starting Dashi consumer\")\n while not self.shutdown:\n self.rpc.listen()", "def run(self):\n if self.init():\n while not self._stop.value:\n try:\n sockets = dict(self.poll.poll(100))\n if (self.sock_reply in sockets and\n sockets[self.sock_reply] == zmq.POLLIN):\n request = self.sock_reply.recv_multipart()\n # do some 'work', update status\n cmd = loads(request[0])\n self.running = 1\n self.coroutine.run(cmd)\n self.running = 0\n self.nb_job_done += 1\n # send reply back to router/queuer\n self.sock_reply.send_multipart(request)\n\n except Exception as e:\n self.log.error('CONSUMER exception {}'.format(e))\n break\n self.sock_reply.close()\n self.finish()\n self.done = True", "def _run(self):\n self.running = True\n\n while self.running:\n try:\n print \"Response monitor running...\"\n\n # Get the message count\n messageCount = self.scHandle.amazonSQSManager.getQueueCount(self.scHandle.amazonSQSManager.responsesQueue)\n\n print '%i messages in queue...' 
% messageCount\n\n # Read a response\n response = self.scHandle.responseManager.getResponseFromResponsesQueue()\n\n # TODO: Do something with the response\n if response:\n print(response)\n\n except Exception, responseMonitorException:\n print \"Response monitor failed with exception %s.\" % str(responseMonitorException)\n\n finally:\n # Wait for a bit\n sleep(self.PAUSE_TIME)", "def run(self):\n self.logger.info(\"start consuming api calls\")\n while not self.shutdown:\n self.rpc.listen()", "def run(updater: Updater):\n logger = getLogger()\n logger.info(\"Starting polling\")\n updater.start_polling()", "def queue_consumer(self, q):\n\n self.status = 'Running...'\n\n while True:\n try:\n msg = q.get_nowait()\n if msg is None:\n break\n self.update_plot(msg)\n except Queue.Empty:\n time.sleep(0.1)\n\n self.status = 'Done'", "def main(host, port, debug=False, max_iters=None, only_proc=False, bulk=False):\n loop = asyncio.get_event_loop()\n asyncio.run(consumer(host, port, max_iters, only_proc, loop, bulk))" ]
[ "0.61305285", "0.5998114", "0.5995203", "0.59786123", "0.5935737", "0.58937454", "0.5872686", "0.5779626", "0.5744613", "0.57423276", "0.5741257", "0.5733788", "0.5603486", "0.5582575", "0.5575072", "0.5562684", "0.5542272", "0.5539083", "0.55306304", "0.55262667", "0.5523324", "0.5520242", "0.5519369", "0.55073273", "0.54959995", "0.54900193", "0.54864496", "0.54523444", "0.54429775", "0.5435014" ]
0.6254242
0
Shut down the streaming pull in the background thread gracefully.
def _stop(self): self.streaming_pull_future.cancel() # Trigger the shutdown. self.streaming_pull_future.result() # Block until the shutdown is complete.
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def shutdown(self):\n self.socket_thread.stop()", "def shutdown(self):\n if not self.stop_event.is_set():\n self.stop_event.set()\n\n if self.pusher_thread:\n self.pusher_thread.join()", "def stop(self):\n self.stream.stop()\n self.running = False", "def stop(self):\n\t\tself.stream.stop_stream()", "def shutdown(self) -> None:\n while not self.message_queue.empty():\n self.message_queue.get()\n run_channels = list(self.channels.keys())\n logger.debug(f\"Closing channels {run_channels}\")\n for channel_id in run_channels:\n self.channels[channel_id].close()\n del self.channels[channel_id]\n self.is_shutdown = True\n\n logger.debug(f\"Cancelling status ping task\")\n try:\n if self._status_task is not None and not self._status_task.cancelled():\n self._status_task.cancel()\n\n async def await_status_task():\n await self._status_task\n self._live_run = None\n\n self.get_live_run().loop_wrap.execute_coro(await_status_task())\n except asyncio.CancelledError:\n pass", "async def shutdown(self):\n if self._unsub_stop:\n self._unsub_stop()\n self._unsub_stop = None\n await self.device.shutdown()", "def shutdown(self):\r\n self.socket.close()\r\n # self.socket_video.close()\r\n self.socket_state.close()", "def stop(self):\n logging.info(\"Shutting down thread...\")\n self.disconnect()\n self.running = False", "def stop(self) -> None:\n self._stream.stop()", "def stop(self):\n self.running = False\n with self.lock:\n self.websockets.clear()\n self.poller.release()", "def stop_stream(self):\n pass", "def shutdown(self):\n self._shutdown(None, None)\n self._running = False", "def stop(self):\n self._transport = None\n self._cleanup()\n self._disconnected_callback = None", "async def shutdown(self):", "def shutdown(self):\n if self.tcp_server is not None:\n self.tcp_server.close()\n # asyncio.Server doesn't automatically close existing\n # sockets, so we manually close them all now\n for stream_writer in self._tcp_clients.values():\n stream_writer.close()\n if self.ws_server is not None:\n self.ws_server.close()\n self._running = False", "def stop(self) -> None:\n self._stream.stop()", "def shutDown(self):\n self.host = None\n self.port = None\n if(self.loop is not None):\n test = asyncio.run_coroutine_threadsafe(self.stopLoop(), self.loop)\n self.thread.join()\n if(self.loop.is_running()):\n self.loop.stop()\n else:\n self.loop.close()\n self.pool.shutDown()\n self.pool = None\n self.loop = None\n self.thread = None", "async def shutdown(self) -> int:", "def shutdown(self):\n ts.client.transport.close()", "def shutdown(self):\n\n reactor.callLater(0, reactor.stop)", "async def shutdown(self):\n\n if self.log_output:\n logging.info('Shutting down ...')\n else:\n print('Shutting down ...')\n\n await self.send_reset()\n\n try:\n self.loop.stop()\n except:\n pass\n try:\n self.loop.close()\n except:\n pass\n sys.exit(0)", "def shutdown(self) -> None:", "def shutdown(self) -> None:", "def shutdown(self):\n self.sock.close()", "def on_close(self):\n self.subscrib.unsubscribe(self.channel)\n self.thread.stop()", "def shutdown(self):\n if self.alive:\n libplasma.disconnect(self.conn)\n self.alive = False", "def shutdown(self):\n self.channel.close()\n self.conn.close()", "def close(self):\n self.stream.stop_stream()\n self.stream.close()\n self.p.terminate()", "def stop(self):\n # indicate that the thread should be stopped\n self.stopped = True\n # wait until stream resources are released (producer thread might be still grabbing frame)\n self.thread.join()", "def shutdown(self, loop):\n\n tasks = [t for t 
in asyncio.all_tasks() if t is not asyncio.current_task()]\n\n for task in tasks:\n task.cancel()\n\n asyncio.gather(*tasks)\n loop.stop()" ]
[ "0.73357934", "0.7293376", "0.71127105", "0.7077128", "0.7065803", "0.6975906", "0.6962216", "0.69553834", "0.6936233", "0.69217414", "0.6897765", "0.6868899", "0.6749683", "0.67419726", "0.6732005", "0.67256993", "0.6673746", "0.6632645", "0.66272914", "0.6618546", "0.6591536", "0.6576887", "0.6576887", "0.6576524", "0.6571436", "0.65523756", "0.65438193", "0.6527498", "0.65176344", "0.6498967" ]
0.79213
0
Process a single alert; run user filter; save alert; acknowledge Pub/Sub msg. Used as the callback for the streaming pull.
def callback(self, message): kwargs = self.callback_kwargs if kwargs['return_msg']: self._callback_return_full_message(message) return # unpack try: alert_dict, metadata_dict = self._unpack(message) except Exception as e: self._log_and_print(f"Error unpacking message: {e}", severity="DEBUG") message.nack() # nack so message does not leave subscription return # run user filter if kwargs["user_filter"] is not None: try: alert_dict = kwargs["user_filter"](alert_dict, **kwargs) except Exception as e: self._log_and_print(f"Error running user_filter: {e}", severity="DEBUG") message.nack() return # run user callback if kwargs["user_callback"] is not None: # get args for user_callback args = [] # requires args are ordered properly here & in user_callback if kwargs.get("send_alert_bytes", False): args.append(message.data) if kwargs.get("send_metadata", False): args.append(metadata_dict) try: # execute user callback success = kwargs["user_callback"](alert_dict, *args, **kwargs) # bool except Exception as e: success = False msg = f"Error running user_callback: {e}" else: if not success: msg = "user_callback reported it was unsuccessful." finally: if not success: self._log_and_print(msg, severity="DEBUG") message.nack() return if alert_dict is not None: # save so stream_alerts can return it, in case the user wants it (broker) self.save_alert(alert_dict) # communicate with the main thread self.queue.put(1) # 1 alert successfully processed # block until main thread acknowledges so we don't ack msgs that get lost if kwargs['max_results'] is not None: self.queue.join() # single background thread => one-in-one-out else: self._log_and_print("alert_dict is None") message.ack()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def stream_alerts(\n self, user_filter=None, user_callback=None, **user_kwargs\n ):\n # callback doesn't accept kwargs. set attribute instead.\n kwargs = self._add_default_kwargs(**user_kwargs)\n self.callback_kwargs = {\n \"user_filter\": user_filter,\n \"user_callback\": user_callback,\n **kwargs,\n }\n\n # avoid pulling down a large number of alerts that don't get processed\n flow_control = pubsub_v1.types.FlowControl(max_messages=kwargs['max_backlog'])\n\n # Google API has a thread scheduler that can run multiple background threads\n # and includes a queue, but I (Troy) haven't gotten it working yet.\n # self.scheduler = ThreadScheduler(ThreadPoolExecutor(max_workers))\n # self.scheduler.schedule(self.callback, lighten_alerts=lighten_alerts)\n\n # start pulling and processing msgs using the callback, in a background thread\n self.streaming_pull_future = self.client.subscribe(\n self.subscription_path,\n self.callback,\n flow_control=flow_control,\n # scheduler=self.scheduler,\n # await_callbacks_on_shutdown=True,\n )\n\n try:\n # Use the queue to count saved messages and\n # stop when we hit a max_messages or timeout stopping condition.\n num_saved = 0\n while True:\n try:\n num_saved += self.queue.get(block=True, timeout=kwargs['timeout'])\n except Empty:\n break\n else:\n self.queue.task_done()\n if kwargs['max_results'] & num_saved >= kwargs['max_results']:\n break\n self._stop()\n\n except (KeyboardInterrupt, Exception):\n self._stop()\n raise\n\n self._log_and_print(f\"Saved {num_saved} messages from {self.subscription_path}\")\n\n return self.database_list", "async def alert(self, entry):\n\n if self.outputs.get('log.enabled'):\n rssalertbot.alerts.alert_log(self, self.outputs.get('log'), entry)\n\n if self.outputs.get('email.enabled'):\n rssalertbot.alerts.alert_email(self, self.outputs.get('email'), entry)\n\n if self.outputs.get('slack.enabled'):\n await rssalertbot.alerts.alert_slack(self, self.outputs.get('slack'), entry)", "def process_alert(alert: Mapping, topic: str):\n candid = alert[\"candid\"]\n object_id = alert[\"objectId\"]\n\n # get worker running current task\n worker = dask.distributed.get_worker()\n alert_worker = worker.plugins[\"worker-init\"].alert_worker\n\n log(f\"{topic} {object_id} {candid} {worker.address}\")\n\n # return if this alert packet has already been processed and ingested into collection_alerts:\n if (\n alert_worker.mongo.db[alert_worker.collection_alerts].count_documents(\n {\"candid\": candid}, limit=1\n )\n == 1\n ):\n return\n\n # candid not in db, ingest decoded avro packet into db\n with timer(f\"Mongification of {object_id} {candid}\", alert_worker.verbose > 1):\n alert, prv_candidates = alert_worker.alert_mongify(alert)\n\n # ML models:\n with timer(f\"MLing of {object_id} {candid}\", alert_worker.verbose > 1):\n scores = alert_worker.alert_filter__ml(alert)\n alert[\"classifications\"] = scores\n\n with timer(f\"Ingesting {object_id} {candid}\", alert_worker.verbose > 1):\n alert_worker.mongo.insert_one(\n collection=alert_worker.collection_alerts, document=alert\n )\n\n # prv_candidates: pop nulls - save space\n prv_candidates = [\n {kk: vv for kk, vv in prv_candidate.items() if vv is not None}\n for prv_candidate in prv_candidates\n ]\n\n # cross-match with external catalogs if objectId not in collection_alerts_aux:\n if (\n alert_worker.mongo.db[alert_worker.collection_alerts_aux].count_documents(\n {\"_id\": object_id}, limit=1\n )\n == 0\n ):\n with timer(\n f\"Cross-match of {object_id} {candid}\", alert_worker.verbose > 1\n 
):\n xmatches = alert_worker.alert_filter__xmatch(alert)\n # CLU cross-match:\n with timer(\n f\"CLU cross-match {object_id} {candid}\", alert_worker.verbose > 1\n ):\n xmatches = {\n **xmatches,\n **alert_worker.alert_filter__xmatch_clu(alert),\n }\n\n # Crossmatch new alert with most recent ZTF_alerts and insert\n with timer(\n f\"ZTF Cross-match of {object_id} {candid}\", alert_worker.verbose > 1\n ):\n xmatches = {\n **xmatches,\n **alert_worker.alert_filter__xmatch_ztf_alerts(alert),\n }\n\n alert_aux = {\n \"_id\": object_id,\n \"cross_matches\": xmatches,\n \"prv_candidates\": prv_candidates,\n }\n\n with timer(f\"Aux ingesting {object_id} {candid}\", alert_worker.verbose > 1):\n alert_worker.mongo.insert_one(\n collection=alert_worker.collection_alerts_aux, document=alert_aux\n )\n\n else:\n with timer(\n f\"Aux updating of {object_id} {candid}\", alert_worker.verbose > 1\n ):\n alert_worker.mongo.db[alert_worker.collection_alerts_aux].update_one(\n {\"_id\": object_id},\n {\"$addToSet\": {\"prv_candidates\": {\"$each\": prv_candidates}}},\n upsert=True,\n )\n\n # Crossmatch exisiting alert with most recent record in ZTF_alerts and update aux\n with timer(\n f\"ZTF Cross-match of {object_id} {candid}\", alert_worker.verbose > 1\n ):\n xmatches_ztf = alert_worker.alert_filter__xmatch_ztf_alerts(alert)\n\n with timer(\n f\"Aux updating of {object_id} {candid}\", alert_worker.verbose > 1\n ):\n alert_worker.mongo.db[alert_worker.collection_alerts_aux].update_one(\n {\"_id\": object_id},\n {\"$set\": {\"ZTF_alerts\": xmatches_ztf}},\n upsert=True,\n )\n\n if config[\"misc\"][\"broker\"]:\n # execute user-defined alert filters\n with timer(f\"Filtering of {object_id} {candid}\", alert_worker.verbose > 1):\n passed_filters = alert_worker.alert_filter__user_defined(\n alert_worker.filter_templates, alert\n )\n if alert_worker.verbose > 1:\n log(\n f\"{object_id} {candid} number of filters passed: {len(passed_filters)}\"\n )\n\n # post to SkyPortal\n alert_worker.alert_sentinel_skyportal(alert, prv_candidates, passed_filters)\n\n # clean up after thyself\n del alert, prv_candidates", "def handler(*_):\n config = _load_config()\n\n # Initialize the SQS client and recieve messages\n stream_alert_sqs = StreamAlertSQSClient(config)\n stream_alert_sqs.get_messages()\n\n if not stream_alert_sqs.received_messages:\n LOGGER.info('No messages recieved, exiting')\n return\n\n unique_buckets = stream_alert_sqs.unique_buckets_from_messages()\n if not unique_buckets:\n LOGGER.error('No s3 buckets to refresh, exiting')\n return\n\n # Initialize the Athena client and run queries\n stream_alert_athena = StreamAlertAthenaClient(config)\n\n # Check that the `streamalert` database exists before running queries\n if not stream_alert_athena.check_database_exists():\n raise AthenaPartitionRefreshError('The `streamalert` database does not exist')\n\n if not stream_alert_athena.repair_hive_table(unique_buckets):\n raise AthenaPartitionRefreshError('Partiton refresh has failed')\n\n stream_alert_sqs.delete_messages()", "def process_alerts(self, classifier, payload, data):\n classifier.classify_record(payload, data)\n if payload.valid:\n alerts = StreamRules.process(payload)\n if alerts:\n self.alerts.extend(alerts)\n else:\n logger.error('Invalid data: %s\\n%s',\n payload,\n json.dumps(payload.raw_record, indent=4))", "def pubsub_consume(event, context):\n pubsub_message = base64.b64decode(event['data']).decode('utf-8')\n event_data = json.loads(pubsub_message)\n\n message = event_data['event']\n channel = 
message['channel']\n\n if message.get('bot_id') is None:\n text = message.get('text')\n\n if \"help\" in text:\n slack_text = \"\\n\\n *How to use the Tableau Slackbot* :robot_face: : \\n\" \\\n \"\\n 1. `list @tableau_server_app`: list views available to output to Slack\" \\\n \"\\n\\n 2. `gimmie @tableau_server_app What If Forecast`: generate the report\"\n response = client.chat_postMessage(\n channel=channel,\n text=slack_text)\n return response\n\n if \"list\" in text:\n slack_text = list('view')\n response = client.chat_postMessage(\n channel=channel,\n text=slack_text)\n return response\n\n if \"gimmie\" in text:\n\n filepath = time.strftime(\"%Y%m%d-%H%M%S\")\n view = event_data['event']['blocks'][0]['elements'][0]['elements'][2]['text']\n view_list = list('view')\n if view.strip() in view_list:\n generate_report(view, filepath)\n\n # Upload view from /tmp to Slack\n response = client.files_upload(\n channels=channel,\n file=\"/tmp/view_{0}.png\".format(filepath),\n title=\"View\"\n )\n\n # Delete the view generated locally\n if os.path.exists(\"/tmp/view_{0}.png\".format(filepath)):\n os.remove(\"/tmp/view_{0}.png\".format(filepath))\n\n else:\n slack_text = \":shrug: See the available views with: `list @tableau_server_app`\"\n response = client.chat_postMessage(\n channel=channel,\n text=slack_text)\n\n return response", "def on_message(self, msg) -> None:\n\n decoded_msg = json.loads(msg)\n message_type = decoded_msg[\"type\"]\n\n if message_type == MSG_SUBCRIPTIONS:\n\n product_ids = decoded_msg[\"channels\"]\n logging.debug(\"Subscriptions: {}\".format(product_ids))\n\n elif message_type == MSG_SNAPSHOT:\n\n product_id = decoded_msg[\"product_id\"]\n self._snapshot(decoded_msg)\n\n # Old best bid and ask doesn't exist yet, this will always set a new bbo\n self.set_if_new_bbo(product_id)\n\n elif message_type == MSG_L2UPDATE:\n\n product_id = decoded_msg[\"product_id\"]\n self.update(decoded_msg)\n\n self.set_if_new_bbo(product_id)\n\n self.event_count += 1", "def _process_redis_message(self, msg, msg_id):\n msg_result = msg['result']\n processed = False\n if msg_id == 'redis-pubsub-init':\n processed = True # Nothing to do really.\n if not processed:\n if self._on_update:\n self._io_loop.add_callback(self._on_update, msg_result)\n else:\n self._logger.warn('Ignoring message (no on_update_callback): %s',\n msg_result)", "def alert(self):\n\n # Get board logger\n board_logger = self.get_board_logger()\n\n # Create new Event object to handle event communication\n event = Event(datetime.now(), self.get_input_status())\n \n event.alert(self.__ip, board_logger)\n\n if (self.get_input_status() == 1):\n \n board_logger.info(\"Alarm state active; starting check alert \" \n + \"cycle for 6 cycles.\")\n \n self.check_alert(event)", "def process(self):\n received_message = PublishMessage(*self.message.value)\n allow, msg = customize.authorize_publication(received_message.topic, self.connection)\n answer = None\n if allow:\n publication_id = create_global_id()\n self.broadcast_messages, response = customize.get_publish_messages(received_message, publication_id, self.connection.id)\n if received_message.options.get(\"acknowledge\"):\n if response is None:\n answer = PublishedMessage(\n request_id=received_message.request_id,\n publication_id=publication_id\n )\n else:\n answer = response\n else:\n answer = ErrorMessage(\n request_id=received_message.request_id,\n request_code=received_message.code,\n uri=\"tornwamp.publish.unauthorized\"\n )\n answer.error(msg)\n self.answer_message = 
answer", "def process(self):\n received_message = SubscribeMessage(*self.message.value)\n allow, msg = customize.authorize_subscription(received_message.topic, self.connection)\n if allow:\n subscription_id = tornwamp_topic.topics.add_subscriber(\n received_message.topic,\n self.connection,\n )\n answer = SubscribedMessage(\n request_id=received_message.request_id,\n subscription_id=subscription_id\n )\n self.broadcast_messages = customize.get_subscribe_broadcast_messages(received_message, subscription_id, self.connection.id)\n else:\n answer = ErrorMessage(\n request_id=received_message.request_id,\n request_code=received_message.code,\n uri=\"tornwamp.subscribe.unauthorized\"\n )\n answer.error(msg)\n self.answer_message = answer", "def consume_user_message(self, message):\n pass", "def __call__ (self, event, payload):\n\n logging.info ('\\n\\nReceived Event: '+ str(event) + '\\nPayload: ' + str(payload))\n\n try:\n\n if event == 'AlertHandler:StartDebug':\n logging.getLogger().setLevel(logging.DEBUG)\n logging.info ('Logging level changed to DEBUG Mode')\n\n elif event == 'AlertHandler:EndDebug':\n logging.getLogger().setLevel(logging.INFO)\n logging.info ('Logging level changed to INFO Mode')\n \n elif event in self.args['AlertEvent'].keys():\n handler = retrieveHandler(self.args['AlertEvent'][event],'AlertHandler')\n handler(payload)\n\n except Exception, ex: \n \n logging.error('Exception Caught while handling the event: ' + str(event) + ' payload: ' + str(payload) ) \n logging.error(str(ex))\n\n return", "def lambda_handler(event, content):\n imap = email_startup()\n status, messages = imap.select('Inbox')\n days_old = input('Enter many days ago do you want to use as the cutoff?: ')\n new_date = get_days_old(days_old)\n messages = apply_filter(imap, new_date)\n initial_unread = get_unread_count(imap)\n print(f'Initial unread emails: {initial_unread}')\n print(f'Emails to be filter: {len(messages)}')\n a_pause = input('Continue by pressing enter.')\n\n print(f'Processing {len(messages)} unread emails from before {new_date}')\n print(\"=\"*100)\n process_messages(imap, messages)\n print(\"=\"*100)\n\n # Determine results from script\n post_unread = get_unread_count(imap)\n print(f'Processed Emails: {initial_unread - post_unread}')\n print(f'After processing, there are {post_unread} unread emails.')\n\n # close the connection and logout\n imap.close()\n imap.logout()", "def application_message(self, bus, msg):\n\t\tmsgtype = msg.structure.get_name()\n\t\tif msgtype == 'partial_result':\n\t\t\tself.partial_result(msg.structure['hyp'], msg.structure['uttid'])\n\t\telif msgtype == 'result':\n\t\t\tself.final_result(msg.structure['hyp'], msg.structure['uttid'])\n\t\t\tself.pipeline.set_state(gst.STATE_PAUSED)", "def alert(self, alert_str):\n # Make sure alerts have the same type\n alert_str = str(alert_str)\n self._output_object.add_alert(\n html_tag(plain_to_html(alert_str), alert_str, self.proc)\n )\n self.alerts.append((alert_str, self.proc))", "def on_message(client, userdata, msg):\n if msg.topic == \"adjudication/pass\":\n print(\"'adjudication/pass' message received!\")\n print(\"Money dispensed!\")\n sys.exit()\n\n elif msg.topic == \"adjudication/fail_face\":\n print(\"'adjudication/fail_face' message received!\")\n verification = raw_input(\"Facial recognition failed. 
Please enter your password to authenticate your identity: \")\n client.publish(\"local/response\", payload=verification, qos=2, retain=False)\n print(\"Test...response sent\")\n\n elif msg.topic == \"adjudication/terminate_face\": # changed from fail info\n print(\"'adjudication/terminate_face' message received!\")\n print(\"Facial recognition failed. We are unable to dispense money.\")\n sys.exit()\n\n elif msg.topic == \"adjudication/terminate_info\": # changed from fail info\n print(\"'adjudication/terminate_info' message received!\")\n print(\"Facial recognition was marginal and identity verification failed. We are unable to dispense money.\")\n sys.exit()\n\n elif msg.topic == \"adjudication/usernotfound\":\n print(\"'adjudication/usernotfound' message received!\")\n print(\"User is not in database. We are unable to dispense money.\")\n sys.exit() \n\n else:\n print(\"Message with unspecificied topic received from local client: \", msg.topic)\n print(\"###### No action taken on message ######\")", "def _subscribe_update_callback(self, client, userdata, message):\n logger.info('Message recieved from {} topic'.format(message.topic))\n payload = message.payload\n try:\n payload_dict = json.loads(payload)\n light_data = payload_dict['current']['state']['desired']\n if self.light.needs_updating(light_data):\n self.light.update_lights(light_data)\n reported_payload = {\n 'state': {\n 'reported': self.light.current_settings()\n }\n }\n JSON_payload = json.dumps(reported_payload)\n self.shadowClient.publish(update_topic, JSON_payload, 0)\n except ValueError:\n logger.error('Value error')\n logger.info(payload)\n except Exception as e:\n logger.error(e.message)", "def send_alerts(self, env, payload):\n if self.alerts:\n if env['lambda_alias'] == 'development':\n logger.info('%s alerts triggered', len(self.alerts))\n logger.info('\\n%s\\n', json.dumps(self.alerts, indent=4))\n else:\n StreamSink(self.alerts, env).sink()\n elif payload.valid:\n logger.debug('Valid data, no alerts')", "def check_alert(self, event):\n \n # Get board logger\n board_logger = self.get_board_logger()\n\n # Loop for an hour and continue to alert every ten minutes \n current_time = datetime.now()\n end_time = current_time + timedelta(0, 60)\n # end_time = current_time + timedelta(hours=1)\n\n alarm_counter = 0\n while current_time < end_time:\n # Sleep for 10 minutes\n sleep(10);\n #sleep(600);\n\n # Prevent race condition between Board input_status and check_alert \n if GPIO.input(self.__pin) == 1:\n\n # Log alarm cycle\n alarm_counter += 1\n board_logger.info(\"Alarm Cycle #%s: Initiating event \" \n + \"alert.\", str(alarm_counter))\n\n # Call Event object's alert method\n event.alert(self.__ip, board_logger)\n\n # Get current time\n current_time = datetime.now()\n \n else:\n # Input status is 0 indicating recovery; Break out of loop and \n # return to main thread \n board_logger.info(\"Alarm state recovery.\") \n break\n \n # End of alert cycle; Return to main thread\n status = \"ALARM\" if self.get_input_status() else \"RECOVERY\"\n board_logger.info(\"End check alarm cycle. 
Current pin input \"\n + \"status is %s.\", status)", "def remediate_phish(alerts):\n # make sure we can load all of the alerts\n for alert in alerts:\n if not alert.load():\n raise RuntimeError(\"unable to load alert {}\".format(str(alert)))\n\n # hard coded type\n # XXX would like to map types to remediation functions to call in aggregate\n if alert.alert_type != 'brotex - smtp - v2' and alert.alert_type != 'mailbox':\n raise RuntimeError(\"alert {} is not a support alert type of phishing remediation\".format(str(alert)))\n\n emails = [] # list of dicts returned by _create_remediation_email\n brotex_alert_count = 0 # keep track of how many brotex alerts we're remediating\n\n #\n # Office365 EWS Proxy Remediation\n #\n\n from saq.modules.email import EmailAnalysis, KEY_MESSAGE_ID, KEY_ENV_RCPT_TO, KEY_TO\n targets = [] # of tuple(message_id, recipient)\n results = {} # key = alert.uuid, value = str\n\n for alert in alerts:\n email_file = None\n for o in alert.observables:\n if o.type == F_FILE and (o.has_directive(DIRECTIVE_ORIGINAL_EMAIL) or o.value.endswith('email.rfc822')):\n email_file = o\n break\n\n if email_file is None:\n logging.warning(\"expected a single file observable in the alert for email remediation, \"\n \"but got {}\".format(len(email_file)))\n results[alert.uuid] = 'unexpected F_FILE type observables in main alert'\n continue\n\n # then get the EmailAnalysis for this email\n analysis = email_file.get_analysis(EmailAnalysis)\n if not analysis:\n loggging.warning(\"cannot get EmailAnalysis for {} in {}\".format(email_file, alert))\n results[alert.uuid] = 'cannot find email analysis'\n continue\n\n message_id = None\n env_rcpt_to = None\n mail_to = None\n recipient = None\n\n if KEY_MESSAGE_ID in analysis.email:\n message_id = analysis.email[KEY_MESSAGE_ID]\n\n if KEY_ENV_RCPT_TO in analysis.email:\n env_rcpt_to = analysis.email[KEY_ENV_RCPT_TO]\n # if we didn't find it there then look in the main alert\n # XXX I really don't how all this information is all over the place\n elif 'envelope rcpt to' in alert.details:\n env_rcpt_to = alert.details['envelope rcpt to']\n if isinstance(env_rcpt_to, str):\n env_rcpt_to = [env_rcpt_to]\n #logging.debug(\"MARKER: yes I needed this\")\n\n #logging.debug(\"MARKER: {}\".format(env_rcpt_to))\n\n if KEY_TO in analysis.email:\n mail_to = analysis.email[KEY_TO]\n\n if not message_id:\n logging.error(\"cannot find Message-ID for {} in {}\".format(email_file, alert))\n results[alert.uuid] = 'cannot find Message-ID'\n continue\n\n if env_rcpt_to:\n recipient = env_rcpt_to[0] # there should only be one\n logging.debug(\"using env_rcpt_to value {} as recipient for {} in {}\".format(recipient, email_file, alert))\n elif mail_to:\n recipient = mail_to[0] # XXX I need to look at all of them and pull out the one that matches a domain we own\n logging.debug(\"using mail_to value {} as recipient for {} in {}\".format(recipient, email_file, alert))\n\n if not recipient:\n logging.error(\"cannot determine recipient for {} in {}\".format(email_file, alert))\n results[alert.uuid] = 'cannot determine recipient'\n continue\n\n targets.append((message_id, recipient))\n\n result = _remediate_email_o365_EWS(targets)\n success_count = 0\n messages = [] # of str\n for message_id, recipient, result_code, result_text in result:\n if result_code == 200:\n success_count += 1\n\n # on 1/9/2017 we changed the format of the output\n # the result_text is now a JSON array [ {\"address\": EMAIL_ADDRESS, \"code\": CODE, \"message\": MESSAGE }, ... 
]\n decoded_result_text = json.loads(result_text)\n for entry in decoded_result_text:\n messages.append('message-id {} to {} error code {} message {}'.format(\n message_id, entry['address'], entry['code'], entry['message']))\n else:\n messages.append('message-id {} to {} error code {} message {}'.format(message_id, recipient, result_code, result_text))\n\n messages.insert(0, 'remediated {} out of {} emails from office365'.format(success_count, len(alerts)))\n return messages", "def subscribe(receiver, catchup):", "def send_alert(alert_object):\n # Placeholder -- alert creation date UTC\n # Eventually this will come from the alert\n\n if alert_object.sent:\n raise RuntimeError(f'Refusing to send alert '\n f'{alert_object.alert[\"objectId\"]},'\n f' alert has already been sent out.')\n\n\n ac = alert_object.created_at\n alert_date = f'{ac.year}{ac.month:02d}{ac.day:02d}'\n alert = alert_object.to_dict()\n\n imtype = alert['candidate']['alert_type']\n if imtype == 'single':\n schema = combine_schemas(\n [\"schema_single/candidate.avsc\", \"schema_single/light_curve.avsc\",\n \"schema_single/alert.avsc\"])\n topicname = \"ztf_%s_programid2_zuds\" %alert_date\n send(topicname, [alert], schema)\n elif imtype == 'stack':\n schema = combine_schemas(\n [\"schema_stack/candidate.avsc\", \"schema_stack/light_curve.avsc\",\n \"schema_stack/alert.avsc\"])\n topicname = \"ztf_%s_programid2_zuds_stack\" %alert_date\n send(topicname, [alert], schema)", "def alert(bot, update, args, job_queue):\n continue_on = 1\n chat_id = update.message.chat_id\n message_id = update.message.message_id\n user = str(update.message.from_user)\n if not args:\n update.message.reply_text('please enter a time')\n return\n if '|' in args:\n message = ' '.join(args)\n argstemp = message.split('|')\n due = alerts.lastDitchAttempt(argstemp[0])\n if due > 0:\n argstemp.pop(0)\n message = ' '.join(argstemp)\n continue_on = -1\n if continue_on == 1:\n due = alerts.parseADate(args[0])\n if due <= 0:\n due = alerts.regexmatch(args[0])\n args.pop(0)\n message = ' '.join(args)\n if due <= 0:\n update.message.reply_text('Sorry that is not a valid time')\n return\n\n # Add job to queue\n my_context = '' + str(chat_id) + ':' + str(message_id)\n job = Job(alarm, due, repeat=False, context=my_context)\n USERS[my_context] = user\n MESSAGES[my_context] = message\n TIMERS[my_context] = job\n job_queue.run_once(alarm, due, context=my_context)\n current_time = datetime.now()\n due = int((current_time - datetime(1970, 1, 1)).total_seconds() + due)\n fileIO.writeAlertJob(\"alerts\", str(chat_id),\n str(message_id), user, due, message)\n set_for = alerts.timeSetFor(due)\n bot.sendMessage(update.message.chat_id, 'Timer successfully set for: ' + str(set_for) +\n '\\nYour ID is:' + str(message_id))", "def consume_delivery_report(self, event):\n pass", "def handle_message(event):\n intention = parse_intention(event.message.text)\n if intention == config.QUERY_INTENTION:\n handle_query_weather_message(event)\n elif intention == config.SUBSCRIBE_INTENTION:\n handle_subscribe_message(event)\n else:\n handle_unknown_message(event)", "def scan_for_message(self):\n\n while True:\n msg = self.consumer.poll(0.1)\n if msg is None:\n continue\n elif not msg.error():\n message = json.loads(msg.value().decode('utf8'))\n print('Received message: {0}'.format(message))\n if message['risk_level'] >= 4:\n user = User(message['user_id'].replace(' ', '.'))\n user.handle()\n elif msg.error().code() == KafkaError._PARTITION_EOF:\n print('End of partition reached {0}/{1}'\n 
.format(msg.topic(), msg.partition()))\n else:\n print('Error occured: {0}'.format(msg.error().str()))", "def fusion_api_update_alert(self, body, uri, api=None, headers=None):\n return self.alert.update(body, uri, api, headers)", "def on_message(self, userdata, message):\n logging.debug(f\"Message arrived from {message.topic}\")\n self.process(userdata, message)", "def process_sink_msg(self):\n logging.debug('Received message on the sink socket')\n \n msg = self.sink_socket.recv_json()\n \n logging.debug('Message: %s', msg)\n\n # Publish the results to the clients using the\n # request id of the service request as the topic\n self.result_pub_socket.send_unicode(msg['uuid'], zmq.SNDMORE)\n self.result_pub_socket.send_json(msg)" ]
[ "0.6618315", "0.5814011", "0.5618752", "0.5581755", "0.53606147", "0.52934784", "0.52241296", "0.5218121", "0.5183818", "0.5162959", "0.5151981", "0.5137493", "0.5113881", "0.5081569", "0.50672615", "0.5028629", "0.50246", "0.5020632", "0.50062454", "0.5003933", "0.50038975", "0.49729294", "0.49576017", "0.4952359", "0.49462947", "0.49146858", "0.49032512", "0.4897065", "0.4893926", "0.4892661" ]
0.6836282
0
Save the alert to a database.
def save_alert(self, alert): self.database_list.append(alert) # fake database for demo
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def save(self):\n self.db.commit()", "def save(self):\n self.__db.commit()", "def save_db(self) -> None:", "def persist_alert(self, alert_type):\n session = db_session()\n\n alert = models.Alert()\n alert.alert_type = alert_type\n alert.entity_id = self.config.entity_id\n\n # TODO change camera_id to device_id\n alert.camera_id = self.config.id\n alert.source_id = self.config.__class__.__name__\n\n session.add(alert)\n session.commit()\n\n return alert.id", "def save_database(app):\n app.database().save()\n app.status.message('Finished saving..')", "def save(self, db):\n pass", "def save_db(self) -> None:\n self.connection.commit()", "def persist_alert(connection, alert, deployed_on):\n start_time = time.perf_counter()\n cursor = connection.cursor()\n vals = (\n ALERT_NOT_SENT,\n alert.event_time,\n alert.created_by.type,\n alert.created_by.guid,\n deployed_on,\n alert.location.longitude,\n alert.location.latitude,\n alert.face_detection_model.name,\n alert.face_detection_model.guid,\n alert.face_detection_model.threshold,\n alert.mask_classifier_model.name,\n alert.mask_classifier_model.guid,\n alert.mask_classifier_model.threshold,\n alert.probability,\n alert.image.format,\n alert.image.size.width,\n alert.image.size.height,\n alert.image.data,\n )\n cursor.execute(\n \"\"\"INSERT INTO \n alert (sent, created_at, device_type, device_id, device_deployed_on, longitude, latitude, face_model_name, face_model_guid, face_model_threshold, mask_model_name, mask_model_guid, mask_model_threshold, probability, image_format, image_width, image_height, image_data) \n VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?) \"\"\",\n vals,\n )\n connection.commit()\n print(f\"Alert persisted on local storage (transaction took {(time.perf_counter() - start_time) * 1000} ms)\\n\")", "def save(self):\n db.session.commit()", "def save(self):\n logging.debug(\"sychronizing db\")\n self._db.sync()", "def save(self):\n self.session.commit()", "def save():", "def save(self):\n self.__session.commit()", "def save(self):\n self.__session.commit()", "def save(self):\n self.__session.commit()", "def save(self):\n self.__session.commit()", "def save(self):\n self.__session.commit()", "def save(self):\n self.__session.commit()", "def save(self):\n self.__session.commit()", "def save(self):\n self.__session.commit()", "def save(self):\n\n self.__session.commit()", "def save(self):\n\n self.__session.commit()", "def Save(self) -> None:\n self.__conn.commit()", "def save():\n pass", "def save_event(self, data):\n rdb.table(self.rdb_table).insert(data)", "def save_in_db(self):\n self.sql_database.table_name = self.table_db\n self.sql_database.db_name = self.db\n if self.sql_database.insert_item(text_path=self.path, word_first=self.word_1.get(),\n word_second=self.word_2.get(),\n word_third=self.word_3.get(), word_fourth=self.word_4.get(),\n word_fifth=self.word_5.get()):\n msg.showinfo(message=\"Done\")", "def save(self):\n if not connection.connected:\n raise Exception('Not connected to the database.')\n if not self._retrieved:\n self.insert()\n self._retrieved = True\n else:\n self.update()", "def save(self):\n # TODO (Pierre): code", "def Save(self):\n\n self._persistentHandler.Save()", "def save(self)->None:\n database.cursor.execute(\"INSERT INTO meetups(topic,happening_date,tags,location,images,body) VALUES(%s,%s,%s,%s,%s,%s) RETURNING id\", (\n self.topic,\n self.happening_on,\n self.tags,\n self.location,\n self.images,\n self.body\n ))\n super().save()" ]
[ "0.70432657", "0.70321536", "0.6991143", "0.6970373", "0.6906388", "0.6876018", "0.68100935", "0.674678", "0.67295766", "0.67234564", "0.66731614", "0.661583", "0.660977", "0.660977", "0.660977", "0.660977", "0.660977", "0.660977", "0.660977", "0.660977", "0.6566645", "0.6566645", "0.65451103", "0.64967144", "0.64102894", "0.6367097", "0.63501215", "0.6345938", "0.6300189", "0.6295267" ]
0.84834534
0
Try to create the subscription.
def _create_subscription(self): try: self.client.create_subscription( name=self.subscription_path, topic=self.topic_path ) except NotFound: # suitable topic does not exist in the Pitt-Google project raise ValueError( ( f"A subscription named {self.subscription_name} does not exist" "in the Google Cloud Platform project " f"{settings.GOOGLE_CLOUD_PROJECT}, " "and one cannot be automatically create because Pitt-Google " "does not publish a public topic with the same name." ) ) else: self._log_and_print(f"Created subscription: {self.subscription_path}")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def handle_create(self):\n subscription = self.client().subscription(\n self.properties[self.QUEUE_NAME],\n subscriber=self.properties[self.SUBSCRIBER],\n ttl=self.properties[self.TTL],\n options=self.properties[self.OPTIONS]\n )\n self.resource_id_set(subscription.id)", "def test_create_subscription(self):\n pass", "def create_subscription_if_not_exists(self):\n create_subscription_if_not_exists(self.project_id, self.topic_name, self.subscription_name)", "def post_create_subscription(\n self, response: pubsub.Subscription\n ) -> pubsub.Subscription:\n return response", "def test_create_subscription_template(self):\n pass", "def touch_subscription(self):\n try:\n # check if subscription exists\n sub = self.client.get_subscription(subscription=self.subscription_path)\n\n except NotFound:\n self._create_subscription()\n\n else:\n self.topic_path = sub.topic\n print(f\"Subscription exists: {self.subscription_path}\")\n print(f\"Connected to topic: {self.topic_path}\")", "def test_issue_add_subscription(self):\n pass", "def test_create_response_descriptor_subscriptions_subscription_subscription_resource(self):\n pass", "def create_subscription(self,\n body):\n\n return super().new_api_call_builder.request(\n RequestBuilder().server('default')\n .path('/v2/subscriptions')\n .http_method(HttpMethodEnum.POST)\n .header_param(Parameter()\n .key('Content-Type')\n .value('application/json'))\n .body_param(Parameter()\n .value(body))\n .header_param(Parameter()\n .key('accept')\n .value('application/json'))\n .body_serializer(APIHelper.json_serialize)\n .auth(Single('global'))\n ).response(\n ResponseHandler()\n .deserializer(APIHelper.json_deserialize)\n .is_api_response(True)\n .convertor(ApiResponse.create)\n ).execute()", "def create_subscription(connection, project_id, body, fields=None, error_msg=None):\n return connection.post(\n url=f'{connection.base_url}/api/subscriptions',\n params={'fields': fields},\n headers={'X-MSTR-ProjectID': project_id},\n json=body,\n )", "def test_aws_service_api_validate_subscription_post(self):\n pass", "def test_create_subscription(self):\n try:\n self.arb.create_subscription(\n trial_occurrences=4,\n interval_length=1,\n interval_unit=arb.MONTHS_INTERVAL,\n start_date=u\"2008-09-09\",\n amount=39.99,\n card_number=u\"4222222222222\",\n expiration_date=u\"2009-10\",\n bill_first_name=u\"Michael\",\n bill_last_name=u\"Pool\"\n )\n except KeyError:\n pass\n self.arb.create_subscription(\n trial_amount=5.00,\n trial_occurrences=4,\n interval_length=1,\n interval_unit=arb.MONTHS_INTERVAL,\n start_date=u\"2008-09-09\",\n amount=39.99,\n card_number=u\"4222222222222\",\n expiration_date=u\"2009-10\",\n bill_first_name=u\"Michael\",\n bill_last_name=u\"Pool\"\n )\n self.arb.create_subscription(\n trial_amount=5.00,\n trial_occurrences=4,\n interval_length=1,\n interval_unit=arb.MONTHS_INTERVAL,\n start_date=u\"2008-09-09\",\n amount=39.99,\n card_number=u\"4222222222222\",\n expiration_date=u\"2009-10\",\n ship_first_name=u\"valentino\",\n first_name=u\"valentino\",\n bill_first_name=u\"valentino\",\n bill_last_name=u\"Pool\",\n driver_number=u\"55555\",\n driver_state=u\"CA\",\n driver_birth=u\"1990-09-09\"\n )", "def test_get_subscription(self):\n pass", "def create_subscription(self, user, standard):\r\n\r\n subscription = self.create(\r\n user=user,\r\n standard=standard,\r\n )\r\n\r\n return subscription", "def create_pubsub_subscription(client, project, topic, name):\n topic_name = pubsub.topic_name(project, topic)\n full_name = 
pubsub.subscription_name(project, name)\n if client.get_subscription(full_name):\n return\n\n client.create_subscription(full_name, topic_name)", "def test_successful_subscriptions_exists_subbed(self) -> None:\n self.assertNotEqual(len(self.streams), 0) # necessary for full test coverage\n self.helper_subscriptions_exists(self.streams[0], True, True)", "def test_issue_subscriptions(self):\n pass", "def create_subscription_in_snuba(query_subscription_id, **kwargs):\n try:\n subscription = QuerySubscription.objects.get(id=query_subscription_id)\n except QuerySubscription.DoesNotExist:\n metrics.incr(\"snuba.subscriptions.create.subscription_does_not_exist\")\n return\n if subscription.status != QuerySubscription.Status.CREATING.value:\n metrics.incr(\"snuba.subscriptions.create.incorrect_status\")\n return\n if subscription.subscription_id is not None:\n metrics.incr(\"snuba.subscriptions.create.already_created_in_snuba\")\n # This mostly shouldn't happen, but it's possible that a subscription can get\n # into this state. Just attempt to delete the existing subscription and then\n # create a new one.\n try:\n _delete_from_snuba(\n QueryDatasets(subscription.snuba_query.dataset), subscription.subscription_id\n )\n except SnubaError:\n logger.exception(\"Failed to delete subscription\")\n\n subscription_id = _create_in_snuba(subscription)\n subscription.update(\n status=QuerySubscription.Status.ACTIVE.value, subscription_id=subscription_id\n )", "def test_existing_subscriptions_autosubscription(self) -> None:\n stream_name = \"new_public_stream\"\n cordelia = self.example_user(\"cordelia\")\n self.common_subscribe_to_streams(cordelia, [stream_name], invite_only=False)\n result = self.client_post(\n \"/json/subscriptions/exists\", {\"stream\": stream_name, \"autosubscribe\": \"false\"}\n )\n response_dict = self.assert_json_success(result)\n self.assertIn(\"subscribed\", response_dict)\n self.assertFalse(response_dict[\"subscribed\"])\n\n result = self.client_post(\n \"/json/subscriptions/exists\", {\"stream\": stream_name, \"autosubscribe\": \"true\"}\n )\n response_dict = self.assert_json_success(result)\n self.assertIn(\"subscribed\", response_dict)\n self.assertTrue(response_dict)", "def create(self, validated_data):\n subscription = super().create(validated_data)\n subscription.send_verification_email()\n return subscription", "def create_subscription(chid, use_time=False, use_ctrl=False,\n mask=None, callback=None):\n mask = mask or DEFAULT_SUBSCRIPTION_MASK\n\n ftype = promote_type(chid, use_ctrl=use_ctrl, use_time=use_time)\n\n uarg = ctypes.py_object(callback)\n evid = ctypes.c_void_p()\n poll()\n ret = libca.ca_create_subscription(ftype, 0, chid, mask,\n _CB_EVENT, uarg, ctypes.byref(evid))\n PySEVCHK('create_subscription', ret)\n\n poll()\n return (_CB_EVENT, uarg, evid)", "def do_create_subscription(csp: CloudProviderInterface, environment_id=None):\n environment = Environments.get(environment_id)\n payload = build_subscription_payload(environment)\n try:\n csp.create_subscription(payload)\n except GeneralCSPException as e:\n app.logger.warning(\n \"Unable to create subscription for environment %s.\", environment.id,\n )\n raise e", "def post(self):\n data = request.json\n return new_subscription(data=data)", "def add_subscription(self):\n schema = schemas.load(schemas.Subscription, self.request)\n subscription = self.customer.add_subscription(**schema)\n self.request.db.flush()\n self.request.response.status_int = 201\n return {'abonnement': subscription}", "def subscribePost() -> 
object:\n log = logging.getLogger(__name__)\n db = Db()\n\n body = request.get_json()\n\n if body is None:\n return jsonify({\"error\": \"json body is required\"}), HTTPStatus.HTTPStatus.BAD_REQUEST\n\n if not('datasetId') in body:\n return jsonify({\"error\": \"datasetId is a required attribute\"}), HTTPStatus.HTTPStatus.BAD_REQUEST\n\n if not('notificationUrl') in body:\n return jsonify({\"error\": \"notificationUrl is a required attribute\"}), HTTPStatus.HTTPStatus.BAD_REQUEST\n\n\n subscription = db.Subscriptions(\n datasetId=body['datasetId'],\n notificationUrl=body['notificationUrl']\n )\n\n subscription.save()\n\n subscription = json.loads(subscription.to_json())\n subscription['id'] = subscription['_id'][\"$oid\"]\n subscription.pop(\"_id\")\n log.debug(\"subscription created\")\n\n return jsonify(subscription), HTTPStatus.CREATED", "def create_subscription(post, user, sub_type=None, update=False):\n subs = Subscription.objects.filter(post=post.root, user=user)\n sub = subs.first()\n\n default = Subscription.TYPE_MAP.get(user.profile.message_prefs,\n Subscription.LOCAL_MESSAGE)\n\n empty = sub_type is None\n # Get the current sub type from what's given or the existing sub\n sub_type = None if empty else sub_type\n # No type has been given so default\n sub_type = sub_type or default\n\n # Ensure the sub type is not set to something wrote\n if sub and update:\n # Update an existing subscription\n sub.type = sub_type\n sub.save()\n else:\n # Drop all existing subscriptions for the user by default.\n subs.delete()\n Subscription.objects.create(post=post.root, user=user, type=sub_type)\n\n # Recompute subscription count\n subs_count = Subscription.objects.filter(post=post.root).exclude(type=Subscription.NO_MESSAGES).count()\n\n # Update root subscription counts.\n Post.objects.filter(pk=post.root.pk).update(subs_count=subs_count)", "async def create_subscription(self, installed_app_id: str, data: dict) -> dict:\r\n return await self.post(\r\n API_SUBSCRIPTIONS.format(installed_app_id=installed_app_id), data\r\n )", "def test_subscribe(self):\n self.service.clientConnected()\n self.service.subscribe(u'url', None)\n pubsubClient = self.service.pubsubClient\n self.assertIn(u'url', pubsubClient.subscriptions)", "def test_subscriptions_does_not_exist(self) -> None:\n random_streams = self.make_random_stream_names(self.streams)\n self.assertNotEqual(len(random_streams), 0) # necessary for full test coverage\n self.helper_subscriptions_exists(random_streams[0], False, False)", "def pre_create_subscription(\n self, request: pubsub.Subscription, metadata: Sequence[Tuple[str, str]]\n ) -> Tuple[pubsub.Subscription, Sequence[Tuple[str, str]]]:\n return request, metadata" ]
[ "0.79522216", "0.79236335", "0.766891", "0.7175027", "0.7131221", "0.7044523", "0.696727", "0.67897534", "0.6780519", "0.6774319", "0.6662607", "0.6634457", "0.6628242", "0.6611634", "0.6575175", "0.6550009", "0.65446806", "0.6515999", "0.64871514", "0.6451727", "0.64171314", "0.63862836", "0.6333727", "0.62571573", "0.62058526", "0.6185877", "0.6185086", "0.61781245", "0.61360794", "0.61350137" ]
0.8017699
0
Delete the subscription. This is provided for the user's convenience, but it is not necessary and is not automatically called. Storage of unacknowledged Pub/Sub messages does not result in fees. Unused subscriptions automatically expire; default is 31 days.
def delete_subscription(self): try: self.client.delete_subscription(subscription=self.subscription_path) except NotFound: pass else: self._log_and_print(f'Deleted subscription: {self.subscription_path}')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def delete_subscription(self, subscription_id):\n url = '{}/v2/subscriptions/{}'.format(self.url, subscription_id)\n print(url)\n r = requests.delete(url, headers=self.headers_v2)\n if r.status_code == 204:\n return 'success'\n return r.json()", "def delete(self, orgname, subscription_id):\n permission = AdministerOrganizationPermission(orgname)\n if permission.can():\n try:\n organization = model.organization.get_organization(orgname)\n except InvalidOrganizationException:\n return (\"Organization not valid\", 400)\n\n model.organization_skus.remove_subscription_from_org(organization.id, subscription_id)\n return (\"Deleted\", 204)\n abort(401)", "async def delete_subscription(self, installed_app_id: str, subscription_id: str):\r\n return await self.delete(\r\n API_SUBSCRIPTION.format(\r\n installed_app_id=installed_app_id, subscription_id=subscription_id\r\n )\r\n )", "def _DeleteCalendarSubscription(self,\n id='python.gcal.test%40gmail.com'):\n print 'Deleting the calendar subscription with ID: %s' % id\n calendar_url = (\n 'http://www.google.com/calendar/feeds/default/allcalendars/full/%s' % id)\n calendar_entry = self.cal_client.GetCalendarEntry(calendar_url)\n self.cal_client.Delete(calendar_entry.GetEditLink().href)", "def __call__(\n self,\n request: pubsub.DeleteSubscriptionRequest,\n *,\n retry: OptionalRetry = gapic_v1.method.DEFAULT,\n timeout: Optional[float] = None,\n metadata: Sequence[Tuple[str, str]] = (),\n ):\n\n http_options: List[Dict[str, str]] = [\n {\n \"method\": \"delete\",\n \"uri\": \"/v1/{subscription=projects/*/subscriptions/*}\",\n },\n ]\n request, metadata = self._interceptor.pre_delete_subscription(\n request, metadata\n )\n pb_request = pubsub.DeleteSubscriptionRequest.pb(request)\n transcoded_request = path_template.transcode(http_options, pb_request)\n\n uri = transcoded_request[\"uri\"]\n method = transcoded_request[\"method\"]\n\n # Jsonify the query params\n query_params = json.loads(\n json_format.MessageToJson(\n transcoded_request[\"query_params\"],\n including_default_value_fields=False,\n use_integers_for_enums=True,\n )\n )\n query_params.update(self._get_unset_required_fields(query_params))\n\n query_params[\"$alt\"] = \"json;enum-encoding=int\"\n\n # Send the request\n headers = dict(metadata)\n headers[\"Content-Type\"] = \"application/json\"\n response = getattr(self._session, method)(\n \"{host}{uri}\".format(host=self._host, uri=uri),\n timeout=timeout,\n headers=headers,\n params=rest_helpers.flatten_query_params(query_params, strict=True),\n )\n\n # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception\n # subclass.\n if response.status_code >= 400:\n raise core_exceptions.from_http_response(response)", "def remove_subscription(\n connection, subscription_id, project_id, error_msg=None, exception_type=None\n):\n response = connection.delete(\n url=connection.base_url + '/api/subscriptions/' + subscription_id,\n headers={'X-MSTR-ProjectID': project_id},\n )\n if not response.ok:\n if error_msg is None:\n error_msg = f\"Error unsubscribing Subscription {subscription_id}\"\n if exception_type is None:\n response_handler(response, error_msg)\n else:\n exception_handler(error_msg, exception_type)\n return response", "def delete_subscription(self, subscription_name: str, topic_name: str) -> None:\n if subscription_name is None:\n raise TypeError(\"Subscription name cannot be None.\")\n if topic_name is None:\n raise TypeError(\"Topic name cannot be None.\")\n\n with self.get_conn() as service_mgmt_conn:\n 
self.log.info(\"Deleting Subscription %s\", subscription_name)\n service_mgmt_conn.delete_subscription(topic_name, subscription_name)", "def delete_subscription_from_snuba(query_subscription_id, **kwargs):\n try:\n subscription = QuerySubscription.objects.get(id=query_subscription_id)\n except QuerySubscription.DoesNotExist:\n metrics.incr(\"snuba.subscriptions.delete.subscription_does_not_exist\")\n return\n\n if subscription.status not in [\n QuerySubscription.Status.DELETING.value,\n QuerySubscription.Status.DISABLED.value,\n ]:\n metrics.incr(\"snuba.subscriptions.delete.incorrect_status\")\n return\n\n if subscription.subscription_id is not None:\n _delete_from_snuba(\n QueryDatasets(subscription.snuba_query.dataset), subscription.subscription_id\n )\n\n if subscription.status == QuerySubscription.Status.DELETING.value:\n subscription.delete()\n else:\n subscription.update(subscription_id=None)", "def test_delete_subscription(self):\n pass", "def unsubscribe(self, subscription):\n request = Request(\n method='delete',\n endpoint='/streams/subcription/{}'.format(subscription)\n )\n\n def response_handler(resp):\n code = resp.status_code\n if resp.is_success:\n return 'OK'\n elif code == 403:\n raise ex.StreamPermissionError(resp, request)\n raise ex.StreamConnectionError(resp, request)\n\n return self._execute(request, response_handler)", "async def unsubscribe(self):\n LOGGER.info('Subscription removed')\n await self._ros.send(self._unsubscribe_msg)", "def test_delete_on_background_response_descriptor_subscriptions_subscription_subscription_resource(self):\n pass", "async def unsubscribe(self, topic: str, subscription_id: int = None) -> None:\n ...", "def desubscribe(self, subscription):\n try:\n if isinstance(subscription, Subscription):\n sub = DeSubscribe(subscription, self.__pool, self.myAddress)\n self.send(self.__pool, sub)\n except Exception:\n handle_actor_system_fail()", "def test_issue_delete_subscription(self):\n pass", "def perform_destroy(self, instance):\n instance.subscription_set.filter(owner=self.request.user).delete()", "def remove(self, subscription_id):\n payload = {\n \"subscriptionId\": subscription_id,\n }\n qry = ServiceOperationQuery(self, \"Remove\", payload, None, None, None)\n self.context.add_query(qry)\n return self", "def retrieveDeleteSubscription():\n if GlobalValues._recoSubscription == None:\n GlobalValues._deleteSubscription = \\\n _getSubscription(Workflow(spec = \"FileDelete\", \n owner = \"CMSTier0\",\n name = \"FileDelete\"),\n Fileset(name = \"Deletable\")\n )\n \n return GlobalValues._deleteSubscription", "def delete_subscription_action(self,\n subscription_id,\n action_id):\n\n return super().new_api_call_builder.request(\n RequestBuilder().server('default')\n .path('/v2/subscriptions/{subscription_id}/actions/{action_id}')\n .http_method(HttpMethodEnum.DELETE)\n .template_param(Parameter()\n .key('subscription_id')\n .value(subscription_id)\n .should_encode(True))\n .template_param(Parameter()\n .key('action_id')\n .value(action_id)\n .should_encode(True))\n .header_param(Parameter()\n .key('accept')\n .value('application/json'))\n .auth(Single('global'))\n ).response(\n ResponseHandler()\n .deserializer(APIHelper.json_deserialize)\n .is_api_response(True)\n .convertor(ApiResponse.create)\n ).execute()", "def delete_spot_datafeed_subscription(self):\r\n return self.get_status('DeleteSpotDatafeedSubscription',\r\n None, verb='POST')", "def delete_user_subscription(\n subscription_id: str,\n user_id: str,\n namespace: Optional[str] = None,\n 
x_additional_headers: Optional[Dict[str, str]] = None,\n **kwargs\n):\n if namespace is None:\n namespace, error = get_services_namespace()\n if error:\n return None, error\n request = DeleteUserSubscription.create(\n subscription_id=subscription_id,\n user_id=user_id,\n namespace=namespace,\n )\n return run_request(request, additional_headers=x_additional_headers, **kwargs)", "def remove_subscription(self, url):\n deleted_resource_id = self.client_URI_endpoints.pop(url)\n # TODO del subscription resource from EventDestinationCollection resrc\n # TODO implement remove resource method for CollectionResources\n self.write_subscriptions_to_tmp(self.client_URI_endpoints)", "def delete_individual_subscriptions_for_grupal_subscription(sender, instance, **kwargs):\n if instance.group is not None: # Only for group subscription creation\n users = User.objects.filter(groups__name=instance.group)\n subs = Subscription.objects.filter(user__in=users)\n for sub in subs:\n if sub.alarm == instance.alarm:\n print('%s deleted' % sub)\n sub.delete()", "def stop_subscription(event):\n _LOGGER.info(\"Shutting down subscriptions\")\n hass.data[vera.VERA_CONTROLLER].stop()", "def test_delete_subscription_template(self):\n pass", "def _async_untrack_subscription(self, subscription: Subscription) -> None:\n topic = subscription.topic\n try:\n if _is_simple_match(topic):\n simple_subscriptions = self._simple_subscriptions\n simple_subscriptions[topic].remove(subscription)\n if not simple_subscriptions[topic]:\n del simple_subscriptions[topic]\n else:\n self._wildcard_subscriptions.remove(subscription)\n except (KeyError, ValueError) as ex:\n raise HomeAssistantError(\"Can't remove subscription twice\") from ex", "def RemoveSubscription(self, observedUser):\n\n self.__Delete(\"/subscriptions/\"+observedUser)", "def unsubscribe(self, subscription):\r\n params = {'ContentType' : 'JSON',\r\n 'SubscriptionArn' : subscription}\r\n response = self.make_request('Unsubscribe', params, '/', 'GET')\r\n body = response.read()\r\n if response.status == 200:\r\n return json.loads(body)\r\n else:\r\n boto.log.error('%s %s' % (response.status, response.reason))\r\n boto.log.error('%s' % body)\r\n raise self.ResponseError(response.status, response.reason, body)", "def pre_delete_subscription(\n self,\n request: pubsub.DeleteSubscriptionRequest,\n metadata: Sequence[Tuple[str, str]],\n ) -> Tuple[pubsub.DeleteSubscriptionRequest, Sequence[Tuple[str, str]]]:\n return request, metadata", "def test_delete_on_background_response_descriptor_subscriptions_subscription_subscription_resource_spaces(self):\n pass" ]
[ "0.68542045", "0.66849315", "0.6553755", "0.6462307", "0.6337273", "0.6276553", "0.62238497", "0.60828495", "0.6030626", "0.6027652", "0.599798", "0.5763129", "0.5689989", "0.55739135", "0.55646366", "0.5527481", "0.55102307", "0.5483406", "0.5458787", "0.54338235", "0.54266065", "0.5400072", "0.53899753", "0.53555006", "0.5333981", "0.5263658", "0.5253453", "0.52087575", "0.5202528", "0.51888114" ]
0.7547199
0
Should return a list of title cased names, each name appears only once
def dedup_and_title_case_names(names): return list(set(name.title() for name in names))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def dedup_and_title_case_names(names):\n names1 =[]\n for n in names:\n if n.title() not in names1:\n names1.append(n.title())\n return names1\n # return [n.title() for n in names if n.title() not in names1]\n pass", "def dedup_and_title_case_names(names):\n my_names = set(names)\n return [name.title() for name in my_names]", "def dedup_and_title_case_names(names):\n\tdictionary = list(dict.fromkeys(NAMES))\n\treturn [name.title() for name in dictionary]", "def dedup_and_title_case_names(names):\n\n # remove duplicates from [names] with set() and cast to list([del_dupes])\n del_dupes = list(set(names))\n\n # iterate through del_dupes and .title() case items, assign to [names] \n names = [_.title() for _ in del_dupes]\n\n # output: ['Sandra Bullock', 'Alec Baldwin', 'Julbob Pybites', 'Matt Damon', 'Al Pacino', 'Keanu Reeves', 'Bob Belderbos',...]\n return sorted(names)", "def title_case(sentence):\n ignore = ['the', 'of', 'a']\n titled = [x[0].upper()+x[1:].lower() for x in sentence.split()]\n caps = []\n caps.append(titled[0])\n for tit in titled[1:]:\n print(tit)\n if tit.lower() in ignore:\n caps.append(tit.lower())\n else:\n caps.append(tit)\n titled = ' '.join(caps)\n return titled", "def proper_title_case(s):\n nocaps = [\"the\"] # This needs to be extended.", "def check_title(title_list):\n for w_index in range(len(title_list)):\n title_list[w_index] = title_list[w_index].replace('_', ' ')\n return [word for word in title_list if word.istitle()]", "def _get_case_names(self):\n case_names = []\n for e in self.html.xpath(\"//path/to/an/element/p\"):\n s = html.tostring(e, method=\"text\", encoding=\"unicode\")\n case_names.append(titlecase(s))\n return case_names", "def titleize(title):\n titleized = []\n for idx, word in enumerate(title.split()):\n if idx == 0 or word not in ['a', 'of', 'in', 'the', 'v']:\n word = word.capitalize()\n\n titleized.append(word)\n\n return ' '.join(titleized)", "def lowercase(nameslist):\n lowercasenames = list(filter(lambda x: x == x.lower(), nameslist))\n #print(f\"No. of lowercase names: {len(lowercasenames)}\")\n return lowercasenames", "def sort_by_surname_desc(names):\n names = dedup_and_title_case_names(names)\n names1 = []\n for n in names:\n x = n.split(\" \")\n names1.append(x[1] + \" \" + x[0])\n return names1\n # ...", "def keep_lowercase(str_list):", "def make_title(words):", "def alpha_case_insensitive():\n# fill it out\n return sorted(STRING_LIST, key=lambda s: s.lower())", "def title_words(self):\n\n if self._title_words == []:\n for s in self.title():\n for w in s.split():\n self._title_words.append(w)\n\n return self._title_words", "def test_titlecase(self):\n test_pairs = [\n [\n \"Q&A with steve jobs: 'that's what happens in technology'\",\n \"Q&A With Steve Jobs: 'That's What Happens in Technology'\",\n ],\n [\"What is AT&T's problem?\", \"What is AT&T's Problem?\"],\n [\n \"Apple deal with AT&T falls through\",\n \"Apple Deal With AT&T Falls Through\",\n ],\n [\"this v that\", \"This v That\"],\n [\"this v. that\", \"This v. That\"],\n [\"this vs that\", \"This vs That\"],\n [\"this vs. that\", \"This vs. 
That\"],\n [\n \"The SEC's Apple Probe: What You Need to Know\",\n \"The SEC's Apple Probe: What You Need to Know\",\n ],\n [\n \"'by the Way, small word at the start but within quotes.'\",\n \"'By the Way, Small Word at the Start but Within Quotes.'\",\n ],\n [\n \"Small word at end is nothing to be afraid of\",\n \"Small Word at End is Nothing to Be Afraid Of\",\n ],\n [\n \"Starting Sub-Phrase With a Small Word: a Trick, Perhaps?\",\n \"Starting Sub-Phrase With a Small Word: A Trick, Perhaps?\",\n ],\n [\n \"Sub-Phrase With a Small Word in Quotes: 'a Trick, Perhaps?'\",\n \"Sub-Phrase With a Small Word in Quotes: 'A Trick, Perhaps?'\",\n ],\n [\n 'Sub-Phrase With a Small Word in Quotes: \"a Trick, Perhaps?\"',\n 'Sub-Phrase With a Small Word in Quotes: \"A Trick, Perhaps?\"',\n ],\n ['\"Nothing to Be Afraid of?\"', '\"Nothing to Be Afraid Of?\"'],\n ['\"Nothing to be Afraid Of?\"', '\"Nothing to Be Afraid Of?\"'],\n [\"a thing\", \"A Thing\"],\n [\n \"2lmc Spool: 'gruber on OmniFocus and vapo(u)rware'\",\n \"2lmc Spool: 'Gruber on OmniFocus and Vapo(u)rware'\",\n ],\n [\"this is just an example.com\", \"This is Just an example.com\"],\n [\n \"this is something listed on del.icio.us\",\n \"This is Something Listed on del.icio.us\",\n ],\n [\"iTunes should be unmolested\", \"iTunes Should Be Unmolested\"],\n [\n \"Reading between the lines of steve jobs’s ‘thoughts on music’\",\n # Tests unicode\n \"Reading Between the Lines of Steve Jobs’s ‘Thoughts on Music’\",\n ],\n [\n \"seriously, ‘repair permissions’ is voodoo\", # Tests unicode\n \"Seriously, ‘Repair Permissions’ is Voodoo\",\n ],\n [\n \"generalissimo francisco franco: still dead; kieren McCarthy: \"\n \"still a jackass\",\n \"Generalissimo Francisco Franco: Still Dead; Kieren McCarthy:\"\n \" Still a Jackass\",\n ],\n [\n \"Chapman v. u.s. Postal Service\",\n \"Chapman v. U.S. Postal Service\",\n ],\n [\n \"Spread Spectrum Screening Llc. v. Eastman Kodak Co.\",\n \"Spread Spectrum Screening LLC. v. Eastman Kodak Co.\",\n ],\n [\n \"Consolidated Edison Co. of New York, Inc. v. Entergy Nuclear \"\n \"Indian Point 2, Llc.\",\n \"Consolidated Edison Co. of New York, Inc. v. Entergy Nuclear\"\n \" Indian Point 2, LLC.\",\n ],\n [\n \"Infosint s.a. v. H. Lundbeck A/s\",\n \"Infosint S.A. v. H. Lundbeck A/S\",\n ],\n [\n \"KEVIN O'CONNELL v. KELLY HARRINGTON\",\n \"Kevin O'Connell v. Kelly Harrington\",\n ],\n [\n \"International Union of Painter v. J&r Flooring, Inc\",\n \"International Union of Painter v. J&R Flooring, Inc\",\n ],\n [\n \"DOROTHY L. BIERY, and JERRAMY and ERIN PANKRATZ v. THE UNITED\"\n \" STATES 07-693L And\",\n \"Dorothy L. Biery, and Jerramy and Erin Pankratz v. the \"\n \"United States 07-693l And\",\n ],\n [\"CARVER v. US\", \"Carver v. 
US\"],\n ]\n\n for pair in test_pairs:\n unicode_string = force_unicode(pair[0])\n self.assertEqual(titlecase(unicode_string, DEBUG=False), pair[1])", "def clean_names_list(names):\n pure_names = []\n nan = re.compile('nan', re.IGNORECASE)\n title = re.compile('surname', re.IGNORECASE)\n for name in names:\n if nan.search(name):\n continue\n elif title.search(name):\n continue\n else:\n pure_names.append(name)\n return pure_names", "def camel_filter(val):\n titlecase = val.title()\n return re.sub(r\"[\\W^_]\", \"\", titlecase)", "def getlistofpossibletitles(fileitem,fname):\n title = []\n oddtitles = open(\"oddtitles.txt\", 'r')\n content = oddtitles.read()\n oddtitles.close()\n\n content = content.split(\"\\n\")\n for line in content:\n elements = line.split(',')\n if fileitem in elements[0]:\n #print(elements[1])\n title.append(elements[1].title())\n\n \n title.append(fileitem)\n title.append(fileitem.title())\n lookfor = fileitem.replace(\".\",\" \")\n title.append(lookfor)\n title.append(lookfor.title())\n lookfor = fileitem.replace('-',\" \")\n title.append(lookfor)\n title.append(lookfor.title())\n with open(fname, \"r\") as dataf:\n for line in dataf:\n if lookfor.upper() in line.upper():\n line = line.replace(\"\\n\",\"\")\n title.append(line)\n title.append(line.title())\n return title", "def listify(words):\n word_list = []\n for word in words:\n if word:\n word = word.lower()\n if word not in word_list: # add it\n word_list.append(word)\n else:\n pass\n word_list.sort()\n return word_list", "def fix_title(title):\n words = re.findall('[A-Z][^A-Z]*', title[0])\n final_str = \"\"\n for word in words:\n final_str += word + \" \"\n return final_str.strip()", "def check_title(title_list):\r\n\tnew_list = [] #Initialize a new list\r\n\tfor title in title_list: # iterates through the list to look for the non-titles\r\n\t\tif title.istitle(): # checks for titles\r\n\t\t\tnew_list.append(title) # if found, appends to the new list\r\n\treturn new_list # returns the new appended list.\r", "def pyranose_names(self):\n output = set()\n for item in self.pyranoses():\n if item in self.pyranose_fac:\n output.add(self.pyranose_fac[item][\"name\"])\n return list(output)", "def names(self, ignore_items=True):\n all_names = self.variables()\n if not ignore_items:\n all_names = self.unroll(all_names, both='all')\n lower_names = [n.lower() for n in all_names]\n multiple_names = [k for k, v in list(Counter(lower_names).items()) if v > 1]\n if not multiple_names: return self.variables()\n weak_dupes = OrderedDict()\n for name in all_names:\n if name.lower() in multiple_names:\n if not name.lower() in weak_dupes:\n weak_dupes[name.lower()] = [name]\n elif not name in weak_dupes[name.lower()]:\n weak_dupes[name.lower()].append(name)\n max_wd = max(len(v) for v in list(weak_dupes.values()))\n for k, v in list(weak_dupes.items()):\n while not len(v) == max_wd:\n v.append(None)\n weak_dupes[k] = v\n\n return pd.DataFrame(weak_dupes)", "def unique_names(names):\n return sorted(set(names))", "def get_names():\n only_links = SoupStrainer(\"a\")\n names = set()\n doc = requests.get(NAMES_URL).content\n links = BeautifulSoup(doc, \"html.parser\", parse_only=only_links)\n pokemon = links.find_all(title=re.compile(\"(\\w+)(\\s){1}(\\(Pokémon\\))\"))\n for cell in pokemon:\n names.add(str(cell.string))\n \n\n return names", "def titleize(phrase):\n nl = phrase.split(' ')\n o=[]\n for i in nl:\n o.append(i[0].upper()+i[1:].lower())\n print(' '.join(o))", "def book_title(title):\n # this will capitalize the first 
letter of every word\n title = title.title()\n pre_title = []\n pre_title = title.split(\" \")\n new_title = \"\"\n for word in pre_title:\n # If the word is the first word of the title it has to be capitalize\n if word != pre_title[0]:\n # If the word is in the small word list make it lower case\n if word.lower() in small_words:\n word = word.lower()\n new_title = new_title + word + ' '\n# Remove the lagging space \n return new_title.strip()", "def monomer_names(self):\n output = set()\n for item in self.monomers():\n if item in self.pyranose_fac:\n output.add(self.pyranose_fac[item][\"name\"])\n return list(output)", "def extract_names(register):\n names = []\n for i in range(len(register) - 1): # len() -> no of columns\n first_name = str(register.iloc[i][2]).capitalize()\n last_name = str(register.iloc[i][1]).upper()\n name = last_name + ' ' + first_name\n names.append(name)\n names = list(set(names))\n return names" ]
[ "0.82403964", "0.8028632", "0.7952886", "0.7666392", "0.71184033", "0.6779802", "0.67301315", "0.6570068", "0.6475094", "0.642649", "0.6370992", "0.6320935", "0.6289419", "0.62875795", "0.62639284", "0.6213404", "0.6199757", "0.61732936", "0.61606234", "0.61318856", "0.6125666", "0.6096306", "0.6084539", "0.6056242", "0.60526264", "0.6041517", "0.6035077", "0.6003785", "0.60031116", "0.5999463" ]
0.8039771
1
Returns names list sorted desc by surname
def sort_by_surname_desc(names): names = dedup_and_title_case_names(names) return sorted(names, key=lambda name: name.split(' ')[-1], reverse=True)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def sort_by_surname_desc(names):\n names = dedup_and_title_case_names(names)\n split_names = [name.split(' ') for name in names]\n return [\" \".join(x) for x in (sorted(split_names, key = lambda x: x[1], reverse=True))]", "def sort_by_surname_desc(names):\n names = dedup_and_title_case_names(names)\n names1 = []\n for n in names:\n x = n.split(\" \")\n names1.append(x[1] + \" \" + x[0])\n return names1\n # ...", "def sort_by_surname_desc(names):\n names = dedup_and_title_case_names(names)\n names_splitted = [name.split() for name in names]\n names_splitted.sort(key=sort_function)\n names_joined = [\" \".join(name) for name in names_splitted]\n return names_joined", "def sort_by_surname_desc(names):\n\n # input: ['Sandra Bullock', 'Alec Baldwin', 'Julbob Pybites', 'Matt Damon', 'Al Pacino', 'Keanu Reeves', 'Bob Belderbos',...]\n names = dedup_and_title_case_names(names)\n\n # sort names in descending order\n # ['Julian Sequeira', 'Arnold Schwarzenegger', 'Keanu Reeves', 'Julbob Pybites', 'Brad Pitt', 'Al Pacino', 'Matt Damon', 'Sandra Bullock', 'Bob Belderbos', 'Alec Baldwin']\n names = sorted(names, key=lambda x: x.split(' ')[-1], reverse=True)\n\n # output: ['Julian Sequeira', 'Arnold Schwarzenegger', 'Keanu Reeves', 'Julbob Pybites', 'Brad Pitt', 'Al Pacino', 'Matt Damon', 'Sandra Bullock', 'Bob Belderbos', 'Alec Baldwin']\n return names", "def sort_records_by_name(records):\n return sorted(records, key=lambda x: (x.last_name, x.first_name), reverse=True)", "def test_sorting_surname(self):\n # Name Decreasing sorting criteria is selected\n sorting_criteria = \"surname_increasing\"\n # Search string that returns all of the users\n search_string = \"''\"\n # Search For all users\n resp = SearchTest.client.get('/api/search/',{\"token\":SearchTest.valid_token,\"search_string\":search_string,\"sorting_criteria\":sorting_criteria})\n search_result = json.loads(resp.content)\n for i in range(len(search_result)-1):\n self.assertLessEqual(search_result[i][\"name\"],search_result[i+1][\"name\"],\"{} sorting criteria doesn't work\".format(sorting_criteria))", "def sort_by_name(self, reverse=False):\n self.sort_by(\"name\", reverse=reverse)", "def test_sorting_surname2(self):\n # Name Decreasing sorting criteria is selected\n sorting_criteria = \"surname_decreasing\"\n # Search string that returns all of the users\n search_string = \"''\"\n # Search For all users\n resp = SearchTest.client.get('/api/search/',{\"token\":SearchTest.valid_token,\"search_string\":search_string,\"sorting_criteria\":sorting_criteria})\n search_result = json.loads(resp.content)\n for i in range(len(search_result)-1):\n self.assertGreaterEqual(search_result[i][\"name\"],search_result[i+1][\"name\"],\"{} sorting criteria doesn't work\".format(sorting_criteria))", "def sort_names(li, by_which):\n \n if by_which == 'first':\n li.sort(key = Name.first)\n elif by_which == 'last':\n li.sort(key = Name.last)", "def sortednameslist(nameslist):\n sortednames = sorted(nameslist, key=lambda x: x[1])\n return sortednames", "def user_names(self):\n results = []\n for user_detail in self.users:\n results.append(user_detail.user_name)\n results.sort()\n return results", "def get_last_name(first_name):\n \"\"\"Order last names so that names closest to first name are first.\n For example first name \"Kikke\" -> last names should be \"Kilari\",\n \"Kolari\", [all other names by random]\"\"\"\n def name_comparator(last_name):\n \"\"\"Return a number describing how close the two names are.\"\"\"\n score = 0\n\n # check if first n letters 
of first and last name matches\n for i in range(1, 4):\n if len(first_name) >= i and len(last_name) >= 2:\n # if previous letter does not match, don't continue\n if i > 1 and score > (i - 1) * -1:\n break\n\n # lower score by one per each matching letter\n if first_name[i - 1: i] == last_name[i - 1: i]:\n score -= 1\n\n \"\"\"detect names with umlauts and give them higher score if both have\n them, lower score if only one has them.\"\"\"\n regex = compile(r'[äöå]')\n if score == 0:\n if regex.search(first_name) and regex.search(last_name):\n score -= 1\n else:\n if bool(regex.search(last_name)) != bool(regex.search(last_name)):\n score += 1\n\n return score\n\n last_names_random = sample(last_names, len(last_names))\n last_names_sorted = sorted(last_names_random, key=name_comparator)\n\n \"\"\"Walk through names and check on each name if you should stop. Since\n the matching names are first they are more likely to be selected.\"\"\"\n for i in range(0, 10):\n if random() >= 0.7:\n return last_names_sorted[i]\n\n return last_names_sorted[0]", "def sortlastcharacter(nameslist):\n sortedlastcharacters = sorted(nameslist, key=lambda x: x[-1])\n return sortedlastcharacters", "def ordered_list_by_last_name(d):\n\n ordered_d = dict(sorted(d.items(), key=lambda x: x[1]['Last name']))\n\n print(\"\\nPhone book ordered by last name: \")\n for pid, info in ordered_d.items():\n print('\\nPerson number: ', pid)\n for key in info:\n print(key + ':', info[key])", "def personas(self, pretty=True, sort=True):\n names = list(self.name2base)\n if pretty: names = [self.process_name(name, True) for name in names]\n if sort: names = sorted(names)\n return names", "def test_sorting_name(self):\n # Name Decreasing sorting criteria is selected\n sorting_criteria = \"name_decreasing\"\n # Search string that returns all of the users\n search_string = \"''\"\n # Search For all users\n resp = SearchTest.client.get('/api/search/',{\"token\":SearchTest.valid_token,\"search_string\":search_string,\"sorting_criteria\":sorting_criteria})\n search_result = json.loads(resp.content)\n for i in range(len(search_result)-1):\n self.assertGreaterEqual(search_result[i][\"name\"],search_result[i+1][\"name\"],\"{} sorting criteria doesn't work\".format(sorting_criteria))", "def longest_name():\n def foolen(p): # nothing wrong with having a function inside a function\n return len(p['name'])\n\n return sorted(PEOPLE_LIST, key=foolen, reverse=True)", "def test_sorting_name2(self):\n # Name Decreasing sorting criteria is selected\n sorting_criteria = \"name_increasing\"\n # Search string that returns all of the users\n search_string = \"''\"\n # Search For all users\n resp = SearchTest.client.get('/api/search/',{\"token\":SearchTest.valid_token,\"search_string\":search_string,\"sorting_criteria\":sorting_criteria})\n search_result = json.loads(resp.content)\n for i in range(len(search_result)-1):\n self.assertLessEqual(search_result[i][\"name\"],search_result[i+1][\"name\"],\"{} sorting criteria doesn't work\".format(sorting_criteria))", "def longest_name():\n def foolen(p): # nothing wrong with having a function inside a function\n return len(p['name'])\n return sorted(PEOPLE_LIST, key=foolen, reverse=True)", "def getnames(f):\n # Assumes file is sorted with girl names first, boy names second, and the\n # most popular name at the top of each list.\n\n lineoftext = f.readline()\n girlname,sex,count = processline(lineoftext)\n\n while sex != \"M\":\n name,sex,count = processline(f.readline())\n boyname=name\n\n return girlname,boyname", 
"def naturalize_person(self, string):\n suffixes = [\"Jr\", \"Jr.\", \"Sr\", \"Sr.\", \"I\", \"II\", \"III\", \"IV\", \"V\"]\n # Add lowercase versions:\n suffixes = suffixes + [s.lower() for s in suffixes]\n\n # If a name has a capitalised particle in we use that to sort.\n # So 'Le Carre, John' but 'Carre, John le'.\n particles = [\"Le\", \"La\", \"Von\", \"Van\", \"Du\", \"De\"]\n\n suffix = \"\" # Jr\n parentheses = \"\" # (1)\n\n sort_string = string\n parts = string.split(\" \")\n\n if parts[-1].startswith(\"(\"):\n # Remove so we can add it back at the end.\n parentheses = parts.pop()\n\n if parts[-1] in suffixes:\n # Remove suffixes entirely, as we'll add them back on the end.\n suffix = parts[-1]\n parts = parts[0:-1] # Remove suffix from parts\n sort_string = \" \".join(parts)\n\n if len(parts) > 1:\n\n if parts[-2] in particles:\n # From ['Alan', 'Barry', 'Le', 'Carré']\n # to ['Alan', 'Barry', 'Le Carré']:\n parts = parts[0:-2] + [\" \".join(parts[-2:])]\n\n # From 'David Foster Wallace' to 'Wallace, David Foster':\n sort_string = \"{}, {}\".format(parts[-1], \" \".join(parts[:-1]))\n\n if suffix:\n # Add it back on.\n sort_string = f\"{sort_string} {suffix}\"\n\n if parentheses:\n # Add it back on.\n sort_string = f\"{sort_string} {parentheses}\"\n\n # In case this name has any numbers in it.\n sort_string = self._naturalize_numbers(sort_string)\n\n return sort_string", "def main(li, first_or_last):\n\n a = name_list(li)\n sort_names(a,first_or_last)\n if(first_or_last=='first'):\n for i in range(len(a)):\n print('{} {}'.format(a[i].first_name, a[i].last_name))\n else:\n for i in range(len(a)):\n print('{}, {}'.format(a[i].first_name, a[i].last_name))", "def sort_by_name(list_to_sort):\n return sorted(\n list_to_sort,\n key=lambda k: k['Name'].lower()\n )", "def get_sorted():\n return sorted(country_list, key=get_pop_and_name)", "def find_top_salespeople(name_list, sales_list, n):\n top_names = []\n temp_name = [] + name_list\n temp_sales = [] + sales_list\n\n while len(top_names) < n and len(temp_sales) != 0: \n max_sales = temp_sales.index(max(temp_sales))\n num = temp_sales[max_sales]\n top_names.append(temp_name[max_sales])\n temp_name.remove(temp_name[max_sales])\n temp_sales.remove(temp_sales[max_sales])\n\n if num in temp_sales:\n max_sales2 = temp_sales.index(max(temp_sales))\n top_names.append(temp_name[max_sales2])\n temp_name.remove(temp_name[max_sales2])\n temp_sales.remove(temp_sales[max_sales2])\n\n return top_names", "def extra_order_by_lastname_of_first_candidate(self, descending=False):\n if descending:\n order_by = ['-lastname_of_first_candidate']\n else:\n order_by = ['lastname_of_first_candidate']\n return self.extra_annotate_with_lastname_of_first_candidate().extra(\n order_by=order_by\n )", "def people(persons):\n sorted_list = sorted(persons, key=lambda k: k['age'])\n return sorted_list", "def extract_names(filename):\n f = open(filename,'rU') \n name_data = f.read()\n year_data= re.search(r'Popularity\\sin\\s(\\d\\d\\d\\d)', name_data)\n if not year_data :\n print ' no year found '\n sys.exit(1)\n name_year=year_data.group(1) \n #print 'year :'\n #print name_year\n tuples=re.findall(r'<td>(\\d+)</td><td>(\\w+)</td><td>(\\w+)</td>',name_data)\n #print 'tuples'\n #print tuples\n dict_name = {}\n for a,b,c in tuples :\n #print a + ' boy name: ' + b + ' , girl name : ' + c\n if b not in dict_name :\n dict_name[b] = a\n if c not in dict_name :\n dict_name[c] = a \n #print dict_name \n lst_names = sorted(dict_name.keys()) \n result_names_sorted = []\n 
result_names_sorted.append(name_year)\n for name in lst_names :\n #print name + \" : \" + dict_name[name]\n result_names_sorted.append(name + ' ' + dict_name[name])\n #print result_names_sorted \n\n return result_names_sorted", "def _sort_by_name(bam_fn):", "def get_names_short(self):\r\n return [p.get_name() for p in self.people]" ]
[ "0.85138637", "0.8494503", "0.8393841", "0.8254867", "0.7045284", "0.68591815", "0.68037444", "0.67713994", "0.6424156", "0.64197683", "0.6416577", "0.63984555", "0.63561875", "0.63063484", "0.63006693", "0.62879753", "0.62536865", "0.6237204", "0.6220415", "0.61567885", "0.6140615", "0.60991704", "0.6048707", "0.5927405", "0.59111387", "0.586218", "0.58459973", "0.5840432", "0.58403665", "0.58322245" ]
0.8625484
0
Returns the shortest first name (str). You can assume there is only one shortest name.
def shortest_first_name(names): names = dedup_and_title_case_names(names) return sorted([name.split()[0] for name in names], key=len)[0]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def shortest_first_name(names):\n names = dedup_and_title_case_names(names)\n split_names = [name.split(' ') for name in names]\n first_name = [first for first, last in split_names]\n shortest = first_name[0]\n for name in first_name:\n if len(name) < len(shortest):\n shortest = name\n\n return shortest", "def shortest_first_name(names):\n names = dedup_and_title_case_names(names)\n names_splitted = [name.split() for name in names]\n names_splitted.sort(key=sort_length)\n names_sorted = [\" \".join(name) for name in names_splitted]\n return names_sorted[0]", "def shortest_first_name(names):\n names = dedup_and_title_case_names(names)\n # ...", "def shortest_first_name(names):\n # input: ['Julian Sequeira', 'Arnold Schwarzenegger', 'Keanu Reeves', 'Julbob Pybites', 'Brad Pitt', 'Al Pacino', 'Matt Damon', 'Sandra Bullock', 'Bob Belderbos', 'Alec Baldwin']\n names = dedup_and_title_case_names(names)\n\n # fname_lname = [('Keanu', 'Reeves'), ('Julbob', 'Pybites'), ('Julian', 'Sequeira'), ('Matt', 'Damon'), ('Arnold', 'Schwarzenegger'), ('Brad', 'Pitt'), ('Alec', 'Baldwin'), ('Bob', 'Belderbos'), ('Sandra', 'Bullock'), ('Al', 'Pacino')]\n fname_lname = [tuple(_.split()) for _ in names]\n \n # iterate through [(fname_lname)] and assign 'first name' to [f_name]\n f_name = [x[0] for x in fname_lname]\n\n # grab min [f_name] using len()\n f_name = min(f_name, key=len)\n\n return f_name", "def get_short_name(self):\n last_name = self.last_name\n first_name = self.first_name\n if (not (last_name and not last_name.isspace())):\n \"\"\" If last name is empty or none then return first name\"\"\"\n return first_name\n else:\n return last_name", "def get_short_name(self):\n return self.full_name.split(' ')[0]", "def get_short_name(self) -> str:\n return self.first_name", "def extract_first_name(s):\n clean_name = re.sub(r'\\s+', r' ', s).split()\n\n for name in clean_name:\n if len(name) > 1:\n return name.title()\n else:\n pass\n\n return None", "def first_name(self) -> str:\n return self._first_name", "def get_short_name(self):\n return self.first_name", "def get_short_name(self):\n return self.first_name", "def get_short_name(self):\n return self.first_name", "def get_short_name(self):\n return self.first_name", "def get_short_name(self):\n return self.first_name", "def get_short_name(self):\n return self.first_name", "def get_short_name(self):\n return self.first_name", "def get_short_name(self):\n return self.first_name", "def get_short_name(self):\n return self.first_name", "def get_short_name(self):\n return self.first_name", "def get_short_name(self):\n return self.first_name", "def get_short_name(self):\n return self.first_name", "def get_short_name(self):\n return self.first_name", "def get_short_name(self):\n return self.first_name", "def get_short_name(self):\n return self.first_name", "def get_short_name(self):\n return self.first_name", "def get_short_name(self):\n return self.first_name", "def get_short_name(self):\n return self.first_name", "def get_short_name(self):\n\n return self.first_name", "def name_last_first(name):\n parsed_name = HumanName(name) # this is a smart parser that digests human names\n name = parsed_name.last+\", \"+parsed_name.first\n if(parsed_name.middle != \"\"):\n name = name+\" \"+parsed_name.middle\n return name", "def get_short_name(self):\r\n return self.first_name" ]
[ "0.8495962", "0.8405887", "0.8365317", "0.81789523", "0.78514916", "0.7568302", "0.7427178", "0.7414167", "0.741108", "0.72852665", "0.72852665", "0.72852665", "0.72852665", "0.72852665", "0.72852665", "0.72852665", "0.72852665", "0.72852665", "0.72852665", "0.72852665", "0.72852665", "0.72852665", "0.72852665", "0.72852665", "0.72852665", "0.72852665", "0.72852665", "0.72729117", "0.7182185", "0.71309876" ]
0.84295845
1
gname points to a graph stored (pickled) on disk.
def load_graph( gname ): return NX.read_gpickle( gname )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def save_graph(self, path):\n if path.split('.')[-1]=='gexf':\n nx.write_gexf(self.graph, path)\n else:\n nx.write_gpickle(self.graph, path)", "def load_graph(graphname,path='./data/',mname='A'):\n\n\tdata=sio.loadmat(path+graphname)\n\treturn data[mname]", "def load_graph(self, path):\n if path.split('.')[-1]=='gexf':\n self.graph = nx.read_gexf(path)\n else:\n self.graph = nx.read_gpickle(path)", "def saveGraph(self, filename):\n nx.write_yaml(self.G,filename)", "def saveGroove(self, gname):\n\n PC.saveGroove(self, gname) # create storage. Do this 1st.\n self.grooves[gname]['FOLLOWCHORD'] = self.followChord\n self.grooves[gname]['FOLLOWKEY'] = self.followKey\n self.grooves[gname]['ROOT'] = self.rootChord", "def read(name):\n\n if not name.endswith(\"gml\"):\n name = \"{0}.gml\".format(name)\n with open(name) as f:\n lines = f.readlines()\n newlines = []\n for line in lines:\n if line.strip().startswith(\"name\"):\n newline = line.replace(\"name\", \"label\", 1)\n else:\n newline = line\n newlines.append(newline)\n newname = \"nx_{0}\".format(name)\n with open(newname, \"w\") as f:\n f.writelines(newlines)\n network = networkx.read_gml(newname)\n # It should return a Network object instead of DiGraph\n return network", "def save_chicago_graph(G, path=\"chicago.xml\"):\n\n\tox.save_graphml(G, filename=path)", "def save_graphs(name, path):\n # Generate Fiber Density vs. Wedges graph\n save_graph_fiber_vs_wedges(name, path)\n\n # Generate Fiber Density vs. Rings graph\n save_graph_fiber_vs_rings(name, path)", "def save_graph(g, project_name):\n path = get_dep_cache_path(project_name)\n log(\"Saving graph for '%s' to cache: %s\" % (project_name, path))\n try:\n f = open(path, 'w')\n data = {\n 'last_update': g['last_update'],\n 'graph': g['graph'].get_data()\n }\n f.write(json.dumps(data, separators=(',',':')))\n f.close()\n except IOError as e:\n log(\"Failed to save dependency graph: %s\" % e.message, error=True)", "def write_graph(g, filename):\n with open(filename, 'w') as f:\n f.write(repr(g))", "def graph(self, name):\n return Graph(self.base_url, name)", "def make_graph_public(self, name):\n\n\t\treturn self.update_graph(name, is_public=1)", "def save_network(nodes, edges, name, pid):\n d = {'nodes': nodes, 'edges': edges, 'name': name, 'pid': pid}\n\n out_filename = \"{0}-{1}.pickle\".format(\n datetime.datetime.now().strftime(\"%Y%m%d-%H%M%S\"),\n pid\n )\n with open(out_filename, 'wb') as out_file:\n pickle.dump(d, out_file)", "def save_gp(self, filename):\n\n warnings.warn('use save and load function', DeprecationWarning)\n self.gp.save(filename)", "def save_gp(self, filename):\n\n warnings.warn('use save and load function', DeprecationWarning)\n self.gp.save(filename)", "def save_gp(self, filename):\n\n warnings.warn('use save and load function', DeprecationWarning)\n self.gp.save(filename)", "def save_gp(self, filename):\n\n warnings.warn('use save and load function', DeprecationWarning)\n self.gp.save(filename)", "def savegraph(self, path):\n\n raise NotImplementedError", "def save_graph(self, filename, fileType):\n if fileType == \"GML Format\":\n nx.write_gml(self.graph, filename+\".gml\")\n if fileType == \"Adjacency list\":\n nx.write_adjlist(self.graph, filename+\".adjlist\")\n if fileType == \"YAML\":\n nx.write_yaml(self.graph, filename + \".yaml\")", "def save_graph(graph, file_name):\r\n print \"Saving network into \"+file_name\r\n f = open(file_name, 'w')\r\n f.write(str(len(graph))+'\\n')\r\n for citizen in graph:\r\n f.write(str(citizen.id) + ';' + 
str(citizen.location) + ';' + str(citizen.influence_level) + ';' + \\\r\n str(citizen.proactivity_level) + '\\n')\r\n for op in citizen.opinions.keys():\r\n value = citizen.opinions[op].weight\r\n f.write(str(op)+':'+str(value)+';')\r\n f.write('\\n')\r\n for friend in citizen.friends:\r\n f.write(str(friend.id) + ';')\r\n f.write('\\n')\r\n f.close()", "def __init__(self, name):\n self.name = name\n self._edges = []", "def load_graph(net_file):\n path, filename = os.path.split(net_file)\n net_name = os.path.splitext(filename)[0]\n # get full path\n path = os.path.abspath(path)\n pickle_dir = path + os.sep + \"cache\"\n if not os.path.isdir(pickle_dir):\n os.mkdir(pickle_dir)\n pickle_file = \"{0}/{1}.pickle\".format(pickle_dir, net_name)\n if (os.path.isfile(pickle_file) and\n os.stat(net_file).st_mtime < os.stat(pickle_file).st_mtime):\n # Pickle file exists, and source_file is older\n graph = nx.read_gpickle(pickle_file)\n else:\n # No pickle file, or is outdated\n graph = nx.read_gml(net_file)\n nx.write_gpickle(graph, pickle_file)\n # ANK only understands GML files cleaned by topzootools\n if 'Creator' in graph.graph:\n if graph.graph['Creator'] == \"Topology Zoo Toolset\":\n # Graph has been processed by topzootools into suitable \n # format for ank\n return graph\n elif graph.graph['Creator'] == ' \"yFiles\"':\n # Note yFiles has quotes and leading space after nx parsing\n #TODO: try and use topzootools module (if installed)\n # to do conversion\n # to a /tmp file\n LOG.warn(\"Using GML file exported from yED, \"\n \"Please use TopZooTools to convert yED GML file\"\n \" into Topology Zoo format for use in AutoNetkit\")\n #TODO: make this throw exception so that program exits\n return None\n else:\n #Unknown file creator, may be user manually created, but warn\n LOG.warn(\"Unknown GML file creator\")\n return graph\n else:\n # No creator specified\n return graph", "def loadgraph(self, path):\n\n raise NotImplementedError", "def load_graph(self, filename):\n try:\n file_extention = list(filename.split(\".\"))[-1]\n if file_extention == \"gml\":\n self.graph = nx.read_gml(filename)\n if file_extention == \"adjlist\":\n self.graph = nx.read_adjlist(filename)\n if file_extention == \"yaml\":\n self.graph = nx.read_yaml(filename)\n except Exception as e:\n print(\"Error in loading Graph file: The error is\", e)", "def gexf_graph():\n # you must replace these lines and supply your own graph\n \n \n \n my_gexf = Gexf(\"JiajiaXie\", \"My awesome graph\")\n graph=my_gexf.addGraph(\"undirected\", \"static\", \"My awesome networks\")\n \n atr1=graph.addNodeAttribute('Type',type='string')\n\n\n for set in data_specific:\n if graph.nodeExists(set['set_num']) ==0:\n tm1=graph.addNode(set['set_num'], set['name'], r='0', g='0', b='0')\n tm1.addAttribute(atr1,\"set\")\n\n\n\n counter_test=1\n for set, part in data_parts.items():\n for key, part_list in part.items():\n interme =part_list['color']\n red=interme[0]+interme[1]\n green=interme[2]+interme[3]\n blue=interme[4]+interme[5]\n\n red_de=str(int(red,16))\n green_de=str(int(green,16))\n blue_de=str(int(blue,16))\n if graph.nodeExists(part_list['id'])==0:\n tm2=graph.addNode(part_list['id'], part_list['part_name'],r=red_de, g=green_de, b = blue_de)\n tm2.addAttribute(atr1,\"part\")\n\n\n counter_test+=1\n graph.addEdge(\"_\"+str(counter_test), set, part_list['id'], part_list['quantity'])\n\n\n\n f=open('bricks_graph.gexf','wb')\n my_gexf.write(f)\n\n\n return my_gexf.graphs[0]", "def make_graph_private(self, name):\n\t\treturn 
self.update_graph(name, is_public=0)", "def save_graph_fiber_vs_rings(name, path):\n # Validate that the file name given does not contain special characters\n Utils.validate_name(name)\n\n # Validate that the directory path exists\n Utils.validate_path(path)\n\n # Generate full path to store image\n graph_path = Utils.get_path('_WedgeGraph.jpg', name, path)\n\n # Get fiber density list (without averages) and validate its dimensions\n fiber_density = get_fiber_density()\n Utils.validate_fiber_list(fiber_density)\n\n # Initialize variables\n number_rows = len(fiber_density)\n x = range(1, number_rows + 1)\n wedges = []\n [wedges.append(x) for x in zip(*fiber_density)]\n\n # Clear any previous graph still in memory\n plt.clf()\n\n # Change font size to 14pt\n plt.rcParams.update({'font.size': 14})\n\n # Graph title\n plt.title('Fiber Density vs. Rings')\n\n # Y axis title\n plt.ylabel('Fiber Density')\n\n # X axis title\n plt.xlabel(\"Rings\")\n\n if len(x) == 1:\n for columns in wedges:\n for value in columns:\n # Plot points\n plt.plot(1, value, 'o')\n else:\n # Graph lines, each line represents the fiber densities of the rings across a wedge.\n # Points in the graph: (Number of ring, Fiber Density)\n for columns in wedges:\n y = columns\n\n # Plot line\n plt.plot(x, y)\n\n # Show every value in the x axis\n plt.xticks(x)\n fig = plt.gcf()\n\n # Increase image size\n fig.set_size_inches(18.5, 10.5)\n\n # Store image in local file system\n fig.savefig(graph_path, dpi=100)\n\n # Validate that the image was successfully saved\n Utils.validate_path(graph_path)", "def save_graph_fiber_vs_wedges(name, path):\n # Validate that the file name given does not contain special characters\n Utils.validate_name(name)\n\n # Validate that the directory path exists\n Utils.validate_path(path)\n\n # Generate full path to store image\n graph_path = Utils.get_path('_RingGraph.jpg', name, path)\n\n # Get fiber density list (without averages) and validate its dimensions\n fiber_density = get_fiber_density()\n Utils.validate_fiber_list(fiber_density)\n\n # Initialize variables\n number_columns = len(fiber_density[0])\n x = range(1, number_columns + 1)\n\n # Clear any previous graph still in memory\n plt.clf()\n\n # Change font size to 14pt\n plt.rcParams.update({'font.size': 14})\n\n # Graph lines, each line represents the fiber densities of the wedges across a ring.\n # Points in the graph: (Number of wedge, Fiber Density)\n for row in fiber_density:\n y = row\n\n # Graph title\n plt.title('Fiber Density vs. Wedges')\n\n # Y axis title\n plt.ylabel('Fiber Density')\n\n # X axis title\n plt.xlabel(\"Wedges\")\n\n # Plot line\n plt.plot(x, y)\n\n fig = plt.gcf()\n\n # Increase image size\n fig.set_size_inches(18.5, 10.5)\n\n # Store image in local file system\n fig.savefig(graph_path, dpi=100)\n\n # Validate that the image was successfully saved\n Utils.validate_path(graph_path)", "def plotModel(self, name):\n g = Digraph('G', filename = name + '.gv')\n\n for prevChord in self.chain:\n for chord in self.chain[prevChord]:\n g.edge(prevChord, chord, label=\"%.2f\" % self.chain[prevChord][chord])\n\n g.view()", "def write(self, outfilename):\n\n nx.write_gpickle(self.graph, outfilename)" ]
[ "0.6715936", "0.63711464", "0.63242835", "0.6148255", "0.6132365", "0.6122497", "0.6104714", "0.5913933", "0.5835921", "0.57587785", "0.5712477", "0.5708755", "0.56999767", "0.5680423", "0.5680423", "0.5680423", "0.5680423", "0.5676791", "0.565727", "0.56568235", "0.5630545", "0.5595005", "0.5594853", "0.55744255", "0.55639607", "0.5516552", "0.5484998", "0.54846895", "0.54821664", "0.5471844" ]
0.7118238
0
updates min, max, total with the given value
def update(self, value): if value < self.min: self.min = value if value > self.max: self.max = value self.total += value self.instances += 1 self.values.append(value)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update_values(self):\n # have to reset params to 0 when recalculating\n self.total_weight = self.total_price = self.total_value = self.total_fitness = 0\n for index, value in enumerate(self.item_stats):\n if value == 1:\n self.total_weight += self.items[index].weight\n self.total_price += self.items[index].price\n self.total_value += self.items[index].value\n self.total_fitness += self.items[index].fitness", "def update_amount(self, new_amount=None):\n if not new_amount:\n new_amount = self.amount\n if new_amount < self.min:\n new_amount = self.min\n if new_amount > self.max:\n new_amount = self.max\n self.amount = new_amount\n self.build_bar()", "def update(self, val, n=1):\n self.val = val\n self.sum += val * n\n self.count += n\n self.avg = self.sum / self.count", "def set_value(self, val):\n for i, v in enumerate(val):\n if v < self.allowable_min[i]:\n raise ValueError(\"{0}, {1} less than min value {2}, index {3}\".format(self.get_name(), val, self.min_found, i))\n if v > self.allowable_max[i]:\n raise ValueError(\"{0}, {1} greater than max value {2}, index {3}\".format(self.get_name(), val, self.max_found, i))\n\n self.min_found[i] = min(self.min_found[i], v)\n self.max_found[i] = max(self.max_found[i], v)\n\n n = self.count+1\n self.avg_found[i] = self.avg_found[i] * (self.count / n) + v * (1.0 / n)\n\n self.count += 1\n self.value = val", "def update(self, val, n=1):\n self.sum += val * n\n self.cnt += n\n self.avg = self.sum / self.cnt", "def calc_stat_values(self):", "def set_value(self, val):\n if val < self.allowable_min:\n raise ValueError(\"{0}, {1} less than min value {2}\".format(self.get_name(), val, self.min_found))\n if val > self.allowable_max:\n raise ValueError(\"{0}, {1} greater than max value {2}\".format(self.get_name(), val, self.max_found))\n\n self.min_found = min(self.min_found, val)\n self.max_found = max(self.max_found, val)\n n = self.count+1\n self.avg_found = self.avg_found * (self.count / n) + val * (1.0 / n)\n self.count = n\n\n self.value = val", "def _update_stats(self, value):\n solver = self.solver\n is_better = solver.sense.is_better\n if isinstance(value, Infeasible):\n self.infeas_count += 1\n if value < self.least_infeas_value:\n self.least_infeas_value = value\n solver.channel.emit(solver.SIGNALS.LEAST_INFEAS_VALUE_CHANGED)\n if value > self.most_infeas_value:\n self.most_infeas_value = value\n solver.channel.emit(solver.SIGNALS.MOST_INFEAS_VALUE_CHANGED)\n else:\n self.feas_count += 1\n if is_better(value, self.best_feas_value):\n self.best_feas_value = value\n solver.channel.emit(solver.SIGNALS.BEST_FEAS_VALUE_CHANGED)\n if is_better(self.worst_feas_value, value):\n self.worst_feas_value = value\n solver.channel.emit(solver.SIGNALS.WORST_FEAS_VALUE_CHANGED)\n if is_better(value, self.best_value):\n self.best_value = value\n solver.channel.emit(solver.SIGNALS.BEST_SOL_VALUE_CHANGED)\n if is_better(value, solver.incumbent):\n solver.incumbent = value", "def __call__(self, value):\n vals = {key: self.brain._data[key] for key in self.brain.keys}\n if self.name == \"fmin\" and self.widgets[\"fmin\"] is not None:\n if vals['fmax'] < value:\n vals['fmax'] = value\n self.widgets['fmax'].set_value(value)\n if vals['fmid'] < value:\n vals['fmid'] = value\n self.widgets['fmid'].set_value(value)\n self.widgets['fmin'].set_value(value)\n elif self.name == \"fmid\" and self.widgets['fmid'] is not None:\n if vals['fmin'] > value:\n vals['fmin'] = value\n self.widgets['fmin'].set_value(value)\n if vals['fmax'] < value:\n vals['fmax'] = value\n 
self.widgets['fmax'].set_value(value)\n self.widgets['fmid'].set_value(value)\n elif self.name == \"fmax\" and self.widgets['fmax'] is not None:\n if vals['fmin'] > value:\n vals['fmin'] = value\n self.widgets['fmin'].set_value(value)\n if vals['fmid'] > value:\n vals['fmid'] = value\n self.widgets['fmid'].set_value(value)\n self.widgets['fmax'].set_value(value)\n self.brain.widgets[f'entry_{self.name}'].set_value(value)\n if time.time() > self.last_update + 1. / 60.:\n self.callback[self.name](value)\n self.last_update = time.time()", "def _update_value(self) -> int:\n\n value_list = [card.value if card.value <= 10 else 10 for card in self]\n hand_value = sum(value_list)\n\n # Checks to see if any Aces can be worth 11 points instead of 1 point\n while value_list.count(1) > 0 and (21 - hand_value) >= 10:\n value_list[value_list.index(1)] = 11\n hand_value = sum(value_list)\n\n self._value = hand_value", "def set_mapping(self, value_min, value_min_raw, value_max, value_max_raw):\n assert value_min <= value_max\n # prevent division by zero.\n if value_min == value_max:\n value_max += 1.\n if value_min_raw == value_max_raw:\n value_max_raw += 1.\n self.value_min = value_min\n self.value_max = value_max\n self.value_min_raw = value_min_raw\n self.value_max_raw = value_max_raw\n self._value_scale = (self.value_max - self.value_min) / (self.value_max_raw - self.value_min_raw)", "def update_points(self):\n #Calculate Upper Section\n total = 0\n for box in self._upper_section:\n total += box.points\n self._upperSum = total\n\n if total >= 63:\n self._bonus = 35\n total += 35\n self._upperTotal = total\n\n # Calculate Lower Section\n total = 0\n for box in self._lower_section:\n total += box.points\n\n if self.get_box(\"Yahtzee\").points > 0:\n total = total + (self._yahtzee_count - 1) * 100 # Yahtzee Bonus\n\n self._lowerTotal = total\n\n #Total Points\n self._grandTotal = self._upperTotal + self._lowerTotal", "def add(self, val):\n key = self.get_key(val)\n self.store.add(key)\n\n # Keep track of summary stats\n self._count += 1\n self._sum += val\n if val < self._min:\n self._min = val\n if val > self._max:\n self._max = val", "def calculate(self):\n self.lst = self._niceNum(self.maxPoint - self.minPoint, False)\n self.tickSpacing = self._niceNum(self.lst / (self.maxTicks - 1), True)\n self.niceMin = np.floor(self.minPoint / self.tickSpacing) * self.tickSpacing\n self.niceMax = np.ceil(self.maxPoint / self.tickSpacing) * self.tickSpacing", "def valuechange():\n\n tempmin.setMaximum(tempmax.value())\n tempmax.setMinimum(tempmin.value())\n hummin.setMaximum(hummax.value())\n hummax.setMinimum(hummin.value())\n\n self.variables.default_values_dict[\"settings\"][\n \"current_tempmin\"\n ] = tempmin.value()\n self.variables.default_values_dict[\"settings\"][\n \"current_tempmax\"\n ] = tempmax.value()\n self.variables.default_values_dict[\"settings\"][\n \"current_hummin\"\n ] = hummin.value()\n self.variables.default_values_dict[\"settings\"][\n \"current_hummax\"\n ] = hummax.value()\n\n max = build_command(\n self.variables.devices_dict[\"temphum_controller\"],\n (\"set_hummax\", hummax.value()),\n )\n min = build_command(\n self.variables.devices_dict[\"temphum_controller\"],\n (\"set_hummin\", hummin.value()),\n )\n\n self.variables.vcw.write(\n self.variables.devices_dict[\"temphum_controller\"], max\n )\n self.variables.vcw.write(\n self.variables.devices_dict[\"temphum_controller\"], min\n )", "def update(self, value):\n # If the value has not already been set, set it.\n if self.value is 
None:\n self.value = value\n else:\n # Calculate the new value.\n self.value = ((1-self.weight) * self.value + self.weight * value)\n return self.value", "def _update_val_from_pos(self, pos):\n idx = np.argmin(np.abs(self.val - pos))\n if idx == 0:\n val = self._min_in_bounds(pos)\n self.set_min(val)\n else:\n val = self._max_in_bounds(pos)\n self.set_max(val)\n if self._active_handle:\n if self.orientation == \"vertical\":\n self._active_handle.set_ydata([val])\n else:\n self._active_handle.set_xdata([val])", "def regrow(self, **kwargs):\n self.resources[self.resources >= self.min_value] += self.regrow_rate\n self.resources[self.resources >= self.max_value] = self.max_value", "def set_value(self, new_value):\n temp = new_value\n if temp < self.limits[0]:\n temp = self.limits[0]\n if temp > self.limits[1]:\n temp = self.limits[1]\n self.value = temp\n self.rect = self._as_rect()\n return self.value", "def merge_stats(self, other):\n\n self[1] += other[1]\n self[2] += other[2]\n self[3] = self[0] and min(self[3], other[3]) or other[3]\n self[4] = max(self[4], other[4])\n self[5] += other[5]\n\n # Must update the call count last as update of the\n # minimum call time is dependent on initial value.\n\n self[0] += other[0]", "def set_total(self, valeurs):\r\n \r\n self._total = valeurs", "def setValues(self, values):\n if values is not None:\n self.scale_min, self.scale_max = values\n if self.scale_min is None:\n self.scale_min = self.start\n if self.scale_max is None:\n self.scale_max = self.end\n else:\n self.scale_min = self.start\n self.scale_max = self.end\n self.emitRange()\n self.updateDisplayValues()\n self.update()", "def compute_values(self, update_statistics=False):\n\n self.compute_iterations()\n self.axsec = sum([one.axsec for one in self])\n self.xsec = sum([one.xsec for one in self])\n self.xerrc = sum([one.xerrc for one in self])\n self.xerru = math.sqrt(sum([one.xerru**2 for one in self]))\n\n self.nevents = sum([one.nevents for one in self])\n self.nw = sum([one.nw for one in self])\n self.maxit = len(self.yerr_iter) # \n self.nunwgt = sum([one.nunwgt for one in self]) \n self.wgt = 0\n self.luminosity = min([0]+[one.luminosity for one in self])\n if update_statistics:\n self.run_statistics.aggregate_statistics([_.run_statistics for _ in self])", "def merge_stats(self, other):\n\n self[1] += other[1]\n self[2] = self[0] and min(self[2], other[2]) or other[2]\n self[3] = max(self[3], other[3])\n\n if self[3] == other[3]:\n self[4] = other[4]\n\n # Must update the call count last as update of the\n # minimum call time is dependent on initial value.\n\n self[0] += other[0]", "def _update_data_range(self):\r\n self._h_min = np.min(self.h)\r\n self._h_max = np.max(self.h)\r\n self._hr_min = np.min(self.hr)\r\n self._hr_max = np.max(self.hr)\r\n self._m_min = np.nanmin(self.m)\r\n self._m_max = np.nanmax(self.m)\r\n\r\n if self.temperature is None or np.all(np.isnan(self.temperature)):\r\n self._T_min = np.nan\r\n self._T_max = np.nan\r\n else:\r\n self._T_min = np.nanmin(self.temperature)\r\n self._T_max = np.nanmax(self.temperature)\r\n\r\n return", "def update_and_calculate(self, value):\r\n retval = -1\r\n diff = abs(self.ewma - value)\r\n if self.n >= 5: # only calculate meandevs if collected > 5 data pts.\r\n if self.ewmmd > 0:\r\n meandevs = diff/self.ewmmd\r\n else:\r\n meandevs = diff/.00001\r\n retval = meandevs\r\n \r\n # update ewma/ewmmd\r\n self.n += 1\r\n if self.n > 1:\r\n if self.n > 2:\r\n self.ewmmd = (.125*diff) + (.875*self.ewmmd)\r\n else:\r\n self.ewmmd = 
diff\r\n self.ewma = (.125*value) + (.875*self.ewma)\r\n else:\r\n self.ewma = value\r\n return retval", "def update(self, values: List[int]) -> None:\n ...", "def update(self, values: List[int]) -> None:\n ...", "def increment_amount(self, add_amount=1):\n new_amount = self.amount + add_amount\n if new_amount < self.min:\n new_amount = self.min\n if new_amount > self.max:\n new_amount = self.max\n self.amount = new_amount\n self.build_bar()", "def regrow(self, **kwargs):\n self.resources[self.resources >= self.min_value] += self.revive_rate\n self.resources[self.resources >= self.max_value] = self.max_value" ]
[ "0.6380112", "0.615847", "0.60419434", "0.6009748", "0.59425825", "0.5924577", "0.5918863", "0.5868015", "0.5866125", "0.58211535", "0.5773228", "0.5773018", "0.567507", "0.56311446", "0.5615232", "0.5568014", "0.55477995", "0.5547022", "0.5535163", "0.55029064", "0.54945266", "0.5486441", "0.5484662", "0.5477229", "0.5454699", "0.5445646", "0.5415483", "0.5415483", "0.5410686", "0.5407404" ]
0.71427345
0
Test that causal convolutions only operate on leftward inputs
def test_causal_convolution(conv1d_placeholder, spatial_onehot, output_size, width): conv_layer = Convolution((3, output_size), lambda x: 1, padding="causal") output = conv_layer(conv1d_placeholder) output_width = output.axes.find_by_name("W")[0].length assert output_width == width, "Causal convolution output width != " \ "input width: {} != {}".format(output_width, width) with executor(output, conv1d_placeholder) as comp: output_val = comp(spatial_onehot) # First 1 is at width // 2, so anything before that should be 0 assert (output_val[:, :width // 2] == 0).all(), "Acausal outputs in causal convolution"
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_conv_layer_forward():\n #Setup layer and inputs\n conv = BinConv2d(2,1, [2,2],stride=1, bias=False)\n conv.weight.data.copy_(torch.Tensor([ [0.5,- 0.5] , [-0.5, 0.5],\\\n [1,-1] , [0.5, 0.5],]).view(1,2,2,2))\n\n\n inputs = torch.Tensor([ [1.1,2.1],[15,.01],[1,0],[1.,1.0]] ).view(1,2,2,2)\n\n # Check the result\n result = conv(inputs)\n expected_result = torch.nn.functional.conv2d(inputs, torch.Tensor([ [sign(0.5),sign(-0.5)] , [sign(-0.5), sign(0.5)],\\\n [sign(1),sign(-1)] , [sign(0.5), sign(0.5)],]).view(1,2,2,2) ,None)\n\n assert torch.all(torch.eq(result, expected_result))\n\n\n # Redo the test with a bias\n conv2 = BinConv2d(2,1, [2,2],stride=1, bias=True)\n conv2.weight.data.copy_(torch.Tensor([ [0.5,- 0.5] , [-0.5, 0.5],\\\n [1,-1] , [0.5, 0.5],]).view(1,2,2,2))\n \n\n conv2.bias.data.copy_(torch.Tensor([33.3]))\n result2 = conv2(inputs)\n expected_result2 = torch.nn.functional.conv2d(inputs, torch.Tensor([ [sign(0.5),sign(-0.5)] , [sign(-0.5), sign(0.5)],\\\n [sign(1),sign(-1)] , [sign(0.5), sign(0.5)],]).view(1,2,2,2) ,torch.Tensor([33.3]))\n\n assert torch.all(torch.eq(result2, expected_result2))", "def test_concentration_limits(self):\n t, x_n, x_p, r_n, r_p = self.t, self.x_n, self.x_p, self.r_n, self.r_p\n\n np.testing.assert_array_less(-self.c_s_n(t, x_n, r_n), 0)\n np.testing.assert_array_less(-self.c_s_p(t, x_p, r_p), 0)\n\n np.testing.assert_array_less(self.c_s_n(t, x_n, r_n), 1)\n np.testing.assert_array_less(self.c_s_p(t, x_p, r_p), 1)", "def test_convolution():\n # Default test\n inputs_shape = [3,3,4,5,3]\n filters_shape = [3,1,4,4,3]\n test_convolution_for_parameters(inputs_shape, filters_shape,\n \"Default test\")\n # All dimensions 1\n inputs_shape = [1,1,1,1,1]\n filters_shape = [1,1,1,1,1]\n test_convolution_for_parameters(inputs_shape, filters_shape,\n \"Input and filter dimensions 1\")\n # Filter spans all dimensions\n # This will lead to a failure for theano 2d3d for some reason\n # (for now we ignore this and remove theano2d3d for this test\n inputs_shape = [3,3,4,5,3]\n filters_shape = [3,3,4,5,3]\n test_convolution_for_parameters(inputs_shape, filters_shape,\n \"Filter dimension = Input dimension\")\n # Filter smaller for all dimensions\n inputs_shape = [3,3,4,5,3]\n filters_shape = [3,2,2,2,3]\n test_convolution_for_parameters(inputs_shape, filters_shape, \n \"Filter dimension < all Input dimension\")\n # 1,1,1,1,1 filter\n # Filter smaller for all dimensions\n inputs_shape = [3,3,4,5,1]\n filters_shape = [3,1,1,1,1]\n test_convolution_for_parameters(inputs_shape, filters_shape, \n \"Filter dimension 1 everywhere\")", "def test_continuity():\n dc = cs[:,0:Nr-1]-cs[:,1:Nr]\n assert dc.any < cmax", "def testComputation(self, use_bias):\n conv1 = snt.CausalConv1D(\n output_channels=1,\n kernel_shape=3,\n stride=1,\n use_bias=use_bias,\n name=\"conv1\",\n initializers=create_constant_initializers(1.0, 1.0, use_bias))\n\n out = conv1(tf.constant(np.ones([1, 5, 1], dtype=np.float32)))\n expected_out = np.reshape(np.array([1, 2, 3, 3, 3]), [1, 5, 1])\n if use_bias:\n expected_out += 1\n\n init_op = tf.variables_initializer(\n [conv1.w, conv1.b] if use_bias else [conv1.w])\n with self.test_session() as sess:\n sess.run(init_op)\n actual_out = sess.run(out)\n\n self.assertAllClose(actual_out, expected_out)", "def clConvolution(self, size, mask):", "def test_deconvolve_once_general(self):\n tau = 50.0\n tau_deconv = 20.0\n mrate = 50.0\n Mrate = 100.0\n\n tmax = 60.0\n dt = 0.1\n\n self.rule.tau = tau\n self.rule.min_rate = mrate\n self.rule.max_rate = 
Mrate\n self.rule.compress_rates = False\n\n self.motor.error_fct = lambda t: (int_r(t/20.0)%3-1)*np.ones(self.Nsrc)\n\n M1 = simulation.StateMonitor(self.rule, 'out')\n\n sim1 = simulation.Simulation(self.source, self.motor, self.rule, M1, dt=dt)\n sim1.run(tmax)\n\n self.rule.tau_deconv1 = tau_deconv\n\n M2 = simulation.StateMonitor(self.rule, 'out')\n\n sim2 = simulation.Simulation(self.source, self.motor, self.rule, M2, dt=dt)\n sim2.run(tmax)\n\n mavg = (mrate + Mrate)*0.5\n mdiff = (Mrate - mrate)*0.5\n\n out1 = (M1.out - mavg)/mdiff\n out2 = (M2.out - mavg)/mdiff\n\n der_out1 = np.diff(out1, axis=1)/dt\n\n expected_out2_crop = out1[:, 1:] + tau_deconv*der_out1\n\n # mismatch is relatively large since we're using Euler's method\n # we can't do much better, however, since the motor controller cannot give\n # us motor error information at sub-step resolution\n mismatch = np.mean(np.abs(expected_out2_crop - out2[:, 1:])/\n expected_out2_crop)\n self.assertLess(mismatch, 1e-3)", "def test_axis_preservation(conv1d_placeholder, output_size):\n conv_layer = Convolution((3, output_size), lambda x: 1)\n output = conv_layer(conv1d_placeholder)\n assert output.axes == conv1d_placeholder.axes, (\"Output axes are not the same as input axes: \"\n \"{} != {}\").format(output.axes,\n conv1d_placeholder.axes)", "def test_conv_layer_train():\n\n #Setup layer and input\n weight = torch.Tensor([ [0.5,- 0.5] , [-0.5, 0.5],\\\n [1,-1] , [0.5, 0.5],]).view(1,2,2,2)\n inputs = torch.Tensor([ [1.1,2.1],[15,.01],[1,0],[1.,1.0]] ).view(1,2,2,2)\n\n expected_result = torch.nn.functional.conv2d(inputs, torch.Tensor([ [sign(0.5),sign(-0.5)] , [sign(-0.5), sign(0.5)],\\\n [sign(1),sign(-1)] , [sign(0.5), sign(0.5)],]).view(1,2,2,2) ,None)\n\n\n conv = BinConv2d(2,1, [2,2],stride=1, bias=False)\n conv.weight.data.copy_(weight)\n\n # Check the forward pass\n assert torch.all(torch.eq(expected_result, conv(inputs)))\n\n \n conv.train(False)\n # Check the forward pass\n assert torch.all(torch.eq(expected_result, conv(inputs)))\n\n # Look at layer's weight on eval mode.\n assert torch.all(torch.eq(conv.weight, BinaryConnectDeterministic.apply(weight)))\n\n conv.train(True)\n # Check the forward pass\n assert torch.all(torch.eq(expected_result, conv(inputs)))\n\n # Look at layer's weight on training mode.\n assert torch.all(torch.eq(conv.weight, weight))", "def test_central(self):\n x = np.array([-100, -2, 0, 0, 1, 1.1])\n self.assertEqual(npinterval.half_sample_mode(x), 0)", "def test_channel_axis_introduction(conv1d_no_channel_axis, output_size, channel_axis):\n conv_layer = Convolution((3, output_size), lambda x: 1)\n output = conv_layer(conv1d_no_channel_axis)\n t_axes = conv1d_no_channel_axis.axes + channel_axis\n assert output.axes.is_equal_set(t_axes), (\"Output axes are not input axes + channel axis:\"\n \"{} != {} + {}\").format(output.axes,\n conv1d_no_channel_axis.axes,\n channel_axis)", "def canonise_left(self, compress=False, Dmax=np.inf, tol=None):\n self.C = np.ones((1, 1))\n self.pC = 0\n for n in range(self.L):\n self.attach_CA()\n self.orth_left(n)\n if compress:\n self.truncateC(Dmax, tol)\n self.F[-1], self.R[-1] = None, None", "def test_conv3d():\n img_r = np.array([\n [0.3, 0.5, 0.7, 0.9],\n [0.1, 0.3, 0.5, 0.7],\n [0.9, 0.7, 0.5, 0.3],\n ])\n img_g = np.array([\n [0.4, 0.6, 0.8, 1.0],\n [0.5, 0.5, 0.5, 0.5],\n [0.1, 0.7, 0.5, 0.9],\n ])\n img_b = np.array([\n [0.2, 0.2, 0.8, 0.8],\n [0.1, 0.6, 0.5, 0.6],\n [0.5, 0.3, 0.4, 0.7],\n ])\n img = np.dstack([img_r, img_g, img_b])\n template_r = 
np.array([\n [0.5, 0],\n [0.7, 0],\n ])\n template_g = np.array([\n [0.9, 0],\n [0.2, 0],\n ])\n template_b = np.array([\n [0.1, 0],\n [0.4, 0],\n ])\n template = np.dstack([template_r, template_g, template_b])\n template = np.flipud(np.fliplr(template))\n template[:, :, :] = template[:, :, ::-1]\n return np.squeeze(fftconvolve(img, template, mode='valid'))", "def test_compute_cbca_subpixel(self):\n stereo_matcher = stereo.AbstractStereo(**{'stereo_method': 'sad', 'window_size': 1, 'subpix': 2})\n sad = stereo_matcher.compute_cost_volume(img_ref=self.ref, img_sec=self.sec, disp_min=-1, disp_max=1,\n **{'valid_pixels': 0, 'no_data': 1})\n\n # Computes the cost aggregation with the cross-based cost aggregation method,\n # with cbca_intensity=5 and cbca_distance=3\n cbca_obj = aggregation.AbstractAggregation(**{'aggregation_method': 'cbca',\n 'cbca_intensity': 5., 'cbca_distance': 3})\n\n cv_aggreg = cbca_obj.cost_volume_aggregation(self.ref, self.sec, sad)\n\n # Aggregate cost volume ground truth with the cross-based cost aggregation method for the stereo image\n aggregated_ground_truth = np.array([[[np.nan, np.nan, 36./6, 25./7, 0.],\n [36./6, 46./7, 66./9, 38.5/12, 0.],\n [55./9, 64.5/12, 74./12, 32.5/10, 0.],\n [55./9, 59.5/10, 52./9, 31.5/9, 0.],\n [41./6, 43.5/9, 52./9, np.nan, np.nan]],\n\n [[np.nan, np.nan, 36./6, 25./7, 0.],\n [36./6, 46./7, 66./9, 38.5/12, 0.],\n [55./9, 64.5/12, 74./12, 32.5/10, 0.],\n [55./9, 59.5/10, 52./9, 31.5/9, 0.],\n [41./6, 43.5/9, 52./9, np.nan, np.nan]],\n\n [[np.nan, np.nan, 36./6, 25./7, 0.],\n [36./6, 46./7, 66./9, 38.5/12, 0.],\n [55./9, 64.5/12, 74./12, 32.5/10, 0.],\n [55./9, 59.5/10, 52./9, 31.5/9, 0.],\n [41./6, 43.5/9, 52./9, np.nan, np.nan]]])\n\n # Check if the calculated standard deviation is equal ( to desired tolerance 1e-07 ) to the ground truth\n np.testing.assert_allclose(cv_aggreg['cost_volume'].data, aggregated_ground_truth, rtol=1e-07)", "def test_careduce():\r\n for scalar_op, careduce_op in [\r\n (theano.scalar.mul, tensor.elemwise.CAReduceDtype),\r\n (theano.scalar.add, tensor.elemwise.CAReduceDtype),\r\n (theano.scalar.maximum, tensor.CAReduce),\r\n (theano.scalar.minimum, tensor.CAReduce)\r\n #The following 2 cases could work if the scalar_op.c_code work with float* dtype.\r\n #Currently we have this error:\r\n #error: invalid operands of types 'npy_float32' and 'npy_float32' to binary 'operator&'\r\n #(theano.scalar.and_, tensor.elemwise.CAReduce),\r\n #(theano.scalar.or_, tensor.elemwise.CAReduce),\r\n ]:\r\n for shape, pattern in [((1,1),(1,)),\r\n ((1,0),(1,)),\r\n ((0,1),(1,)),\r\n ((0,0),(1,)),\r\n ((0,0,0),(1,2)),\r\n ((0,0,0,0),(1,2,3)),\r\n ((2,1),(1,)),\r\n ((1,2),(1,)),\r\n ((100,3,1300),[1]),\r\n ((0,),[0]),((5,),[0]),\r\n ((0,0),[0,1]),((1,0),[0,1]),((5,4),[0,1]),((33,31),[0,1]),((5,4),[1]),((5,4),[0]),#need something bigger then 32 for some opt test.\r\n ((5,4,3),[0]),((5,4,3),[1]),((5,4,3),[0,1]),((5,4,3),[2]),((5,4,3),[1,2]),((5,4,3),[0,1,2]),\r\n ((0,0,0,0),[0,1,2,3]),\r\n ((5,4,3,20),[2,3]), ((5,4,3,2),[0,1,2,3]), ((5,4,3,2),[0,2,3]),((5,4,3,2),[1,2,3]),\r\n ((5,4,3,10,11),[1,2]),\r\n ((5,4,3,20),[2,3]), ((5,4,3,2),[0,1,2,3]), ((5,4,3,2),[0,2,3]),((5,4,3,2),[1,2,3]),\r\n\r\n #test shape bigger then 4096 on each dimension to make sure that we work correctly when we don't have enough thread/block in each dimensions\r\n ((4100,3),[0]),((3,4101),[0]),#10\r\n ((1024,33),[0]),((33,1024),[0]),#10\r\n ((1025,33),[0]),((33,1025),[0]),#10\r\n\r\n ((4100,3),[1]),((3,4101),[1]),#01\r\n ((1024,33),[1]),((33,1024),[1]),#01\r\n 
((1025,33),[1]),((33,1025),[1]),#01\r\n\r\n ((4100,3),[0,1]),((3,4101),[0,1]),#11\r\n ((1024,33),[0,1]),((33,1024),[0,1]),#01\r\n ((1025,33),[0,1]),((33,1025),[0,1]),#01\r\n\r\n ((4100,4,3),[0]),((5,4100,3),[0]),((5,4,4100),[0]), ((3,65536,1), [0]),#100\r\n ((4100,4,3),[1]),((5,4100,3),[1]),((5,4,4100),[1]),#010\r\n ((4100,4,3),[2]),((5,4100,3),[2]),((5,4,4100),[2]),#001\r\n ((4100,4,3),[0,1]),((5,4100,3),[0,1]),((5,4,4100),[0,1]),#110\r\n ((4100,4,3),[1,2]),((5,4100,3),[1,2]),((5,4,4100),[1,2]),#011\r\n #((4100,4,3),[0,2]),((5,4100,3),[0,2]),((5,4,4100),[0,2]),#101 ##not implemented\r\n ((4100,4,3),[0,1,2]),((5,4100,3),[0,1,2]),((5,4,4100),[0,1,2]),#111\r\n ((65,4,3),[0,1,2]),((5,65,3),[0,1,2]),((5,4,65),[0,1,2]),#111\r\n\r\n ((4100,4,3,2),[2,3]),((4,4100,3,2),[2,3]),((4,3,4100,2),[2,3]),((4,3,2,4100),[2,3]),#0011\r\n ((4100,4,3,2),[1,3]),((4,4100,3,2),[1,3]),((4,3,4100,2),[1,3]),((4,3,2,4100),[1,3]),#0101\r\n ((4100,4,3,2),[0,2,3]),((4,4100,3,2),[0,2,3]),((4,3,4100,2),[0,2,3]),#((4,3,2,4100),[0,2,3]),#1011\r\n ((4100,4,3,2),[1,2,3]),((4,4100,3,2),[1,2,3]),((4,3,4100,2),[1,2,3]),((4,3,2,4100),[1,2,3]),#0111\r\n ((65,4,3,2),[1,2,3]),((4,65,3,2),[1,2,3]),((4,3,65,2),[1,2,3]),((4,3,2,65),[1,2,3]),#0111\r\n ((4100,2,3,4),[0,1,2,3]),((2,4100,3,4),[0,1,2,3]),((2,3,4100,4),[0,1,2,3]),((2,3,4,4100),[0,1,2,3]),((128,1,3,3), [0,1,2,3]),#1111\r\n\r\n\r\n #test pattern implemented by reshape\r\n ((4100,4,3,2),[0]),((4,4100,3,2),[0]),((4,3,4100,2),[0]),((4,3,2,4100),[0]),#1000\r\n ((4100,4,3,2),[1]),((4,4100,3,2),[1]),((4,3,4100,2),[1]),((4,3,2,4100),[1]),#0100\r\n ((4100,4,3,2),[2]),((4,4100,3,2),[2]),((4,3,4100,2),[2]),((4,3,2,4100),[2]),#0010\r\n ((4100,4,3,2),[3]),((4,4100,3,2),[3]),((4,3,4100,2),[3]),((4,3,2,4100),[3]),#0001\r\n ((1100,2,3,4,5),[0,1,2,3,4]),((2,1100,3,4,5),[0,1,2,3,4]),((2,3,1100,4,5),[0,1,2,3,4]),((2,3,4,1100,5),[0,1,2,3,4]),((2,3,4,5,1100),[0,1,2,3,4]),#11111\r\n\r\n ]:\r\n\r\n op = careduce_op(scalar_op, axis=pattern)\r\n pat = tensor_pattern_to_gpu_pattern(shape, pattern)\r\n\r\n a = tensor.TensorType('float32', (False,) * len(shape))()\r\n b = op(a)\r\n val = numpy.random.rand(numpy.prod(shape)).reshape(shape)\r\n # val = numpy.ones(shape)\r\n # val = numpy.arange(numpy.prod(shape)).reshape(shape)\r\n val = theano._asarray(val, dtype='float32')\r\n f = theano.function([a], b, mode=mode_with_gpu)\r\n f2 = theano.function([a], b, mode=mode_without_gpu)\r\n assert tcn.GpuCAReduce in [x.op.__class__\r\n for x in f.maker.fgraph.toposort()], (\r\n scalar_op, shape, pattern)\r\n assert op.__class__ in [x.op.__class__\r\n for x in f2.maker.fgraph.toposort()], (\r\n scalar_op, shape, pattern)\r\n f_caused_value_error = False\r\n try:\r\n f_out = f(val)\r\n except ValueError, e:\r\n exc = e\r\n f_caused_value_error = True\r\n except NotImplementedError:\r\n if (numpy.prod(shape) == 0 and\r\n getattr(scalar_op, 'identity', None) != 0):\r\n continue\r\n raise\r\n\r\n f2_caused_value_error = False\r\n try:\r\n f2_out = f2(val)\r\n except ValueError, e:\r\n exc2 = e\r\n f2_caused_value_error = True\r\n\r\n if f_caused_value_error != f2_caused_value_error:\r\n if f_caused_value_error:\r\n print 'f caused this value error:'\r\n print exc\r\n else:\r\n print 'f did not raise a value error, but should have'\r\n if f2_caused_value_error:\r\n print 'f2 caused this value error:'\r\n print exc2\r\n else:\r\n print 'f should not have raised a value error'\r\n print 'shape was: ', shape\r\n print 'pattern was: ', pattern\r\n assert False\r\n\r\n try:\r\n #We raise the error threashold as we sum big 
matrix\r\n #and this cause small rounding difference with some seed\r\n #example in debug mode with unittests.rseed=9275\r\n orig_rtol = theano.tensor.basic.float32_rtol\r\n theano.tensor.basic.float32_rtol = 2e-5\r\n assert _allclose(f_out, f2_out), ('shape', shape,\r\n 'pattern', pattern,\r\n scalar_op,\r\n sum([shape[i] for i in pattern]),\r\n f2(val), f(val), val)\r\n finally:\r\n theano.tensor.basic.float32_rtol = orig_rtol\r\n\r\n\r\n #test with dimshuffle\r\n #we shuffle the 2 outer dims.\r\n for shape, pattern in [#((5,),[0]),\r\n ((5,4),[0,1]),((5,4),[0]),\r\n ((5,4,3),[0]),((5,4,3),[0,1]),((5,4,3),[2]),((5,4,3),[0,1,2]),\r\n ((5,4,3,2),[0,1,2,3]), ((5,4,3,2),[0,2,3]),\r\n ((128,1,3,3),[0,1,2,3]),\r\n ]:\r\n op = careduce_op(scalar_op, axis=pattern)\r\n pat = tensor_pattern_to_gpu_pattern(shape, pattern)\r\n\r\n a = tensor.TensorType('float32', (False,) * len(shape))()\r\n dim_pattern = range(len(shape))\r\n dim_pattern[0] = 1\r\n dim_pattern[1] = 0\r\n a = a.dimshuffle(dim_pattern)\r\n b = op(a)\r\n val = numpy.random.rand(numpy.prod(shape)).reshape(shape)\r\n # val = numpy.ones(shape)\r\n # val = numpy.arange(numpy.prod(shape)).reshape(shape)\r\n val = theano._asarray(val, dtype='float32')\r\n f = theano.function([a], b, mode=mode_with_gpu)\r\n f2 = theano.function([a], b, mode=mode_without_gpu)\r\n assert tcn.GpuCAReduce in [x.op.__class__\r\n for x in f.maker.fgraph.toposort()], (\r\n scalar_op, shape, pattern)\r\n assert op.__class__ in [x.op.__class__\r\n for x in f2.maker.fgraph.toposort()], (\r\n scalar_op, shape, pattern)\r\n assert _allclose(f2(val), f(val)), ('shape', shape,\r\n 'pattern', pattern,\r\n scalar_op,\r\n sum([shape[i] for i in pattern]))\r\n\r\n #test with broadcast\r\n for shape, pattern in [((5,),[0]),\r\n ((5,4),[0,1]),((5,4),[0]),\r\n ((5,4,3),[0]),((5,4,3),[0,1]),\r\n ((5,4,3),[2]),((5,4,3),[0,1,2]),\r\n ((5,4,3,2),[0,1,2,3]), ((5,4,3,2),[0,2,3]),\r\n ((128,1,3,3),[0,1,2,3]),\r\n ]:\r\n op = careduce_op(scalar_op, axis=pattern)\r\n pat = tensor_pattern_to_gpu_pattern(shape, pattern)\r\n\r\n shape = numpy.asarray(shape) * 2\r\n a = tensor.TensorType('float32', (False,) * len(shape))()\r\n a2 = tcn.CudaNdarrayType((False,) * len(shape))()\r\n b = op(a)\r\n b2 = op(a2)\r\n val = numpy.random.rand(numpy.prod(shape)).reshape(shape)\r\n # val = numpy.ones(shape)\r\n # val = numpy.arange(numpy.prod(shape)).reshape(shape)\r\n val = theano._asarray(val, dtype='float32')\r\n val2 = cuda.CudaNdarray(val)\r\n if len(shape) == 1:\r\n val = val[::2]\r\n val2 = val2[::2]\r\n elif len(shape) == 2:\r\n val = val[::2, ::2]\r\n val2 = val2[::2, ::2]\r\n elif len(shape) == 3:\r\n val = val[::2, ::2, ::2]\r\n val2 = val2[::2, ::2, ::2]\r\n elif len(shape) == 4:\r\n val = val[::2, ::2, ::2, ::2]\r\n val2 = val2[::2, ::2, ::2, ::2]\r\n f = theano.function([a], b, mode=mode_without_gpu)\r\n f2 = theano.function([a2], b2, mode=mode_with_gpu)\r\n assert tcn.GpuCAReduce in [x.op.__class__\r\n for x in f2.maker.fgraph.toposort()], (\r\n scalar_op, shape, pattern)\r\n assert op.__class__ in [x.op.__class__\r\n for x in f.maker.fgraph.toposort()], (\r\n scalar_op, shape, pattern)\r\n assert _allclose(f2(val2), f(val)), ('shape', shape,\r\n 'pattern', pattern,\r\n sum([shape[i] for i in pattern]))", "def test_convolution_backprop(transformer_factory):\n N = 128\n C, K = 3, 2\n D, T = 1, 1\n H = W = 32\n R = S = 2\n\n padding = dict(pad_d=0, pad_h=0, pad_w=0)\n strides = dict(str_d=1, str_h=1, str_w=1)\n dilation = dict(dil_d=1, dil_h=1, dil_w=1)\n conv_params = padding.copy()\n 
conv_params.update(strides)\n conv_params.update(dilation)\n\n ax_i = ng.make_axes([ax.C, ax.D, ax.H, ax.W, ax.N])\n ax_f = ng.make_axes([ax.C, ax.T, ax.R, ax.S, ax.K])\n ax_i.set_shape((C, D, H, W, N))\n ax_f.set_shape((C, T, R, S, K))\n ax_o = ng.make_axes([\n ng.make_axis(roles=[ar.features_input]).named('C'),\n ng.make_axis(roles=[ar.features_0]).named('D'),\n ng.make_axis(roles=[ar.features_1]).named('H'),\n ng.make_axis(roles=[ar.features_2]).named('W'),\n ax.N\n ])\n\n ax_o[:-1].set_shape((\n K,\n output_dim(D, T, padding['pad_d'], strides['str_d']),\n output_dim(H, R, padding['pad_h'], strides['str_h']),\n output_dim(W, S, padding['pad_w'], strides['str_w']))\n )\n\n inputs = ng.placeholder(axes=ax_i)\n filters = ng.placeholder(axes=ax_f)\n\n # randomly initialize\n input_value = rng.uniform(-1, 1, ax_i)\n filter_value = rng.uniform(-1, 1, ax_f)\n\n assert input_value.shape == ax_i.lengths\n assert filter_value.shape == ax_f.lengths\n\n output = ng.sum(ng.convolution(conv_params, inputs, filters, ax_o), out_axes=())\n\n with ExecutorFactory() as factory:\n dcdf_sym_fun = factory.derivative(output, filters, inputs)\n dcdf_num_fun = factory.numeric_derivative(output, filters, .01, inputs)\n dcdf_sym_val = dcdf_sym_fun(filter_value, input_value)\n dcdf_num_val = dcdf_num_fun(filter_value, input_value)\n\n ng.testing.assert_allclose(dcdf_sym_val, dcdf_num_val, rtol=1)", "def test_concentration_limit(self):\n np.testing.assert_array_less(-self.c_e(self.t, self.x), 0)", "def test_conv1d():\n filters = 3\n kernel_size = 2\n strides = 1\n batch_size = 2\n in_channels = 3\n input_size = 5\n input_shape = (batch_size, input_size, in_channels)\n\n keras_layer = keras.layers.Conv1D(filters=filters, kernel_size=kernel_size, strides=strides, use_bias=True, bias_initializer=\"ones\")\n input_layer = keras.Input(batch_shape=input_shape)\n keras_model = keras.models.Model(input=input_layer, outputs=keras_layer(input_layer))\n\n new_weights = np.arange(18).reshape(2, 3, 3)\n keras_layer.set_weights([new_weights, keras_layer.get_weights()[1]])\n\n kinput = np.arange(batch_size * input_size * in_channels).reshape(input_shape)\n kout = keras_model.predict(kinput)\n\n torch_model, _ = translate.translate_layer(keras_layer)\n tinput = torch.Tensor(kinput).permute(0, 2, 1)\n tout = torch_model(tinput).permute(0, 2, 1)\n assert np.isclose(kout, tout.cpu().data.numpy()).all()", "def test_basic(self):\r\n if (not theano.tensor.nnet.conv.imported_scipy_signal and\r\n theano.config.cxx == \"\"):\r\n raise SkipTest(\"conv2d tests need SciPy or a c++ compiler\")\r\n\r\n self.validate((1, 4, 5), (2, 2, 3), verify_grad=True)\r\n self.validate((7, 5), (5, 2, 3), verify_grad=False)\r\n self.validate((3, 7, 5), (2, 3), verify_grad=False)\r\n self.validate((7, 5), (2, 3), verify_grad=False)", "def test_positional_convolution_forward(ctx):\n # num_batch * channel * height * width input\n # i.e. 
(2, 2, 6, 6)\n in_data = \\\n mx.nd.array(\n [\n [[[1, 2, -1, 0, 1, 1],\n [3, 6, -5, 4, 2, -2],\n [9, 6, -1, 3, 1, 3],\n [4, 2, 5, 7, 3, 1],\n [0, 1, 1, 2, 2, 1],\n [3, 1, 2, 4, 3, 3]],\n\n [[3, 1, 2, 4, 3, 3],\n [0, 1, 1, 2, 2, 1],\n [4, 2, 5, 7, 3, 1],\n [9, 6, -1, 3, 1, 3],\n [3, 6, -5, 4, 2, -2],\n [1, 2, -1, 0, 1, 1]]],\n [[[1, 2, 3, 4, 5, 6],\n [6, 5, 4, 3, 2, 1],\n [0, 0, 1, 1, 2, 2],\n [3, 3, 0, -1, -1, -2],\n [3, 1, 0, 3, 3, 2],\n [5, 6, 7, -1, -2, 0]],\n\n [[5, 6, 7, -1, -2, 0],\n [3, 1, 0, 3, 3, 2],\n [3, 3, 0, -1, -1, -2],\n [0, 0, 1, 1, 2, 2],\n [6, 5, 4, 3, 2, 1],\n [1, 2, 3, 4, 5, 6]]]\n ], ctx=ctx)\n\n # num_filter * channel * K * K weight\n # i.e. (2, 2, 3, 3)\n weight = \\\n mx.nd.array(\n [\n [[[1, 0, 1],\n [0, 2, -1],\n [2, 3, 1]],\n\n [[1, 1, 0],\n [2, -1, 2],\n [3, -2, 4]]],\n\n [[[0, 1, 2],\n [-1, 2, 3],\n [4, 1, -5]],\n\n [[3, 0, -1],\n [-1, 2, 1],\n [5, 6, 2]]]\n ], ctx=ctx)\n\n # num_batch * channel * out_height * out_width scale\n # i.e. (2, 2, 6, 6)\n scale = \\\n mx.nd.array(\n [\n [[[1, 1, 1, 1, 1, 1],\n [1, -1, 1, -1, 1, -1],\n [-1, 1, -1, 1, -1, 1],\n [-1, -1, -1, -1, -1, -1],\n [2, 1, 2, 2, 1, 1],\n [1, 2, 1, 2, 1, 2]],\n\n [[1, 1, 1, 1, 1, 1],\n [1, -1, -1, 1, 1, 1],\n [-1, 1, -1, 1, -1, 1],\n [1, -1, -1, -1, -1, 1],\n [2, -1, 2, -2, 1, 1],\n [1, 2, 1, 2, 1, 2]]],\n\n [[[6, 5, 4, 3, 2, 1],\n [1, 2, 3, 4, 5, 6],\n [1, -1, 2, -2, 3, -3],\n [4, -4, 5, -5, 6, -6],\n [1, 1, 1, 1, 1, 1],\n [-1, -1, -1, -1, -1, -1]],\n\n [[-1, -1, -1, -1, -1, -1],\n [1, 1, 1, 1, 1, 1],\n [4, -4, 5, -5, 6, -6],\n [1, -1, 2, -2, 3, -3],\n [1, 2, 3, 4, 5, 6],\n [6, 5, 4, 3, 2, 1]]],\n ], ctx=ctx)\n\n # num_filter bias\n # i.e. (2, )\n bias = \\\n mx.nd.array(\n [1, 2], ctx=ctx)\n\n in_data_var = mx.symbol.Variable(name=\"in_data\")\n weight_var = mx.symbol.Variable(name=\"weight\")\n scale_var = mx.symbol.Variable(name=\"scale\")\n bias_var = mx.symbol.Variable(name=\"bias\")\n\n op = mx.symbol.contrib.PositionalConvolution(name='test_positional_convolution',\n data=in_data_var,\n scale=scale_var,\n weight=weight_var,\n bias=bias_var,\n num_filter=2,\n pad=(1, 1), kernel=(3, 3), stride=(1, 1))\n be = op.bind(ctx=ctx, args={'in_data': in_data,\n 'scale': scale,\n 'weight': weight,\n 'bias': bias})\n be.forward(True)\n out_o = be.outputs[0].asnumpy()\n print(out_o)", "def test_convolution(transformer_factory):\n N = 128\n C, K = 3, 8\n D, T = 1, 1\n H = W = 32\n R = S = 2\n\n padding = dict(pad_d=0, pad_h=0, pad_w=0)\n strides = dict(str_d=1, str_h=1, str_w=1)\n dilation = dict(dil_d=1, dil_h=1, dil_w=1)\n conv_params = padding.copy()\n conv_params.update(strides)\n conv_params.update(dilation)\n\n ax_i = ng.make_axes([ax.C, ax.D, ax.H, ax.W, ax.N])\n ax_f = ng.make_axes([ax.C, ax.T, ax.R, ax.S, ax.K])\n ax_i.set_shape((C, D, H, W, N))\n ax_f.set_shape((C, T, R, S, K))\n\n ax_o = ng.make_axes([\n ng.make_axis(roles=[ar.features_input]).named('C'),\n ng.make_axis(roles=[ar.features_0]).named('D'),\n ng.make_axis(roles=[ar.features_1]).named('H'),\n ng.make_axis(roles=[ar.features_2]).named('W'),\n ax.N\n ])\n\n ax_o[:-1].set_shape((\n K,\n output_dim(D, T, padding['pad_d'], strides['str_d']),\n output_dim(H, R, padding['pad_h'], strides['str_h']),\n output_dim(W, S, padding['pad_w'], strides['str_w']))\n )\n\n inputs = ng.placeholder(axes=ax_i)\n filters = ng.placeholder(axes=ax_f)\n\n # randomly initialize\n input_value = rng.uniform(-1, 1, ax_i)\n filter_value = rng.uniform(-1, 1, ax_f)\n\n assert input_value.shape == ax_i.lengths\n assert filter_value.shape == ax_f.lengths\n\n 
inputs = ng.placeholder(ax_i)\n filters = ng.placeholder(ax_f)\n\n output = ng.convolution(conv_params, inputs, filters, axes=ax_o)\n targets = ng.placeholder(axes=output.axes)\n\n costs = ng.cross_entropy_binary(ng.sigmoid(output), targets)\n error = ng.sum(costs, out_axes=()) / ng.batch_size(costs)\n d_inputs = ng.deriv(error, inputs)\n d_filters = ng.deriv(error, filters)\n\n targets_value = rng.uniform(.1, 0.9, output.axes)\n\n with executor([output, error, d_inputs, d_filters], inputs, filters, targets) as conv_executor:\n result_ng, err_ng, gradI_ng, gradF_ng = \\\n conv_executor(input_value, filter_value, targets_value)\n\n # Now compute reference values via NEON\n NervanaObject.be.bsz = N\n neon_layer = Convolution(fshape=(R, S, K), padding=padding, strides=strides)\n\n inp = neon_layer.be.array(input_value.reshape(C * H * W * D, N))\n neon_layer.W = neon_layer.be.array(filter_value.reshape(C * R * S * T, K))\n neon_layer.dW = neon_layer.be.empty_like(neon_layer.W)\n neon_layer.configure((C, H, W))\n neon_layer.prev_layer = True\n neon_layer.allocate()\n neon_layer.set_deltas(DummyDeltaBuffers())\n\n result_ne = neon_layer.fprop(inp).get().reshape(output.axes.lengths)\n\n act_result_ne = 1. / (1.0 + np.exp(-result_ne))\n err = neon_layer.be.array((act_result_ne - targets_value).reshape(-1, N) / float(N))\n gradI_ne = neon_layer.bprop(err).get().reshape(ax_i.lengths)\n gradF_ne = neon_layer.dW.get().reshape(ax_f.lengths)\n\n # Compare fprop\n ng.testing.assert_allclose(result_ng, result_ne, rtol=0, atol=1e-6)\n\n # Compare bprop\n ng.testing.assert_allclose(gradI_ng, gradI_ne, rtol=0, atol=1e-6)\n\n # Compare update\n ng.testing.assert_allclose(gradF_ng, gradF_ne, rtol=0, atol=1e-4)", "def test_convolve_numerics(self, leading_dims, lengths, mode):\n L_x, L_y = lengths\n\n x = torch.rand(*(leading_dims + (L_x,)), dtype=self.dtype, device=self.device)\n y = torch.rand(*(leading_dims + (L_y,)), dtype=self.dtype, device=self.device)\n\n actual = F.convolve(x, y, mode=mode)\n\n num_signals = torch.tensor(leading_dims).prod() if leading_dims else 1\n x_reshaped = x.reshape((num_signals, L_x))\n y_reshaped = y.reshape((num_signals, L_y))\n expected = [\n signal.convolve(x_reshaped[i].detach().cpu().numpy(), y_reshaped[i].detach().cpu().numpy(), mode=mode)\n for i in range(num_signals)\n ]\n expected = torch.tensor(np.array(expected))\n expected = expected.reshape(leading_dims + (-1,))\n\n self.assertEqual(expected, actual)", "def testComputationStrided(self, use_bias):\n conv1 = snt.CausalConv1D(\n output_channels=1,\n kernel_shape=3,\n stride=2,\n use_bias=use_bias,\n name=\"conv1\",\n initializers=create_constant_initializers(1.0, 1.0, use_bias))\n\n out = conv1(tf.constant(np.ones([1, 5, 1], dtype=np.float32)))\n expected_out = np.reshape(np.array([1, 3, 3]), [1, 3, 1])\n if use_bias:\n expected_out += 1\n\n init_op = tf.variables_initializer(\n [conv1.w, conv1.b] if use_bias else [conv1.w])\n with self.test_session() as sess:\n sess.run(init_op)\n actual_out = sess.run(out)\n\n self.assertAllClose(actual_out, expected_out)", "def test_fftconvolve_numerics(self, leading_dims, lengths, mode):\n L_x, L_y = lengths\n\n x = torch.rand(*(leading_dims + (L_x,)), dtype=self.dtype, device=self.device)\n y = torch.rand(*(leading_dims + (L_y,)), dtype=self.dtype, device=self.device)\n\n actual = F.fftconvolve(x, y, mode=mode)\n\n expected = signal.fftconvolve(x.detach().cpu().numpy(), y.detach().cpu().numpy(), axes=-1, mode=mode)\n expected = torch.tensor(expected)\n\n self.assertEqual(expected, 
actual)", "def test_conv_consistency(self) -> None:\n x = Input(\n 'const1',\n [1, 3, 3, 3],\n Float32(),\n )\n w = Constant(\n 'weight',\n Float32(),\n np.zeros([1, 2, 2, 3])\n )\n input_ops = {'X': cast(Operator, x), 'W': cast(Operator, w)}\n\n Conv(\n 'conv_under_test',\n [1, 3, 3, 3],\n Float32(),\n input_ops,\n pads=[1, 2, 1, 2],\n strides=[2, 2]\n )\n\n print(\"Consistency test for conv operator passed!\")", "def test_DeformableConvolution():\n try:\n ctx = mx.gpu()\n _ = mx.nd.array([0], ctx=ctx)\n except mx.base.MXNetError:\n pytest.skip(\"deformable_convolution only supports GPU\")\n net = nn.HybridSequential()\n net.add(\n nn.DeformableConvolution(10, kernel_size=(3, 3), strides=1, padding=0),\n nn.DeformableConvolution(10, kernel_size=(3, 2), strides=1, padding=0, activation='relu',\n offset_use_bias=False, use_bias=False),\n nn.DeformableConvolution(10, kernel_size=(3, 2), strides=1, padding=0, activation='relu',\n offset_use_bias=False),\n nn.DeformableConvolution(10, kernel_size=(3, 2), strides=1, padding=0, activation='relu',\n use_bias=False),\n nn.DeformableConvolution(10, kernel_size=(3, 2), strides=1, padding=0, offset_use_bias=False, use_bias=False),\n nn.DeformableConvolution(10, kernel_size=(3, 2), strides=1, padding=0, offset_use_bias=False),\n nn.DeformableConvolution(12, kernel_size=(3, 2), strides=1, padding=0, use_bias=False),\n nn.DeformableConvolution(12, kernel_size=(3, 2), strides=1, padding=0, use_bias=False, num_deformable_group=4),\n )\n\n net.initialize(force_reinit=True, ctx=ctx)\n net.hybridize()\n\n x = mx.nd.random.uniform(shape=(8, 5, 30, 31), ctx=ctx)\n with mx.autograd.record():\n y = net(x)\n y.backward()", "def check_correctness_channelwise(f):\n\n rng = np.random.RandomState([2012, 7, 19])\n batch_size = 5\n pool_size = 4\n n = 3 * pool_size\n zv = rng.randn(batch_size, n).astype(config.floatX) * 1. 
- 1.5\n top_down_v = rng.randn(batch_size, n / pool_size).astype(config.floatX)\n\n p_np, h_np = max_pool_channels_python(zv, pool_size, top_down_v)\n\n z_th = T.matrix()\n z_th.name = 'z_th'\n\n top_down_th = T.matrix()\n top_down_th.name = 'top_down_th'\n\n p_th, h_th = f(z_th, pool_size, top_down_th)\n\n func = function([z_th, top_down_th], [p_th, h_th])\n\n pv, hv = func(zv, top_down_v)\n\n assert p_np.shape == pv.shape\n assert h_np.shape == hv.shape\n if not np.allclose(h_np, hv):\n print((h_np.min(), h_np.max()))\n print((hv.min(), hv.max()))\n assert False\n if not np.allclose(p_np, pv):\n diff = abs(p_np - pv)\n print('max diff ', diff.max())\n print('min diff ', diff.min())\n print('ave diff ', diff.mean())\n assert False", "def pca_detector(data):\n #- 'vol_shape' is the shape of volumes\n vol_shape = data.shape[:-1]\n #- 'n_vols' is the number of volumes\n n_vols = data.shape[-1]\n #- N is the number of voxels in a volume\n N = np.prod(vol_shape)\n\n #- Reshape to 2D array that is voxels by volumes (N x n_vols)\n # transpose to n_vols x N\n X = data.reshape((N, n_vols)).T\n\n \"\"\"\n The first part of the code will use PCA to get component matrix U\n and scalar projections matrix C\n \"\"\"\n\n #- Calculate unscaled covariance matrix for X\n unscaled_cov = X.dot(X.T)\n\n #- Use SVD to return U, S, VT matrices from unscaled covariance\n U, S, VT = npl.svd(unscaled_cov)\n\n #- Calculate the scalar projections for projecting X onto the vectors in U.\n #- Put the result into a new array C.\n C = U.T.dot(X)\n # set nans to 0\n C[np.isnan(C)] = 0\n #- Transpose C\n #- Reshape C to have the 4D shape of the original data volumes.\n C_vols = C.T.reshape((vol_shape + (n_vols,)))\n\n \"\"\"\n The second part of the code determines which voxels are inside the brain\n and which are outside the brain and creates a mask (boolean matrix)\n \"\"\"\n\n #get the mean voxel intensity of entire 4D object\n mean_voxel = np.mean(data)\n #get the mean volume (3D) across time series (axis 3)\n mean_volume = np.mean(data, axis=3)\n #boolean mask set to all voxels above .5 in the first volume\n #(.125 is the SPM criterion but .5 seems like a better threshold)\n mask = mean_volume > (.5 * mean_voxel) #threshold can be adjusted!\n out_mask = ~mask\n\n \"\"\"\n The third part of code finds the root mean square of U from step 1, then uses the\n mask from step 2 to determine which components explain data outside the brain\n Selects these \"bad components\" with high \"outsideness\"\n \"\"\"\n\n #Apply mask to C matrix to get all voxels outside of brain\n outside = C_vols[out_mask]\n #Get RMS of the voxels outside, reflecting \"outsideness\" of this scan\n RMS_out = np.sqrt(np.mean((outside ** 2), axis=0))\n\n #Apply mask to C matrix to get all voxels inside brain\n inside = C_vols[mask]\n #Get RMS of the voxels inside, reflecting \"insideness\" of this scan\n RMS_in = np.sqrt(np.mean((inside ** 2), axis=0))\n\n #The closer this ratio is to 1, the worse the volume\n RMS_ratio = RMS_out / RMS_in\n\n \"\"\"\n The fourth part of the code uses the \"bad components\" to generate a new\n \"bad data set\" and then puts this dataset through the outlier detector\n \"\"\"\n\n #Create a boolean mask for the 10% worst PCs (meaning highest RMS ratio)\n PC_bad = np.percentile(RMS_ratio, 90)\n PC_bad_mask = RMS_ratio > PC_bad\n\n U_bad = U[:, PC_bad_mask]\n C_bad = C[PC_bad_mask]\n\n #generates data set based on the bad PCs and (U and C matrices)\n X_bad = U_bad.dot(C_bad).T.reshape((vol_shape + (n_vols,)))\n\n # calculate 
outliers using iqr_detector\n _, outliers = mah_detector(X_bad)\n\n return X_bad, outliers", "def convolve_one_image(self,input4D, one_image, image_shape, \n Pstruct, filter_shape,\n image_index,\n channel_index): \n \n \n ## We look at the composition for the first channel in the beginning \n rank = Pstruct[0]['U1'].shape[1]\n fwidth = filter_shape[2]\n fheight = filter_shape[3]\n \n \n # Construct horizontal filters\n #TODO save the filters in the correct shape\n horizontal_filter_shape = (rank, 1, fwidth)\n horizontal_filters = np.ndarray(horizontal_filter_shape)\n horizontal_filters[:, 0, :] = np.transpose(Pstruct[channel_index]['U1']);\n \n # Output is 1 x rank x W x H\n horizontal_conv_out = conv.conv2d(input=one_image, \n filters = horizontal_filters,\n filter_shape = horizontal_filter_shape, \n image_shape = image_shape)\n \n # Construct vertical filters\n vertical_filter_shape = (rank, fheight, 1)\n vertical_filters = np.ndarray(vertical_filter_shape) \n vertical_filters[:,:, 0] = np.transpose(Pstruct[channel_index]['U2']);\n\n initial_n_rows = image_shape[1]\n final_n_rows = initial_n_rows- fwidth + 1\n final_n_cols = image_shape[2] - fheight + 1 \n conv_out = theano.shared(np.zeros((rank, final_n_rows, final_n_cols)))\n for r in range(rank):\n # temp is 1x1x imageW x imageH\n A = conv.conv2d(input = horizontal_conv_out[:,r,:,:], \n filters = vertical_filters[r,:,:],\n filter_shape = (1, fheight, 1), \n image_shape = (1, initial_n_rows, final_n_cols))\n conv_out = T.set_subtensor(conv_out[r,:,:], A[0,:,:])\n \n nbr_filters = Pstruct[0]['U3'].shape[0]\n # Final number of rows and columns \n ## numberof images, number of filters, image width, image height\n alphas = Pstruct[channel_index]['U3'] \n for f in range(nbr_filters): \n temp = theano.shared(np.zeros((final_n_rows, final_n_cols)))\n for r in range(rank):\n temp = temp + conv_out[r, :,:]* alphas[f, r] * Pstruct[channel_index]['lmbda'][r]; \n input4D =T.set_subtensor(input4D[image_index,f,:,:], temp)\n return input4D", "def test_positional_convolution_backward():\n i = 1\n for num_batch in [1, 2, 4]:\n for num_channel in [4, 8, 12]:\n for input_height, input_width in itertools.product([10, 12, 18], [10, 12, 18]):\n for num_filter in [2, 4, 8]:\n for kernel in [(3, 3), (2, 2)]:\n for stride in [(1, 1), (2, 2)]:\n for pad in [(0, 0), (1, 1)]:\n for dilate in [(1, 1), (2, 2)]:\n # for num_group in [1, 2, 4]:\n grad_nodes = ['im_data', 'scale_data', 'weight', 'bias']\n output_height = np.floor(\n (input_height + 2 * pad[0] - dilate[0] * (kernel[0] - 1) - 1) * 1.0 / stride[0]\n ) + 1\n output_width = np.floor(\n (input_width + 2 * pad[1] - dilate[1] * (kernel[1] - 1) - 1) * 1.0 / stride[1]\n ) + 1\n im_data = np.random.rand(num_batch, num_channel, input_height, input_width)\n scale_data = \\\n np.random.rand(num_batch, num_channel, int(output_height), int(output_width))\\\n * 0.8 + 0.1\n\n weight = np.random.normal(0, 0.001, (num_filter, num_channel, kernel[0], kernel[1]))\n bias = np.random.rand(num_filter)\n\n im_data_var = mx.symbol.Variable(name=\"im_data\")\n scale_data_var = mx.symbol.Variable(name=\"scale_data\")\n weight_var = mx.symbol.Variable(name=\"weight\")\n bias_var = mx.symbol.Variable(name=\"bias\")\n op = mx.sym.contrib.PositionalConvolution(name='test_op',\n data=im_data_var,\n scale=scale_data_var,\n weight=weight_var,\n bias=bias_var,\n num_filter=num_filter,\n kernel=kernel, stride=stride, pad=pad,\n dilate=dilate\n )\n rtol, atol = 1e-4, 1e-3\n # absolute(a - b) <= (atol + rtol * absolute(b))\n 
check_numeric_gradient(op, [im_data, scale_data, weight, bias], rtol=rtol,\n atol=atol, grad_nodes=grad_nodes, ctx=mx.gpu(0))\n print(\"check numeric gradient successfully for the {} times\".format(i))\n i += 1" ]
[ "0.59120655", "0.5800435", "0.5779887", "0.5771272", "0.5761186", "0.57230026", "0.56933904", "0.5679137", "0.5663168", "0.5662883", "0.5660113", "0.5638398", "0.56129354", "0.56058097", "0.56007946", "0.55871105", "0.5583951", "0.55634624", "0.5547827", "0.55430686", "0.5529907", "0.55252165", "0.55186105", "0.5516704", "0.5501969", "0.55007386", "0.54925334", "0.5472825", "0.54703283", "0.54655546" ]
0.64589345
0
Test that axes into a conv are the same as axes out
def test_axis_preservation(conv1d_placeholder, output_size): conv_layer = Convolution((3, output_size), lambda x: 1) output = conv_layer(conv1d_placeholder) assert output.axes == conv1d_placeholder.axes, ("Output axes are not the same as input axes: " "{} != {}").format(output.axes, conv1d_placeholder.axes)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_channel_axis_introduction(conv1d_no_channel_axis, output_size, channel_axis):\n conv_layer = Convolution((3, output_size), lambda x: 1)\n output = conv_layer(conv1d_no_channel_axis)\n t_axes = conv1d_no_channel_axis.axes + channel_axis\n assert output.axes.is_equal_set(t_axes), (\"Output axes are not input axes + channel axis:\"\n \"{} != {} + {}\").format(output.axes,\n conv1d_no_channel_axis.axes,\n channel_axis)", "def test_alternate_channel_axes(conv1d_placeholder, output_size, channel_axis):\n channel_axis.name = \"channel\"\n assert len(conv1d_placeholder.axes.find_by_name(\"channel\")) == 1\n\n conv_layer = Convolution((3, output_size), lambda x: 1)\n with pytest.raises(IncompatibleAxesError):\n conv_layer(conv1d_placeholder)\n output = conv_layer(conv1d_placeholder, channel_axes=\"channel\")\n assert output.axes == conv1d_placeholder.axes", "def test_alternate_spatial_axes(conv1d_placeholder, output_size, width_axis):\n width_axis.name = \"time\"\n assert len(conv1d_placeholder.axes.find_by_name(\"time\")) == 1\n\n conv_layer = Convolution((3, output_size), lambda x: 1)\n with pytest.raises(IncompatibleAxesError):\n conv_layer(conv1d_placeholder)\n # As a dictionary\n output = conv_layer(conv1d_placeholder, spatial_axes={\"W\": \"time\"})\n assert output.axes == conv1d_placeholder.axes\n # As a tuple\n output = conv_layer(conv1d_placeholder, spatial_axes=(\"D\", \"H\", \"time\"))\n assert output.axes == conv1d_placeholder.axes", "def test_on_conv_transpose_2d_padding(self):\n x = jn.array([[[[2., 1., 3., 4.], [5., 6., 7., 8.], [9., 10., 11., 12.], [13., 14., 15., 16.]]]])\n y = jn.array([[[[2., 5., 5., 10.], [11., 27., 32., 46.], [24., 66., 76., 86.], [40., 106., 116., 126.]]]])\n w_init = lambda s: jn.array([[[[1., 2.], [3., 4.]]]]).transpose((2, 3, 0, 1))\n conv = objax.nn.ConvTranspose2D(1, 1, 2, padding=objax.ConvPadding.SAME, w_init=w_init)\n self.assertEqual(conv(x).tolist(), y.tolist())\n conv = objax.nn.ConvTranspose2D(1, 1, 2, padding='same', w_init=w_init)\n self.assertEqual(conv(x).tolist(), y.tolist())\n conv = objax.nn.ConvTranspose2D(1, 1, 2, padding='Same', w_init=w_init)\n self.assertEqual(conv(x).tolist(), y.tolist())\n conv = objax.nn.ConvTranspose2D(1, 1, 2, padding='SAME', w_init=w_init)\n self.assertEqual(conv(x).tolist(), y.tolist())\n conv = objax.nn.ConvTranspose2D(1, 1, 2, padding=(1, 0), w_init=w_init)\n self.assertEqual(conv(x).tolist(), y.tolist())\n conv = objax.nn.ConvTranspose2D(1, 1, 2, padding=[(1, 0), (1, 0)], w_init=w_init)\n self.assertEqual(conv(x).tolist(), y.tolist())\n y = [[[[2., 5., 5., 10., 8.], [11., 27., 32., 46., 32.], [24., 66., 76., 86., 56.],\n [40., 106., 116., 126., 80.], [39., 94., 101., 108., 64.]]]]\n conv = objax.nn.ConvTranspose2D(1, 1, 2, padding=1, w_init=w_init)\n self.assertEqual(conv(x).tolist(), y)\n conv = objax.nn.ConvTranspose2D(1, 1, 2, padding=(1, 1), w_init=w_init)\n self.assertEqual(conv(x).tolist(), y)\n conv = objax.nn.ConvTranspose2D(1, 1, 2, padding=[(1, 1), (1, 1)], w_init=w_init)\n self.assertEqual(conv(x).tolist(), y)", "def test_first_axes_not_same():\n padding = dict(pad_d=0, pad_h=0, pad_w=0)\n strides = dict(str_d=1, str_h=1, str_w=1)\n dilation = dict(dil_d=1, dil_h=1, dil_w=1)\n conv_params = padding.copy()\n conv_params.update(strides)\n conv_params.update(dilation)\n\n ax_i = ng.make_axes([ax.D, ax.C, ax.H, ax.W, ax.N])\n ax_f = ng.make_axes([ax.C, ax.T, ax.R, ax.S, ax.K])\n\n inputs = ng.placeholder(ax_i)\n filters = ng.placeholder(ax_f)\n\n with pytest.raises(ValueError) as exinfo:\n 
ng.convolution(conv_params, inputs, filters, {})\n assert str(exinfo.value) == 'the first axis in input {inputs} and filter {filters} ' \\\n 'are not the same.'.format(\n inputs=inputs.axes[0],\n filters=filters.axes[0])", "def test_on_conv_transpose_2d_two_by_two(self):\n w_init = lambda s: jn.array([[[[1., 2.], [3., 4.]]]]).transpose((2, 3, 0, 1))\n conv = objax.nn.ConvTranspose2D(1, 1, 2, padding=objax.ConvPadding.VALID, w_init=w_init)\n x = jn.array([[[[2., 1., 3., 4.], [5., 6., 7., 8.], [9., 10., 11., 12.], [13., 14., 15., 16.]]]])\n y = jn.array([[[[2., 5., 5., 10., 8.],\n [11., 27., 32., 46., 32.],\n [24., 66., 76., 86., 56.],\n [40., 106., 116., 126., 80.],\n [39., 94., 101., 108., 64.]]]])\n self.assertEqual(conv(x).tolist(), y.tolist())", "def testShapesSame(self, batch_size, in_length, in_channels, out_length,\n out_channels, kernel_shape, padding, use_bias, in_shape,\n out_shape, stride_shape, use_output_shape):\n if use_output_shape:\n output_shape_arg = out_shape\n else:\n output_shape_arg = None\n\n inputs = tf.placeholder(\n tf.float32,\n shape=[batch_size, in_length, in_channels])\n\n conv1 = snt.Conv1DTranspose(output_channels=out_channels,\n output_shape=output_shape_arg,\n kernel_shape=kernel_shape,\n padding=padding,\n stride=stride_shape,\n name=\"conv1\",\n use_bias=use_bias)\n\n output = conv1(inputs)\n\n self.assertTrue(\n output.get_shape().is_compatible_with(\n [batch_size, out_length, out_channels]))\n\n self.assertTrue(\n conv1.w.get_shape().is_compatible_with(\n [1, kernel_shape, out_channels, in_channels]))\n\n if use_bias:\n self.assertTrue(\n conv1.b.get_shape().is_compatible_with(\n [out_channels]))", "def testAtrousConvSame(self, use_bias):\n conv1 = snt.Conv2D(\n output_channels=1,\n kernel_shape=3,\n stride=1,\n rate=2,\n padding=snt.SAME,\n name=\"conv1\",\n use_bias=use_bias,\n initializers=create_constant_initializers(1.0, 1.0, use_bias))\n\n out = conv1(tf.constant(np.ones([1, 5, 5, 1], dtype=np.float32)))\n expected_out = np.array([[5, 5, 7, 5, 5],\n [5, 5, 7, 5, 5],\n [7, 7, 10, 7, 7],\n [5, 5, 7, 5, 5],\n [5, 5, 7, 5, 5]])\n if not use_bias:\n expected_out -= 1\n\n with self.test_session():\n tf.variables_initializer(\n [conv1.w, conv1.b] if use_bias else [conv1.w]).run()\n\n self.assertAllClose(np.reshape(out.eval(), [5, 5]), expected_out)", "def check_conv_transpose(extract):\n call = extract\n if isinstance(call, tvm.relay.expr.TupleGetItem):\n call = call.tuple_value\n elif call.op.name == \"nn.relu\":\n call = call.args[0]\n if isinstance(call, tvm.relay.expr.TupleGetItem):\n call = call.tuple_value\n elif call.op.name == \"clip\":\n if call.attrs[\"a_min\"] != 0.0 or call.attrs[\"a_max\"] != 6.0:\n return False\n call = call.args[0]\n if isinstance(call, tvm.relay.expr.TupleGetItem):\n call = call.tuple_value\n\n while call.op.name != \"nn.conv2d_transpose\":\n call = call.args[0]\n\n attrs = call.attrs\n if attrs.data_layout != \"NCHW\":\n return False\n\n return True", "def test_on_conv_transpose_2d_three_by_three(self):\n w_init = lambda s: jn.array([[[[1., 2., 1.], [1., 2., 1.], [1., 1., 1.]]]]).transpose((2, 3, 0, 1))\n conv = objax.nn.ConvTranspose2D(1, 1, 3, padding=objax.ConvPadding.VALID, w_init=w_init)\n x = jn.array([[[[2., 1., 3., 4.], [5., 6., 7., 8.], [9., 10., 11., 12.], [13., 14., 15., 16.]]]])\n y = jn.array([[[[2., 5., 7., 11., 11., 4.],\n [7., 21., 31., 39., 34., 12.],\n [16., 47., 70., 80., 65., 24.],\n [27., 79., 114., 125., 97., 36.],\n [22., 59., 86., 93., 70., 28.],\n [13., 27., 42., 45., 31., 16.]]]])\n 
self.assertEqual(conv(x).tolist(), y.tolist())", "def test_conversion(backend):\n\n x = np.random.rand(10, 10)\n x_b = backend.from_numpy(x)\n x_c = backend.to_numpy(x_b)\n\n assert np.all(np.isclose(x, x_c))", "def testSameNumberOfOutputAndInputChannels(self, use_bias):\n\n input_channels = random.randint(1, 32)\n inputs = tf.placeholder(tf.float32, shape=[1, 10, 10, input_channels])\n conv1 = snt.InPlaneConv2D(kernel_shape=3, use_bias=use_bias)\n\n # Before conv1 is connected, we cannot know how many `output_channels`\n # conv1 should have.\n err = \"Variables in in_plane_conv2d not instantiated yet\"\n with self.assertRaisesRegexp(snt.NotConnectedError, err):\n _ = conv1.output_channels\n\n # After connection, should match `input_channels`.\n conv1(inputs)\n self.assertEqual(conv1.output_channels, input_channels)", "def testOutputShapeConsistency(self, use_bias):\n\n # When padding is SAME, then the actual number of padding pixels can be\n # computed as: pad = kernel_shape - strides + (-input_shape % strides)\n # = 5 - 1 + (- 32 % 1) = 4\n\n # The formula for the minimal size is:\n # oH = strides[1] * (in_height - 1) - padding + kernel_shape_h\n # oH = 1 * ( 32 - 1) - 4 + 5 = 32\n\n # The formula for the maximum size (due to extra pixels) is:\n # oH_max = oH + strides[1] - 1\n # so, for strides = 1 and padding = SAME, input size == output size.\n inputs = tf.placeholder(tf.float32, shape=self.in_shape)\n\n conv1 = snt.Conv2DTranspose(name=\"conv2d_1\",\n output_channels=self.out_channels,\n output_shape=self.out_shape,\n kernel_shape=self.kernel_shape,\n padding=self.padding,\n stride=1,\n use_bias=use_bias)\n\n outputs = conv1(inputs)\n\n self.assertTrue(outputs.get_shape().is_compatible_with((\n self.batch_size,) + self.out_shape + (self.out_channels,)))\n\n self.assertTrue(conv1.w.get_shape().is_compatible_with(self.kernel_shape2))\n if use_bias:\n self.assertTrue(conv1.b.get_shape().is_compatible_with(\n [self.out_channels]))", "def testShapesSame(self, use_bias):\n\n batch_size = random.randint(1, 100)\n in_height = random.randint(10, 288)\n in_width = random.randint(10, 288)\n in_channels = random.randint(1, 10)\n out_channels = random.randint(1, 32)\n kernel_shape_h = random.randint(1, 11)\n kernel_shape_w = random.randint(1, 11)\n\n inputs = tf.placeholder(\n tf.float32,\n shape=[batch_size, in_height, in_width, in_channels])\n\n conv1 = snt.Conv2D(\n name=\"conv1\",\n output_channels=out_channels,\n kernel_shape=[kernel_shape_h, kernel_shape_w],\n padding=snt.SAME,\n stride=1,\n use_bias=use_bias)\n\n output = conv1(inputs)\n\n self.assertTrue(\n output.get_shape().is_compatible_with(\n [batch_size, in_height, in_width, out_channels]))\n\n self.assertTrue(\n conv1.w.get_shape().is_compatible_with(\n [kernel_shape_h, kernel_shape_w, in_channels, out_channels]))\n\n if use_bias:\n self.assertTrue(\n conv1.b.get_shape().is_compatible_with(\n [out_channels]))", "def test_equal11():\n x = np.array([[True, False, True]])\n y = np.array([[[[[True, False, True], [True, False, True], [True, False, True]]]]])\n res = np.equal(x, y)\n obj.run(res=res, x=x, y=y)", "def testOutputShapeConsistency(self, use_bias):\n\n # When padding is SAME, then the actual number of padding pixels can be\n # computed as: pad = kernel_shape - strides + (-input_shape % strides)\n # = 5 - 1 + (- 32 % 1) = 4\n\n # The formula for the minimal size is:\n # oH = strides[1] * (in_height - 1) - padding + kernel_shape_h\n # oH = 1 * ( 32 - 1) - 4 + 5 = 32\n\n # The formula for the maximum size (due to extra pixels) 
is:\n # oH_max = oH + strides[1] - 1\n # so, for strides = 1 and padding = SAME, input size == output size.\n inputs = tf.placeholder(tf.float32, shape=self.in_shape)\n\n conv1 = snt.Conv3DTranspose(name=\"conv3d_1\",\n output_channels=self.out_channels,\n output_shape=self.out_shape,\n kernel_shape=self.kernel_shape,\n padding=self.padding,\n stride=1,\n use_bias=use_bias)\n\n outputs = conv1(inputs)\n\n self.assertTrue(outputs.get_shape().is_compatible_with((\n self.batch_size,) + self.out_shape + (self.out_channels,)))\n\n self.assertTrue(conv1.w.get_shape().is_compatible_with(self.kernel_shape2))\n if use_bias:\n self.assertTrue(conv1.b.get_shape().is_compatible_with(\n [self.out_channels]))", "def testTransposeNCHW(self, use_bias, use_output_shape):\n output_shape = tf.TensorShape((4, 5))\n\n conv2_transpose = snt.Conv2DTranspose(\n output_channels=5,\n output_shape=output_shape if use_output_shape else None,\n kernel_shape=3,\n padding=snt.VALID,\n stride=1,\n name=\"conv2_transpose\",\n use_bias=use_bias,\n data_format=conv.DATA_FORMAT_NCHW)\n conv2 = conv2_transpose.transpose()\n\n # Check kernel shapes, strides and padding match.\n self.assertEqual(conv2_transpose.kernel_shape, conv2.kernel_shape)\n self.assertEqual((1,) + conv2_transpose.stride[1:3] + (1,), conv2.stride)\n self.assertEqual(conv2_transpose.padding, conv2.padding)\n\n # Before conv2_transpose is connected, we cannot know how many\n # `output_channels` conv1 should have.\n err = \"Variables in conv2_transpose not instantiated yet\"\n with self.assertRaisesRegexp(snt.NotConnectedError, err):\n _ = conv2.output_channels\n\n # After connection the number of `output_channels` is known.\n batch_size = 32\n in_height = 2\n in_width = 3\n in_channels = 4\n x = tf.constant(np.random.randn(batch_size, in_channels, in_height,\n in_width),\n dtype=np.float32)\n conv2_transpose(x)\n self.assertEqual(in_channels, conv2.output_channels)\n\n # As is `output_channels`.\n self.assertEqual(output_shape, conv2_transpose.output_shape)\n\n # However, even after connection, the `input_shape` of the forward\n # convolution is not known until it is itself connected (i.e. 
it can be\n # connected to a different shape input from the `output_shape` of the\n # transpose convolution!)\n err = \"Variables in conv2_transpose_transpose not instantiated yet\"\n with self.assertRaisesRegexp(snt.NotConnectedError, err):\n self.assertEqual(conv2_transpose.output_shape, conv2.input_shape)", "def testShapesSame(self, use_bias):\n\n out_channels = self.out_channels\n input_shape = self.input_shape\n kernel_shape = self.kernel_shape\n output_shape = self.output_shape\n weight_shape = self.weight_shape\n channel_multiplier = self.channel_multiplier\n\n inputs = tf.placeholder(tf.float32, shape=input_shape)\n\n conv1 = snt.DepthwiseConv2D(\n name=\"conv1\",\n channel_multiplier=channel_multiplier,\n kernel_shape=kernel_shape,\n padding=snt.SAME,\n stride=1,\n use_bias=use_bias)\n output = conv1(inputs)\n\n self.assertEqual(output.get_shape(), output_shape)\n self.assertEqual(conv1.w.get_shape(), weight_shape)\n if use_bias:\n self.assertEqual(conv1.b.get_shape(), out_channels)", "def test_tpose1230(self):\n\n conv_params = {\n 'stride': 2,\n 'pad': 1\n }\n\n nr_img = 2;\n sz_img = 4;\n nr_in_channel = 3;\n sz_filter = 4;\n nr_filter = 3;\n\n a = np.random.randn(2, 1, 3, 2)\n p = np.array([1, 0, 1, 0, 1, 0, 1, 1, 1, 2, 3, 2, 1, 0, 1, 2, 1, 2]).reshape(1, 2, 3, 3)\n x = np.linspace(-.1, .5, 2 * 3 * 4 * 4).reshape(2, 3, 4, 4)\n w = np.linspace(-0.2, 0.3, 3 * 3 * 4 * 6).reshape(3, 3, 4, 6)\n\n # self.assertEqual(tpose1230(p).all(), p.transpose(1, 2, 3, 0).all())\n # self.assertEqual(tpose1230(w).all(), w.transpose(1, 2, 3, 0).all())\n # self.assertEqual(tpose1230(x).all(), x.transpose(1, 2, 3, 0).all())\n\n\n self.assertTrue(np.array_equal(tpose1230(a), a.transpose(1, 2, 3, 0)))\n self.assertTrue(np.array_equal(tpose1230(p), p.transpose(1, 2, 3, 0)))\n self.assertTrue(np.array_equal(tpose1230(w), w.transpose(1, 2, 3, 0)))\n self.assertTrue(np.array_equal(tpose1230(x), x.transpose(1, 2, 3, 0)))\n\n self.assertEqual(a.shape[0], a.transpose(1, 2, 3, 0).shape[3])\n self.assertEqual(a.shape[1], a.transpose(1, 2, 3, 0).shape[0])\n self.assertEqual(a.shape[2], a.transpose(1, 2, 3, 0).shape[1])\n self.assertEqual(a.shape[3], a.transpose(1, 2, 3, 0).shape[2])\n\n # print()\n # print(tpose1230(p).flatten())\n # print()\n # print(list(p.transpose(1, 2, 3, 0).flatten()))\n # print()\n # print(list(x.transpose(1, 2, 3, 0).flatten()))\n # print()\n # print(list(w.transpose(1, 2, 3, 0).flatten()))", "def test_equal15():\n x = np.array([[[[[[True, False, True], [True, False, True], [True, False, True]]]]]])\n y = x\n res = np.equal(x, y)\n obj.run(res=res, x=x, y=y)", "def test_equal14():\n x = np.array([[[[[[True, False, True], [True, False, True], [True, False, True]]]]]])\n y = np.array([[True, False, True], [True, False, True], [True, False, True]])\n res = np.equal(x, y)\n obj.run(res=res, x=x, y=y)", "def test_conv_inverts_deconv(transformer_factory, input_size, filter_size, padding, stride):\n\n # convolutions whose output size are not an even multiple of stride cannot be exactly inverted\n a = (input_size + sum(padding) - filter_size) % stride\n conv_output = utils.conv_output_dim(input_size, filter_size, padding, stride)\n deconv_output = utils.deconv_output_dim(conv_output, filter_size, padding, stride)\n\n assert deconv_output == (input_size - a), (\"Convolution and Deconvolution do not invert:\\n\"\n \"output ({}) != input ({}) - a ({})\\n\"\n \"filter: {}, padding: {}, stride: {}\"\n ).format(deconv_output, input_size, a,\n filter_size, padding, stride)", "def testShapesSame(self, 
use_bias):\n\n out_channels = self.out_channels_dw\n input_shape = self.input_shape\n kernel_shape = self.kernel_shape\n output_shape = self.output_shape\n depthwise_filter_shape = self.depthwise_filter_shape\n pointwise_filter_shape = self.pointwise_filter_shape\n channel_multiplier = self.channel_multiplier\n\n inputs = tf.placeholder(tf.float32, shape=input_shape)\n\n conv1 = snt.SeparableConv2D(\n output_channels=out_channels,\n channel_multiplier=channel_multiplier,\n kernel_shape=kernel_shape,\n padding=snt.SAME,\n use_bias=use_bias)\n\n output = conv1(inputs)\n\n self.assertTrue(output.get_shape().is_compatible_with(output_shape))\n self.assertTrue(conv1.w_dw.get_shape().is_compatible_with(\n depthwise_filter_shape))\n self.assertTrue(conv1.w_pw.get_shape().is_compatible_with(\n pointwise_filter_shape))\n if use_bias:\n self.assertTrue(conv1.b.get_shape().is_compatible_with([out_channels]))", "def testShapesSame(self, use_bias):\n\n batch_size = random.randint(1, 100)\n in_depth = random.randint(10, 288)\n in_height = random.randint(10, 288)\n in_width = random.randint(10, 288)\n in_channels = random.randint(1, 10)\n out_channels = random.randint(1, 32)\n kernel_shape_d = random.randint(1, 11)\n kernel_shape_h = random.randint(1, 11)\n kernel_shape_w = random.randint(1, 11)\n\n inputs = tf.placeholder(\n tf.float32,\n shape=[batch_size, in_depth, in_height, in_width, in_channels])\n\n conv1 = snt.Conv3D(\n output_channels=out_channels,\n kernel_shape=[kernel_shape_d, kernel_shape_h, kernel_shape_w],\n padding=snt.SAME,\n stride=1,\n use_bias=use_bias,\n name=\"conv1\")\n\n output = conv1(inputs)\n\n self.assertTrue(\n output.get_shape().is_compatible_with(\n [batch_size, in_depth, in_height, in_width, out_channels]))\n\n self.assertTrue(\n conv1.w.get_shape().is_compatible_with(\n [kernel_shape_d, kernel_shape_h, kernel_shape_w, in_channels,\n out_channels]))\n if use_bias:\n self.assertTrue(\n conv1.b.get_shape().is_compatible_with(\n [out_channels]))", "def nd_shape_checking(x, y, mvaxis, traxis):\n assert x.ndim == y.ndim\n dims = np.delete(np.arange(x.ndim), -2)\n assert all([x.shape[k] == y.shape[k] for k in dims])", "def test_equal13():\n x = np.array([[[[[[True, False, True], [True, False, True], [True, False, True]]]]]])\n y = np.array([[True, False, True], [True, False, True], [True, False, True]])\n res = np.equal(x, y)\n obj.run(res=res, x=x, y=y)", "def testShapesSame(self, use_bias):\n\n out_channels = self.out_channels_dw\n input_shape = self.input_shape\n kernel_shape = self.kernel_shape\n output_shape = self.output_shape\n depthwise_filter_shape = self.depthwise_filter_shape\n pointwise_filter_shape = self.pointwise_filter_shape\n channel_multiplier = self.channel_multiplier\n\n inputs = tf.placeholder(tf.float32, shape=input_shape)\n\n conv1 = snt.SeparableConv1D(\n output_channels=out_channels,\n channel_multiplier=channel_multiplier,\n kernel_shape=kernel_shape,\n padding=snt.SAME,\n use_bias=use_bias)\n\n output = conv1(inputs)\n\n self.assertTrue(output.get_shape().is_compatible_with(output_shape))\n self.assertTrue(conv1.w_dw.get_shape().is_compatible_with(\n depthwise_filter_shape))\n self.assertTrue(conv1.w_pw.get_shape().is_compatible_with(\n pointwise_filter_shape))\n if use_bias:\n self.assertTrue(conv1.b.get_shape().is_compatible_with([out_channels]))", "def test_same_convolution(conv1d_placeholder, spatial_onehot, output_size, width, stride):\n conv_layer = Convolution((3, output_size), lambda x: 1, strides=stride, padding=\"same\")\n output = 
conv_layer(conv1d_placeholder)\n output_width = output.axes.find_by_name(\"W\")[0].length\n assert output_width == np.ceil(width / float(stride)), (\"Same convolution output width != \"\n \"ceil(input_width / stride): {} != \"\n \"ceil({} / {})\").format(output_width,\n width,\n stride)", "def test_on_conv_transpose_2d_two_channels(self):\n\n # Channels/Colors, #filters, filter_size (square)\n conv_filter = objax.nn.ConvTranspose2D(2, 1, 2, padding=objax.ConvPadding.VALID)\n weights = objax.TrainVar(jn.array([[[[1., 2.], [3., 4.]], [[1., 2.], [3., 4.]]]]).transpose((2, 3, 0, 1)))\n conv_filter.w = weights\n image = jn.array([[[[2., 1., 3., 4.], [5., 6., 7., 8.],\n [9., 10., 11., 12.], [13., 14., 15., 16.]],\n [[2., 1., 3., 4.], [5., 6., 7., 8.],\n [9., 10., 11., 12.], [13., 14., 15., 16.]]]])\n # NCHW: Batch, Channels/Colors, Height, Width\n features = conv_filter(image)\n expected_features = jn.array([[[[4., 10., 10., 20., 16.],\n [22., 54., 64., 92., 64.],\n [48., 132., 152., 172., 112.],\n [80., 212., 232., 252., 160.],\n [78., 188., 202., 216., 128.]]]])\n self.assertEqual(features.shape, (1, 1, 5, 5))\n self.assertTrue(jn.array_equal(features, expected_features))", "def test_conv(self):\n for kernel_type in [lambda x: x, SharedTensor]:\n for matrix_width in range(2, 5):\n for kernel_width in range(1, matrix_width):\n for padding in range(kernel_width // 2 + 1):\n matrix_size = (5, matrix_width)\n matrix = get_random_test_tensor(size=matrix_size)\n\n kernel_size = (kernel_width, kernel_width)\n kernel = get_random_test_tensor(size=kernel_size)\n\n matrix = matrix.unsqueeze(0).unsqueeze(0)\n kernel = kernel.unsqueeze(0).unsqueeze(0)\n\n reference = torch.nn.functional.conv2d(\n matrix, kernel, padding=padding)\n encrypted_matrix = SharedTensor(matrix)\n encrypted_kernel = kernel_type(kernel)\n encrypted_conv = encrypted_matrix.conv2d(\n encrypted_kernel, padding=padding\n )\n\n self._check(encrypted_conv, reference, 'conv2d failed')" ]
[ "0.70866376", "0.7026903", "0.6671873", "0.63404804", "0.6334404", "0.6294378", "0.6264213", "0.62374765", "0.6234195", "0.61577", "0.6156835", "0.6155365", "0.61525667", "0.60497", "0.60024923", "0.6000393", "0.59903127", "0.5989085", "0.5983982", "0.59728414", "0.59622973", "0.5950046", "0.5940476", "0.5931945", "0.592653", "0.5913647", "0.59047204", "0.59035796", "0.5858209", "0.58490103" ]
0.7441477
0
Test that a channel axis is added when it doesn't exist in the input
def test_channel_axis_introduction(conv1d_no_channel_axis, output_size, channel_axis): conv_layer = Convolution((3, output_size), lambda x: 1) output = conv_layer(conv1d_no_channel_axis) t_axes = conv1d_no_channel_axis.axes + channel_axis assert output.axes.is_equal_set(t_axes), ("Output axes are not input axes + channel axis:" "{} != {} + {}").format(output.axes, conv1d_no_channel_axis.axes, channel_axis)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_alternate_channel_axes(conv1d_placeholder, output_size, channel_axis):\n channel_axis.name = \"channel\"\n assert len(conv1d_placeholder.axes.find_by_name(\"channel\")) == 1\n\n conv_layer = Convolution((3, output_size), lambda x: 1)\n with pytest.raises(IncompatibleAxesError):\n conv_layer(conv1d_placeholder)\n output = conv_layer(conv1d_placeholder, channel_axes=\"channel\")\n assert output.axes == conv1d_placeholder.axes", "async def test_skipped_missing_channel(self):\n self.cog.unsilence_timestamps.items.return_value = [(123, -1), (123, 1), (123, 10000000000)]\n self.bot.get_channel.return_value = None\n\n await self.cog._reschedule()\n\n self.cog.notifier.add_channel.assert_not_called()\n self.cog._unsilence_wrapper.assert_not_called()\n self.cog.scheduler.schedule_later.assert_not_called()", "def testMissingChannelsError(self, module_info, use_bias):\n module, num_input_dims, module_kwargs = module_info\n conv_mod = module(use_bias=use_bias, **module_kwargs)\n\n inputs = tf.placeholder(tf.float32, (10,) * (num_input_dims + 1))\n\n err = \"Input Tensor must have\"\n with self.assertRaisesRegexp(snt.IncompatibleShapeError, err):\n conv_mod(inputs)", "def testNonDefinedChannelsDimension(self, module_info, use_bias):\n module, num_input_dims, module_kwargs = module_info\n conv_mod = module(use_bias=use_bias, **module_kwargs)\n\n inputs = tf.placeholder(tf.float32, (10,) * (num_input_dims + 1) + (None,))\n err = \"Number of input channels\"\n with self.assertRaisesRegexp(snt.UnderspecifiedError, err):\n conv_mod(inputs)", "def channels_last(self, device):\n return device not in self._gpu_devices", "def isscalar(self):\n return not self.axes", "def is_no_channel(val) -> bool:\n if isinstance(val, torch.Tensor):\n return bool(torch.isnan(val))\n if isinstance(val, str):\n return val == \"no_channel\"\n if np.isscalar(val):\n return bool(np.isnan(val))\n return val is None", "def test_axis_preservation(conv1d_placeholder, output_size):\n conv_layer = Convolution((3, output_size), lambda x: 1)\n output = conv_layer(conv1d_placeholder)\n assert output.axes == conv1d_placeholder.axes, (\"Output axes are not the same as input axes: \"\n \"{} != {}\").format(output.axes,\n conv1d_placeholder.axes)", "def test_partial_channel_invalid_dim():\n with np.testing.assert_raises(ValueError):\n rho = np.array([[1, 2, 3, 4], [5, 6, 7, 8], [12, 11, 10, 9]])\n partial_channel(rho, depolarizing(3), 1, [2, 2])", "def test_alternate_spatial_axes(conv1d_placeholder, output_size, width_axis):\n width_axis.name = \"time\"\n assert len(conv1d_placeholder.axes.find_by_name(\"time\")) == 1\n\n conv_layer = Convolution((3, output_size), lambda x: 1)\n with pytest.raises(IncompatibleAxesError):\n conv_layer(conv1d_placeholder)\n # As a dictionary\n output = conv_layer(conv1d_placeholder, spatial_axes={\"W\": \"time\"})\n assert output.axes == conv1d_placeholder.axes\n # As a tuple\n output = conv_layer(conv1d_placeholder, spatial_axes=(\"D\", \"H\", \"time\"))\n assert output.axes == conv1d_placeholder.axes", "def test_contains_wrong_shape(self):\n categories = {\"asdfa\": 0.1, 2: 0.2, 3: 0.3, 4: 0.4}\n dim = Categorical(\"yolo\", categories, shape=2)\n\n assert 3 not in dim\n assert (\"asdfa\", 2) in dim", "def test_partial_channel_invalid_map():\n with np.testing.assert_raises(ValueError):\n rho = np.array([[1, 2, 3, 4], [5, 6, 7, 8], [12, 11, 10, 9]])\n partial_channel(rho, 5)", "def _check_reflection_axis(self, reflection_axis):\n if (reflection_axis.shape.ndims is not None and\n 
reflection_axis.shape.ndims < 1):\n raise ValueError(\n \"Argument reflection_axis must have at least 1 dimension. \"\n \"Found: %s\" % reflection_axis)", "def test_add_channel_adds_channel(self):\n channel = Mock()\n with mock.patch.object(self.notifier, \"_silenced_channels\") as silenced_channels:\n self.notifier.add_channel(channel)\n silenced_channels.__setitem__.assert_called_with(channel, self.notifier._current_loop)", "def test_add_channel_skips_start_with_channels(self):\n with mock.patch.object(self.notifier, \"_silenced_channels\"):\n self.notifier.add_channel(Mock())\n self.notifier_start_mock.assert_not_called()", "def _checkForSixaxis():\n return sixaxis.init(\"/dev/input/js1\")", "def test_cat_oob_neg_dim(self):\n\n with self.assertRaises(IndexError):\n utils.compare_tracing_methods(\n SimpleCatModule(-4, -2, -1),\n torch.randn(2, 3, 4),\n torch.randn(2, 3, 4),\n torch.randn(2, 3, 4),\n fusible_ops={\"prim::FusedConcat\"},\n )", "def test_get_non_existent_dimension(self):\n\n v = Vector({ })\n self.assertEqual(0, v.dimensions['x'])", "def _chk_asarray(a, axis):\r\n if axis is None:\r\n a = ravel(a)\r\n outaxis = 0\r\n else:\r\n a = asarray(a)\r\n outaxis = axis\r\n return a, outaxis", "def test_first_axes_not_same():\n padding = dict(pad_d=0, pad_h=0, pad_w=0)\n strides = dict(str_d=1, str_h=1, str_w=1)\n dilation = dict(dil_d=1, dil_h=1, dil_w=1)\n conv_params = padding.copy()\n conv_params.update(strides)\n conv_params.update(dilation)\n\n ax_i = ng.make_axes([ax.D, ax.C, ax.H, ax.W, ax.N])\n ax_f = ng.make_axes([ax.C, ax.T, ax.R, ax.S, ax.K])\n\n inputs = ng.placeholder(ax_i)\n filters = ng.placeholder(ax_f)\n\n with pytest.raises(ValueError) as exinfo:\n ng.convolution(conv_params, inputs, filters, {})\n assert str(exinfo.value) == 'the first axis in input {inputs} and filter {filters} ' \\\n 'are not the same.'.format(\n inputs=inputs.axes[0],\n filters=filters.axes[0])", "def validateChannel( self, name ):\n if name not in self.d.keys(): raise Exception('Invalid device channel {}'.format(name))", "def test_default_axis_nxdata(self, nexus_base):\n assert isinstance(nexus_base.default_axis, np.ndarray)", "def test_no_channels_property(self):\n expected_values = {\n 'no_timesteps': 1000,\n 'no_sweeps': 10,\n 'no_channels': 4,\n }\n test_rec = rt.Recording(\n np.zeros(\n [\n expected_values['no_channels'],\n expected_values['no_timesteps'],\n expected_values['no_sweeps'],\n ]\n ),\n dt=0.1,\n )\n self.assertEqual(\n test_rec.no_channels,\n expected_values['no_channels'],\n 'Expected {} for `no_channels` property; got {} instead.'.format(\n expected_values['no_channels'], test_rec.no_channels\n ),\n )", "def test_patch_grid_has_positive_dimension(self):\n with self.assertRaises(AssertionError):\n PatchGrid((), (), ())", "def is_trivial(self):\n return self.dims == 0", "def _check_no_nonzero_features(self,\n features: np.ndarray,\n dropped_idx: np.ndarray,\n axis: Optional[int] = None):\n if axis is not None:\n # Dropped node.\n features_of_dropped_nodes = np.take(features, dropped_idx, axis=axis)\n self.assertEqual(0, np.nonzero(features_of_dropped_nodes)[0].shape[0])\n self.assertEqual(0, np.nonzero(features_of_dropped_nodes)[1].shape[0])\n else:\n # Dropped edge.\n features_of_dropped_edge = features[dropped_idx[0], dropped_idx[1]]\n self.assertEqual(0, np.nonzero(features_of_dropped_edge)[0].shape[0])", "def test_no_false_positives(self, dim):\r\n g = nx.empty_graph(dim)\r\n assert not clique.is_clique(g)", "def is_channel(channel: SuperGate,\n atol=1e-8,\n order: 
tuple[any, ...] = None,\n **kwargs) -> bool:\n C = choi_matrix(channel, order, **kwargs)\n dim = _channel_dim(channel)\n\n # trace preserving\n tp = np.isclose(C.trace(), dim, atol=atol)\n\n # hermiticity preserving\n hp = np.allclose(C, C.conj().T, atol=atol)\n\n # completely positive\n apprx_gtr = lambda e, x: np.real(e) >= x or np.isclose(e, x, atol=atol)\n cp = np.all([\n apprx_gtr(e, 0) and np.isclose(np.imag(e), 0, atol=atol)\n for e in np.linalg.eigvals(C)\n ])\n\n return tp and hp and cp", "def add_dummy_channel(P8gen, particle, remainder):\n pdg = P8gen.getPythiaInstance().particleData\n charge = pdg.charge(particle)\n if charge > 0:\n P8gen.SetParameters('{}:addChannel 1 {:.16} 0 22 -11'.format(particle, remainder))\n elif charge < 0:\n P8gen.SetParameters('{}:addChannel 1 {:.16} 0 22 11'.format(particle, remainder))\n else:\n P8gen.SetParameters('{}:addChannel 1 {:.16} 0 22 22'.format(particle, remainder))", "def _filter_axes(self, channel_axis, spatial_axes):\n f_axes = ng.make_axis(length=self.nout, name=\"K\")\n for key, ax in zip(\"DHW\", spatial_axes):\n f_axes += ng.make_axis(length=self.filter_shape[key],\n name=ax.name)\n f_axes += channel_axis\n return f_axes" ]
[ "0.68041444", "0.5978892", "0.59068304", "0.58510864", "0.5820264", "0.5708535", "0.5601582", "0.55839527", "0.55391574", "0.5538313", "0.5525261", "0.5506493", "0.5473106", "0.5469562", "0.5443375", "0.5383108", "0.53624815", "0.5343262", "0.5339166", "0.53092283", "0.5277019", "0.52683544", "0.5262492", "0.5245775", "0.5209807", "0.5183617", "0.51756114", "0.5174875", "0.51705325", "0.5166724" ]
0.71101326
0
Test that spatial axis names are modifiable
def test_alternate_spatial_axes(conv1d_placeholder, output_size, width_axis): width_axis.name = "time" assert len(conv1d_placeholder.axes.find_by_name("time")) == 1 conv_layer = Convolution((3, output_size), lambda x: 1) with pytest.raises(IncompatibleAxesError): conv_layer(conv1d_placeholder) # As a dictionary output = conv_layer(conv1d_placeholder, spatial_axes={"W": "time"}) assert output.axes == conv1d_placeholder.axes # As a tuple output = conv_layer(conv1d_placeholder, spatial_axes=("D", "H", "time")) assert output.axes == conv1d_placeholder.axes
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_set_bad_name(self):\n dim = Dimension(\"yolo\", \"uniform\", -3, 4, shape=(4, 4))\n with pytest.raises(TypeError):\n dim.name = 4", "def setAxisName(name, axes='XYZ'):\n dislin.name(name, axes)", "def test_saved_sgrid_attributes(self):\n u1_var = self.target.U1\n u1_var_center_avg_axis = u1_var.center_axis\n expected_u1_center_axis = 0\n u1_vector_axis = u1_var.vector_axis\n expected_u1_vector_axis = 'X'\n original_angles = self.sg_obj.angles\n saved_angles = self.target.angles\n self.assertEqual(u1_var_center_avg_axis, expected_u1_center_axis)\n self.assertEqual(u1_vector_axis, expected_u1_vector_axis)\n np.testing.assert_almost_equal(original_angles, saved_angles, decimal=3)", "def test_name(self, data, firstname, secondname):\n layer = Points(data)\n assert layer.name == \"Points\"\n\n layer = Points(data, name=firstname)\n assert layer.name == firstname\n\n layer.name = secondname\n assert layer.name == secondname", "def test_axis_with_no_mapping_does_not_error_in_roundtrip_with_2_axes(ufo_module):\n doc = _make_designspace_with_axes(\n [(\"wght\", \"Weight with mapping\"), (\"wdth\", \"Width without mapping\")], ufo_module\n )\n # Add mapping to weight axis\n doc.axes[0].map = [(0, 0), (50, 350), (100, 1000)]\n\n doc2 = deepcopy(doc)\n font = to_glyphs(doc2)\n doc_rt = to_designspace(font)\n\n assert doc_rt.axes[0].serialize() == doc.axes[0].serialize()\n assert doc_rt.axes[1].serialize() == doc.axes[1].serialize()", "def change_axis_names(self, axis_map):\n axes = self.axes\n\n # Partition axes\n self.axes = [axis_map[axis] for axis in axes]\n\n # Flipped axes\n flip = self.flip\n if flip:\n self.flip = [axis_map[axis] for axis in flip]", "def ExpectAxes(self, labels, positions):\n self.assertEqual(self.Param('chxl'), labels)\n self.assertEqual(self.Param('chxp'), positions)", "def setnames(self, *args, **kwargs):\n return _coordsys.coordsys_setnames(self, *args, **kwargs)", "def test_name(self):\n\n for name in TEST_NAMES:\n self.colorspace.setName(name)\n self.assertEqual(name, self.colorspace.getName())", "def test_singularization(self):\n self.failUnless(singularize(\"axes\") == \"axis\")\n self.failUnless(singularize(\":\") == \":\")", "def test_dimensions(self):\n\n v = Vector({\"x\": 2, \"y\": 1})\n del v.dimensions[\"x\"]\n self.assertEqual({\"y\": 1}, v.dimensions)\n\n v.dimensions[\"x\"] = 1\n self.assertEqual({\"x\": 1, \"y\": 1}, v.dimensions)", "def testExtraParamsCanUseNewNames(self):\n self.chart.display.extra_params['fancy_new_feature'] = 'shiny'\n self.assertEqual(self.Param('fancy_new_feature'), 'shiny')", "def setAxisNameDistance(dist,axes='XYZ'):\n dislin.namdis(dist, axes)", "def test_signature(self, method):\n fig = plt.figure()\n ax_test = fig.add_subplot(projection=\"ternary\")\n ax_ref = fig.add_subplot()\n signature_test = inspect.signature(getattr(ax_test, method))\n signature_ref = inspect.signature(getattr(ax_ref, method))\n assert signature_test == signature_ref", "def test_patch_namespaced_scale_scale(self):\n pass", "def test_find_dispersion_axis():\n dm = SlitModel()\n\n dm.meta.wcsinfo.dispersion_direction = 1 # horizontal\n assert find_dispersion_axis(dm) == 0 # X axis for wcs functions\n\n dm.meta.wcsinfo.dispersion_direction = 2 # vertical\n assert find_dispersion_axis(dm) == 1 # Y axis for wcs functions", "def test_name(self):\n node = self.create(ObjectNodeItem, UML.ObjectNode)\n name = node.shape.icon.children[1]\n\n node.subject.name = \"Blah\"\n\n assert \"Blah\" == name.text()", "def _handle_setup_axis(self, axis_args):\n 
axis_name = axis_args['name']\n axes_dict = self.server.axes\n\n if axis_name not in [name for name, _ in axes_dict.items()]:\n print \"Adding a new axis:\", axis_name\n axis_count = len(axes_dict)\n newaxis = self.server.figure.add_subplot(axis_count+1, 1, axis_count+1)\n axes_dict[axis_name] = newaxis\n axes_dict[axis_name].grid(True)\n axes_dict[axis_name].set_xlabel(axis_args['x_label'])\n axes_dict[axis_name].set_ylabel(axis_args['y_label'])\n # TODO: support *.set_title(\"Title\")\n if FLAGS.logy:\n axes_dict[axis_name].set_yscale('log', nonposy='clip')\n\n if axis_count != 0:\n # Resize other axes if the above wasn't the first.\n axis_count = len(axes_dict)\n for row,(name, _) in enumerate(axes_dict.items(), 1):\n print name, axis_count, row\n axes_dict[name].change_geometry(axis_count, 1, row)", "def test_replace_namespaced_scale_scale(self):\n pass", "def test_slice_name(self):\n self.insert()\n data = self.tbl['name']\n assert self.check(self.idata[:, [0, 1]], data)", "def setaxesnames(self):\n if not self._axesnames or self.prop['skipsai']:\n return\n debug('ControllerStartup.setaxesnames()')\n oldaxes = self.pidevice.qSAI_ALL()\n for i, newaxis in enumerate(self.axesnames):\n if newaxis != oldaxes[i] or self.prop['forcesai']:\n setstage = False\n if self.pidevice.HasqCST():\n if self.pidevice.qCST()[oldaxes[i]] == 'NOSTAGE':\n try:\n debug('try rename NOSTAGE to TEMP (0x3C)')\n self.pidevice.SPA(oldaxes[i], 0x3c, 'TEMP')\n setstage = True\n except GCSError:\n pass\n self.pidevice.SAI(oldaxes[i], newaxis)\n if setstage:\n self.pidevice.SPA(newaxis, 0x3c, 'NOSTAGE')\n debug('restore NOSTAGE (0x3C)')", "def test_valid_tensor_op_name_inputs(self, data, description):\n name_a, name_b = data\n self.assertEqual(name_a, name_b, msg=description)", "def setAxisNameJustification(jus, axes='XYZ'):\n dislin.namjus(justdict[jus],axes)", "def setAxisNameColor(idx=-1, axes='XYZ'):\n dislin.axclrs(idx, 'Name', axes)", "def findaxisbyname(self, *args, **kwargs):\n return _coordsys.coordsys_findaxisbyname(self, *args, **kwargs)", "def test_weight_width_custom(axes, ufo_module):\n doc = _make_designspace_with_axes(axes, ufo_module)\n\n font = to_glyphs(doc)\n\n if _is_subset_of_default_axes([GSAxis(name=n, tag=t) for t, n in axes]):\n assert font.customParameters[\"Axes\"] is None\n else:\n assert font.customParameters[\"Axes\"] == [\n {\"Tag\": tag, \"Name\": name} for tag, name in axes\n ]\n\n doc = to_designspace(font, ufo_module=ufo_module)\n\n assert len(doc.axes) == len(axes)\n for doc_axis, (tag, name) in zip(doc.axes, axes):\n assert doc_axis.tag == tag\n assert doc_axis.name == name", "def test_axis_with_no_mapping_does_not_error_in_roundtrip(ufo_module):\n doc = designspaceLib.DesignSpaceDocument()\n\n # Add a \"Regular\" source\n regular = doc.newSourceDescriptor()\n regular.font = ufo_module.Font()\n regular.location = {\"Style\": 0}\n doc.addSource(regular)\n\n axis = doc.newAxisDescriptor()\n axis.tag = \"styl\"\n axis.name = \"Style\"\n doc.addAxis(axis)\n\n # This axis spans a range of 0 to 1 but only has a source at {\"Style\": 0}\n # and no explicit mapping. 
The point of this test is to see if the min and\n # max are still the same after round tripping.\n doc.axes[0].minimum = 0\n doc.axes[0].maximum = 1\n doc.axes[0].default = 0\n doc.axes[0].map = []\n\n doc2 = deepcopy(doc)\n font = to_glyphs(doc2)\n doc_rt = to_designspace(font)\n\n assert doc_rt.axes[0].serialize() == doc.axes[0].serialize()", "def axesNames(self, data, info):\n return []", "def test_name(self):\n self.assertTrue(type(x.name) == str)", "def test_unicode_names(self):\n self.assertArrayEqual(self.dset['a'], self.data['a'])\n self.dset['a'] = 42\n data = self.data.copy()\n data['a'] = 42\n self.assertArrayEqual(self.dset['a'], data['a'])" ]
[ "0.62136984", "0.6049257", "0.59505177", "0.5885029", "0.587727", "0.58177626", "0.57560587", "0.5734578", "0.5728373", "0.5708425", "0.5682878", "0.5643425", "0.56203425", "0.5597272", "0.55956787", "0.55829436", "0.55688053", "0.55611026", "0.55029243", "0.54853517", "0.54835397", "0.5479305", "0.54214615", "0.53972876", "0.53864664", "0.53606373", "0.53556985", "0.5333934", "0.5330008", "0.5298109" ]
0.619606
1
Test that channel axis names are modifiable
def test_alternate_channel_axes(conv1d_placeholder, output_size, channel_axis): channel_axis.name = "channel" assert len(conv1d_placeholder.axes.find_by_name("channel")) == 1 conv_layer = Convolution((3, output_size), lambda x: 1) with pytest.raises(IncompatibleAxesError): conv_layer(conv1d_placeholder) output = conv_layer(conv1d_placeholder, channel_axes="channel") assert output.axes == conv1d_placeholder.axes
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_channel_axis_introduction(conv1d_no_channel_axis, output_size, channel_axis):\n conv_layer = Convolution((3, output_size), lambda x: 1)\n output = conv_layer(conv1d_no_channel_axis)\n t_axes = conv1d_no_channel_axis.axes + channel_axis\n assert output.axes.is_equal_set(t_axes), (\"Output axes are not input axes + channel axis:\"\n \"{} != {} + {}\").format(output.axes,\n conv1d_no_channel_axis.axes,\n channel_axis)", "def test_name(self):\n\n for name in TEST_NAMES:\n self.colorspace.setName(name)\n self.assertEqual(name, self.colorspace.getName())", "def test_channels(self):\n test_channels = 2\n self.encoder._channels = test_channels\n self.assertEqual(self.encoder._channels, test_channels)", "def test_set_col_names(self):\n self.dboard.set_col_names([\"A\", \"B\"])\n self.assertTrue(hasattr(self.dboard, \"_col_names\"))\n self.assertEqual(2, len(self.dboard._col_names))", "def test_set_bad_name(self):\n dim = Dimension(\"yolo\", \"uniform\", -3, 4, shape=(4, 4))\n with pytest.raises(TypeError):\n dim.name = 4", "def test_name(name):\n expected = 'datachannel' if name is None else name\n c = DataChannel(name=name)\n assert c.name == expected", "def setAxisName(name, axes='XYZ'):\n dislin.name(name, axes)", "def ExpectAxes(self, labels, positions):\n self.assertEqual(self.Param('chxl'), labels)\n self.assertEqual(self.Param('chxp'), positions)", "def test_equality(self):\n\n for name in TEST_NAMES:\n self.colorspace.setEqualityGroup(name)\n self.assertEqual(name, self.colorspace.getEqualityGroup())", "def rename_channels(self, new_names):\n if \"time\" in new_names:\n del new_names[new_names.index(\"time\")]\n\n new_names = [\n \"time\",\n ] + new_names\n\n self.ch_name = has_size(\n is_valid(new_names, list, list_type=str), self.ch_amount, \"unknown\"\n )", "def validateChannel( self, name ):\n if name not in self.d.keys(): raise Exception('Invalid device channel {}'.format(name))", "def testExtraParamsCanUseNewNames(self):\n self.chart.display.extra_params['fancy_new_feature'] = 'shiny'\n self.assertEqual(self.Param('fancy_new_feature'), 'shiny')", "def test_alternate_spatial_axes(conv1d_placeholder, output_size, width_axis):\n width_axis.name = \"time\"\n assert len(conv1d_placeholder.axes.find_by_name(\"time\")) == 1\n\n conv_layer = Convolution((3, output_size), lambda x: 1)\n with pytest.raises(IncompatibleAxesError):\n conv_layer(conv1d_placeholder)\n # As a dictionary\n output = conv_layer(conv1d_placeholder, spatial_axes={\"W\": \"time\"})\n assert output.axes == conv1d_placeholder.axes\n # As a tuple\n output = conv_layer(conv1d_placeholder, spatial_axes=(\"D\", \"H\", \"time\"))\n assert output.axes == conv1d_placeholder.axes", "def test_switch_channels(self):\n\t\t# not available yet, experimental\n\t\tpass", "async def test_name_replacement_multiple_channels(self):\n message = \"Current. 
The following should be replaced: {channel}.\"\n await self.cog.send_message(message, *self.text_channels, alert_target=True)\n\n self.text_channels[0].send.assert_awaited_once_with(message.format(channel=self.text_channels[0].mention))\n self.text_channels[1].send.assert_awaited_once_with(message.format(channel=\"current channel\"))", "def test_default(self):\n measure_channel = MeasureChannel(123)\n\n self.assertEqual(measure_channel.index, 123)\n self.assertEqual(measure_channel.name, \"m123\")", "def setAxisNameColor(idx=-1, axes='XYZ'):\n dislin.axclrs(idx, 'Name', axes)", "def valid_ndim_assertion(expected_dimentions, actual_dimention, name):\n\tassert (actual_dimention in expected_dimentions), \"Invalid ndim of {} should be {}\".format(name, str(expected_dimentions))", "def _visibleChannels_changed(self):\n for i in range(0,8):\n if i in self.visibleChannels:\n self.masterContainer.plots[\"channel\"+str(i)][0].visible=True\n else:\n print i\n self.masterContainer.plots[\"channel\"+str(i)][0].visible=False", "def change_axis_names(self, axis_map):\n axes = self.axes\n\n # Partition axes\n self.axes = [axis_map[axis] for axis in axes]\n\n # Flipped axes\n flip = self.flip\n if flip:\n self.flip = [axis_map[axis] for axis in flip]", "def test_dimensions(self):\n\n v = Vector({\"x\": 2, \"y\": 1})\n del v.dimensions[\"x\"]\n self.assertEqual({\"y\": 1}, v.dimensions)\n\n v.dimensions[\"x\"] = 1\n self.assertEqual({\"x\": 1, \"y\": 1}, v.dimensions)", "def test_unicode_channel_name(self):\n channel_layer.send(\"\\u00a3_test\", {\"value\": \"blue\"})\n # Get just one first\n channel, message = channel_layer.receive_many([\"\\u00a3_test\"])\n self.assertEqual(channel, \"\\u00a3_test\")\n self.assertEqual(message, {\"value\": \"blue\"})", "def test_name(self):\n node = self.create(ObjectNodeItem, UML.ObjectNode)\n name = node.shape.icon.children[1]\n\n node.subject.name = \"Blah\"\n\n assert \"Blah\" == name.text()", "def test_inspect_set_and_unset_bads(tmp_path):\n matplotlib = pytest.importorskip(\"matplotlib\")\n import matplotlib.pyplot as plt\n from mne.utils._testing import _click_ch_name\n\n matplotlib.use(\"Agg\")\n plt.close(\"all\")\n\n bids_root = setup_bids_test_dir(tmp_path)\n bids_path = _bids_path.copy().update(root=bids_root)\n raw = read_raw_bids(bids_path=bids_path, verbose=\"error\")\n orig_bads = raw.info[\"bads\"].copy()\n\n # Mark some channels as bad by clicking on their name.\n inspect_dataset(bids_path, find_flat=False)\n raw_fig = mne_bids.inspect._global_vars[\"raw_fig\"]\n _click_ch_name(raw_fig, ch_index=0, button=1)\n _click_ch_name(raw_fig, ch_index=1, button=1)\n _click_ch_name(raw_fig, ch_index=4, button=1)\n\n # Close window and save changes.\n key_event = KeyEvent(name=\"Close\", canvas=raw_fig.canvas, key=raw_fig.mne.close_key)\n raw_fig.canvas.callbacks.process(\"key_press_event\", key_event)\n\n fig_dialog = mne_bids.inspect._global_vars[\"dialog_fig\"]\n key_event = KeyEvent(name=\"Save\", canvas=fig_dialog.canvas, key=\"return\")\n fig_dialog.canvas.callbacks.process(\"key_press_event\", key_event)\n\n # Inspect the data again, click on two of the bad channels to mark them as\n # good.\n inspect_dataset(bids_path, find_flat=False)\n raw_fig = mne_bids.inspect._global_vars[\"raw_fig\"]\n _click_ch_name(raw_fig, ch_index=1, button=1)\n _click_ch_name(raw_fig, ch_index=4, button=1)\n\n # Close window and save changes.\n key_event = KeyEvent(name=\"Close\", canvas=raw_fig.canvas, key=raw_fig.mne.close_key)\n 
raw_fig.canvas.callbacks.process(\"key_press_event\", key_event)\n\n fig_dialog = mne_bids.inspect._global_vars[\"dialog_fig\"]\n\n key_event = KeyEvent(name=\"Save\", canvas=fig_dialog.canvas, key=\"return\")\n fig_dialog.canvas.callbacks.process(\"key_press_event\", key_event)\n\n # Check marking the channels as good has actually worked.\n expected_bads = orig_bads + [\"MEG 0113\"]\n raw = read_raw_bids(bids_path=bids_path, verbose=\"error\")\n new_bads = raw.info[\"bads\"]\n assert set(new_bads) == set(expected_bads)", "def axes_coupled(robot):\n\n target_ctrl_path = get_target_ctrl_path(robot)\n attr_name = 'axisCoupling'\n\n attr_path = target_ctrl_path + '.' + attr_name\n\n if not pm.objExists(attr_path):\n return False\n else:\n return pm.getAttr(attr_path)", "def test_new_channel(self):\n pattern = \"test.?.foo.?\"\n name1 = channel_layer.new_channel(pattern)\n self.assertIsInstance(name1, six.text_type)\n # Send a message and make sure new_channel on second pass changes\n channel_layer.send(name1, {\"value\": \"blue\"})\n name2 = channel_layer.new_channel(pattern)\n # Make sure the two ?s are replaced by the same string\n bits = name2.split(\".\")\n self.assertEqual(bits[1], bits[3], \"New channel random strings don't match\")\n # Make sure we can consume off of that new channel\n channel, message = channel_layer.receive_many([name1, name2])\n self.assertEqual(channel, name1)\n self.assertEqual(message, {\"value\": \"blue\"})", "def test_component_rename_ok(self):\n test_name = sys._getframe().f_code.co_name\n self._execute('component rename component1 changed_name')\n rv, output = self._execute('component list')\n self.assertEqual(0, rv)\n self.assertEqual(self.expected_results[test_name], output)", "def test_labels():\n size = 5\n labels = {\"x\": \"distance\", \"y\": \"force\"}\n s = channel.Slice(channel.TimeSeries(np.random.rand(size), np.random.rand(size)), labels)\n assert s.labels == labels\n assert s[:].labels == labels\n assert s[:0].labels == labels\n assert s[:10].labels == labels\n\n s = channel.Slice(channel.TimeSeries([], []), labels)\n assert len(s) == 0\n assert s.labels == labels\n assert s[:].labels == labels", "def test_axis_preservation(conv1d_placeholder, output_size):\n conv_layer = Convolution((3, output_size), lambda x: 1)\n output = conv_layer(conv1d_placeholder)\n assert output.axes == conv1d_placeholder.axes, (\"Output axes are not the same as input axes: \"\n \"{} != {}\").format(output.axes,\n conv1d_placeholder.axes)", "def names(self, channel, *args, **kwargs):\n pass" ]
[ "0.6249024", "0.592017", "0.58970207", "0.5892294", "0.58472556", "0.5733948", "0.57240987", "0.567717", "0.5633438", "0.5605331", "0.55340713", "0.5515224", "0.5501755", "0.5449305", "0.5431168", "0.5381191", "0.53764176", "0.5358835", "0.5335394", "0.5330941", "0.5266454", "0.52627945", "0.5224687", "0.52038646", "0.51728123", "0.5158683", "0.5147728", "0.5144516", "0.5128149", "0.512635" ]
0.6457107
0
Test that the dilated convolution layer output matches the expected output. This test compares the maximum output value to an expected max output value. The expected value is computed based on the dilation parameter. The test also checks that the output size matches the expected size based on the dilation parameter value.
def test_dilated_conv(dilation):
    image_size = 3
    batch_size = 1
    init_val = 0.1
    conv_size = 3
    pad = 3
    N_filters = 1
    image_channels = 3

    model = Sequential([Convolution((conv_size, conv_size, N_filters),
                                    filter_init=ConstantInit(val=init_val),
                                    padding=pad,
                                    dilation=dilation)])

    X = np.ones(shape=(batch_size, 3, image_size, image_size))  # Create dummy image
    data = {'image': X, 'iteration': 1}
    data_size = OrderedDict([('N', batch_size), ('C', 3), ('H', image_size), ('W', image_size)])
    ax = [ng.make_axis(length=data_size[k], name=k) for k in list(data_size.keys())]
    p_axes = ng.make_axes(ax)
    named_inputs = {'image': ng.placeholder(p_axes)}
    outputs = model(named_inputs['image'])
    named_outputs = {outputs.name: outputs}

    with closing(ngt.make_transformer()) as transformer:
        m = make_bound_computation(transformer, named_outputs, named_inputs)
        output = m(data)[list(m(data).keys())[0]]

    filter_size = dilation * (conv_size - 1) + 1  # Compute expected filter size
    # Compute the expected output size based on convolution parameters
    out_size = (image_size + 2 * pad - filter_size) + 1
    filt_tmp = np.zeros(filter_size)
    filt_tmp[0::dilation] = 1
    # max overlap between dilated filter and image (in 1-d)
    max_overlap = int(np.min([filter_size, image_size]))
    exp_max_output = init_val * image_channels * (np.sum(filt_tmp[0: max_overlap]))**2
    # Expected max output changes for different dilation parameter values
    assert int(10 * np.max(output)) == int(10 * exp_max_output), \
        ("Dilated conv max outputs do not match expected: "
         "{} != {}").format(np.max(output),
                            init_val * conv_size * ((image_size - (dilation - 1))**2))
    assert np.shape(output) == (batch_size, N_filters, out_size, out_size), \
        ("Dilated conv output is not expected size: "
         "{} != {}").format(np.shape(output), (batch_size, N_filters, out_size, out_size))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _VerifyValues(\n self,\n input_sizes=None,\n filter_sizes=None,\n strides=None,\n dilations=None,\n padding=None,\n data_format_src=\"NHWC\",\n data_format_dst=\"NHWC\",\n expected=None,\n ):\n\n total_size_1 = np.prod(input_sizes)\n total_size_2 = np.prod(filter_sizes)\n x1 = np.arange(1, total_size_1 + 1, dtype=np.float32).reshape(input_sizes)\n x2 = np.arange(1, total_size_2 + 1, dtype=np.float32).reshape(filter_sizes)\n strides = [1] + strides + [1]\n if dilations is None:\n dilations = [1, 1]\n dilations = [1] + dilations + [1]\n\n # Convert between data formats.\n expected = test_utils.ConvertBetweenDataFormats(\n expected, data_format_src, data_format_dst\n )\n x1 = test_utils.ConvertBetweenDataFormats(x1, data_format_src, data_format_dst)\n input_sizes = test_utils.PermuteDimsBetweenDataFormats(\n input_sizes, data_format_src, data_format_dst\n )\n strides = test_utils.PermuteDimsBetweenDataFormats(\n strides, data_format_src, data_format_dst\n )\n dilations = test_utils.PermuteDimsBetweenDataFormats(\n dilations, data_format_src, data_format_dst\n )\n\n with self.session() as sess:\n t1 = array_ops.placeholder(dtypes.float32, shape=input_sizes)\n t2 = array_ops.placeholder(dtypes.float32, shape=filter_sizes)\n with self.test_scope():\n out = nn_ops.conv2d(\n t1,\n t2,\n strides=strides,\n padding=padding,\n data_format=data_format_dst,\n dilations=dilations,\n )\n\n value = sess.run(out, {t1: x1, t2: x2})\n self.assertAllClose(expected, value, 1e-3)", "def test_same_convolution(conv1d_placeholder, spatial_onehot, output_size, width, stride):\n conv_layer = Convolution((3, output_size), lambda x: 1, strides=stride, padding=\"same\")\n output = conv_layer(conv1d_placeholder)\n output_width = output.axes.find_by_name(\"W\")[0].length\n assert output_width == np.ceil(width / float(stride)), (\"Same convolution output width != \"\n \"ceil(input_width / stride): {} != \"\n \"ceil({} / {})\").format(output_width,\n width,\n stride)", "def test_on_conv_transpose_2d_dilation_padding_valid(self):\n\n # Channels/Colors, #filters, filter_size (square)\n conv_filter = objax.nn.ConvTranspose2D(1, 1, 2, dilations=2, padding=objax.ConvPadding.VALID)\n weights = objax.TrainVar(jn.array([[[[1., 2.], [3., 4.]]]]).transpose((2, 3, 1, 0)))\n conv_filter.w = weights\n image = jn.array([[[[2., 1., 3., 4.], [5., 6., 7., 8.],\n [9., 10., 11., 12.], [13., 14., 15., 16.]]]])\n # NCHW: Batch, Channels/Colors, Height, Width\n features = conv_filter(image)\n expected_features = jn.array([[[[2., 1., 7., 6., 6., 8.],\n [5., 6., 17., 20., 14., 16.],\n [15., 13., 46., 48., 34., 40.],\n [28., 32., 82., 92., 58., 64.],\n [27., 30., 69., 76., 44., 48.],\n [39., 42., 97., 104., 60., 64.]]]])\n self.assertEqual(features.shape, (1, 1, 6, 6))\n self.assertTrue(jn.array_equal(features, expected_features))", "def _VerifyValues(\n self,\n input_sizes=None,\n filter_sizes=None,\n out_backprop_sizes=None,\n strides=None,\n dilations=None,\n padding=None,\n data_format_src=\"NHWC\",\n data_format_dst=\"NHWC\",\n expected=None,\n ):\n\n total_size_1 = np.prod(filter_sizes)\n total_size_2 = np.prod(out_backprop_sizes)\n x1 = np.arange(1, total_size_1 + 1, dtype=np.float32).reshape(filter_sizes)\n x2 = np.arange(1, total_size_2 + 1, dtype=np.float32).reshape(\n out_backprop_sizes\n )\n strides = [1] + strides + [1]\n if dilations is not None:\n dilations = [1] + dilations + [1]\n\n expected = np.reshape(expected, input_sizes)\n\n # Convert between data formats.\n expected = test_utils.ConvertBetweenDataFormats(\n expected, 
data_format_src, data_format_dst\n )\n x2 = test_utils.ConvertBetweenDataFormats(x2, data_format_src, data_format_dst)\n input_sizes = test_utils.PermuteDimsBetweenDataFormats(\n input_sizes, data_format_src, data_format_dst\n )\n out_backprop_sizes = test_utils.PermuteDimsBetweenDataFormats(\n out_backprop_sizes, data_format_src, data_format_dst\n )\n strides = test_utils.PermuteDimsBetweenDataFormats(\n strides, data_format_src, data_format_dst\n )\n if dilations is not None:\n dilations = test_utils.PermuteDimsBetweenDataFormats(\n dilations, data_format_src, data_format_dst\n )\n\n with self.session() as sess:\n t1 = array_ops.placeholder(dtypes.float32, shape=filter_sizes)\n t2 = array_ops.placeholder(dtypes.float32, shape=out_backprop_sizes)\n with self.test_scope():\n out = gen_nn_ops.conv2d_backprop_input(\n input_sizes=input_sizes,\n filter=t1,\n out_backprop=t2,\n strides=strides,\n dilations=dilations,\n padding=padding,\n data_format=data_format_dst,\n )\n\n value = sess.run(out, {t1: x1, t2: x2})\n self.assertAllEqual(input_sizes, value.shape)\n self.assertAllClose(expected, value, 1e-3)", "def _VerifyValues(\n self,\n input_sizes=None,\n filter_sizes=None,\n out_backprop_sizes=None,\n strides=None,\n dilations=None,\n padding=None,\n data_format_src=\"NHWC\",\n data_format_dst=\"NHWC\",\n expected=None,\n ):\n\n total_size_1 = np.prod(input_sizes)\n total_size_2 = np.prod(out_backprop_sizes)\n x1 = np.arange(1, total_size_1 + 1, dtype=np.float32).reshape(input_sizes)\n x2 = np.arange(1, total_size_2 + 1, dtype=np.float32).reshape(\n out_backprop_sizes\n )\n strides = [1] + strides + [1]\n if dilations is not None:\n dilations = [1] + dilations + [1]\n\n expected = np.reshape(expected, filter_sizes)\n\n # Convert between data formats.\n x1 = test_utils.ConvertBetweenDataFormats(x1, data_format_src, data_format_dst)\n x2 = test_utils.ConvertBetweenDataFormats(x2, data_format_src, data_format_dst)\n input_sizes = test_utils.PermuteDimsBetweenDataFormats(\n input_sizes, data_format_src, data_format_dst\n )\n out_backprop_sizes = test_utils.PermuteDimsBetweenDataFormats(\n out_backprop_sizes, data_format_src, data_format_dst\n )\n strides = test_utils.PermuteDimsBetweenDataFormats(\n strides, data_format_src, data_format_dst\n )\n if dilations is not None:\n dilations = test_utils.PermuteDimsBetweenDataFormats(\n dilations, data_format_src, data_format_dst\n )\n\n with self.session() as sess:\n t1 = array_ops.placeholder(dtypes.float32, shape=input_sizes)\n t2 = array_ops.placeholder(dtypes.float32, shape=out_backprop_sizes)\n with self.test_scope():\n tensor = gen_nn_ops.conv2d_backprop_filter(\n input=t1,\n filter_sizes=filter_sizes,\n out_backprop=t2,\n strides=strides,\n dilations=dilations,\n padding=padding,\n data_format=data_format_dst,\n )\n\n value = sess.run(tensor, {t1: x1, t2: x2})\n self.assertAllEqual(filter_sizes, value.shape)\n self.assertAllClose(expected, value, 1e-3)", "def test_conv2d_failure(shape, pad, stride, dilation, err_msg):\n np.random.seed(0)\n\n kernel_size = (2, 2)\n groups = 1\n dtype = \"uint8\"\n out_channels = 8\n weight_format = \"HWIO\"\n\n model, _ = _get_model(\n shape,\n kernel_size[0],\n kernel_size[1],\n 0,\n 1,\n 0,\n 1,\n 0,\n 1,\n pad,\n stride,\n dilation,\n groups,\n dtype,\n out_channels,\n weight_format,\n )\n model = tei.make_ethosn_composite(model, \"ethos-n.qnn_conv2d\")\n mod = tei.make_ethosn_partition(model)\n tei.test_error(mod, {}, err_msg)", "def testOutputShapeConsistency(self, use_bias):\n\n # When padding is SAME, then 
the actual number of padding pixels can be\n # computed as: pad = kernel_shape - strides + (-input_shape % strides)\n # = 5 - 1 + (- 32 % 1) = 4\n\n # The formula for the minimal size is:\n # oH = strides[1] * (in_height - 1) - padding + kernel_shape_h\n # oH = 1 * ( 32 - 1) - 4 + 5 = 32\n\n # The formula for the maximum size (due to extra pixels) is:\n # oH_max = oH + strides[1] - 1\n # so, for strides = 1 and padding = SAME, input size == output size.\n inputs = tf.placeholder(tf.float32, shape=self.in_shape)\n\n conv1 = snt.Conv2DTranspose(name=\"conv2d_1\",\n output_channels=self.out_channels,\n output_shape=self.out_shape,\n kernel_shape=self.kernel_shape,\n padding=self.padding,\n stride=1,\n use_bias=use_bias)\n\n outputs = conv1(inputs)\n\n self.assertTrue(outputs.get_shape().is_compatible_with((\n self.batch_size,) + self.out_shape + (self.out_channels,)))\n\n self.assertTrue(conv1.w.get_shape().is_compatible_with(self.kernel_shape2))\n if use_bias:\n self.assertTrue(conv1.b.get_shape().is_compatible_with(\n [self.out_channels]))", "def test_on_conv_transpose_2d_dilation_padding_same(self):\n\n # Channels/Colors, #filters, filter_size (square)\n conv_filter = objax.nn.ConvTranspose2D(1, 1, 2, dilations=2, padding=objax.ConvPadding.SAME)\n weights = objax.TrainVar(jn.array([[[[1., 2.], [3., 4.]]]]).transpose((2, 3, 1, 0)))\n conv_filter.w = weights\n image = jn.array([[[[2., 1., 3., 4.], [5., 6., 7., 8.],\n [9., 10., 11., 12.], [13., 14., 15., 16.]]]])\n # NCHW: Batch, Channels/Colors, Height, Width\n features = conv_filter(image)\n expected_features = jn.array([[[[6., 17., 20., 14.],\n [13., 46., 48., 34.],\n [32., 82., 92., 58.],\n [30., 69., 76., 44.]]]])\n self.assertEqual(features.shape, (1, 1, 4, 4))\n self.assertTrue(jn.array_equal(features, expected_features))", "def test_convolution():\n # Default test\n inputs_shape = [3,3,4,5,3]\n filters_shape = [3,1,4,4,3]\n test_convolution_for_parameters(inputs_shape, filters_shape,\n \"Default test\")\n # All dimensions 1\n inputs_shape = [1,1,1,1,1]\n filters_shape = [1,1,1,1,1]\n test_convolution_for_parameters(inputs_shape, filters_shape,\n \"Input and filter dimensions 1\")\n # Filter spans all dimensions\n # This will lead to a failure for theano 2d3d for some reason\n # (for now we ignore this and remove theano2d3d for this test\n inputs_shape = [3,3,4,5,3]\n filters_shape = [3,3,4,5,3]\n test_convolution_for_parameters(inputs_shape, filters_shape,\n \"Filter dimension = Input dimension\")\n # Filter smaller for all dimensions\n inputs_shape = [3,3,4,5,3]\n filters_shape = [3,2,2,2,3]\n test_convolution_for_parameters(inputs_shape, filters_shape, \n \"Filter dimension < all Input dimension\")\n # 1,1,1,1,1 filter\n # Filter smaller for all dimensions\n inputs_shape = [3,3,4,5,1]\n filters_shape = [3,1,1,1,1]\n test_convolution_for_parameters(inputs_shape, filters_shape, \n \"Filter dimension 1 everywhere\")", "def test_deconvolve_once_general(self):\n tau = 50.0\n tau_deconv = 20.0\n mrate = 50.0\n Mrate = 100.0\n\n tmax = 60.0\n dt = 0.1\n\n self.rule.tau = tau\n self.rule.min_rate = mrate\n self.rule.max_rate = Mrate\n self.rule.compress_rates = False\n\n self.motor.error_fct = lambda t: (int_r(t/20.0)%3-1)*np.ones(self.Nsrc)\n\n M1 = simulation.StateMonitor(self.rule, 'out')\n\n sim1 = simulation.Simulation(self.source, self.motor, self.rule, M1, dt=dt)\n sim1.run(tmax)\n\n self.rule.tau_deconv1 = tau_deconv\n\n M2 = simulation.StateMonitor(self.rule, 'out')\n\n sim2 = simulation.Simulation(self.source, self.motor, 
self.rule, M2, dt=dt)\n sim2.run(tmax)\n\n mavg = (mrate + Mrate)*0.5\n mdiff = (Mrate - mrate)*0.5\n\n out1 = (M1.out - mavg)/mdiff\n out2 = (M2.out - mavg)/mdiff\n\n der_out1 = np.diff(out1, axis=1)/dt\n\n expected_out2_crop = out1[:, 1:] + tau_deconv*der_out1\n\n # mismatch is relatively large since we're using Euler's method\n # we can't do much better, however, since the motor controller cannot give\n # us motor error information at sub-step resolution\n mismatch = np.mean(np.abs(expected_out2_crop - out2[:, 1:])/\n expected_out2_crop)\n self.assertLess(mismatch, 1e-3)", "def test_DeformableConvolution():\n try:\n ctx = mx.gpu()\n _ = mx.nd.array([0], ctx=ctx)\n except mx.base.MXNetError:\n pytest.skip(\"deformable_convolution only supports GPU\")\n net = nn.HybridSequential()\n net.add(\n nn.DeformableConvolution(10, kernel_size=(3, 3), strides=1, padding=0),\n nn.DeformableConvolution(10, kernel_size=(3, 2), strides=1, padding=0, activation='relu',\n offset_use_bias=False, use_bias=False),\n nn.DeformableConvolution(10, kernel_size=(3, 2), strides=1, padding=0, activation='relu',\n offset_use_bias=False),\n nn.DeformableConvolution(10, kernel_size=(3, 2), strides=1, padding=0, activation='relu',\n use_bias=False),\n nn.DeformableConvolution(10, kernel_size=(3, 2), strides=1, padding=0, offset_use_bias=False, use_bias=False),\n nn.DeformableConvolution(10, kernel_size=(3, 2), strides=1, padding=0, offset_use_bias=False),\n nn.DeformableConvolution(12, kernel_size=(3, 2), strides=1, padding=0, use_bias=False),\n nn.DeformableConvolution(12, kernel_size=(3, 2), strides=1, padding=0, use_bias=False, num_deformable_group=4),\n )\n\n net.initialize(force_reinit=True, ctx=ctx)\n net.hybridize()\n\n x = mx.nd.random.uniform(shape=(8, 5, 30, 31), ctx=ctx)\n with mx.autograd.record():\n y = net(x)\n y.backward()", "def testOutputShapeConsistency(self, use_bias):\n\n # When padding is SAME, then the actual number of padding pixels can be\n # computed as: pad = kernel_shape - strides + (-input_shape % strides)\n # = 5 - 1 + (- 32 % 1) = 4\n\n # The formula for the minimal size is:\n # oH = strides[1] * (in_height - 1) - padding + kernel_shape_h\n # oH = 1 * ( 32 - 1) - 4 + 5 = 32\n\n # The formula for the maximum size (due to extra pixels) is:\n # oH_max = oH + strides[1] - 1\n # so, for strides = 1 and padding = SAME, input size == output size.\n inputs = tf.placeholder(tf.float32, shape=self.in_shape)\n\n conv1 = snt.Conv3DTranspose(name=\"conv3d_1\",\n output_channels=self.out_channels,\n output_shape=self.out_shape,\n kernel_shape=self.kernel_shape,\n padding=self.padding,\n stride=1,\n use_bias=use_bias)\n\n outputs = conv1(inputs)\n\n self.assertTrue(outputs.get_shape().is_compatible_with((\n self.batch_size,) + self.out_shape + (self.out_channels,)))\n\n self.assertTrue(conv1.w.get_shape().is_compatible_with(self.kernel_shape2))\n if use_bias:\n self.assertTrue(conv1.b.get_shape().is_compatible_with(\n [self.out_channels]))", "def test_conv_inverts_deconv(transformer_factory, input_size, filter_size, padding, stride):\n\n # convolutions whose output size are not an even multiple of stride cannot be exactly inverted\n a = (input_size + sum(padding) - filter_size) % stride\n conv_output = utils.conv_output_dim(input_size, filter_size, padding, stride)\n deconv_output = utils.deconv_output_dim(conv_output, filter_size, padding, stride)\n\n assert deconv_output == (input_size - a), (\"Convolution and Deconvolution do not invert:\\n\"\n \"output ({}) != input ({}) - a ({})\\n\"\n \"filter: 
{}, padding: {}, stride: {}\"\n ).format(deconv_output, input_size, a,\n filter_size, padding, stride)", "def test_network(neural_network, test_data):\n total_trials = 0\n correct_trials = 0\n output_values = [np.argmax(neural_network.calculate_output(vector[1])) for vector in test_data]\n expected_values = list(zip(*test_data))[0]\n for expected, recieved in zip(expected_values,output_values):\n total_trials += 1\n if expected == recieved:\n correct_trials+=1\n return correct_trials/total_trials", "def test_deconv():\n\n # filter params\n R, S = 5, 5\n fshape = (R, S, 1)\n strides = 2\n filter_val_nz = np.arange(1, R * S + 1).reshape(R, S)\n filter_val = np.zeros(fshape)\n filter_val[:, :, 0] = filter_val_nz\n\n deconv = Deconvolution(fshape,\n filter_init=ConstantInit(filter_val),\n strides=strides,\n padding=0,\n dilation=1)\n\n N = ng.make_axis(name='N', length=1) # batch\n image_shape = (1, 8, 8) # CHW\n image_axes = ng.make_axes([ng.make_axis(name=nm, length=l)\n for nm, l in zip('CHW', image_shape)])\n image_axes |= N\n image = ng.placeholder(axes=image_axes)\n\n output = deconv(image)\n\n with closing(ngt.make_transformer()) as transformer:\n comp = transformer.add_computation(ng.computation(output, image))\n input_val = np.zeros(image_shape + (N.length, ), dtype=float)\n input_val[0, 0, 0] = 1\n input_val[0, 5, 5] = 1\n input_val[0, 7, 7] = 1\n result = comp(input_val)\n feature_map = np.squeeze(result)\n\n assert (feature_map[:5, :5] == filter_val_nz).all()\n\n result2 = filter_val_nz.copy()\n result2[-1, -1] = 26\n assert (feature_map[10:15, 10:15] == result2).all()\n\n result3 = filter_val_nz.copy()\n result3[0, 0] = 26\n assert (feature_map[-5:, -5:] == result3).all()", "def test_deconvolve_to_motor_error(self):\n tau = 50.0\n mrate = 50.0\n Mrate = 100.0\n\n tmax = 50.0\n dt = 0.1\n\n self.rule.tau = tau\n self.rule.min_rate = mrate\n self.rule.max_rate = Mrate\n self.rule.compress_rates = False\n self.rule.gain = 1\n self.rule.tau_deconv1 = tau\n\n self.motor.error_fct = lambda _: np.ones(self.Nsrc)\n\n M = simulation.StateMonitor(self.rule, 'out')\n\n sim = simulation.Simulation(self.source, self.motor, self.rule, M, dt=dt)\n sim.run(tmax)\n \n # the output should be almost constant\n self.assertAlmostEqual(np.std(M.out)/np.mean(M.out), 0)", "def testComputationValid(self, use_bias):\n\n conv1 = snt.SeparableConv2D(\n output_channels=1,\n channel_multiplier=1,\n kernel_shape=[3, 3],\n padding=snt.VALID,\n use_bias=use_bias,\n initializers=create_separable_constant_initializers(\n 1.0, 1.0, 1.0, use_bias))\n\n out = conv1(tf.constant(np.ones([1, 5, 5, 1], dtype=np.float32)))\n expected_out = np.array([[10, 10, 10],\n [10, 10, 10],\n [10, 10, 10]])\n if not use_bias:\n expected_out -= 1\n\n with self.test_session():\n tf.variables_initializer(\n [conv1.w_dw, conv1.w_pw, conv1.b] if use_bias else\n [conv1.w_dw, conv1.w_pw]).run()\n\n self.assertAllClose(np.reshape(out.eval(), [3, 3]), expected_out)", "def testComputationValidChannelMultiplier(self, use_bias):\n\n input_channels = 3\n channel_multiplier = 5\n output_channels = input_channels * channel_multiplier\n conv1 = snt.SeparableConv2D(\n output_channels=output_channels,\n channel_multiplier=channel_multiplier,\n kernel_shape=[3, 3],\n padding=snt.VALID,\n use_bias=use_bias,\n initializers=create_separable_constant_initializers(\n 1.0, 1.0, 1.0, use_bias))\n\n input_data = np.ones([1, 5, 5, input_channels], dtype=np.float32)\n out = conv1(tf.constant(input_data))\n expected_out = np.ones((3, 3, output_channels)) * 136\n 
if not use_bias:\n expected_out -= 1\n\n self.assertTrue(out.get_shape().is_compatible_with([1, 3, 3, output_channels\n ]))\n\n with self.test_session():\n tf.variables_initializer(\n [conv1.w_dw, conv1.w_pw, conv1.b] if use_bias else\n [conv1.w_dw, conv1.w_pw]).run()\n\n self.assertAllClose(np.reshape(out.eval(), [3, 3, output_channels]),\n expected_out)\n # Each convolution with weight 1 and size 3x3 results in an output of 9.\n # Pointwise filter is [1, 1, input_channels * channel_multiplier = 15, x].\n # Results in 9 * 15 = 135 + 1 bias = 136 as outputs.", "def deconv_output_length(input_length,\n filter_size,\n padding,\n output_padding=None,\n stride=0,\n dilation=1):\n assert padding in {'same', 'valid', 'full'}\n if input_length is None:\n return None\n\n # Get the dilated kernel size\n filter_size = filter_size + (filter_size - 1) * (dilation - 1)\n\n # Infer length if output padding is None, else compute the exact length\n if output_padding is None:\n if padding == 'valid':\n length = input_length * stride + max(filter_size - stride, 0)\n elif padding == 'full':\n length = input_length * stride - (stride + filter_size - 2)\n elif padding == 'same':\n length = input_length * stride\n\n else:\n if padding == 'same':\n pad = filter_size // 2\n elif padding == 'valid':\n pad = 0\n elif padding == 'full':\n pad = filter_size - 1\n\n length = ((input_length - 1) * stride + filter_size - 2 * pad +\n output_padding)\n return length", "def testComputationValid(self, use_bias):\n conv1 = snt.DepthwiseConv2D(\n channel_multiplier=1,\n kernel_shape=[3, 3],\n stride=1,\n padding=snt.VALID,\n use_bias=use_bias,\n initializers=create_constant_initializers(1.0, 1.0, use_bias))\n\n out = conv1(tf.constant(np.ones([1, 5, 5, 1], dtype=np.float32)))\n expected_out = np.array([[10, 10, 10],\n [10, 10, 10],\n [10, 10, 10]])\n if not use_bias:\n expected_out -= 1\n\n with self.test_session():\n tf.variables_initializer(\n [conv1.w, conv1.b] if use_bias else [conv1.w]).run()\n\n self.assertAllClose(np.reshape(out.eval(), [3, 3]), expected_out)", "def testShapesSame(self, use_bias):\n\n out_channels = self.out_channels_dw\n input_shape = self.input_shape\n kernel_shape = self.kernel_shape\n output_shape = self.output_shape\n depthwise_filter_shape = self.depthwise_filter_shape\n pointwise_filter_shape = self.pointwise_filter_shape\n channel_multiplier = self.channel_multiplier\n\n inputs = tf.placeholder(tf.float32, shape=input_shape)\n\n conv1 = snt.SeparableConv2D(\n output_channels=out_channels,\n channel_multiplier=channel_multiplier,\n kernel_shape=kernel_shape,\n padding=snt.SAME,\n use_bias=use_bias)\n\n output = conv1(inputs)\n\n self.assertTrue(output.get_shape().is_compatible_with(output_shape))\n self.assertTrue(conv1.w_dw.get_shape().is_compatible_with(\n depthwise_filter_shape))\n self.assertTrue(conv1.w_pw.get_shape().is_compatible_with(\n pointwise_filter_shape))\n if use_bias:\n self.assertTrue(conv1.b.get_shape().is_compatible_with([out_channels]))", "def testComputationSame(self, use_bias):\n\n conv1 = snt.SeparableConv2D(\n output_channels=1,\n channel_multiplier=1,\n kernel_shape=[3, 3],\n padding=snt.SAME,\n name=\"conv1\",\n use_bias=use_bias,\n initializers=create_separable_constant_initializers(\n 1.0, 1.0, 1.0, use_bias))\n\n out = conv1(tf.constant(np.ones([1, 5, 5, 1], dtype=np.float32)))\n expected_out = np.array([[5, 7, 7, 7, 5],\n [7, 10, 10, 10, 7],\n [7, 10, 10, 10, 7],\n [7, 10, 10, 10, 7],\n [5, 7, 7, 7, 5]])\n if not use_bias:\n expected_out -= 1\n\n with 
self.test_session():\n tf.variables_initializer(\n [conv1.w_dw, conv1.w_pw, conv1.b] if use_bias else\n [conv1.w_dw, conv1.w_pw]).run()\n\n self.assertAllClose(np.reshape(out.eval(), [5, 5]), expected_out)", "def test_realistic_max_dose(self):\n\n # min and max dose can only really hope to be within half a bin width\n\n for struct, data in self.test_structs.items():\n dvh = DVH(data[\"doses\"], data[\"volumes\"])\n diff = dvh.max_dose - data[\"monaco_dvh_max_dose\"]\n self.assertLessEqual(abs(diff), 5.)", "def testComputationValid(self, use_bias):\n conv1 = snt.Conv2D(\n output_channels=1,\n kernel_shape=3,\n stride=1,\n padding=snt.VALID,\n name=\"conv1\",\n use_bias=use_bias,\n initializers=create_constant_initializers(1.0, 1.0, use_bias))\n\n out = conv1(tf.constant(np.ones([1, 5, 5, 1], dtype=np.float32)))\n expected_output = np.array([[10, 10, 10],\n [10, 10, 10],\n [10, 10, 10]])\n if not use_bias:\n expected_output -= 1\n\n with self.test_session():\n tf.variables_initializer(\n [conv1.w, conv1.b] if use_bias else [conv1.w]).run()\n\n self.assertAllClose(np.reshape(out.eval(), [3, 3]), expected_output)", "def testFunction(self, input_shape, stride, kernel_shape, padding,\n output_shape):\n self.assertEqual(conv._default_transpose_size(input_shape, stride,\n kernel_shape=kernel_shape,\n padding=padding),\n tuple(output_shape))", "def testComputationDilated(self, use_bias):\n conv1 = snt.CausalConv1D(\n output_channels=1,\n kernel_shape=3,\n stride=1,\n rate=2,\n use_bias=use_bias,\n name=\"conv1\",\n initializers=create_constant_initializers(1.0, 1.0, use_bias))\n\n out = conv1(tf.constant(np.ones([1, 5, 1], dtype=np.float32)))\n expected_out = np.reshape(np.array([1, 1, 2, 2, 3]), [1, 5, 1])\n if use_bias:\n expected_out += 1\n\n init_op = tf.variables_initializer(\n [conv1.w, conv1.b] if use_bias else [conv1.w])\n with self.test_session() as sess:\n sess.run(init_op)\n actual_out = sess.run(out)\n\n self.assertAllClose(actual_out, expected_out)", "def testComputationValid(self, use_bias):\n\n conv1 = snt.SeparableConv1D(\n output_channels=1,\n channel_multiplier=1,\n kernel_shape=[3],\n padding=snt.VALID,\n use_bias=use_bias,\n initializers=create_separable_constant_initializers(\n 1.0, 1.0, 1.0, use_bias))\n\n out = conv1(tf.constant(np.ones([1, 5, 1], dtype=np.float32)))\n expected_out = np.array([[[4.], [4.], [4.]]])\n if not use_bias:\n expected_out -= 1\n\n with self.test_session() as session:\n tf.variables_initializer(\n [conv1.w_dw, conv1.w_pw, conv1.b] if use_bias else\n [conv1.w_dw, conv1.w_pw]).run()\n out = session.run(out)\n self.assertAllClose(out, expected_out)", "def testShapesSame(self, use_bias):\n\n out_channels = self.out_channels_dw\n input_shape = self.input_shape\n kernel_shape = self.kernel_shape\n output_shape = self.output_shape\n depthwise_filter_shape = self.depthwise_filter_shape\n pointwise_filter_shape = self.pointwise_filter_shape\n channel_multiplier = self.channel_multiplier\n\n inputs = tf.placeholder(tf.float32, shape=input_shape)\n\n conv1 = snt.SeparableConv1D(\n output_channels=out_channels,\n channel_multiplier=channel_multiplier,\n kernel_shape=kernel_shape,\n padding=snt.SAME,\n use_bias=use_bias)\n\n output = conv1(inputs)\n\n self.assertTrue(output.get_shape().is_compatible_with(output_shape))\n self.assertTrue(conv1.w_dw.get_shape().is_compatible_with(\n depthwise_filter_shape))\n self.assertTrue(conv1.w_pw.get_shape().is_compatible_with(\n pointwise_filter_shape))\n if use_bias:\n 
self.assertTrue(conv1.b.get_shape().is_compatible_with([out_channels]))", "def testComputationSame(self, use_bias):\n conv1 = snt.DepthwiseConv2D(\n channel_multiplier=1,\n kernel_shape=[3, 3],\n stride=1,\n padding=snt.SAME,\n use_bias=use_bias,\n initializers=create_constant_initializers(1.0, 1.0, use_bias))\n\n out = conv1(tf.constant(np.ones([1, 5, 5, 1], dtype=np.float32)))\n expected_out = np.array([[5, 7, 7, 7, 5],\n [7, 10, 10, 10, 7],\n [7, 10, 10, 10, 7],\n [7, 10, 10, 10, 7],\n [5, 7, 7, 7, 5]])\n if not use_bias:\n expected_out -= 1\n\n with self.test_session():\n tf.variables_initializer(\n [conv1.w, conv1.b] if use_bias else [conv1.w]).run()\n\n self.assertAllClose(np.reshape(out.eval(), [5, 5]), expected_out)", "def evaluate(self, architecture_output, target_output):\n\n def discriminator_layer(image, name, n, depth, stride, training=True):\n \"\"\"This function creates one layer of the discriminator network.\n\n This function is to be called when creating the structure of the\n discriminator network as it's often used.\n\n Args:\n image: The image to input in the convolutions.\n\n name: The name of the layer.\n\n n: the fourth dimension of the shape of the weights.\n\n depth: the third dimension of the shape of the weights.\n\n stride: the stride to use in the convolution.\n\n Returns:\n The resulting activations after applying the layer.\n \"\"\"\n weights = tf.get_variable(shape=[3, 3, depth, n], name=\"weights\" + name,\n initializer=tf.uniform_unit_scaling_initializer(factor=0.01))\n #biases = tf.Variable(tf.constant(0.01, shape=[n]), name=\"biases\" + name)\n biases = tf.get_variable(shape=[n], name=\"biases\" + name, \n initializer=tf.constant_initializer(value=0.01))\n\n conv = tf.nn.conv2d(image, weights, strides=[1, stride, stride, 1],\n padding=\"VALID\") + biases\n leaky = tf.maximum(0.1 * conv, conv)\n\n return tf.contrib.layers.batch_norm(leaky, center=True, updates_collections=None,\n scale=True, is_training=training)\n\n def discriminator_network(image):\n # Input Layer\n weights = tf.get_variable(shape=[3, 3, 3, 64], name=\"weights1\",\n initializer=tf.uniform_unit_scaling_initializer(factor=0.01))\n #biases = tf.Variable(tf.constant(0.01, shape=[64]), name=\"biases1\")\n biases = tf.get_variable(shape=[64], name=\"biases1\", \n initializer=tf.constant_initializer(value=0.01))\n conv = tf.nn.conv2d(image, weights, strides=[1, 1, 1, 1],\n padding=\"SAME\") + biases\n leaky = tf.maximum(0.1 * conv, conv)\n\n # Discriminator Layers\n layer1 = discriminator_layer(leaky, \"A\", 64, 64, 2, training=True)\n layer2 = discriminator_layer(layer1, \"B\", 128, 64, 1, training=True)\n layer3 = discriminator_layer(layer2, \"C\", 128, 128, 2, training=True)\n layer4 = discriminator_layer(layer3, \"D\", 256, 128, 1, training=True)\n layer5 = discriminator_layer(layer4, \"E\", 256, 256, 2, training=True)\n layer6 = discriminator_layer(layer5, \"F\", 512, 256, 2, training=True)\n layer7 = discriminator_layer(layer6, \"G\", 512, 512, 2, training=True)\n layer8 = discriminator_layer(layer7, \"H\", 512, 512, 2, training=True)\n\n # Output Layer\n shape = int(np.prod(layer8.get_shape()[1:]))\n flat = tf.reshape(layer8, [-1, shape])\n weights = tf.get_variable(shape=[shape, 1], name=\"weights2\", dtype=tf.float32,\n initializer=tf.truncated_normal_initializer(stddev=1e-1))\n biases = tf.get_variable(shape=[1], name=\"biases2\", dtype=tf.float32,\n initializer=tf.constant_initializer(1.0))\n connect = tf.matmul(flat, weights) + biases\n\n return tf.maximum(0.1 * connect, 
connect)\n\n with tf.variable_scope(\"discriminator\", reuse=None):\n self.disc_gt = discriminator_network(target_output)\n\n with tf.variable_scope(\"discriminator\", reuse=True):\n self.disc_out = discriminator_network(architecture_output)\n\n # Network Loss\n #loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(\n # logits=self.disc_out, labels=tf.ones_like(self.disc_out)))\n loss = tf.reduce_mean(-tf.log(self.disc_out + 1e-12))\n\n return loss" ]
[ "0.64663506", "0.6339015", "0.6257454", "0.61735374", "0.6152628", "0.61100185", "0.60942584", "0.60613877", "0.6036121", "0.5997487", "0.591609", "0.59005904", "0.58983564", "0.5894166", "0.58659226", "0.5855777", "0.583677", "0.5803536", "0.577784", "0.5746447", "0.57333267", "0.57035905", "0.56889933", "0.56729734", "0.56706345", "0.56684357", "0.56588", "0.56492877", "0.56303245", "0.56270146" ]
0.78776544
0
basic test of deconv fprop. ngraph/tests/test_conv.py tests ng.deconvolution bprop
def test_deconv():
    # filter params
    R, S = 5, 5
    fshape = (R, S, 1)
    strides = 2
    filter_val_nz = np.arange(1, R * S + 1).reshape(R, S)
    filter_val = np.zeros(fshape)
    filter_val[:, :, 0] = filter_val_nz

    deconv = Deconvolution(fshape,
                           filter_init=ConstantInit(filter_val),
                           strides=strides,
                           padding=0,
                           dilation=1)

    N = ng.make_axis(name='N', length=1)  # batch
    image_shape = (1, 8, 8)  # CHW
    image_axes = ng.make_axes([ng.make_axis(name=nm, length=l)
                               for nm, l in zip('CHW', image_shape)])
    image_axes |= N
    image = ng.placeholder(axes=image_axes)

    output = deconv(image)

    with closing(ngt.make_transformer()) as transformer:
        comp = transformer.add_computation(ng.computation(output, image))
        input_val = np.zeros(image_shape + (N.length, ), dtype=float)
        input_val[0, 0, 0] = 1
        input_val[0, 5, 5] = 1
        input_val[0, 7, 7] = 1
        result = comp(input_val)
        feature_map = np.squeeze(result)

        assert (feature_map[:5, :5] == filter_val_nz).all()

        result2 = filter_val_nz.copy()
        result2[-1, -1] = 26
        assert (feature_map[10:15, 10:15] == result2).all()

        result3 = filter_val_nz.copy()
        result3[0, 0] = 26
        assert (feature_map[-5:, -5:] == result3).all()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_conv_inverts_deconv(transformer_factory, input_size, filter_size, padding, stride):\n\n # convolutions whose output size are not an even multiple of stride cannot be exactly inverted\n a = (input_size + sum(padding) - filter_size) % stride\n conv_output = utils.conv_output_dim(input_size, filter_size, padding, stride)\n deconv_output = utils.deconv_output_dim(conv_output, filter_size, padding, stride)\n\n assert deconv_output == (input_size - a), (\"Convolution and Deconvolution do not invert:\\n\"\n \"output ({}) != input ({}) - a ({})\\n\"\n \"filter: {}, padding: {}, stride: {}\"\n ).format(deconv_output, input_size, a,\n filter_size, padding, stride)", "def test_convolution_backprop(transformer_factory):\n N = 128\n C, K = 3, 2\n D, T = 1, 1\n H = W = 32\n R = S = 2\n\n padding = dict(pad_d=0, pad_h=0, pad_w=0)\n strides = dict(str_d=1, str_h=1, str_w=1)\n dilation = dict(dil_d=1, dil_h=1, dil_w=1)\n conv_params = padding.copy()\n conv_params.update(strides)\n conv_params.update(dilation)\n\n ax_i = ng.make_axes([ax.C, ax.D, ax.H, ax.W, ax.N])\n ax_f = ng.make_axes([ax.C, ax.T, ax.R, ax.S, ax.K])\n ax_i.set_shape((C, D, H, W, N))\n ax_f.set_shape((C, T, R, S, K))\n ax_o = ng.make_axes([\n ng.make_axis(roles=[ar.features_input]).named('C'),\n ng.make_axis(roles=[ar.features_0]).named('D'),\n ng.make_axis(roles=[ar.features_1]).named('H'),\n ng.make_axis(roles=[ar.features_2]).named('W'),\n ax.N\n ])\n\n ax_o[:-1].set_shape((\n K,\n output_dim(D, T, padding['pad_d'], strides['str_d']),\n output_dim(H, R, padding['pad_h'], strides['str_h']),\n output_dim(W, S, padding['pad_w'], strides['str_w']))\n )\n\n inputs = ng.placeholder(axes=ax_i)\n filters = ng.placeholder(axes=ax_f)\n\n # randomly initialize\n input_value = rng.uniform(-1, 1, ax_i)\n filter_value = rng.uniform(-1, 1, ax_f)\n\n assert input_value.shape == ax_i.lengths\n assert filter_value.shape == ax_f.lengths\n\n output = ng.sum(ng.convolution(conv_params, inputs, filters, ax_o), out_axes=())\n\n with ExecutorFactory() as factory:\n dcdf_sym_fun = factory.derivative(output, filters, inputs)\n dcdf_num_fun = factory.numeric_derivative(output, filters, .01, inputs)\n dcdf_sym_val = dcdf_sym_fun(filter_value, input_value)\n dcdf_num_val = dcdf_num_fun(filter_value, input_value)\n\n ng.testing.assert_allclose(dcdf_sym_val, dcdf_num_val, rtol=1)", "def test_conv2d(self):\n\n \"\"\" Testing creation\"\"\"\n builder = BuildConvLayer()\n self.assertEqual(builder.nx_graph.number_of_nodes(), 0, msg='The config graph is not empty')\n builder.build_graph(builder.define_graph())\n builder.compute_graph.create_session()\n\n self.assertEqual(builder.nx_graph.node['conv_layer']['config'].name, 'conv_layer')\n self.assertEqual(builder.nx_graph.node['conv_layer']['config'].scope, 'outer_scope')\n self.assertEqual(builder.nx_graph.node['conv_layer']['config'].filter, 3)\n self.assertEqual(builder.nx_graph.node['conv_layer']['config'].type, 'hidden')\n self.assertEqual(builder.nx_graph.node['conv_layer']['config'].activations, tf.nn.relu)\n self.assertEqual(builder.nx_graph.node['conv_layer']['config'].use_bias, True)\n self.assertEqual(builder.nx_graph.node['conv_layer']['config'].flatten_output, True)\n self.assertEqual(builder.nx_graph.node['conv_layer']['config'].padding, 'same')\n self.assertEqual(builder.nx_graph.node['conv_layer']['config'].stride, (1, 1))\n self.assertEqual(builder.nx_graph.node['conv_layer']['config'].kernel_size, (3, 3))\n self.assertEqual(builder.nx_graph.node['conv_layer']['config'].kernel_initializer, 
tf.zeros_initializer)\n self.assertEqual(builder.nx_graph.node['conv_layer']['config'].bias_initializer, tf.zeros_initializer)\n self.assertEqual(builder.nx_graph.node['conv_layer']['config'].tensorboard_verbosity, 2)\n\n self.assertEqual(builder.nx_graph.number_of_nodes(), 2, msg='Incorrect number of nodes in the nx graph')\n self.assertTrue('config' in builder.nx_graph.node['conv_layer'].keys(),\n msg='The config was not added to the graph')\n self.assertTrue('component_builder' in builder.nx_graph.node['conv_layer'].keys(),\n msg='The component builder was not added to the nx node.')\n with builder.compute_graph.tf_graph.as_default():\n self.assertEqual(len(tf.get_collection(tf.GraphKeys.SUMMARIES, scope='test_conv')), 3,\n msg='Tensorboard summaries missing')\n self.assertTrue(builder.nx_graph.node['input']['is_build'], msg='Is build parameter was not updated')\n self.assertTrue(builder.nx_graph.node['conv_layer']['is_build'], msg='Is build parameter was not updated')\n self.assertIsNotNone(builder.nx_graph.node['input']['output'], msg='The nx node output is None')\n self.assertIsNotNone(builder.nx_graph.node['conv_layer']['output'], msg='The nx node output is None')\n\n # get the var form the tf graphs\n with builder.compute_graph.tf_graph.as_default():\n train_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='test_conv')\n self.assertEqual(len(train_vars), 2, msg=\"Incorrect number of variables in the tf graph\")\n weights = train_vars[0]\n bias = train_vars[1]\n self.assertEqual(weights.name, 'test_conv/outer_scope/conv_layer/conv2d/kernel:0')\n self.assertEqual(bias.name, 'test_conv/outer_scope/conv_layer/conv2d/bias:0')\n\n builder.compute_graph.initialize_graph_variables()\n result = builder.compute_graph.session.run(builder.compute_graph.hidden['conv_layer'],\n feed_dict={builder.compute_graph.inputs['input']: np.ones((1, 3, 3, 3))})[0]\n self.assertEqual(result.tolist(), [0] * 27, msg='Wrong output')", "def deconvolution(obs, green, lambd):\n\n nr, nt = obs.shape\n num = np.zeros(nt)\n den = np.zeros(nt)\n\n for ir in range(len(obs)):\n\n OBS = fft(obs[ir, :])\n GRE = fft(green[ir, :])\n\n # Sum all\n num = num + np.conj(GRE) * OBS\n den = den + np.conj(GRE) * GRE\n\n # Get maximum value of denominator\n maxden = np.max(np.abs(den))\n\n # Waterlevel\n wl = lambd * maxden\n\n # Deconvolution using the waterlevel\n src = np.real(ifft(num / (den+wl).T))\n\n # Compute fit to original data\n res = obs\n chi0 = 0.5 * np.sum(np.sum(res ** 2))\n\n syn = compute_synth(green, src)\n res = obs - syn\n chi = 0.5 * np.sum(np.sum(res ** 2))\n\n print(chi/chi0)\n\n return src, syn", "def test_simple(self):\n module = PyTorchConv2d(3, 3, filter=Parameter(torch.tensor([[[[1.0, 2, 3], [4, 5, 6], [7, 8, 9]]]])),\n bias=Parameter(torch.tensor([0.0])))\n print(\"filter and bias parameters: \", list(module.parameters()))\n # input = torch.arange(end=16, dtype=torch.int32, requires_grad=True).view(4, 4)\n input = torch.tensor([[[[1.0, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]]]], requires_grad=True)\n output = module(input)\n print(\"forward output: \", output)\n expected_output = torch.tensor([[[[348.0, 393], [528, 573]]]])\n self.assertTrue(output.equal(expected_output))\n output.backward(torch.tensor([[[[1.0, 2], [3, 4]]]]))\n print(\"gradient for the input: \", input.grad)\n self.assertTrue(input.grad.equal(torch.tensor([[[[1.0, 4, 7, 6],\n [7, 23, 33, 24],\n [19, 53, 63, 42],\n [21, 52, 59, 36]]]])))\n\n \"\"\"\n Expected output:\n filter and bias parameters: 
[Parameter containing:\n tensor([[ 1, 2, 3],\n [ 4, 5, 6],\n [ 7, 8, 9]]), Parameter containing:\n tensor([ 0])]\n forward output: tensor([[ 348, 393],\n [ 528, 573]])\n gradient for the input: tensor([[ 0, 1, 2, 3],\n [ 2, 11, 17, 15],\n [ 8, 29, 35, 27],\n [ 14, 37, 42, 27]])\n \"\"\"", "def test_conv_consistency(self) -> None:\n x = Input(\n 'const1',\n [1, 3, 3, 3],\n Float32(),\n )\n w = Constant(\n 'weight',\n Float32(),\n np.zeros([1, 2, 2, 3])\n )\n input_ops = {'X': cast(Operator, x), 'W': cast(Operator, w)}\n\n Conv(\n 'conv_under_test',\n [1, 3, 3, 3],\n Float32(),\n input_ops,\n pads=[1, 2, 1, 2],\n strides=[2, 2]\n )\n\n print(\"Consistency test for conv operator passed!\")", "def deconvolution_cce(shape_filter, shape_x, # pylint: disable=R0913, R0914\n input_sizes, strides, pads, dilations=(1, 1, 1, 1),\n filter_dtype='float16', x_dtype='float16',\n res_dtype='float16', bias=False, offset_x=0,\n fusion_para=None,\n kernel_name=\"deconvolution_cce\"):\n\n def _ceil(x_1, x_2):\n if x_2 == 0:\n raise RuntimeError(\"Division by zero\")\n return (x_1 + x_2 - 1) // x_2\n if fusion_para is None:\n fusion_para = {\"input_memory_type\": 0,\n \"output_memory_type\": 0,\n \"valid_shape\": (),\n \"slice_offset\": (),\n \"output_offset\": (),\n \"l1_fusion_type\": -1,\n \"fmap_l1_addr_flag\": False,\n \"fmap_l1_valid_size\": 0}\n\n if filter_dtype == \"int8\" and x_dtype == \"int8\":\n shape_filter = [shape_filter[1], shape_filter[0],\n shape_filter[2], shape_filter[3]]\n res = comm.check_conv2dbp_input_params(shape_filter, shape_x, input_sizes,\n strides, pads, dilations,\n filter_dtype, x_dtype,\n res_dtype, kernel_name, fusion_para)\n\n shape_filter, shape_x, input_sizes, strides, pads, dilations, \\\n filter_dtype, x_dtype, res_dtype, kernel_name = res\n\n dedy_batch, dedy_channel, dedy_h, dedy_w = shape_x\n filter_batch, filter_channel, filter_h, filter_w = shape_filter\n\n _, dy_k0, _ = CUBE_MKN[x_dtype]['mac']\n _, w_k0, w_n0 = CUBE_MKN[filter_dtype]['mac']\n shape_dedy = (dedy_batch,\n _ceil(dedy_channel, dy_k0), dedy_h, dedy_w, dy_k0)\n filter_channel = comm.align(filter_channel, w_n0)\n if filter_dtype == \"int8\" and x_dtype == \"int8\":\n shape_filter_frac = (\n _ceil(filter_batch, w_k0)*filter_h*filter_w,\n _ceil(filter_channel, w_n0), w_n0, w_k0)\n else:\n shape_filter_frac = (\n _ceil(filter_channel, w_n0)*filter_h*filter_w,\n _ceil(filter_batch, w_k0), w_k0, w_n0)\n tensor_dedy = tvm.placeholder(shape_dedy, name=\"dedy\", dtype=x_dtype)\n\n tensor_filter_frac = tvm.placeholder(shape_filter_frac,\n name=\"filter\", dtype=filter_dtype)\n\n if bias:\n tensor_bias = tvm.placeholder(\n (filter_channel,), name='tensor_bias', dtype=res_dtype\n )\n else:\n tensor_bias = None\n\n dedx = te.lang.cce.conv2d_backprop_input_compute(\n filters=tensor_filter_frac,\n out_backprop=tensor_dedy,\n filter_sizes=shape_filter,\n input_sizes=input_sizes,\n strides=strides,\n padding=pads,\n dilations=dilations,\n res_dtype=res_dtype,\n tensor_bias=tensor_bias,\n offset_x=offset_x,\n fusion_para=fusion_para,\n kernel_name=kernel_name\n )\n if bias:\n tensor_list = [tensor_dedy, tensor_filter_frac, tensor_bias, dedx]\n else:\n tensor_list = [tensor_dedy, tensor_filter_frac, dedx]\n\n with tvm.target.cce():\n sch = generic.auto_schedule(dedx)\n\n config = {\n \"name\": kernel_name,\n \"tensor_list\": tensor_list\n }\n\n te.lang.cce.cce_build_code(sch, config)", "def deconv(depth, nfilter, ksize=3, stride=1, \r\n pad_in=0, pad_out=0, groups=1,\r\n dilation=1, pad_mode='zeros',\r\n bias=True, 
lrelu=None):\r\n assert (depth>0 and nfilter>0 and ksize>0 and ksize%2==1 and \r\n stride>0 and pad_in>=0 and pad_out>=0 and dilation>=1 and\r\n groups>=1 and depth%groups==0 and nfilter%groups==0)\r\n deconv_ = nn.ConvTranspose2d(depth, nfilter, ksize, stride, \r\n pad_in, pad_out, groups, bias, dilation,\r\n pad_mode)\r\n if lrelu is not None:\r\n deconv_ = nn.Sequential(deconv_, \r\n nn.LeakyReLU(lrelu, inplace=True))\r\n return deconv_", "def test_DeformableConvolution():\n try:\n ctx = mx.gpu()\n _ = mx.nd.array([0], ctx=ctx)\n except mx.base.MXNetError:\n pytest.skip(\"deformable_convolution only supports GPU\")\n net = nn.HybridSequential()\n net.add(\n nn.DeformableConvolution(10, kernel_size=(3, 3), strides=1, padding=0),\n nn.DeformableConvolution(10, kernel_size=(3, 2), strides=1, padding=0, activation='relu',\n offset_use_bias=False, use_bias=False),\n nn.DeformableConvolution(10, kernel_size=(3, 2), strides=1, padding=0, activation='relu',\n offset_use_bias=False),\n nn.DeformableConvolution(10, kernel_size=(3, 2), strides=1, padding=0, activation='relu',\n use_bias=False),\n nn.DeformableConvolution(10, kernel_size=(3, 2), strides=1, padding=0, offset_use_bias=False, use_bias=False),\n nn.DeformableConvolution(10, kernel_size=(3, 2), strides=1, padding=0, offset_use_bias=False),\n nn.DeformableConvolution(12, kernel_size=(3, 2), strides=1, padding=0, use_bias=False),\n nn.DeformableConvolution(12, kernel_size=(3, 2), strides=1, padding=0, use_bias=False, num_deformable_group=4),\n )\n\n net.initialize(force_reinit=True, ctx=ctx)\n net.hybridize()\n\n x = mx.nd.random.uniform(shape=(8, 5, 30, 31), ctx=ctx)\n with mx.autograd.record():\n y = net(x)\n y.backward()", "def test_conv2d_fft(self):\n\t\t\n\t\tdetails = self.watcher.describe(layers=[self.first_layer], conv2d_fft=True)\n\t\tprint(details)\n\t\t\n\t\tdetails = self.watcher.analyze(layers=[self.first_layer], conv2d_fft=True)\n\t\tactual = details.alpha.to_numpy()[0]\n\t\texpected = 2.144\n\t\tself.assertAlmostEqual(actual,expected, delta=0.01)", "def test_conv_layer_forward():\n #Setup layer and inputs\n conv = BinConv2d(2,1, [2,2],stride=1, bias=False)\n conv.weight.data.copy_(torch.Tensor([ [0.5,- 0.5] , [-0.5, 0.5],\\\n [1,-1] , [0.5, 0.5],]).view(1,2,2,2))\n\n\n inputs = torch.Tensor([ [1.1,2.1],[15,.01],[1,0],[1.,1.0]] ).view(1,2,2,2)\n\n # Check the result\n result = conv(inputs)\n expected_result = torch.nn.functional.conv2d(inputs, torch.Tensor([ [sign(0.5),sign(-0.5)] , [sign(-0.5), sign(0.5)],\\\n [sign(1),sign(-1)] , [sign(0.5), sign(0.5)],]).view(1,2,2,2) ,None)\n\n assert torch.all(torch.eq(result, expected_result))\n\n\n # Redo the test with a bias\n conv2 = BinConv2d(2,1, [2,2],stride=1, bias=True)\n conv2.weight.data.copy_(torch.Tensor([ [0.5,- 0.5] , [-0.5, 0.5],\\\n [1,-1] , [0.5, 0.5],]).view(1,2,2,2))\n \n\n conv2.bias.data.copy_(torch.Tensor([33.3]))\n result2 = conv2(inputs)\n expected_result2 = torch.nn.functional.conv2d(inputs, torch.Tensor([ [sign(0.5),sign(-0.5)] , [sign(-0.5), sign(0.5)],\\\n [sign(1),sign(-1)] , [sign(0.5), sign(0.5)],]).view(1,2,2,2) ,torch.Tensor([33.3]))\n\n assert torch.all(torch.eq(result2, expected_result2))", "def test_conv_layer_train():\n\n #Setup layer and input\n weight = torch.Tensor([ [0.5,- 0.5] , [-0.5, 0.5],\\\n [1,-1] , [0.5, 0.5],]).view(1,2,2,2)\n inputs = torch.Tensor([ [1.1,2.1],[15,.01],[1,0],[1.,1.0]] ).view(1,2,2,2)\n\n expected_result = torch.nn.functional.conv2d(inputs, torch.Tensor([ [sign(0.5),sign(-0.5)] , [sign(-0.5), sign(0.5)],\\\n 
[sign(1),sign(-1)] , [sign(0.5), sign(0.5)],]).view(1,2,2,2) ,None)\n\n\n conv = BinConv2d(2,1, [2,2],stride=1, bias=False)\n conv.weight.data.copy_(weight)\n\n # Check the forward pass\n assert torch.all(torch.eq(expected_result, conv(inputs)))\n\n \n conv.train(False)\n # Check the forward pass\n assert torch.all(torch.eq(expected_result, conv(inputs)))\n\n # Look at layer's weight on eval mode.\n assert torch.all(torch.eq(conv.weight, BinaryConnectDeterministic.apply(weight)))\n\n conv.train(True)\n # Check the forward pass\n assert torch.all(torch.eq(expected_result, conv(inputs)))\n\n # Look at layer's weight on training mode.\n assert torch.all(torch.eq(conv.weight, weight))", "def test_convTranpose2d(self, _, module, inputs, filters, bias=None):\n\n utils.compare_tracing_methods(\n module, inputs, filters, fusible_ops={\"aten::_convolution\"}\n )", "def __init__(self, incoming, W=None, b=tf.zeros, ksize: int = None, num_outputs: int = None,\n weight_initializer=None, a=tf.nn.elu, output_shape=None, strides=(1, 2, 2, 1), padding='SAME',\n data_format='NHWC',\n name='DeConvLayer'):\n super(DeConvLayer, self).__init__()\n with tf.variable_scope(name) as self.layer_scope:\n self.incoming, self.incoming_shape = get_input(incoming)\n \n # Set init for W and b\n if all(p is not None for p in [weight_initializer, ksize, num_outputs]):\n W = tofov(weight_initializer,\n shape=(ksize, ksize, num_outputs, incoming.get_output_shape()[-1]),\n var_params=dict(name='W_deconv'))\n else:\n W = tofov(W, shape=None, var_params=dict(name='W_deconv'))\n b = tofov(b, shape=W.get_shape().as_list()[-2], var_params=dict(name='b_deconv'))\n \n if output_shape is None:\n if padding == 'SAME' and strides[0] == 1:\n if len(self.incoming_shape) == 5:\n output_shape = [self.incoming_shape[0], self.incoming_shape[1],\n self.incoming_shape[2] * strides[1], self.incoming_shape[3] * strides[2],\n W.get_shape().as_list()[-2] * strides[3]]\n else:\n output_shape = [self.incoming_shape[0], self.incoming_shape[1] * strides[1],\n self.incoming_shape[2] * strides[2], W.get_shape().as_list()[-2] * strides[3]]\n else:\n raise AttributeError(\"Automatic output_shape calculation not implemented for strides!=1 in \"\n \"first dimension\")\n \n if isinstance(padding, int):\n if len(self.incoming_shape) == 5:\n self.padding = [[0, 0], [0, 0], [padding, padding], [padding, padding], [0, 0]]\n elif len(self.incoming_shape) == 4:\n self.padding = [[0, 0], [padding, padding], [padding, padding], [0, 0]]\n else:\n raise ValueError(\"invalid input shape\")\n else:\n self.padding = padding\n \n self.a = a\n self.b = b\n self.W = W\n \n self.output_shape = output_shape\n self.strides = strides\n \n self.data_format = data_format\n \n self.out = None\n self.name = name", "def inference_fconv(input_shape=[None, 22,22,10,1],\n input_shape_m=[None, 22,22,10,3],\n n_filters=[1, 32, 32, 32],\n filter_sizes=[3, 2, 3, 2],\n corruption=False):\n \n # input to the network\n x = tf.placeholder(\n tf.float32, input_shape, name='x')\n m = tf.placeholder(\n tf.float32, input_shape_m, name='m')\n t = tf.placeholder(\n tf.float32, input_shape, name='t')\n keep_prob = tf.placeholder(tf.float32, name='keep_prob') #dropout (keep probability)\n \n encoder_i = []\n encoder_m = []\n encoder_main = []\n shapes_main = []\n shapes_i = []\n shapes_m = []\n \n #keep_prob=1.\n ### BRANCH 3d images\n \n with tf.variable_scope('img_conv1_1') as scope:\n shapes_i.append(x.get_shape().as_list())\n nfeaturemap = 128\n W = weight_variable([3, 3, 3, input_shape[4], 
nfeaturemap])\n b = bias_variable([nfeaturemap])\n output = tf.nn.relu(conv3d(x, W) + b)\n encoder_i.append(W)\n input_nfeaturemap = nfeaturemap\n img_1 = output\n \n #current_input = tf.nn.dropout(current_input, keep_prob, [tf.shape(x)[0],1,1,1,input_nfeaturemap])\n ### BRANCH motion parameters\n with tf.variable_scope('motion_conv1_1') as scope:\n shapes_m.append(m.get_shape().as_list())\n nfeaturemap = 128\n W = weight_variable([3, 3, 3, input_shape_m[4], nfeaturemap])\n b = bias_variable([nfeaturemap])\n output = tf.nn.relu(conv3d(m, W) + b)\n encoder_m.append(W)\n input_nfeaturemap = nfeaturemap\n motion_1 = output\n \n \n current_input = tf.multiply(img_1,motion_1)\n \n with tf.variable_scope('img_conv1_3') as scope:\n shapes_i.append(current_input.get_shape().as_list())\n nfeaturemap = 256\n W = weight_variable([1, 1, 1, input_nfeaturemap, nfeaturemap])\n b = bias_variable([nfeaturemap])\n output = tf.nn.relu(conv3d(current_input, W) + b)\n encoder_i.append(W)\n input_nfeaturemap = nfeaturemap\n img_2 = output\n \n\n \n #current_input = tf.nn.dropout(current_input, keep_prob, [tf.shape(x)[0],1,1,1,input_nfeaturemap])\n # Max pooling\n motion_1 = max_pool_2x2(motion_1)\n input_nfeaturemap = 128\n \n with tf.variable_scope('motion_conv1_3') as scope:\n shapes_m.append(motion_1.get_shape().as_list())\n nfeaturemap = 256\n W = weight_variable([2, 2, 2, input_nfeaturemap, nfeaturemap])\n b = bias_variable([nfeaturemap])\n output = tf.nn.relu(conv3d(motion_1, W) + b)\n encoder_m.append(W)\n input_nfeaturemap = nfeaturemap\n motion_2 = output\n \n \n # resize upsampling\n motion_2 = resize_volumes(motion_2, 2, 2, 2)\n \n #current_input = tf.concat([branch_image, branch_motion], axis=4)\n #input_nfeaturemap = 512\n current_input = tf.multiply(img_2,motion_2)\n input_nfeaturemap = 256\n #print tf.shape(current_input)[-1]\n #tf.shape(current_input)[-1]\n '''\n with tf.variable_scope('img_conv1_1') as scope:\n shapes_i.append(x.get_shape().as_list())\n nfeaturemap = 256\n W = weight_variable([3, 3, 3, 1, nfeaturemap])\n b = bias_variable([nfeaturemap])\n output = tf.nn.relu(conv3d(x, W) + b)\n encoder_i.append(W)\n input_nfeaturemap = nfeaturemap\n current_input = output\n \n current_input = tf.nn.dropout(current_input, keep_prob, [tf.shape(x)[0],1,1,1,input_nfeaturemap])\n \n \n with tf.variable_scope('img_conv1_3') as scope:\n shapes_i.append(current_input.get_shape().as_list())\n nfeaturemap = 128\n W = weight_variable([1, 1, 1, input_nfeaturemap, nfeaturemap])\n b = bias_variable([nfeaturemap])\n output = tf.nn.relu(conv3d(current_input, W) + b)\n encoder_i.append(W)\n input_nfeaturemap = nfeaturemap\n current_input = output\n \n branch_image = current_input\n\n ### BRANCH motion parameters\n with tf.variable_scope('motion_conv1_1') as scope:\n shapes_m.append(m.get_shape().as_list())\n nfeaturemap = 64\n W = weight_variable([3, 3, 3, 3, nfeaturemap])\n b = bias_variable([nfeaturemap])\n output = tf.nn.relu(conv3d(m, W) + b)\n encoder_m.append(W)\n input_nfeaturemap = nfeaturemap\n current_input = output\n \n \n #current_input = tf.nn.dropout(current_input, keep_prob, [tf.shape(x)[0],1,1,1,input_nfeaturemap])\n \n with tf.variable_scope('motion_conv1_3') as scope:\n shapes_m.append(current_input.get_shape().as_list())\n nfeaturemap = 128\n W = weight_variable([1, 1, 1, input_nfeaturemap, nfeaturemap])\n b = bias_variable([nfeaturemap])\n output = tf.nn.relu(conv3d(current_input, W) + b)\n encoder_m.append(W)\n input_nfeaturemap = nfeaturemap\n current_input = output\n\n branch_motion = 
current_input\n \n #current_input = tf.concat([branch_image, branch_motion], axis=4)\n #input_nfeaturemap = 256\n current_input = tf.multiply(branch_image,branch_motion)\n #print tf.shape(current_input)[-1]\n #tf.shape(current_input)[-1]\n '''\n with tf.variable_scope('conv3_1') as scope:\n shapes_main.append(current_input.get_shape().as_list())\n nfeaturemap = 128\n W = weight_variable([1, 1, 1, input_nfeaturemap, nfeaturemap])\n b = bias_variable([nfeaturemap])\n output = tf.nn.relu(conv3d(current_input, W) + b)\n encoder_main.append(W)\n input_nfeaturemap = nfeaturemap\n current_input = output\n \n # Max pooling\n #current_input = max_pool_2x2(current_input)\n \n with tf.variable_scope('conv3_2') as scope:\n shapes_main.append(current_input.get_shape().as_list())\n nfeaturemap = 128\n W = weight_variable([2, 2, 2, input_nfeaturemap, nfeaturemap])\n b = bias_variable([nfeaturemap])\n output = tf.nn.relu(conv3d(current_input, W) + b)\n encoder_main.append(W)\n input_nfeaturemap = nfeaturemap\n current_input = output\n \n \n # store the latent representation\n z = current_input\n z_input_nfeaturemap = input_nfeaturemap\n '''\n encoder_main.reverse()\n encoder_i.reverse()\n encoder_m.reverse()\n \n shapes_main.reverse()\n shapes_i.reverse()\n shapes_m.reverse()\n '''\n \n with tf.variable_scope('deconv_i_1') as scope:\n shapes_i.append(z.get_shape().as_list())\n nfeaturemap = 16\n W = weight_variable([3, 3, 3, z_input_nfeaturemap, nfeaturemap])\n b = bias_variable([nfeaturemap])\n output = tf.nn.relu(conv3d(z, W) + b)\n encoder_i.append(W)\n input_nfeaturemap = nfeaturemap\n current_input = output\n \n #current_input = tf.nn.dropout(current_input, keep_prob, [tf.shape(x)[0],1,1,1,input_nfeaturemap])\n \n with tf.variable_scope('deconv_i_2') as scope:\n shapes_i.append(current_input.get_shape().as_list())\n nfeaturemap = 1\n W = weight_variable([1, 1, 1, input_nfeaturemap, nfeaturemap])\n b = bias_variable([nfeaturemap])\n output = conv3d(current_input, W) + b\n encoder_i.append(W)\n input_nfeaturemap = nfeaturemap\n y = output\n \n \n with tf.variable_scope('deconv_m_1') as scope:\n shapes_i.append(z.get_shape().as_list())\n nfeaturemap = 32\n W = weight_variable([3, 3, 3, z_input_nfeaturemap, nfeaturemap])\n b = bias_variable([nfeaturemap])\n output = tf.nn.relu(conv3d(z, W) + b)\n encoder_m.append(W)\n input_nfeaturemap = nfeaturemap\n current_input = output\n \n with tf.variable_scope('deconv_m_2') as scope:\n shapes_i.append(current_input.get_shape().as_list())\n nfeaturemap = 3\n W = weight_variable([1, 1, 1, input_nfeaturemap, nfeaturemap])\n b = bias_variable([nfeaturemap])\n output = conv3d(current_input, W) + b\n encoder_m.append(W)\n input_nfeaturemap = nfeaturemap\n m_hat = output\n \n \n \n \n loss_m = tf.reduce_mean(tf.square(m-m_hat))\n loss_i = tf.reduce_mean(tf.square(t-y))\n cost = loss_i + loss_m\n\n # %%\n return {'x': x, 't':t, 'm': m, 'm_hat':m_hat, 'y': y, 'cost': cost, 'loss_i':loss_i, 'loss_m':loss_m, 'keep_prob': keep_prob, 'encoder_main':encoder_main, 'encoder_i':encoder_i, 'encoder_m':encoder_m}", "def test_filter_conv2D_layer_types(self):\n\t\tprint(f\"LAYER_TYPE.CONV2D = {LAYER_TYPE.CONV2D}\")\n\t\tdetails = self.watcher.describe(layers=[LAYER_TYPE.CONV2D])\n\t\tprint(details)\n\n\t\tconv2DLayers = details[details['layer_type']==str(ww.LAYER_TYPE.CONV2D)]\n\t\tconv2DCount = len(conv2DLayers)\n\t\tself.assertEqual(conv2DCount, 8, \"# conv2D layers: {} found\".format(conv2DCount))\n\t\tnonConv2DLayers = 
details[details['layer_type']!=str(LAYER_TYPE.CONV2D)]\n\t\tnonConv2DCount = len(nonConv2DLayers)\n\t\tself.assertEqual(nonConv2DCount, 0, \"VGG11 has non conv2D layers: {} found\".format(nonConv2DCount))", "def test_basic(self):\r\n if (not theano.tensor.nnet.conv.imported_scipy_signal and\r\n theano.config.cxx == \"\"):\r\n raise SkipTest(\"conv2d tests need SciPy or a c++ compiler\")\r\n\r\n self.validate((1, 4, 5), (2, 2, 3), verify_grad=True)\r\n self.validate((7, 5), (5, 2, 3), verify_grad=False)\r\n self.validate((3, 7, 5), (2, 3), verify_grad=False)\r\n self.validate((7, 5), (2, 3), verify_grad=False)", "def inference_fconv_supercompact(input_shape=[None, 22,22,10,1],\n input_shape_m=[None, 22,22,10,3],\n n_filters=[1, 32, 32, 32],\n filter_sizes=[3, 2, 3, 2],\n corruption=False):\n \n # input to the network\n x = tf.placeholder(\n tf.float32, input_shape, name='x')\n m = tf.placeholder(\n tf.float32, input_shape_m, name='m')\n t = tf.placeholder(\n tf.float32, input_shape, name='t')\n keep_prob = tf.placeholder(tf.float32, name='keep_prob') #dropout (keep probability)\n \n encoder_i = []\n encoder_m = []\n encoder_main = []\n shapes_main = []\n shapes_i = []\n shapes_m = []\n \n #keep_prob=1.\n ### BRANCH 3d images\n with tf.variable_scope('img_conv1_1') as scope:\n shapes_i.append(x.get_shape().as_list())\n nfeaturemap = 256\n W = weight_variable([3, 3, 3, 1, nfeaturemap])\n b = bias_variable([nfeaturemap])\n output = tf.nn.relu(conv3d(x, W) + b)\n encoder_i.append(W)\n input_nfeaturemap = nfeaturemap\n current_input = output\n \n current_input = tf.nn.dropout(current_input, keep_prob, [tf.shape(x)[0],1,1,1,input_nfeaturemap])\n \n with tf.variable_scope('img_conv1_2') as scope:\n shapes_i.append(current_input.get_shape().as_list())\n nfeaturemap = 128\n W = weight_variable([1, 1, 1, input_nfeaturemap, nfeaturemap])\n b = bias_variable([nfeaturemap])\n output = tf.nn.relu(conv3d(current_input, W) + b)\n encoder_i.append(W)\n input_nfeaturemap = nfeaturemap\n current_input = output\n \n \n branch_image = current_input\n\n ### BRANCH motion parameters\n with tf.variable_scope('motion_conv1_1') as scope:\n shapes_m.append(m.get_shape().as_list())\n nfeaturemap = 128\n W = weight_variable([3, 3, 3, 3, nfeaturemap])\n b = bias_variable([nfeaturemap])\n output = tf.nn.relu(conv3d(m, W) + b)\n encoder_m.append(W)\n input_nfeaturemap = nfeaturemap\n current_input = output\n \n \n #current_input = tf.nn.dropout(current_input, keep_prob, [tf.shape(x)[0],1,1,1,input_nfeaturemap])\n\n branch_motion = current_input\n \n #current_input = tf.concat([branch_image, branch_motion], axis=4)\n #input_nfeaturemap = 256\n current_input = tf.multiply(branch_image,branch_motion)\n #print tf.shape(current_input)[-1]\n #tf.shape(current_input)[-1]\n \n with tf.variable_scope('conv3_1') as scope:\n shapes_main.append(current_input.get_shape().as_list())\n nfeaturemap = 128\n W = weight_variable([1, 1, 1, input_nfeaturemap, nfeaturemap])\n b = bias_variable([nfeaturemap])\n output = tf.nn.relu(conv3d(current_input, W) + b)\n encoder_main.append(W)\n input_nfeaturemap = nfeaturemap\n current_input = output\n \n \n\n # store the latent representation\n z = current_input\n z_input_nfeaturemap = input_nfeaturemap\n '''\n encoder_main.reverse()\n encoder_i.reverse()\n encoder_m.reverse()\n \n shapes_main.reverse()\n shapes_i.reverse()\n shapes_m.reverse()\n '''\n \n \n \n #current_input = tf.nn.dropout(current_input, keep_prob, [tf.shape(x)[0],1,1,1,input_nfeaturemap])\n \n with tf.variable_scope('deconv_i_2') 
as scope:\n shapes_i.append(current_input.get_shape().as_list())\n nfeaturemap = 1\n W = weight_variable([1, 1, 1, z_input_nfeaturemap, nfeaturemap])\n b = bias_variable([nfeaturemap])\n output = conv3d(z, W) + b\n encoder_i.append(W)\n input_nfeaturemap = nfeaturemap\n y = output\n \n \n with tf.variable_scope('deconv_m_2') as scope:\n shapes_i.append(current_input.get_shape().as_list())\n nfeaturemap = 3\n W = weight_variable([1, 1, 1, z_input_nfeaturemap, nfeaturemap])\n b = bias_variable([nfeaturemap])\n output = conv3d(z, W) + b\n encoder_m.append(W)\n input_nfeaturemap = nfeaturemap\n m_hat = output\n \n \n \n \n loss_m = tf.reduce_mean(tf.square(m-m_hat))\n loss_i = tf.reduce_mean(tf.square(t-y))\n cost = loss_i + loss_m\n\n # %%\n return {'x': x, 't':t, 'm': m, 'm_hat':m_hat, 'y': y, 'cost': cost, 'loss_i':loss_i, 'loss_m':loss_m, 'keep_prob': keep_prob, 'encoder_main':encoder_main, 'encoder_i':encoder_i, 'encoder_m':encoder_m}", "def resnet50_v1b_deconv(**kwargs):\n return get_deconv_resnet('resnet50_v1b', **kwargs)", "def deconvolution_fusion_para(x, y):\n input_memory_type = x.get(\"addr_type\") \\\n if \"addr_type\" in x else 0\n output_memory_type = y.get(\"addr_type\") \\\n if \"addr_type\" in y else 0\n valid_shape = x.get(\"valid_shape\") \\\n if \"valid_shape\" in x else ()\n slice_offset = x.get(\"slice_offset\") \\\n if \"slice_offset\" in x else ()\n output_offset = y.get(\"slice_offset\") \\\n if \"slice_offset\" in y else ()\n l1_fusion_type = x.get(\"L1_fusion_type\") \\\n if \"L1_fusion_type\" in x else -1\n fmap_l1_addr_flag = x.get(\"L1_addr_flag\", False)\n fmap_l1_valid_size = x.get(\"L1_valid_size\", 0)\n\n\n\n l1_fusion_enable_flag = get_L1_info(\"L1_fusion_enabled\")\n\n if input_memory_type not in (0, 1, 2):\n args_dict = {\n \"errCode\": \"E65008\",\n \"input_memory_type_range\": \"(0, 1, 2)\",\n \"input_memory_type\": str(input_memory_type)\n }\n raise RuntimeError(args_dict,\n err_man.get_error_message(args_dict))\n if output_memory_type not in (0, 1, 2):\n args_dict = {\n \"errCode\": \"E65009\",\n \"output_memory_type_range\": \"(0, 1, 2)\",\n \"output_memory_type\": str(output_memory_type)\n }\n raise RuntimeError(args_dict,\n err_man.get_error_message(args_dict))\n if valid_shape and not slice_offset:\n reason = \"valid shape exists, slice shape cannot be []\"\n args_dict = {\n \"errCode\": \"E60108\",\n \"reason\": reason\n }\n raise RuntimeError(args_dict,\n err_man.get_error_message(args_dict))\n if valid_shape and not output_offset:\n reason = \"valid shape exists, output offset cannot be []\"\n args_dict = {\n \"errCode\": \"E60108\",\n \"reason\": reason\n }\n raise RuntimeError(args_dict,\n err_man.get_error_message(args_dict))\n\n valid_shape = shape_to_list(valid_shape)\n slice_offset = shape_to_list(slice_offset)\n output_offset = shape_to_list(output_offset)\n\n if not l1_fusion_enable_flag:\n input_memory_type = 0\n output_memory_type = 0\n valid_shape = []\n slice_offset = []\n output_offset = []\n l1_fusion_type = -1\n fmap_l1_addr_flag = False\n fmap_l1_valid_size = 0\n\n fusion_para = {\"input_memory_type\": input_memory_type,\n \"output_memory_type\": output_memory_type,\n \"valid_shape\": valid_shape,\n \"slice_offset\": slice_offset,\n \"output_offset\": output_offset,\n \"l1_fusion_type\": l1_fusion_type,\n \"fmap_l1_addr_flag\": fmap_l1_addr_flag,\n \"fmap_l1_valid_size\": fmap_l1_valid_size}\n\n return fusion_para", "def deconv_layer(self, inputs, field_size, channels_size,\n initializer_type, name, act_func=tf.nn.relu):\n batch, 
height, width, in_channels = inputs.get_shape().as_list()\n #shape = tf.shape(inputs)\n assert in_channels == channels_size[0], (\n 'Number of input channels doe not match filter inputs channels.'\n )\n with tf.variable_scope(name):\n channels_size.reverse() # now [out_c, in_c]\n filter_size = field_size + channels_size\n bias_size = [channels_size[0]]\n\n if initializer_type:\n initializer = tf.contrib.layers.xavier_initializer()\n else:\n initializer = tf.truncated_normal_initializer(stddev=.1)\n\n weights = tf.get_variable('W', filter_size, initializer=initializer)\n biases = tf.get_variable(\n 'b', bias_size, initializer=tf.constant_initializer(.1))\n\n #target_shape_tensor = tf.stack([shape[0], height, width, channels_size[0]])\n conv = tf.nn.conv2d_transpose(\n inputs,\n weights,\n #target_shape_tensor,\n [batch, height, width, channels_size[0]],\n [1, 1, 1, 1],\n padding='SAME')\n conv_bias = tf.nn.bias_add(conv, biases)\n if act_func == None:\n output = conv_bias\n else:\n output = act_func(conv_bias)\n #set_shape does not accept tensor\n #output.set_shape([batch, height, width, channels_size[0]])\n #this sets first size to none. why? Not used.\n #output = tf.reshape(output, target_shape_tensor)\n\n return output", "def test_convolution():\n # Default test\n inputs_shape = [3,3,4,5,3]\n filters_shape = [3,1,4,4,3]\n test_convolution_for_parameters(inputs_shape, filters_shape,\n \"Default test\")\n # All dimensions 1\n inputs_shape = [1,1,1,1,1]\n filters_shape = [1,1,1,1,1]\n test_convolution_for_parameters(inputs_shape, filters_shape,\n \"Input and filter dimensions 1\")\n # Filter spans all dimensions\n # This will lead to a failure for theano 2d3d for some reason\n # (for now we ignore this and remove theano2d3d for this test\n inputs_shape = [3,3,4,5,3]\n filters_shape = [3,3,4,5,3]\n test_convolution_for_parameters(inputs_shape, filters_shape,\n \"Filter dimension = Input dimension\")\n # Filter smaller for all dimensions\n inputs_shape = [3,3,4,5,3]\n filters_shape = [3,2,2,2,3]\n test_convolution_for_parameters(inputs_shape, filters_shape, \n \"Filter dimension < all Input dimension\")\n # 1,1,1,1,1 filter\n # Filter smaller for all dimensions\n inputs_shape = [3,3,4,5,1]\n filters_shape = [3,1,1,1,1]\n test_convolution_for_parameters(inputs_shape, filters_shape, \n \"Filter dimension 1 everywhere\")", "def regi_net_core_deconv(vol_size, enc_nf, dec_nf, full_size=True, src=None, tgt=None, src_feats=1, tgt_feats=1):\n ndims = len(vol_size)\n assert ndims in [1, 2, 3], \"ndims should be one of 1, 2, or 3. 
found: %d\" % ndims\n deconv_layer = getattr(KL, 'DeConv%dD' % ndims)\n\n # inputs\n if src is None:\n src = Input(shape=[*vol_size, src_feats])\n if tgt is None:\n tgt = Input(shape=[*vol_size, tgt_feats])\n x_in = concatenate([src, tgt])\n \n\n # down-sample path (encoder)\n x_enc = [x_in]\n for i in range(len(enc_nf)):\n x_enc.append(conv_block(x_enc[-1], enc_nf[i], 2))\n\n # transform the results into a flow field.\n Conv = getattr(KL, 'Conv%dD' % ndims)\n # up-sample path (decoder)\n x = conv_block(x_enc[-1], dec_nf[0])\n# flow0 = Conv(ndims, kernel_size=3, padding='same', name='flow0',\n# kernel_initializer=RandomNormal(mean=0.0, stddev=1e-5))(x)\n x = deconv_layer()(x)\n x = concatenate([x, x_enc[-2]])\n# flowU = upsample_layer()(flow0)\n# x = nrn_layers.SpatialTransformer(interp_method='linear', indexing='ij')([x, flowU])\n x = conv_block(x, dec_nf[1])\n flow1 = Conv(ndims, kernel_size=3, padding='same', name='flow1',\n kernel_initializer=RandomNormal(mean=0.0, stddev=1e-5))(x)\n x = deconv_layer()(x)\n x = concatenate([x, x_enc[-3]])\n# flowU = upsample_layer()(flow1)\n# x = nrn_layers.SpatialTransformer(interp_method='linear', indexing='ij')([x, flowU])\n x = conv_block(x, dec_nf[2])\n flow2 = Conv(ndims, kernel_size=3, padding='same', name='flow2',\n kernel_initializer=RandomNormal(mean=0.0, stddev=1e-5))(x)\n x = deconv_layer()(x)\n x = concatenate([x, x_enc[-4]])\n# flowU = upsample_layer()(flow2)\n# x = nrn_layers.SpatialTransformer(interp_method='linear', indexing='ij')([x, flowU])\n x = conv_block(x, dec_nf[3])\n x = conv_block(x, dec_nf[4])\n flow3 = Conv(ndims, kernel_size=3, padding='same', name='flow3',\n kernel_initializer=RandomNormal(mean=0.0, stddev=1e-5))(x)\n \n # only upsampleto full dim if full_size\n # here we explore architectures where we essentially work with flow fields \n # that are 1/2 size \n if full_size:\n x = deconv_layer()(x)\n# flowU = upsample_layer()(flow3)\n# x = nrn_layers.SpatialTransformer(interp_method='linear', indexing='ij')([x, flowU])\n x = concatenate([x, x_enc[0]])\n x = conv_block(x, dec_nf[5])\n\n # optional convolution at output resolution (used in voxelmorph-2)\n if len(dec_nf) == 7:\n x = conv_block(x, dec_nf[6])\n \n flow4 = Conv(ndims, kernel_size=3, padding='same', name='flow4',\n kernel_initializer=RandomNormal(mean=0.0, stddev=1e-5))(x)\n\n # warp the source with the flow\n\n return Model(inputs=[src, tgt], outputs=[flow1,flow2,flow3,flow4])", "def test_positional_convolution_backward():\n i = 1\n for num_batch in [1, 2, 4]:\n for num_channel in [4, 8, 12]:\n for input_height, input_width in itertools.product([10, 12, 18], [10, 12, 18]):\n for num_filter in [2, 4, 8]:\n for kernel in [(3, 3), (2, 2)]:\n for stride in [(1, 1), (2, 2)]:\n for pad in [(0, 0), (1, 1)]:\n for dilate in [(1, 1), (2, 2)]:\n # for num_group in [1, 2, 4]:\n grad_nodes = ['im_data', 'scale_data', 'weight', 'bias']\n output_height = np.floor(\n (input_height + 2 * pad[0] - dilate[0] * (kernel[0] - 1) - 1) * 1.0 / stride[0]\n ) + 1\n output_width = np.floor(\n (input_width + 2 * pad[1] - dilate[1] * (kernel[1] - 1) - 1) * 1.0 / stride[1]\n ) + 1\n im_data = np.random.rand(num_batch, num_channel, input_height, input_width)\n scale_data = \\\n np.random.rand(num_batch, num_channel, int(output_height), int(output_width))\\\n * 0.8 + 0.1\n\n weight = np.random.normal(0, 0.001, (num_filter, num_channel, kernel[0], kernel[1]))\n bias = np.random.rand(num_filter)\n\n im_data_var = mx.symbol.Variable(name=\"im_data\")\n scale_data_var = 
mx.symbol.Variable(name=\"scale_data\")\n weight_var = mx.symbol.Variable(name=\"weight\")\n bias_var = mx.symbol.Variable(name=\"bias\")\n op = mx.sym.contrib.PositionalConvolution(name='test_op',\n data=im_data_var,\n scale=scale_data_var,\n weight=weight_var,\n bias=bias_var,\n num_filter=num_filter,\n kernel=kernel, stride=stride, pad=pad,\n dilate=dilate\n )\n rtol, atol = 1e-4, 1e-3\n # absolute(a - b) <= (atol + rtol * absolute(b))\n check_numeric_gradient(op, [im_data, scale_data, weight, bias], rtol=rtol,\n atol=atol, grad_nodes=grad_nodes, ctx=mx.gpu(0))\n print(\"check numeric gradient successfully for the {} times\".format(i))\n i += 1", "def deconv2d(layer_input, skip_input, filters, f_size=3, dropout_rate=0, padding='same', strides=2):\n u = UpSampling2D(size=2)(layer_input)\n u = Conv2D(filters, kernel_size=f_size, strides=strides, padding=padding, activation='relu')(u)\n if dropout_rate:\n u = Dropout(dropout_rate)(u)\n u = BatchNormalization()(u)\n u = UpSampling2D(size=2)(u)\n u = Concatenate()([u, skip_input])\n return u", "def deconv2d(layer_input, filters, f_size=8, dropout_rate=0,permanent=False):\n u = UpSampling2D(size=2)(layer_input)\n u = Conv2D(filters, kernel_size=f_size, strides=1, padding='same', activation='relu')(u)\n if dropout_rate and not permanent:\n u = Dropout(dropout_rate)(u)\n elif dropout_rate and permanent:\n # permanent droput from my main man fchollet <3\n u=Lambda(lambda x: K.dropout(x, level=dropout_rate))(u) \n \n u = BatchNormalization(momentum=0.8)(u)\n return u", "def deconv2d(layer_input, skip_input, filters, f_size=4, dropout_rate=0):\n u = UpSampling2D(size=2)(layer_input)\n u = Conv2D(filters, kernel_size=f_size, strides=1, padding='same', activation='relu')(u)\n if dropout_rate:\n u = Dropout(dropout_rate)(u)\n u = BatchNormalization(momentum=0.8)(u)\n u = Concatenate()([u, skip_input])\n return u", "def inference_fconv_small12(input_shape=[None, 22,22,10,1],\n input_shape_m=[None, 22,22,10,3],\n n_filters=[1, 32, 32, 32],\n filter_sizes=[3, 2, 3, 2],\n corruption=False):\n \n # input to the network\n x = tf.placeholder(\n tf.float32, input_shape, name='x')\n m = tf.placeholder(\n tf.float32, input_shape_m, name='m')\n t = tf.placeholder(\n tf.float32, input_shape, name='t')\n keep_prob = tf.placeholder(tf.float32, name='keep_prob') #dropout (keep probability)\n \n encoder_i = []\n encoder_m = []\n encoder_main = []\n shapes_main = []\n shapes_i = []\n shapes_m = []\n \n #keep_prob=1.\n ### BRANCH 3d images\n with tf.variable_scope('img_conv1_1') as scope:\n shapes_i.append(x.get_shape().as_list())\n nfeaturemap = 256\n W = weight_variable([3, 3, 3, 1, nfeaturemap])\n b = bias_variable([nfeaturemap])\n output = tf.nn.relu(conv3d(x, W) + b)\n encoder_i.append(W)\n input_nfeaturemap = nfeaturemap\n current_input = output\n \n current_input = tf.nn.dropout(current_input, keep_prob, [tf.shape(x)[0],1,1,1,input_nfeaturemap])\n \n \n with tf.variable_scope('img_conv1_3') as scope:\n shapes_i.append(current_input.get_shape().as_list())\n nfeaturemap = 128\n W = weight_variable([1, 1, 1, input_nfeaturemap, nfeaturemap])\n b = bias_variable([nfeaturemap])\n output = tf.nn.relu(conv3d(current_input, W) + b)\n encoder_i.append(W)\n input_nfeaturemap = nfeaturemap\n current_input = output\n \n branch_image = current_input\n '''\n ### BRANCH motion parameters\n with tf.variable_scope('motion_conv1_1') as scope:\n shapes_m.append(m.get_shape().as_list())\n nfeaturemap = 64\n W = weight_variable([3, 3, 3, 3, nfeaturemap])\n b = 
bias_variable([nfeaturemap])\n output = tf.nn.relu(conv3d(m, W) + b)\n encoder_m.append(W)\n input_nfeaturemap = nfeaturemap\n current_input = output\n \n \n #current_input = tf.nn.dropout(current_input, keep_prob, [tf.shape(x)[0],1,1,1,input_nfeaturemap])\n \n with tf.variable_scope('motion_conv1_3') as scope:\n shapes_m.append(current_input.get_shape().as_list())\n nfeaturemap = 128\n W = weight_variable([1, 1, 1, input_nfeaturemap, nfeaturemap])\n b = bias_variable([nfeaturemap])\n output = tf.nn.relu(conv3d(current_input, W) + b)\n encoder_m.append(W)\n input_nfeaturemap = nfeaturemap\n current_input = output\n\n branch_motion = current_input\n \n #current_input = tf.concat([branch_image, branch_motion], axis=4)\n #input_nfeaturemap = 128\n current_input = tf.multiply(branch_image,branch_motion)\n #print tf.shape(current_input)[-1]\n #tf.shape(current_input)[-1]\n \n '''\n \n with tf.variable_scope('conv3_1') as scope:\n shapes_main.append(current_input.get_shape().as_list())\n nfeaturemap = 16\n W = weight_variable([1, 1, 1, input_nfeaturemap, nfeaturemap])\n b = bias_variable([nfeaturemap])\n output = tf.nn.relu(conv3d(branch_image, W) + b)\n encoder_main.append(W)\n input_nfeaturemap = nfeaturemap\n current_input = output\n \n # Max pooling\n #current_input = max_pool_2x2(current_input)\n #''' \n with tf.variable_scope('conv3_2') as scope:\n shapes_main.append(current_input.get_shape().as_list())\n nfeaturemap = 16\n W = weight_variable([1, 1, 1, input_nfeaturemap, nfeaturemap])\n b = bias_variable([nfeaturemap])\n output = tf.nn.relu(conv3d(current_input, W) + b)\n encoder_main.append(W)\n input_nfeaturemap = nfeaturemap\n current_input = output\n \n \n # store the latent representation\n z = current_input\n z_input_nfeaturemap = input_nfeaturemap\n '''\n encoder_main.reverse()\n encoder_i.reverse()\n encoder_m.reverse()\n \n shapes_main.reverse()\n shapes_i.reverse()\n shapes_m.reverse()\n '''\n \n with tf.variable_scope('deconv_i_1') as scope:\n shapes_i.append(z.get_shape().as_list())\n nfeaturemap = 128\n W = weight_variable([3, 3, 3, z_input_nfeaturemap, nfeaturemap])\n b = bias_variable([nfeaturemap])\n output = tf.nn.relu(conv3d(z, W) + b)\n encoder_i.append(W)\n input_nfeaturemap = nfeaturemap\n current_input = output\n \n #current_input = tf.nn.dropout(current_input, keep_prob, [tf.shape(x)[0],1,1,1,input_nfeaturemap])\n \n with tf.variable_scope('deconv_i_2') as scope:\n shapes_i.append(current_input.get_shape().as_list())\n nfeaturemap = 1\n W = weight_variable([1, 1, 1, input_nfeaturemap, nfeaturemap])\n b = bias_variable([nfeaturemap])\n output = conv3d(current_input, W) + b\n encoder_i.append(W)\n input_nfeaturemap = nfeaturemap\n y = output\n \n \n with tf.variable_scope('deconv_m_1') as scope:\n shapes_i.append(z.get_shape().as_list())\n nfeaturemap = 128\n W = weight_variable([3, 3, 3, z_input_nfeaturemap, nfeaturemap])\n b = bias_variable([nfeaturemap])\n output = tf.nn.relu(conv3d(z, W) + b)\n encoder_m.append(W)\n input_nfeaturemap = nfeaturemap\n current_input = output\n \n with tf.variable_scope('deconv_m_2') as scope:\n shapes_i.append(current_input.get_shape().as_list())\n nfeaturemap = 3\n W = weight_variable([1, 1, 1, input_nfeaturemap, nfeaturemap])\n b = bias_variable([nfeaturemap])\n output = conv3d(current_input, W) + b\n encoder_m.append(W)\n input_nfeaturemap = nfeaturemap\n m_hat = output\n \n \n loss_m = tf.reduce_mean(tf.square(m-m_hat))\n loss_i = tf.reduce_mean(tf.square(t-y))\n cost = loss_i + loss_m\n\n # %%\n return {'x': x, 't':t, 'm': m, 
'm_hat':m_hat, 'y': y, 'cost': cost, 'loss_i':loss_i, 'loss_m':loss_m, 'keep_prob': keep_prob, 'encoder_main':encoder_main, 'encoder_i':encoder_i, 'encoder_m':encoder_m}", "def test_deconvolve_symmetric(self):\n tau = 50.0\n tau_deconv1 = 5.0\n tau_deconv2 = 20.0\n mrate = 50.0\n Mrate = 100.0\n\n tmax = 60.0\n dt = 0.1\n\n self.rule.tau = tau\n self.rule.min_rate = mrate\n self.rule.max_rate = Mrate\n self.rule.compress_rates = False\n self.rule.tau_deconv1 = tau_deconv1\n self.rule.tau_deconv2 = tau_deconv2\n\n self.motor.error_fct = lambda t: 2*np.sin(0.123 + t/15.0)*np.ones(self.Nsrc)\n\n M1 = simulation.StateMonitor(self.rule, 'out')\n\n sim1 = simulation.Simulation(self.source, self.motor, self.rule, M1, dt=dt)\n sim1.run(tmax)\n\n self.rule.tau_deconv1 = tau_deconv2\n self.rule.tau_deconv2 = tau_deconv1\n\n M2 = simulation.StateMonitor(self.rule, 'out')\n\n sim2 = simulation.Simulation(self.source, self.motor, self.rule, M2, dt=dt)\n sim2.run(tmax)\n\n self.assertTrue(np.allclose(M1.out, M2.out))", "def inference_fconv_small(alpha=1.,input_shape=[None, 22,22,10,1],\n input_shape_m=[None, 22,22,10,3],\n n_filters=[1, 32, 32, 32],\n filter_sizes=[3, 2, 3, 2],\n corruption=False):\n \n # input to the network\n x = tf.placeholder(\n tf.float32, input_shape, name='x')\n m = tf.placeholder(\n tf.float32, input_shape_m, name='m')\n t = tf.placeholder(\n tf.float32, input_shape, name='t')\n keep_prob = tf.placeholder(tf.float32, name='keep_prob') #dropout (keep probability)\n \n encoder_i = []\n encoder_m = []\n encoder_main = []\n shapes_main = []\n shapes_i = []\n shapes_m = []\n \n #keep_prob=1.\n ### BRANCH 3d images\n with tf.variable_scope('img_conv1_1') as scope:\n shapes_i.append(x.get_shape().as_list())\n nfeaturemap = 32\n W = weight_variable([2, 2, 2, 1, nfeaturemap])\n b = bias_variable([nfeaturemap])\n output = tf.nn.relu(conv3d(x, W) + b)\n encoder_i.append(W)\n input_nfeaturemap = nfeaturemap\n current_input = output\n \n current_input = tf.nn.dropout(current_input, keep_prob, [tf.shape(x)[0],1,1,1,input_nfeaturemap])\n #current_input = max_pool_2x2(current_input)\n input_nfeaturemap = 32\n \n with tf.variable_scope('img_conv1_2') as scope:\n shapes_i.append(current_input.get_shape().as_list())\n nfeaturemap = 32\n W = weight_variable([1, 1, 1, input_nfeaturemap, nfeaturemap])\n b = bias_variable([nfeaturemap])\n output = tf.nn.relu(conv3d(current_input, W) + b)\n encoder_i.append(W)\n input_nfeaturemap = nfeaturemap\n current_input = output\n \n current_input = tf.nn.dropout(current_input, keep_prob, [tf.shape(x)[0],1,1,1,input_nfeaturemap])\n \n with tf.variable_scope('img_conv1_3') as scope:\n shapes_i.append(current_input.get_shape().as_list())\n nfeaturemap = 1\n W = weight_variable([2, 2, 2, input_nfeaturemap, nfeaturemap])\n b = bias_variable([nfeaturemap])\n output = tf.nn.relu(conv3d(current_input, W) + b)\n encoder_i.append(W)\n input_nfeaturemap = nfeaturemap\n current_input = output\n \n \n # resize upsampling\n #current_input = resize_volumes(current_input, 2, 2, 2) \n \n branch_image = current_input\n\n ### BRANCH motion parameters\n with tf.variable_scope('motion_conv1_1') as scope:\n shapes_m.append(m.get_shape().as_list())\n nfeaturemap = 64\n W = weight_variable([3, 3, 3, 3, nfeaturemap])\n b = bias_variable([nfeaturemap])\n output = tf.nn.relu(conv3d(m, W) + b)\n encoder_m.append(W)\n input_nfeaturemap = nfeaturemap\n current_input = output\n \n \n #current_input = tf.nn.dropout(current_input, keep_prob, [tf.shape(x)[0],1,1,1,input_nfeaturemap])\n \n with 
tf.variable_scope('motion_conv1_3') as scope:\n shapes_m.append(current_input.get_shape().as_list())\n nfeaturemap = 1\n W = weight_variable([1, 1, 1, input_nfeaturemap, nfeaturemap])\n b = bias_variable([nfeaturemap])\n output = tf.nn.relu(conv3d(current_input, W) + b)\n encoder_m.append(W)\n input_nfeaturemap = nfeaturemap\n current_input = output\n\n branch_motion = current_input\n \n #current_input = tf.concat([branch_image, branch_motion], axis=4)\n #input_nfeaturemap = 128\n current_input = tf.multiply(branch_image,branch_motion)\n #print tf.shape(current_input)[-1]\n #tf.shape(current_input)[-1]\n #current_input = tf.nn.dropout(current_input, keep_prob, [tf.shape(x)[0],1,1,1,input_nfeaturemap])\n \n \n \n with tf.variable_scope('conv3_1') as scope:\n shapes_main.append(current_input.get_shape().as_list())\n nfeaturemap = 16\n W = weight_variable([3, 3, 3, input_nfeaturemap, nfeaturemap])\n b = bias_variable([nfeaturemap])\n output = tf.nn.relu(conv3d(current_input, W) + b)\n encoder_main.append(W)\n input_nfeaturemap = nfeaturemap\n current_input = output\n \n # Max pooling\n #current_input = max_pool_2x2(current_input)\n #''' \n with tf.variable_scope('conv3_2') as scope:\n shapes_main.append(current_input.get_shape().as_list())\n nfeaturemap = 16\n W = weight_variable([1, 1, 1, input_nfeaturemap, nfeaturemap])\n b = bias_variable([nfeaturemap])\n output = tf.nn.relu(conv3d(current_input, W) + b)\n encoder_main.append(W)\n input_nfeaturemap = nfeaturemap\n current_input = output\n \n \n \n \n # store the latent representation\n z = current_input\n z_input_nfeaturemap = input_nfeaturemap\n '''\n encoder_main.reverse()\n encoder_i.reverse()\n encoder_m.reverse()\n \n shapes_main.reverse()\n shapes_i.reverse()\n shapes_m.reverse()\n '''\n \n with tf.variable_scope('deconv_i_1') as scope:\n shapes_i.append(z.get_shape().as_list())\n nfeaturemap = 64\n W = weight_variable([3, 3, 3, z_input_nfeaturemap, nfeaturemap])\n b = bias_variable([nfeaturemap])\n output = tf.nn.relu(conv3d(z, W) + b)\n encoder_i.append(W)\n input_nfeaturemap = nfeaturemap\n current_input = output\n \n #current_input = tf.nn.dropout(current_input, keep_prob, [tf.shape(x)[0],1,1,1,input_nfeaturemap])\n \n with tf.variable_scope('deconv_i_2') as scope:\n shapes_i.append(current_input.get_shape().as_list())\n nfeaturemap = 1\n W = weight_variable([1, 1, 1, input_nfeaturemap, nfeaturemap])\n b = bias_variable([nfeaturemap])\n output = conv3d(current_input, W) + b\n encoder_i.append(W)\n input_nfeaturemap = nfeaturemap\n y = output\n \n \n with tf.variable_scope('deconv_m_1') as scope:\n shapes_i.append(z.get_shape().as_list())\n nfeaturemap = 64\n W = weight_variable([3, 3, 3, z_input_nfeaturemap, nfeaturemap])\n b = bias_variable([nfeaturemap])\n output = tf.nn.relu(conv3d(z, W) + b)\n encoder_m.append(W)\n input_nfeaturemap = nfeaturemap\n current_input = output\n \n with tf.variable_scope('deconv_m_2') as scope:\n shapes_i.append(current_input.get_shape().as_list())\n nfeaturemap = 3\n W = weight_variable([1, 1, 1, input_nfeaturemap, nfeaturemap])\n b = bias_variable([nfeaturemap])\n output = conv3d(current_input, W) + b\n encoder_m.append(W)\n input_nfeaturemap = nfeaturemap\n m_hat = output\n \n \n \n \n loss_m = tf.reduce_mean(tf.square(m-m_hat))\n loss_i = tf.reduce_mean(tf.square(t-y))\n cost = alpha*loss_i + loss_m\n\n # %%\n return {'x': x, 't':t, 'm': m, 'm_hat':m_hat, 'y': y, 'cost': cost, 'loss_i':loss_i, 'loss_m':loss_m, 'keep_prob': keep_prob, 'encoder_main':encoder_main, 'encoder_i':encoder_i, 
'encoder_m':encoder_m}" ]
[ "0.650111", "0.6170416", "0.6147471", "0.60740656", "0.5956182", "0.5948169", "0.5885975", "0.5847327", "0.58391535", "0.58207595", "0.5816805", "0.58126575", "0.57973516", "0.5785438", "0.57493794", "0.57127476", "0.5711217", "0.56995", "0.569624", "0.5642262", "0.56382316", "0.56312335", "0.5593715", "0.5587668", "0.5577996", "0.55777556", "0.55677754", "0.5550887", "0.55380505", "0.5532863" ]
0.6862476
0
Test that conv and deconv are inverse operations given the same parameters
def test_conv_inverts_deconv(transformer_factory, input_size, filter_size, padding, stride): # convolutions whose output size are not an even multiple of stride cannot be exactly inverted a = (input_size + sum(padding) - filter_size) % stride conv_output = utils.conv_output_dim(input_size, filter_size, padding, stride) deconv_output = utils.deconv_output_dim(conv_output, filter_size, padding, stride) assert deconv_output == (input_size - a), ("Convolution and Deconvolution do not invert:\n" "output ({}) != input ({}) - a ({})\n" "filter: {}, padding: {}, stride: {}" ).format(deconv_output, input_size, a, filter_size, padding, stride)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_inverse_transform(self):", "def test__inverse_transform_continuous(self):", "def test_inverse_c(self):\n for q in self.all:\n self.assertTrue((q * q.inverse()).almost_equal(q.inverse()*q))", "def test_functional_inverse(self, dim):\n M = np.random.rand(dim, dim)\n assert np.all(M == symplectic.xxpp_to_xpxp(symplectic.xpxp_to_xxpp(M)))\n assert np.all(M == symplectic.xpxp_to_xxpp(symplectic.xxpp_to_xpxp(M)))\n\n v = np.random.rand(dim)\n assert np.all(v == symplectic.xxpp_to_xpxp(symplectic.xpxp_to_xxpp(v)))\n assert np.all(v == symplectic.xpxp_to_xxpp(symplectic.xxpp_to_xpxp(v)))", "def test_inv() -> None:\n ntests = 30\n for _ in range(ntests):\n # output_mat = np.array([[1, 2], [3, 4]])\n # filter_mat = np.array([[1, 2], [3, 4]])\n # theta = np.array([1, 2, 3, 4, 5])\n out_size = np.random.randint(1, 4, size=(2))\n filt_size = np.random.randint(1, 4, size=(2))\n theta_size = ((out_size[0] + filt_size[0] - 1)\n * (out_size[1] + filt_size[1] - 1)\n - out_size[0] * out_size[1])\n output_mat = np.random.rand(*out_size)\n filter_mat = np.random.rand(*filt_size)\n theta = np.random.randn(theta_size)\n input_mat = inv_conv2d(output_mat, filter_mat, theta)\n convolved = fwd_conv2d(input_mat, filter_mat).reshape(out_size)\n error = np.linalg.norm(convolved - output_mat)\n assert error < 0.05, 'error %f too large!\\n' \\\n 'output:\\n%s\\ntheta:\\n%s\\n' \\\n 'filt:\\n%s\\ninput:\\n%s' % (error,\n output_mat,\n theta,\n filter_mat,\n input_mat)", "def testInverted(self):\n invertedClass = xyTransformRegistry[\"inverted\"]\n invertedConfig = invertedClass.ConfigClass()\n affineClass = xyTransformRegistry[\"affine\"]\n invertedConfig.transform.retarget(affineClass)\n affineConfig = invertedConfig.transform\n affineConfig.translation = (1.2, -3.4)\n with lsst.utils.tests.getTempFilePath(\".py\") as filePath:\n self.checkConfig(invertedClass, invertedConfig, filePath)\n inverted = invertedClass(invertedConfig)\n self.checkBasics(inverted)\n for fromPoint in self.fromIter():\n toPoint = inverted.forwardTransform(fromPoint)\n predToPoint = fromPoint - \\\n Extent2D(*invertedConfig.transform.translation)\n for i in range(2):\n self.assertAlmostEqual(toPoint[i], predToPoint[i])", "def test_conv_layer_forward():\n #Setup layer and inputs\n conv = BinConv2d(2,1, [2,2],stride=1, bias=False)\n conv.weight.data.copy_(torch.Tensor([ [0.5,- 0.5] , [-0.5, 0.5],\\\n [1,-1] , [0.5, 0.5],]).view(1,2,2,2))\n\n\n inputs = torch.Tensor([ [1.1,2.1],[15,.01],[1,0],[1.,1.0]] ).view(1,2,2,2)\n\n # Check the result\n result = conv(inputs)\n expected_result = torch.nn.functional.conv2d(inputs, torch.Tensor([ [sign(0.5),sign(-0.5)] , [sign(-0.5), sign(0.5)],\\\n [sign(1),sign(-1)] , [sign(0.5), sign(0.5)],]).view(1,2,2,2) ,None)\n\n assert torch.all(torch.eq(result, expected_result))\n\n\n # Redo the test with a bias\n conv2 = BinConv2d(2,1, [2,2],stride=1, bias=True)\n conv2.weight.data.copy_(torch.Tensor([ [0.5,- 0.5] , [-0.5, 0.5],\\\n [1,-1] , [0.5, 0.5],]).view(1,2,2,2))\n \n\n conv2.bias.data.copy_(torch.Tensor([33.3]))\n result2 = conv2(inputs)\n expected_result2 = torch.nn.functional.conv2d(inputs, torch.Tensor([ [sign(0.5),sign(-0.5)] , [sign(-0.5), sign(0.5)],\\\n [sign(1),sign(-1)] , [sign(0.5), sign(0.5)],]).view(1,2,2,2) ,torch.Tensor([33.3]))\n\n assert torch.all(torch.eq(result2, expected_result2))", "def test_cinn_invertible_odd(self):\n\n out_single, out_single_J = self.cINN_odd(self.x_single_odd, self.y_single)\n out_batch, out_batch_J = self.cINN_odd(self.x_batch_odd, self.y_batch)\n\n 
rec_single = self.cINN_odd(out_single, self.y_single, inverse=True)\n rec_batch = self.cINN_odd(out_batch, self.y_batch, inverse=True)\n\n self.assertTrue(np.allclose(self.x_single_odd.numpy(), rec_single.numpy(), atol=1e-6),\n 'Could not invert ConditionalInvertibleBlock on a single instance with odd z')\n self.assertTrue(np.allclose(self.x_batch_odd.numpy(), rec_batch.numpy(), atol=1e-6),\n 'Could not invert ConditionalInvertibleBlock on batch instance with odd z')", "def test_cinn_invertible_odd(self):\n\n out_single, out_single_J = self.cINN_odd(self.x_single_odd, self.y_single)\n out_batch, out_batch_J = self.cINN_odd(self.x_batch_odd, self.y_batch)\n\n rec_single = self.cINN_odd(out_single, self.y_single, inverse=True)\n rec_batch = self.cINN_odd(out_batch, self.y_batch, inverse=True)\n\n self.assertTrue(np.allclose(self.x_single_odd.numpy(), rec_single.numpy(), atol=1e-6),\n 'Could not invert ConditionalInvertibleBlock on a single instance with odd z')\n self.assertTrue(np.allclose(self.x_batch_odd.numpy(), rec_batch.numpy(), atol=1e-6),\n 'Could not invert ConditionalInvertibleBlock on batch instance with odd z')", "def inverse(self, x, y):", "def test_inverse_by_num_1q(self):\n num_tests = 24\n for num in range(num_tests):\n cliff = CliffordUtils.clifford_1_qubit(num)\n clifford_expected = cliff.adjoint()\n clifford_from_num = CliffordUtils.clifford_1_qubit(inverse_1q(num))\n clifford_from_circuit = Clifford(cliff.to_circuit().inverse())\n self.assertEqual(clifford_expected, clifford_from_num)\n self.assertEqual(clifford_expected, clifford_from_circuit)", "def __invert(self, args):", "def test_cinn_invertible_even(self):\n\n out_single, out_single_J = self.cINN_even(self.x_single_even, self.y_single)\n out_batch, out_batch_J = self.cINN_even(self.x_batch_even, self.y_batch)\n\n rec_single = self.cINN_even(out_single, self.y_single, inverse=True)\n rec_batch = self.cINN_even(out_batch, self.y_batch, inverse=True)\n\n self.assertTrue(np.allclose(self.x_single_even.numpy(), rec_single.numpy(), atol=1e-6),\n 'Could not invert ConditionalInvertibleBlock on a single instance with even z')\n self.assertTrue(np.allclose(self.x_batch_even.numpy(), rec_batch.numpy(), atol=1e-6),\n 'Could not invert ConditionalInvertibleBlock on batch instance with even z')", "def test_cinn_invertible_even(self):\n\n out_single, out_single_J = self.cINN_even(self.x_single_even, self.y_single)\n out_batch, out_batch_J = self.cINN_even(self.x_batch_even, self.y_batch)\n\n rec_single = self.cINN_even(out_single, self.y_single, inverse=True)\n rec_batch = self.cINN_even(out_batch, self.y_batch, inverse=True)\n\n self.assertTrue(np.allclose(self.x_single_even.numpy(), rec_single.numpy(), atol=1e-6),\n 'Could not invert ConditionalInvertibleBlock on a single instance with even z')\n self.assertTrue(np.allclose(self.x_batch_even.numpy(), rec_batch.numpy(), atol=1e-6),\n 'Could not invert ConditionalInvertibleBlock on batch instance with even z')", "def test_inverse_of_linear_vector_transforms(free_alg: Drudge):\n\n dr = free_alg\n p = dr.names\n v = p.v\n\n a = Vec('a')\n b = Vec('b')\n\n defs = [\n dr.define(a, v + 1),\n dr.define(b, v - 1)\n ]\n res = dr.lvt_inv(defs)\n\n assert len(res) == 2\n half = Rational(1, 2)\n one_checked = False\n v_checked = False\n for i in res:\n if i.lhs == 1:\n assert (i - half * a + half * b).simplify() == 0\n one_checked = True\n elif i.lhs == v:\n assert (i - half * a - half * b).simplify() == 0\n v_checked = True\n else:\n assert False\n continue\n\n assert one_checked and 
v_checked", "def test_dinn_invertible_odd(self):\n\n out_single, out_single_J = self.dINN_odd(self.x_single_odd, self.y_single)\n out_batch, out_batch_J = self.dINN_odd(self.x_batch_odd, self.y_batch)\n\n rec_single = self.dINN_odd(out_single, self.y_single, inverse=True)\n rec_batch = self.dINN_odd(out_batch, self.y_batch, inverse=True)\n\n self.assertTrue(np.allclose(self.x_single_odd.numpy(), rec_single.numpy(), atol=1e-4),\n 'Could not invert DeepInvertibleModel on a single instance with odd z')\n self.assertTrue(np.allclose(self.x_batch_odd.numpy(), rec_batch.numpy(), atol=1e-4),\n 'Could not invert DeepInvertibleModel on batch instance with odd z')", "def test_dinn_invertible_odd(self):\n\n out_single, out_single_J = self.dINN_odd(self.x_single_odd, self.y_single)\n out_batch, out_batch_J = self.dINN_odd(self.x_batch_odd, self.y_batch)\n\n rec_single = self.dINN_odd(out_single, self.y_single, inverse=True)\n rec_batch = self.dINN_odd(out_batch, self.y_batch, inverse=True)\n\n self.assertTrue(np.allclose(self.x_single_odd.numpy(), rec_single.numpy(), atol=1e-4),\n 'Could not invert DeepInvertibleModel on a single instance with odd z')\n self.assertTrue(np.allclose(self.x_batch_odd.numpy(), rec_batch.numpy(), atol=1e-4),\n 'Could not invert DeepInvertibleModel on batch instance with odd z')", "def invertible(A, B):\n\tdetA = la.det(A)\n\tdetB = la.det(B)\n\t\n\tif (detA == 0):\n\t\tif (detB == 0):\n\t\t\tprint(\"None are invertible\")\n\t\telse:\n\t\t\treturn B\n\telse:\n\t\tif (detB == 0):\n\t\t\treturn A\n\t\telse:\n\t\t\tprint(\"Both are invertible\")", "def test_reversible_block(self):\n for implementation in [0, 1]:\n # same convolution test\n Gm = torch.nn.Conv2d(10 // 2, 10 // 2, (3,3), padding=1)\n dims = (2,10,8,8)\n\n Xdata = np.random.random(dims).astype(np.float32)\n\n X = Variable(torch.from_numpy(Xdata))\n Xshape = X.shape\n rb = revop.ReversibleBlock(Gm, implementation=implementation)\n Y = rb(X)\n X.data.set_()\n self.assertTrue(len(X.data.shape) == 0)\n Y.backward(torch.ones_like(Y))\n\n self.assertTrue(Y.shape == Xshape)\n self.assertTrue(X.data.numpy().shape == Xdata.shape)\n self.assertTrue(np.isclose(X.data.numpy(), Xdata, atol=1e-06).all())", "def test_conv_consistency(self) -> None:\n x = Input(\n 'const1',\n [1, 3, 3, 3],\n Float32(),\n )\n w = Constant(\n 'weight',\n Float32(),\n np.zeros([1, 2, 2, 3])\n )\n input_ops = {'X': cast(Operator, x), 'W': cast(Operator, w)}\n\n Conv(\n 'conv_under_test',\n [1, 3, 3, 3],\n Float32(),\n input_ops,\n pads=[1, 2, 1, 2],\n strides=[2, 2]\n )\n\n print(\"Consistency test for conv operator passed!\")", "def is_inverse(self, other):\n return (self * other).is_identity() and (other * self).is_identity()", "def test_inverse_b(self):\n for q in self.all:\n self.assertTrue(\n (q*q.inverse()).almost_equal(Quaternion(1, 0, 0, 0)))", "def all_conv_ops(self):\n pass", "def test_inverse_by_num_2q(self):\n num_tests = 100\n rng = default_rng(seed=self.seed)\n for _ in range(num_tests):\n num = rng.integers(CliffordUtils.NUM_CLIFFORD_2_QUBIT)\n cliff = CliffordUtils.clifford_2_qubit(num)\n clifford_expected = cliff.adjoint()\n clifford_from_num = CliffordUtils.clifford_2_qubit(inverse_2q(num))\n clifford_from_circuit = Clifford(cliff.to_circuit().inverse())\n self.assertEqual(clifford_expected, clifford_from_num)\n self.assertEqual(clifford_expected, clifford_from_circuit)", "def test_tensor_network_flip(self):\n circuit = jet.Circuit(num_wires=1)\n circuit.append_gate(jet.PauliX(), wire_ids=[0])\n tn = circuit.tensor_network()\n\n 
tensor = tn.contract()\n assert tensor.indices == [\"0-1\"]\n assert tensor.shape == [2]\n assert tensor.data == pytest.approx([0, 1])", "def test_dinn_invertible_even(self):\n\n out_single, out_single_J = self.dINN_even(self.x_single_even, self.y_single)\n out_batch, out_batch_J = self.dINN_even(self.x_batch_even, self.y_batch)\n\n rec_single = self.dINN_even(out_single, self.y_single, inverse=True)\n rec_batch = self.dINN_even(out_batch, self.y_batch, inverse=True)\n\n self.assertTrue(np.allclose(self.x_single_even.numpy(), rec_single.numpy(), atol=1e-4),\n 'Could not invert DeepInvertibleModel on a single instance with even z')\n self.assertTrue(np.allclose(self.x_batch_even.numpy(), rec_batch.numpy(), atol=1e-4),\n 'Could not invert DeepInvertibleModel on batch instance with even z')", "def test_dinn_invertible_even(self):\n\n out_single, out_single_J = self.dINN_even(self.x_single_even, self.y_single)\n out_batch, out_batch_J = self.dINN_even(self.x_batch_even, self.y_batch)\n\n rec_single = self.dINN_even(out_single, self.y_single, inverse=True)\n rec_batch = self.dINN_even(out_batch, self.y_batch, inverse=True)\n\n self.assertTrue(np.allclose(self.x_single_even.numpy(), rec_single.numpy(), atol=1e-4),\n 'Could not invert DeepInvertibleModel on a single instance with even z')\n self.assertTrue(np.allclose(self.x_batch_even.numpy(), rec_batch.numpy(), atol=1e-4),\n 'Could not invert DeepInvertibleModel on batch instance with even z')", "def __invert__(self) -> Operators:\n return self.operate(inv)", "def test_convTranpose2d(self, _, module, inputs, filters, bias=None):\n\n utils.compare_tracing_methods(\n module, inputs, filters, fusible_ops={\"aten::_convolution\"}\n )", "def test_conv(self):\n for kernel_type in [lambda x: x, SharedTensor]:\n for matrix_width in range(2, 5):\n for kernel_width in range(1, matrix_width):\n for padding in range(kernel_width // 2 + 1):\n matrix_size = (5, matrix_width)\n matrix = get_random_test_tensor(size=matrix_size)\n\n kernel_size = (kernel_width, kernel_width)\n kernel = get_random_test_tensor(size=kernel_size)\n\n matrix = matrix.unsqueeze(0).unsqueeze(0)\n kernel = kernel.unsqueeze(0).unsqueeze(0)\n\n reference = torch.nn.functional.conv2d(\n matrix, kernel, padding=padding)\n encrypted_matrix = SharedTensor(matrix)\n encrypted_kernel = kernel_type(kernel)\n encrypted_conv = encrypted_matrix.conv2d(\n encrypted_kernel, padding=padding\n )\n\n self._check(encrypted_conv, reference, 'conv2d failed')" ]
[ "0.73368603", "0.71729094", "0.68075156", "0.66037315", "0.6594968", "0.64753395", "0.6474013", "0.6347338", "0.6347338", "0.62833875", "0.62619966", "0.61940867", "0.6166104", "0.6166104", "0.61169124", "0.6046706", "0.6046706", "0.6030973", "0.5981622", "0.5976751", "0.5969992", "0.5944773", "0.59291947", "0.59264535", "0.59173536", "0.59085757", "0.59085757", "0.5899606", "0.58876157", "0.58863354" ]
0.7238191
1
Create a new receiver object.
def __new__(cls, rtype, cluster_id=None, action=None, **kwargs): if rtype == consts.RECEIVER_WEBHOOK: from senlin.engine.receivers import webhook ReceiverClass = webhook.Webhook elif rtype == consts.RECEIVER_MESSAGE: from senlin.engine.receivers import message ReceiverClass = message.Message else: ReceiverClass = Receiver return super(Receiver, cls).__new__(ReceiverClass)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def construct_receiver_no_id(source_id=None, **kwargs):\n return acquire.Receiver(source_id=None, **kwargs)", "def _from_object(cls, receiver):\n kwargs = {\n 'id': receiver.id,\n 'name': receiver.name,\n 'user': receiver.user,\n 'project': receiver.project,\n 'domain': receiver.domain,\n 'created_at': receiver.created_at,\n 'updated_at': receiver.updated_at,\n 'actor': receiver.actor,\n 'params': receiver.params,\n 'channel': receiver.channel,\n }\n\n return cls(receiver.type, receiver.cluster_id, receiver.action,\n **kwargs)", "def create(cls, process_name=\"ReceiverSender\",queue_send=None, queue_receive=None): #pensar em mais parametros\n cls.instance = cls(process_name, queue_send, queue_receive)\n return cls.instance", "def add_receiver(self, name, position):#*args, **kwargs):\n return self._add_object(name, Receiver, position=position)#*args, **kwargs)", "def __init__(self, method, receiver):\n self.method = method\n if type(receiver) == Class:\n self.receiver = cast(receiver, objc_id)\n else:\n self.receiver = receiver", "def create_receiver(self):\n receiver = kafka.KafkaConsumer(bootstrap_servers=['%s:%s' % (self._host, self._port)])\n return receiver", "def __new__(cls, receiver, callback=None):\n\n if isinstance(receiver,MethodType):\n try:\n self = ref.__new__(cls,receiver.im_self,callback)\n except TypeError:\n return receiver\n else:\n self.func = receiver.im_func\n return self\n\n if isinstance(receiver,weak_receiver):\n return receiver\n\n try:\n return ref.__new__(cls,receiver,callback)\n except TypeError:\n return receiver", "def send(cls, receiver):\n def _sender(*args, **kwargs):\n receiver.send(cls(), *args, **kwargs)\n return _sender", "def __new__(cls, sender, callback=None):\n self = int.__new__(cls,id(sender))\n try:\n self.ref = ref(sender,callback)\n except TypeError:\n self.ref = lambda: sender\n return self", "def __init__(self, sender: str, receiver: str, signature: str) -> None:\n super().__init__(sender, receiver)\n self.signature = signature", "def __init__(self, sender_id, receivers_id, period, deadline=0, size=1000, starting_time=0, end_to_end=None):\n self.__set_sender_id(sender_id)\n self.__set_receivers_id(receivers_id)\n self.__set_period(period)\n self.__set_deadline(deadline)\n self.__set_size(size)\n self.__set_starting_time(starting_time)\n self.__set_end_to_end_delay(end_to_end)", "def store(self, context, update=False):\n timestamp = timeutils.utcnow(True)\n self.created_at = timeutils.utcnow(True)\n values = {\n 'id': self.id,\n 'name': self.name,\n 'type': self.type,\n 'user': self.user,\n 'project': self.project,\n 'domain': self.domain,\n 'created_at': self.created_at,\n 'updated_at': self.updated_at,\n 'cluster_id': self.cluster_id,\n 'actor': self.actor,\n 'action': self.action,\n 'params': self.params,\n 'channel': self.channel,\n }\n\n if update:\n self.updated_at = timestamp\n values['updated_at'] = timestamp\n ro.Receiver.update(context, self.id, values)\n else:\n self.created_at = timestamp\n values['created_at'] = timestamp\n receiver = ro.Receiver.create(context, values)\n self.id = receiver.id\n\n return self.id", "def create_bridge(factory, msg_type, topic_from,frequency=0):\n #print(factory,msg_type,topic_from)\n if isinstance(factory, str):\n module_name, obj_name = factory.split(\":\")\n module = import_module(module_name, 'gateway')\n factory = getattr(module, obj_name)\n print(module_name, obj_name, factory)\n print(\"factory is isinstance \")\n if not issubclass(factory, Bridge):\n raise ValueError(\"factory should be 
Bridge subclass\")\n if isinstance(msg_type, str):\n msg_name, msg_obj = msg_type.split(\":\")\n msg_module = import_module(msg_name)\n msg_type = getattr(msg_module, msg_obj)\n print(module_name, obj_name, msg_type)\n print(\"msg_type is isinstance \")\n if not issubclass(msg_type, rospy.Message):\n raise TypeError(\n \"msg_type should be rospy.Message instance or its string\"\n \"reprensentation\")\n return factory(topic_from=topic_from, msg_type=msg_type, frequency=frequency)", "def create_remote_instance(self, payload):\n instance = RemoteInstance()\n instance.init_from_payload(payload)\n return instance", "def __init__(self, sender, receiver, content):\n self.sender = sender\n self.receiver = receiver\n self.content = content\n self.unread = True\n self.timestamp = services.time.now()", "def set_receiver(self, receiver):\n self.receiver = receiver", "def create(self):\n o = self._create_impl()\n self.logger.debug(f\"created {o}\")\n self._notify(o)", "def receiver(self, receiver):\n\n self._receiver = receiver", "def receiver(self, receiver):\n\n self._receiver = receiver", "def load(cls, context, receiver_id=None, receiver_obj=None,\n project_safe=True):\n if receiver_obj is None:\n receiver_obj = ro.Receiver.get(context, receiver_id,\n project_safe=project_safe)\n if receiver_obj is None:\n raise exception.ResourceNotFound(type='receiver',\n id=receiver_id)\n\n return cls._from_object(receiver_obj)", "def __init__(self, instance, created, signal_type):\n\n self.instance = instance\n self.created = created\n self.signal_type = signal_type", "def new_message(self, body=''):\r\n m = self.message_class(self, body)\r\n m.queue = self\r\n return m", "def new(self, obj):\n pass", "def __call__(self):\n \n p = self.protocol()\n p.factory = self\n return p", "def createMessage( self, *args, **kw ):\n return MailMessage( *args, **kw )", "def __new__(cls, *p, **k):\n if not '_single_instance' in cls.__dict__:\n cls._single_instance = object.__new__(cls)\n \n # Prepare Parent\n #super(SerialSocket, self).__init__()\n \n cls.__port = serial.Serial(port = 0, baudrate = 19200, parity = 'O', timeout=1)\n else:\n if not cls._single_instance.is_open:\n cls._single_instance.open()\n return cls._single_instance", "def __init__(self, local, remote):\n self.receiver = HalfConnection(local)\n self.sender = HalfConnection(remote)", "def __init__(self, local, remote):\n self.receiver = HalfConnection(local)\n self.sender = HalfConnection(remote)", "def create(cls, _):\n return cls", "def test_receiver():\n p = Provider([1, 2, 3])\n r = Receiver(p)\n p.receiver = r\n r.append([4, 5])\n p.value = [2, 3]\n assert r.append([4, 5]) == [2, 3, 4, 5]" ]
[ "0.70990366", "0.67225856", "0.66829747", "0.65813774", "0.63713026", "0.62152827", "0.61773187", "0.5897912", "0.58591956", "0.58540297", "0.5778976", "0.5712958", "0.5694952", "0.56846017", "0.56715405", "0.56713593", "0.5661827", "0.56467944", "0.56467944", "0.5628301", "0.55828625", "0.55401415", "0.552048", "0.5518909", "0.5503709", "0.5481767", "0.54731286", "0.54731286", "0.5442961", "0.5396205" ]
0.7392701
0
Store the receiver in the database and return its ID.
def store(self, context, update=False): timestamp = timeutils.utcnow(True) self.created_at = timeutils.utcnow(True) values = { 'id': self.id, 'name': self.name, 'type': self.type, 'user': self.user, 'project': self.project, 'domain': self.domain, 'created_at': self.created_at, 'updated_at': self.updated_at, 'cluster_id': self.cluster_id, 'actor': self.actor, 'action': self.action, 'params': self.params, 'channel': self.channel, } if update: self.updated_at = timestamp values['updated_at'] = timestamp ro.Receiver.update(context, self.id, values) else: self.created_at = timestamp values['created_at'] = timestamp receiver = ro.Receiver.create(context, values) self.id = receiver.id return self.id
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __get_receivers_id(self):\n return self.__receivers_id", "def insert(self):\n item = self.create()\n return item.id", "def create(cls, sender, recipient, body, timestamp=None):\n if timestamp is None:\n timestamp = datetime.datetime.utcnow()\n msg = cls(sender, recipient, body, timestamp)\n db.session.add(msg)\n\n try:\n db.session.commit()\n db.session.flush()\n return msg.id\n except BaseException as exc:\n db.session.rollback()\n raise exc", "def save(self)->None:\n item = database.cursor.fetchone()\n if item:\n self.id = item['id']\n database.connection.commit()", "def GetRecipientID(self, order):\n cursor = self.cursor\n\n manager_guid = order[\"manager\"][\"id\"]\n query = f'select id from recipient where guid=\\'{manager_guid}\\''\n cursor.execute(query)\n row = cursor.fetchone()\n if not row:\n return None\n managerid = row[0]\n\n if (not order[\"agent\"]):\n lname = order[\"customer\"][\"lname\"]\n fname = order[\"customer\"][\"fname\"]\n mname = order[\"customer\"][\"mname\"]\n if not mname: mname = \"\"\n phone = order[\"customer\"][\"phone\"]\n email = order[\"customer\"][\"email\"]\n query = '''\nselect id from dbo.[Recipient] where upper(rtrim([name]))=? and ltrim([phone])=? and [email]=?\n'''\n values = ((lname + ' ' + fname + ' ' + mname).upper().rstrip(), phone, email)\n cursor.execute(query, values)\n row = cursor.fetchone()\n if (not row):\n selector = '''\ninsert into dbo.[Recipient]\n([name], [recipienttypeid], [phone], [rtypeid], [legalgroupid], [metaphone], [managerid], [email])\nvalues (?,?,?,?,?,?,?,?)\n'''\n values = (\n (lname + ' ' + fname + ' ' + mname).rstrip(), 8, phone, 1, 10,\n (lname + fname + mname + phone).lower(),\n managerid, email)\n cursor.execute(selector, values)\n\n cursor.execute(\"select IDENT_CURRENT('recipient')\")\n row = cursor.fetchone()\n id = row[0]\n\n else:\n guid = order[\"agent\"][\"id\"]\n query = f'select id from dbo.Recipient where guid=\\'{guid}\\''\n\n cursor.execute(query)\n row = cursor.fetchone()\n if (not row):\n name = order[\"agent\"][\"name\"]\n query = 'insert into dbo.Recipient ([name], [rtypeid], [recipienttypeid], [metaphone],[managerid], [guid]) values (?,?,?,?,?,?)'\n values = (name, 2, 13, name.lower(), managerid, guid)\n cursor.execute(query, values)\n cursor.execute(\"select IDENT_CURRENT('recipient')\")\n row = cursor.fetchone()\n id = row[0]\n\n return id", "def get_id(self):\n return self.uid", "def save(self):\n if not self.id:\n self.id = uuid4()\n DataStore.add_instance(self)", "def getid_saveifneeded(self):\n #if (not hasattr(self,'id') or self.id == None):\n if (self.id == None):\n self.save()\n return self.id", "def __get_sender_id(self):\n return self.__sender_id", "def get_id(self):\n\n\t\treturn self.__id", "def getID(self):\n return self.__id", "def unique_id(self):\n return self._uid", "def unique_id(self):\n return self._uid", "def unique_id(self):\n return self._uid", "def save(self)->None:\n database.cursor.execute(\n \"INSERT INTO blacklist (token) VALUES(%s) RETURNING id\", (self.token,))\n super().save()", "def get_id(self):\n return self.__id", "def get_id(self):\n return self.__id", "def save(self):\n try:\n db.session.add(self)\n db.session.commit()\n return self.id\n except Exception as e:\n db.session.rollback()\n return {\n \"message\": \"Ensure the object you're saving is valid\",\n \"help\": \"Has all fields and doesn't repeat unique values.\",\n \"exception\": str(e)\n }", "def get_id(self):\n return self.email", "def get_id(self):\n return self.email", "def unique_id(self):\n 
return _spacegrant_swig.ax25_udp_pdu_receiver_sptr_unique_id(self)", "def getID(self):\n return self._id", "def get_id(self) -> int:\n return self.id", "def get_id(self) -> int:\n return self.id", "def _post(self, data):\n new_ticket_id = DB_TICKET_TABLE.insert(data)\n return new_ticket_id", "def id(self):\n return self.proto.id", "def id(self) -> UID:\n return self._id", "def id(self) -> UID:\n return self._id", "def get_ID(self):\n return self.ID", "def _get_id(self):\n return self.id" ]
[ "0.6423932", "0.5876769", "0.5797933", "0.5710716", "0.5706996", "0.56506854", "0.5634671", "0.5573839", "0.5572288", "0.553507", "0.5532543", "0.5480858", "0.5480858", "0.5480858", "0.54778296", "0.5473655", "0.5473655", "0.54736364", "0.5444757", "0.5444757", "0.54426414", "0.54390275", "0.5422554", "0.5422554", "0.54209685", "0.5397179", "0.53930527", "0.53930527", "0.5390657", "0.5382589" ]
0.69515216
0
Construct a receiver from a receiver object.
def _from_object(cls, receiver): kwargs = { 'id': receiver.id, 'name': receiver.name, 'user': receiver.user, 'project': receiver.project, 'domain': receiver.domain, 'created_at': receiver.created_at, 'updated_at': receiver.updated_at, 'actor': receiver.actor, 'params': receiver.params, 'channel': receiver.channel, } return cls(receiver.type, receiver.cluster_id, receiver.action, **kwargs)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def construct_receiver_no_id(source_id=None, **kwargs):\n return acquire.Receiver(source_id=None, **kwargs)", "def __new__(cls, rtype, cluster_id=None, action=None, **kwargs):\n if rtype == consts.RECEIVER_WEBHOOK:\n from senlin.engine.receivers import webhook\n ReceiverClass = webhook.Webhook\n elif rtype == consts.RECEIVER_MESSAGE:\n from senlin.engine.receivers import message\n ReceiverClass = message.Message\n else:\n ReceiverClass = Receiver\n\n return super(Receiver, cls).__new__(ReceiverClass)", "def __init__(self, method, receiver):\n self.method = method\n if type(receiver) == Class:\n self.receiver = cast(receiver, objc_id)\n else:\n self.receiver = receiver", "def __new__(cls, receiver, callback=None):\n\n if isinstance(receiver,MethodType):\n try:\n self = ref.__new__(cls,receiver.im_self,callback)\n except TypeError:\n return receiver\n else:\n self.func = receiver.im_func\n return self\n\n if isinstance(receiver,weak_receiver):\n return receiver\n\n try:\n return ref.__new__(cls,receiver,callback)\n except TypeError:\n return receiver", "def add_receiver(self, name, position):#*args, **kwargs):\n return self._add_object(name, Receiver, position=position)#*args, **kwargs)", "def load(cls, context, receiver_id=None, receiver_obj=None,\n project_safe=True):\n if receiver_obj is None:\n receiver_obj = ro.Receiver.get(context, receiver_id,\n project_safe=project_safe)\n if receiver_obj is None:\n raise exception.ResourceNotFound(type='receiver',\n id=receiver_id)\n\n return cls._from_object(receiver_obj)", "def _from_other(cls, obj):", "def __new__(cls, sender, callback=None):\n self = int.__new__(cls,id(sender))\n try:\n self.ref = ref(sender,callback)\n except TypeError:\n self.ref = lambda: sender\n return self", "def send(cls, receiver):\n def _sender(*args, **kwargs):\n receiver.send(cls(), *args, **kwargs)\n return _sender", "def __init__(self, sender_id, receivers_id, period, deadline=0, size=1000, starting_time=0, end_to_end=None):\n self.__set_sender_id(sender_id)\n self.__set_receivers_id(receivers_id)\n self.__set_period(period)\n self.__set_deadline(deadline)\n self.__set_size(size)\n self.__set_starting_time(starting_time)\n self.__set_end_to_end_delay(end_to_end)", "def create_receiver(self):\n receiver = kafka.KafkaConsumer(bootstrap_servers=['%s:%s' % (self._host, self._port)])\n return receiver", "def __init__(self, sender: str, receiver: str, signature: str) -> None:\n super().__init__(sender, receiver)\n self.signature = signature", "def __init__(\n self,\n to: Address,\n sender: Address,\n message: Union[Message, bytes],\n context: Optional[EnvelopeContext] = None,\n protocol_specification_id: Optional[PublicId] = None,\n ) -> None:\n enforce(isinstance(to, str), f\"To must be string. Found '{type(to)}'\")\n enforce(\n isinstance(sender, str), f\"Sender must be string. 
Found '{type(sender)}'\"\n )\n enforce(\n isinstance(message, (Message, bytes)),\n \"message should be a type of Message or bytes!\",\n )\n\n if isinstance(message, Message):\n message = self._check_consistency(message, to, sender)\n\n self._to = to\n self._sender = sender\n\n enforce(\n self.is_to_public_id == self.is_sender_public_id,\n \"To and sender must either both be agent addresses or both be public ids of AEA components.\",\n )\n\n if isinstance(message, bytes):\n if protocol_specification_id is None:\n raise ValueError(\n \"Message is bytes object, protocol_specification_id must be provided!\"\n )\n elif isinstance(message, Message):\n if message.protocol_id is None:\n raise ValueError( # pragma: nocover\n f\"message class {type(message)} has no protocol_id specified!\"\n )\n protocol_specification_id = message.protocol_specification_id\n if protocol_specification_id is None:\n raise ValueError(\n \"Message is Message object, protocol_specification_id could not be resolved! Ensure protocol is valid!\"\n )\n else:\n raise ValueError(\n f\"Message type: {type(message)} is not supported!\"\n ) # pragma: nocover\n\n self._protocol_specification_id: PublicId = protocol_specification_id\n self._message = message\n if self.is_component_to_component_message:\n enforce(\n context is None,\n \"EnvelopeContext must be None for component to component messages.\",\n )\n self._context = context", "def create_bridge(factory, msg_type, topic_from,frequency=0):\n #print(factory,msg_type,topic_from)\n if isinstance(factory, str):\n module_name, obj_name = factory.split(\":\")\n module = import_module(module_name, 'gateway')\n factory = getattr(module, obj_name)\n print(module_name, obj_name, factory)\n print(\"factory is isinstance \")\n if not issubclass(factory, Bridge):\n raise ValueError(\"factory should be Bridge subclass\")\n if isinstance(msg_type, str):\n msg_name, msg_obj = msg_type.split(\":\")\n msg_module = import_module(msg_name)\n msg_type = getattr(msg_module, msg_obj)\n print(module_name, obj_name, msg_type)\n print(\"msg_type is isinstance \")\n if not issubclass(msg_type, rospy.Message):\n raise TypeError(\n \"msg_type should be rospy.Message instance or its string\"\n \"reprensentation\")\n return factory(topic_from=topic_from, msg_type=msg_type, frequency=frequency)", "def set_receiver(self, receiver):\n self.receiver = receiver", "def receiver(self, receiver):\n\n self._receiver = receiver", "def receiver(self, receiver):\n\n self._receiver = receiver", "def create(cls, process_name=\"ReceiverSender\",queue_send=None, queue_receive=None): #pensar em mais parametros\n cls.instance = cls(process_name, queue_send, queue_receive)\n return cls.instance", "def __call__(self):\n \n p = self.protocol()\n p.factory = self\n return p", "def __init__(self, *args):\r\n \r\n self.bl = None\r\n self.buddy = None\r\n self.connection = None\r\n \r\n #\r\n # incoming\r\n #\r\n #__init__(self, bl, connection, command, encoded)\r\n if type(args[0]) == BuddyList:\r\n self.bl = args[0]\r\n self.connection = args[1]\r\n if self.connection:\r\n self.buddy = self.connection.buddy\r\n self.command = args[2]\r\n \r\n # decode from line format to raw binary\r\n # and then let the message parse it \r\n self.blob = decodeLF(args[3])\r\n self.parse()\r\n \r\n # the incoming message is now properly initialized and somebody\r\n # could now call its execute() method to trigger its action\r\n return\r\n \r\n \r\n #\r\n # outgoing\r\n #\r\n #__init__(self, connection, blob)\r\n #__init__(self, buddy, blob)\r\n 
if type(args[0]) in [InConnection, OutConnection, Buddy]:\r\n if type(args[0]) in [InConnection, OutConnection]:\r\n self.connection = args[0]\r\n if self.connection.buddy:\r\n self.buddy = self.connection.buddy\r\n \r\n elif type(args[0]) == Buddy:\r\n self.buddy = args[0]\r\n self.connection = self.buddy.conn_out\r\n \r\n if len(args) > 1:\r\n blob = args[1]\r\n if type(blob) in [list, tuple]:\r\n self.blob = \" \".join(str(part) for part in blob)\r\n else:\r\n self.blob = str(blob)\r\n else:\r\n self.blob = \"\"\r\n \r\n self.command = type(self).__name__[12:]", "def __init__(self, sender, receiver, content):\n self.sender = sender\n self.receiver = receiver\n self.content = content\n self.unread = True\n self.timestamp = services.time.now()", "def __init__(self, *args):\n this = _libsbml.new_SBMLReactionConverter(*args)\n try: self.this.append(this)\n except: self.this = this", "def _object2proto(self) -> RunFunctionOrConstructorAction_PB:\n return RunFunctionOrConstructorAction_PB(\n path=self.path,\n args=[serialize(x) for x in self.args],\n kwargs={k: serialize(v) for k, v in self.kwargs.items()},\n id_at_location=serialize(self.id_at_location),\n address=serialize(self.address),\n msg_id=serialize(self.id),\n )", "def create_remote_instance(self, payload):\n instance = RemoteInstance()\n instance.init_from_payload(payload)\n return instance", "def receiver(self, receiver: str):\n if receiver is None:\n raise ValueError(\"Invalid value for `receiver`, must not be `None`\") # noqa: E501\n\n self._receiver = receiver", "def __init__(self,\n anchor: Entity,\n transmission_range: Optional[float] = None,\n receiver_capacity: Optional[int] = None,\n ):\n\n super().__init__(anchor=anchor)\n\n self._transmission_range = transmission_range\n self._receiver_capacity = receiver_capacity\n\n self._received_messages: List[Tuple[CommunicationDevice, Message]] = []\n self._comms_in_range: List[CommunicationDevice] = []", "def __init__(self, local, remote):\n self.receiver = HalfConnection(local)\n self.sender = HalfConnection(remote)", "def __init__(self, local, remote):\n self.receiver = HalfConnection(local)\n self.sender = HalfConnection(remote)", "def __init__(\n self, env, link,\n transmitter_port, receiver_port):\n self.env = env\n self.link = link\n self._transmitter_port = transmitter_port\n self._receiver_port = receiver_port\n env.process(self.run())", "def from_proto(cls, raw_task, delivery_tag=None, deserializator=False):\n app = gromozeka.get_app()\n if not deserializator:\n proto_task = ProtoTask()\n proto_task.ParseFromString(raw_task)\n task_uuid = proto_task.uuid\n graph_uuid = proto_task.graph_uuid\n task_id = proto_task.task_id\n args = None if not proto_task.args else json.loads(proto_task.args)\n kwargs = None if not proto_task.kwargs else json.loads(proto_task.kwargs)\n delay = proto_task.delay\n delivery_tag = proto_task.delivery_tag\n retries = proto_task.retries\n reply_to_exchange, reply_to_routing_key = proto_task.reply_to.exchange, proto_task.reply_to.routing_key\n\n else:\n task_uuid, task_id, graph_uuid, args, kwargs, \\\n retries, delay, reply_to_exchange, reply_to_routing_key = deserializator.deserialize(raw_task=raw_task)\n r_task = app.get_task(task_id)\n return cls(func=r_task.func,\n args=args,\n kwargs=kwargs,\n bind=r_task.bind,\n app=app,\n max_retries=r_task.max_retries,\n retry_countdown=r_task.retry_countdown,\n retries=retries,\n delay=delay,\n uuid_=task_uuid,\n delivery_tag=delivery_tag,\n ignore_result=r_task.ignore_result,\n graph_uuid=graph_uuid,\n 
broker_point=BrokerPoint(exchange=r_task.broker_point.exchange,\n exchange_type=r_task.broker_point.exchange_type,\n queue=r_task.broker_point.queue,\n routing_key=r_task.broker_point.routing_key),\n reply_to_exchange=reply_to_exchange,\n reply_to_routing_key=reply_to_routing_key)" ]
[ "0.71256065", "0.6681739", "0.6624353", "0.61288947", "0.5840496", "0.570091", "0.5698267", "0.5570594", "0.55004007", "0.5460564", "0.5394144", "0.53751534", "0.5333654", "0.53211445", "0.5306747", "0.53045243", "0.53045243", "0.52950823", "0.52561927", "0.5206004", "0.5145775", "0.5118532", "0.5094424", "0.50826794", "0.5082174", "0.50810826", "0.5067464", "0.5067464", "0.50594395", "0.5058042" ]
0.747322
0
Build connection params for a specific user and project.
def _build_conn_params(self, user, project): service_creds = senlin_context.get_service_credentials() params = { 'username': service_creds.get('username'), 'password': service_creds.get('password'), 'auth_url': service_creds.get('auth_url'), 'user_domain_name': service_creds.get('user_domain_name'), 'project_domain_name': service_creds.get('project_domain_name'), 'verify': service_creds.get('verify'), 'interface': service_creds.get('interface'), } cred = co.Credential.get(oslo_context.get_current(), user, project) if cred is None: raise exception.TrustNotFound(trustor=user) params['trust_id'] = cred.cred['openstack']['trust'] return params
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def make_connection_params(self, host, port, db):\r\n\r\n kwargs = {\r\n \"db\": db,\r\n \"parser_class\": self.get_parser_cls(),\r\n \"password\": self.options.get('PASSWORD', None),\r\n }\r\n\r\n if host == \"unix\":\r\n kwargs.update({'path': port, 'connection_class': UnixDomainSocketConnection})\r\n else:\r\n kwargs.update({'host': host, 'port': port, 'connection_class': Connection})\r\n\r\n if 'SOCKET_TIMEOUT' in self.options:\r\n timeout = self.options['SOCKET_TIMEOUT']\r\n assert isinstance(timeout, (int, float)), \"Socket timeout should be float or integer\"\r\n kwargs['socket_timeout'] = timeout\r\n\r\n return kwargs", "def build(self):\n return ConnectionParams(self)", "def get_connection_params(self):\r\n settings_dict = self.settings_dict.copy()\r\n if settings_dict['NAME'] == '':\r\n from django.core.exceptions import ImproperlyConfigured\r\n raise ImproperlyConfigured(\r\n \"settings.DATABASES is improperly configured. \"\r\n \"Please supply the NAME value.\")\r\n if not settings_dict['NAME']:\r\n # if _nodb_connection, connect to master\r\n settings_dict['NAME'] = 'master'\r\n\r\n autocommit = settings_dict.get('OPTIONS', {}).get('autocommit', False)\r\n return {\r\n 'connection_string': make_connection_string(settings_dict),\r\n 'timeout': self.command_timeout,\r\n 'use_transactions': not autocommit,\r\n }", "def get_connection_params (self, kwargs = {}):\n\n config = configparser.ConfigParser ()\n\n if 'MYSQL_CONF' in kwargs:\n config.read (('/etc/my.cnf', os.path.expanduser (kwargs['MYSQL_CONF'])))\n else:\n config.read (('/etc/my.cnf', os.path.expanduser ('~/.my.cnf')))\n\n section = config[kwargs.get ('MYSQL_GROUP', 'mysql')]\n from_my_cnf = {\n 'host' : section.get ('host', 'localhost').strip ('\"'),\n 'port' : section.get ('port', '3306').strip ('\"'),\n 'database' : section.get ('database', '').strip ('\"'),\n 'user' : section.get ('user', '').strip ('\"'),\n 'password' : section.get ('password', '').strip ('\"'),\n }\n\n return from_my_cnf", "def _get_conn_params(self) -> dict[str, Any]:\n conn = self.get_connection(self.slack_conn_id)\n if not conn.password:\n raise AirflowNotFoundException(\n f\"Connection ID {self.slack_conn_id!r} does not contain password (Slack API Token).\"\n )\n conn_params: dict[str, Any] = {\"token\": conn.password, \"retry_handlers\": self.retry_handlers}\n extra_config = ConnectionExtraConfig(\n conn_type=self.conn_type, conn_id=conn.conn_id, extra=conn.extra_dejson\n )\n # Merge Hook parameters with Connection config\n conn_params.update(\n {\n \"timeout\": self.timeout or extra_config.getint(\"timeout\", default=None),\n \"base_url\": self.base_url or extra_config.get(\"base_url\", default=None),\n \"proxy\": self.proxy or extra_config.get(\"proxy\", default=None),\n }\n )\n # Add additional client args\n conn_params.update(self.extra_client_args)\n return {k: v for k, v in conn_params.items() if v is not None}", "def get_connection_params(self, session, **kwargs):\n return {}", "def get_connection_params(settings_dict):\n valid_settings = {\n 'NAME': 'name',\n 'HOST': 'host',\n 'PORT': 'port',\n 'USER': 'username',\n 'PASSWORD': 'password',\n 'AUTH_SOURCE': 'authSource',\n 'AUTH_MECHANISM': 'authMechanism',\n 'ENFORCE_SCHEMA': 'enforce_schema',\n 'REPLICASET': 'replicaset',\n 'SSL': 'ssl',\n 'SSL_CERTFILE': 'ssl_certfile',\n 'SSL_CA_CERTS': 'ssl_ca_certs',\n 'READ_PREFERENCE': 'read_preference'\n }\n connection_params = {\n 'name': 'djongo_test',\n 'enforce_schema': True\n }\n for setting_name, kwarg in valid_settings.items():\n 
try:\n setting = settings_dict[setting_name]\n except KeyError:\n continue\n\n if setting or setting is False:\n connection_params[kwarg] = setting\n\n return connection_params", "def prepare_connection(unpw, port=''):\n params = {\n 'database': unpw['database'],\n 'user': unpw['username'],\n 'password': unpw['password'],\n 'host': 'localhost',\n 'port': port,\n }\n return params", "def getParamsFromEnv(self):\r\n self.port = os.getenv('PGPORT', self.port)\r\n self.host = os.getenv('PGHOST', self.host)\r\n self.database = os.getenv('PGDATABASE', self.database)\r\n self.user = os.getenv('PGUSER', self.user)\r\n self.password = os.getenv('PGPASSWORD', self.password)", "def connection_arguments(self):\n filtered_args = ['name', 'metrics']\n\n # make sure we make a copy of this global so it is thread-safe\n args = dict(DEFAULT_CONNECT_ARGS)\n\n for key in set(self.config) - set(filtered_args):\n if key == 'dbname':\n args['database'] = self.config[key]\n else:\n args[key] = self.config[key]\n return args", "def create_connect_args(self, url):\n opts = url.translate_connect_args(username='uid', password='pwd',\n host='server', port='service') # Are these safe renames?\n connstr = \";\".join(['%s=%s' % (k.upper(), v) for k, v in opts.items()])\n opt = {}\n\n return ([connstr], opt)", "def buildConnectionString(params):\n print params\n return \";\".join([\"%s=%s\" % (a, b) for a, b in params.items()])", "def _build_scm_url_extra_vars(self, project_update):\n extra_vars = {}\n if project_update.credential:\n scm_username = project_update.credential.get_input('username', default='')\n scm_password = project_update.credential.get_input('password', default='')\n else:\n scm_username = ''\n scm_password = ''\n scm_type = project_update.scm_type\n scm_url = update_scm_url(scm_type, project_update.scm_url, check_special_cases=False)\n scm_url_parts = urlparse.urlsplit(scm_url)\n # Prefer the username/password in the URL, if provided.\n scm_username = scm_url_parts.username or scm_username\n scm_password = scm_url_parts.password or scm_password\n if scm_username:\n if scm_type == 'svn':\n extra_vars['scm_username'] = scm_username\n extra_vars['scm_password'] = scm_password\n scm_password = False\n if scm_url_parts.scheme != 'svn+ssh':\n scm_username = False\n elif scm_url_parts.scheme.endswith('ssh'):\n scm_password = False\n elif scm_type in ('insights', 'archive'):\n extra_vars['scm_username'] = scm_username\n extra_vars['scm_password'] = scm_password\n scm_url = update_scm_url(scm_type, scm_url, scm_username, scm_password, scp_format=True)\n else:\n scm_url = update_scm_url(scm_type, scm_url, scp_format=True)\n\n # Pass the extra accept_hostkey parameter to the git module.\n if scm_type == 'git' and scm_url_parts.scheme.endswith('ssh'):\n extra_vars['scm_accept_hostkey'] = 'true'\n\n return scm_url, extra_vars", "def build(**kwargs):\n conf = CommonConfig.get()\n for key, value in kwargs.items():\n conf[key] = value\n return conf", "def build_config_from_user_input() -> BuildConfig:\n project_dir: str = get_user_input_for_value(\"Cmake Project Directory: \", str)\n name: str = get_user_input_for_value(\"Project Config Name: \", str)\n generator: str = get_user_input_for_value(\"Generator: \", str)\n configurationType: str = get_user_input_for_value(\"Configuration Type: \", str)\n inheritEnvironments: str = get_user_input_for_value(\"Inherit Environments: \", str)\n buildRoot: str = r\"${projectDir}\\\\out\\\\build\\\\${name}\"\n installRoot: str = r\"${projectDir}\\\\out\\\\install\\\\${name}\"\n 
cmakeCommandArgs: str = get_user_input_for_value(\"Cmake Command Args: \", str)\n buildCommandArgs: str = get_user_input_for_value(\"Build Command Args: \", str)\n ctestCommandArgs: str = get_user_input_for_value(\"Ctest Command Args: \", str)\n variables: List[CmakeVariable] = get_cmake_vars_from_user()\n\n # Build object and return it to function user\n return BuildConfig(name=name,\n generator=generator,\n configurationType=configurationType,\n inheritEnvironments=inheritEnvironments,\n buildRoot=buildRoot,\n installRoot=installRoot,\n cmakeCommandArgs=cmakeCommandArgs,\n buildCommandArgs=buildCommandArgs,\n ctestCommandArgs=ctestCommandArgs,\n variables=variables)", "def create_connection_kwargs(self, **kwargs):\n region = kwargs.get('region')\n endpoint_url = self.service_url\n verify = kwargs.get('validate_certs', True)\n api_version = kwargs.get('api_version', \"\")\n use_ssl = kwargs.get('is_secure', False)\n region = region\n\n self._connection_kwargs = {'service_name': self.SERVICE_PREFIX,\n 'aws_access_key_id': self.eucarc.aws_access_key,\n 'aws_secret_access_key': self.eucarc.aws_secret_key,\n 'use_ssl': use_ssl,\n 'region_name': region,\n 'verify': verify,\n 'endpoint_url': endpoint_url}\n if api_version is not None:\n self._connection_kwargs['api_version'] = api_version\n\n return self._connection_kwargs", "def buildConnectionString(params):\n return \";\".join([\"%s=%s\" % (k,v) for k, v in params.items()])", "def get_client_settings_args(**kwargs):\r\n settings = {\r\n 'endpoint_url': kwargs.get('endpoint_url'),\r\n 'timeout': kwargs.get('timeout'),\r\n 'auth': kwargs.get('auth'),\r\n 'proxy': kwargs.get('proxy'),\r\n }\r\n username = kwargs.get('username')\r\n api_key = kwargs.get('api_key')\r\n if username and api_key and not settings['auth']:\r\n settings['auth'] = BasicAuthentication(username, api_key)\r\n return settings", "def __init__(self, project_info: ProjectInfo, conn):\n self.project_info = project_info\n self.conn = conn", "def get_project_connect(project, base_url=\"\", api_key=\"\"):\n updated = False\n if project not in CONFIG.sections():\n CONFIG.add_section(project)\n\n if not base_url:\n base_url = CONFIG[project].get('base_url', None)\n if not base_url:\n domain = '%s.djaoapp.com' % project\n custom_domain = input(\"Please enter the domain for project '%s'\\n\"\\\n \"(default to: %s): \" % (project, domain))\n if custom_domain:\n domain = custom_domain\n base_url = \"https://%s\" % domain\n if not CONFIG[project].get('base_url', None):\n CONFIG.set(project, 'base_url', base_url)\n updated = True\n\n if not api_key:\n api_key = CONFIG[project].get('api_key', None)\n if not api_key:\n api_key = input(\"Please enter an API Key for %s\\n\"\\\n \"(see https://www.djaodjin.com/docs/faq/#api-keys for help): \"\n % base_url)\n if not CONFIG[project].get('api_key', None):\n CONFIG.set(project, 'api_key', api_key)\n updated = True\n\n return base_url, api_key, updated", "def Build(self):\n if all((self.client_type, self.client_id, self.client_secret,\n self.auth_uri, self.token_uri)):\n client_config = {\n self.client_type: {\n 'client_id': self.client_id,\n 'client_secret': self.client_secret,\n 'auth_uri': self.auth_uri,\n 'token_uri': self.token_uri\n }\n }\n else:\n raise ValueError('Required field is missing.')\n\n return client_config", "def get_kafka_connection_params():\n return {\n \"bootstrap_servers\": os.environ[\"BOOTSTRAP_SERVERS\"],\n \"security_protocol\": os.environ[\"SECURITY_PROTOCOL\"],\n \"sasl_mechanism\": os.environ[\"SASL_MECHANISM\"],\n 
\"sasl_plain_username\": os.environ[\"SASL_PLAIN_USERNAME\"],\n \"sasl_plain_password\": os.environ[\"SASL_PLAIN_PASSWORD\"],\n \"ssl_cafile\": os.environ[\"SSL_CAFILE\"],\n }", "def _get_connection_params(\n cls, conninfo: str, **kwargs: Any\n ) -> Dict[str, Any]:\n params = conninfo_to_dict(conninfo, **kwargs)\n\n # Make sure there is an usable connect_timeout\n if \"connect_timeout\" in params:\n params[\"connect_timeout\"] = int(params[\"connect_timeout\"])\n else:\n params[\"connect_timeout\"] = None\n\n # TODO: SRV lookup (RFC 2782)\n # https://github.com/psycopg/psycopg/issues/70\n\n return params", "def get_psycopg2_connection_kwargs(_id: str) -> dict:\n return get_connection_kwargs(_id, credentials_mapping=PSYCOPG2_CONNECTION_KWARG_MAP)", "def _build_config() -> None:\n\n global CONFIGURATION\n\n configuration_common = {\n 'SERVICE_HOST': os.getenv('SERVICE_HOST', '0.0.0.0'),\n 'SERVICE_PORT': os.getenv('SERVICE_PORT', 8080),\n 'FILE_NAME': os.getenv('FILE_NAME', 'example'),\n }\n\n CONFIGURATION = {\n **configuration_common\n }", "def buildConnectionStringx(**paramx):\n return \";\".join([\"%s=%s\" % (k, v) for k, v in paramx.items()])", "def get_connection_string(self):\n auth = ''\n if self.user:\n auth = self.user\n if self.password:\n auth = auth + ':' + self.password\n if auth:\n auth += '@'\n conn_string = 'postgresql://{auth}{host}/{dbname}'.format(\n auth=auth, host=self.host, dbname=self.dbname)\n\n return conn_string", "def build_dsn(database):\n\t\tdsn = { 'database': database, 'host': config.get('database', 'host') }\n\t\ttry:\n\t\t\tssl = config.getboolean('database', 'ssl')\n\t\t\tif ssl:\n\t\t\t\tdsn['ssl'] = ssl\n\t\texcept ConfigParser.Error:\n\t\t\tpass\n\t\ttry:\n\t\t\tusername = config.get('database', 'username')\n\t\t\tdsn['user'] = username\n\t\texcept ConfigParser.Error:\n\t\t\tpass\n\t\ttry:\n\t\t\tpassword = config.get('database', 'password')\n\t\t\tdsn['password'] = password\n\t\texcept ConfigParser.Error:\n\t\t\tpass\n\t\treturn dsn", "def _ex_connection_class_kwargs(self):\r\n kwargs = {}\r\n if not self._host_argument_set:\r\n kwargs['host'] = API_ENDPOINTS[self.region]['host']\r\n\r\n return kwargs", "def build_connection_pool(conn_details: dict):\n\n # Expecting url to be like jdbc:postgresql://host:port/db\n conn_details.update(\n DB.url_regex.match(conn_details[\"url\"]).groupdict()\n )\n return SimpleConnectionPool(\n minconn=1,\n maxconn=20,\n user=conn_details[\"user\"],\n password=conn_details[\"password\"],\n host=conn_details[\"host\"],\n port=conn_details[\"port\"],\n database=conn_details[\"db\"])" ]
[ "0.6681274", "0.6415917", "0.64074725", "0.6044565", "0.59719354", "0.5750442", "0.57281107", "0.5702612", "0.56912225", "0.5690372", "0.5643423", "0.56048477", "0.5570526", "0.5529019", "0.55210865", "0.551333", "0.5467593", "0.54630786", "0.5452627", "0.54466474", "0.54416496", "0.54301655", "0.5399108", "0.5344217", "0.5292398", "0.52917075", "0.5274229", "0.526029", "0.52447975", "0.52409655" ]
0.76817983
0
Registers an obj to this registry. When locked, a RuntimeError is raised. When an object with the name exists, and overwrite_existing is False, a RuntimeError is raised. When the object does not extend IRegistryContent, a ValueError is raised. When the object NAME attribute is not set, a ValueError is raised.
def register( self, obj: typing.Union[IRegistryContent, typing.Type[IRegistryContent]], overwrite_existing=True, ): if self.locked: raise RuntimeError(f"registry {self.name} is locked!") if not ( isinstance(obj, IRegistryContent) if not self.class_based else issubclass(obj, IRegistryContent) ): raise ValueError( f"can only register stuff created from IRegistryContent, not {obj}" ) if obj.NAME == "minecraft:unknown_registry_content": raise ValueError( f"object {obj} has no name set, and as such cannot be registered!" ) if obj.NAME in self.entries and not overwrite_existing: raise RuntimeError( f"could not register object {obj.NAME} ({obj}) into registry {self.name} as an object with this name exists" ) self.entries[obj.NAME] = obj self.full_entries[obj.NAME] = obj # todo: what to do if an object exists HERE? if isinstance(obj.NAME, str): self.full_entries[obj.NAME.split(":")[-1]] = obj if self.injection_function: self.injection_function(self, obj) # Call the event function on the object obj.on_register(self) return obj
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def register(self, obj, name=None):\n if not name:\n name = obj.__name__\n if name in self._registry:\n raise KeyError(\"Name '%s' has been registered in '%s'!\" %\n (name, self._name))\n\n # logging.vlog(1, \"Registering %s (%s) in %s.\", name, obj, self._name)\n self._registry[name] = obj", "def register(self, obj):\r\n name = obj._component_name\r\n if name in self.components:\r\n raise ComponentAlreadyRegistered(\"Component already registered with name %s\" % name)\r\n\r\n self.components[obj._component_name] = obj", "def register(self, obj):\n if not callable(obj):\n raise ValueError(f\"object must be callable\")\n\n obj_name = obj.__name__\n if obj_name in self._obj_dict:\n pass\n # print(f\"{obj_name} is already registered in {self.name}\")\n # raise KeyError(f'{obj_name} is already registered in {self.name}')\n\n self._obj_dict[obj_name] = obj\n return obj", "def register_object(self, obj):\n self.modules.append(obj)", "def register(obj_name, obj):\n if obj_name not in ninja_globals['register']:\n ninja_globals['register'][obj_name] = obj", "def register_class(obj):\r\n try:\r\n KnownClass.objects.get(module_name=obj.__module__, class_name=obj.__class__.__name__)\r\n except DoesNotExist:\r\n # Create it\r\n KnownClass(module_name = obj.__module__, class_name = obj.__class__.__name__).save()", "def add_object(self, obj: str):\n if obj not in self._objects:\n self._objects.append(obj)\n else:\n raise IDAlreadyExists", "def register_object(self, obj, isdelete = False, listonly = False, postupdate=False, **kwargs):\n #print \"REGISTER\", repr(obj), repr(getattr(obj, '_instance_key', None)), str(isdelete), str(listonly)\n \n # things can get really confusing if theres duplicate instances floating around,\n # so make sure everything is OK\n self.uow._validate_obj(obj)\n \n mapper = object_mapper(obj)\n self.mappers.add(mapper)\n task = self.get_task_by_mapper(mapper)\n\n if postupdate:\n mod = task.append_postupdate(obj)\n if mod: self._mark_modified()\n return\n \n # for a cyclical task, things need to be sorted out already,\n # so this object should have already been added to the appropriate sub-task\n # can put an assertion here to make sure....\n if task.circular:\n return\n \n mod = task.append(obj, listonly, isdelete=isdelete, **kwargs)\n if mod: self._mark_modified()", "def register(self, what, obj):\n # print(\"Registering pattern\", name, pattern)\n name = obj.name\n version = obj.version\n enable = obj.enable\n if enable == 'n':\n return\n\n key = Key(name, version)\n self.plugins[what][key] = obj", "def register(self, name, obj):\r\n self.eval_allowed_globals[name] = obj", "def test_object_register():\n dummy = \"Stub\"\n value = \" \"\n\n o_reg = ObjectRegister()\n\n o_reg[dummy] = value\n\n assert dummy in o_reg # is dummy in register?\n\n assert o_reg.Stub == value # is value correct?\n\n o_reg.Stub = 1 # can I change the value?\n assert o_reg.Stub == 1\n\n # does the setter and getter send exceptions on bad key?\n import pytest\n\n with pytest.raises(KeyError):\n o_reg.Exception\n\n register = make_register(o_reg)\n\n @register\n def number(i):\n return i\n\n assert o_reg.number(2) == 2", "def register(self,registerable):\n result = self.registry.register(registerable)\n if result.reg_info.index is None:\n raise RuntimeError(\"failed to register {}\".format(str(registerable)))\n return result", "def register_instance(self, obj):\n self.__instances.append(obj)\n self._proxy_class_methods(obj)", "def addobj(self, obj):\n self._objslock.acquire()\n if obj.objid in 
self._objs:\n self._objslock.release()\n raise KeyError(\"non-unique EMANE object id %s for %s\" % (obj.objid, obj))\n self._objs[obj.objid] = obj\n self._objslock.release()", "def addObject(self, name, object):\n self.map[name] = object", "def set(self, obj: _T) -> None:\n\n self.registry[self.scopefunc()] = obj", "def Register(self, window, persistenceHandler=None):\n\n if self.Find(window):\n raise Exception(\"Object (class=%s, name=%s) is already registered\"%(window.__class__, window.GetName()))\n\n name = window.GetName()\n self._persistentObjects[name] = PersistentObject(window, persistenceHandler)\n\n return True", "def regen(self):\n self.create(overwrite=True)\n self.load()", "def put(self, name, obj, lifetime=ObjectLifetime.Event):\n\n # check if object with the same name is already stored?\n if name in self.store.keys():\n raise AlreadyInDataStore()\n # no, store it!\n self.store[name] = (lifetime, obj)", "def forceRegister(self, name, value):\n pass", "def register(self, cls, force=False):\n if not issubclass(cls, self.type):\n raise InvalidRegistryItemType(\n \"Invalid item type `{0}` for registry \"\n \"`{1}`\".format(cls, self.__class__)\n )\n\n # If item has not been forced yet, add/replace its' value in the\n # registry.\n if force:\n\n if cls.uid not in self._forced:\n self._registry[cls.uid] = cls\n self._forced.append(cls.uid)\n return True\n else:\n return False\n\n else:\n\n if cls.uid in self._registry:\n return False\n else:\n self._registry[cls.uid] = cls\n return True", "def new(self, obj):\n new_key = \"{}.{}\".format(obj.__class__.__name__, obj.id)\n self.__objects[new_key] = obj", "def putobjname(self,objname_): # 3\n res = self.__obj.putobjname(objname_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def update(self, obj):\n self.identity_map[obj._instance_key] = obj\n self.register_dirty(obj)", "def new(self, obj):\n if obj:\n key = obj.__class__.__name__ + \".\" + obj.id\n self.__objects[key] = obj", "def _register(registry, cls):\n assert issubclass(cls, Registrable)\n\n reg_attr = f\"_{cls.__name__}_registered\"\n if getattr(cls, reg_attr, False):\n return cls\n\n name = cls.__fieldtype__()\n assert (\n name not in registry\n ), f\"{cls!r} cannot be registered as {name!r}: already used by {registry[name]!r}\"\n\n registry[name] = cls\n setattr(cls, reg_attr, True)\n return cls", "def register(self):\n raise NotImplementedError", "def register(self):\n raise NotImplementedError", "def register(self):\n raise NotImplementedError()", "def register(self, cls, force=False):\n if not issubclass(cls, self.type):\n raise InvalidRegistryItemType(\n \"Invalid item type `{0}` for registry \"\n \"`{1}`\".format(cls, self.__class__)\n )\n\n # If item has not been forced yet, add/replace its' value in the\n # registry.\n if force:\n\n if cls.uid not in self._forced:\n self._registry[cls.integrate_with][cls.uid] = cls\n self._forced[cls.integrate_with].append(cls.uid)\n return True\n else:\n return False\n\n else:\n\n if cls.uid in self._registry[cls.integrate_with]:\n return False\n else:\n self._registry[cls.integrate_with][cls.uid] = cls\n return True" ]
[ "0.7712725", "0.71682245", "0.70870656", "0.69742715", "0.6501139", "0.642957", "0.63914746", "0.6340303", "0.6338703", "0.63272023", "0.62428355", "0.6232373", "0.61719275", "0.61291784", "0.6081065", "0.6008799", "0.5974267", "0.59550905", "0.59465", "0.5944888", "0.59387934", "0.5933276", "0.5902548", "0.58970827", "0.58883053", "0.5861337", "0.58538693", "0.58538693", "0.5838343", "0.58333296" ]
0.8434839
0
Converts an individual C01 file to TIF using bfconvert.
def cellomics2tiff((file_in,dir_out)): file_out = cutils.getTifPath(file_in,dir_out) # don't repeat conversion if converted file exists # and is newer than the original data if os.path.isfile(file_out) \ and os.stat(file_out).st_mtime > os.stat(file_in).st_mtime: return if platform.system() == 'Linux': #cmd = ['bfconvert','-nogroup',file_in,file_out,'> /dev/null'] #cmd = ['/opt/bftools/bfconvert','-nogroup',file_in,file_out,'] #print " ".join(cmd) #FNULL = open(os.devnull,'w') #subprocess.call(cmd, stdout=FNULL, shell=False) #FNULL.close() cmd = '/opt/bftools/bfconvert -overwrite -nogroup %s %s > /dev/null'%(file_in,file_out) #print cmd os.system(cmd) else: cmd = ['bfconvert','-nogroup',file_in,file_out] print " ".join(cmd) subprocess.call(cmd, shell=True)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def asc_to_gtif(i_dir):\n\n # Set search for all files with suffix in specified folder\n q = join(i_dir, \"*.asc\")\n # List of all TIF files\n asc_fps = glob.glob(q)\n\n # Loop over all files\n for item in asc_fps:\n # Open ASC file\n data = np.loadtxt(item, delimiter=\";\")\n\n # Determine the size of the output array\n x_size = np.count_nonzero(data[:, 0] == data[0, 0])\n y_size = np.count_nonzero(data[:, 1] == data[0, 1])\n\n # Transform columns to grid\n arr = np.reshape(data[:, 2], (1, x_size, y_size), order=\"F\")\n arr = np.flip(arr, axis=1)\n\n # Determine pixel resolution\n arr_x = np.reshape(data[:, 0], (x_size, y_size), order=\"F\")\n pix_x = arr_x[0, 1] - arr_x[0, 0]\n arr_y = np.reshape(data[:, 1], (x_size, y_size), order=\"F\")\n pix_y = arr_y[1, 0] - arr_y[0, 0]\n\n # Determine top-left coordinates\n left = data[:, 0].min()\n top = data[:, 1].max() + pix_y # Adjust for pixel size\n\n # Set meta data for GeoTIF\n transform = from_origin(left, top, pix_x, pix_y)\n si_crs = {'init': 'EPSG:3794'} # D96/TM\n\n _, name = split(item[:-4])\n save_file = join(i_dir, name + '.tif')\n\n # Save array as with metadata as GeoTIFF\n new_dataset = rasterio.open(save_file, \"w\", driver=\"GTiff\",\n height=arr.shape[1], width=arr.shape[2],\n count=1, dtype=str(arr.dtype),\n crs=si_crs,\n transform=transform, compress=\"lzw\")\n new_dataset.write(arr)\n new_dataset.close()\n\n # Remove ASC file\n # remove(item)\n\n # Output message:\n out_msg = 'Successfully converted ASC files to GeoTIFF!'\n\n return out_msg", "def convert(self,inputDir, outputDir):\n print \"mp_cellomics2tiff:\",\"INPUT:\", inputDir\n print \"mp_cellomics2tiff:\",\"OUTPUT:\", outputDir\n\n # input image files\n c01s = glob.glob(inputDir + \"/*.C01\")\n\n if os.path.isdir(outputDir):\n # check if entire dataset is already converted\n if cutils.isDatasetConverted(inputDir,outputDir):\n logfile = open(os.path.join(outputDir,'cellomics2tiff_error.log'),'w')\n msg = \"Seems that data was converted already, stopping.\"\n print >> logfile, msg\n print \"mp_cellomics2tiff:\",msg\n logfile.close()\n return\n else:\n os.makedirs(outputDir)\n\n metadataDir = os.path.join(outputDir,\"metadata\")\n if not os.path.isdir(metadataDir):\n os.makedirs(metadataDir)\n \n logging.basicConfig(filename=outputDir+'/cellomics2tiff.log', format='%(levelname)s:%(message)s', level=logging.DEBUG)\n logging.basicConfig(level=logging.DEBUG)\n\n # convert the metadata in MS Access files to CSV \n msg = \"Converting metadata to \", metadataDir\n print \"mp_cellomics2tiff:\",msg \n mdbs = glob.glob(inputDir + \"/*.MDB\")\n mdbs.extend(glob.glob(inputDir + \"/*.mdb\"))\n for mdb in mdbs:\n print \"MDB:\",mdb\n mdb_export(mdb, metadataDir)\n\n # Convert the data\n start_time_convert = time.time()\n msg = \"Converting...\"\n print \"mp_cellomics2tiff:\",msg \n logging.info(msg)\n pool = multiprocessing.Pool(None)\n files = glob.glob(inputDir + \"/*.C01\")\n\n # http://stackoverflow.com/questions/8521883/multiprocessing-pool-map-and-function-with-two-arguments\n r = pool.map(cellomics2tiff, zip(files,repeat(outputDir)))\n msg = \"Time elapsed: \" + str(time.time() - start_time_convert) + \"s\"\n print \"mp_cellomics2tiff:\",msg\n logging.info(msg)", "def f2tif(path,is_gray=1): \n# import tifffile\n import tqdm\n print(\"==============================================\")\n print(\"Convert file to tif stack!\")\n pathout = path[:-4]+'_'+str(is_gray)+'.tif' \n video = mp.VideoFileClip(path)\n i=0\n for fr in tqdm.tqdm(video.iter_frames()):\n if is_gray == 1:\n 
fr= cv2.cvtColor(fr, cv2.COLOR_BGR2GRAY) \n if i == 0:\n tifffile.imwrite(pathout,fr, append=False)\n else:\n tifffile.imwrite(pathout,fr, append=True)\n i += 1\n print(\"==============================================\")\n print(\"TIF convertion Done!\")\n print(\"nFrames=\"+str(i))\n video.reader.close()# To fix handel error problem", "def ascii_to_tiff(infile, outfile, refIm):", "def imgCIF(cif_file):\n\n cbf_handle = pycbf.cbf_handle_struct()\n cbf_handle.read_file(cif_file, pycbf.MSG_DIGEST)\n\n return ScanFactory.imgCIF_H(cif_file, cbf_handle)", "def ConvertAnat(self):\n if self.verbose:\n print 'Convert T1 and T2 images...'\n for entry in self.info:\n info = self.info[entry]\n if self.info[entry]['imgfile'] is None:\n continue\n if self.info[entry]['type'] in self.anat_types:\n key = self.info[entry]['type']\n imgfile = self.info[entry]['imgfile']\n cmd = 'convert_file %s %s %s %s' % (self.flip_opts, entry, \\\n imgfile, self.info[entry]['filetype'])\n checkfile = '%s%s' % (imgfile, self.info[entry]['suffix'])\n self.CheckExec(cmd, [checkfile])\n if self.info[entry]['norm_src'] and self.skull_strip:\n cmd = \"3dSkullStrip -input %s -prefix %s\" % \\\n (checkfile, self.info[entry]['imgfile_skstrip'])\n checkfile = '%s+orig.BRIK' % \\\n (self.info[entry]['imgfile_skstrip'])\n self.CheckExec(cmd, [checkfile])", "def convert(model_file, thermo_file=None, transport_file=None, path=''):\n # check whether Chemkin or Cantera model\n basename = os.path.splitext(os.path.basename(model_file))[0]\n extension = os.path.splitext(os.path.basename(model_file))[1]\n\n # Chemkin files can have multiple extensions, so easier to check if Cantera\n if extension == '.cti':\n # Convert from Cantera to Chemkin format.\n logging.info('Converter detected Cantera input model: ' + model_file)\n logging.info('Converting to Chemkin format.')\n\n solution = ct.Solution(model_file)\n converted_files = soln2ck.write(solution, basename + '.inp', path=path)\n return converted_files\n else:\n # Convert from Chemkin to Cantera format.\n logging.info('Converter detected Chemkin input model: ' + model_file)\n logging.info('Converting to Cantera format.')\n\n converted_file = os.path.join(path, basename + '.cti')\n\n # calls ck2cti based on given files\n args = [f'--input={model_file}']\n if thermo_file:\n args.append(f'--thermo={thermo_file}')\n if transport_file:\n args.append(f'--transport={transport_file}')\n args.append(f'--output={converted_file}')\n \n # generally Chemkin files have issues (redundant species, etc.) 
that require this argument\n args.append('--permissive')\n\n ck2cti.main(args)\n return converted_file", "def convert_filetype(infile, outfile, intype='xyz', outtype='mol'):\n try:\n conv = openbabel.OBConversion()\n conv.OpenInAndOutFiles(infile, outfile)\n conv.SetInAndOutFormats(intype, outtype)\n conv.Convert()\n conv.CloseOutFile()\n except Exception as e:\n print \"Error {}.\".format(e)", "def _bif2bayesian(pathname, verbose=3):\n if verbose>=3: print('[bnlearn] >Loading bif file <%s>' %(pathname))\n\n bifmodel=readwrite.BIF.BIFReader(path=pathname)\n\n try:\n model = BayesianModel(bifmodel.variable_edges)\n model.name = bifmodel.network_name\n model.add_nodes_from(bifmodel.variable_names)\n\n tabular_cpds = []\n for var in sorted(bifmodel.variable_cpds.keys()):\n values = bifmodel.variable_cpds[var]\n cpd = TabularCPD(var, len(bifmodel.variable_states[var]), values,\n evidence=bifmodel.variable_parents[var],\n evidence_card=[len(bifmodel.variable_states[evidence_var])\n for evidence_var in bifmodel.variable_parents[var]])\n tabular_cpds.append(cpd)\n\n model.add_cpds(*tabular_cpds)\n# for node, properties in bifmodel.variable_properties.items():\n# for prop in properties:\n# prop_name, prop_value = map(lambda t: t.strip(), prop.split('='))\n# model.node[node][prop_name] = prop_value\n\n return model\n\n except AttributeError:\n raise AttributeError('[bnlearn] >First get states of variables, edges, parents and network names')", "def _bif2bayesian(pathname, verbose=3):\n if verbose>=3: print('[bnlearn] >Loading bif file <%s>' %(pathname))\n\n bifmodel=readwrite.BIF.BIFReader(path=pathname)\n\n try:\n model = BayesianModel(bifmodel.variable_edges)\n model.name = bifmodel.network_name\n model.add_nodes_from(bifmodel.variable_names)\n\n tabular_cpds = []\n for var in sorted(bifmodel.variable_cpds.keys()):\n values = bifmodel.variable_cpds[var]\n cpd = TabularCPD(var, len(bifmodel.variable_states[var]), values,\n evidence=bifmodel.variable_parents[var],\n evidence_card=[len(bifmodel.variable_states[evidence_var])\n for evidence_var in bifmodel.variable_parents[var]])\n tabular_cpds.append(cpd)\n\n model.add_cpds(*tabular_cpds)\n# for node, properties in bifmodel.variable_properties.items():\n# for prop in properties:\n# prop_name, prop_value = map(lambda t: t.strip(), prop.split('='))\n# model.node[node][prop_name] = prop_value\n\n return model\n\n except AttributeError:\n raise AttributeError('[bnlearn] >First get states of variables, edges, parents and network names')", "def mif_to_fib():\n parser = ArgumentParser(\n description='qsiprep: Convert MRtrix mif file to DSI Studio fib file',\n formatter_class=RawTextHelpFormatter)\n\n parser.add_argument('--mif',\n type=os.path.abspath,\n required=True,\n action='store',\n default='',\n help='MRtrix mif file to convert')\n parser.add_argument('--fib',\n required=True,\n action='store',\n type=os.path.abspath,\n default='',\n help='the output path for the DSI Studio fib file')\n parser.add_argument('--mask',\n required=False,\n action='store',\n type=os.path.abspath,\n help='a NIfTI-1 format mask file.')\n parser.add_argument('--num_fibers',\n required=False,\n action='store',\n type=int,\n default=5,\n help='maximum number of fixels per voxel.')\n parser.add_argument('--unit-odf',\n required=False,\n action='store_true',\n help='force ODFs to sum to 1.')\n opts = parser.parse_args()\n if opts.mask is not None:\n converter = FODtoFIBGZ(mif_file=opts.mif,\n fib_file=opts.fib,\n num_fibers=opts.num_fibers,\n unit_odf=opts.unit_odf,\n 
mask_file=opts.mask)\n else:\n converter = FODtoFIBGZ(mif_file=opts.mif,\n fib_file=opts.fib,\n num_fibers=opts.num_fibers,\n unit_odf=opts.unit_odf)\n converter.run()", "def test_convert_Ti_to_FLX(self):\r\n sff_flx_fp = os.path.join(self.sff_dir, 'test_FLX.sff')\r\n sff_flx_gz_fp = os.path.join(self.gz_sff_dir, 'test_FLX_gz.sff')\r\n convert_Ti_to_FLX(self.sff_fp, sff_flx_fp)\r\n convert_Ti_to_FLX(self.sff_gz_fp, sff_flx_gz_fp)\r\n self.assertNotEqual(os.path.getsize(sff_flx_fp), 0)\r\n self.assertNotEqual(os.path.getsize(sff_flx_gz_fp), 0)", "def extract_b0(in_file, b0_ixs, out_path=None):\n if out_path is None:\n out_path = fname_presuffix(in_file, suffix=\"_b0\")\n\n img = nb.load(in_file)\n bzeros = np.squeeze(np.asanyarray(img.dataobj)[..., b0_ixs])\n\n hdr = img.header.copy()\n hdr.set_data_shape(bzeros.shape)\n hdr.set_xyzt_units(\"mm\")\n nb.Nifti1Image(bzeros, img.affine, hdr).to_filename(out_path)\n return out_path", "def test_no_conversion(pdf, config, hs, exported):\n get_info = partial(\n _get_bti_info,\n rotation_x=0.0,\n translation=(0.0, 0.02, 0.11),\n convert=False,\n ecg_ch=\"E31\",\n eog_ch=(\"E63\", \"E64\"),\n rename_channels=False,\n sort_by_ch_name=False,\n )\n\n raw_info, _ = get_info(pdf, config, hs, convert=False)\n raw_info_con = read_raw_bti(\n pdf_fname=pdf,\n config_fname=config,\n head_shape_fname=hs,\n convert=True,\n preload=False,\n ).info\n\n pick_info(\n raw_info_con, pick_types(raw_info_con, meg=True, ref_meg=True), copy=False\n )\n pick_info(raw_info, pick_types(raw_info, meg=True, ref_meg=True), copy=False)\n bti_info = _read_bti_header(pdf, config)\n dev_ctf_t = _correct_trans(bti_info[\"bti_transform\"][0])\n assert_array_equal(dev_ctf_t, raw_info[\"dev_ctf_t\"][\"trans\"])\n assert_array_equal(raw_info[\"dev_head_t\"][\"trans\"], np.eye(4))\n assert_array_equal(raw_info[\"ctf_head_t\"][\"trans\"], np.eye(4))\n\n nasion, lpa, rpa, hpi, dig_points = _read_head_shape(hs)\n dig, t, _ = _make_bti_dig_points(\n nasion, lpa, rpa, hpi, dig_points, convert=False, use_hpi=False\n )\n\n assert_array_equal(t[\"trans\"], np.eye(4))\n\n for ii, (old, new, con) in enumerate(\n zip(dig, raw_info[\"dig\"], raw_info_con[\"dig\"])\n ):\n assert_equal(old[\"ident\"], new[\"ident\"])\n assert_array_equal(old[\"r\"], new[\"r\"])\n assert not np.allclose(old[\"r\"], con[\"r\"])\n\n if ii > 10:\n break\n\n ch_map = {ch[\"chan_label\"]: ch[\"loc\"] for ch in bti_info[\"chs\"]}\n\n for ii, ch_label in enumerate(raw_info[\"ch_names\"]):\n if not ch_label.startswith(\"A\"):\n continue\n t1 = ch_map[ch_label] # correction already performed in bti_info\n t2 = raw_info[\"chs\"][ii][\"loc\"]\n t3 = raw_info_con[\"chs\"][ii][\"loc\"]\n assert_allclose(t1, t2, atol=1e-15)\n assert not np.allclose(t1, t3)\n idx_a = raw_info_con[\"ch_names\"].index(\"MEG 001\")\n idx_b = raw_info[\"ch_names\"].index(\"A22\")\n assert_equal(raw_info_con[\"chs\"][idx_a][\"coord_frame\"], FIFF.FIFFV_COORD_DEVICE)\n assert_equal(\n raw_info[\"chs\"][idx_b][\"coord_frame\"], FIFF.FIFFV_MNE_COORD_4D_HEAD\n )", "def convert_to_1best_format(infname,outfname):\n with codecs.open(outfname,'w','utf-8') as outfile:\n for sent_no, parsed_lines in iterate_nbest_list(infname): \n outfile.write(parsed_lines[0][1].strip()+u'\\n')", "def save_tiff(self, to_file=None):\n self.tif_file.clear() # Empty the array first\n\n # Header\n byteo = 'II'\n if self.byteOrder != 'little':\n byteo = 'MM'\n self.tif_file.insert_bytes(list(byteo.encode())) # byte order\n self.tif_file.insert_int(42, 2) # Magic number\n 
self.tif_file.insert_int(8, 4) # first IFD always at 0x08\n\n for ifd in self.ifds:\n # self.calculateIFDSpace(ifd) # Readjusts counts because of changes to image data\n endpos = self.save_ifd(ifd)\n self.save_image(ifd, endpos)\n\n self.tif_file.write(to_file) # lastly, write to file", "def to_cif(self, container):\n raise NotImplementedError(\"BaseRecord does not implement to_cif.\")", "def transform_output_anaylsis(self):\n\n for ix, curbbattinst in enumerate(self.inputlist):\n # set up files and file names\n source_file_of_interest: str = curbbattinst.source_file\n source_file_of_interest_basename: str = os.path.splitext(source_file_of_interest)[0]\n\n # look at Status Files\n pardir = os.path.dirname(source_file_of_interest)\n statusfiles = [f for f in os.listdir(pardir) if os.path.isfile(os.path.join(pardir, f)) and f.endswith(\"STATUS\")]\n\n if len(statusfiles) > 1:\n raise Exception(\"Multiple Status Files during BBMCTS-Batch\")\n\n if len(statusfiles) == 1:\n statusfile = statusfiles[0].split(\".\")[-1]\n status = int(statusfile.split('_')[0])\n self.mcts_statesequences[ix].status = status\n check_for_errors_index = min(status + 1, self.path_length-1) # the next transformer, but if all were possible, the last one\n else:\n self.mcts_statesequences[ix].status = -1\n check_for_errors_index = 0\n\n # log if we had unexpected errors, if no state, 1st transformer caused problems, otherwise use status ...\n error_file_transf = os.path.join(source_file_of_interest_basename + \".stderr\")\n self.__check_error_file_transformations(err_file=error_file_transf, attinstance=curbbattinst,\n iteration=ix, check_index=check_for_errors_index)\n\n # trim sequence to only valid transformers\n for i in range(len(self.mcts_statesequences[ix].states)-1, self.mcts_statesequences[ix].status, -1):\n del self.mcts_statesequences[ix].states[i]\n assert len(self.mcts_statesequences[ix].states) == self.mcts_statesequences[ix].status+1", "def load_tiff(self):\n\n try:\n # Byte order\n h = bytes(self.tif_file.read(2))\n self.byteOrder = {b'II': 'little', b'MM': 'big'}[h]\n assert (self.byteOrder == 'little' or self.byteOrder == 'big')\n self.tif_file.set_byte_order(self.byteOrder)\n\n # Magic number\n self.magic = self.tif_file.read_int(2)\n assert (self.magic == 42)\n except (KeyError, AssertionError):\n raise InvalidTiffError(self.tif_file._filename, \"Incorrect header\")\n\n # IFD offset\n nextifd_offset = self.tif_file.read_int(4) # returns offset to first IFD\n\n # read in each IFD and image data\n while nextifd_offset != 0:\n ifd = self.read_ifd(nextifd_offset)\n self.ifds.append(ifd)\n self.read_image(ifd)\n nextifd_offset = ifd.nextifd", "def test_consitency_convert(self):\n name = os.path.basename(self.cbf_filename)\n obj = fabio.open(self.cbf_filename)\n new = obj.convert(\"cbf\")\n new.write(os.path.join(self.tempdir, name))\n other = fabio.open(os.path.join(self.tempdir, name))\n self.assertEqual(abs(obj.data - other.data).max(), 0, \"data are the same\")\n for key in obj.header:\n if key in[ \"filename\", \"X-Binary-Size-Padding\"]:\n continue\n self.assertTrue(key in other.header, \"Key %s is in header\" % key)\n self.assertEqual(obj.header[key], other.header[key], \"value are the same for key %s [%s|%s]\" % (key, obj.header[key], other.header[key]))", "def fib_to_mif():\n parser = ArgumentParser(\n description='qsiprep: Convert DSI Studio fib file to MRtrix mif file.',\n formatter_class=RawTextHelpFormatter)\n\n parser.add_argument('--fib',\n required=True,\n action='store',\n 
type=os.path.abspath,\n default='',\n help='DSI Studio fib file to convert')\n parser.add_argument('--mif',\n type=os.path.abspath,\n required=False,\n action='store',\n default='',\n help='output path for a MRtrix mif file')\n parser.add_argument('--ref_image',\n required=True,\n action='store',\n type=os.path.abspath,\n help='a NIfTI-1 format file with a valid q/sform.')\n parser.add_argument('--subtract-iso',\n required=False,\n action='store_true',\n help='subtract ODF min so visualization looks similar in mrview')\n opts = parser.parse_args()\n converter = FIBGZtoFOD(mif_file=opts.mif,\n fib_file=opts.fib,\n ref_image=opts.ref_image,\n subtract_iso=opts.subtract_iso)\n converter.run()", "def niss_to_cif(job_name):\n # We just need the job name so don't overcomplicate with configparser\n if job_name[-5:] == \".niss\":\n job_name = job_name[:-5]\n\n # put the niss part back on, even if we took it off already\n niss_name = \"%s.niss\" % job_name\n\n if path.exists(niss_name):\n print(\"Found niss file: %s\" % niss_name)\n load_niss = open(niss_name, 'rb')\n my_simulation = pickle.load(load_niss)\n load_niss.close()\n else:\n print(\"ERROR! %s not found\" % niss_name)\n return\n\n # the method from fapswitch accepts separate components\n cryst = my_simulation.structure\n cif_lines = cryst.to_cif()\n\n # Can't change the name for now; overwrites existing\n cif_file_name = \"%s.out.cif\" % job_name\n\n # Write the cif to the file\n output_file = open(cif_file_name, 'w')\n output_file.writelines(cif_lines)\n output_file.close()\n print(\"Wrote %s\" % cif_file_name)", "def convert_labelme_to_coco(path_to_data):\r\n # convert labelme annotations to coco\r\n labelme2coco.convert(path_to_data, path_to_data + r'\\coco_annotation.json')\r\n\r\n # Open the coco format data\r\n with open(path_to_data + r'\\coco_annotation.json') as f:\r\n coco_d = json.load(f)\r\n\r\n # Get the category IDs for each category and create a new \"categories\" section.\r\n categories = []\r\n # for category in coco_d['categories']:\r\n # if category['name'] == 'Bad':\r\n # categories.append({\"id\": category['id'],\r\n # \"name\": category['id'],\r\n # \"supercategory\": category['id'],\r\n # \"isthing\": 1,\r\n # \"color\": [222, 23, 1]\r\n # })\r\n # elif category['name'] == 'Good':\r\n # categories.append({\"id\": category['id'],\r\n # \"name\": \"Good\",\r\n # \"supercategory\": \"Good\",\r\n # \"isthing\": 1,\r\n # \"color\": [133, 23, 1]\r\n # })\r\n\r\n # Update the \"catogories\" section of the coco format data with the correct category IDs.\r\n # coco_d['categories'] = categories\r\n\r\n categories = []\r\n for cat in coco_d['categories']:\r\n cat['isthing'] = 1\r\n categories.append(cat['name'])\r\n\r\n # Fix the segmentation and bbox.\r\n for annot in coco_d['annotations']:\r\n annot['bbox_mode'] = 0\r\n seg = annot['segmentation'][0]\r\n annot['bbox'] = seg\r\n annot['segmentation'] = [[seg[0], seg[1], seg[0], seg[3], seg[2], seg[3], seg[2], seg[1]]]\r\n\r\n # Save the modified coco format data.\r\n with open(path_to_data + r'\\coco_annotation.json', 'w') as j:\r\n json.dump(coco_d, j, sort_keys=True, indent=4)\r\n\r\n # Show the images to the user to validate the annotations.\r\n # Register the image information.\r\n register_coco_instances(\"coco_visualise\", {}, path_to_data + r\"/coco_annotation.json\",\r\n path_to_data)\r\n MetadataCatalog.get(\"meta_visualise\").set(thing_classes=categories)\r\n # MetadataCatalog.get(\"meta_train\").set(thing_classes=[\"Bad\", \"Good\"], thing_colors=[(172, 0, 0), 
(229, 0, 0)])\r\n train_metadata = MetadataCatalog.get(\"meta_visualise\")\r\n coco_train_dataset = DatasetCatalog.get(\"coco_visualise\")\r\n\r\n st.write('Showing the randomly picked 5 images. Check if the annotation is correctly embedded.')\r\n # Randomly pick 5 images to show to the user to validate the annotations.\r\n for d in random.sample(coco_train_dataset, 5):\r\n im = Image.open(d['file_name'])\r\n im_array = np.asarray(im)\r\n v = Visualizer(im_array, metadata=train_metadata, instance_mode=ColorMode.SEGMENTATION, scale=0.5)\r\n v = v.draw_dataset_dict(d)\r\n pil_image = Image.fromarray(v.get_image())\r\n st.image(pil_image)\r\n # window = tk.Toplevel()\r\n # window.tkimage = ImageTk.PhotoImage(pil_image)\r\n # window.attributes('-topmost', True)\r\n # label = tk.Label(window, image=window.tkimage)\r\n # label.pack()\r\n # button_close = tk.Button(window, text=\"Close\", command=window.destroy)\r\n # button_close.pack(fill='x')\r\n\r\n # Confirm the annotations with user. If the annotations are correct, it will proceed further.\r\n # If not, it terminates the program.\r\n # if messagebox.askyesno(title=\"Validate Annotations\", message=\"Were all annotations correct?\"):\r\n # pass\r\n DatasetCatalog.clear()\r\n MetadataCatalog.clear()", "def raw_to_tif(file, channel=None ):\n \n def read_uint12(data_chunk):\n data = np.frombuffer(data_chunk, dtype=np.uint8)\n fst_uint8, mid_uint8, lst_uint8 = np.reshape(data, (data.shape[0] // 3, 3)).astype(np.uint16).T\n # fst_uint12 = (fst_uint8 << 4) + (mid_uint8 >> 4)\n # snd_uint12 = (lst_uint8 << 4) + (np.bitwise_and(15, mid_uint8))\n fst_uint12 = (fst_uint8 << 4) + (np.bitwise_and(15, mid_uint8))\n snd_uint12 = (lst_uint8 << 4) + (mid_uint8 >> 4)\n return np.reshape(np.concatenate((fst_uint12[:, None], snd_uint12[:, None]), axis=1), 2 * fst_uint12.shape[0])\n\n# def read_uint12(data_chunk):\n# data = np.frombuffer(data_chunk, dtype=np.uint8)\n# fst_uint8, mid_uint8, lst_uint8 = np.reshape(data, (data.shape[0] // 3, 3)).astype(np.uint16).T\n# fst_uint12 = (fst_uint8 << 4) + (mid_uint8 >> 4)\n# snd_uint12 = ((mid_uint8 % 16) << 8) + lst_uint8\n# return np.reshape(np.concatenate((fst_uint12[:, None], snd_uint12[:, None]), axis=1), 2 * fst_uint12.shape[0])\n\n# def read_uint12(data_chunk):\n# data = np.frombuffer(data_chunk, dtype=np.uint8)\n# fst_uint8, mid_uint8, lst_uint8 = np.reshape(data, (data.shape[0] // 3, 3)).astype(np.uint16).T\n# fst_uint12 = ((mid_uint8 & 0x0F) << 8) | fst_uint8\n# snd_uint12 = (lst_uint8 << 4) | ((mid_uint8 & 0xF0) >> 4)\n# return np.reshape(np.concatenate((fst_uint12[:, None], snd_uint12[:, None]), axis=1), 2 * fst_uint12.shape[0])\n \n # infile = 'd:\\\\Projekti\\\\Satelit\\\\CO\\\\Razpis\\\\Flat field images_new2020\\\\flatfield\\\\NHDBflat_1D'\n # infile = 'd:\\Projekti\\Satelit\\CO\\Razpis\\_POSNETKI\\Jure_naloga_banje_raw_pyt\\\\NHDRGoreMorje_3D'\n\n # in_path = 'p:\\\\NEMO\\Posnetki\\\\20201014_GoreMorje_data\\cele\\\\'\n # in_path = 'd:\\Projekti\\Satelit\\CO\\Razpis\\_POSNETKI\\Peking_PAN\\\\'\n # in_image_files = [filename for filename in os.listdir(in_path) if filename.lower().startswith(\"nhd\") and filename.lower().endswith(\"d\")]\n\n \n # infile = in_path + in_image_files[i]\n with open(file, 'rb', buffering=10) as f: # problem pri branju podatkov?\n byte = f.read()\n print(file)\n # # ar = open(infile, 'rb')\n # buffer = BytesIO()\n # byte = BytesIO(ar)\n \n img = read_uint12(byte)\n print(img)\n \n if channel==\"P\":\n img = img.reshape((2748, 3664)) # PAN\n else:\n img = img.reshape((2050, 
2448)) # MS\n # img = img.reshape((2748, 3664)) # PAN\n\n size = img.shape\n \n \n out = file[:-4]+ \"_py.tif\"\n\n driver = gdal.GetDriverByName('GTiff')\n\n outRaster = driver.Create(out, size[1], size[0], 1, gdal.GDT_UInt16)\n\n outband = outRaster.GetRasterBand(1)\n outband.WriteArray(img)\n outband.FlushCache()", "def zip_imagenet100c():\n #First make sure the directory we are given is correct!\n if not os.path.isdir(DATA_SRC_ROOT):\n raise Exception(\"Bad filepath given\")\n\n #create the destiantion directories if they don't exist\n if not os.path.isdir(IMAGENET100_DIR):\n os.mkdir(IMAGENET100_DIR)\n\n #grab the subset wnids for the 100 class-subset\n with open(IMAGENET100_CLASSES) as f:\n subset_wnids = f.readlines()\n subset_wnids = [x.strip() for x in subset_wnids] #list of the 100 WNIDs we grab\n\n #Grab the names of all of the folders inside the root data source\n #Structure is distortion/sub_distortion/level/wnids\n for distortion in os.listdir(DATA_SRC_ROOT):\n if distortion != \"meta.bin\":\n print(distortion)\n\n folder_path = os.path.join(DATA_SRC_ROOT, distortion)\n\n if not os.path.isdir(folder_path):\n continue\n\n for sub_distortion in os.listdir(folder_path):\n print(sub_distortion)\n\n subfolder_path = os.path.join(folder_path, sub_distortion)\n\n if not os.path.isdir(subfolder_path):\n continue\n\n for level in os.listdir(subfolder_path):\n print(level)\n\n level_path = os.path.join(subfolder_path, level)\n\n #grab the correcrt validation d9recotires\n for wnid in os.listdir(level_path):\n wnid_path = os.path.join(level_path, wnid)\n\n if not os.path.isdir(wnid_path):\n continue\n\n if wnid in subset_wnids:\n dest_path = os.path.join(IMAGENET100_DIR, distortion, sub_distortion, level, wnid)\n\n shutil.copytree(wnid_path, dest_path)\n\n #copy the metadata bin file\n meta_file = os.path.join(DATA_SRC_ROOT, 'meta.bin')\n meta_dest = os.path.join(IMAGENET100_DIR, 'meta.bin')\n\n shutil.copy(meta_file, meta_dest)\n\n #Zip the destinatio file\n shutil.make_archive(ZIP_PATH + '/ImageNet100C', 'tar', IMAGENET100_DIR)", "def test_single_file_cup_string(self):\r\n # convert_biom using otu_table w/o leading #\r\n bt_string = (\r\n '{\"rows\": [{\"id\": \"1\", \"metadata\": null}, {\"id\": \"2\",'\r\n '\"metadata\": null}, {\"id\": \"3\", \"metadata\": null}, {\"id\": \"4\", '\r\n '\"metadata\": null}, {\"id\": \"5\", \"metadata\": null}], \"format\": '\r\n '\"Biological Observation Matrix 0.9.1-dev\", \"data\": [[0, 0, 3.0], '\r\n '[0, 1, 4.0], [1, 0, 2.0], [1, 1, 5.0], [2, 0, 1.0], [2, 1, 2.0], '\r\n '[3, 1, 4.0], [4, 0, 1.0]], \"columns\": [{\"id\": \"S1\", \"metadata\": '\r\n 'null}, {\"id\": \"S2\", \"metadata\": null}], \"generated_by\": '\r\n '\"BIOM-Format 0.9.1-dev\", \"matrix_type\": \"sparse\", \"shape\": '\r\n '[5, 2], \"format_url\": \"http://biom-format.org\", \"date\": '\r\n '\"2012-05-04T09:28:28.247809\", \"type\": \"OTU table\", \"id\": null, '\r\n '\"matrix_element_type\": \"float\"}')\r\n\r\n with open(self.tmp_file, 'w') as fh:\r\n fh.write(bt_string)\r\n\r\n single_file_cup(self.tmp_file, 'lladser_pe,lladser_ci',\r\n self.tmp_outfile, r=4)\r\n\r\n # Not much testing here, just make sure we get back a (formatted)\r\n # matrix with the right dimensions\r\n with open(self.tmp_outfile, 'U') as out_f:\r\n observed = out_f.readlines()\r\n self.assertEqual(len(observed), 3)\r\n self.assertEqual(len(observed[1].split('\\t')), 4)", "def test_convert_azfp_01a_matlab_raw(azfp_path):\n azfp_01a_path = azfp_path / '17082117.01A'\n azfp_xml_path = azfp_path / 
'17041823.XML'\n azfp_matlab_data_path = azfp_path / 'from_matlab/17082117_matlab_Data.mat'\n azfp_matlab_output_path = azfp_path / 'from_matlab/17082117_matlab_Output_Sv.mat'\n\n # Convert file\n echodata = open_raw(\n raw_file=azfp_01a_path, sonar_model='AZFP', xml_path=azfp_xml_path\n )\n\n # Read in the dataset that will be used to confirm working conversions. (Generated by Matlab)\n ds_matlab = loadmat(azfp_matlab_data_path)\n ds_matlab_output = loadmat(azfp_matlab_output_path)\n\n # Test beam group\n # frequency\n assert np.array_equal(\n ds_matlab['Data']['Freq'][0][0].squeeze(),\n echodata[\"Sonar/Beam_group1\"].frequency_nominal / 1000,\n ) # matlab file in kHz\n # backscatter count\n assert np.array_equal(\n np.array(\n [ds_matlab_output['Output'][0]['N'][fidx] for fidx in range(4)]\n ),\n echodata[\"Sonar/Beam_group1\"].backscatter_r.values,\n )\n\n # Test vendor group\n # Test temperature\n assert np.array_equal(\n np.array([d[4] for d in ds_matlab['Data']['Ancillary'][0]]).squeeze(),\n echodata[\"Vendor_specific\"].ancillary.isel(ancillary_len=4).values,\n )\n assert np.array_equal(\n np.array([d[0] for d in ds_matlab['Data']['BatteryTx'][0]]).squeeze(),\n echodata[\"Vendor_specific\"].battery_tx,\n )\n assert np.array_equal(\n np.array(\n [d[0] for d in ds_matlab['Data']['BatteryMain'][0]]\n ).squeeze(),\n echodata[\"Vendor_specific\"].battery_main,\n )\n # tilt x-y\n assert np.array_equal(\n np.array([d[0] for d in ds_matlab['Data']['Ancillary'][0]]).squeeze(),\n echodata[\"Vendor_specific\"].tilt_x_count,\n )\n assert np.array_equal(\n np.array([d[1] for d in ds_matlab['Data']['Ancillary'][0]]).squeeze(),\n echodata[\"Vendor_specific\"].tilt_y_count,\n )\n\n # check convention-required variables in the Platform group\n check_platform_required_scalar_vars(echodata)", "def proc_one(filename):\n (rate, sig) = wav.read(filename)\n assert rate == samp_rate\n # since templates have max value of 32768, normalise it\n if sig.max() > 1:\n sig = sig / 32768\n # Normalise so that max-value is 1\n sig = sig / max(sig)\n\n # calculate MFCC\n feat = mfcc(sig, samplerate=samp_rate, winlen=win_length / 1000, winstep=hop / 1000, preemph=0.95, numcep=14,\n winfunc=np.hamming)\n # print(sig.shape, feat.shape)\n return feat", "def load_cifar10_batch(directory):\n with open(directory, 'rb') as fo:\n datadict = pickle.load(fo, encoding='bytes')\n X = np.array(datadict[b'data'])\n Y = np.array(datadict[b'labels'])\n return X, Y", "def convert_to_nbest_format(infname,outfname):\n with codecs.open(infname,'r','utf-8') as infile: \n with codecs.open(outfname,'w','utf-8') as outfile: \n for n,line in enumerate(iter(infile)):\n outfile.write( u'{} ||| {} ||| {} ||| {}\\n'.format( n, line.strip(), \n u'Distortion0= 0 LM0= 0 WordPenalty0= 0 PhrasePenalty0= 3 TranslationModel0= 0 0 0 0', u'0' ) )" ]
[ "0.58994037", "0.5708016", "0.5645735", "0.5637907", "0.5601799", "0.5523944", "0.54352075", "0.5245638", "0.52050906", "0.52050906", "0.5131295", "0.51254326", "0.509316", "0.50552475", "0.5052939", "0.5020321", "0.49904832", "0.4942433", "0.49388912", "0.49288186", "0.4922492", "0.48994583", "0.4894313", "0.4885783", "0.48777083", "0.48599583", "0.4858221", "0.48581308", "0.48480728", "0.48467612" ]
0.6668941
0
Constructs a shortened representation of a PdObject.
def short_repr(obj: PdObject, length_guide: int = 8) -> str: if isinstance(obj, list): lower_length_guide = (length_guide + 1) // 2 if len(obj) > 2 * length_guide: return "[{}, ({} more), {}]".format( ", ".join(short_repr(e, lower_length_guide) for e in obj[:length_guide]), len(obj) - 2 * length_guide, ", ".join(short_repr(e, lower_length_guide) for e in obj[-length_guide:])) else: return "[{}]".format(", ".join(short_repr(e, lower_length_guide) for e in obj)) return repr(obj)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def pformat(object):\r\n return PrettyPrinter().pformat(object)", "def stringReco(obj):\n name = obj.get_name()\n name = obj._pid if (name is None) else name\n return (\"pdg: \" + name + \" E: \" + str(obj._E)\n + \" px: \" + str(obj._px) + \" py: \" + str(obj._py)\n + \" pz: \"+ str(obj._pz) + \" mass: \" + str(obj._m))", "def stringify_short(self):\n return self.stringify()", "def ObjectDescription(object_id):\n rhobj = rhutil.coercerhinoobject(object_id, True, True)\n return rhobj.ShortDescription(False)", "def _compact_class_repr(obj):\n dict_str_list = []\n post_repr_string = \"\"\n\n # If features are present, then shorten it.\n init_func = obj.__init__\n if _sys.version_info.major == 2:\n init_func = init_func.__func__\n\n fields = _inspect.getargspec(init_func).args\n fields = fields[1:] # remove self\n if 'features' in fields:\n fields.remove('features')\n features = obj.get(\"features\")\n if features is not None:\n post_repr_string = ' on %s feature(s)' % len(features)\n if 'excluded_features' in fields:\n fields.remove('excluded_features')\n\n # GLC transformers.\n if issubclass(obj.__class__, _Transformer):\n for attr in fields:\n dict_str_list.append(\"%s=%s\" % (attr, obj.get(attr).__repr__()))\n\n # Chains\n elif obj.__class__ == TransformerChain:\n _step_classes = list(map(lambda x: x.__class__.__name__, obj.get('steps')))\n _steps = _internal_utils.pretty_print_list(\n _step_classes, 'steps', False)\n dict_str_list.append(_steps)\n\n # For user defined transformers.\n else:\n for attr in fields:\n dict_str_list.append(\"%s=%s\" % (attr, obj.__dict__[attr]))\n\n return \"%s(%s)%s\" % (obj.__class__.__name__, \", \".join(dict_str_list),\n post_repr_string)", "def serialize_str(self, obj):\n if len(obj) < 0x100:\n return 'U' + struct.pack('<B', len(obj)) + obj\n return 'T' + struct.pack('<I', len(obj)) + obj", "def _convert_construction_info_to_string(obj):\n if not hasattr(obj, '_constructor_args'):\n raise AttributeError('obj has no attribute _constructor_args.')\n import StringIO\n output = StringIO.StringIO()\n info = {}\n info['_module_name'] = obj.__class__.__module__\n info['_class_name'] = obj.__class__.__name__\n encoded_constructor_args = {}\n for k, v in obj._constructor_args.items():\n if isinstance(v, chainer.Link):\n encoded_constructor_args[k] \\\n = _convert_construction_info_to_string(v)\n elif isinstance(v, six.string_types):\n encoded_constructor_args[k] = 'STR' + v\n else:\n encoded_constructor_args[k] = v\n info['_encoded_constructor_args'] = encoded_constructor_args\n numpy.save(output, arr=info)\n encoded_construction_info = 'OBJ' + output.getvalue()\n output.close()\n return encoded_construction_info", "def __repr__(self) -> str:\r\n\r\n return 'HealthDominoDataObject({}, {}, {})'.format(repr(self.data),\r\n self.version,\r\n self.compatibilityLimit)", "def serialize(self, obj):\n return dill.dumps(obj, 0).decode('latin-1')", "def format(self, obj, indent=0):\r\n return pformat(obj, indent=indent, depth=self.depth)", "def srepr(obj):\n return repr(str(obj))", "def __repr__(self):\n return \"<PID_onject P: %s I: %s D: %s>\"\\\n % (self.K[0], self.K[1], self.K[2])", "def __repr__(self):\n cls = self.__class__.__name__\n return '%s(%s)' % (cls, repr(self.d))", "def toString(self, obj):\n return Box(shareID=obj.shareID.encode('utf-8'),\n localpart=obj.localpart.encode('utf-8'),\n domain=obj.domain.encode('utf-8')).serialize()", "def __str__(self):\n s = \"Ext Object (Type: 0x%02x, Data: \" % self.type\n s += \" \".join([\"0x%02x\" % ord(self.data[i:i + 
1])\n for i in xrange(min(len(self.data), 8))])\n if len(self.data) > 8:\n s += \" ...\"\n s += \")\"\n return s", "def to_short_string(self):\n return f'{self.name} - {self.resource_type}'", "def obj_pretty(objective):\n if objective.objective_type == u'/datum/objective/assassinate':\n return 'Asassinate {} the {}.'.format(objective.target_name, objective.target_role)\n else:\n return objective.objective_desc", "def __str__(self):\n return '[{}] ({}) {}/{} - {}'.\\\n format(type(self).__name__, self.id, self.x, self.y, self.size)", "def get_str(self, obj):\n if self.pretty:\n return pprint.pformat(obj)\n else:\n return str(obj)", "def pack(self, obj):\n # TODO: use a JSON encoder that handles more types?\n if obj is not None:\n return json.dumps(obj)", "def pformat(obj, incr=\" \"):\n def sub_pformat(obj):\n txt = pformat(obj, incr=incr)\n return indent(txt, incr)\n # Try short version.\n short_len = 60\n maybe_short = pp.pformat(obj)\n if \"\\n\" not in maybe_short and len(maybe_short) <= short_len:\n return maybe_short\n\n if isinstance(obj, list):\n out = f\"[\\n\"\n for obj_i in obj:\n out += sub_pformat(obj_i) + \",\\n\"\n out += f\"]\"\n return out\n elif isinstance(obj, dict):\n out = f\"{{\\n\"\n for k_i, obj_i in obj.items():\n txt = sub_pformat(obj_i)\n out += f\"{incr}{repr(k_i)}: {txt.strip()},\\n\"\n out += f\"}}\"\n return out\n else:\n return indent(pp.pformat(obj), incr)", "def repr_(object_):\n return repr(object_)", "def __str__(self) -> str:\r\n\r\n result = 'HealthDominoDataObject:\\nBODY:\\n=====\\n{}\\n=====\\nHEAD:\\n=====\\n'.format(self.data)\r\n result += '{:>22}: {}\\n{:>22}: {}\\n'.format('version', self.version,\r\n 'compatibilitiLimit', self.compatibilityLimit)\r\n if len(self.script) > 0:\r\n result += '{:>22}: {}\\n'.format('script', ' '.join(self.script))\r\n else:\r\n result += '{:>22}: {}\\n'.format('script', 'NO-SCIRPT')\r\n if self.seriesSignature != '':\r\n result += '{:>22}: {}\\n'.format('seriesSignature', self.seriesSignature)\r\n else:\r\n result += '{:>22}: {}\\n'.format('seriesSignature', 'NOT-ADDED')\r\n if self.pha != '':\r\n result += '{:>22}: {}\\n'.format('personalHealthAddress', self.pha)\r\n else:\r\n result += '{:>22}: {}\\n'.format('personalHealthAddress', 'NOT-ADDED')\r\n if len(self.identityInfo) > 0:\r\n for key, value in self.identityInfo.items():\r\n result += '{:>22}: {} -> {}\\n'.format('identityInfo', key, value)\r\n else:\r\n result += '{:>22}: {}\\n'.format('identityInfo', 'NOT-ADDED')\r\n if self.message != '':\r\n result += '{:>22}: {}\\n'.format('message', self.message)\r\n else:\r\n result += '{:>22}: {}\\n'.format('message', 'NOT-ADDED')\r\n if self.isClosed:\r\n result += '======\\nSTATE:\\n======\\n HealthDominoDataObject is already closed.\\n'\r\n else:\r\n result += '======\\nSTATE:\\n======\\n HealthDominoDataObject is not open for editing.\\n'\r\n if self.isTransmitted:\r\n result += ' HealthDominoDataObject is already transmitted.\\n'\r\n result += ' innerHash: {}\\n'.format(self.innerHash)\r\n result += ' outerHash: {}'.format(self.outerHash)\r\n else:\r\n result += ' HealthDominoDataObject is not yet transmitted.\\n'\r\n return result", "def str_(object_):\n return str(object_)", "def _construct_new_2d_object(new_xp,\n half_w,\n new_yp,\n half_l):\n\n new_x1 = float(new_xp - half_w)\n new_x2 = float(new_xp + half_w)\n new_y1 = float(new_yp - half_l)\n new_y2 = float(new_yp + half_l)\n\n new_obj = od.ObjectLabel()\n new_obj.x1 = new_x1\n new_obj.x2 = new_x2\n new_obj.y1 = new_y1\n new_obj.y2 = new_y2\n\n new_box = 
np.array([new_x1, new_y1, new_x2, new_y2])\n\n return new_obj, new_box", "def __repr__(self):\n return \"<katdal.{} '{}': shape {}, type {} at {:#x}>\".format(\n self.__class__.__name__, self.name, self.shape, self.dtype, id(self))", "def __unicode__(self):\n prepr = com.pprint_thing(self, escape_chars=('\\t', '\\r', '\\n'),\n quote_strings=True)\n return \"%s(%s, dtype='%s')\" % (type(self).__name__, prepr, self.dtype)", "def __repr__(self):\n return '<%r object, width=%r, height=%r>' % \\\n (self.__class__.__name__, self._width, self._height)", "def odump(obj, no_dunder=True, whelp=False):\n import builtins\n builtin_types = [ty for ty in builtins.__dict__.values() if isinstance(ty, type)]\n print(type(obj))\n if type(obj) not in builtin_types and hasattr(obj, '__doc__') and getattr(obj, '__doc__'):\n print(getattr(obj, '__doc__'))\n print()\n for attr in dir(obj):\n if no_dunder and attr.startswith('__'):\n continue\n oattr = getattr(obj, attr)\n if hasattr(oattr, '__class__'):\n tdesc = f'({oattr.__class__.__name__})'\n else:\n tdesc = f'({str(type(attr))})'\n if callable(oattr):\n soattr = '<function or method>'\n tdesc = ''\n else:\n try:\n soattr = str(oattr)\n if not soattr:\n soattr = \"''\"\n except TypeError as exc:\n # Some objects return wrong (non-string) results for str() call,\n # (raising exception like \"TypeError: __str__ returned non-string (type list)\")\n soattr = f'ERROR: string representation of an attribute could not be computed ({exc}))'\n print(f'.{attr:20} = {soattr:5} {tdesc}', end='')\n if whelp and hasattr(oattr, '__doc__') and getattr(oattr, '__doc__'):\n if type(oattr) in builtin_types:\n print(f' (builtin)')\n else:\n print(f\"\\n {getattr(oattr, '__doc__')}\\n\")", "def __repr__(self):\n return \"<katdal.{} '{}': shape {}, type {} at {:#x}>\".format(\n self.__class__.__name__, self.name, self.shape, self.dtype, id(self))" ]
[ "0.5644744", "0.5611544", "0.5591873", "0.54736", "0.54563993", "0.53513646", "0.53206956", "0.53024477", "0.52644145", "0.5253599", "0.5235866", "0.52308536", "0.5215566", "0.52139914", "0.5204089", "0.51857984", "0.5153454", "0.5145673", "0.51341456", "0.5121201", "0.5112726", "0.51122296", "0.51121205", "0.5085643", "0.5084748", "0.5083377", "0.50684696", "0.50647116", "0.50573087", "0.50566846" ]
0.62185085
0
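A minimal, runnable sketch of the truncation behaviour documented in the record above. PdObject is a union type from the source project, so the sketch assumes plain Python ints and nested lists in its place; the printed line shows the abbreviated form for a 100-element list.

from typing import Union

NestedInts = Union[int, list]  # stand-in for the project's PdObject union

def short_repr_sketch(obj: NestedInts, length_guide: int = 8) -> str:
    # Lists longer than 2 * length_guide keep only the first and last
    # length_guide elements and report how many were elided in between.
    if isinstance(obj, list):
        inner = (length_guide + 1) // 2  # nested lists get a tighter budget
        if len(obj) > 2 * length_guide:
            head = ", ".join(short_repr_sketch(e, inner) for e in obj[:length_guide])
            tail = ", ".join(short_repr_sketch(e, inner) for e in obj[-length_guide:])
            return "[{}, ({} more), {}]".format(head, len(obj) - 2 * length_guide, tail)
        return "[{}]".format(", ".join(short_repr_sketch(e, inner) for e in obj))
    return repr(obj)

print(short_repr_sketch(list(range(100))))
# [0, 1, 2, 3, 4, 5, 6, 7, (84 more), 92, 93, 94, 95, 96, 97, 98, 99]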
Return the count, sum, and sum of squares, deeply accumulated over the object.
def pd_deep_stats(obj: PdObject) -> Tuple[int, Union[int, float, complex], Union[int, float, complex]]: if isinstance(obj, Block): raise TypeError('Cannot deeply accumulate stats over block ' + repr(obj)) if isinstance(obj, (Char, int, float, complex)): v = num.numerify(obj) return (1, v, v**2) else: c: int = 0 s: Union[int, float, complex] = 0 q: Union[int, float, complex] = 0 for e in pd_iterable(obj): c1, s1, q1 = pd_deep_stats(e) c += c1 s += s1 # type: ignore q += q1 # type: ignore return (c, s, q)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def list_fun(obj):\n list_memory_sum = 0 # used to calculate total memory occupied by list elements\n for item in obj:\n if type(item) != dict:\n list_memory_sum = list_memory_sum + norm_fun(item)\n else:\n list_memory_sum = list_memory_sum + dict_fun(item)\n return list_memory_sum", "def sum(self):\n return self._summarize(lambda c: c.sum)", "def dict_fun(obj):\n dict_memory_sum = 0 # calculates the total memory used by fields in a dictionary\n for each_key in obj.keys():\n dict_obj_val = obj[each_key]\n if type(dict_obj_val) == list:\n dict_memory_sum = dict_memory_sum + list_fun(dict_obj_val)\n elif type(dict_obj_val) == dict:\n dict_memory_sum = dict_memory_sum + dict_fun(obj[each_key])\n else:\n dict_memory_sum = dict_memory_sum + norm_fun(obj[each_key])\n return dict_memory_sum + list_fun(obj.keys())", "def sum(self):\n import numpy as np\n\n # covering zero-matrices\n if self.child_nodes == {}:\n return self.null_value\n\n def sum_rec(node, offset):\n # making sure the node exists\n if not node:\n return 0\n # checking whether the node is a leaf\n elif node.is_leaf():\n return np.sum(node.dtype.to_mat(node, offset))\n else:\n tmp_result = 0\n # the recursive call\n # checking for the kind of diagram. MTxxx?\n if self.offsets == {}:\n for edge_name in node.child_nodes:\n tmp_result += sum_rec(node.child_nodes[edge_name], node.dtype.to_mat(node, 0, 0))\n # or edge-value dd?\n else:\n for edge_name in node.child_nodes:\n tmp_result += sum_rec(node.child_nodes[edge_name], node.dtype.to_mat(node,\n node.offsets[edge_name],\n offset))\n\n return tmp_result\n\n return sum_rec(self, None)", "def total(self):\n return sum(self.meta) + sum(child.total() for child in self.children)", "def _sum(self):\n s = 0\n for element, value in self.items():\n s += value\n return s", "def calculate_sum(self):\n\n left_sum = self.left.calculate_sum() if self.left else 0\n right_sum = self.right.calculate_sum() if self.right else 0\n return self.data + left_sum + right_sum", "def sum(self):\n return self.aggregate(np.sum)", "def total(self):\n gd_total = self._grand_total()\n counts = self._get_as_dict_count()\n for rule in self.rules:\n gd_total += rule(counts)\n return gd_total", "def sum(self):\n return sum(self.items())", "def sum(self) -> FrameLike:\n return super().sum()", "def sum(self) -> FrameLike:\n return super().sum()", "def sum(self) -> FrameLike:\n return super().sum()", "def sum(self) -> FrameLike:\n return super().sum()", "def sumsquares(self):\n return np.dot((self.demeaned ** 2).T, self.weights)", "def get_nested_sum():\n l_int = [1,2,[], 3,[4,[], 5,[6]],[7],[8,9], 10,[[],11]]\n print 'Sum:', nested_sum(l_int) \n return", "def sum(self) -> int:\n return self.root.sum", "def calculate_sum_of_all_attributes(self):\n\n sum = 0\n\n for key, val in self.__dict__.items():\n\n if isinstance(val, (int, float)):\n sum += val\n\n return sum", "def sum(self):\n return self._reduce_for_stat_function(F.sum, only_numeric=True)", "def trace(self):\n self.check_square()\n\n retval = 0.0\n for i in range(self.rows):\n retval += self[i, i]\n\n return retval", "def total(tree):\n if tree is None:\n return 0\n return total(tree.left) + total(tree.right) + tree.cargo", "def with_sum_sum_reduction(self):\n return self.with_reduction(lambda x: x.sum())", "def _nt_sum(cobj, prop, theta):\n # Build sum term\n i = 1\n s = 0\n while True:\n try:\n ni = getattr(cobj, f\"{prop}_coeff_n{i}\")\n ti = getattr(cobj, f\"{prop}_coeff_t{i}\")\n s += ni * theta**ti\n i += 1\n except AttributeError:\n break\n return s", "def 
sum(self):\n return sum(self.values)", "def sum_tree(t):\n \"*** YOUR CODE HERE ***\"\n if is_leaf(t):\n return entry(t)\n total = entry(t)\n for subtree in subtrees(t):\n total += sum_tree(subtree)\n return total", "def sum (self):\n return self.values.sum ()", "def sum (self):\n return self.values.sum ()", "def test_sum_product2():\n nodes = {1, 2, 3, 4, 5}\n edges = {1: [2, 3], 2: [1, 4, 5], 3: [1], 4: [2], 5: [2]}\n\n node_potentials = {1: {'blue': 0.5, 'green': 0.5},\n 2: {'blue': 0.5, 'green': 0.5},\n 3: {'blue': 0.6, 'green': 0.4},\n 4: {'blue': 0.8, 'green': 0.2},\n 5: {'blue': 0.8, 'green': 0.2}}\n edge_potentials = {(1, 2): {'blue': {'blue': 0, 'green': 1},\n 'green': {'blue': 1, 'green': 0}},\n (2, 1): {'blue': {'blue': 0, 'green': 1},\n 'green': {'blue': 1, 'green': 0}},\n (1, 3): {'blue': {'blue': 0, 'green': 1},\n 'green': {'blue': 1, 'green': 0}},\n (3, 1): {'blue': {'blue': 0, 'green': 1},\n 'green': {'blue': 1, 'green': 0}},\n (2, 4): {'blue': {'blue': 0, 'green': 1},\n 'green': {'blue': 1, 'green': 0}},\n (4, 2): {'blue': {'blue': 0, 'green': 1},\n 'green': {'blue': 1, 'green': 0}},\n (2, 5): {'blue': {'blue': 0, 'green': 1},\n 'green': {'blue': 1, 'green': 0}},\n (5, 2): {'blue': {'blue': 0, 'green': 1},\n 'green': {'blue': 1, 'green': 0}}}\n\n marginals = sum_product(nodes, edges, node_potentials, edge_potentials)\n print('Your output:', marginals)\n print('Expected output:',\n {1: {'blue': 0.9142857142857144, 'green': 0.08571428571428572},\n 2: {'blue': 0.08571428571428569, 'green': 0.9142857142857143},\n 3: {'blue': 0.08571428571428572, 'green': 0.9142857142857144},\n 4: {'blue': 0.9142857142857143, 'green': 0.0857142857142857},\n 5: {'blue': 0.9142857142857143, 'green': 0.0857142857142857}})", "def op_sum(self, args):\n sum = 0\n stack_levels = len(self.stack)\n if args != None:\n stack_levels = int(args[0])\n self.require_stack(stack_levels)\n for i in range(0, stack_levels):\n sum += self.stack.pop()\n self.stack.append(sum)", "def compute_node_sums(nodes):\n for node in nodes:\n node.children_summed = 0 # Dynamically add a meta field to Node to improve runtime when computing sums.\n\n leaf_nodes = []\n for node in nodes:\n if len(node.children) == 0:\n leaf_nodes.append(node)\n to_process = leaf_nodes\n while to_process:\n node = to_process.pop()\n # if leaf_node or all child notes computed their sum.\n if len(node.children) == 0 or len(node.children) == node.children_summed:\n node.sum = node.value\n if len(node.children) > 0:\n node.sum = node.sum + sum([child.sum for child in list(node.children.values())])\n if node.parent:\n node.parent.children_summed += 1\n if len(\n node.parent.children) == node.parent.children_summed: # all children have computed their sums\n to_process.append(node.parent)\n\n for node in nodes:\n del node.children_summed" ]
[ "0.6165658", "0.6074642", "0.5960235", "0.59523267", "0.5946558", "0.5885655", "0.58521736", "0.57811224", "0.57755905", "0.5774179", "0.5719372", "0.5719372", "0.5719372", "0.5719372", "0.566922", "0.56002814", "0.559825", "0.5564518", "0.5548685", "0.5547149", "0.55294913", "0.55173826", "0.5509227", "0.5492565", "0.54600245", "0.54466265", "0.54466265", "0.54343426", "0.5413935", "0.5401435" ]
0.652207
0
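A compact sketch of the count / sum / sum-of-squares accumulation described in the record above, restricted to nested lists of plain numbers (the source's Char, complex and Block handling is omitted); the three totals are enough to recover the mean and variance afterwards.

from typing import Tuple, Union

Nested = Union[int, float, list]

def deep_stats(obj: Nested) -> Tuple[int, float, float]:
    # Returns (count, sum, sum of squares) over every number in a nested list.
    if isinstance(obj, (int, float)):
        return 1, float(obj), float(obj) ** 2
    c, s, q = 0, 0.0, 0.0
    for e in obj:
        c1, s1, q1 = deep_stats(e)
        c, s, q = c + c1, s + s1, q + q1
    return c, s, q

c, s, q = deep_stats([1, [2, 3], [[4]]])
mean = s / c                   # 2.5
variance = q / c - mean ** 2   # E[x^2] - E[x]^2 = 7.5 - 6.25 = 1.25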
Calculate and return the acceptance rate of the proposal. Returns
def get_acceptance(self): return self.count_accepted / self.count_proposed
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def acceptance_rate(self):\n total = float(self.accepted + self.rejected)\n return self.accepted / total", "def acceptance_fraction(self):\n return float(self._accepted / self.num_iterations)", "def acceptance_fraction(self):\n return self.sampler.acceptance_fraction", "def acceptance(self):\n return _mean(self._accepted_samples) / _mean(self._total_samples)", "def acceptance_rate(target_distribution, x0, xs, accepteds):\n return np.mean(accepteds)", "def acceptance_fraction(self):\n return ValueError(\"acceptance_fraction function not set.\")", "def participation_rate(self) -> float:\n return self.__participation_rate", "def expectation(self):\n\n return self.rate", "def rate(self):\n return self.brate / FAC", "def acceptance_probability(old_cost, new_cost, T):\n ap = EE * ((new_cost - old_cost) / T)\n\n return ap", "def compute_advantage(self, trials=1000):\n\n return self.compute_success_ratio(1, trials) - (1 - self.compute_success_ratio(0, trials))", "def getRate(self) -> int:\n if (self._total_stake.get() + self._daily_reward.get()) == 0:\n rate = DENOMINATOR\n else:\n rate = (self._total_stake.get() + self._daily_reward.get()) * DENOMINATOR // self.sICX_score.totalSupply()\n return rate", "def compute_mc_acceptance(self):\n if self.steps > 0:\n self.move_viability = \\\n (1. * self.viablesteps) / self.steps\n if self.viablesteps > 0:\n self.acceptance = float(self.acceptedsteps)/float(self.viablesteps)\n else:\n self.acceptance = 0.0\n else:\n self.move_viability = 0.0\n self.acceptance = 0.0", "def p_accept(self, candidate_fitness):\n return math.exp(-abs(candidate_fitness - self.cur_fitness) / self.T)", "def acceptance_prob(self, prop_prior_llh, cur_prior_llh):\n change_llh = self.change_llh_calc()\n\n # Log-Likelihood\n change_prior_llh = prop_prior_llh - cur_prior_llh\n\n # Note we use np.exp(new - old) because it's the log-likelihood\n return min(1, np.exp(change_llh+change_prior_llh))", "def ventilation_rate(self):\n # TODO: calculate based on MERV ratings/efficiency/power/etc.\n return (\n sum(v.calculate_ach(self.volume) for v in self.air_quality_measures)\n + self.outdoor_air_ventilation\n )", "def get_average_repro(self):\n return np.mean([agent.get_fledge_probability() for agent in self.agents])", "def success_rate(self):\n success_rate_text = self.emulator.get_screen_text(ui_element=self.ui['ENHANCE_POTENTIAL_RATE'])\n success_rate = success_rate_text.replace(\"%\", \"\").replace(\" \", \"\")\n return float(success_rate)", "def update_attendance_rate(self):\n\n total_attendees = self.attendee_set.all().count()\n attended = self.attendee_set\\\n .filter(presented=True)\\\n .count()\n self.attendance_rate = attended / total_attendees\n assert(self.attendance_rate != None)\n self.save()", "def calculate(self):\n\n tp = self.confusion_matrix.tp\n tn = self.confusion_matrix.tn\n fp = self.confusion_matrix.fp\n fn = self.confusion_matrix.fn\n\n agreement = tp + tn\n chance0 = (tn + fn) * (tn + fp)\n chance1 = (fp + tp) * (fn + tp)\n sum_ = tn + fn + fp + tp\n chance = (chance0 + chance1) / sum_\n\n return (agreement - chance) / (sum_ - chance)", "def acceptability(self, g):\n if g >= self.bounds.cutoff:\n return 1.0\n return 0.0", "def acceptability(self, g):\n if g <= self.bounds.cutoff:\n return 1.0\n return 0.0", "def calculate_profit(self):", "def acceptance_probability(cost, new_cost, temperature):\n if new_cost < cost:\n return 1\n return np.exp(- (new_cost - cost) / temperature)", "def approves(self):\n # verify trailing stop-loss threshold has been met\n thresholdMet 
= self.analysis.trailing_percentage >= constants.PERCENT_TRAILING_CLOSE_THRESHOLD\n\n # verify price has reverted back to the mean\n if self.analysis.initial_order_type == \"buy\":\n meanReverted = self.analysis.current_price >= self.analysis.current_volume_weighted_average_price\n else:\n meanReverted = self.analysis.current_price <= self.analysis.current_volume_weighted_average_price\n\n # return approval\n _approval = thresholdMet or meanReverted\n if _approval:\n self.logger.log(self.analysis.__dict__)\n self.logger.log(\"%s close approved!\" % self.ticker)\n return _approval", "def toss_pull_request_acceptance_rate(repo_id, begin_date=None, end_date=None, group_by='week'):\n if not begin_date:\n begin_date = '1970-1-1 00:00:01'\n if not end_date:\n end_date = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n\n pr_acceptance_rate_sql = s.sql.text(\"\"\"\n SELECT CAST\n ( merged.num_approved AS DECIMAL ) / CAST ( opened.num_opened AS DECIMAL ) AS \"rate\"\n FROM\n (\n SELECT COUNT\n ( pull_request_events.pull_request_id ) AS num_approved,\n repo_id\n FROM\n pull_requests\n JOIN pull_request_events ON pull_request_events.pull_request_id = pull_requests.pull_request_id\n WHERE\n pull_requests.repo_id = :repo_id\n AND ACTION = 'merged'\n OR ACTION = 'ready_for_review'\n AND pull_request_events.created_at BETWEEN :begin_date\n AND :end_date\n GROUP BY\n repo_id\n ) merged\n JOIN (\n SELECT COUNT\n ( pull_request_events.pull_request_id ) AS num_opened,\n repo_id\n FROM\n pull_requests\n JOIN pull_request_events ON pull_request_events.pull_request_id = pull_requests.pull_request_id\n WHERE\n pull_requests.repo_id = :repo_id\n AND ACTION = 'closed'\n AND pull_request_events.created_at BETWEEN :begin_date\n AND :end_date\n GROUP BY\n repo_id\n ) opened ON merged.repo_id = opened.repo_id\n \"\"\")\n \n results = pd.read_sql(pr_acceptance_rate_sql, engine, params={'repo_id': repo_id, 'group_by': group_by,\n 'begin_date': begin_date, 'end_date': end_date})\n return results", "def reward_function(self):\r\n def R(state, decision, nodes):\r\n return -1.0/1000*nodes['G'].get_preds_value(state)*(decision['G:R_1']+decision['G:L'])\r\n \r\n return R", "def fitness(self):\r\n history = self.history\r\n return sum(history) / len(history)", "def rate(self) -> float:\n return self.success_cnt / self.total_cnt if self.total_cnt > 0 else 1.0", "def get_autofocus_ratio(self, x):\n\n # sample designs from the prior\n z = tf.random.normal([tf.shape(x)[0], self.latent_size])\n q_dx = self.q_vae.decoder.get_distribution(z, training=False)\n p_dx = self.p_vae.decoder.get_distribution(z, training=False)\n\n # evaluate the score and importance weights\n log_w = q_dx.log_prob(x)[..., tf.newaxis] - \\\n p_dx.log_prob(x)[..., tf.newaxis]\n while len(log_w.shape) > 2:\n log_w = tf.reduce_sum(log_w, axis=1)\n return tf.math.exp(log_w)" ]
[ "0.8237646", "0.7138192", "0.71306014", "0.69210577", "0.6664414", "0.6460886", "0.62920713", "0.6223292", "0.6203049", "0.6135156", "0.6112492", "0.5974629", "0.5866377", "0.5857397", "0.575626", "0.5755724", "0.5665877", "0.5657661", "0.5641951", "0.5625141", "0.56085306", "0.56064975", "0.5572997", "0.5562263", "0.5558689", "0.554082", "0.55264497", "0.5524066", "0.5523991", "0.55055505" ]
0.7540025
1
Calculate and return the illegal proposal rate of this proposal. (Proposing values outside the support of the parameter space e.g covariances < 0) Returns
def get_illegal(self): return self.count_illegal / self.count_proposed
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def penalty(self):\n assert len(self.weights) == len(self.means), \"Dimensions!\"\n out = np.exp(self.data['riskfree'] * self.data['maturity'])\n for weight, mean in zip(self.weights, self.means):\n out -= weight * np.exp(mean * self.data['maturity'])\n return (out**2).mean()**.5", "def emission_rate_per_aerosol_per_person_when_present(self) -> _VectorisedFloat:\n raise NotImplementedError(\"Subclass must implement\")", "def emission_rate_per_aerosol_per_person_when_present(self) -> _VectorisedFloat:\n return self.known_individual_emission_rate", "def emission_rate_per_person_when_present(self) -> _VectorisedFloat:\n return (self.emission_rate_per_aerosol_per_person_when_present() *\n self.aerosols())", "def test_false_positive_rate(self):\n # For the penalty, the default loss is hinge.\n expected_penalty_numerator = np.sum(\n np.maximum(0.0, 1.0 + self._penalty_predictions) *\n (self._penalty_labels <= 0.0) * self._penalty_weights *\n self._penalty_predicate)\n expected_penalty_denominator = np.sum(\n (self._penalty_labels <= 0.0) * self._penalty_weights *\n self._penalty_predicate)\n expected_penalty_value = (\n expected_penalty_numerator / expected_penalty_denominator)\n\n # For the constraint, the default loss is zero-one.\n expected_constraint_numerator = np.sum(\n (0.5 * (1.0 + np.sign(self._constraint_predictions))) *\n (self._constraint_labels <= 0.0) * self._constraint_weights *\n self._constraint_predicate)\n expected_constraint_denominator = np.sum(\n (self._constraint_labels <= 0.0) * self._constraint_weights *\n self._constraint_predicate)\n expected_constraint_value = (\n expected_constraint_numerator / expected_constraint_denominator)\n\n actual_expression = binary_rates.false_positive_rate(self.context)\n self.check_rates(expected_penalty_value, expected_constraint_value,\n actual_expression)", "def _compute_penalty(self):\n raise ValueError('Implement in a child class')", "def _evaluate(self, state):\n input_values_rmse = self.get_input_values_rmse(state)\n if not self._include_leading_powers:\n if np.isfinite(input_values_rmse):\n return -input_values_rmse\n else:\n return self._default_value\n # NOTE(leeley): If computing the leading power fails\n # (timeout or sympy ValueError) or functions in symbolic_properties return\n # nan (for example, 1 / (x - x)).\n leading_power_error = self.get_leading_power_error(state)\n\n if self._hard_penalty_default_value is None:\n # Soft penalty.\n if np.isfinite(leading_power_error):\n return -input_values_rmse - leading_power_error\n else:\n return self._default_value\n else:\n # Hard penalty.\n if (np.isfinite(leading_power_error)\n and np.isclose(leading_power_error, 0)):\n return -input_values_rmse\n else:\n return self._hard_penalty_default_value", "def test_negative_prediction_rate(self):\n # For the penalty, the default loss is hinge.\n expected_penalty_numerator = np.sum(\n np.maximum(0.0, 1.0 - self._penalty_predictions) * self._penalty_weights\n * self._penalty_predicate)\n expected_penalty_denominator = np.sum(\n self._penalty_weights * self._penalty_predicate)\n expected_penalty_value = (\n expected_penalty_numerator / expected_penalty_denominator)\n\n # For the constraint, the default loss is zero-one.\n expected_constraint_numerator = np.sum(\n (0.5 * (1.0 - np.sign(self._constraint_predictions))) *\n self._constraint_weights * self._constraint_predicate)\n expected_constraint_denominator = np.sum(\n self._constraint_weights * self._constraint_predicate)\n expected_constraint_value = (\n expected_constraint_numerator / 
expected_constraint_denominator)\n\n actual_expression = binary_rates.negative_prediction_rate(self.context)\n self.check_rates(expected_penalty_value, expected_constraint_value,\n actual_expression)", "def gPenalty(d):\n return -1/(d+0.2)**2 if d > -0.1 else 0", "def penalty(self):\n return 0", "def test_false_negative_rate(self):\n # For the penalty, the default loss is hinge.\n expected_penalty_numerator = np.sum(\n np.maximum(\n 0.0, 1.0 - self._penalty_predictions) * (self._penalty_labels > 0.0)\n * self._penalty_weights * self._penalty_predicate)\n expected_penalty_denominator = np.sum(\n (self._penalty_labels > 0.0) * self._penalty_weights *\n self._penalty_predicate)\n expected_penalty_value = (\n expected_penalty_numerator / expected_penalty_denominator)\n\n # For the constraint, the default loss is zero-one.\n expected_constraint_numerator = np.sum(\n (0.5 * (1.0 - np.sign(self._constraint_predictions))) *\n (self._constraint_labels > 0.0) * self._constraint_weights *\n self._constraint_predicate)\n expected_constraint_denominator = np.sum(\n (self._constraint_labels > 0.0) * self._constraint_weights *\n self._constraint_predicate)\n expected_constraint_value = (\n expected_constraint_numerator / expected_constraint_denominator)\n\n actual_expression = binary_rates.false_negative_rate(self.context)\n self.check_rates(expected_penalty_value, expected_constraint_value,\n actual_expression)", "def disruptions_allowed(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"disruptions_allowed\")", "def _validate_iterations(self, proposal):\n iterations = proposal[\"value\"]\n if iterations <= 0:\n raise traitlets.TraitError(\"iterations must be greater than 0.\")\n return iterations", "def effective_strain_rate(self):\n epi = self.strain_rate_tensor()\n ep_xx = epi[0,0]\n ep_yy = epi[1,1]\n ep_zz = epi[2,2]\n ep_xy = epi[0,1]\n ep_xz = epi[0,2]\n ep_yz = epi[1,2]\n \n # Second invariant of the strain rate tensor squared\n epsdot = 0.5 * (+ ep_xx**2 + ep_yy**2 + ep_zz**2) \\\n + ep_xy**2 + ep_xz**2 + ep_yz**2\n return epsdot", "def missing(self) -> float:\n return self._coreEstimation.noMaskScore", "def _proportionalTerm(self):\n\n\t\treturn self._getErrorFunction() * self._Kp", "def theoretical_effective(dataset):\n return float(sum(dataset))/len(dataset)", "def em_var(self) -> float:\n if self.__total_pulls == 0:\n raise Exception('Number of pulls is 0. 
No empirical variance.')\n return (self.__sum_of_square_reward -\n self.__total_rewards**2 / self.__total_pulls) / self.__total_pulls", "def auditcontextnotfoundrate(self) :\n\t\ttry :\n\t\t\treturn self._auditcontextnotfoundrate\n\t\texcept Exception as e:\n\t\t\traise e", "def nNeg(self):\n running_total = 0\n for i in range(self.prob.num):\n if self.alphas[i] > 1e-5 and self.prob.Y[i] == -1:\n running_total += 1\n return running_total if running_total > 0 else 1", "def learnign_rate_examples():\n #######\n bad_larning_rate = 0.1\n not_bad_learning_rate = 1e-4\n good_learning_rate = 1e-3\n #######\n return bad_larning_rate, not_bad_learning_rate, good_learning_rate", "def model_error(self):\n return self.premium() / self.data['premium'] - 1", "def neg_log_prob(self,params: ndarray) -> float:\n return -self.compute_log_prob(params)", "def anti_differentiate(self):\n if not self.is_nan() :\n return RatTerm(RatNum(self.coeff.nominator,self.expt + 1), self.expt + 1)\n else :\n return RatTerm(RatNum(1, 0), 0)", "def _raise_or_return_valid_prob(self, prob):\n assert prob >= 0 and prob <= 1.0, f\"Invalid probability: '{prob}'\"\n return prob", "def perplexity(self):\n raise NotImplementedError(\"To be implemented\")", "def _estim_determ_p(num_zeros, num_ones):\n p = 0.0\n if num_zeros == 0:\n p += 0.5\n if num_ones == 0:\n p += 0.5\n return p", "def test_true_negative_rate(self):\n # For the penalty, the default loss is hinge.\n expected_penalty_numerator = np.sum(\n np.maximum(0.0, 1.0 - self._penalty_predictions) *\n (self._penalty_labels <= 0.0) * self._penalty_weights *\n self._penalty_predicate)\n expected_penalty_denominator = np.sum(\n (self._penalty_labels <= 0.0) * self._penalty_weights *\n self._penalty_predicate)\n expected_penalty_value = (\n expected_penalty_numerator / expected_penalty_denominator)\n\n # For the constraint, the default loss is zero-one.\n expected_constraint_numerator = np.sum(\n (0.5 * (1.0 - np.sign(self._constraint_predictions))) *\n (self._constraint_labels <= 0.0) * self._constraint_weights *\n self._constraint_predicate)\n expected_constraint_denominator = np.sum(\n (self._constraint_labels <= 0.0) * self._constraint_weights *\n self._constraint_predicate)\n expected_constraint_value = (\n expected_constraint_numerator / expected_constraint_denominator)\n\n actual_expression = binary_rates.true_negative_rate(self.context)\n self.check_rates(expected_penalty_value, expected_constraint_value,\n actual_expression)", "def badness(self_):\n return self._badness(self_.time)", "def get_expected_objective(self) -> float:\n # pylint: disable=invalid-name\n obj = 0.\n for gr in self.grounded.values():\n dist = gr.get_expected_dist_to_satisfaction()\n obj += 1 - self.weight * max(0, dist) ** 2\n return obj" ]
[ "0.61560255", "0.61219674", "0.60336506", "0.59619415", "0.5867981", "0.5866423", "0.5817501", "0.58053184", "0.5801318", "0.5792975", "0.57858485", "0.5781058", "0.57493556", "0.5713597", "0.5710124", "0.5701755", "0.5694844", "0.5671048", "0.56675017", "0.56662416", "0.5652163", "0.56385183", "0.56257284", "0.56155175", "0.56140816", "0.5613", "0.56088513", "0.5602694", "0.5591661", "0.55840755" ]
0.6779935
0
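A minimal counter sketch tying the two rate records above together: the acceptance rate and the illegal-proposal rate are both ratios over the number of proposals attempted. The attribute names count_proposed, count_accepted and count_illegal come from the documents themselves; the class name and the record() helper are assumptions added for illustration.

class ProposalStats:
    # Tracks MCMC proposal outcomes; both rates divide by proposals attempted.
    def __init__(self) -> None:
        self.count_proposed = 0
        self.count_accepted = 0
        self.count_illegal = 0  # e.g. a proposed covariance < 0

    def record(self, accepted: bool, illegal: bool = False) -> None:
        self.count_proposed += 1
        if illegal:
            self.count_illegal += 1
        elif accepted:
            self.count_accepted += 1

    def get_acceptance(self) -> float:
        return self.count_accepted / self.count_proposed

    def get_illegal(self) -> float:
        return self.count_illegal / self.count_proposed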
Propose a new set of gmm parameters. Calls each proposal function one after another.
def propose(self, X, gmm, target, n_jobs=1): new_gmm = gmm for _ in xrange(self.propose_iterations): if self.propose_mean is not None: new_gmm = self.propose_mean.propose(X, new_gmm, target, n_jobs) if self.propose_covars is not None: new_gmm = self.propose_covars.propose(X, new_gmm, target, n_jobs) if self.propose_weights is not None: new_gmm = self.propose_weights.propose(X, new_gmm, target, n_jobs) return new_gmm
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def gpbandits(model, data, iters=10, kernel='se', cl=0.1, v=0.0, num_samples=500, verbose=True, best_model_log=False):\n\n num_dims = model.num_dims # number of hyperparameter dimensions\n\n # initial model evaluation\n points = model.encode()[np.newaxis,:]\n scores = np.array([model.train_test_cv(data)])\n\n # best model and corresponding value at each iteration\n if best_model_log:\n best_point_tmp = []\n best_point_tmp.append(points[0,:])\n\n # print update\n if verbose:\n print(\"Iteration: %03d | Score: %.06e\" %(0, scores[0]))\n #print(\"Iteration: %03d | Design Point: %f | Score: %.06e\" %(0,points[0,:] scores[0]))\n\n # loop\n for i in range(iters):\n\n # sample num_Samples random points from [0,1)^num_dims\n candidates = sample(num_dims, num_samples)\n\n # find GP posterior\n A = formK(candidates, candidates, kernel, cl)\n B = formK(points, points, kernel, cl) + v*np.eye(points.shape[0])\n C = formK(candidates, points, kernel, cl)\n tmp = C.dot(np.linalg.inv(B))\n mu = tmp.dot(scores)\n Sigma = A - tmp.dot(C.T)\n var = np.diagonal(Sigma) + np.finfo(float).eps\n sig = np.sqrt(var)\n\n # choose new point with best expected improvement\n exp_imp = expected_improvement(scores.min(), mu, sig)\n best_idx = np.argmax(exp_imp)\n best_point = candidates[best_idx]\n\n # set hyperparameters with best sampled point\n model.decode(best_point)\n\n # return re-encoded point\n new_point = model.encode()\n\n # evaluate model\n new_score = model.train_test_cv(data)\n\n # append to points/scores lists\n points = np.vstack((points, best_point)) # use best_point, not re-encoded new_point to break discrete symmetries\n scores = np.append(scores, new_score)\n\n # save progress\n save_checkpoint(points, scores)\n\n # print update\n if verbose:\n print(\"Iteration: %03d | Score: %.06e\" %(i+1, new_score))\n #print(\"Iteration: %03d | Design Point: %f | Score: %.06e\" %(i+1, best_point, new_score))\n\n if best_model_log:\n ind = np.argmin(scores)\n best_point_tmp.append(points[ind])\n\n\n\n # return best model\n ind = np.argmin(scores)\n best_overall_point = points[ind]\n model.decode(best_overall_point)\n\n if not best_model_log:\n return model\n else:\n return model, best_point_tmp", "def main():\r\n parametrized_data = loadData('files/parametrized.p')\r\n config = loadConfig('config/gmm.cfg')\r\n\r\n data_ = eachDigitGMM(parametrized_data, config)\r\n\r\n save(data_)", "def label_and_sample_proposals_mod(\n\t\tself, proposals: List[Instances], targets: List[Instances]\n\t) -> List[Instances]:\n\t\tgt_boxes = [x.gt_boxes for x in targets]\n\t\t# Augment proposals with ground-truth boxes.\n\t\t# In the case of learned proposals (e.g., RPN), when training starts\n\t\t# the proposals will be low quality due to random initialization.\n\t\t# It's possible that none of these initial\n\t\t# proposals have high enough overlap with the gt objects to be used\n\t\t# as positive examples for the second stage components (box head,\n\t\t# cls head, mask head). Adding the gt boxes to the set of proposals\n\t\t# ensures that the second stage components will have some positive\n\t\t# examples from the start of training. 
For RPN, this augmentation improves\n\t\t# convergence and empirically improves box AP on COCO by about 0.5\n\t\t# points (under one tested configuration).\n\t\texpansion_scale = 0.05\n\t\tprint(\"proposals before expansion\")\n\t\tprint(proposals)\n\t\tfor i in range(len(proposals)):\n\t\t\th,w = proposals[i].image_size\n\t\t\tprop_boxes = proposals[i].proposal_boxes.tensor\n\t\t\tbw = prop_boxes[:,2] - prop_boxes[:,0]\n\t\t\tbh = prop_boxes[:,3]-prop_boxes[:,1]\n\t\t\tprop_boxes[:,0] = torch.max(prop_boxes[:,0] - (bw*expansion_scale*0.5),torch.zeros_like(prop_boxes[:,0]))\n\t\t\tprop_boxes[:,1] = torch.max(prop_boxes[:,1] - (bh*expansion_scale*0.5),torch.zeros_like(prop_boxes[:,1]))\n\t\t\tprop_boxes[:,2] = torch.min(prop_boxes[:,2] + (bw*expansion_scale*0.5),torch.zeros_like(prop_boxes[:,2]) + w)\n\t\t\tprop_boxes[:,3] = torch.min(prop_boxes[:,3] + (bh*expansion_scale*0.5),torch.zeros_like(prop_boxes[:,3]) + h)\n\t\t\tproposals[i].proposal_boxes = Boxes(prop_boxes)\n\t\tprint(\"proposals after expansion\")\n\t\tprint(proposals)\n\t\tif self.proposal_append_gt:\n\t\t\tproposals = add_ground_truth_to_proposals(gt_boxes, proposals)\n\n\t\tproposals_with_gt = []\n\n\t\tnum_fg_samples = []\n\t\tnum_bg_samples = []\n\t\tfor proposals_per_image, targets_per_image in zip(proposals, targets):\n\t\t\thas_gt = len(targets_per_image) > 0\n\t\t\tmatch_quality_matrix = pairwise_iou(\n\t\t\t\ttargets_per_image.gt_boxes, proposals_per_image.proposal_boxes\n\t\t\t)\n\t\t\tmatched_idxs, matched_labels = self.proposal_matcher(match_quality_matrix)\n\t\t\tsampled_idxs, gt_classes = self._sample_proposals_mod(\n\t\t\t\tmatched_idxs, matched_labels, targets_per_image.gt_classes\n\t\t\t)\n\n\t\t\t# Set target attributes of the sampled proposals:\n\t\t\tproposals_per_image = proposals_per_image[sampled_idxs]\n\t\t\tproposals_per_image.gt_classes = gt_classes\n\n\t\t\t# We index all the attributes of targets that start with \"gt_\"\n\t\t\t# and have not been added to proposals yet (=\"gt_classes\").\n\t\t\tif has_gt:\n\t\t\t\tsampled_targets = matched_idxs[sampled_idxs]\n\t\t\t\t# NOTE: here the indexing waste some compute, because heads\n\t\t\t\t# like masks, keypoints, etc, will filter the proposals again,\n\t\t\t\t# (by foreground/background, or number of keypoints in the image, etc)\n\t\t\t\t# so we essentially index the data twice.\n\t\t\t\tfor (trg_name, trg_value) in targets_per_image.get_fields().items():\n\t\t\t\t\tif trg_name.startswith(\"gt_\") and not proposals_per_image.has(trg_name):\n\t\t\t\t\t\tproposals_per_image.set(trg_name, trg_value[sampled_targets])\n\t\t\telse:\n\t\t\t\tgt_boxes = Boxes(\n\t\t\t\t\ttargets_per_image.gt_boxes.tensor.new_zeros((len(sampled_idxs), 4))\n\t\t\t\t)\n\t\t\t\tproposals_per_image.gt_boxes = gt_boxes\n\n\t\t\tnum_bg_samples.append((gt_classes == self.num_classes).sum().item())\n\t\t\tnum_fg_samples.append(gt_classes.numel() - num_bg_samples[-1])\n\t\t\tproposals_with_gt.append(proposals_per_image)\n\n\t\t# Log the number of fg/bg samples that are selected for training ROI heads\n\t\tstorage = get_event_storage()\n\t\tstorage.put_scalar(\"roi_head/num_fg_samples\", np.mean(num_fg_samples))\n\t\tstorage.put_scalar(\"roi_head/num_bg_samples\", np.mean(num_bg_samples))\n\n\t\treturn proposals_with_gt", "def train_mdn_proposal_prior(save=True):\n\n n_iterations = n_bootstrap_iter\n n_data = 500\n\n # read data\n pilot_means, pilot_stds = helper.load(datadir + 'pilot_run_results.pkl')\n obs_stats = helper.load(datadir + 'obs_stats.pkl')\n obs_stats -= pilot_means\n 
obs_stats /= pilot_stds\n\n # create an mdn\n net = mdn.MDN_SVI(n_inputs=9, n_hiddens=[50], act_fun='tanh', n_outputs=4, n_components=1)\n regularizer = lf.regularizerSvi(net.mps, net.sps, 0.01)\n prior_proposal = None\n\n for iter in xrange(n_iterations):\n\n # generate new data\n params = []\n stats = []\n dist = []\n i = 0\n\n while i < n_data:\n\n prop_params = sim_prior_params() if iter == 0 else np.exp(prior_proposal.gen())[0]\n if np.any(np.log(prop_params) < log_prior_min) or np.any(np.log(prop_params) > log_prior_max):\n continue\n try:\n lv = mjp.LotkaVolterra(init, prop_params)\n states = lv.sim_time(dt, duration, max_n_steps=max_n_steps)\n except mjp.SimTooLongException:\n continue\n\n sum_stats = calc_summary_stats(states)\n sum_stats -= pilot_means\n sum_stats /= pilot_stds\n\n params.append(prop_params)\n stats.append(sum_stats)\n dist.append(calc_dist(sum_stats, obs_stats))\n i += 1\n\n print 'simulation {0}, distance = {1}'.format(i, dist[-1])\n\n params = np.array(params)\n stats = np.array(stats)\n dist = np.array(dist)\n\n # plot distance histogram\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.hist(dist, bins=int(np.sqrt(n_data)))\n ax.set_title('iteration = {0}'.format(iter + 1))\n ax.set_xlim([0.0, 12.0])\n plt.show(block=False)\n\n # train an mdn to give the posterior\n minibatch = 100\n maxiter = int(2000 * n_data / minibatch)\n monitor_every = 100\n trainer = Trainer.Trainer(\n model=net,\n trn_data=[stats, np.log(params)],\n trn_loss=net.mlprob + regularizer / n_data,\n trn_target=net.y\n )\n trainer.train(\n maxiter=maxiter,\n minibatch=minibatch,\n show_progress=True,\n monitor_every=monitor_every\n )\n\n # calculate the approximate posterior\n mdn_mog = net.get_mog(obs_stats)\n approx_posterior = mdn_mog if iter == 0 else mdn_mog / prior_proposal\n prior_proposal = approx_posterior.project_to_gaussian()\n\n # save the net and the approximate posterior\n if save:\n helper.save((net, approx_posterior, prior_proposal, dist), netsdir + 'mdn_svi_proposal_prior_{0}.pkl'.format(iter))", "def handle_propose(self, m) -> None:\n self.proposed_value = m.value\n self.value = m.value\n self.state = 'PROMISE'\n self.sim.propose_counter += 1\n self.p_id = self.sim.propose_counter\n\n for acceptor in self.sim.a:\n respond_m = Message(self, acceptor, 'PREPARE', None, self.p_id, None)\n self.n.queue_message(respond_m)", "def GAStep(self):\n\n self.updateMatingPool()\n self.newGeneration()", "def _logp_propose(self, top_proposal, old_positions, beta, new_positions=None, direction='forward',\n validate_energy_bookkeeping=True, platform_name='CPU'):\n _logger.info(\"Conducting forward proposal...\")\n import copy\n from perses.dispersed.utils import compute_potential_components\n # Ensure all parameters have the expected units\n check_dimensionality(old_positions, unit.angstroms)\n if new_positions is not None:\n check_dimensionality(new_positions, unit.angstroms)\n\n # Determine order in which atoms (and the torsions they are involved in) will be proposed\n _logger.info(\"Computing proposal order with NetworkX...\")\n proposal_order_tool = NetworkXProposalOrder(top_proposal, direction=direction)\n torsion_proposal_order, logp_choice, omitted_bonds = proposal_order_tool.determine_proposal_order()\n atom_proposal_order = [ torsion[0] for torsion in torsion_proposal_order ]\n\n # some logs for clarity\n _logger.info(f\"number of atoms to be placed: {len(atom_proposal_order)}\")\n _logger.info(f\"Atom index proposal order is {atom_proposal_order}\")\n 
_logger.info(f\"omitted_bonds: {omitted_bonds}\")\n\n growth_parameter_name = 'growth_stage'\n if direction==\"forward\":\n _logger.info(\"direction of proposal is forward; creating atoms_with_positions and new positions from old system/topology...\")\n # Find and copy known positions to match new topology\n import parmed\n structure = parmed.openmm.load_topology(top_proposal.new_topology, top_proposal.new_system)\n atoms_with_positions = [structure.atoms[atom_idx] for atom_idx in top_proposal.new_to_old_atom_map.keys()]\n new_positions = self._copy_positions(atoms_with_positions, top_proposal, old_positions)\n self._new_posits = copy.deepcopy(new_positions)\n\n # Create modified System object\n _logger.info(\"creating growth system...\")\n growth_system_generator = GeometrySystemGenerator(top_proposal.new_system,\n torsion_proposal_order,\n omitted_bonds = omitted_bonds,\n reference_topology = top_proposal._new_topology,\n global_parameter_name=growth_parameter_name,\n use_sterics=self.use_sterics,\n neglect_angles = self.neglect_angles,\n use_14_nonbondeds = self._use_14_nonbondeds)\n\n growth_system = growth_system_generator.get_modified_system()\n\n elif direction=='reverse':\n _logger.info(\"direction of proposal is reverse; creating atoms_with_positions from old system/topology\")\n if new_positions is None:\n raise ValueError(\"For reverse proposals, new_positions must not be none.\")\n\n # Find and copy known positions to match old topology\n import parmed\n structure = parmed.openmm.load_topology(top_proposal.old_topology, top_proposal.old_system)\n atoms_with_positions = [structure.atoms[atom_idx] for atom_idx in top_proposal.old_to_new_atom_map.keys()]\n\n # Create modified System object\n _logger.info(\"creating growth system...\")\n growth_system_generator = GeometrySystemGenerator(top_proposal.old_system,\n torsion_proposal_order,\n omitted_bonds = omitted_bonds,\n reference_topology = top_proposal._old_topology,\n global_parameter_name=growth_parameter_name,\n use_sterics=self.use_sterics,\n neglect_angles = self.neglect_angles,\n use_14_nonbondeds = self._use_14_nonbondeds)\n\n growth_system = growth_system_generator.get_modified_system()\n else:\n raise ValueError(\"Parameter 'direction' must be forward or reverse\")\n\n # Define a system for the core atoms before new atoms are placed\n self.atoms_with_positions_system = growth_system_generator._atoms_with_positions_system\n self.growth_system = growth_system\n\n # Get the angle terms that are neglected from the growth system\n neglected_angle_terms = growth_system_generator.neglected_angle_terms\n _logger.info(f\"neglected angle terms include {neglected_angle_terms}\")\n\n # Rename the logp_choice from the NetworkXProposalOrder for the purpose of adding logPs in the growth stage\n logp_proposal = np.sum(np.array(logp_choice))\n _logger.info(f\"log probability choice of torsions and atom order: {logp_proposal}\")\n\n if self._storage:\n self._storage.write_object(\"{}_proposal_order\".format(direction), proposal_order_tool, iteration=self.nproposed)\n\n # Create an OpenMM context\n from simtk import openmm\n from perses.dispersed.utils import configure_platform\n _logger.info(\"creating platform, integrators, and contexts; setting growth parameter\")\n platform = configure_platform(platform_name, fallback_platform_name='Reference', precision='double')\n integrator = openmm.VerletIntegrator(1*unit.femtoseconds)\n atoms_with_positions_system_integrator = openmm.VerletIntegrator(1*unit.femtoseconds)\n final_system_integrator 
= openmm.VerletIntegrator(1*unit.femtoseconds)\n context = openmm.Context(growth_system, integrator, platform)\n growth_system_generator.set_growth_parameter_index(len(atom_proposal_order)+1, context)\n\n #create final growth contexts for nonalchemical perturbations...\n if direction == 'forward':\n self.forward_final_growth_system = copy.deepcopy(context.getSystem())\n elif direction == 'reverse':\n self.reverse_final_growth_system = copy.deepcopy(context.getSystem())\n\n growth_parameter_value = 1 # Initialize the growth_parameter value before the atom placement loop\n\n # In the forward direction, atoms_with_positions_system considers the atoms_with_positions\n # In the reverse direction, atoms_with_positions_system considers the old_positions of atoms in the\n atoms_with_positions_context = openmm.Context(self.atoms_with_positions_system, atoms_with_positions_system_integrator, platform)\n if direction == 'forward':\n _logger.info(\"setting atoms_with_positions context new positions\")\n atoms_with_positions_context.setPositions(new_positions)\n else:\n _logger.info(\"setting atoms_with_positions context old positions\")\n atoms_with_positions_context.setPositions(old_positions)\n\n #Print the energy of the system before unique_new/old atoms are placed...\n state = atoms_with_positions_context.getState(getEnergy=True)\n atoms_with_positions_reduced_potential = beta*state.getPotentialEnergy()\n atoms_with_positions_reduced_potential_components = compute_potential_components(atoms_with_positions_context,\n platform=platform)\n _logger.debug(f'atoms_with_positions_reduced_potential_components:')\n for f, e in atoms_with_positions_reduced_potential_components.items():\n _logger.debug(f'\\t{f} : {e}')\n atoms_with_positions_methods_differences = abs(atoms_with_positions_reduced_potential -\n sum(atoms_with_positions_reduced_potential_components.values()))\n _logger.debug(f'Diffence in energy on adding unique atoms: {atoms_with_positions_methods_differences}')\n assert atoms_with_positions_methods_differences < \\\n ENERGY_THRESHOLD, f\"the difference between the atoms_with_positions_reduced_potential and the sum of \" \\\n f\"atoms_with_positions_reduced_potential_components is\" \\\n f\" {atoms_with_positions_methods_differences}\"\n\n # Place each atom in predetermined order\n _logger.info(\"There are {} new atoms\".format(len(atom_proposal_order)))\n\n rjmc_info = list()\n energy_logger = [] #for bookkeeping per_atom energy reduced potentials\n\n for torsion_atom_indices, proposal_prob in zip(torsion_proposal_order, logp_choice):\n\n _logger.debug(f\"Proposing torsion {torsion_atom_indices} with proposal probability {proposal_prob}\")\n\n # Get parmed Structure Atom objects associated with torsion\n atom, bond_atom, angle_atom, torsion_atom = [ structure.atoms[index] for index in torsion_atom_indices ]\n\n # Activate the new atom interactions\n growth_system_generator.set_growth_parameter_index(growth_parameter_value, context=context)\n\n # Get internal coordinates if direction is reverse\n if direction == 'reverse':\n atom_coords, bond_coords, angle_coords, torsion_coords = [ old_positions[index] for index in torsion_atom_indices ]\n internal_coordinates, detJ = self._cartesian_to_internal(atom_coords, bond_coords, angle_coords, torsion_coords)\n # Extract dimensionless internal coordinates\n r, theta, phi = internal_coordinates[0], internal_coordinates[1], internal_coordinates[2] # dimensionless\n\n _logger.debug(f\"\\treverse proposal: r = {r}; theta = {theta}; phi = {phi}\")\n\n bond 
= self._get_relevant_bond(atom, bond_atom)\n\n if bond is not None:\n if direction == 'forward':\n r = self._propose_bond(bond, beta, self._n_bond_divisions)\n\n _logger.debug(f\"\\tproposing forward bond of {r}.\")\n\n logp_r = self._bond_logp(r, bond, beta, self._n_bond_divisions)\n _logger.debug(f\"\\tlogp_r = {logp_r}.\")\n\n # Retrieve relevant quantities for valence bond and compute u_r\n r0, k = bond.type.req, bond.type.k * self._bond_softening_constant\n sigma_r = unit.sqrt((1.0/(beta*k)))\n r0, k, sigma_r = r0.value_in_unit_system(unit.md_unit_system), k.value_in_unit_system(unit.md_unit_system), sigma_r.value_in_unit_system(unit.md_unit_system)\n u_r = 0.5*((r - r0)/sigma_r)**2\n\n _logger.debug(f\"\\treduced r potential = {u_r}.\")\n\n else:\n if direction == 'forward':\n constraint = self._get_bond_constraint(atom, bond_atom, top_proposal.new_system)\n if constraint is None:\n raise ValueError(\"Structure contains a topological bond [%s - %s] with no constraint or bond information.\" % (str(atom), str(bond_atom)))\n\n r = constraint.value_in_unit_system(unit.md_unit_system) #set bond length to exactly constraint\n _logger.debug(f\"\\tproposing forward constrained bond of {r} with log probability of 0.0 and implied u_r of 0.0.\")\n\n logp_r = 0.0\n u_r = 0.0\n\n # Propose an angle and calculate its log probability\n angle = self._get_relevant_angle(atom, bond_atom, angle_atom)\n if direction=='forward':\n theta = self._propose_angle(angle, beta, self._n_angle_divisions)\n _logger.debug(f\"\\tproposing forward angle of {theta}.\")\n\n logp_theta = self._angle_logp(theta, angle, beta, self._n_angle_divisions)\n _logger.debug(f\"\\t logp_theta = {logp_theta}.\")\n\n # Retrieve relevant quantities for valence angle and compute u_theta\n theta0, k = angle.type.theteq, angle.type.k * self._angle_softening_constant\n sigma_theta = unit.sqrt(1.0/(beta * k))\n theta0, k, sigma_theta = theta0.value_in_unit_system(unit.md_unit_system), k.value_in_unit_system(unit.md_unit_system), sigma_theta.value_in_unit_system(unit.md_unit_system)\n u_theta = 0.5*((theta - theta0)/sigma_theta)**2\n _logger.info(f\"\\treduced angle potential = {u_theta}.\")\n\n # Propose a torsion angle and calcualate its log probability\n if direction=='forward':\n # Note that (r, theta) are dimensionless here\n phi, logp_phi = self._propose_torsion(context, torsion_atom_indices, new_positions, r, theta, beta, self._n_torsion_divisions)\n xyz, detJ = self._internal_to_cartesian(new_positions[bond_atom.idx], new_positions[angle_atom.idx], new_positions[torsion_atom.idx], r, theta, phi)\n new_positions[atom.idx] = xyz\n\n _logger.debug(f\"\\tproposing forward torsion of {phi}.\")\n _logger.debug(f\"\\tsetting new_positions[{atom.idx}] to {xyz}. 
\")\n else:\n old_positions_for_torsion = copy.deepcopy(old_positions)\n # Note that (r, theta, phi) are dimensionless here\n logp_phi = self._torsion_logp(context, torsion_atom_indices, old_positions_for_torsion, r, theta, phi, beta, self._n_torsion_divisions)\n _logger.debug(f\"\\tlogp_phi = {logp_phi}\")\n\n\n # Compute potential energy\n if direction == 'forward':\n context.setPositions(new_positions)\n else:\n context.setPositions(old_positions)\n\n state = context.getState(getEnergy=True)\n reduced_potential_energy = beta*state.getPotentialEnergy()\n _logger.debug(f\"\\taccumulated growth context reduced energy = {reduced_potential_energy}\")\n\n\n #Compute change in energy from previous reduced potential\n if growth_parameter_value == 1: # then there is no previous reduced potential so u_phi is simply reduced_potential_energy - u_r - u_theta\n added_energy = reduced_potential_energy\n else:\n previous_reduced_potential_energy = energy_logger[-1]\n added_energy = reduced_potential_energy - previous_reduced_potential_energy\n\n _logger.debug(f\"growth index {growth_parameter_value} added reduced energy = {added_energy}.\")\n\n atom_placement_dict = {'atom_index': atom.idx,\n 'u_r': u_r,\n 'u_theta' : u_theta,\n 'r': r,\n 'theta': theta,\n 'phi': phi,\n 'logp_r': logp_r,\n 'logp_theta': logp_theta,\n 'logp_phi': logp_phi,\n 'log_detJ': np.log(detJ),\n 'added_energy': added_energy,\n 'proposal_prob': proposal_prob}\n rjmc_info.append(atom_placement_dict)\n\n logp_proposal += logp_r + logp_theta + logp_phi - np.log(detJ) # TODO: Check sign of detJ\n growth_parameter_value += 1\n energy_logger.append(reduced_potential_energy)\n # DEBUG: Write PDB file for placed atoms\n atoms_with_positions.append(atom)\n _logger.debug(f\"\\tatom placed, rjmc_info list updated, and growth_parameter_value incremented.\")\n\n\n # assert that the energy of the new positions is ~= atoms_with_positions_reduced_potential + reduced_potential_energy\n # The final context is treated in the same way as the atoms_with_positions_context\n if direction == 'forward': #if the direction is forward, the final system for comparison is top_proposal's new system\n _system, _positions = top_proposal._new_system, new_positions\n else:\n _system, _positions = top_proposal._old_system, old_positions\n\n if not self.use_sterics:\n final_system = self._define_no_nb_system(_system, neglected_angle_terms, atom_proposal_order)\n _logger.info(f\"{direction} final system defined with {len(neglected_angle_terms)} neglected angles.\")\n else:\n final_system = copy.deepcopy(_system)\n force_names = {force.__class__.__name__ : index for index, force in enumerate(final_system.getForces())}\n if 'NonbondedForce' in force_names.keys():\n final_system.getForce(force_names['NonbondedForce']).setUseDispersionCorrection(False)\n _logger.info(f\"{direction} final system defined with nonbonded interactions.\")\n final_context = openmm.Context(final_system, final_system_integrator, platform)\n final_context.setPositions(_positions)\n\n state = final_context.getState(getEnergy=True)\n final_context_reduced_potential = beta*state.getPotentialEnergy()\n final_context_components = [(force, energy*beta) for force, energy in\n compute_potential_components(final_context, platform=platform).items()]\n atoms_with_positions_reduced_potential_components = [\n (force, energy*beta) for force, energy in compute_potential_components(atoms_with_positions_context,\n platform=platform).items()\n ]\n _logger.debug(f\"reduced potential components before atom 
placement:\")\n for item in atoms_with_positions_reduced_potential_components:\n _logger.debug(f\"\\t\\t{item[0]}: {item[1]}\")\n _logger.info(f\"total reduced potential before atom placement: {atoms_with_positions_reduced_potential}\")\n\n _logger.debug(f\"potential components added from growth system:\")\n added_energy_components = [(force, energy*beta) for force, energy in\n compute_potential_components(context, platform=platform).items()]\n for item in added_energy_components:\n _logger.debug(f\"\\t\\t{item[0]}: {item[1]}\")\n\n # now for the corrected reduced_potential_energy\n if direction == 'forward':\n positions = new_positions\n else:\n positions = old_positions\n\n reduced_potential_energy = self._corrected_reduced_potential(growth_system_generator, positions, platform_name, atom_proposal_order, beta)\n\n _logger.info(f\"total reduced energy added from growth system: {reduced_potential_energy}\")\n\n _logger.debug(f\"reduced potential of final system:\")\n for item in final_context_components:\n _logger.debug(f\"\\t\\t{item[0]}: {item[1]}\")\n _logger.info(f\"final reduced energy {final_context_reduced_potential}\")\n\n _logger.info(f\"sum of energies: {atoms_with_positions_reduced_potential + reduced_potential_energy}\")\n _logger.info(f\"magnitude of difference in the energies: {abs(final_context_reduced_potential - atoms_with_positions_reduced_potential - reduced_potential_energy)}\")\n\n energy_mismatch_ratio = (atoms_with_positions_reduced_potential + reduced_potential_energy) / (final_context_reduced_potential)\n\n if validate_energy_bookkeeping:\n assert (energy_mismatch_ratio < ENERGY_MISMATCH_RATIO_THRESHOLD + 1) and (energy_mismatch_ratio > 1 - ENERGY_MISMATCH_RATIO_THRESHOLD) , f\"The ratio of the calculated final energy to the true final energy is {energy_mismatch_ratio}\"\n\n\n # Final log proposal:\n _logger.info(\"Final logp_proposal: {}\".format(logp_proposal))\n # Clean up OpenMM Context since garbage collector is sometimes slow\n del context; del atoms_with_positions_context; del final_context\n del integrator; del atoms_with_positions_system_integrator; del final_system_integrator\n\n check_dimensionality(logp_proposal, float)\n check_dimensionality(new_positions, unit.nanometers)\n\n omitted_growth_terms = growth_system_generator.omitted_growth_terms\n\n if self.use_sterics:\n return logp_proposal, new_positions, rjmc_info, 0.0, reduced_potential_energy, [], omitted_growth_terms\n\n\n return logp_proposal, new_positions, rjmc_info, atoms_with_positions_reduced_potential, final_context_reduced_potential, neglected_angle_terms, omitted_growth_terms", "def gamp_gmm_test(nz=200,ny=100,ns=10, snr=30, verbose=False, mse_tol=-17, plt_results=False): \n\n # Compute the dimensions\n if (ns==1):\n zshape = (nz,)\n yshape = (ny,)\n else:\n zshape = (nz,ns)\n yshape = (ny,ns)\n Ashape = (ny,nz)\n\n # GMM parameters\n zmeanc = [0, 0] # mean of each component\n zvarc = [1,0.001] # variance in each component\n pc = [0.1,0.9] # probability of each component\n ncomp= len(zmeanc)\n \n # Generate GMM data\n nztot = np.prod(zshape) \n u = np.random.choice(range(ncomp),p=pc,size=nztot)\n z = np.random.randn(nztot)\n for i in range(nztot):\n j = u[i]\n z[i] = zmeanc[j] + np.sqrt(zvarc[j])*z[i]\n z = np.reshape(z,zshape) \n\n # Create a random transform\n A = np.random.normal(0,np.sqrt(1/nz), Ashape)\n \n # Create output\n y0 = A.dot(z) \n wvar = np.power(10,-0.1*snr)*np.mean(np.abs(y0)**2)\n y = y0 + np.random.normal(0,np.sqrt(wvar),yshape)\n\n # Create a set of estimators, one 
for each component of the GMM\n est_list = []\n for i in range(ncomp):\n est = vp.estim.GaussEst(zmeanc[i], zvarc[i], zshape)\n est_list.append(est)\n \n # Create the GMM estimator\n est_in = vp.estim.MixEst(est_list, w=pc,name='input')\n \n # Create linear transform\n Aop = vp.trans.MatrixLT(A,zshape)\n\n # Create the output estimator\n est_out = vp.estim.GaussEst(y,wvar,yshape,name='output')\n\n # Create the solver\n solver = vp.solver.Gamp(est_in,est_out,Aop,hist_list=['z0','zvar0'],step=0.95,\\\n nit=50)\n \n # Run the solver\n solver.solve()\n \n # Compute the MSE as a function of the iteration\n z0_hist = solver.hist_dict['z0']\n zvar0_hist = solver.hist_dict['zvar0']\n nit = len(z0_hist)\n zpow = np.mean(np.abs(z)**2)\n mse = np.zeros(nit)\n mse_pred = np.zeros(nit)\n for it in range(nit):\n zerr = np.mean(np.abs(z0_hist[it]-z)**2)\n mse[it] = 10*np.log10(zerr/zpow)\n mse_pred[it] = 10*np.log10(np.mean(zvar0_hist[it])/zpow)\n\n if (plt_results):\n import matplotlib.pyplot as plt\n t = np.arange(nit)\n plt.plot(t,mse,'-o')\n plt.plot(t,mse_pred,'-s')\n plt.legend(['Actual', 'Pred'])\n plt.grid()\n \n if verbose:\n print(\"Final MSE = %f\" % mse[-1]) \n \n # Check final error if test passed\n if mse[-1] > mse_tol:\n raise vp.common.TestException(\"MSE exceeded expected value\")", "def optimise_GP_kernel(self,iters=1000):\n \n new_params=SCG(self.ll_hyper,self.ll_hyper_grad,np.hstack((self.DGPLVM_tar.GP.kernel.get_params(), np.log(self.DGPLVM_tar.GP.beta))),maxiters=iters,display=True,func_flg=0)\n #gtol=1e-10,epsilon=1e-10,\n# new_params = fmin_cg(self.ll,np.hstack((self.kernel.get_params(), np.log(self.beta))),fprime=self.ll_grad,maxiter=iters,gtol=1e-10,disp=False) \n self.DGPLVM_src.GP.set_params(new_params)\n self.DGPLVM_tar.GP.set_params(new_params)\n self.DGPLVM_all.GP.set_params(new_params)", "def propose(self, top_proposal, current_positions, beta, validate_energy_bookkeeping = True):\n _logger.info(\"propose: performing forward proposal\")\n # Ensure positions have units compatible with nanometers\n check_dimensionality(current_positions, unit.nanometers)\n check_dimensionality(beta, unit.kilojoules_per_mole**(-1))\n\n # TODO: Change this to use md_unit_system instead of hard-coding nanometers\n if not top_proposal.unique_new_atoms:\n _logger.info(\"propose: there are no unique new atoms; logp_proposal = 0.0.\")\n # If there are no unique new atoms, return new positions in correct order for new topology object and log probability of zero\n # TODO: Carefully check this\n import parmed\n structure = parmed.openmm.load_topology(top_proposal.old_topology, top_proposal._old_system)\n atoms_with_positions = [ structure.atoms[atom_idx] for atom_idx in top_proposal.new_to_old_atom_map.keys() ]\n new_positions = self._copy_positions(atoms_with_positions, top_proposal, current_positions)\n logp_proposal, rjmc_info, atoms_with_positions_reduced_potential, final_context_reduced_potential, neglected_angle_terms = 0.0, None, None, None, None\n self.forward_final_growth_system = None\n else:\n _logger.info(\"propose: unique new atoms detected; proceeding to _logp_propose...\")\n logp_proposal, new_positions, rjmc_info, atoms_with_positions_reduced_potential, final_context_reduced_potential, neglected_angle_terms, omitted_terms = self._logp_propose(top_proposal, current_positions, beta, direction='forward', validate_energy_bookkeeping = validate_energy_bookkeeping)\n self.nproposed += 1\n\n check_dimensionality(new_positions, unit.nanometers)\n check_dimensionality(logp_proposal, float)\n\n 
#define forward attributes\n self.forward_rjmc_info = rjmc_info\n self.forward_atoms_with_positions_reduced_potential, self.forward_final_context_reduced_potential = atoms_with_positions_reduced_potential, final_context_reduced_potential\n self.forward_neglected_angle_terms = neglected_angle_terms\n\n return new_positions, logp_proposal", "def precomp_GpGm(ells, thetas):\n n_ell, n_theta = len(ells), len(thetas)\n P_m_l = np.zeros((n_theta, n_ell))\n P_m_lminus1 = np.zeros_like(P_m_l)\n costs = np.cos(thetas)\n sints = np.sin(thetas)\n for it in range(n_theta):\n P_m_l[it] = P2l_rec_norm(ells, costs[it])\n # for il in range(n_ell):\n # P_m_l[it,il] = sp.lpmv(2, ells[il], costs[it])\n P_m_lminus1[:, 1:] = P_m_l[:, :-1]\n ELLS, THETAS = np.meshgrid(ells, thetas)\n COSTS, SINTS = np.cos(THETAS), np.sin(THETAS)\n G_plus = -((ELLS - 4) / SINTS**2 + 0.5 * ELLS * (ELLS - 1)) * \\\n P_m_l + (ELLS + 2) * COSTS * P_m_lminus1 / SINTS**2\n G_minus = 2 * ((ELLS - 1) * COSTS * P_m_l -\n (ELLS + 2) * P_m_lminus1) / SINTS**2\n G_plus[:, 0] = 0.\n G_minus[:, 0] = 0.\n return G_plus, G_minus", "def optimize(self, optimizer=None, start=None, messages=False, max_iters=1000, ipython_notebook=True, clear_after_finish=False, **kwargs):\n self.inference_method.on_optimization_start()\n try:\n super(GPMSGP, self).optimize(optimizer, start, messages, max_iters, ipython_notebook, clear_after_finish, **kwargs)\n except KeyboardInterrupt:\n print(\"KeyboardInterrupt caught, calling on_optimization_end() to round things up\")\n self.inference_method.on_optimization_end()\n raise\n \n self.posterior_prediction = self.inference_method.update_prediction_vectors(self.kern,self.posterior,self.grad_dict,self.likelihood)", "def _generate_proposals(self, box):\n # Generate proposals\n num_proposals = self.proposal_params['boxes_per_frame']\n proposals = torch.zeros((num_proposals, 4))\n gt_iou = torch.zeros(num_proposals)\n\n for i in range(num_proposals):\n proposals[i, :], gt_iou[i] = prutils.perturb_box(box, min_iou=self.proposal_params['min_iou'],\n sigma_factor=self.proposal_params['sigma_factor']\n )\n\n # Map to [-1, 1]\n gt_iou = gt_iou * 2 - 1\n return proposals, gt_iou", "def _generate_proposals(self, box):\n # Generate proposals\n num_proposals = self.proposal_params['boxes_per_frame']\n proposals = torch.zeros((num_proposals, 4))\n gt_iou = torch.zeros(num_proposals)\n\n for i in range(num_proposals):\n proposals[i, :], gt_iou[i] = prutils.perturb_box(box, min_iou=self.proposal_params['min_iou'],\n sigma_factor=self.proposal_params['sigma_factor']\n )\n\n # Map to [-1, 1]\n gt_iou = gt_iou * 2 - 1\n return proposals, gt_iou", "def generate_proposals(predictor, test_data, imdb, vis=False, thresh=0.):\r\n assert vis or not test_data.shuffle\r\n data_names = [k[0] for k in test_data.provide_data]\r\n\r\n i = 0\r\n t = time.time()\r\n imdb_boxes = list()\r\n original_boxes = list()\r\n for im_info, data_batch in test_data:\r\n t1 = time.time() - t\r\n t = time.time()\r\n\r\n scale = im_info[0, 2]\r\n scores, boxes, data_dict = im_proposal(predictor, data_batch, data_names, scale)\r\n t2 = time.time() - t\r\n t = time.time()\r\n\r\n # assemble proposals\r\n dets = np.hstack((boxes, scores))\r\n original_boxes.append(dets)\r\n\r\n # filter proposals\r\n keep = np.where(dets[:, 4:] > thresh)[0]\r\n dets = dets[keep, :]\r\n imdb_boxes.append(dets)\r\n\r\n if vis:\r\n vis_all_detection(data_dict['data'].asnumpy(), [dets], ['obj'], scale)\r\n\r\n logger.info('generating %d/%d ' % (i + 1, imdb.num_images) +\r\n 'proposal %d ' 
% (dets.shape[0]) +\r\n 'data %.4fs net %.4fs' % (t1, t2))\r\n i += 1\r\n\r\n assert len(imdb_boxes) == imdb.num_images, 'calculations not complete'\r\n\r\n # save results\r\n rpn_folder = os.path.join(imdb.root_path, 'rpn_data')\r\n if not os.path.exists(rpn_folder):\r\n os.mkdir(rpn_folder)\r\n\r\n rpn_file = os.path.join(rpn_folder, imdb.name + '_rpn.pkl')\r\n with open(rpn_file, 'wb') as f:\r\n cPickle.dump(imdb_boxes, f, cPickle.HIGHEST_PROTOCOL)\r\n\r\n if thresh > 0:\r\n full_rpn_file = os.path.join(rpn_folder, imdb.name + '_full_rpn.pkl')\r\n with open(full_rpn_file, 'wb') as f:\r\n cPickle.dump(original_boxes, f, cPickle.HIGHEST_PROTOCOL)\r\n\r\n logger.info('wrote rpn proposals to %s' % rpn_file)\r\n return imdb_boxes", "def step(self):\n\n with torch.no_grad():\n for group in self.param_groups:\n lr = group[\"lr\"]\n for p in group[\"params\"]:\n\n if p.grad is None:\n continue\n\n lambda_square = self.mf.conf_factor(p, keepdim=True) ** 2\n p.data.copy_(self.mf.exp(p, -lr * p.grad.data / lambda_square))", "def leapfrog(params, momentum, log_prob_func, steps=10, step_size=0.1, jitter=0.01, normalizing_const=1., softabs_const=1e6, explicit_binding_const=100, fixed_point_threshold=1e-20, fixed_point_max_iterations=6, jitter_max_tries=10, inv_mass=None, ham_func=None, sampler=Sampler.HMC, integrator=Integrator.IMPLICIT, metric=Metric.HESSIAN, store_on_GPU = True, debug=False, pass_grad = None):\n\n params = params.clone(); momentum = momentum.clone()\n # TodO detach graph when storing ret_params for memory saving\n if sampler == Sampler.HMC and integrator != Integrator.SPLITTING and integrator != Integrator.SPLITTING_RAND and integrator != Integrator.SPLITTING_KMID:\n def params_grad(p):\n p = p.detach().requires_grad_()\n log_prob = log_prob_func(p)\n # log_prob.backward()\n p = collect_gradients(log_prob, p, pass_grad)\n # print(p.grad.std())\n if torch.cuda.is_available():\n torch.cuda.empty_cache()\n return p.grad\n ret_params = []\n ret_momenta = []\n momentum += 0.5 * step_size * params_grad(params)\n for n in range(steps):\n if inv_mass is None:\n params = params + step_size * momentum #/normalizing_const\n else:\n #Assum G is diag here so 1/Mass = G inverse\n if type(inv_mass) is list:\n i = 0\n for block in inv_mass:\n it = block[0].shape[0]\n params[i:it+i] = params[i:it+i] + step_size * torch.matmul(block,momentum[i:it+i].view(-1,1)).view(-1) #/normalizing_const\n i += it\n elif len(inv_mass.shape) == 2:\n params = params + step_size * torch.matmul(inv_mass,momentum.view(-1,1)).view(-1) #/normalizing_const\n else:\n params = params + step_size * inv_mass * momentum #/normalizing_const\n p_grad = params_grad(params)\n momentum += step_size * p_grad\n ret_params.append(params.clone())\n ret_momenta.append(momentum.clone())\n # only need last for Hamiltoninian check (see p.14) https://arxiv.org/pdf/1206.1901.pdf\n ret_momenta[-1] = ret_momenta[-1] - 0.5 * step_size * p_grad.clone()\n # import pdb; pdb.set_trace()\n return ret_params, ret_momenta\n elif sampler == Sampler.RMHMC and (integrator == Integrator.IMPLICIT or integrator == Integrator.S3):\n if integrator is not Integrator.S3:\n ham_func = None\n # Else we are doing semi sep and need auxiliary for Riemann version.\n if pass_grad is not None:\n raise RuntimeError('Passing user-determined gradients not implemented for RMHMC')\n\n def fixed_point_momentum(params, momentum):\n momentum_old = momentum.clone()\n # print('s')\n for i in range(fixed_point_max_iterations):\n momentum_prev = momentum.clone()\n params = 
params.detach().requires_grad_()\n ham = hamiltonian(params, momentum, log_prob_func, jitter=jitter, softabs_const=softabs_const, normalizing_const=normalizing_const, ham_func=ham_func, sampler=sampler, integrator=integrator, metric=metric)\n params = collect_gradients(ham, params)\n\n # draw the jitter on the diagonal of Fisher again (probably a better place to do this)\n tries = 0\n while util.has_nan_or_inf(params.grad):\n params = params.detach().requires_grad_()\n ham = hamiltonian(params, momentum, log_prob_func, jitter=jitter, softabs_const=softabs_const, normalizing_const=normalizing_const, ham_func=ham_func, sampler=sampler, integrator=integrator, metric=metric)\n params = collect_gradients(ham, params)\n tries += 1\n if tries > jitter_max_tries:\n print('Warning: reached jitter_max_tries {}'.format(jitter_max_tries))\n # import pdb; pdb.set_trace()\n raise util.LogProbError()\n # import pdb; pdb.set_trace()\n # break\n\n momentum = momentum_old - 0.5 * step_size * params.grad\n momenta_diff = torch.max((momentum_prev-momentum)**2)\n if momenta_diff < fixed_point_threshold:\n break\n if debug == 1:\n print('Converged (momentum), iterations: {}, momenta_diff: {}'.format(i, momenta_diff))\n return momentum\n\n def fixed_point_params(params, momentum):\n params_old = params.clone()\n momentum = momentum.detach().requires_grad_()\n ham = hamiltonian(params, momentum, log_prob_func, jitter=jitter, softabs_const=softabs_const, normalizing_const=normalizing_const, ham_func=ham_func, sampler=sampler, integrator=integrator, metric=metric)\n momentum = collect_gradients(ham,momentum)\n momentum_grad_old = momentum.grad.clone()\n for i in range(fixed_point_max_iterations):\n params_prev = params.clone()\n momentum = momentum.detach().requires_grad_()\n ham = hamiltonian(params, momentum, log_prob_func, jitter=jitter, softabs_const=softabs_const, normalizing_const=normalizing_const, ham_func=ham_func, sampler=sampler, integrator=integrator, metric=metric)\n momentum = collect_gradients(ham,momentum)#collect_gradients(ham, params)\n params = params_old + 0.5 * step_size * momentum.grad + 0.5 * step_size * momentum_grad_old\n params_diff = torch.max((params_prev-params)**2)\n if params_diff < fixed_point_threshold:\n break\n if debug == 1:\n print('Converged (params), iterations: {}, params_diff: {}'.format(i, params_diff))\n return params\n ret_params = []\n ret_momenta = []\n for n in range(steps):\n # import pdb; pdb.set_trace()\n momentum = fixed_point_momentum(params, momentum)\n params = fixed_point_params(params, momentum)\n\n params = params.detach().requires_grad_()\n ham = hamiltonian(params, momentum, log_prob_func, jitter=jitter, softabs_const=softabs_const, normalizing_const=normalizing_const, ham_func=ham_func, sampler=sampler, integrator=integrator, metric=metric)\n params = collect_gradients(ham, params)\n\n # draw the jitter on the diagonal of Fisher again (probably a better place to do this)\n tries = 0\n while util.has_nan_or_inf(params.grad):\n params = params.detach().requires_grad_()\n ham = hamiltonian(params, momentum, log_prob_func, jitter=jitter, softabs_const=softabs_const, normalizing_const=normalizing_const, ham_func=ham_func, sampler=sampler, integrator=integrator, metric=metric)\n params = collect_gradients(ham, params)\n tries += 1\n if tries > jitter_max_tries:\n print('Warning: reached jitter_max_tries {}'.format(jitter_max_tries))\n raise util.LogProbError()\n # break\n momentum -= 0.5 * step_size * params.grad\n\n ret_params.append(params)\n 
ret_momenta.append(momentum)\n return ret_params, ret_momenta\n\n elif sampler == Sampler.RMHMC and integrator == Integrator.EXPLICIT:\n if pass_grad is not None:\n raise RuntimeError('Passing user-determined gradients not implemented for RMHMC')\n\n #During leapfrog define integrator as implict when passing into riemannian_hamiltonian\n leapfrog_hamiltonian_flag = Integrator.IMPLICIT\n def hamAB_grad_params(params,momentum):\n params = params.detach().requires_grad_()\n ham = hamiltonian(params, momentum.detach(), log_prob_func, jitter=jitter, normalizing_const=normalizing_const, softabs_const=softabs_const, explicit_binding_const=explicit_binding_const, sampler=sampler, integrator=leapfrog_hamiltonian_flag, metric=metric)\n params = collect_gradients(ham, params)\n\n # draw the jitter on the diagonal of Fisher again (probably a better place to do this)\n tries = 0\n while util.has_nan_or_inf(params.grad):\n # import pdb; pdb.set_trace()\n params = params.detach().requires_grad_()\n ham = hamiltonian(params, momentum.detach(), log_prob_func, jitter=jitter, normalizing_const=normalizing_const, softabs_const=softabs_const, explicit_binding_const=explicit_binding_const, sampler=sampler, integrator=leapfrog_hamiltonian_flag, metric=metric)\n params = collect_gradients(ham, params)\n tries += 1\n if tries > jitter_max_tries:\n print('Warning: reached jitter_max_tries {}'.format(jitter_max_tries))\n raise util.LogProbError()\n # import pdb; pdb.set_trace()\n # break\n\n return params.grad\n def hamAB_grad_momentum(params,momentum):\n momentum = momentum.detach().requires_grad_()\n params = params.detach().requires_grad_()\n # Can't detach p as we still need grad to do derivatives\n ham = hamiltonian(params, momentum, log_prob_func, jitter=jitter, normalizing_const=normalizing_const, softabs_const=softabs_const, explicit_binding_const=explicit_binding_const, sampler=sampler, integrator=leapfrog_hamiltonian_flag, metric=metric)\n # import pdb; pdb.set_trace()\n momentum = collect_gradients(ham,momentum)\n return momentum.grad\n ret_params = []\n ret_momenta = []\n params_copy = params.clone()\n momentum_copy = momentum.clone()\n for n in range(steps):\n # \\phi_{H_A}\n momentum = momentum - 0.5 * step_size * hamAB_grad_params(params,momentum_copy)\n params_copy = params_copy + 0.5 * step_size * hamAB_grad_momentum(params,momentum_copy)\n # \\phi_{H_B}\n params = params + 0.5 * step_size * hamAB_grad_momentum(params_copy,momentum)\n momentum_copy = momentum_copy - 0.5 * step_size * hamAB_grad_params(params_copy,momentum)\n # \\phi_{H_C}\n c = torch.cos(torch.FloatTensor([2* explicit_binding_const * step_size])).to(params.device)\n s = torch.sin(torch.FloatTensor([2* explicit_binding_const * step_size])).to(params.device)\n # params_add = params + params_copy\n # params_sub = params - params_copy\n # momentum_add = momentum + momentum_copy\n # momentum_sub = momentum - momentum_copy\n # ### CHECK IF THE VALUES ON THE RIGHT NEED TO BE THE OLD OR UPDATED ones\n # ### INSTINCT IS THAT USING UPDATED ONES IS BETTER\n # params = 0.5 * ((params_add) + c*(params_sub) + s*(momentum_sub))\n # momentum = 0.5 * ((momentum_add) - s*(params_sub) + c*(momentum_sub))\n # params_copy = 0.5 * ((params_add) - c*(params_sub) - s*(momentum_sub))\n # momentum_copy = 0.5 * ((momentum_add) + s*(params_sub) - c*(momentum_sub))\n params = 0.5 * ((params+params_copy) + c*(params-params_copy) + s*(momentum-momentum_copy))\n momentum = 0.5 * ((momentum+momentum_copy) - s*(params-params_copy) + c*(momentum-momentum_copy))\n 
params_copy = 0.5 * ((params+params_copy) - c*(params-params_copy) - s*(momentum-momentum_copy))\n momentum_copy = 0.5 * ((momentum+momentum_copy) + s*(params-params_copy) - c*(momentum-momentum_copy))\n\n\n # \\phi_{H_B}\n params = params + 0.5 * step_size * hamAB_grad_momentum(params_copy,momentum)\n momentum_copy = momentum_copy - 0.5 * step_size * hamAB_grad_params(params_copy,momentum)\n # \\phi_{H_A}\n momentum = momentum - 0.5 * step_size * hamAB_grad_params(params,momentum_copy)\n params_copy = params_copy + 0.5 * step_size * hamAB_grad_momentum(params,momentum_copy)\n\n ret_params.append(params.clone())\n ret_momenta.append(momentum.clone())\n return [ret_params,params_copy], [ret_momenta, momentum_copy]\n\n # PAGE 35 MCMC Using Hamiltonian dynamics (Neal 2011)\n elif sampler == Sampler.HMC and (integrator == Integrator.SPLITTING or integrator == Integrator.SPLITTING_RAND or Integrator.SPLITTING_KMID):\n if type(log_prob_func) is not list:\n raise RuntimeError('For splitting log_prob_func must be list of functions')\n if pass_grad is not None:\n raise RuntimeError('Passing user-determined gradients not implemented for splitting')\n\n def params_grad(p,log_prob_func):\n # OLD:\n # p = p.detach().requires_grad_()\n # log_prob = log_prob_func(p)\n # # log_prob.backward()\n # p = collect_gradients(log_prob, p)\n # grad = p.grad\n # # For removing GPU memory for large data sets.\n # del p, log_prob\n # torch.cuda.empty_cache()\n\n p = p.detach().requires_grad_()\n log_prob = log_prob_func(p)\n # Need to check memory issues in collect_gradients\n grad = torch.autograd.grad(log_prob,p)[0]\n # For removing GPU memory for large data sets.\n del p, log_prob, log_prob_func\n torch.cuda.empty_cache()\n return grad\n\n params = params.detach() # Detach as we do not need to remember graph until we pass into log_prob\n ret_params = []\n ret_momenta = []\n if integrator == Integrator.SPLITTING:\n M = len(log_prob_func)\n K_div = (M - 1) * 2\n if M == 1:\n raise RuntimeError('For symmetric splitting log_prob_func must be list of functions greater than length 1')\n for n in range(steps):\n # Symmetric loop to ensure reversible\n for m in range(M):\n # print('p ',n)\n grad = params_grad(params,log_prob_func[m])\n with torch.no_grad():\n momentum += 0.5 * step_size * grad\n del grad\n if torch.cuda.is_available():\n torch.cuda.empty_cache()\n if m < M-1:\n # print('q ',n)\n if inv_mass is None:\n params += (step_size/K_div) * momentum #/normalizing_const\n else:\n if type(inv_mass) is list:\n pass\n #Assum G is diag here so 1/Mass = G inverse\n elif len(inv_mass.shape) == 2:\n params += (step_size/K_div) * torch.matmul(inv_mass,momentum.view(-1,1)).view(-1) #/normalizing_const\n else:\n params += (step_size/K_div) * inv_mass * momentum #/normalizing_const\n for m in reversed(range(M)):\n # print('p ', n )\n grad = params_grad(params,log_prob_func[m])\n with torch.no_grad():\n momentum += 0.5 * step_size * grad\n del grad\n if torch.cuda.is_available():\n torch.cuda.empty_cache()\n if m > 0:\n # print('q ', n-1)\n if inv_mass is None:\n params += (step_size/K_div) * momentum #/normalizing_const\n else:\n if type(inv_mass) is list:\n pass\n #Assum G is diag here so 1/Mass = G inverse\n elif len(inv_mass.shape) == 2:\n params += (step_size/K_div) * torch.matmul(inv_mass,momentum.view(-1,1)).view(-1) #/normalizing_const\n else:\n params += (step_size/K_div) * inv_mass * momentum #/normalizing_const\n\n if store_on_GPU:\n ret_params.append(params.clone())\n ret_momenta.append(momentum.clone())\n else:\n 
ret_params.append(params.clone().cpu())\n ret_momenta.append(momentum.clone().cpu())\n elif integrator == Integrator.SPLITTING_RAND:\n M = len(log_prob_func)\n idx = torch.randperm(M)\n for n in range(steps):\n # \"Labelling of subsets is randomised for each iteration\"\n # idx = torch.randperm(M)\n for m in range(M):\n # print('p ',n)\n momentum += 0.5 * step_size * params_grad(params, log_prob_func[idx[m]])\n # print('q ',n)\n if inv_mass is None:\n params += (step_size/M) * momentum #/normalizing_const\n else:\n if type(inv_mass) is list:\n pass\n #Assum G is diag here so 1/Mass = G inverse\n elif len(inv_mass.shape) == 2:\n params += (step_size/M) * torch.matmul(inv_mass,momentum.view(-1,1)).view(-1) #/normalizing_const\n else:\n params += (step_size/M) * inv_mass * momentum #/normalizing_const\n momentum += 0.5 * step_size * params_grad(params,log_prob_func[idx[m]])\n\n ret_params.append(params.clone())\n ret_momenta.append(momentum.clone())\n # import pdb; pdb.set_trace()\n\n\n elif integrator == Integrator.SPLITTING_KMID:\n M = len(log_prob_func)\n if M == 1:\n raise RuntimeError('For symmetric splitting log_prob_func must be list of functions greater than length 1')\n for n in range(steps):\n # Symmetric loop to ensure reversible\n for m in range(M):\n # print('p ',n)\n momentum += 0.5 * step_size * params_grad(params,log_prob_func[m])\n\n if inv_mass is None:\n params = params + (step_size) * momentum #/normalizing_const\n else:\n if type(inv_mass) is list:\n pass\n #Assum G is diag here so 1/Mass = G inverse\n elif len(inv_mass.shape) == 2:\n params = params + (step_size) * torch.matmul(inv_mass,momentum.view(-1,1)).view(-1) #/normalizing_const\n else:\n params = params + (step_size) * inv_mass * momentum #/normalizing_const\n\n for m in reversed(range(M)):\n # print('p ', n )\n momentum += 0.5 * step_size * params_grad(params,log_prob_func[m])\n\n ret_params.append(params.clone())\n ret_momenta.append(momentum.clone())\n\n return ret_params, ret_momenta\n\n else:\n raise NotImplementedError()", "def run(self, C, p0 = None):\n global algorithm \n algorithm = AdaptiveMM(self.g, C, p0 = p0, lambda0 = 2000)\n solve()", "def increase_probabilities(G, B, Q, F, E, P):\n raise \"Not implemented, need to define for LTM\"\n pass \n\n\n \n # changed = dict() # changed edges and its previous probabilities\n # for e in E:\n # changed[e] = float(P.loc[e]) # remember what edges changed\n # hF = len(set(F).intersection(G.node[e[1]]['Fu']))/len(G.node[e[1]]['Fu']) # function h(F)\n # q = float(Q.loc[e])\n # b = float(B.loc[e])\n # P.loc[e] = min(hF*q + b, 1) # final probabilities p = h(F)*q + b\n # return changed", "def update_likelihood_approximation(self, **kwargs):\r\n if not isinstance(self.likelihood, Gaussian): # Updates not needed for Gaussian likelihood\r\n self.likelihood.restart()\r\n if self.has_uncertain_inputs:\r\n Lmi = chol_inv(self._Lm)\r\n Kmmi = tdot(Lmi.T)\r\n diag_tr_psi2Kmmi = np.array([np.trace(psi2_Kmmi) for psi2_Kmmi in np.dot(self.psi2, Kmmi)])\r\n\r\n self.likelihood.fit_FITC(self.Kmm, self.psi1.T, diag_tr_psi2Kmmi, **kwargs) # This uses the fit_FITC code, but does not perfomr a FITC-EP.#TODO solve potential confusion\r\n # raise NotImplementedError, \"EP approximation not implemented for uncertain inputs\"\r\n else:\r\n self.likelihood.fit_DTC(self.Kmm, self.psi1.T, **kwargs)\r\n # self.likelihood.fit_FITC(self.Kmm,self.psi1,self.psi0)\r\n self._set_params(self._get_params()) # update the GP\r", "def update_likelihood_approximation(self, **kwargs):\r\n 
self.likelihood.restart()\r\n self.likelihood.fit_full(self.kern.K(self.X), **kwargs)\r\n self._set_params(self._get_params()) # update the GP\r", "def fit_gp(self):\n # Put things into training mode.\n self.gpf_core.float()\n self.likelihood.train()\n # Now use Adam by default.\n optimizer = torch.optim.Adam([{'params': self.gpf_core.parameters()}],\n lr=0.1)\n mll = gpytorch.mlls.ExactMarginalLogLikelihood(self.likelihood,\n self.gpf_core)\n # TODO: Allow length of training to be an option.\n for _ in range(500):\n optimizer.zero_grad()\n output = self.gpf_core(self.tensor_x)\n loss = -mll(output, self.tensor_y)\n loss.backward()\n optimizer.step()", "def newParams(self):\n return package(Proposal.GaussianProposal.newParams(self))", "def _dream_proposals(\n currentVectors, history, dimensions, nChains, DEpairs, gamma, jitter, eps\n):\n\n sampleRange = history.ncombined_history\n currentIndex = np.arange(sampleRange - nChains, sampleRange)[:, np.newaxis]\n combined_history = history.combined_history\n\n # choose some chains without replacement to combine\n chains = _random_no_replace(DEpairs * 2, sampleRange - 1, nChains)\n\n # makes sure we have already selected the current chain so it is not replaced\n # this ensures that the the two chosen chains cannot be the same as the\n # chain for which the jump is\n chains += chains >= currentIndex\n\n chainDifferences = np.sum(\n combined_history[chains[:, 0:DEpairs], :], axis=1\n ) - np.sum(combined_history[chains[:, DEpairs : (DEpairs * 2)], :], axis=1)\n\n e = np.random.normal(0, jitter, (nChains, dimensions))\n\n # could replace eps with 1e-6 here\n E = np.random.normal(0, eps, (nChains, dimensions))\n\n proposalVectors = (\n currentVectors + (1 + e) * gamma[:, np.newaxis] * chainDifferences + E\n )\n return proposalVectors", "def update_mp(self, obs, pool):\n #######################################\n # Step 1 - prediction for birth targets\n born = [deepcopy(comp) for comp in self.birthgmm]\n # The original paper would do a spawning iteration as part of Step 1.\n spawned = [] # not implemented\n\n #######################################\n # Step 2 - prediction for existing targets\n updated = [GmphdComponent(self.survival * comp.weight, dot(self.f, comp.loc),\n self.q + dot(dot(self.f, comp.cov), self.f.T), comp.id)\n for comp in self.gmm]\n\n predicted = born + spawned + updated\n\n #######################################\n # Step 3 - construction of PHD update components\n # These two are the mean and covariance of the expected observation\n nu = [dot(self.h, comp.loc) for comp in predicted]\n s = [self.r + dot(dot(self.h, comp.cov), self.h.T) for comp in predicted]\n # Not sure about any physical interpretation of these two...\n k = [dot(dot(comp.cov, self.h.T), linalg.inv(s[index]))\n for index, comp in enumerate(predicted)]\n pkk = [dot(eye(len(k[index])) - dot(k[index], self.h), comp.cov)\n for index, comp in enumerate(predicted)]\n\n #######################################\n # Step 4 - update using observations\n # The 'predicted' components are kept, with a decay\n newgmm = [GmphdComponent(comp.weight * (1.0 - self.detection), comp.loc, comp.cov, comp.id)\n for comp in predicted]\n\n # then more components are added caused by each obsn's interaction with existing component\n result = pool.map_async(partial(self.update_obs_mp, predicted=predicted, nu=nu, s=s, pkk=pkk, k=k), obs)\n result = result.get()\n for newgmmpartial in result:\n newgmm.extend(newgmmpartial)\n\n self.gmm = newgmm", "def update_likelihood_approximation(self):\n 
self.likelihood.fit_full(self.kern.K(self.X))\n self._set_params(self._get_params()) # update the GP", "def addPppParams(model):\n \n ### GAPDP Parameters ####\n model.addParameter('GAPDP','KmSub2',0.385) # nadp\n model.addParameter('GAPDP','KmProd2',0.202) # nadph\n model.addParameter('GAPDP','kcatF',2.8)\n model.addParameter('GAPDP','kcatR',0)\n\n ### FMETTRS Parameters ###\n model.addParameter('FMETTRS','kcatF',0.45)\n\n ### MTHFC Parameters ###\n model.addParameter('MTHFC','kcatF',185)\n\n #### GHMT2 Paramters ####\n model.addParameter('GHMT2','kcatF',0.0)\n model.addParameter('GHMT2','kcatR',0.0)\n \n #### TKT1 Parameters ####\n model.addParameter('TKT1',rxnFormKey='kcatF',value=20.58)\n model.addParameter('TKT1',rxnFormKey='kcatR',value=0.8)\n \n model.addParameter('TKT1',rxnFormKey='KmSub1',value=0.743) #g3p\n model.addParameter('TKT1',rxnFormKey='KmSub2',value=3.7298) #s7p\n model.addParameter('TKT1',rxnFormKey='KmProd1',value=0.4717) #r5p\n model.addParameter('TKT1',rxnFormKey='KmProd2',value=0.134) #xu5p\n \n #### TKT2 Parameters ####\n model.addParameter('TKT2',rxnFormKey='kcatF',value=26.87)\n model.addParameter('TKT2',rxnFormKey='kcatR',value=1.4)\n \n model.addParameter('TKT2',rxnFormKey='KmSub1',value=0.25) #f6p\n model.addParameter('TKT2',rxnFormKey='KmSub2',value=0.743) #g3p\n model.addParameter('TKT2',rxnFormKey='KmProd1',value=0.0227) #e4p\n model.addParameter('TKT2',rxnFormKey='KmProd2',value=0.134) #xu5p\n \n #### TALA Parameters ####\n model.addParameter('TALA',rxnFormKey='kcatF',value=22.3)\n model.addParameter('TALA',rxnFormKey='kcatR',value=0.54)\n \n model.addParameter('TALA',rxnFormKey='KmSub1',value=0.0401) #e4p\n model.addParameter('TALA',rxnFormKey='KmSub2',value=0.6688) #f6p\n model.addParameter('TALA',rxnFormKey='KmProd1',value=1.9) #g3p\n model.addParameter('TALA',rxnFormKey='KmProd2',value=0.285) #s7p\n\n \n #### Speed up DGSN Pathway ####\n model.addParameter('DGSNK',rxnFormKey='kcatF',value=2.25)\n\n #### Speed up DADN pathway ####\n model.addParameter('PUNP2',rxnFormKey='kcatF',value=13.3)\n\n #### Speed up FBA rxn ####\n #model.addParameter('FBA',rxnFormKey='kcatF',value=64.5)\n\n model.addParameter('RNDR2',rxnFormKey='KmSub1',value=0.24)\n\n \n# #### RPI Parameters ####\n model.addParameter('RPI',rxnFormKey='kcatF',value=10.0)\n model.addParameter('RPI',rxnFormKey='kcatR',value=1.0)\n \n #model.addParameter('RPI',rxnFormKey='KmSub1',value=1.0)\n #model.addParameter('RPI',rxnFormKey='KmProd1',value=1.0)\n \n model.addParameter('FBA',rxnFormKey='KmSub1',value=0.12)\n model.addParameter('FBA',rxnFormKey='KmProd2',value=0.05)\n \n \n model.addParameter('GAPD',rxnFormKey='kcatF',value=442.0) \n model.addParameter('GAPD',rxnFormKey='kcatR',value=73.6) \n \n\n model.addParameter('FBA',rxnFormKey='kcatR',value=12.6)\n \n\n model.addParameter('TPI',rxnFormKey='kcatR',value=67)\n \n model.addParameter('TPI',rxnFormKey='KmSub1',value=0.077)\n model.addParameter('TPI',rxnFormKey='KmProd1',value=0.084) \n \n\n model.addParameter('FBA',rxnFormKey='kcatF',value=21.0)\n \n \n model.addParameter('PGK',rxnFormKey='kcatR',value=3.4)\n \n model.addParameter('PGM',rxnFormKey='KmSub1',value=3.6)\n model.addParameter('PGM',rxnFormKey='KmProd1',value=0.2)\n \n \n model.addParameter('PGK',rxnFormKey='KmSub1',value=0.01)\n model.addParameter('PGK',rxnFormKey='KmProd1',value=0.1)\n \n \n model.addParameter('GAPD',rxnFormKey='KmProd1',value=0.47)\n model.addParameter('GAPD',rxnFormKey='KmProd2',value=0.061)\n \n \n model.addParameter('DRPA',rxnFormKey='kcatR',value=34.0)\n \n 
model.addParameter('DRPA',rxnFormKey='KmProd1',value=0.267)\n model.addParameter('DRPA',rxnFormKey='KmProd2',value=0.2)\n\n \n model.addParameter('PPM2',rxnFormKey='kcatF',value=173)\n \n model.addParameter('PPM2',rxnFormKey='KmSub1',value=0.013)\n model.addParameter('PPM2',rxnFormKey='KmProd1',value=1.2)\n\n\n\n# print('Updated PPP Parameters')\n\n return", "def propose(matrix,motif):\n if random.random() < 0.5:\n return matrix,mutate_motif(motif)\n else:\n return mutate_matrix(matrix),motif", "def train_mdn_with_proposal(save=True):\n\n # load prior proposal and observations\n pilot_means, pilot_stds = helper.load(datadir + 'pilot_run_results.pkl')\n obs_stats = helper.load(datadir + 'obs_stats.pkl')\n obs_stats -= pilot_means\n obs_stats /= pilot_stds\n net, _, prior_proposal, _ = helper.load(netsdir + 'mdn_svi_proposal_prior_{0}.pkl'.format(n_bootstrap_iter-1))\n\n n_samples = 2000\n\n # generate data\n params = []\n stats = []\n i = 0\n\n while i < n_samples:\n\n prop_params = np.exp(prior_proposal.gen())[0]\n if np.any(np.log(prop_params) < log_prior_min) or np.any(np.log(prop_params) > log_prior_max):\n continue\n try:\n lv = mjp.LotkaVolterra(init, prop_params)\n states = lv.sim_time(dt, duration, max_n_steps=max_n_steps)\n except mjp.SimTooLongException:\n continue\n\n sum_stats = calc_summary_stats(states)\n sum_stats -= pilot_means\n sum_stats /= pilot_stds\n\n params.append(prop_params)\n stats.append(sum_stats)\n i += 1\n\n params = np.array(params)\n stats = np.array(stats)\n\n # train an mdn to give the posterior\n minibatch = 100\n maxiter = int(5000 * n_samples / minibatch)\n monitor_every = 1000\n regularizer = lf.regularizerSvi(net.mps, net.sps, 0.01)\n trainer = Trainer.Trainer(\n model=net,\n trn_data=[stats, np.log(params)],\n trn_loss=net.mlprob + regularizer / n_samples,\n trn_target=net.y\n )\n trainer.train(\n maxiter=maxiter,\n minibatch=minibatch,\n show_progress=True,\n monitor_every=monitor_every\n )\n\n # calculate the approximate posterior\n mdn_mog = net.get_mog(obs_stats)\n mdn_mog.prune_negligible_components(1.0e-3)\n approx_posterior = mdn_mog / prior_proposal\n\n # save the net\n if save:\n filename = netsdir + 'mdn_svi_proposal_hiddens_50_tanh_comps_1_sims_2k.pkl'\n helper.save((net, approx_posterior), filename)", "def train_restgpm(self):\n j = 0\n\n for p in self.net.parameters():\n # only handle conv weight and fc weight\n if p.grad is None:\n continue\n if p.grad.ndim != 2 and p.grad.ndim != 4:\n continue\n if j < len(self.M_vec):\n if self.args.model in ['fsdgpm'] and self.args.method in ['dgpm', 'xdgpm']:\n # lambdas = torch.sigmoid(self.args.tmp * self.lambdas[j]).reshape(-1)\n lambdas = self.lambdas[j]\n else:\n lambdas = torch.ones(self.M_vec[j].shape[1])\n\n if self.cuda:\n self.M_vec[j] = self.M_vec[j].cuda()\n lambdas = lambdas.cuda()\n\n if p.grad.ndim == 4:\n # rep[i]: n_samples * n_features\n grad = p.grad.reshape(p.grad.shape[0], -1)\n grad -= torch.mm(torch.mm(torch.mm(grad, self.M_vec[j]), torch.diag(lambdas)), self.M_vec[j].T)\n p.grad = grad.reshape(p.grad.shape).clone()\n else:\n p.grad -= torch.mm(torch.mm(torch.mm(p.grad, self.M_vec[j]), torch.diag(lambdas)), self.M_vec[j].T)\n\n j += 1" ]
[ "0.6288661", "0.6088569", "0.5891187", "0.5777578", "0.57462066", "0.5741664", "0.5720792", "0.56987095", "0.5695706", "0.56813467", "0.56514174", "0.56160617", "0.5612685", "0.5612685", "0.55843073", "0.55429906", "0.551866", "0.5515407", "0.5506441", "0.55028486", "0.54870903", "0.548517", "0.5481529", "0.5451254", "0.5448714", "0.54442227", "0.5439902", "0.5414758", "0.5405085", "0.5391655" ]
0.72234386
0
Sets the current node to the given data and assigns the node as a leaf node
def setData(self,data):
    self.data=data
    self.leaf=True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def node_data(self, node_data):\n\n self._node_data = node_data", "def insert_node(self, data):\n\t\tif self.root is None:\n\t\t\tself.root = Node(data)\n\t\telse:\n\t\t\tcurrent_node = self.root\n\t\t\twhile current_node.next is not None:\n\t\t\t\tcurrent_node = current_node.next\n\t\t\tcurrent_node.next = Node(data, current_node)", "def setCurrentNode(self, newNode):\r\n\t\tself.currentNode = newNode", "def set_data(node, value):\n node['data'] = value", "def __init__(self, data):\n\n super(DataNode, self).__init__()\n\n # the current data\n self._data = data\n # the set of children\n self._children = set()", "def __init__(self, data, parent):\n self.left = None\n self.right = None\n self.data = data\n self.parent = parent", "def set(self, key, data, expiration=None):\n node = self._get_node(key)\n\n if node:\n node.data = data\n else:\n if len(self) + 1 > self.max_size:\n node = self._pop()\n del self.map[node.key]\n\n node = Node(key, data, expiration)\n self._append(node)\n self.map[key] = node", "def __init__(self, data):\n self.data = data\n self.left = None\n self.right = None", "def __init__(self, data, node):\n self.data = data\n self.node = node", "def insert(self, data):\n if not self:\n self.root.append(data)\n return self\n\n parent, current = self._lookup(data)\n if current: # data equivalent node found!\n current.append(data)\n else: # equivalent node not found!\n setattr(parent, \"right\" if parent < data else \"left\", Node().append(data))\n return self", "def set_node(self, node):\n self.__node = node", "def insert(self, data):\n if self.head == None:\n self.head = Node(data)\n else:\n curr = self.head\n while curr.link != None:\n curr = curr.link\n curr.link = Node(data)", "def _add_root(self, data):\n if self._root is not None:\n raise ValueError(\"Root exists\")\n self._size = 1\n self._root = self._Node(data)\n return self._make_position(self._root)", "def insert(self, data):\n \n def _find_parent(current, node):\n \"\"\"Recursively descend through the tree to find the node that\n should be the parent of the new node. Do not allow for duplicates.\n \"\"\"\n \n if node == current:\n raise ValueError(str(node.data) + \" is already in the tree.\")\n if node < current: # Travel left\n if current.left:\n return _find_parent(current.left,node)\n else:\n return current\n else: # Travel right\n if current.right:\n return _find_parent(current.right,node)\n else:\n return current\n \n n = KDTNode(data) # Make a new node\n if len(data) != self.k:\n raise ValueError(\"data must be of length \" + str(self.k))\n if not self.root:\n self.root = n # Case 1: empty tree\n n.axis = 0\n else: # Case 2: use _find_parent\n parent = _find_parent(self.root, n) # Get the parent\n if n < parent: parent.left = n # Insert the node\n else: parent.right = n\n n.prev = parent # Double link\n n.axis = (n.prev.axis + 1) % self.k\n return n", "def set_data(self,key='',val=None):\n parent_itm = self._root\n if '.' 
in key:\n parent_itm = self.get_data(self.parent_key(key))\n itm_key = key.split('.')[-1]\n if itm_key:\n try: \n parent_itm[itm_key] = val\n except:\n try: \n parent_itm[int(itm_key)] = val # list case\n except:\n parent_itm.append(val) # append to list case", "def __init__(self, data):\n self.data = data\n self.left = None\n self.right = None\n # added stuff below", "def add_node(self, data):\n new_node = Node(data)\n if self.cur_node is not None:\n new_node.next, self.cur_node.next = self.cur_node.next, new_node\n self.cur_node = new_node\n self.length += 1\n self.cur_pos += 1\n if self.start_node is None:\n self.start_node = self.cur_node\n # print(\"Node({}) added to {}\".format(new_node.data, self.cur_pos-1))", "def set_leaf_node(self, leaf_value):\n\n if not self.empty:\n try:\n node_key = self.node_key\n except AttributeError:\n node_key = '_'\n raise ValueError(\n 'Cannot modify a non-empty node. ' + \\\n 'If you meant to change type of node {}, '.format(node_key) + \\\n 'delete it first and then add an empty node with ' + \\\n 'the same key.')\n\n # check if leaf_value is a list-like object\n try:\n _ = iter(leaf_value)\n is_list = True\n except TypeError:\n is_list = False\n\n try:\n if is_list:\n leaf_value = [float(i) for i in leaf_value]\n else:\n leaf_value = float(leaf_value)\n except TypeError:\n raise TreeliteError('leaf_value parameter should be either a ' + \\\n 'single float or a list of floats')\n\n try:\n if is_list:\n _check_call(_LIB.TreeliteTreeBuilderSetLeafVectorNode(\n self.tree.handle,\n ctypes.c_int(self.node_key),\n c_array(ctypes.c_double, leaf_value),\n ctypes.c_size_t(len(leaf_value))))\n else:\n _check_call(_LIB.TreeliteTreeBuilderSetLeafNode(\n self.tree.handle,\n ctypes.c_int(self.node_key),\n ctypes.c_double(leaf_value)))\n self.empty = False\n except AttributeError:\n raise TreeliteError('This node has never been inserted into a tree; '\\\n + 'a node must be inserted before it can be a leaf node')", "def add(self, data):\n root_copy= self\n while (root_copy.right):\n root_copy = root_copy.right\n new_node = DoublyLinkedList(data, root_copy, None)\n root_copy.right = new_node\n return new_node", "def __init__(self, data):\n self._data = data\n self._parent = None\n self._rkid = None\n self._lkid = None", "def __init__(self, data, node):\n self.data = data\n self.node = node # This is the data structure which holds the data for this node, e.g. 
lat, lon, etc.", "def __init__(self, data: str):\n self.root = Node(data)\n self.node_count = 1\n self.node_of_last_computed_hash = 0", "def push(self, data):\n\n node = Node(data)\n\n if self.head == None:\n\n self.head = node\n else:\n\n traverse = self.head\n\n while traverse.next != None:\n traverse = traverse.next\n\n traverse.next = node", "def add_child(self, data):\n if data == self.data:\n return # node already exist\n\n if data < self.data:\n #add data to left subtree\n if self.left:\n self.left.add_child(data)\n else:\n self.left = BinarySearchTreeNode(data)\n else:\n #add data to right subtree\n if self.right:\n self.right.add_child(data)\n else:\n self.right = BinarySearchTreeNode(data)", "def _put(self, k, v, currNode):\n if k < currNode.key:\n if currNode.hasLeftChild():\n self._put(k, v, currNode.leftChild)\n else:\n currNode.leftChild = TreeNode(k, v, parent=currNode)\n\n elif k > currNode.key:\n if currNode.hasRightChild():\n self._put(k, v, currNode.rightChild)\n else:\n currNode.rightChild = TreeNode(k, v, parent=currNode)\n\n else:\n currNode.payload = v\n self.size -= 1", "def insert_left(self, data: DataType) -> Node[DataType]:\n self.left_node = Node(data)\n self.left_node.right_node = self # connect the new node to this node\n return self.left_node", "def insert(self, data):\n\n\t\tif (self.treetype() and type(data) != self.treetype()):\n\t\t\traise TypeError(str(type(data)) + \" is invalid for this tree.\")\n\n\t\tself._size += 1\n\n\t\tif (not self._root):\n\t\t\tself._root = BTNode(value=data, depth=1)\n\t\t\treturn\n\n\t\tself._recursive_insert(data, self._root)\n\t\treturn", "def _insert(self, data, cur_node):\n if data < cur_node.data:\n if cur_node.left_child == None:\n cur_node.left_child = AVLNode(data)\n cur_node.left_child.parent=cur_node # set parent\n self._check_balance(cur_node.left_child)\n else:\n self._insert(data, cur_node.left_child)\n elif data > cur_node.data:\n if cur_node.right_child == None:\n cur_node.right_child = AVLNode(data)\n cur_node.right_child.parent = cur_node # set parent\n self._check_balance(cur_node.right_child)\n else:\n self._insert(data,cur_node.right_child)\n # else:\n # print(\"data already in tree!\")", "def sync_tree_with_data(self, tree: QTreeView, data: List[DataNode]) -> None:\n tree.setModel(self.create_model_from_nodes(data))\n tree.expandAll()", "def leaf_replace(self, node):\r\n if self.label is not None: # return if leaf node\r\n return\r\n left, right = self.left, self.right\r\n left.parents.remove(self) if self in left.parents else left.parents\r\n right.parents.remove(self) if self in right.parents else right.parents\r\n if node.label is None:\r\n internal = [node]\r\n else:\r\n internal = []\r\n while len(internal) > 0:\r\n l = internal.pop(0)\r\n if l.left.label is not None: # leaf\r\n if l.left.label == 0:\r\n l.left = left\r\n left.parents.append(l) if l not in left.parents else left.parents\r\n elif l.left.label == 1:\r\n l.left = right\r\n right.parents.append(l) if l not in right.parents else right.parents\r\n else:\r\n internal.append(l.left)\r\n\r\n if l.right.label is not None: # leaf\r\n if l.right.label == 0:\r\n l.right = left\r\n left.parents.append(l) if l not in left.parents else left.parents\r\n elif l.right.label == 1:\r\n l.right = right\r\n right.parents.append(l) if l not in right.parents else right.parents\r\n else:\r\n internal.append(l.right)" ]
[ "0.70346564", "0.6833494", "0.6730778", "0.6714654", "0.66997105", "0.65295124", "0.63632095", "0.63567376", "0.6312098", "0.6311947", "0.63067853", "0.62925583", "0.6279957", "0.62610626", "0.625829", "0.6252369", "0.62375367", "0.62080336", "0.619256", "0.6182957", "0.6157092", "0.6153961", "0.6130706", "0.61271805", "0.6120443", "0.6067323", "0.60323757", "0.6017214", "0.6011494", "0.6008777" ]
0.77952176
0
Sets the split attribute and split value to the current node
def setSplit(self,split): self.split=split
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def split(self, attribute):\n if attribute not in self.attributes:\n raise KeyError('Attribute not present in node')\n \n self.split_attr = attribute\n \n # list() is used to make a copy of the list instead of pointing to the same list\n child_attributes = list(self.attributes)\n child_attributes.remove(attribute)\n \n child1_ancestors = list(self.ancestors)\n child0_ancestors = list(self.ancestors)\n child1_ancestors.append(attribute_value(attribute, 1))\n child0_ancestors.append(attribute_value(attribute, 0))\n \n self.val1 = Node(child_attributes, child1_ancestors, self.data, self.heuristic)\n self.val0 = Node(child_attributes, child0_ancestors, self.data, self.heuristic)", "def set_split(self, split, force=True):\n if(force or self.split != split):\n c = (0, 1)[split]\n self.debug_print('set split mode: %d' % c)\n r = self.send_com(0x0f, [c])\n if(r):\n self.split = split\n return r", "def SetSplitValues(self, *args):\n return _ShapeUpgrade.ShapeUpgrade_SplitCurve_SetSplitValues(self, *args)", "def set_split(self,split='train'):\r\n \r\n self._target_data = self.processed_data[split]\r\n self.split_ = split", "def __split_node(self, cur_node):\n temp = self.Node(cur_node.data_list[len(cur_node.data_list) / 2:], cur_node.next_node)\n cur_node.data_list = cur_node.data_list[:len(cur_node.data_list) / 2]\n cur_node.next_node = temp\n\n if cur_node == self.tail:\n self.tail = cur_node.next_node", "def addSplitValue(self, split):\n self.balance += split.value", "def split_rule(self, split_rule):\n\n self._split_rule = split_rule", "def setValueSplitSymbol(self, value):\n return self._set(valueSplitSymbol=value)", "def split(self):\n left = BPlusNode(self.order)\n right = BPlusNode(self.order)\n mid = self.order // 2\n\n left.keys = self.keys[:mid]\n left.values = self.values[:mid]\n\n right.keys = self.keys[mid:]\n right.values = self.values[mid:]\n\n # When the node is split, set the parent key to the left-most key of the right child node.\n self.keys = [right.keys[0]]\n self.values = [left, right]\n self.leaf = False", "def setAnnotationSplitSymbol(self, value):\n return self._set(annotationSplitSymbol=value)", "def addSplit(self):\n pass", "def attr_sep(self, new_sep: str) -> None:\n self._attr_sep = new_sep\n self._filters_tree = self._generate_filters_tree()", "def set_split(self):\n #Regular expressions; try 1 first, then 2, etc.\n rex1 = re.compile('F?LD')\n rex2 = re.compile('[LF]?LQ')\n \n #For regular expression, check if there is a match that is >10 AA from the end\n if re.search(rex1, self.sequence) and len(re.split(rex1, self.sequence)[-1]) > 10:\n start, end = [m.span() for m in rex1.finditer(self.sequence)][-1]\n# end += 16 #TODO why +15/16?\n elif re.search(rex2, self.sequence) and len(re.split(rex2,self.sequence)[-1]) > 10:\n start, end = [m.span() for m in rex2.finditer(self.sequence)][-1]\n# end += 15\n else:\n self.split_index = -1\n self.core = self.sequence\n self.leader = ''\n return\n self.split_index = end\n self.leader = self.sequence[:end]\n self.core = self.sequence[end:]", "async def _set_split(self, row, value):\n self.sheet.update_cell(row, 2, value)\n return True", "def split(self, split):\n\n # check split validity\n self.__check_split_object_validity(split)\n\n res = self.pdf4me_client.custom_http.post_universal_object(universal_object=split,\n controller='Split/Split')\n\n return res", "def SetVSplitValues(self, *args):\n return _ShapeUpgrade.ShapeUpgrade_SplitSurface_SetVSplitValues(self, *args)", "def splitmetric(self, dataset, attr, target_attr):\n 
raise NotImplementedError('Subclass should implement this method')", "def split(self, place_leaf_splitted):\n raise NotImplementedError", "def test_split_adds_children(mock_amg):\n\n mock_amg.cells[0].split()\n assert mock_amg.cells[0].children['bl'] is mock_amg.cells[-4]\n assert mock_amg.cells[0].children['br'] is mock_amg.cells[-3]\n assert mock_amg.cells[0].children['tl'] is mock_amg.cells[-2]\n assert mock_amg.cells[0].children['tr'] is mock_amg.cells[-1]", "def set_tag_separator(self, tag_separator):\n\n self._tag_separator = tag_separator", "def split(self, X):", "def _create_split(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('axis', op.axis),\n helper.make_attribute('split', op.parts),\n ])\n return node", "def splitmetric(self, dataset, attr, target_attr):\n freq = {}\n splitinfo = 0.0\n \n #Call information gain\n gain = ID3.splitmetric(self, dataset, attr, target_attr);\n samplenumbers = len(dataset)\n # Calculate the frequency of each of the values in the split attribute\n for record in dataset:\n if (record[attr] in freq):\n freq[record[attr]] += 1.0\n else:\n freq[record[attr]] = 1.0\n \n #Calculate split info, entropy of splitter\n for val in list(freq.values()):\n splitinfo += (- val / samplenumbers) * math.log(val / samplenumbers, 2)\n \n #Split info equals 0 when there only one class in data set\n if splitinfo == 0:\n splitinfo = 0.00000001\n \n return gain / splitinfo", "def setSplitLength(self, value):\n return self._set(splitLength=value)", "def setValidationSplit(self, v):\n self._set(validationSplit=v)\n return self", "def setValidationSplit(self, v):\n self._set(validationSplit=v)\n return self", "def setValidationSplit(self, v):\n self._set(validationSplit=v)\n return self", "def setValidationSplit(self, v):\n self._set(validationSplit=v)\n return self", "def setValidationSplit(self, validation_split):\n return self._set(validationSplit=validation_split)", "def setSplitCount(self, count):\n pass" ]
[ "0.7075071", "0.6009954", "0.59733343", "0.59363616", "0.5901629", "0.5762262", "0.5728003", "0.55657804", "0.55646825", "0.5537182", "0.551015", "0.5344667", "0.5314403", "0.5285725", "0.5230963", "0.5223967", "0.5204766", "0.51964456", "0.5193168", "0.5177903", "0.51655376", "0.51642436", "0.5132085", "0.51012623", "0.50880903", "0.50880903", "0.50880903", "0.50880903", "0.50697064", "0.5069651" ]
0.69429463
1
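Note on the scalar fields, prompted by this row being the only one in view whose document_rank is not 0: in every row shown here the rank equals the number of negative_scores entries that exceed document_score (here 0.69429463 is beaten by the single negative scored 0.7075071, giving rank 1, while the surrounding rows have no negative above their document_score and get rank 0). The helper below is illustrative only and is not part of the dataset; it recomputes the rank from a parsed row under that assumption.

def recompute_rank(row):
    # Assumes document_rank counts the negatives whose score exceeds
    # document_score; this matches every row visible in this preview.
    doc_score = float(row["document_score"])
    neg_scores = [float(s) for s in row["negative_scores"]]
    return sum(score > doc_score for score in neg_scores)

# For the setSplit row above, recompute_rank(...) returns 1: exactly one of the
# thirty negatives (0.7075071) outscores the positive document (0.69429463).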
Sets the given node to greater value of the current node
def setGreater(self,Node): self.greater=Node
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __gt__(self, other: 'MaxNode') -> bool:\n if self.priority == other.priority:\n return self.value < other.value\n return self.priority < other.priority", "def __gt__(self, other: 'MinNode') -> bool:\n if self.priority == other.priority:\n return self.value > other.value\n return self.priority > other.priority", "def compare(self, node, new_node):\n if new_node.get_value() == node.get_value():\n return 0\n elif new_node.get_value() < node.get_value():\n return -1 # traverse left\n else: # new_node > node\n return 1 # traverse right", "def greater_than(self) -> global___Expression:", "def setLesser(self,Node):\n self.lesser=Node", "def compare(self,node, new_node):\n if new_node.get_value() == node.get_value():\n return 0\n elif new_node.get_value() < node.get_value():\n return -1\n else:\n return 1", "def _sink(self, val, cur_node):\n if val > cur_node.data:\n if not cur_node.right:\n cur_node.right = Node(val, cur_node)\n self.size_number += 1\n if cur_node.balance_number == 0:\n cur_node.balance_number = 1\n else:\n count = self._sink(val, cur_node.right)\n if cur_node.balance_number <= count:\n cur_node.balance_number += 1\n elif val < cur_node.data:\n if not cur_node.left:\n cur_node.left = Node(val, cur_node)\n self.size_number += 1\n if cur_node.balance_number == 0:\n cur_node.balance_number = 1\n else:\n count = self._sink(val, cur_node.left)\n if cur_node.balance_number <= count:\n cur_node.balance_number += 1\n return cur_node.balance_number", "def __lt__(self, other: 'MaxNode') -> bool:\n if self.priority == other.priority:\n return self.value > other.value\n return self.priority > other.priority", "def greater(value, other):\n return value < other", "def add(self, value):\n try:\n if not self.root:\n self.root = Node(value)\n else:\n node = self.root\n while node:\n if node.value > value:\n if not node.left:\n node.left = Node(value)\n break\n node = node.left\n else:\n if not node.right:\n node.right = Node(value)\n break\n node = node.right\n except:\n print(\"something went wrong please try again\")", "def __gt__(self, other):\n return greater(self, other)", "def __gt__(self, other):\n\t\ttry:\n\t\t\treturn self.val > other.val\n\t\texcept:\n\t\t\treturn self.val > other", "def __gt__(self, other):\n return self.element() > other.element()", "def __gt__(self, other):\n return self.greaterThan(other)", "def __ge__( self, value ):\r\n\t\treturn ( self > value ) or ( self == value )", "def __gt__(self, value):\n self = self.__ge__(value)\n return self.__invert__()", "def set_GreaterThan(self, value):\n super(MoneyReceivedInputSet, self)._set_input('GreaterThan', value)", "def insert(self, val):\n if type(val) not in [int, float]:\n raise TypeError('This tree accepts numbers only.')\n if self.contains(val):\n raise ValueError('Node already in tree.')\n new_node = Node(val)\n if self._size == 0:\n self._root = new_node\n self._max_depth = 1\n self._rbal = 1\n self._lbal = 1\n else:\n current_depth = 1\n current_node = self._root\n while val is not current_node._data:\n current_depth += 1\n if val < current_node._data:\n if current_node._lkid:\n current_node = current_node._lkid\n else:\n current_node._lkid = new_node\n new_node._parent = current_node\n self._get_new_max()\n elif val > current_node._data:\n if current_node._rkid:\n current_node = current_node._rkid\n else:\n current_node._rkid = new_node\n new_node._parent = current_node\n self._get_new_max()\n self._size += 1", "def greater_than_or_equal(self) -> global___Expression:", "def less(value, other):\n return value 
> other", "def _insert(self, value, cur_node):\n if value < cur_node.value:\n if cur_node.left_child == None:\n cur_node.left_child = Node(value)\n else: \n self._insert(value, cur_node.left_child)\n elif value > cur_node.value: #creating elif in case the value is same as the current node \n if cur_node.right_child == None:\n cur_node.right_child = Node(value)\n else:\n self._insert(value, cur_node.right_child)\n else:\n print(\"Value already in the tree\")", "def insert(self, value):\n new_node = Node(value)\n if self.root is None:\n self.root = new_node\n else:\n node = self.root\n while(node!=None):\n if(value <= node.data):\n if node.left is None:\n node.left = new_node\n node = node.left\n node = node.left\n elif(value > node.data):\n if node.right is None:\n node.right = new_node\n node = node.right\n node = node.right", "def sorted_insert(self, value):\n if self.__head is None or self.__head.data > value:\n new_node = Node(value)\n if self.__head is not None:\n new_node.next_node = self.__head\n self.__head = new_node\n else:\n runner = self.__head\n while runner.next_node and value > runner.next_node.data:\n runner = runner.next_node\n runner.next_node = Node(value, runner.next_node)", "def set(self, node_index, value):\n if value < 0.0:\n raise ValueError(\n 'Sum tree values should be nonnegative. Got {}'.format(value))\n self.highest_set = max(node_index, self.highest_set)\n node_index = node_index + self.low_idx\n self.max_recorded_priority = max(value, self.max_recorded_priority)\n\n delta_value = value - self.nodes[node_index]\n\n # Now traverse back the tree, adjusting all sums along the way.\n for _ in reversed(range(self.depth)):\n # Note: Adding a delta leads to some tolerable numerical inaccuracies.\n self.nodes[node_index] += delta_value\n node_index = (node_index - 1) // 2\n\n self.nodes[node_index] += delta_value\n assert node_index == 0, ('Sum tree traversal failed, final node index '\n 'is not 0.')", "def add(self, value: object) -> None:\n #binary search tree == empty\n if self.root is None:\n self.root = TreeNode(value)\n return\n\n #variables loop/traverse bt\n child = self.root\n parent = None\n\n # place node via while loop\n while child is not None:\n parent = child\n if value < child.value:\n child = child.left\n else:\n child = child.right\n\n #new_node/ child\n if value < parent.value:\n parent.left = TreeNode(value)\n else:\n parent.right = TreeNode(value)", "def gt(self, val):\n\t\treturn GreaterThan(self, val)", "def convert_broadcast_greater(node, **kwargs):\n return create_basic_op_node('Greater', node, kwargs)", "def clear_negative_branches(self, new_value=0.0):\n for node in self.nodes:\n if node.branch is not None and node.branch < 0:\n node.branch = new_value\n self.process_tree_nodes()", "def maximum_value(self):\n if not self.root:\n return \"the tree is empty!\"\n\n max_val = self.root.value\n\n def _max_value(node):\n nonlocal max_val\n if not node:\n return\n if node.value > max_val:\n max_val = node.value\n\n _max_value(node.left)\n _max_value(node.right)\n _max_value(self.root)\n return max_val", "def test_find_smallest_element_greater_than(self):\n valToFind = max(self.values)\n\n valueFound = self.tree.findSmallestElementGreaterThan(valToFind)\n self.assertEquals(None, valueFound)" ]
[ "0.6773434", "0.67291725", "0.6492923", "0.6459286", "0.64502484", "0.6394402", "0.6299902", "0.6269281", "0.6263838", "0.623945", "0.6228478", "0.62032765", "0.5915307", "0.5913338", "0.5901221", "0.58720595", "0.585558", "0.58470595", "0.5845788", "0.58243996", "0.58213806", "0.581632", "0.58143985", "0.5802757", "0.57929415", "0.57763606", "0.57588696", "0.57564545", "0.57542056", "0.57218695" ]
0.82811105
0
Sets the given node to lesser link of the current node
def setLesser(self,Node): self.lesser=Node
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def linked_node(self, value):\n self._linked_node = value", "def move_ant(self, node_to_visit):\n self.actual_node = node_to_visit\n self.remember_visited_node(node_to_visit)", "def _switch_nodes(self, walker):\n walker.prev.next = walker.next \n walker.next = walker.next.next \n walker.next.prev = walker\n walker.prev.next.prev = walker.prev\n walker.prev.next.next = walker \n walker.prev = walker.prev.next", "def set_next(self, node):\r\n self.__next = node", "def set_node(self, node):\n self.__node = node", "def set_next(self, node):\n self.__next = node", "def _move_to_head(self, node):\n self._remove_node(node)\n self._add_node(node)", "def link_source(self, node: Node) -> None:\n self.probability_links[node.key] = node", "def findLeastEnteringLinks(self):\n leastEnteringLinks = self.numLinks + 1\n leastEnteringNode = None\n for i in self.node:\n if len(self.node[i].reverseStar) < leastEnteringLinks:\n leastEnteringLinks = len(self.node[i].reverseStar)\n leastEnteringNode = i\n return leastEnteringNode", "def parent_connect(self, node):\n if self.parent.get() >= self.data:\n self.parent.set_left(node)\n if node and node.left is not None:\n node.set_parent(self.parent)\n else:\n self.parent.set_right(node)\n if node and node.left is not None:\n node.set_parent(self.parent)", "def set_next(node, value):\n node['next'] = value", "def relink(self, link_id):", "def move_to_head(self, node):\n if node is self.head:\n return\n value = node.value\n self.delete(node)\n self.add_to_head(value)", "def set_left(self, nodeL):\n self.left = nodeL\n nodeL.right = self", "def add_node(self, node):\n temp = self.head.post\n self.head.post = node\n node.pre = self.head\n node.post = temp\n temp.pre = node", "def set_both_connections(self, new_node):\n distance_to_new = self.current_node.distance_between(new_node.location)\n self.current_node.set_adjacent_from_direction(distance_to_new, new_node)\n reverse_distance = new_node.distance_between(self.current_node.location)\n new_node.set_adjacent_from_direction(reverse_distance, self.current_node)", "def __relax_edge(self, node):\n if node == str(self.get_start()):\n distance_to_parent = 0 # no parent for the starting point\n else:\n parent = self.path[node]\n distance_to_parent = self.distances[parent] + 1\n # try to relax the stretched edge\n if self.distances[node] > distance_to_parent:\n self.distances[node] = distance_to_parent", "def _redirect(self, node1, node2):\n if node1.parent.right is node1:\n node1.parent.right = node2\n else:\n node1.parent.left = node2", "def reorder_links(self, previous_node, links): # pragma: no cover\n\t\treturn links", "def set_head(self, node, head):\n # Check indices.\n if head in [node] + self.children_recursive(node):\n msg = 'future head %i is a (possibly indirect) child of %i'\n raise ValueError(msg % (head, node))\n if node <= 0 or head < 0:\n raise IndexError()\n\n # Set head.\n heads = self._heads[:]\n heads[node - 1] = head\n\n # Construct new tree.\n self.__init__(\n self._forms,\n self._lemmas,\n self._cpostags,\n self._postags,\n self._feats,\n heads,\n self._deprels\n )", "def move_to_node(self,node):\n path=self.get_path(self.current_node,node)\n self.move_to(path)", "def _remove_node(self, node):\n previous = node.prev\n next_node = node.next\n\n previous.next = next_node\n next_node.prev = previous", "def set_head(self, new_head: Node):\n pointer = self.head\n self.head = new_head\n self.head.next_node = pointer\n return self", "def _add_node(self, node):\n node.prev = self.head\n node.next = 
self.head.next\n\n self.head.next.prev = node\n self.head.next = node", "def setPrev(self, edge):\n self.half1.setPrev(edge.half1)\n self.half2.setPrev(edge.half2)\n self.prev = edge", "def _remove_node(self, node):\n prev = node.prev\n new = node.next\n\n prev.next = new\n new.prev = prev", "def delete_ll_node(node):\n node.val = node.next.val\n node.next = node.next.next", "def link_dependent(self, node: Node) -> None:\n self.used_by.append(node)", "def node_id(self, node_id: int):\r\n self._node_id = node_id", "def next_node(self):\r\n to_visit = list(self.to_visit())\r\n if not to_visit:\r\n return None\r\n next_up = min(to_visit, key=lambda n: self.table[n][\"dist\"])\r\n connected = self.table[next_up][\"dist\"] != float(\"inf\")\r\n return next_up if connected else None" ]
[ "0.6257754", "0.61906594", "0.6186895", "0.60881466", "0.6046128", "0.6041979", "0.59969556", "0.5941199", "0.5892048", "0.5874085", "0.5778439", "0.57763374", "0.57636875", "0.5724862", "0.56946915", "0.56720793", "0.56540406", "0.56457955", "0.56115913", "0.5611262", "0.5574946", "0.5571454", "0.5564779", "0.5562335", "0.55615836", "0.5557544", "0.5528519", "0.5519056", "0.5510725", "0.5499519" ]
0.7686591
0
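The three consecutive rows above (setSplit, setGreater, setLesser) read like companion setters on a single decision-tree node, and the decisionBoundary row further down classifies points against a root built from such nodes. The class sketch below is a guess at how they fit together; only the three setter bodies are taken verbatim from the rows above, and every other attribute name is an assumption, since the original constructor is not shown in this preview.

class TreeNode:
    # Hypothetical node wrapping the three documented setters.
    def __init__(self, attribute=None):
        self.attribute = attribute  # assumed: which feature this node tests
        self.split = None           # threshold, filled in by setSplit
        self.greater = None         # child for values above the split
        self.lesser = None          # child for values at or below the split

    def setSplit(self, split):
        self.split = split

    def setGreater(self, Node):
        self.greater = Node

    def setLesser(self, Node):
        self.lesser = Node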
Converts the given csv file of a tree into list
def treeList(tree): with open(tree) as fp: line = fp.read() nodesList = line.strip().split(',') return nodesList
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def generate_tree(csv_data: List[List[str]], order: List[str]) -> List[NodeList]:\n tree = []\n\n for row in csv_data:\n branch = generate_branch(row, order)\n if not branch:\n continue\n\n branch[0], root_result = level_exists(branch[0], tree)\n\n for i in range(len(branch) - 1):\n branch[i + 1], result = level_exists(branch[i + 1], branch[i][\"children\"])\n if not result:\n branch[i][\"children\"].append(branch[i + 1])\n\n if not root_result:\n tree.append(branch[0])\n return tree", "def read_csv_to_list(csv_path):\n\n with open(csv_path, newline=\"\") as f:\n reader = csv.reader(f)\n data = list(reader)\n\n return data", "def read_file_to_list(input_file):\n with open(input_file) as csvfile:\n csv_rows = csv.reader(csvfile)\n\n data = []\n for row in csv_rows:\n data.append(row)\n\n return data", "def load_nodes(filename):\n\n with open(filename) as f:\n reader = csv.DictReader(f)\n return [item for item in reader]", "def csv_to_list(csv_file, delimiter=','):\n with open(csv_file, 'r') as csv_con:\n reader = csv.reader(csv_con, delimiter=delimiter)\n return list(reader)", "def csv_to_list(path):\n features = {}\n with open(path, newline='', encoding='utf-8-sig', errors=\"ignore\") as csvfile:\n for row in csv.reader(csvfile, delimiter=',', quotechar='|'):\n features[row[0]] = list(map(float, row[1:]))\n csvfile.close()\n\n results = []\n # for i in range(len(listFeatures)):\n # # results.append(listFeatures[i][-1])\n # listFeatures[i]=listFeatures[i][:len(listFeatures[i])-1]\n\n return results, features", "def read_from_csv(file):\n with open(file) as f:\n next(f)\n data = []\n for line in csv.reader(f, delimiter='\\t'):\n data.append(list(line))\n return data", "def parse_csv2list_hdfs(file_path):\n file_df = h2o.import_file(path=file_path, header=1, sep=',')\n file_df = file_df[:, :2].ascharacter()\n csv_list = h2o.as_list(file_df, use_pandas=False, header=True)\n csv_list = [[j.strip() for j in i] for i in csv_list]\n return csv_list", "def parse_csv2list_upload(file_name):\n with open(file_name) as f:\n records = csv.reader(f)\n csv_list = [[j.strip() for j in record] for record in records]\n return csv_list", "def listparse(csvfilename):\r\n output = []\r\n with open(csvfilename, 'r', newline = '') as csvfile:\r\n csvreader = csv.reader(csvfile, skipinitialspace = True)\r\n for row in csvreader:\r\n output.append(row)\r\n return output", "def loadCSVSeeds(self, csvFilePath):\n labels = []\n with open(csvFilePath) as csvfile:\n reader = csv.reader(csvfile, delimiter=',', quotechar='|')\n for row in reader:\n labels.append([row[0], row[1], [float(row[2]), float(row[3]), float(row[4]) ]])\n print(csvFilePath + \": labels loaded\")\n return labels", "def csv_file_to_list(filename, config=None):\n output_list = []\n with open(filename) as f:\n reader= csv.DictReader(f)\n for line in reader:\n output_list.append(line)\n return output_list", "def read_csv(csv_file):\r\n with open(csv_file, \"r\") as files:\r\n data = csv.reader(files)\r\n return list(data)", "def load_csv(csv_path):\n with open(csv_path, newline='') as csv_file:\n csv_reader = csv.reader(csv_file)\n csv_list = [row for row in csv_reader]\n return csv_list", "def loadCSV(input_file):", "def read_csv(csv_file_path):\n res = [] #list\n # f = open(csv_file_path) #read file\n with open(csv_file_path,\"r\") as f:", "def read_csv():", "def _read_csv(input_file):\n with tf.gfile.Open(input_file, \"r\") as f:\n reader = csv.reader(f)\n lines = []\n for line in reader:\n lines.append(line)\n return lines[1:] # remove header", 
"def csvtolist(csvfile, column_header='Organism'):\n\n file = pd.read_csv(csvfile)\n # Create a list name/variable and use list()\n listfromcolumn = list(file[column_header])\n\n return listfromcolumn", "def load_gene_list(csv_path):\n if isinstance(csv_path, str):\n my_dataframe = pd.read_csv(os.path.join(csv_path, 'gene_info.csv'))\n my_genes = my_dataframe['Entrez'].as_matrix()\n return my_genes\n else:\n raise ValueError('csv_path parameter must be a str')", "def csv_list(file_name):\n listoflist = []\n with open('../test_files/' + file_name, 'r') as infile:\n for x in infile.readlines():\n x = x.replace('\\n','')\n #splitting based on ',' that are encountered in csv files.\n #splitted vale will be a list, that inturn is stored into another main list\n #making it list of lists or 2D array.\n listoflist.append(x.split(','))\n return listoflist", "def load_data_from_csv(csv_file):\n list=[]\n\n with open(csv_file) as csv_1:\n csv_out = csv.reader(csv_1) \n next(csv_out)\n for rows in csv_out: \n if len(rows) != 0:\n list.append([rows[0],int(rows[1]),int(rows[2])])\n \n return (list)", "def handle(self, *args, **options):\n csv_filename = options['file_name']\n parent_name = options['parent']\n levels = 0\n\n with open(csv_filename) as csv_file:\n csv_reader = csv.reader(csv_file, delimiter=',')\n headings = next(csv_reader)\n\n # Determine the topic hierarchy levels\n for heading in headings:\n if heading.lower() in [\"topic\", \"topics\"]:\n levels += 1\n\n parent_topic = Topic.objects.get_or_create(name=parent_name)\n parent_topic_structure = TopicStructure.objects.get_or_create(topic=parent_topic[0],\n parent=None)\n generate_tree(levels, csv_reader, parent_topic_structure[0])", "def _load_csv_data(kingdom_csv_path: str):\n\n file_path = os.getcwd() + \"/\" + RESOURCES_DIR_PATH + \"/\" + kingdom_csv_path\n\n kingdomArr = []\n\n with open(file_path, newline=\"\") as csv_file:\n reader = csv.reader(csv_file, delimiter=\",\")\n for row in reader:\n kingdomArr.append(Kingdom(row[0], row[1]))\n\n return kingdomArr", "def load_list(filename):\n # Open the file\n with open(filename, 'r', newline='') as f:\n # Use the CSV library to load the file\n reader = csv.reader(f)\n # Return the full list to the caller of the function. The 'list' in this line converts the 'reader' object to a list type\n # using a process called 'casting'. https://www.w3schools.com/python/python_casting.asp\n return(list(reader))\n #endwith", "def deserialize(self, data):\n def dfs(data):\n if len(data) == 0:\n return None\n if data[0] == 'null':\n data.pop(0)\n return None\n root = TreeNode(data[0])\n data.pop(0)\n root.left = dfs(data)\n root.right = dfs(data)\n\n return root \n if not data:\n return None\n data = data[1:-1].split(',')\n res = dfs(data)\n return res", "def loadCSV(file):\n def convertTypes(s):\n s = s.strip()\n try:\n return float(s) if '.' 
in s else int(s)\n except ValueError:\n return s\n\n reader = csv.reader(open(file, 'rt'))\n return [[convertTypes(item) for item in row] for row in reader]", "def parse(csvfilename):\n table = []\n with open(csvfilename, \"r\") as csvfile:\n for line in csvfile:\n line = line.rstrip()\n columns = line.split(',')\n table.append(columns)\n return table", "def open_csv(file):\n\n\ttmp = [] # initialise the list\n\twith open(file, 'r') as f:\n\t\treader = csv.reader(f)\n\t\tfor row in reader:\n\t\t\ttmp.append(row) # add row to list\n\n\treturn tmp", "def read_csv(filename):\n # Implement this function\n file = open(filename)\n wrapper = csv.reader(file)\n result = []\n for rpos in wrapper: \n result = result + [rpos]\n file.close() \n return result" ]
[ "0.71949446", "0.6465496", "0.6309598", "0.62743926", "0.6224334", "0.6217084", "0.62085897", "0.6135008", "0.61179715", "0.61113775", "0.6101576", "0.6098895", "0.6097796", "0.6075376", "0.6022257", "0.5948729", "0.5923453", "0.5913197", "0.5897696", "0.5892974", "0.58911306", "0.58806574", "0.5861266", "0.5844718", "0.58428186", "0.58411515", "0.5823135", "0.58002335", "0.5797772", "0.5779996" ]
0.72178835
0
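The treeList document above reads one comma-separated line from a file and returns the raw node strings. Purely as a usage illustration (the file name and contents below are invented, not taken from the dataset):

# if tree.csv holds a serialized tree such as:  0.5,1,0.25,2,3
nodes = treeList("tree.csv")
# nodes == ['0.5', '1', '0.25', '2', '3']; every element is still a string,
# so the caller is expected to cast thresholds and class labels itself.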
It plots a graph of decision boundary for all the data samples in the classes
def decisionBoundary(root, figure, fileName): stepValue = 0.001 classClassification = [1, 2, 3, 4] colorClassification = ['b', 'g', 'r', 'm'] markerClassification = ['x', '+', '*', 'o'] classesList = ["Bolts", "Nuts", "Rings", "Scraps"] decisionPlot = figure.add_subplot(111) attributeValues, classes, _ = readData(fileName) attributeValues = np.array(attributeValues) classes = np.array(classes) attribute1, attribute2 = np.meshgrid(np.arange(0, 1, stepValue), np.arange(0, 1, stepValue)) predicted_class = [] for i in range(attribute1.shape[0]): predicted_class.append([]) for j in range(attribute1.shape[1]): result = [attribute1[i][j], attribute2[i][j]] predicted_value = classify(np.array(result), root) predicted_class[i].append(predicted_value) decisionPlot.contourf(attribute1, attribute2, np.array(predicted_class)) for a in classClassification: attribute1=[] attribute2=[] for j in range(len(attributeValues[:])): if classes[j]==a: attribute1 +=[attributeValues[j][0]] for k in range(len(attributeValues[:])): if classes[k]==a: attribute2 +=[attributeValues[k][1]] decisionPlot.scatter(attribute1, attribute2, color=colorClassification[a - 1], marker=markerClassification[a - 1] , label=classesList[a - 1], s=100) decisionPlot.legend(loc='upper right') decisionPlot.set_xlabel("Six fold Rotational Symmetry") decisionPlot.set_ylabel("Eccentricity") decisionPlot.set_title("Decision boundary") return decisionPlot
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def plotBoundary(self,X,Y,axis=None):\n if len(self.theta) != 3: raise ValueError('Data & model must be 2D');\n ax = X.min(0),X.max(0); ax = (ax[0][0],ax[1][0],ax[0][1],ax[1][1]);\n ## TODO: find points on decision boundary defined by theta0 + theta1 X1 + theta2 X2 == 0\n x1b = np.array([ax[0],ax[1]]); # at X1 = points in x1b\n (t0, t1, t2) = self.theta\n x2b = ( -np.array([t0, t0]) - t1 * x1b) / t2\n ## Now plot the data and the resulting boundary:\n A = Y==self.classes[0]; # and plot it:\n recs = [\n mpatches.Rectangle((0, 0), 1, 1, fc=c)\n for c in ['r', 'g']\n ]\n if not axis:\n plt.plot(X[A,0],X[A,1],'r.',X[~A,0],X[~A,1],'g.',x1b,x2b,'k-'); plt.axis(ax); plt.draw(); plt.legend(recs, self.classes)\n else:\n axis.plot(X[A,0],X[A,1],'r.',X[~A,0],X[~A,1],'g.',x1b,x2b,'k-'); axis.axis(ax); axis.legend(recs, self.classes)", "def plot(self):\r\n \r\n\r\n print(\"Printing decision surfaces of decision trees\")\r\n plot_colors = \"rb\"\r\n plot_step = 0.02\r\n n_classes = 2\r\n for _ in range (self.n_estimators):\r\n plt.subplot(2, 3, _ + 1)\r\n x_min, x_max = self.X.iloc[:, 0].min() - 1, self.X.iloc[:, 0].max() + 1\r\n y_min, y_max = self.X.iloc[:, 1].min() - 1, self.X.iloc[:, 1].max() + 1\r\n xx, yy = np.meshgrid(np.arange(x_min, x_max, plot_step),np.arange(y_min, y_max, plot_step))\r\n plt.tight_layout(h_pad=0.5, w_pad=0.5, pad=2.5)\r\n Z = self.clfs[_].predict(np.c_[xx.ravel(), yy.ravel()])\r\n Z = np.array(Z)\r\n Z = Z.reshape(xx.shape)\r\n cs = plt.contourf(xx, yy, Z, cmap=plt.cm.RdBu)\r\n for i, color in zip(range(n_classes), plot_colors):\r\n if i == 0:\r\n idx = np.where(self.y == -1)\r\n if i == 1:\r\n idx = np.where(self.y == 1)\r\n for i in range (len(idx[0])):\r\n plt.scatter(self.X.loc[idx[0][i]][0], self.X.loc[idx[0][i]][1],c=color,cmap=plt.cm.RdBu, edgecolor='black', s=15)\r\n plt.suptitle(\"Decision surface of a decision tree using paired features\")\r\n plt.legend(loc='lower right', borderpad=0, handletextpad=0)\r\n plt.axis(\"tight\")\r\n\r\n plt.show()\r\n fig1 = plt\r\n\r\n # Figure 2\r\n print(\"Printing decision surface by combining the individual estimators\")\r\n plot_colors = \"rb\"\r\n plot_step = 0.02\r\n n_classes = 2\r\n x_min, x_max = self.X.iloc[:, 0].min() - 1, self.X.iloc[:, 0].max() + 1\r\n y_min, y_max = self.X.iloc[:, 1].min() - 1, self.X.iloc[:, 1].max() + 1\r\n xx, yy = np.meshgrid(np.arange(x_min, x_max, plot_step),np.arange(y_min, y_max, plot_step))\r\n plt.tight_layout(h_pad=0.5, w_pad=0.5, pad=2.5)\r\n Z = config.Classifier_AB.predict(np.c_[xx.ravel(), yy.ravel()])\r\n Z = np.array(Z)\r\n Z = Z.reshape(xx.shape)\r\n cs = plt.contourf(xx, yy, Z, cmap=plt.cm.RdBu)\r\n for i, color in zip(range(n_classes), plot_colors):\r\n if i == 0:\r\n idx = np.where(self.y == -1)\r\n if i == 1:\r\n idx = np.where(self.y == 1)\r\n for i in range (len(idx[0])):\r\n plt.scatter(self.X.loc[idx[0][i]][0], self.X.loc[idx[0][i]][1],c=color,cmap=plt.cm.RdBu, edgecolor='black', s=15)\r\n plt.suptitle(\"Decision surface by combining individual estimators\")\r\n plt.legend(loc='lower right', borderpad=0, handletextpad=0)\r\n plt.axis(\"tight\")\r\n\r\n plt.show()\r\n fig2 = plt\r\n\r\n return [fig1,fig2]", "def plot_2D_boundary(plot_range, points, decisionfcn, labels, values=[0]):\n\n clist = ['b', 'r', 'g', 'k', 'm', 'y'] # colors for the classes\n\n # evaluate on a grid and plot contour of decision function\n x = np.arange(plot_range[0], plot_range[1], .1)\n y = np.arange(plot_range[2], plot_range[3], .1)\n xx, yy = np.meshgrid(x, y)\n xxx, yyy = xx.flatten(), yy.flatten() 
# lists of x,y in grid\n zz = np.array(decisionfcn(xxx, yyy))\n zz = zz.reshape(xx.shape)\n\n # plot contour(s) at values\n plt.contour(xx, yy, zz, values)\n\n # for each class, plot the points with ’*’ for correct, ’o’ for incorrect\n for i in range(len(points)):\n d = decisionfcn(points[i][:, 0], points[i][:, 1])\n correct_ndx = labels[i] == d\n incorrect_ndx = labels[i] != d\n plt.plot(\n points[i][correct_ndx, 0],\n points[i][correct_ndx, 1],\n '*',\n color=clist[i])\n plt.plot(\n points[i][incorrect_ndx, 0],\n points[i][incorrect_ndx, 1],\n 'o',\n color=clist[i])\n plt.axis('equal')\n plt.show()", "def plot_decision_regions(self, option, canvas):\n\t\tle = preprocessing.LabelEncoder()\t\t# integer encoder\n\t\tle.fit(self.y)\n\t\tclassifier = self.classifier.fit(self.X, le.transform(self.y))\n\t\tclasses = classifier.classes_\n\t\tnum_classes = len(classes)\n\n\t\tif option == 'train':\n\t\t\tX = self.X\n\t\t\ty = self.y\n\t\telif option == 'test':\n\t\t\tX = self.test_X\n\t\t\ty = self.test_y\n\n\t\tb1 = self.X.iloc[:, 0]\n\t\tb2 = self.X.iloc[:, 1]\n\t\tb1_slack = (b1.max() - b1.min()) * 0.1\n\t\tb2_slack = (b2.max() - b2.min()) * 0.1\n\t\tb1_min, b1_max = b1.min() - b1_slack, b1.max() + b1_slack \t# x-axis range\n\t\tb2_min, b2_max = b2.min() - b2_slack, b2.max() + b2_slack\t# y-axis range\n\t\tstep_1 = (b1_max - b1_min) / 200\n\t\tstep_2 = (b2_max - b2_min) / 200\n\t\tmd1, md2 = np.meshgrid(np.arange(b1_min, b1_max, step_1), np.arange(b2_min, b2_max, step_2))\n\n\t\trcParams.update({'font.size': 7})\n\t\tcanvas.figure.clear()\n\t\tax = canvas.figure.subplots()\n\t\tlevels = np.arange(-0.19, 1, 0.2) + 0.2\n\n\t\tif num_classes == 2:\n\t\t\tcm_bkgd = plt.cm.RdBu\n\t\t\tcm_pts = ListedColormap(['#FF0000', '#0000FF'])\n\t\t\tZ = classifier.predict_proba(np.c_[md1.ravel(), md2.ravel()])[:, 1]\n\t\t\tZ = Z.reshape(md1.shape)\n\t\t\tax.contourf(md1, md2, Z, vmin=0, vmax=1, cmap=cm_bkgd, alpha=0.8)\n\n\t\telif num_classes == 3:\n\t\t\tcm_bkgd_1 = plt.cm.Reds\n\t\t\tcm_bkgd_2 = plt.cm.Greens\n\t\t\tcm_bkgd_3 = plt.cm.Blues\n\t\t\tcm_pts = cm_pts = ListedColormap(['#FF0000', '#00FF00', '#0000FF'])\n\t\t\tZ = classifier.predict_proba(np.c_[md1.ravel(), md2.ravel()])\n\t\t\tZ1 = Z[:, 0]\n\t\t\tZ2 = Z[:, 1]\n\t\t\tZ3 = Z[:, 2]\n\n\t\t\tP1 = np.maximum(0, Z1 - np.maximum(Z2, Z3))\n\t\t\tP2 = np.maximum(0, Z2 - np.maximum(Z1, Z3))\n\t\t\tP3 = np.maximum(0, Z3 - np.maximum(Z1, Z2))\n\t\t\tP1 = P1.reshape(md1.shape)\n\t\t\tP2 = P2.reshape(md1.shape)\n\t\t\tP3 = P3.reshape(md1.shape)\n\n\t\t\tax.contourf(md1, md2, P1, levels, cmap=cm_bkgd_1, alpha=0.8)\n\t\t\tax.contourf(md1, md2, P2, levels, cmap=cm_bkgd_2, alpha=0.8)\n\t\t\tax.contourf(md1, md2, P3, levels, cmap=cm_bkgd_3, alpha=0.8)\n\n\t\td1 = X.iloc[:, 0] \t# x-axis\n\t\td2 = X.iloc[:, 1]\t# y-axis\n\t\tax.scatter(d1, d2, c=le.transform(y), cmap=cm_pts, alpha=0.6, edgecolors='k')\n\t\tax.set_xlim(md1.min(), md1.max())\n\t\tax.set_ylim(md2.min(), md2.max())\n\t\tax.set_xticks(())\n\t\tax.set_yticks(())\n\t\tax.set_xlabel(X.columns[0])\n\t\tax.set_ylabel(X.columns[1])\n\n\t\tcanvas.figure.tight_layout()\n\t\tcanvas.draw()", "def plot_decision_boundary(X, Y, models, titles):\n fig, sub = plt.subplots(2, 4, figsize=(20, 8))\n plt.subplots_adjust(wspace=1.0, hspace=0.6)\n\n xx, yy = create_meshgrid(X[:, 0], X[:, 1])\n\n for clf, title, ax in zip(models, titles, sub.flatten()):\n plot_contours(ax, clf, xx, yy,\n cmap=plt.cm.coolwarm, alpha=0.8)\n ax.scatter(X[:,0], X[:,1], c=Y, cmap=plt.cm.coolwarm, s=20, edgecolors='k')\n\tax.set_xlim(xx.min(), 
xx.max())\n\tax.set_ylim(yy.min(), yy.max())\n\tax.set_xlabel('Xvalues')\n\tax.set_ylabel('Yvalues')\n\tax.set_xticks(())\n\tax.set_yticks(())\n\tax.set_title(title)\n\n back = matplotlib.get_backend()\n manager = plt.get_current_fig_manager()\n if \"QT\" in back:\n manager.window.showMaximized()\n elif \"Tk\" in back:\n manager.resize(*manager.window.maxsize())\n else:\n manager.frame.Maximize(True)\n plt.show()\n plt.close()", "def plot(self, fname=None):\n x = np.linspace(self.bounds[0], self.bounds[-1], 200)\n y = [self.evaluate(xi) for xi in x]\n plt.figure()\n plt.plot(x, y, label='Class func')\n plt.plot(self.bounds, self.gis, 'o', label='Algorithm')\n plt.grid(color='0.7')\n plt.xlabel('Dependent Variable')\n plt.ylabel('PP Transformed Class Value')\n if fname:\n plt.savefig(fname)\n else:\n plt.show()", "def plot_decision_boundary(resolution=100, colors=('b', 'k', 'r'), levels=(-1, 0, 1)):\n\n # Generate coordinate grid of shape [resolution x resolution]\n # and evaluate the model over the entire space\n xrange = np.linspace(x_train[:,0].min(), x_train[:,0].max(), resolution)\n yrange = np.linspace(x_train[:,1].min(), x_train[:,1].max(), resolution)\n grid = [[decision_function(alpha, y_train,\n Kernel1, x_train,\n np.array([xr, yr]), b) for xr in xrange] for yr in yrange]\n grid = np.array(grid).reshape(len(xrange), len(yrange))\n\n # Plot decision contours using grid and\n # make a scatter plot of training data\n ax.contour(xrange, yrange, grid, levels=levels, linewidths=(1, 1, 1),\n linestyles=('--', '-', '--'), colors=colors)\n ax.scatter(x_train[:,0], x_train[:,1],\n c=y_train, cmap=plt.cm.viridis, lw=0, alpha=0.25)\n\n # Plot support vectors (non-zero alphas)\n # as circled points (linewidth > 0)\n mask = np.round(alpha, decimals=2) != 0.0\n ax.scatter(x_train[mask,0], x_train[mask,1],\n c=y_train[mask], cmap=plt.cm.viridis, lw=1, edgecolors='k')\n\n return grid, ax", "def plot_decision_boundary(pred_func):\n # Set min and max values\n x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5\n y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5\n h = 0.01\n # Generate a grid of points\n xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))\n # Predict the function value for the whole gid\n Z = pred_func(np.c_[xx.ravel(), yy.ravel()])\n Z = Z.reshape(xx.shape)\n # Plot the contour and training examples\n plt.contourf(xx, yy, Z, cmap=plt.cm.Spectral)\n plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.Spectral)\n plt.show()", "def show_learning_curve(self):\n\n # Loop output classes\n for c in range(1,self.n_output_classes):\n # Get data\n x_values = np.array(self.n_class_samples_list[c])\n accuracy = np.array(self.accuracy_list[c])\n precision = np.array(self.precision_list[c])\n recall = np.array(self.recall_list[c])\n F1 = np.array(self.F1_list[c])\n\n # Make plot\n with sns.axes_style(\"ticks\"):\n fig,ax = plt.subplots()\n plt.plot([np.min(x_values),np.max(x_values)],[0.5,0.5],\n color='#777777',linestyle='--')\n plt.plot([np.min(x_values),np.max(x_values)],[0.66,0.66],\n color='#777777',linestyle=':')\n plt.plot([np.min(x_values),np.max(x_values)],[0.8,0.8],\n color='#777777',linestyle=':')\n plt.plot([np.min(x_values),np.max(x_values)],[0.9,0.9],\n color='#777777',linestyle=':')\n\n plt.plot( x_values, accuracy, color='#000000',\n linewidth=1, label='Accuracy' )\n plt.plot( x_values, precision, color='#0000aa',\n linewidth=1, label='Precision' )\n plt.plot( x_values, recall, color='#00aa00',\n linewidth=1, label='Recall' )\n plt.plot( x_values, F1, 
color='#aa0000',\n linewidth=2, label='F1' )\n\n plt.yticks( [0, 0.5, 0.66, 0.8, 0.9, 1.0],\n ['0','0.5','0.66','0.8','0.9','1.0'], ha='right' )\n plt.xlim(np.max(x_values)*-0.02,np.max(x_values)*1.02)\n plt.ylim(-0.02,1.02)\n plt.xlabel('Number of training samples')\n plt.ylabel('Performance')\n plt.title('Learning curve, class {}'.format(c))\n sns.despine(ax=ax, offset=0, trim=True)\n lgnd = plt.legend(loc=4, ncol=1, frameon=True, fontsize=9)\n lgnd.get_frame().set_facecolor('#ffffff')\n ax.spines['left'].set_bounds(0,1)\n ax.spines['bottom'].set_bounds(np.min(x_values),np.max(x_values))", "def plot_decision_regions(X, y, classifier, resolution=0.02):\n #setup marker generator and color map\n markers = ('s', 'x', 'o', '^', 'v')\n colors = ('red', 'blue', 'lightgreen', 'gray', 'cyan')\n cmap = ListedColormap(colors[:len(np.unique(y))])\n\n #plot the decision surface\n #just find the limit and/reduce 1\n x1_min, x1_max = X[:, 0].min() - 1, X[:, 0].max() + 1\n x2_min, x2_max = X[:, 1].min() - 1, X[:, 1].max() + 1\n #np.arange(start, stop, step): create list of tupple from start to stop with step of step\n #np.meshgrid convert: create accessible arrays from list of tupple\n #(-1,-2) (-1,0) (-1,1) xx1 = [-1 -1 -1][0 0 0 ][1 1 1]\n #(0,-2)(0,0)(0,1) ==> \n #(1,-2)(1,0)(1,1) xx2 = [-2 -2 -2][0 0 0 ][1 1 1]\n xx1, xx2 = np.meshgrid(np.arange(x1_min, x1_max, resolution),\n np.arange(x2_min, x2_max, resolution))\n\n #ravel() xx1 = [-1 -1 -1 0 0 0 1 1 1]\n # xx2 = [-2 -2 -2 0 0 0 1 1 1]\n #array() [[-1 -1 -1 0 0 0 1 1 1]\n # [-2 -2 -2 0 0 0 1 1 1]] concatenation... sort of\n #.T , transpose from in this case a 2x9 to 9x2\n\n Z = classifier.predict(np.array([xx1.ravel(), xx2.ravel()]).T)\n Z = Z.reshape(xx1.shape)\n plt.contourf(xx1, xx2, Z, alpha = 0.3, cmap=cmap)\n plt.xlim(xx1.min(), xx1.max())\n plt.ylim(xx2.min(), xx2.max())\n\n #plot class samples\n for idx, cl in enumerate(np.unique(y)):\n plt.scatter(x=X[y == cl, 0],\n y=X[y == cl, 1],\n alpha=0.8,\n c=colors[idx],\n marker=markers[idx],\n label=cl,\n edgecolor='black')", "def plot_decision_regions(X, y, clf,\n ax=None,\n X_highlight=None,\n res=0.02, legend=1,\n hide_spines=True,\n markers='s^oxv<>',\n colors='red,blue,limegreen,gray,cyan'):\n # http://stackoverflow.com/questions/22294241/plotting-a-decision-boundary-separating-2-classes-using-matplotlibs-pyplot?lq=1\n # check if data is numpy array\n for a in (X, y):\n if not isinstance(a, np.ndarray):\n raise ValueError('%s must be a NumPy array.' 
% a.__name__)\n\n if ax is None:\n ax = plt.gca()\n\n if not y.dtype == int:\n y = y.astype(int)\n\n # check if test data is provided\n plot_testdata = True\n if not isinstance(X_highlight, np.ndarray):\n if X_highlight is not None:\n raise ValueError('X_test must be a NumPy array or None')\n else:\n plot_testdata = False\n\n if len(X.shape) == 2 and X.shape[1] > 1:\n dim = '2d'\n else:\n dim = '1d'\n\n marker_gen = cycle(list(markers))\n\n # make color map\n n_classes = len(np.unique(y))\n colors = colors.split(',')\n cmap = ListedColormap(colors[:n_classes])\n\n # plot the decision surface\n if dim == '2d':\n y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1\n else:\n y_min, y_max = -1, 1\n\n x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1\n xx, yy = np.meshgrid(np.arange(x_min, x_max, res),\n np.arange(y_min, y_max, res))\n\n if dim == '2d':\n y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1\n Z = clf.predict(np.array([xx.ravel(), yy.ravel()]).T)\n else:\n y_min, y_max = -1, 1\n Z = clf.predict(np.array([xx.ravel()]).T)\n\n Z = Z.reshape(xx.shape)\n ax.contourf(xx, yy, Z, alpha=0.3, cmap=cmap)\n\n ax.axis(xmin=xx.min(), xmax=xx.max(), y_min=yy.min(), y_max=yy.max())\n\n # plot class samples\n\n for c in np.unique(y):\n if dim == '2d':\n y_data = X[y == c, 1]\n else:\n y_data = [0 for i in X[y == c]]\n\n ax.scatter(x=X[y == c, 0],\n y=y_data,\n alpha=0.8,\n c=cmap(c),\n marker=next(marker_gen),\n label=c)\n\n if hide_spines:\n ax.spines['right'].set_visible(False)\n ax.spines['top'].set_visible(False)\n ax.spines['left'].set_visible(False)\n ax.spines['bottom'].set_visible(False)\n ax.yaxis.set_ticks_position('left')\n ax.xaxis.set_ticks_position('bottom')\n if not dim == '2d':\n ax.axes.get_yaxis().set_ticks([])\n\n if legend:\n legend = plt.legend(loc=legend,\n fancybox=True,\n framealpha=0.3,\n scatterpoints=1,\n handletextpad=-0.25,\n borderaxespad=0.9)\n\n ax.add_artist(legend)\n\n if plot_testdata:\n if dim == '2d':\n ax.scatter(X_highlight[:, 0],\n X_highlight[:, 1],\n c='',\n alpha=1.0,\n linewidth=1,\n marker='o',\n s=80)\n else:\n ax.scatter(X_highlight,\n [0 for i in X_highlight],\n c='',\n alpha=1.0,\n linewidth=1,\n marker='o',\n s=80)\n\n return ax", "def plot(self):\n h = .02\n i=1\n bags_X = self.bags_X\n bags_y = self.bags_y\n fig1 = plt.figure(figsize=(45, 9))\n\n \n cm = plt.cm.RdBu\n cm_bright = ListedColormap(['#FF0000', '#0000FF'])\n \n for model in self.models:\n ax = plt.subplot(1, len(self.models) , i)\n X = pd.DataFrame(bags_X[i-1])\n y = pd.Series(bags_y[i-1])\n x_min, x_max = X[X.columns[0]].min() - .5, X[X.columns[0]].max() + .5\n y_min, y_max = X[X.columns[1]].min() - .5, X[X.columns[1]].max() + .5\n xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))\n Z = np.array(model.predict(pd.DataFrame(np.c_[xx.ravel(), yy.ravel()], columns=X.columns)))\n # print(Z[12])\n Z = Z.reshape(xx.shape)\n ax.contourf(xx, yy, Z, cmap=cm, alpha=.8)\n ax.scatter(X[X.columns[0]], X[X.columns[1]], c=y, cmap=cm_bright, edgecolors='k')\n # size=[1000*w for w in self.weights[i-1]]\n ax.set_xlim(xx.min(), xx.max())\n ax.set_ylim(yy.min(), yy.max())\n ax.set_xlabel(str(X.columns[0]))\n ax.set_ylabel(str(X.columns[1]))\n plt.title(\"Estimator \"+str(i))\n i+=1\n \n fig2 = plt.figure(figsize=(9,9))\n X = self.X\n y = self.y\n ax2 = plt.subplot(1,1,1)\n x_min, x_max = X[X.columns[0]].min() - .5, X[X.columns[0]].max() + .5\n y_min, y_max = X[X.columns[1]].min() - .5, X[X.columns[1]].max() + .5\n xx, yy = np.meshgrid(np.arange(x_min, x_max, h), 
np.arange(y_min, y_max, h))\n Z = np.array(self.predict(pd.DataFrame(np.c_[xx.ravel(), yy.ravel()], columns=X.columns)))\n Z = Z.reshape(xx.shape)\n ax2.contourf(xx, yy, Z, cmap=cm, alpha=.8)\n # size=[1000*w for w in self.weights[i-2]]\n ax2.scatter(X[X.columns[0]], X[X.columns[1]], c=y, cmap=cm_bright, edgecolors='k')\n ax2.set_xlim(xx.min(), xx.max())\n ax2.set_ylim(yy.min(), yy.max())\n plt.title(\"Combined Decision Surface\")\n \n plt.tight_layout()\n plt.show()\n\n return [fig1,fig2]", "def visclassifier(fun,xTr,yTr):\n\n yTr = np.array(yTr).flatten()\n \n symbols = [\"ko\",\"kx\"]\n marker_symbols = ['o', 'x']\n mycolors = [[0.5, 0.5, 1], [1, 0.5, 0.5]]\n classvals = np.unique(yTr)\n\n plt.figure()\n\n res=300\n xrange = np.linspace(min(xTr[:, 0]), max(xTr[:, 0]),res)\n yrange = np.linspace(min(xTr[:, 1]), max(xTr[:, 1]),res)\n pixelX = repmat(xrange, res, 1)\n pixelY = repmat(yrange, res, 1).T\n\n xTe = np.array([pixelX.flatten(), pixelY.flatten()]).T\n\n testpreds = fun(xTe)\n Z = testpreds.reshape(res, res)\n # Z[0,0] = 1 # optional: scale the colors correctly\n plt.contourf(pixelX, pixelY, np.sign(Z), colors=mycolors)\n\n for idx, c in enumerate(classvals):\n plt.scatter(xTr[yTr == c,0],\n xTr[yTr == c,1],\n marker=marker_symbols[idx],\n color='k'\n )\n\n plt.axis('tight')\n plt.show()", "def plot_decision_boundary(data, x, y, labels, model, **kwargs):\n xx, yy, Z = setup_contours(data=data, x=x, y=y, model=model)\n\n x0, x1 = data[x].values, data[y].values\n x0lim = x0.min(), x0.max()\n x1lim = x1.min(), x1.max()\n\n col = data[labels].values\n plt.figure(figsize=(10, 10))\n\n plt.scatter(x0, x1, c=col, **kwargs)\n CS = plt.contourf(xx, yy, Z, **kwargs)\n CS2 = plt.contour(CS, CS.levels[::2], **kwargs)\n cbar = plt.colorbar(CS, **kwargs)\n cbar.ax.set_ylabel('Fitted Probability')\n # Add the contour line levels to the colorbar\n cbar.add_lines(CS2)\n\n plt.xlim(x0lim)\n plt.ylim(x1lim)\n plt.xlabel(x)\n plt.ylabel(y)\n plt.legend()", "def plot_decision_boundary(k, x, t, granularity=100, figures_root='../figures', data_name=None):\r\n print(f'KNN for K={k}')\r\n\r\n # Initialize meshgrid to be used to store the class prediction values\r\n # this is used for computing and plotting the decision boundary contour\r\n\r\n pointsX = numpy.linspace(numpy.min(x[:, 0]) - 0.1, numpy.max(x[:, 0]) + 0.1, granularity)\r\n pointsY = numpy.linspace(numpy.min(x[:, 1]) - 0.1, numpy.max(x[:, 1]) + 0.1, granularity)\r\n\r\n Xv, Yv = numpy.meshgrid(pointsX, pointsY)\r\n\r\n # Calculate KNN classification for every point in meshgrid\r\n classes = numpy.zeros(shape=(Xv.shape[0], Xv.shape[1]))\r\n for i in range(Xv.shape[0]):\r\n for j in range(Xv.shape[1]):\r\n c = knn(numpy.array([Xv[i][j], Yv[i][j]]), k, x, t)\r\n # print('{0} {1} {2}'.format(i, j, c))\r\n classes[i][j] = c\r\n\r\n # plot the binary decision boundary contour\r\n plt.figure()\r\n plt.pcolormesh(Xv, Yv, classes, cmap=CMAP_LIGHT)\r\n ti = f'KNN with K = {k}'\r\n plt.title(ti)\r\n plt.draw()\r\n\r\n save_path = None\r\n if data_name is not None:\r\n save_path = os.path.join(figures_root, f'knn_{data_name}_k={k}')\r\n # else:\r\n # save_path = os.path.join(figures_root, f'knn_k={k}')\r\n\r\n # plot the data (on top of the decision boundary color mesh)\r\n plot_data(x, t, new_figure=False, save_path=save_path)\r\n\r\n return classes", "def plot_decision_function(fitted_classifier, range_features, ax=None):\n from sklearn.preprocessing import LabelEncoder\n\n feature_names = list(range_features.keys())\n # create a grid to evaluate all 
possible samples\n plot_step = 0.02\n xx, yy = np.meshgrid(\n np.arange(*range_features[feature_names[0]], plot_step),\n np.arange(*range_features[feature_names[1]], plot_step),\n )\n\n # compute the associated prediction\n Z = fitted_classifier.predict(np.c_[xx.ravel(), yy.ravel()])\n Z = LabelEncoder().fit_transform(Z)\n Z = Z.reshape(xx.shape)\n\n # make the plot of the boundary and the data samples\n if ax is None:\n _, ax = plt.subplots()\n ax.contourf(xx, yy, Z, alpha=0.4, cmap=\"RdBu\")\n\n return ax", "def plot_decision_boundary(model: torch.nn.Module, X: torch.Tensor, y: torch.Tensor):\n # Put everything to CPU (works better with NumPy + Matplotlib)\n model.to(\"cpu\")\n X, y = X.to(\"cpu\"), y.to(\"cpu\")\n\n # Setup prediction boundaries and grid\n x_min, x_max = X[:, 0].min() - 0.1, X[:, 0].max() + 0.1\n y_min, y_max = X[:, 1].min() - 0.1, X[:, 1].max() + 0.1\n xx, yy = np.meshgrid(np.linspace(x_min, x_max, 101), np.linspace(y_min, y_max, 101))\n\n # Make features\n X_to_pred_on = torch.from_numpy(np.column_stack((xx.ravel(), yy.ravel()))).float()\n\n # Make predictions\n model.eval()\n with torch.inference_mode():\n y_logits = model(X_to_pred_on)\n\n # Test for multi-class or binary and adjust logits to prediction labels\n if len(torch.unique(y)) > 2:\n y_pred = torch.softmax(y_logits, dim=1).argmax(dim=1) # mutli-class\n else:\n y_pred = torch.round(torch.sigmoid(y_logits)) # binary\n\n # Reshape preds and plot\n y_pred = y_pred.reshape(xx.shape).detach().numpy()\n plt.contourf(xx, yy, y_pred, cmap=plt.cm.RdYlBu, alpha=0.7)\n plt.scatter(X[:, 0], X[:, 1], c=y, s=40, cmap=plt.cm.RdYlBu)\n plt.xlim(xx.min(), xx.max())\n plt.ylim(yy.min(), yy.max())", "def plot(self):\n\t\traw_labels = self.make_raw_data()[1]\n\t\tbalanced_labels = self.get_extra()[1]\n\t\tfig, ax1 = subplots()\n\t\tax2 = ax1.twinx()\n\t\tx = array(range(1, NCLASSES + 1))\n\t\tl1 = ax1.bar(x - 0.3, self.prior_sizes, width = 0.25, color = 'b', align = 'center', label = 'train')\n\t\tl2 = ax2.bar(x, bincount(raw_labels - 1), width = 0.25, color = 'r', align = 'center', label = 'confident')\n\t\tl3 = ax2.bar(x + 0.3, bincount(balanced_labels - 1), width = 0.25, color = 'g', align = 'center', label = 'rebalanced')\n\t\tconfident_frac = len(raw_labels) / float(self.predictions.shape[0])\n\t\tusable_frac = len(balanced_labels) / float(self.predictions.shape[0])\n\t\tax1.set_title('at >{0:.1f}%, {1:.1f}% reliable, {2:.1f}% usable'.format(self.confidence * 100, confident_frac * 100, usable_frac * 100))\n\t\tax1.legend([l1, l2, l3], [l1.get_label(), l2.get_label(), l3.get_label()], loc = 'upper right')\n\t\tax1.set_xticks(x)", "def plot_decision_boundary(model, X, y):\r\n \r\n x1_array, x2_array = np.meshgrid(np.arange(-4, 4, 0.01), np.arange(-4, 4, 0.01))\r\n grid_coordinates = np.c_[x1_array.ravel(), x2_array.ravel()]\r\n Z = model.predict(grid_coordinates)\r\n Z = Z.reshape(x1_array.shape)\r\n plt.contourf(x1_array, x2_array, Z, cmap=plt.cm.bwr)\r\n plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.bwr)\r\n plt.show()", "def plot_data (features : list, actual_labels : list, classified_labels : list = None,\n extra_lines : list = None, normalize=False):\n samples = np.array(features)\n if normalize:\n norms = np.linalg.norm(samples, axis=1)\n l=[]\n for i, s in enumerate(samples):\n l.append(s/norms[i])\n samples = np.array(l)\n \n plt.figure(figsize=(8, 8))\n for (idx_case, ((actual, classified), marker, color)) in enumerate(zip(cases, markers, colors)):\n mask = np.logical_and(np.equal(actual_labels, actual), \n 
np.equal(actual if classified_labels == None else classified_labels, classified))\n if not np.any(mask): continue\n plt.scatter(\n samples[mask, 0], samples[mask, 1],\n label = f\"Class {actual}\" if classified_labels == None else f\"Was {actual}, classified {classified}\",\n marker = marker, s = 300, c = [color],\n )\n # Add the lines to show the true classes boundaries, if provided\n if extra_lines != None:\n for line in extra_lines:\n plt.plot(line[0], line[1], color = 'gray')\n plt.legend()", "def model_visualization(model,X,y,classifier):\n sns.set_context(context='notebook',font_scale=2)\n plt.figure(figsize=(16,9))\n from matplotlib.colors import ListedColormap\n X_set, y_set = X, y\n X1, X2 = np.meshgrid(np.arange(start = X_set[:, 0].min() - 1, stop = X_set[:, 0].max() + 1, step = 0.01),np.arange(start = X_set[:, 1].min() - 1, stop = X_set[:, 1].max() + 1, step = 0.01))\n plt.contourf(X1, X2, classifier.predict(np.array([X1.ravel(), X2.ravel()]).T).reshape(X1.shape), alpha = 0.6, cmap = ListedColormap(('green', 'blue')))\n plt.xlim(X1.min(), X1.max())\n plt.ylim(X2.min(), X2.max())\n for i, j in enumerate(np.unique(y_set)):\n plt.scatter(X_set[y_set == j, 0], X_set[y_set == j, 1],\n color = ListedColormap(('turquoise', 'blue'))(i), label = j)\n plt.title(\"%s Model Set\" %(model))\n plt.xlabel('PC 1')\n plt.ylabel('PC 2')\n plt.legend()\n plt.savefig('images/{0}.png'.format(model))", "def visualize(self):\n\t\tplt.figure(1)\n\t\tax1 = plt.add_suplot(1,2,1)\n\t\t# Plot free energy error\n\t\tax1.plot(self.FE_errors_GMM_CV_, linewidth=4, label='GMM with cross-validation')\n\t\tax1.plot(self.FE_errors_GMM_mix_models_, linewidth=4, label='GMM with mixture of models')\n\t\tplt.legend()\n\n\t\t# Plot density error\n\n\t\t# Plot log-likelihood of test set\n\n\t\t# Plot clustering score\n\n\t\tplt.show()\n\n\t\treturn", "def visualize(self, reduced_data):\n\t\t# Step size of the mesh. Decrease to increase the quality of the VQ.\n\t\th = .02 # point in the mesh [x_min, m_max]x[y_min, y_max].\n\t\t\n\t\t# Plot the decision boundary. For that, we will assign a color to each\n\t\tx_min, x_max = reduced_data[:, 0].min() + 1, reduced_data[:, 0].max() - 1\n\t\ty_min, y_max = reduced_data[:, 1].min() + 1, reduced_data[:, 1].max() - 1\n\t\txx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))\n\n\t\t# Obtain labels for each point in mesh. 
Use last trained model.\n\t\tZ = self.estimator.predict(np.c_[xx.ravel(), yy.ravel()])\n\n\t\t# Put the result into a color plot\n\t\tZ = Z.reshape(xx.shape)\n\t\t\n\t\tplt.figure(1)\n\t\tplt.clf()\n\t\tplt.imshow(Z, interpolation='nearest',\n\t\t extent=(xx.min(), xx.max(), yy.min(), yy.max()),\n\t\t cmap=plt.cm.Paired,\n\t\t aspect='auto', origin='lower')\n\n\t\tplt.plot(reduced_data[:, 0], reduced_data[:, 1], 'k.', markersize=4)\n\t\t# Plot the centroids as a white X\n\t\tcentroids = self.estimator.cluster_centers_\n\t\tplt.scatter(centroids[:, 0], centroids[:, 1],\n\t\t marker='x', s=169, linewidths=3,\n\t\t color='w', zorder=10)\n\t\tplt.title('K-means clustering with random data (PCA-reduced data)\\n'\n\t\t 'Centroids are marked with white cross')\n\t\tplt.xlim(x_min, x_max)\n\t\tplt.ylim(y_min, y_max)\n\t\tplt.xticks(())\n\t\tplt.yticks(())\n\t\tplt.show()", "def draw_knn_boundaries(knn, h=0.02): # h = Step size in the mesh\n ax = plt.gca()\n [xmin, xmax] = ax.get_xlim()\n [ymin, ymax] = ax.get_ylim()\n # Generate the axis associated to the first feature: \n x_axis = np.arange(xmin, xmax, h)\n # Generate the axis associated to the 2nd feature: \n y_axis = np.arange(ymin, ymax, h)\n # Generate a meshgrid (2D grid) from the 2 axis:\n x_grid, y_grid = np.meshgrid(x_axis, y_axis)\n # Vectorize the grids into column vectors:\n x_grid_vectorized = x_grid.flatten()\n x_grid_vectorized = np.expand_dims(x_grid_vectorized, axis=1)\n y_grid_vectorized = y_grid.flatten()\n y_grid_vectorized = np.expand_dims(y_grid_vectorized, axis=1)\n # Concatenate the vectorized grids\n grid = np.concatenate((x_grid_vectorized, y_grid_vectorized), axis=1)\n # Now you can use 'grid' as data to classify by the knn \n\n # Predict concatenated features to get the decision boundaries:\n decision_boundaries = ... 
#TODO!\n\n # Reshape the decision boundaries into a 2D matrix:\n decision_boundaries = decision_boundaries.reshape(x_grid.shape)\n plt.pcolormesh(x_grid, y_grid, decision_boundaries, cmap=cmap_light, zorder=1)\n return ax", "def visualize(self):\n\n check_is_fitted(self, \"sm_\")\n\n fig = plt.figure(figsize=(6, 4))\n inner = gridspec.GridSpec(2, 1, hspace=0.1, height_ratios=[6, 1])\n ax1_main = plt.Subplot(fig, inner[0]) \n xgrid = np.linspace(self.xmin, self.xmax, 100).reshape([-1, 1])\n ygrid = self.decision_function(xgrid)\n ax1_main.plot(xgrid, ygrid)\n ax1_main.set_xticklabels([])\n ax1_main.set_title(\"Shape Function\", fontsize=12)\n fig.add_subplot(ax1_main)\n \n ax1_density = plt.Subplot(fig, inner[1]) \n xint = ((np.array(self.bins_[1:]) + np.array(self.bins_[:-1])) / 2).reshape([-1, 1]).reshape([-1])\n ax1_density.bar(xint, self.density_, width=xint[1] - xint[0])\n ax1_main.get_shared_x_axes().join(ax1_main, ax1_density)\n ax1_density.set_yticklabels([])\n ax1_density.autoscale()\n fig.add_subplot(ax1_density)\n plt.show()", "def plot_result(data, gt_y, pred_y):\n assert data.shape[0] == gt_y.shape[0]\n assert data.shape[0] == pred_y.shape[0]\n\n plt.figure()\n\n plt.subplot(1, 2, 1)\n plt.title('Ground Truth', fontsize=18)\n\n for idx in range(data.shape[0]):\n if gt_y[idx] == 0:\n plt.plot(data[idx][0], data[idx][1], 'ro')\n else:\n plt.plot(data[idx][0], data[idx][1], 'bo')\n\n plt.subplot(1, 2, 2)\n plt.title('Prediction', fontsize=18)\n\n for idx in range(data.shape[0]):\n if pred_y[idx] == 0:\n plt.plot(data[idx][0], data[idx][1], 'ro')\n else:\n plt.plot(data[idx][0], data[idx][1], 'bo')\n\n plt.show()", "def plot_result(data, gt_y, pred_y):\n assert data.shape[0] == gt_y.shape[0]\n assert data.shape[0] == pred_y.shape[0]\n\n plt.figure()\n\n plt.subplot(1, 2, 1)\n plt.title('Ground Truth', fontsize=18)\n\n for idx in range(data.shape[0]):\n if gt_y[idx] == 0:\n plt.plot(data[idx][0], data[idx][1], 'ro')\n else:\n plt.plot(data[idx][0], data[idx][1], 'bo')\n\n plt.subplot(1, 2, 2)\n plt.title('Prediction', fontsize=18)\n\n for idx in range(data.shape[0]):\n if pred_y[idx] == 0:\n plt.plot(data[idx][0], data[idx][1], 'ro')\n else:\n plt.plot(data[idx][0], data[idx][1], 'bo')\n\n plt.show()", "def Plot_predict(X,Y,model,X_path): \n labels = {0: 'CNV', 1: 'DME', 2: 'DRUSEN', 3: 'NORMAL'}\n Y_pred_classes = np.argmax(model.predict(X),axis = 1) \n Y_true = np.argmax(Y,axis = 1)\n \n fig = plt.figure(figsize=(40, 40)) \n for i in range(X.shape[0]):\n ax = fig.add_subplot(8, 4, i + 1, xticks=[], yticks=[])\n ax.set_title(\"Groundtruth : {} \\n Prediction : {}\".format(labels[Y_true[i]],labels[Y_pred_classes[i]]), \\\n color=(\"green\" if Y_true[i] == Y_pred_classes[i] else \"red\"),fontsize=20) \n img = image.load_img(X_path[i])\n ax.imshow(img)\n plt.show()\n return", "def make_plots():\n prep = DataPrep(filepath='/home/ubuntu/ca_bills_project/data/extra/topic_intro_data_05-23-17-08-23.csv')\n prep.prepare()\n k = 100\n trained_model_file = \"/home/ubuntu/ca_bills_project/data/extra/intro_model_100_topics_rf_10000trees.pkl\"\n with open(trained_model_file) as p:\n model = pickle.load(p)\n print \"loaded model\"\n features = [u'days_since_start', u'session_type', u'party_ALL_DEM', u'party_ALL_REP',\n u'party_BOTH', 'party_COM', u'urgency_No', u'urgency_Yes',\n u'taxlevy_No',\n u'taxlevy_Yes']\n topic_features = [\"topic_\"+str(x) for x in range(k)]\n features += topic_features\n X_train, y_train = prep.subset(features)\n feature_importance(model, features)\n 
feature_subset_indices = [73, 13]\n gb_file = \"/home/ubuntu/ca_bills_project/data/extra/intro_model_100_topics_gb.pkl\"\n with open(gb_file) as p:\n gb = pickle.load(p)\n make_partial_dependence(gb, X_train, y_train, features, feature_subset_indices)", "def plot_decision_regions(X, y, classifier, resolution=.02, test_idx=None):\n # setup marker generator & color map\n plt.figure()\n markers = ('x', 'o')\n colors = ('red', 'blue')\n\n # calculate and plot the decision surface\n x1_min, x1_max = X[:, 0].min() - 1, X[:, 0].max() + 1\n x2_min, x2_max = X[:, 1].min() - 1, X[:, 1].max() + 1\n xx1, xx2 = np.meshgrid(np.arange(x1_min, x1_max, resolution),\n np.arange(x2_min, x2_max, resolution))\n Z = classifier.predict(np.array([xx1.ravel(), xx2.ravel()]).T)\n Z = Z.reshape(xx1.shape)\n plt.contourf(xx1, xx2, Z, alpha=.35, cmap=ListedColormap(colors=colors[:len(np.unique(y))]))\n plt.xlim(xx1.min(), xx2.max())\n plt.ylim(xx2.min(), xx2.max())\n\n # scatter plot all values of the data sets\n for idx, cl in enumerate(np.unique(y)):\n plt.scatter(x=X[y == cl, 0],\n y=X[y == cl, 1],\n c=colors[idx],\n marker=markers[idx],\n label=cl,\n edgecolors='black')\n if test_idx:\n # circle test data\n X_test, y_test = X[test_idx, :], y[test_idx]\n plt.scatter(X_test[:, 0],\n X_test[:, 1],\n c='',\n edgecolors='black',\n alpha=1.0,\n linewidths=1,\n marker='o',\n s=100,\n label='test set')" ]
[ "0.7012952", "0.6911963", "0.6594613", "0.6533057", "0.65208495", "0.6514652", "0.65101475", "0.65086174", "0.6432394", "0.6432042", "0.6407054", "0.63388914", "0.6319624", "0.6305352", "0.62987405", "0.62423414", "0.6223323", "0.6214983", "0.6184689", "0.6180739", "0.61555046", "0.6116136", "0.6114839", "0.6086819", "0.60800153", "0.6069158", "0.6069158", "0.60527766", "0.60287595", "0.60077965" ]
0.7111119
0
Helper method for padding the input number so that program can maintain symmetry in tables
def padding(input_value, value):
    padding_value = str(input_value)
    for i in range(value - len(str(input_value))):
        padding_value += " "
    return padding_value
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def pad(number, width=0):\n return str(number).zfill(width)", "def get_pad1(n):\n if n < 10:\n return \" \"\n if n < 100:\n return \" \"\n if n < 1000:\n return \" \"\n return \"\"", "def padding_zeroes(number, length_string):\n return str(number).zfill(length_string)", "def int_padding(length, val, direction=\">\"):\n return '{0:0{direction}{fill}}'.format(val, direction=direction, fill=length)", "def gtin_pad(gtin):\n zero_space = 11 - len(gtin)\n gtin = '%s%s' % ('0'*zero_space, gtin)\n if len(gtin) == 11:\n gtin = '%s%s' % (gtin, gtin_checksum(gtin))\n return gtin", "def pad_number(number, length):\n\n string_number = str(number)\n number_of_zeros = length - len(string_number)\n if number_of_zeros >= 0:\n return \"0\" * number_of_zeros + string_number\n else:\n return string_number", "def pad(size, value):\n return (value + size - 1)/size*size", "def pad_digits(x, width):\n if pd.notnull(x):\n return '{0:0{1}d}'.format(int(x), width)\n else:\n return x", "def get_pad(self, n):\n pad = \"\"\n for i in range(0, n):\n pad += \" \"\n return pad", "def calculate_padding_to_align(length, align):\n return 0 if length % align == 0 else (align - (length % align))", "def pad_number(n, a, b):\n padded = shift_left(n, 8, a)\n padded = shift_left(padded, 8, b)\n crc = checksum(padded)\n padded = shift_left(padded, 32, crc)\n return padded", "def padding(self, n):\n if n < self._length: # pad with blanks\n k = self._length - n\n pad_str = \" \" * k\n else:\n pad_str = \"\"\n\n return pad_str", "def bracket_pad_num(num, maxnum):\n # compute padding\n numpad = len(str(maxnum)) - len(str(num))\n return \"[{}]{}\".format(num, \" \" * numpad)", "def pad_zeros(x, total):\n num_pad = total - len(x)\n for idx in range(num_pad):\n x = '0' + x\n return x", "def pad(seq, n):\n return", "def pad_with_border(x, n_pad):\n x_pad_list = [x[0:1]] * int(n_pad) + [x] + [x[-1:]] * int(n_pad)\n return np.concatenate(x_pad_list, axis=0)", "def _pad8(s):\n return '%08d' % int(s)", "def transform_padding(pad_width):\n num_pad_values = len(pad_width)\n onnx_pad_width = [0]*num_pad_values\n\n start_index = 0\n # num_pad_values will always be multiple of 2\n end_index = int(num_pad_values/2)\n for idx in range(0, num_pad_values):\n if idx % 2 == 0:\n onnx_pad_width[start_index] = pad_width[idx]\n start_index += 1\n else:\n onnx_pad_width[end_index] = pad_width[idx]\n end_index += 1\n\n return onnx_pad_width", "def pad(pfile):\n h, b, t = pfile.split('.') # [\"P06144\", \"7\", \"4754\"]\n\n if len(t) == 3:\n t = '0' + t\n elif len(t) == 2:\n t = '00' + t\n elif len(t) == 1:\n t = '000' + t\n else:\n pass\n\n return '.'.join([h, b, t])", "def str_padding(length, val):\n return '{0:<{fill}}'.format(val, fill=length)", "def pad_string(text, pad):\n\tpad_str = ''\n\tresult = ''\n\tif (type(pad) is int):\n\t\tpad_str = zen_settings['variables']['indentation'] * pad\n\telse:\n\t\tpad_str = pad\n\t\t\n\tnl = get_newline()\n\tlines = text.split(nl)\n\tresult = result + lines[0]\n\tfor line in lines[1:]:\n\t\tresult += nl + pad_str + line\n\t\t\n\treturn result", "def pad(value, digits, to_right=False):\n len_val = len(value)\n assert len_val <= digits\n rem_digits = digits - len_val\n if to_right:\n return value + \"0\"*rem_digits\n else:\n return \"0\"*rem_digits + value", "def padding_width(self):\n ...", "def pad_sequence(seq):\n seq_split = seq.strip().split(\"1\")\n last = seq_split[0]\n new_seq = last + \"1\"\n inc_added = 0\n out_added = 0\n for i in range(1, len(seq_split)-1):\n current = seq_split[i]\n\n # break up the 
intial sequences that leak information by adding padding\n if current == last:\n if last == \"-\":\n new_seq += \"+1\"\n inc_added += 1\n last = \"+\"\n else:\n new_seq += \"-1\"\n out_added += 1\n last = \"-\"\n else:\n new_seq += current + \"1\"\n last = current\n\n # 30% chance to inject randomness\n coin = random.randint(1, 101)\n if coin <= 30:\n if coin % 2 == 0:\n new_seq += \"+1\"\n else:\n new_seq += \"-1\"\n \n # return padded sequence, original number of cells, \n # number of incoming padding cells, and number of outgoing padding cells\n return new_seq, len(seq_split), inc_added, out_added", "def offset_pad(self, offset):\n return (((offset + 3) / 4) * 4)", "def _add_padding(input_str):\r\n padding_len = AES.block_size - len(input_str) % AES.block_size\r\n return input_str + padding_len * chr(padding_len)", "def zeroPad(numberString, zeros, left = True):\n for i in range(zeros):\n if left:\n numberString = '0' + numberString\n else:\n numberString = numberString + '0'\n return numberString", "def make_padding(kernel_size, stride, dilation):\n return -((-kernel_size - (kernel_size - 1) * (dilation - 1)) // stride + 1) // 2", "def add_padding(text1: str) -> str:\n\n pad_len = 8 - (len(text1) % 8)\n return text1 + (pad_len * '\\0')", "def pad(plain, size):\n offset = size - (len(plain) % size)\n return plain + chr(offset) * offset" ]
[ "0.7694465", "0.7258582", "0.7234789", "0.7135013", "0.70261407", "0.6954748", "0.6911326", "0.6909492", "0.68428165", "0.6823084", "0.68187344", "0.681711", "0.6802245", "0.6777663", "0.6709971", "0.6627718", "0.6591441", "0.65827405", "0.65770954", "0.65270615", "0.65061265", "0.6486627", "0.6473917", "0.6473288", "0.64709663", "0.64630765", "0.6435558", "0.64342713", "0.6424145", "0.64233166" ]
0.7383802
1
Calculates the profit from the confusion matrix
def profitCalculation(confusion_matrix):
    numberofClasses = 4
    profits = [[20, -7, -7, -7], [-7, 15, -7, -7], [-7, -7, 5, -7], [-3, -3, -3, -3]]
    totalProfit = 0
    for count in range(numberofClasses):
        for counter in range(numberofClasses):
            totalProfit += confusion_matrix[count][counter] * profits[count][counter]
    return totalProfit
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def profit_curve(cost_benefit_mat, y_pred_proba, y_true):\n n_obs = float(len(y_true))\n # Make sure that 1 is going to be one of our thresholds\n\n thresholds = np.linspace(0,1,101)\n profits = []\n for threshold in thresholds:\n y_predict = y_pred_proba >= threshold\n confusion_matrix = standard_confusion_matrix(y_true, y_predict)\n threshold_profit = np.sum(confusion_matrix * cost_benefit_mat) * 20 / 1000000\n profits.append(threshold_profit)\n return np.array(profits), np.array(thresholds)", "def calculate_profit(self):", "def profit_curve(cost_benefit, predicted_probs, labels):\n n_obs = float(len(labels))\n # Make sure that 1 is going to be one of our thresholds\n maybe_one = [] if 1 in predicted_probs else [1] \n thresholds = maybe_one + sorted(predicted_probs, reverse=True)\n profits = []\n for threshold in thresholds:\n y_predict = predicted_probs >= threshold\n confusion_matrix = standard_confusion_matrix(labels, y_predict)\n threshold_profit = np.sum(confusion_matrix * cost_benefit) / n_obs\n profits.append(threshold_profit)\n return np.array(profits), np.array(thresholds)", "def profit(self):\n retail_value = 0\n wholesale_value = 0\n for bike in self.sold:\n retail_value += bike.total_cost() + (\n self.retail_margin * bike.total_cost())\n wholesale_value += bike.total_cost()\n return retail_value - wholesale_value", "def calculate(self):\n\n tp = self.confusion_matrix.tp\n tn = self.confusion_matrix.tn\n fp = self.confusion_matrix.fp\n fn = self.confusion_matrix.fn\n n = self.confusion_matrix.n\n\n fp_tn = tn + fp\n tp_fn = fn + tp\n tn_fn = tn + fn\n tp_fp = fp + tp\n nis = tn_fn * tn_fn + tp_fp * tp_fp\n njs = fp_tn * fp_tn + tp_fn * tp_fn\n sum_of_squares = tp * tp + tn * tn + fp * fp + fn * fn\n\n a = (tp * (tp - 1) + fp * (fp - 1) + tn * (tn - 1) + fn * (fn - 1)) / 2.\n b = (njs - sum_of_squares) / 2.\n c = (nis - sum_of_squares) / 2.\n d = (n * n + sum_of_squares - nis - njs) / 2.\n\n x1 = a - ((a + c) * (a + b) / (a + b + c + d))\n x2 = ((a + c) + (a + b)) / 2.\n x3 = ((a + c) * (a + b)) / (a + b + c + d)\n denominator = x2 - x3\n\n if denominator != 0:\n return x1 / denominator\n else:\n return 0", "def compute_score(self, model, X, y):\n sum_benefit = 0\n probabs = model.clf.predict_proba(self.X_chunk)\n chunk_classes, chunk_classes_count = np.unique(self.y_chunk, return_counts=True)\n\n # for every actual label in y\n # if the label in y is unseen when training, skip it, don't include it in the error\n for i, c in enumerate(self.y_chunk):\n for j, cprime in enumerate(chunk_classes):\n\n # (1) compute the benefit matrix\n benefit_c_cprime = 0\n if cprime == self.fraud_label:\n benefit_c_cprime = self.X_chunk[i][-1] - self.cost if c == self.fraud_label else -self.cost\n\n # compute the probability f_i_cprime(x)\n probab_ic = 0\n if cprime in model.chunk_labels:\n try:\n probab_ic = probabs[i][list(model.chunk_labels).index(cprime)]\n except IndexError:\n probab_ic = probabs[i][np.argmax(chunk_classes_count)]\n\n sum_benefit += probab_ic * benefit_c_cprime\n\n return sum_benefit", "def sensitivity(confusion):\n conf = np.zeros(confusion.shape[0])\n for i in range(confusion.shape[0]):\n tp = confusion[i][i]\n fn = np.sum(confusion, axis=1) - tp\n conf[i] = tp / (tp + fn[i])\n return conf", "def get_confusion_matrix(y_true, y_pred):\r\n\r\n ## 3 classes\r\n TP1, TP2, TP3, FP1, FP2, FP3, TN1, TN2, TN3, FN1, FN2, FN3 = 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0\r\n\r\n for i in range(y_true.shape[0]):\r\n if y_true[i] == 0 and y_pred[i] == 0:\r\n TN1 += 1\r\n elif y_true[i] == 0 
and y_pred[i] != 0:\r\n FP1 += 1\r\n elif y_true[i] != 0 and y_pred[i] == 0:\r\n FN1 += 1\r\n elif y_true[i] != 0 and y_pred[i] != 0:\r\n TP1 += 1\r\n\r\n for i in range(y_true.shape[0]):\r\n if y_true[i] == 1 and y_pred[i] == 1:\r\n TN2 += 1\r\n elif y_true[i] == 1 and y_pred[i] != 1:\r\n FP2 += 1\r\n elif y_true[i] != 1 and y_pred[i] == 1:\r\n FN2 += 1\r\n elif y_true[i] != 1 and y_pred[i] != 1:\r\n TP2 += 1\r\n\r\n for i in range(y_true.shape[0]):\r\n if y_true[i] == 2 and y_pred[i] == 2:\r\n TN3 += 1\r\n elif y_true[i] == 2 and y_pred[i] != 2:\r\n FP3 += 1\r\n elif y_true[i] != 2 and y_pred[i] == 2:\r\n FN3 += 1\r\n elif y_true[i] != 2 and y_pred[i] != 2:\r\n TP3 += 1\r\n\r\n conf_matrix1 = [\r\n [TP1, FP1],\r\n [FN1, TN1]\r\n ]\r\n conf_matrix2 = [\r\n [TP2, FP2],\r\n [FN2, TN2]\r\n ]\r\n conf_matrix3 = [\r\n [TP3, FP3],\r\n [FN3, TN3]\r\n ]\r\n\r\n return conf_matrix1, conf_matrix2, conf_matrix3", "def get_profit(self):\n # Profit from previous transactions\n values = [t['value'] for t in self.transactions]\n\n profits = []\n base = None\n for v in values:\n if not base:\n base = v\n profit = v - base\n profits.append(profit)\n base = v\n\n return np.array(profits).sum()\n\n # Get all values to get profit\n #return np.array([ s['value'] for s in self.states ]).mean()", "def calculate(self):\n\n tp = self.confusion_matrix.tp\n tn = self.confusion_matrix.tn\n fp = self.confusion_matrix.fp\n fn = self.confusion_matrix.fn\n\n agreement = tp + tn\n chance0 = (tn + fn) * (tn + fp)\n chance1 = (fp + tp) * (fn + tp)\n sum_ = tn + fn + fp + tp\n chance = (chance0 + chance1) / sum_\n\n return (agreement - chance) / (sum_ - chance)", "def calculate(self):\n\n tp = self.confusion_matrix.tp\n tn = self.confusion_matrix.tn\n fp = self.confusion_matrix.fp\n fn = self.confusion_matrix.fn\n n = self.confusion_matrix.n\n\n fp_tn = tn + fp\n tp_fn = fn + tp\n tn_fn = tn + fn\n tp_fp = fp + tp\n nis = tn_fn * tn_fn + tp_fp * tp_fp\n njs = fp_tn * fp_tn + tp_fn * tp_fn\n sum_of_squares = tp * tp + tn * tn + fp * fp + fn * fn\n\n a = (tp * (tp - 1) + fp * (fp - 1) + tn * (tn - 1) + fn * (fn - 1)) / 2.\n b = (njs - sum_of_squares) / 2.\n c = (nis - sum_of_squares) / 2.\n d = (n * n + sum_of_squares - nis - njs) / 2.\n\n return (a + d) / (a + b + c + d)", "def calculate_confusion_matrix(self):\n self.confusion_matrix = confusion_matrix(self.y_true, self.y_pred)\n \n classes_weights = np.array(self.number_of_samples_per_class).reshape(\n self.number_of_classes, 1)\n \n self.normalized_confusion_matrix = (self.confusion_matrix/classes_weights).round(self.digits_count_fp)", "def calculate(self):\n\n return 2 * self.confusion_matrix.tp / \\\n (2 * self.confusion_matrix.tp + self.confusion_matrix.fp + self.confusion_matrix.fn)", "def calculate(self):\n\n return self.confusion_matrix.tp / (self.confusion_matrix.tp + self.confusion_matrix.fn)", "def theoretical_effective(dataset):\n return float(sum(dataset))/len(dataset)", "def calculate(self):\n\n return self.confusion_matrix.fn", "def compute_random_baseline(self, classes):\n\n # based on the class distribution of the data\n sum_benefit = 0\n\n # c is the actual label\n # if the label in y is unseen when training, skip it, don't include it in the error\n for i, c in enumerate(self.y_chunk):\n for j, cprime in enumerate(classes):\n\n # (1) compute the benefit matrix\n benefit_c_cprime = 0\n if cprime == self.fraud_label:\n benefit_c_cprime = self.X_chunk[i][-1] - self.cost if c == self.fraud_label else -self.cost\n\n # (2) get the probability\n probab_ic = 1 / 
len(classes)\n sum_benefit += probab_ic * benefit_c_cprime\n\n return sum_benefit", "def get_perf(self) :\n self.train()\n\n prediction = self.clf.predict(self.df_test.drop(columns = 'up')[:-1])\n self.accuracy = accuracy_score(df_test['up'][length:].values, prediction)\n tn, fp, fn, tp = confusion_matrix(df_test['up'][length:].values, prediction).ravel()\n self.recall = tp/(tp+fn)\n self.specificity = tn / (tn+fp)\n\n\n self.df_true = self.df_true[self.length:]\n\n profit = 1\n mini = 1\n maxi = 1\n self.df_true['close'] = self.df_true['close'].map(lambda x : np.exp(x))\n for s in range(1,len(self.df_true)):\n if prediction[x-1] == 1 :\n result = ((self.df_true['close'].iloc[s] -self.df_true['close'].iloc[s-1]) / self.df_true['close'].iloc[s-1]) + 1\n profit = profit * result\n if result < mini :\n mini = result\n if maxi < result :\n maxi = result\n self.mini = mini\n self.maxi = maxi\n self.profit = profit", "def get_model_profits(model, cost_benefit, X_test, y_test):\n predicted_probs = model.predict_proba(X_test)[:, 1]\n profits, thresholds = profit_curve(cost_benefit, predicted_probs, y_test)\n\n return profits, thresholds", "def calculate(self):\n\n tp = self.confusion_matrix.tp\n tn = self.confusion_matrix.tn\n fp = self.confusion_matrix.fp\n fn = self.confusion_matrix.fn\n\n n = tp + tn + fp + fn\n e1 = (fn * (fn + 2 * tp) / (tp + fn) + fp * (fp + 2 * tn) / (tn + fp)) / n\n e2 = (fp * (fp + 2 * tp) / (tp + fp) + fn * (fn + 2 * tn) / (tn + fn)) / n\n\n return min(e1, e2)", "def calculate(self):\n\n tp = self.confusion_matrix.tp\n fp = self.confusion_matrix.fp\n fn = self.confusion_matrix.fn\n\n return tp / (tp + fp + fn)", "def accuracy(y_true, y_pred):\r\n\r\n cm = confusion_matrix(y_true=y_true, y_pred=y_pred)\r\n cost_m = np.max(cm) - cm\r\n indices = linear_sum_assignment(cost_m)\r\n indices = np.asarray(indices)\r\n indexes = np.transpose(indices)\r\n total = 0\r\n for row, column in indexes:\r\n value = cm[row][column]\r\n total += value\r\n return total * 1. 
/ np.sum(cm)", "def get_confmatrix(self,y_pred,y_test):", "def analyze_confusion(confusion, true_labels):\n print(\"Confusion matrix:\")\n printer(\"Pre\\Tru\", true_labels)\n\n for line, label in zip(confusion, true_labels):\n printer(f\"{label}\", line)\n\n TP = confusion.diagonal()\n FN = np.sum(confusion, axis=0) - TP\n FP = np.sum(confusion, axis=1) - TP\n\n print()\n printer(\"TP\", TP)\n printer(\"FP\", FP)\n printer(\"FN\", FN)\n\n # https://stackoverflow.com/a/37977222\n # P = TP / ( TP + FP)\n # R = TP / ( TP + FN)\n dP = TP + FP\n P = np.divide(TP, dP, out=np.zeros_like(TP, dtype=float), where=dP != 0)\n dR = TP + FN\n R = np.divide(TP, dR, out=np.zeros_like(TP, dtype=float), where=dR != 0)\n\n print(\"\\nPrecision = TP / ( TP + FP)\\tRecall = TP / ( TP + FN)\")\n printer(\"Prec\", P, \":.4f\")\n printer(\"Recall\", R, \":.4f\")\n\n avgP = np.sum(P) / len(true_labels)\n avgR = np.sum(R) / len(true_labels)\n print(f\"Average P: {avgP:.4f}\\tR: {avgR:.4f}\")\n\n print(\"F-score = 2 (PxR) / (P+R)\")\n # F = 2 (PxR) / (P+R)\n PdR = 2 * P * R\n PpR = P + R\n F = np.divide(PdR, PpR, out=np.zeros_like(TP, dtype=float), where=PpR != 0)\n printer(\"F-score\", F, \":.4f\")\n\n avgF = np.sum(F) / len(true_labels)\n print(f\"Average F-score {avgF}\")\n\n return avgF", "def confusionMetric( self, classTest, classPred):\n # accuracy of the model - in one number\n accuracy = average_precision_score( classTest, classPred )\n # confusion matrix 2x2 matric\n matConf = confusion_matrix(classTest, classPred)\n # cohen Kappa is applicable for unbalanced data\n matCohenKappa = cohen_kappa_score(classTest, classPred)\n # classification report\n strClassificationReport = classification_report(classTest, classPred)\n \n return accuracy, matConf, matCohenKappa, strClassificationReport", "def calculate(self):\n\n return self.confusion_matrix.fp", "def get_confusion_matrix(self):\n return confusion_matrix(self.test_y, self.predict())", "def expected_policy_profit(targeting_decision, g, observed_profit, prob_treatment):\n return np.sum(((1-targeting_decision) * (1-g) * observed_profit)/(1-prob_treatment) +\\\n (targeting_decision * g * observed_profit)/(prob_treatment))", "def negative_predictive_value(y_true, y_pred):\n\n cm = confusion_matrix(y_true, y_pred)\n return cm[0,0] / cm[:,0].sum()", "def specificity(y_true, y_pred):\n\n cm = confusion_matrix(y_true, y_pred)\n return cm[0,0] / cm[0,:].sum()" ]
[ "0.68429047", "0.6588161", "0.6383195", "0.62662387", "0.62233555", "0.617524", "0.6172645", "0.61560917", "0.61267865", "0.61042863", "0.60839117", "0.60539186", "0.60437346", "0.60431606", "0.59532946", "0.5939802", "0.5924911", "0.5886583", "0.5864573", "0.58506", "0.5841168", "0.5839408", "0.58378524", "0.58179355", "0.5805116", "0.580051", "0.5797128", "0.5791904", "0.57754207", "0.5766181" ]
0.8248979
0
The lines are getting split and parsed to gather the required information. a Edge with the data of lines is getting created. The list is appended by this edge
def createEdge(lines, list):
    res = lines.split('\\n')
    mains = res[0].split(' ')
    sid = mains[3]
    sid = sid[4:-1]
    ssource = mains[4]
    ssource = ssource[8:-1]
    starget = mains[5]
    starget = starget[8:-2]
    slabel = ''
    i = 2
    while ('key=' in res[i]):
        i = i + 1
    if ('EdgeLabel' in res[i + 4]):
        slabels = res[i + 4].split('>')
        slabel = slabels[1]
        slabel = slabel.split('<')[0]
        slabel = umlautHelper(slabel)
    source = findInList(ssource, list)
    target = findInList(starget, list)
    nline = Edge(sid, source, target)
    nline.setLabel(slabel)
    j = i + 1
    while ('Path' in res[j] or 'Point' in res[j]):
        j = j + 1
    allarrows = res[j + 1]
    if ('source="standard' in allarrows or 'source="delta' in allarrows):
        nline.setArrowSource(True)
    if ('target="standard' in allarrows or 'target="delta' in allarrows):
        nline.setArrowTarget(True)
    if (type(source) == Entity and type(target) == Attribute):
        source.addAttribute(target)
    if (type(target) == Entity and type(source) == Attribute):
        target.addAttribute(source)
    if (type(source) == Relation and type(target) == Attribute):
        source.addAttribute(target)
    if (type(target) == Relation and type(source) == Attribute):
        target.addAttribute(source)
    list.append(nline)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def lineToList(self, line):\n raise NotImplementedError", "def parse_lines(self, lines):\n assert isinstance(lines, Iterable)\n\n for line in lines:\n name, values = self.parse_line(line)\n self.add(name, values)", "def prepare_lines_data(self):\n for l_hd in self.hour_data:\n if not self.node_from or not self.node_to:\n print('ERROR! line %i-%i has no node(s)' % (self.node_from_code, self.node_to_code))\n if l_hd.state and self.node_from.get_node_hour_state(l_hd.hour) \\\n and self.node_to.get_node_hour_state(l_hd.hour):\n if not self.type:\n node_start = self.node_from_code\n node_finish = self.node_to_code\n base_coeff = 0\n k_pu = 0\n else:\n node_start = self.node_to_code\n node_finish = self.node_from_code\n base_coeff = self.node_to.voltage_class / self.node_from.voltage_class\n k_pu = math.sqrt(math.pow(self.kt_re, 2) + math.pow(self.kt_im, 2))\n lag = math.atan(self.kt_im / self.kt_re) if self.kt_re else 0\n\n self.eq_db_lines_data.append((\n l_hd.hour, node_start, node_finish, self.parallel_num, self.type,\n max(self.node_from.voltage_class, self.node_to.voltage_class), base_coeff,\n l_hd.r, l_hd.x, l_hd.g, -l_hd.b, k_pu, lag, -l_hd.b_from, -l_hd.b_to\n ))", "def create_lines(self) -> None:\n res = []\n for connection in self.connections:\n start_component = self.components[connection.start_entity]\n end_component = self.components[connection.end_entity]\n start_pin_location = (\n start_component.location\n + start_component.pin_locations[connection.start_pin]\n )\n end_pin_location = (\n end_component.location + end_component.pin_locations[connection.end_pin]\n )\n\n x_midpoint = (start_pin_location.x + end_pin_location.x) / 2\n bend_start = Point(x_midpoint, start_pin_location.y)\n bend_end = Point(x_midpoint, end_pin_location.y)\n bends = [bend_start, bend_end]\n res.append(Line(connection, start_pin_location, *bends, end_pin_location))\n\n self.lines = res", "def _return_node_lists(self, my_breaklines):\n my_filelines = self.file_lines\n v_start, c_start = my_breaklines\n\n for i, line in enumerate(my_filelines):\n if v_start == i:\n vehicle_part = line.strip().split(' ')\n self.info['NUMBER'], self.info['CAPACITY'] = int(vehicle_part[0]), int(vehicle_part[-1])\n if c_start <= i:\n c_part = line.strip().split(' ')\n c_store = list()\n for j in c_part:\n try:\n c_store.append(int(j))\n\n except ValueError:\n continue\n if c_store != []:\n if c_store[4]> 130:\n self.customers.append(\n Customer(c_store[0], c_store[1], c_store[2], c_store[3], c_store[4], c_store[5], c_store[6], 0))\n else:\n self.customers.append(\n Customer(c_store[0], c_store[1], c_store[2], c_store[3], c_store[4], c_store[5], c_store[6], 1))", "def fromLines(cls, listOfLine: list):\n points = cls.edgeToVertex(listOfLine)\n return cls.fromPoints(points)", "def fromLines(cls, listOfLine: list):\n points = cls.edgeToVertex(listOfLine)\n return cls.fromPoints(points)", "def fromLines(cls, listOfLine: list):\n points = cls.edgeToVertex(listOfLine)\n return cls.fromPoints(points)", "def build_edges(self):\n print(\"Constructing Edges.\")\n # -----------------------------------------\n # TODO: You should write this method!\n\n # Note: this method may take some time to run - it is likely to be O(N^2), and some lists have N = 10,000 words or more.\n # (I've had students decide that their program was \"broken\" and quit it before this process finished... 
every time,\n # not realizing that the program was working hard behind the scenes.)\n # I recommend that you keep track of the number of edges you have added, and if it is a multiple of 1000, print\n # something so that you know your program is making progress.\n n = len(self.vertices)\n\n\n\n \n # -----------------------------------------\n print(\"Done Constructing Edges.\\n------------------------------------\")", "def parse_edgelist(\n lines, comments=\"#\", delimiter=None, create_using=None, nodetype=None\n):\n H = empty_hypergraph(create_using)\n for line in lines:\n if comments is not None:\n p = line.find(comments)\n if p >= 0:\n line = line[:p]\n if not line:\n continue\n edge = line.strip().split(delimiter)\n\n if nodetype is not None:\n try:\n edge = [nodetype(node) for node in edge]\n except ValueError as e:\n raise TypeError(f\"Failed to convert nodes to type {nodetype}.\") from e\n\n H.add_edge(edge)\n return H", "def linelist(self):\n line_list = Marker()\n line_list.header = self._header\n line_list.type = Marker.LINE_LIST\n line_list.action = Marker.ADD\n line_list.scale.x = 0.005\n line_list.color = self.YELLOW\n line_list.pose = deepcopy(self.POSE)\n\n line_list.points.extend((self._p1, self._p2))\n line_list.points.extend((self._p2, self._p3))\n line_list.points.extend((self._p3, self._p4))\n line_list.points.extend((self._p4, self._p1))\n line_list.points.extend((self._p5, self._p6))\n line_list.points.extend((self._p6, self._p7))\n line_list.points.extend((self._p7, self._p8))\n line_list.points.extend((self._p8, self._p5))\n line_list.points.extend((self._p1, self._p5))\n line_list.points.extend((self._p2, self._p6))\n line_list.points.extend((self._p3, self._p7))\n line_list.points.extend((self._p4, self._p8))\n\n return line_list", "def create_edgelist(self):\n self.edgelist = []\n \n for i in range(len(self.Adjmatrix)):\n for j in range(len(self.Adjmatrix)):\n if(self.Adjmatrix[i, j] == 1):\n middlex = 0.5*(self.x[i] + self.x[j])\n middley = 0.5*(self.y[i] + self.y[j])\n self.edgelist.append({\"start node\": i, \"end node\": j, \"link length\": self.Dismatrix[i, j], \"edgediameter\": self.edgediameter, \"middlex\": middlex, \"middley\": middley})", "def merge_lines(self, lines):\n pairs = []\n for line in lines:\n if len(pairs) and self.is_left(pairs[-1][-1]) == self.is_left(line):\n pairs[-1].append(line)\n else:\n pairs.append([line])\n\n lines = []\n for pair in pairs:\n if len(pair) == 1:\n lines.append(pair[0])\n else:\n x1 = sum([line.x1 for line in pair]) // len(pair)\n x2 = sum([line.x2 for line in pair]) // len(pair)\n y1 = sum([line.y1 for line in pair]) // len(pair)\n y2 = sum([line.y2 for line in pair]) // len(pair)\n lines.append(GripPipeline.Line(x1, y1, x2, y2))\n return lines", "def prepare_data(self, lines: List[str]) -> List[str]:\n if self.is_tokenized:\n if self.parser == \"spacy\":\n lines = [l.split() for l in lines]\n elif self.parser == \"udpipe\":\n lines = [[l.split()] for l in lines]\n\n return lines", "def extract_edges(self,fn):\n new_edges = {}\n with open (fn,'r') as f:\n for line in f:\n new_edges = EdgeParser.line_to_edges(self,line,new_edges)\n return new_edges", "def addEdge_file(self, path):\n with open(path, 'r') as File:\n for line in File.readlines():\n ints = list(map(int, line.strip().split())) \n u = ints[0]\n v = ints[1:]\n for i in v:\n self.addEdge(u, i)", "def __init__(self, scn_line_list):\n self.scn_line_list = scn_line_list", "def process_lines(self, lines):\n line_index = 0\n n_lines = len(lines)\n while line_index 
< n_lines:\n if lines[line_index].startswith(\"HIERARCHY\"):\n line_index = self._read_skeleton(lines, line_index, n_lines)\n if lines[line_index].startswith(\"MOTION\"):\n self._read_frametime(lines, line_index+2)\n line_index = self._read_frames(lines, line_index+3, n_lines)\n else:\n line_index += 1", "def __init__(self, lines, names):\n\n # from graphing import Graph\n\n self.lines = lines\n self.remaining_events = []\n\n leftmost = _MAX_RIGHT\n\n for i, (name, left, right) in enumerate(self.lines):\n self.lines[i] = (name, left-leftmost, right-leftmost)\n\n for i, (name, left, right) in enumerate(self.lines):\n self.remaining_events.append((left, i))\n self.remaining_events.append((right, i))\n\n self.remaining_events.sort()\n\n self.active_line_segments = []\n self.sweep_line = None\n\n self.is_done = False\n self.idx = 0\n self.a_line = None\n\n self.overlap_graph = nx.Graph(names)\n # self.interval_graph = nx.Graph(names)", "def parse(lines: List[str]):\n\n len_lines = len(lines)\n i = 0\n\n node_list = []\n \n while i < len_lines:\n line = lines[i]\n l = line.strip()\n if len(l) == 0:\n i += 1\n continue\n ls = l.split(\"\\t\")\n nlines = int(ls[0])\n content_lines = lines[i: i + nlines + 1]\n node = _build_node(content_lines)\n node_list.append(node)\n\n i = i + nlines + 1\n \n return node_list", "def splitlines(self) -> List[String]:\n pass", "def _create_examples_split(self, lines, set_type):\n examples = []\n \n for (i, line) in enumerate(lines):\n a_label = int(line[\"label\"])\n q_type = line[\"type\"]\n if a_label == 0 and q_type != \"qLookup\":\n #print(\"discontinue\")\n continue\n sentence_number = 0\n premise_text = line[\"premise\"]\n the_id = int(line[\"id\"])\n modified_premise_text = re.sub(self.stage_name_pattern,\"\",premise_text)\n modified_premise_text = re.sub(self.w_patterns,\"\",modified_premise_text)\n hypothesis_text = line[\"hypothesis\"]\n hypothesis_text = re.sub(self.w_patterns,\"\",hypothesis_text)\n \n\n sentences = modified_premise_text.split('.')\n\n for j, sentence in enumerate(sentences):\n guid = \"\" + str(sentence_number) + \"\\t\" + str(i) + \"\\t\" + str(len(sentences)) + \"\\t\" + str(a_label)\n text_a = sentence\n text_b = hypothesis_text\n label = a_label\n sentence_number += 1\n examples.append(\n InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))\n #print(\"16th sentence::\",sentences[16])\n\n return examples", "def process_edges(edges_string_list):\n edge_list = []\n for line in edges_string_list:\n pair = line.split(',')\n edge_list.append([int(pair[0]), int(pair[1]), float(pair[2])])\n return edge_list", "def __init__(self, lines, img=False):\n self.grid = []\n self.bingrid = []\n self.edges = []\n self.neighbors = []\n self.parse(lines)\n if not img:\n self.find_edges()", "def create_line(self):\n if self.hosts and self.line:\n self.msg(\"There is a line here already.\")\n self.display_line()\n return\n self.line = []\n other_hosts = [self.caller.search(arg) for arg in self.lhslist]\n other_hosts = [ob for ob in other_hosts if ob and ob.player]\n other_hosts.append(self.caller)\n self.hosts = other_hosts\n if \"loop\" in self.switches:\n self.toggle_loop()\n self.display_line()", "def create_line_list(self,depth_arr):\n\n '''\n depth_arr- depth image as numpy array\n '''\n\n try:\n 
body=[['shoulder_line',[self.rpts[11],self.rpts[12]]],['waist_line',[self.rpts[23],self.rpts[24]]],['left_shoulder_waist',[self.rpts[11],self.rpts[23]]],['right_shoulder_waist',[self.rpts[12],self.rpts[24]]],['right_thigh',[self.rpts[24],self.rpts[26]]],['left_thigh',[self.rpts[23],self.rpts[25]]],['right_leg',[self.rpts[26],self.rpts[28]]],['left_leg',[self.rpts[25],self.rpts[27]]],['right_forearm',[self.rpts[14],self.rpts[16]]],['left_forearm',[self.rpts[13],self.rpts[15]]],['right_bicep',[self.rpts[12],self.rpts[14]]],['left_bicep',[self.rpts[11],self.rpts[13]]]]\n self.linelist.points=[]\n self.linelist.header.frame_id = \"kinect_frame\"\n self.linelist.header.stamp = rospy.Time.now()\n self.linelist.type = Marker.LINE_LIST\n \n self.linelist.id=1\n self.linelist.action = Marker.ADD \n self. linelist.scale.x = 0.05\n\n self.linelist.color.g = 1.0\n self.linelist.color.a = 1.0\n\n \n\n for _,pointl in body:\n for pt in pointl:\n depth_val=float(depth_arr[pt[1], pt[0]])\n ptl_x,ptl_y,ptl_z=self.depth_to_xyz(pt[0],pt[1],depth_val)\n \n self.linelist_point=Point()\n self.linelist_point.x = ptl_x\n self.linelist_point.y = ptl_y\n self.linelist_point.z = ptl_z\n self.linelist.points.append(self.linelist_point)\n \n except:\n pass", "def edges(self):\n return {k: v for k, v in self.parts.items() if isinstance(v, LineString)}", "def _parseLine(self, line, delimiter = \":\"):\r\n\t\tsplt = line.split(delimiter)\r\n\t\tinVec = self._parseVec(splt[0])\r\n\t\toutVec = self._parseVec(splt[1])\r\n\t\tif (len(splt) == 2):\r\n\t\t\tlabel = \"\"\r\n\t\telse:\r\n\t\t\tlabel = splt[2]\r\n\t\tself.data.append({'in':inVec, 'out':outVec, 'label':label})", "def _extract_next_forc(self, lines):\r\n\r\n _h, _m, _hr, _T = [], [], [], []\r\n i = 0\r\n\r\n while lines[i][0] in ['+', '-']:\r\n split_line = lines[i].split(',')\r\n _h.append(float(split_line[0]))\r\n _hr.append(_h[0])\r\n _m.append(float(split_line[1]))\r\n if self.temperature is not None:\r\n _T.append(float(split_line[2]))\r\n i += 1\r\n\r\n self.h.append(_h)\r\n self.hr.append(_hr)\r\n self.m.append(_m)\r\n if self.temperature is not None:\r\n self.temperature.append(_T)\r\n\r\n return len(_h)", "def read_dataset_from_list(self, lineLst):\n data = []\n for line in lineLst:\n if self.sos != '':\n data.append(self.sos)\n for word in line:\n word = self.replace_special_chars(word)\n _word = word\n if self.unit == \"oracle\":\n if \"+\" in word:\n # double check\n if word.startswith(\"word\") and len(word.split('+'))>1 \\\n and len(word.split('+')[0].split(\":\"))>1:\n _word = word.split('+')[0].split(\":\")[1]\n else:\n continue\n if self.unit == \"morpheme\":\n _word = re.sub(\"@@\", \"\", word)\n if not self.is_hyperlink(_word.lower()) and len(_word) <= 100:\n data.append(word)\n if self.eos != '':\n data.append(self.eos)\n return data" ]
[ "0.63740003", "0.6287413", "0.62869203", "0.6241954", "0.62419176", "0.61737263", "0.61737263", "0.61737263", "0.6111498", "0.5984311", "0.59755605", "0.596852", "0.59572536", "0.5953377", "0.5927684", "0.59248614", "0.5886327", "0.5864776", "0.58360136", "0.5832824", "0.5829702", "0.58245707", "0.5790501", "0.57896024", "0.5774937", "0.5773654", "0.56866", "0.5681923", "0.56792355", "0.56753665" ]
0.6507163
0
creates a Relation with the given parameter
def createRelation(rid, rlabel, list, x, y):
    relation = Relation(rid, rlabel, x, y)
    list.append(relation)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def declare_relation(self,\n rel_name,\n domain_type,\n range_type,\n trainable = False,\n dense = False):\n if rel_name in self._declaration:\n raise RelationNameError(rel_name, 'Multiple declarations for relation.')\n reserved = dir(NeuralQueryExpression)\n if rel_name in reserved:\n logging.warn(\n 'rel_name prohibits expr.%s() as it matches a reserved word in: %r',\n rel_name, reserved)\n self._declaration[rel_name] = RelationDeclaration(rel_name, domain_type,\n range_type, trainable,\n dense)\n for type_name in [domain_type, range_type]:\n if type_name not in self._symtab:\n self._symtab[type_name] = symbol.SymbolTable()\n self._rel_name_symtab.insert(rel_name)", "def create_relation(self, left_node, rel, right_node):\n rel = Relationship(left_node, rel, right_node)\n self.graph.merge(rel)\n return", "def Relation_fromString(*args):\n return _libsbml.Relation_fromString(*args)", "def _create_new_relation_concept(self, rc_type, data_dict):\n # generate name, create individual with role assignments\n i = self.auto_generated_name_numbers[rc_type]\n self.auto_generated_name_numbers[rc_type] += 1\n relation_name = f\"i{rc_type.name}_{i}\"\n\n kwargs = {}\n for key, value in data_dict.items():\n res = self._handle_key_for_individual(key, value, relation_name, None)\n if res is not None:\n kwargs.update(res)\n\n relation_individual = self._create_individual(rc_type, relation_name, relation_name, label=None, kwargs=kwargs)\n\n return relation_individual", "def relation( self, obj, relType ):\n raise NotImplementedError(\"relation\")", "def addRelation(self, aRelation):\n # TODO: Raise exception\n if issubclass(aRelation.__class__, RelationDefinition):\n self._relations[aRelation.name] = aRelation\n # else:\n # raise exception, not a relation", "def add_relation(cls, row_id, rel_obj):\n obj = cls.query.filter_by(id=row_id).first()\n # obj = db.session.query(cls).filter_by(id=row_id).first()\n #print(type(obj))\n if cls.__name__ == 'Actor':\n obj.filmography.append(rel_obj)\n elif cls.__name__ == 'Movie':\n obj.cast.append(rel_obj)\n return commit(obj)", "def createNodeRoleRelation(_session, _segment, _const):\n return createNode(_session, _segment, _const, \"role_relation\")", "def addRelation(klass, relation, rtype):\n if type(relation) is dict:\n if not relation.has_key('name'):\n msg = \"No key 'name' in the relation %s in class %s\" % (relation, klass.__name__)\n raise InvalidRelationshipError, msg\n name = relation['name']\n args = relation\n else:\n name = relation\n args = {}\n relationshipKlass = Relationship.TYPES[rtype]\n klass.RELATIONSHIP_CACHE[name] = (relationshipKlass, args)", "def _setRelation(self, node):\n if getattr(self, \"relation\", None):\n element = etree.SubElement(node, 'relation')\n element.text = getattr(self, \"relation\")", "def _add_relationship(self, reltype, target_part):\n rId = self._relationships._next_rId\n rel = _Relationship(rId, reltype, target_part)\n self._relationships._additem(rel)\n return rel", "def cypher_create():\n graph.cypher.execute(\"CREATE (a:Person {name:{N}})\", {\"N\": \"yangyy\"})", "def add_relation(wn, source, target, new_rel, change_list=None):\n insert_rel(source, new_rel, target, change_list)\n if new_rel in inverse_synset_rels:\n inv_rel_type = inverse_synset_rels[new_rel]\n insert_rel(target, inv_rel_type, source, change_list)", "def __init__(self, name=None, relation_property=None, relation_table=None,\n entity_id_column=None, related_entity_id_column=None):\n self.name = name\n self.relation_property = relation_property\n 
self.relation_table = relation_table\n self.entity_id_column = entity_id_column\n self.related_entity_id_column = related_entity_id_column\n \n self.insert = text(f\"REPLACE INTO {self.relation_table.name} \"\n f\" ({self.entity_id_column}, {self.related_entity_id_column}) \"\n f\" VALUES (:entity_id, :related_entity_id)\")\n \n self.delete = text(f\"DELETE FROM {self.relation_table.name} \"\n f\" WHERE {self.entity_id_column} = :entity_id\"\n f\" AND {self.related_entity_id_column} = :related_entity_id\")", "def as_relational(self, symbol):\n A, B = self.args\n\n A_rel = A.as_relational(symbol)\n B_rel = B.as_relational(symbol)\n\n return Xor(A_rel, B_rel)", "def _create(cls, model_class, *args, **kwargs):\n for k in kwargs.keys():\n if k in model_class.relationships():\n rel_key = '{}_id'.format(k)\n kwargs[rel_key] = str(kwargs[k].id)\n obj = super(BaseFactory, cls)._create(model_class, *args, **kwargs)\n obj.save(obj)\n return obj", "def test_add_relation_type(self):\n pass", "def __new__(cls, *args, **kwargs):\n if not hasattr(cls, '_instance'):\n cls._instance = super(RelationStorage, cls).__new__(cls, *args, **kwargs)\n\n return cls._instance", "def add_resource_relation_by_user(self, *, id: str,\n user_id: str,\n relation_type: UserResourceRel,\n resource_type: ResourceType) -> None:\n if resource_type not in resource_relation_model:\n raise NotImplementedError(f'The resource_type {resource_type.name} is not defined!')\n\n if relation_type not in resource_relation_model[resource_type]:\n raise NotImplementedError(f'the relation type {relation_type} is not defined!')\n\n res_rel_model = resource_relation_model[resource_type][relation_type]\n res_key = f'{resource_type.name.lower()}_rk'\n\n user_record = RDSUser(rk=user_id, email=user_id)\n res_rel_record = res_rel_model(user_rk=user_id)\n res_rel_record.__setattr__(res_key, id)\n try:\n with self.client.create_session() as session:\n session.merge(user_record)\n session.merge(res_rel_record)\n session.commit()\n except Exception as e:\n LOGGER.exception(f'Failed to create relation between user {user_id} and resource {id}')\n raise e", "def __push_relation(self, id1, id2, id1_name, id2_name, table):\n # case: No entry about relation is in DB yet\n if not self.__postgre_db.is_in_table(table, id1_name + \"=\" + str(\n id1)):\n self.__postgre_db.insert(table, {\n id1_name: id1, id2_name: [id2], \"aggregation\": 0})\n\n # case: Entry about single_pattern is in DB\n else:\n old_list = self.__postgre_db.get(table, id1_name + \"=\" + str(\n id1), id2_name)\n new_list = list(set(old_list + [id2]))\n self.__postgre_db.update(\n table, id2_name + \"=\" + add_quotes(replace_brackets(str(new_list))), id1_name + \"=\" + str(id1))", "def actor_add_relation():\r\n\r\n data = get_request_data()\r\n if 'id' in data.keys():\r\n try:\r\n row_id = int(data['id'])\r\n relation_id = int(data['relation_id'])\r\n except:\r\n err = 'Id must be integer'\r\n return make_response(jsonify(error=err), 400)\r\n\r\n obj = Movie.query.filter_by(id=relation_id).first()\r\n try:\r\n actor = Actor.add_relation(row_id, obj)\r\n rel_actor = {k: v for k, v in actor.__dict__.items() if k in ACTOR_FIELDS}\r\n rel_actor['filmography'] = str(actor.filmography)\r\n except:\r\n err = 'Record with such id does not exist'\r\n return make_response(jsonify(error=err), 400)\r\n\r\n return make_response(jsonify(rel_actor), 200)\r\n\r\n else:\r\n err = 'No id specified'\r\n return make_response(jsonify(error=err), 400)", "def add_relation(self, qid, relation, qid2):\n if 
self._kg_symbols is not None:\n self._kg_symbols.add_relation(qid, relation, qid2)", "def create(self, validated_data):\n # `entities` field is renamed to `positioninrelation_set` based on source of nested serializer\n\n entities = validated_data.pop('positioninrelation_set', {})\n\n # Prevent \"Failed to populate slug Relation.slug from name\" output\n validated_data['name'] = 'Relation'\n\n with transaction.atomic():\n instance = Relation.objects.create(**validated_data)\n for entity in entities:\n PositionInRelation.objects.create(\n relation=instance,\n entity=entity['entity'],\n position=entity.get('position', None)\n )\n\n return instance", "def create_wroteRelation(conn, wroteRelation):\n\tsql = ''' INSERT INTO wroteRelation(authorId,eid)\n\t\t\t VALUES(?,?) '''\n\tcur = conn.cursor()\n\tcur.execute(sql, wroteRelation)\n\treturn cur.lastrowid", "def relationship(cls):\n return relationship.many_to_one(cls, 'relationship')", "def getUniversal(cls):\n temp = cls.A * cls.A\n l = []\n for i in temp:\n l.append(i)\n return Relation(*l,name = 'Universal Relation')", "def add_relationship(self, rel: ResourceRelationshipDescriptor) -> None:\n self._relationships[assert_not_none(rel.name)] = rel.bind(self)", "def add_relations(self, relations: List[Relation]):\n if not relations:\n return None\n labels_str = relations[0].rel_type\n prop_str = \",\\n\".join(\n [\"rel.%s = relation.%s\" % (k, k) for k in relations[0].data]\n )\n query = \"\"\"\n UNWIND $relations AS relation\n MATCH (e1 {id: relation.source_id}), (e2 {id: relation.target_id})\n MERGE (e1)-[rel:%s]->(e2)\n SET %s\n \"\"\" % (\n labels_str,\n prop_str,\n )\n rel_params = []\n for rel in relations:\n rd = dict(source_id=rel.source_id, target_id=rel.target_id, **rel.data)\n rel_params.append(rd)\n return self.create_tx(query, query_params={\"relations\": rel_params})", "def fetch_relation(self, address):\n if (self.from_id is not None) and (self.to_id is not None):\n new_neofj = NeoFJ(address=address)\n relations = new_neofj.get_two_node_relations(_id1=self.from_id, _id2=self.to_id, _f_relation=self.rel_type)\n relation = relations[0]\n self.rel_type = relation.type\n self.rel_dict = relation.properties", "def test_accepts_relation(self):\n self.Test.scope('foo', self.Test.relation().where('foo'))\n self.assertEqual(self.Test.foo().params['where'], ['foo'])" ]
[ "0.6203038", "0.61803585", "0.59780806", "0.5964317", "0.59502816", "0.5891712", "0.587325", "0.5832663", "0.5781058", "0.5695156", "0.56259346", "0.55553883", "0.54986626", "0.5438117", "0.54175186", "0.5413704", "0.5407033", "0.5391687", "0.53306854", "0.52989084", "0.5278789", "0.5220675", "0.52092254", "0.5195006", "0.5161451", "0.5141271", "0.5129459", "0.5112687", "0.50940514", "0.5089727" ]
0.7199586
0
Hydrate a ViewIO into the constructor arguments for View.
def hydrate_arguments(cls, view_io: ViewIO) -> Dict:
    return {
        **super().hydrate_arguments(view_io),
        # TODO: should we add this here? probably not: "software_system"
        "paper_size": view_io.paper_size,
        "automatic_layout": AutomaticLayout.hydrate(view_io.automatic_layout)
        if view_io.automatic_layout
        else None,
        "element_views": map(ElementView.hydrate, view_io.element_views),
        "relationship_views": map(
            RelationshipView.hydrate, view_io.relationship_views
        ),
    }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def setup_view(self, view, request, *args, **kwargs):\n view.request = request\n view.args = args\n view.kwargs = kwargs\n return view", "def setup_view(view, request, *args, **kwargs):\n\n view.request = request\n view.args = args\n view.kwargs = kwargs\n return view", "def initialize(self, view, request, *args, **kwargs):\n view.request = request\n view.args = args\n view.kwargs = kwargs\n return view", "def setup_view(view, request=None, *args, **kwargs):\n view.request = request\n view.args = args\n view.kwargs = kwargs\n return view", "def __init__(self, **kwargs):\n \n # init base\n super().__init__(**kwargs)\n self.name = \"Pero\"\n \n # init view\n self._view = None", "def __init__( viewname, view ):", "def init_view(self):\n self.view_map = self.ctx.clientmap", "def viewfactory(self):\n raise NotImplementedError()", "def get_view(self, request=None, args=None, kwargs=None, **initkwargs):\n view = self.view_class(**initkwargs)\n view.setup(request, *(args or ()), **(kwargs or {}))\n return view", "def deserialize(cls, iodata):\n return cls(**iodata.as_kwargs())", "def __init__(self, view: View, url: str, key: str, server_time_url: str, data_loader: DataLoader,\n data_writer: DataWriter, ai_class: str, ai_params):\n\n super().__init__(view)\n self.__url = url\n self.__key = key\n self.__server_time_url = server_time_url\n self.__data_loader = data_loader\n self.__data_writer = data_writer\n self.__ai = None\n self.__default_ai = None\n self.__ai_class = ai_class\n self.__ai_params = ai_params", "def createViews(views):\n ...", "def make_view(app, view_class=View, view_name='View', **kwargs):\n kwargs.update({'__app__': app})\n return type(view_name, (view_class, ), kwargs)", "def __init__(self, view, interactor, model, observer):\n\n # Set view and interactor to couple and process events\n self.view = view\n self.interactor= interactor\n\n # Set model and observer to couple and process events\n self.model = model\n self.observer = observer", "def __init__(self, view_name, cursor=None, schema=None):\n self.name = view_name\n self.type = 'view' # Saves using type() or isinstance\n self.columns = {}\n self.sql = ''\n self.triggers = {}\n if schema:\n self.schema = schema\n else:\n schema = None\n if cursor:\n self._get_view(cursor)", "def __init__(self, view, model):\n self.view = view\n self.view.set_controller(self)\n self.model = model", "def __init__(self, parent: View):\n super().__init__(parent)", "def mock_as_view(view, request, *args, **kwargs):\n view.request = request\n view.args = args\n view.kwargs = kwargs\n return view", "def __init__(self, views=None, page_load=None, mboxes=None, metrics=None):\n\n self._views = None\n self._page_load = None\n self._mboxes = None\n self._metrics = None\n self.discriminator = None\n\n if views is not None:\n self.views = views\n if page_load is not None:\n self.page_load = page_load\n if mboxes is not None:\n self.mboxes = mboxes\n if metrics is not None:\n self.metrics = metrics", "def __init__(\n self,\n *,\n software_system: Optional[SoftwareSystem] = None,\n paper_size: Optional[PaperSize] = None,\n automatic_layout: Optional[AutomaticLayout] = None,\n element_views: Optional[Iterable[ElementView]] = (),\n relationship_views: Optional[Iterable[RelationshipView]] = (),\n layout_merge_strategy: Optional[Any] = None,\n **kwargs,\n ):\n super().__init__(**kwargs)\n self.software_system = software_system\n self.software_system_id = software_system.id if software_system else None\n self.paper_size = paper_size\n self.automatic_layout = 
automatic_layout\n self.element_views: Set[ElementView] = set(element_views)\n self._relationship_views: Set[RelationshipView] = set(relationship_views)\n\n # TODO\n self.layout_merge_strategy = layout_merge_strategy", "def setup_view(view, request, *args, **kwargs):\n\n view.request = request\n view.args = args\n view.kwargs = kwargs\n setattr(request, \"session\", \"session\")\n messages = FallbackStorage(request)\n setattr(request, \"_messages\", messages)\n return view", "def initView(self):\n return {}", "def __init__(self):\n self.view = GuiView(self)\n return", "def from_dict(cls, dikt) -> 'DataView':\n return util.deserialize_model(dikt, cls)", "def openapi_view(view: View, info: ViewDeriverInfo) -> View:\n if info.options.get(\"openapi\"):\n\n def wrapper_view(context: Context, request: Request) -> Response:\n # We need this to be able to raise AttributeError if view code\n # accesses request.openapi_validated on a view that is marked\n # with openapi=False\n request.environ[\"pyramid_openapi3.enabled\"] = True\n\n # If view is marked with openapi=True (i.e. we are in this\n # function) and registry settings are not set to disable\n # validation, then do request/response validation\n request.environ[\"pyramid_openapi3.validate_request\"] = asbool(\n request.registry.settings.get(\n \"pyramid_openapi3.enable_request_validation\", True\n )\n )\n request.environ[\"pyramid_openapi3.validate_response\"] = asbool(\n request.registry.settings.get(\n \"pyramid_openapi3.enable_response_validation\", True\n )\n )\n\n # Request validation can happen already here, but response validation\n # needs to happen later in a tween\n if request.openapi_validated and request.openapi_validated.errors:\n raise RequestValidationError(errors=request.openapi_validated.errors)\n\n # Do the view\n return view(context, request)\n\n return wrapper_view\n return view", "def __init__(self, model, **kwargs):\n\n super().__init__(model)\n\n self._ut = UnscentedTransform(model, **kwargs)", "def __init__(self, *args, **kwargs):\n \n super(AvatarView, self).__init__(*args, **kwargs)\n \n wm = bpy.context.window_manager\n wm.verse_avatars.add()\n wm.verse_avatars[-1].node_id = self.id\n \n # Force redraw of 3D view\n ui.update_all_views(('VIEW_3D',))\n\n self.scene_node = None\n view_initialized = False\n self.visualized = True\n self.cur_area = None\n self.cur_space = None\n\n if self.id == self.session.avatar_id:\n # Initialize default values\n self.cur_screen = bpy.context.screen\n self.__class__.__my_view = self\n\n # Try to find current 3D view \n for area in bpy.context.screen.areas.values():\n if area.type == 'VIEW_3D':\n self.cur_area = area\n for space in area.spaces.values():\n if space.type == 'VIEW_3D':\n self.cur_space = space\n break\n break\n\n if self.cur_area.type == 'VIEW_3D' and self.cur_space.type == 'VIEW_3D':\n view_initialized = True\n # Create tag group containing information about view\n self.view_tg = vrsent.VerseTagGroup(\n node=self,\n custom_type=TG_INFO_CT)\n # Create tags with data of view to 3D view\n # Location\n self.location = AvatarLocation(\n tg=self.view_tg,\n value=tuple(self.cur_space.region_3d.view_location))\n # Rotation\n self.rotation = AvatarRotation(\n tg=self.view_tg,\n value=tuple(self.cur_space.region_3d.view_rotation))\n # Distance\n self.distance = AvatarDistance(\n tg=self.view_tg,\n value=(self.cur_space.region_3d.view_distance,))\n # Perspective/Orthogonal\n self.perspective = AvatarPerspective(\n tg=self.view_tg,\n 
value=(self.cur_space.region_3d.view_perspective,))\n # Width\n self.width = AvatarWidth(\n tg=self.view_tg,\n value=(self.cur_area.width,))\n # Height\n self.height = AvatarHeight(\n tg=self.view_tg,\n value=(self.cur_area.height,))\n # Lens\n self.lens = AvatarLens(\n tg=self.view_tg,\n value=(self.cur_space.lens,))\n # Get current Scene ID\n if bpy.context.scene.verse_node_id != -1:\n scene_node_id = bpy.context.scene.verse_node_id\n else:\n scene_node_id = 0\n self.scene_node_id = AvatarScene(\n tg=self.view_tg,\n value=(scene_node_id,))\n \n # TODO: check following code (may be not needed anymore)\n original_type = bpy.context.area.type\n bpy.context.area.type = 'VIEW_3D'\n bpy.ops.view3d.verse_avatar()\n bpy.context.area.type = original_type\n else:\n # TODO: Add some assert, because this should not happen.\n pass\n else:\n self.__class__.__other_views[self.id] = self\n \n if view_initialized is False:\n # Create tag group containing information about view\n self.view_tg = vrsent.VerseTagGroup(\n node=self,\n custom_type=TG_INFO_CT)\n # Create tags with data of view to 3D view\n self.location = AvatarLocation(tg=self.view_tg)\n self.rotation = AvatarRotation(tg=self.view_tg)\n self.distance = AvatarDistance(tg=self.view_tg)\n self.perspective = AvatarPerspective(tg=self.view_tg)\n self.width = AvatarWidth(tg=self.view_tg)\n self.height = AvatarHeight(tg=self.view_tg)\n self.lens = AvatarLens(tg=self.view_tg)\n self.scene_node_id = AvatarScene(tg=self.view_tg)", "def __init__(self, view_list=None, view_id=None, view_name=None, is_public=None, selected_field_list=None, available_field_list=None, local_vars_configuration=None): # noqa: E501 # noqa: E501\n if local_vars_configuration is None:\n local_vars_configuration = Configuration()\n self.local_vars_configuration = local_vars_configuration\n\n self._view_list = None\n self._view_id = None\n self._view_name = None\n self._is_public = None\n self._selected_field_list = None\n self._available_field_list = None\n self.discriminator = None\n\n if view_list is not None:\n self.view_list = view_list\n if view_id is not None:\n self.view_id = view_id\n if view_name is not None:\n self.view_name = view_name\n if is_public is not None:\n self.is_public = is_public\n if selected_field_list is not None:\n self.selected_field_list = selected_field_list\n if available_field_list is not None:\n self.available_field_list = available_field_list", "def config_to_view(self):\n raise NotImplementedError", "def __init__(self, type_name, args):\n super().__init__()\n self.type_name = type_name\n self.args = args\n self._projection = None" ]
[ "0.6241447", "0.62288916", "0.6197663", "0.6184966", "0.6031715", "0.57787627", "0.57195693", "0.5683868", "0.5610492", "0.5606469", "0.5535032", "0.5503669", "0.5500431", "0.54453224", "0.53948295", "0.53366286", "0.5272058", "0.5234212", "0.5200459", "0.5178498", "0.51314396", "0.5115602", "0.50692564", "0.50518304", "0.50273377", "0.50253713", "0.50216424", "0.50141335", "0.5006029", "0.5004541" ]
0.71726805
0
Return the relationship views contained by this view.
def relationship_views(self) -> Iterable[RelationshipView]:
    return set(self._relationship_views)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def views(self):\n return self._views", "def getViews(self):\n return list(self.__views)", "def child_views(self):\n return self.children", "def getViews(self):\n return list(self.__views.keys())", "def other_views(cls):\n return cls.__other_views", "def getReachableViews(self):\n return [self]", "def getViews(self):\n raise NotImplementedError()", "def related_view(self):\n return get_related_view(self.request)", "def selected_relationships(self):\n return self._selected_relationships", "def references(self):\n return self._get_related_resources(False)", "def get_views(self):\n return self._get_types_from_default_ns(View)", "def relations(self):\n return set(self.triples()[\"relation\"])", "def relationships(self):", "def getReachableViews(self):\n raise NotImplementedError()", "def associated_objects(self):\n return self._associated_objects", "def subresources(self):\n return self._get_related_resources(True)", "def trait_view_elements ( self ):\n return self.__class__.class_trait_view_elements()", "def views(self):\r\n return Views(self)", "def get_queryset(self):\n\n return Relationship.objects.filter(\n Q(from_person=self.request.user.person) |\n Q(to_person=self.request.user.person))", "def views(self):\r\n return resources.Views(self)", "def settable_relations(cls):\n return [r for r in cls.relations\n if getattr(cls, r).property.viewonly is False]", "def navigations(self):\n return Navigation.objects.filter(page=self)", "def get_all_ancestors(self, view_dict):\n result = []\n parent_id = self.__safe_dict_get(view_dict, 'parent', -1)\n if 0 <= parent_id < len(self.views):\n result.append(parent_id)\n result += self.get_all_ancestors(self.views[parent_id])\n return result", "def get_views(self):\n query = mssqlqueries.get_views()\n logger.info(u'Views query: %s', query)\n for tabular_result in self.execute_query(query):\n for row in tabular_result[0]:\n yield (row[0], row[1])", "def associatedObjects (self):\n return self.__associatedObjects", "def relations(cls):\n return [c.key for c in cls.__mapper__.iterate_properties\n if isinstance(c, RelationshipProperty)]", "def GetView(self):\r\n return self.model.GetView()", "def trait_views ( self, klass = None ):\n return self.__class__.__dict__[ ViewTraits ].filter_by( klass )", "def links(self):\n\n links = []\n for foreign_key in self.__table__.foreign_keys:\n column = foreign_key.column.name\n column_value = getattr(self, column, None)\n if column_value:\n table = foreign_key.column.table.name\n with app.app_context():\n endpoint = current_app.class_references[table]\n links.append({'rel': 'related', 'uri': '/{}/{}'.format(\n endpoint.__name__, column_value)})\n links.append({'rel': 'self', 'uri': self.resource_uri()})\n return links", "def nodes(self):\n return self._get_tree_queryset()" ]
[ "0.7207797", "0.71174836", "0.69009525", "0.68892264", "0.6824849", "0.6787118", "0.678279", "0.6724912", "0.65338415", "0.64794517", "0.6432522", "0.63637567", "0.6284232", "0.6266386", "0.6243599", "0.62266636", "0.6214038", "0.6122439", "0.6109116", "0.6089614", "0.6069089", "0.60596365", "0.60133076", "0.59696513", "0.5946312", "0.591625", "0.5905629", "0.5867909", "0.58613014", "0.58168674" ]
0.861147
0
Add all relationships involving the given element to this view.
def _add_relationships(self, element: Element) -> None:
    elements: Set[str] = {v.id for v in self.element_views}
    for relationship in element.get_efferent_relationships():
        if relationship.destination.id in elements:
            self._relationship_views.add(
                RelationshipView(relationship=relationship)
            )
    for relationship in element.get_afferent_relationships():
        if relationship.source.id in elements:
            self._relationship_views.add(
                RelationshipView(relationship=relationship)
            )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def relationships(self):", "def add_relationship(self, relationship):\n self.relationships[relationship.parent].append(relationship)", "def _add_element(self, element: Element, add_relationships: bool) -> ElementView:\n if element not in self.model:\n raise RuntimeError(\n f\"The element {element} does not exist in the model associated with \"\n f\"this view.\"\n )\n view = self.find_element_view(element=element)\n if view is None:\n view = ElementView(element=element)\n self.element_views.add(view)\n if add_relationships:\n self._add_relationships(element)\n return view", "def _add_relationships(\n self,\n obj: BaseContent,\n relationships: List[graph.Relationship],\n nodes_to: List[graph.Node],\n ) -> None:\n for node_to, rel in zip(nodes_to, relationships):\n if not rel.start_node or not rel.end_node:\n raise ValueError(\"Relationships must have start and end nodes\")\n obj.add_relationship(\n RelationshipType(rel.type),\n RelationshipData(\n relationship_type=rel.type,\n source_id=rel.start_node.element_id,\n target_id=rel.end_node.element_id,\n content_item_to=self._id_to_obj[node_to.element_id],\n is_direct=True,\n **rel,\n ),\n )", "def relations_to(self, end_node):", "def _add_relationships_to_objects(\n self,\n session: Session,\n result: Dict[str, Neo4jRelationshipResult],\n marketplace: Optional[MarketplaceVersions] = None,\n ):\n content_item_nodes: Set[str] = set()\n packs: List[Pack] = []\n nodes_to = []\n for res in result.values():\n nodes_to.extend(res.nodes_to)\n self._add_nodes_to_mapping(nodes_to)\n for id, res in result.items():\n obj = self._id_to_obj[id]\n self._add_relationships(obj, res.relationships, res.nodes_to)\n if isinstance(obj, Pack) and not obj.content_items:\n packs.append(obj)\n content_item_nodes.update(\n node.element_id\n for node, rel in zip(res.nodes_to, res.relationships)\n if rel.type == RelationshipType.IN_PACK\n )\n\n if isinstance(obj, Integration) and not obj.commands:\n obj.set_commands() # type: ignore[union-attr]\n\n if content_item_nodes:\n content_items_result = session.execute_read(\n _match_relationships, content_item_nodes, marketplace\n )\n self._add_relationships_to_objects(\n session, content_items_result, marketplace\n )\n\n # we need to set content items only after they are fully loaded\n for pack in packs:\n pack.set_content_items()", "def add_relationship(self, rel: ResourceRelationshipDescriptor) -> None:\n self._relationships[assert_not_none(rel.name)] = rel.bind(self)", "def append(self, element):\r\n if not isinstance(element, ApplicationRecordElement):\r\n raise ValueError('element must be an instance of ApplicationRecordElement')\r\n element.parent = self\r\n self.elements.append(element)", "def _remove_element(self, element: Element) -> None:\n if element not in self.model:\n raise RuntimeError(\n f\"The element {element} does not exist in the model associated with \"\n f\"this view.\"\n )\n self.element_views.add(ElementView(id=element.id))\n for element_view in list(self.element_views): # Copy as modifying as we go\n if element_view.id == element.id:\n self.element_views.remove(element_view)\n\n for relationship_view in list(self._relationship_views):\n if (\n relationship_view.relationship.source.id == element.id\n or relationship_view.relationship.destination.id == element.id\n ):\n self._relationship_views.remove(relationship_view)", "def add_relation(cls, row_id, rel_obj):\n obj = cls.query.filter_by(id=row_id).first()\n # obj = db.session.query(cls).filter_by(id=row_id).first()\n #print(type(obj))\n if 
cls.__name__ == 'Actor':\n obj.filmography.append(rel_obj)\n elif cls.__name__ == 'Movie':\n obj.cast.append(rel_obj)\n return commit(obj)", "def connect_elements(self, event: matplotlib.backend_bases.LocationEvent,\n element: 'FigureElement') -> None:\n if self.selected_element is None:\n self.selected_element = element\n element.add_extra_path_effect('selection',\n pe.Stroke(linewidth=5,\n foreground='b'))\n return\n graph = self._get_connected_graph(event.inaxes)\n element1 = self.graph_to_figure.inverse[self.selected_element][0]\n element2 = self.graph_to_figure.inverse[element][0]\n if self.selected_element.axes != element.axes:\n if event.guiEvent.CmdDown():\n log.debug('Adding Attribute Requirement.')\n self._add_attr_requirement(element1, element2)\n else:\n log.debug('Adding Mapping.')\n self._add_mapping(element1, element2)\n elif isinstance(element1, Vertex) and isinstance(element2, Vertex):\n log.debug('Connecting Vertices.')\n self._add_edge(graph, element1, element2)\n self.selected_element.remove_extra_path_effect('selection')\n self.selected_element = None\n self._redraw_graph()", "def _add_related(related, dep, all_related, index, connector=None):\n doc = {}\n doc[\"relationForm\"] = dep\n doc[\"rawName\"] = related\n doc[\"tokenIndex\"] = int(index)\n doc[\"offsetStart\"] = A.lookup[int(index)][\"start\"]\n doc[\"offsetEnd\"] = A.lookup[int(index)][\"end\"]\n doc[\"connector\"] = \"\" if connector is None else connector\n if not doc in all_related:\n all_related.append(doc)\n return all_related", "def _add_dominance_relation__to__nodes(self):\n dominating_dict = defaultdict(list)\n dominated_dict = defaultdict(list)\n for dom_rel_id in self._dominance_relation_ids:\n dominated_node_id = self.edges[dom_rel_id].target\n dominating_node_id = self.edges[dom_rel_id].source\n dominating_dict[dominating_node_id].append(dominated_node_id)\n dominated_dict[dominated_node_id].append(dominating_node_id)\n\n for dominating_node_id in dominating_dict:\n self.nodes[dominating_node_id].dominates = \\\n dominating_dict[dominating_node_id]\n for dominated_node_id in dominated_dict:\n self.nodes[dominated_node_id].dominated_by = \\\n dominated_dict[dominated_node_id]", "def relate(self, related):\n\n self._module._connection.relate(self, related)", "def _add_all_level_relationships(\n self,\n session: Session,\n node_ids: Iterable[str],\n relationship_type: RelationshipType,\n marketplace: MarketplaceVersions = None,\n ):\n relationships: Dict[str, Neo4jRelationshipResult] = session.execute_read(\n get_all_level_packs_relationships,\n relationship_type,\n node_ids,\n marketplace,\n True,\n )\n nodes_to = []\n for content_item_relationship in relationships.values():\n nodes_to.extend(content_item_relationship.nodes_to)\n self._add_nodes_to_mapping(nodes_to)\n\n for content_item_id, content_item_relationship in relationships.items():\n obj = self._id_to_obj[content_item_id]\n for node in content_item_relationship.nodes_to:\n target = self._id_to_obj[node.element_id]\n source_id = content_item_id\n target_id = node.element_id\n if relationship_type == RelationshipType.IMPORTS:\n # the import relationship is from the integration to the content item\n source_id = node.element_id\n target_id = content_item_id\n obj.add_relationship(\n relationship_type,\n RelationshipData(\n relationship_type=relationship_type,\n source_id=source_id,\n target_id=target_id,\n content_item_to=target,\n mandatorily=True,\n is_direct=False,\n ),\n )", "def relationship(self, relationship):\n\n self._relationship = 
relationship", "def _additem(self, relationship):\n rIds = [rel._rId for rel in self._values]\n if relationship._rId in rIds:\n tmpl = \"cannot add relationship with duplicate rId '%s'\"\n raise ValueError(tmpl % relationship._rId)\n self._values.append(relationship)\n self.__resequence()\n # register as observer of partname changes\n relationship._target.add_observer(self)", "def _populate_relationships(self, rec_curr):\n for relationship_type, goids in rec_curr.relationship.items():\n parent_recs = set([self[goid] for goid in goids]) \n rec_curr.relationship[relationship_type] = parent_recs # replace GO ID with GO Term record object\n for parent_rec in parent_recs:\n if relationship_type not in parent_rec.relationship_rev:\n parent_rec.relationship_rev[relationship_type] = set([rec_curr])\n else:\n parent_rec.relationship_rev[relationship_type].add(rec_curr)", "def _add_relationship(\n self,\n relationship: Relationship,\n *,\n description: Optional[str] = None,\n order: Optional[str] = None,\n response: bool = False,\n ) -> RelationshipView:\n if self.is_element_in_view(relationship.source) and self.is_element_in_view(\n relationship.destination\n ):\n view = self.find_relationship_view(\n relationship=relationship, description=description, response=response\n )\n if not view:\n view = RelationshipView(\n relationship=relationship,\n description=description,\n order=order,\n response=response,\n )\n self._relationship_views.add(view)\n return view", "def _do_relation(self):\n if self.chunks:\n ch = self.chunks[-1]\n for relation, role in ch.relations:\n if role == \"SBJ\" or role == \"OBJ\":\n self.relations[role][relation] = ch\n if ch.type in (\"VP\",):\n self.relations[ch.type][ch.relation] = ch", "def relationship(self):\r\n return relationships.Relationship(self)", "def graph(self):\n graph = nx.DiGraph()\n for name, joint in self.joints.items():\n graph.add_edge(*joint.connects, joint=name)\n return graph", "def aggregate_rules_relationships(cls, recommender_model):\n pass", "def relationship(self):\n return relationships.Relationship(self)", "def _find_relations(self, node, depth=0):\n depth += 1\n\n model = node.model\n opts = model._meta\n\n # determine relational fields to determine paths\n forward_fields = opts.fields\n reverse_fields = opts.get_all_related_objects()\n\n forward_o2o = filter(self._filter_one2one, forward_fields)\n reverse_o2o = filter(self._filter_related_one2one, reverse_fields)\n\n forward_fk = filter(self._filter_fk, forward_fields)\n reverse_fk = filter(self._filter_related_fk, reverse_fields)\n\n forward_m2m = filter(self._filter_m2m, opts.many_to_many)\n reverse_m2m = filter(self._filter_related_m2m,\n opts.get_all_related_many_to_many_objects())\n\n # iterate m2m relations\n for f in forward_m2m:\n kwargs = {\n 'parent': node,\n 'model': f.rel.to,\n 'relation': 'manytomany',\n 'reverse': False,\n 'related_name': f.name,\n 'accessor_name': f.name,\n 'nullable': True,\n 'depth': depth,\n }\n self._add_node(**kwargs)\n\n # iterate over related m2m fields\n for r in reverse_m2m:\n kwargs = {\n 'parent': node,\n 'model': r.model,\n 'relation': 'manytomany',\n 'reverse': True,\n 'related_name': r.field.related_query_name(),\n 'accessor_name': r.get_accessor_name(),\n 'nullable': True,\n 'depth': depth,\n }\n self._add_node(**kwargs)\n\n # iterate over one2one fields\n for f in forward_o2o:\n kwargs = {\n 'parent': node,\n 'model': f.rel.to,\n 'relation': 'onetoone',\n 'reverse': False,\n 'related_name': f.name,\n 'accessor_name': f.name,\n 'nullable': 
False,\n 'depth': depth,\n }\n self._add_node(**kwargs)\n\n # iterate over related one2one fields\n for r in reverse_o2o:\n kwargs = {\n 'parent': node,\n 'model': r.model,\n 'relation': 'onetoone',\n 'reverse': True,\n 'related_name': r.field.related_query_name(),\n 'accessor_name': r.get_accessor_name(),\n 'nullable': False,\n 'depth': depth,\n }\n self._add_node(**kwargs)\n\n # iterate over fk fields\n for f in forward_fk:\n kwargs = {\n 'parent': node,\n 'model': f.rel.to,\n 'relation': 'foreignkey',\n 'reverse': False,\n 'related_name': f.name,\n 'accessor_name': f.name,\n 'nullable': f.null,\n 'depth': depth,\n }\n self._add_node(**kwargs)\n\n # iterate over related foreign keys\n for r in reverse_fk:\n kwargs = {\n 'parent': node,\n 'model': r.model,\n 'relation': 'foreignkey',\n 'reverse': True,\n 'related_name': r.field.related_query_name(),\n 'accessor_name': r.get_accessor_name(),\n 'nullable': True,\n 'depth': depth,\n }\n self._add_node(**kwargs)\n\n return node", "def relationship_views(self) -> Iterable[RelationshipView]:\n return set(self._relationship_views)", "def fixRelations (self):\n\t\tnodes = self.getFieldElements (\"relation\")\n\t\tif not nodes: return\n\t\t\n\t\tprint \"\\n%s\" % self.getId()\n\t\tfor r in nodes:\n\t\t\tvalue = XmlUtils.getText(r)\n\t\t\tif not value: return\n\t\t\tXmlUtils.setText (r,\"\")\n\t\t\tif value.startswith (\"http://\"):\n\t\t\t\tr.setAttribute (\"type\", \"Has part\")\n\t\t\t\tr.setAttribute (\"url\", value)\n\t\t\telse:\n\t\t\t\tr.setAttribute (\"type\", \"Is related\")\n\t\t\t\tr.setAttribute (\"title\", value)\n\t\t\tprint r.toxml()\n\t\tif 0:\n\t\t\tself.write()\n\t\t\tprint \"wrote record\"", "def _setRelation(self, node):\n if getattr(self, \"relation\", None):\n element = etree.SubElement(node, 'relation')\n element.text = getattr(self, \"relation\")", "def add_fact_relationship(self, table_from: str, entry_from: dict, table_to: str, entry_to: dict):\n\n table_lut = {'p': \"10\", # procedure\n 'c': \"19\", # condition\n 'm': \"21\", # measurement\n 'o': \"27\"} # observation\n self.fact_relations.append((table_lut[table_from], entry_from, table_lut[table_to], entry_to))", "def _add_child_elements_recursive(self, element):\n\n element_id = element.get_id()\n\n element.set_parent_element(\n self.get_root_element().get_element_by_element_id(\n element.get_parent_id()\n )\n )\n\n for child_element in self.get_child_elements_by_id(element_id):\n element.add_child_element(child_element)\n self._add_child_elements_recursive(child_element)" ]
[ "0.63140434", "0.6036017", "0.6033511", "0.5894598", "0.54866344", "0.5441199", "0.5249128", "0.5248479", "0.52381414", "0.52308077", "0.52253497", "0.52209246", "0.5182565", "0.5141708", "0.5135262", "0.51191837", "0.5117873", "0.5116992", "0.51102453", "0.50909835", "0.5083427", "0.5074861", "0.50335455", "0.50213856", "0.4999595", "0.49465072", "0.4940261", "0.49214292", "0.49185073", "0.48288178" ]
0.8321654
0
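The positive document in the record above shows the relationship-filtering logic in isolation. As an illustration only, the following self-contained sketch reproduces that efferent/afferent filtering with stand-in Element, Relationship, ElementView, RelationshipView, and View classes; the real library's interfaces are assumptions here and are reduced to the attributes the snippet actually touches.

from typing import List, Set


class Element:
    """Stand-in for the library's Element type (assumption): only what the snippet uses."""

    def __init__(self, id: str) -> None:
        self.id = id
        self.efferent: List["Relationship"] = []  # relationships leaving this element
        self.afferent: List["Relationship"] = []  # relationships arriving at this element

    def get_efferent_relationships(self) -> List["Relationship"]:
        return self.efferent

    def get_afferent_relationships(self) -> List["Relationship"]:
        return self.afferent


class Relationship:
    def __init__(self, source: Element, destination: Element) -> None:
        self.source = source
        self.destination = destination


class ElementView:
    def __init__(self, element: Element) -> None:
        self.id = element.id


class RelationshipView:
    def __init__(self, relationship: Relationship) -> None:
        self.relationship = relationship


class View:
    def __init__(self) -> None:
        self.element_views: Set[ElementView] = set()
        self._relationship_views: Set[RelationshipView] = set()

    def _add_relationships(self, element: Element) -> None:
        # Same filtering as the positive document: keep only relationships
        # whose other end already has an element view in this view.
        elements: Set[str] = {v.id for v in self.element_views}
        for relationship in element.get_efferent_relationships():
            if relationship.destination.id in elements:
                self._relationship_views.add(RelationshipView(relationship=relationship))
        for relationship in element.get_afferent_relationships():
            if relationship.source.id in elements:
                self._relationship_views.add(RelationshipView(relationship=relationship))


# "b" is in the view, "c" is not, so only the a -> b relationship gets a view.
a, b, c = Element("a"), Element("b"), Element("c")
ab, ac = Relationship(a, b), Relationship(a, c)
a.efferent.extend([ab, ac])
b.afferent.append(ab)
c.afferent.append(ac)

view = View()
view.element_views.update({ElementView(a), ElementView(b)})
view._add_relationships(a)
assert [rv.relationship for rv in view._relationship_views] == [ab]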
Copy the layout information from another view, including child views.
def copy_layout_information_from(self, source: "View") -> None:
    if not self.paper_size:
        self.paper_size = source.paper_size
    for source_element_view in source.element_views:
        destination_element_view = self.find_element_view(
            element=source_element_view.element
        )
        if destination_element_view:
            destination_element_view.copy_layout_information_from(
                source_element_view
            )
    for source_relationship_view in source.relationship_views:
        destintion_relationship_view = self.find_relationship_view(
            relationship=source_relationship_view.relationship
        )
        if destintion_relationship_view:
            destintion_relationship_view.copy_layout_information_from(
                source_relationship_view
            )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_layout( self ):", "def test_copying_dynamic_view_layout(empty_viewset):\n source_viewset = empty_viewset\n source_view = source_viewset.create_dynamic_view(key=\"dyn1\", description=\"test\")\n source_view.paper_size = PaperSize.A4_Landscape\n\n target_viewset = ViewSet(model=empty_viewset.model)\n\n # Check it's OK if there isn't a matching view in the target\n target_viewset.copy_layout_information_from(source_viewset)\n\n # Now try one where we have a match\n target_view = target_viewset.create_dynamic_view(key=\"dyn1\", description=\"test2\")\n assert target_view.paper_size is None\n target_viewset.copy_layout_information_from(source_viewset)\n assert target_view.paper_size == PaperSize.A4_Landscape", "def layout(self):\n \n # set size to child\n if self._view is not None:\n self._view.frame = (0, 0, self.width, self.height)", "def clone(self):\n return _libsbml.Layout_clone(self)", "def _do_layout(self):\n return", "def layout(self):\n pass", "def _generate_layout(self):\n\n pass", "def set_layout(layout):\r\n # Get active window and set reference to active view\r\n window = sublime.active_window()\r\n previous_active = window.active_view()\r\n\r\n # Do not set layout when disabled\r\n if get_value(S.KEY_DISABLE_LAYOUT):\r\n S.RESTORE_LAYOUT = window.get_layout()\r\n set_window_value('restore_layout', S.RESTORE_LAYOUT)\r\n S.RESTORE_INDEX = H.new_dictionary()\r\n set_window_value('restore_index', S.RESTORE_INDEX)\r\n return\r\n\r\n # Show debug layout\r\n if layout == 'debug':\r\n debug_layout = get_value(S.KEY_DEBUG_LAYOUT, S.LAYOUT_DEBUG)\r\n if window.get_layout() != debug_layout:\r\n # Save current layout\r\n S.RESTORE_LAYOUT = window.get_layout()\r\n set_window_value('restore_layout', S.RESTORE_LAYOUT)\r\n # Remember view indexes\r\n S.RESTORE_INDEX = H.new_dictionary()\r\n for view in window.views():\r\n view_id = \"%d\" % view.id()\r\n group, index = window.get_view_index(view)\r\n S.RESTORE_INDEX[view_id] = { \"group\": group, \"index\": index }\r\n set_window_value('restore_index', S.RESTORE_INDEX)\r\n # Set debug layout\r\n window.set_layout(S.LAYOUT_NORMAL)\r\n window.set_layout(debug_layout)\r\n # Show previous (single) layout\r\n else:\r\n # Get previous layout configuration\r\n if S.RESTORE_LAYOUT is None:\r\n S.RESTORE_LAYOUT = get_window_value('restore_layout', S.LAYOUT_NORMAL)\r\n if S.RESTORE_INDEX is None:\r\n S.RESTORE_INDEX = get_window_value('restore_index', {})\r\n # Restore layout\r\n window.set_layout(S.LAYOUT_NORMAL)\r\n window.set_layout(S.RESTORE_LAYOUT)\r\n for view in window.views():\r\n view_id = \"%d\" % view.id()\r\n # Set view indexes\r\n if view_id in H.dictionary_keys(S.RESTORE_INDEX):\r\n v = S.RESTORE_INDEX[view_id]\r\n window.set_view_index(view, v[\"group\"], v[\"index\"])\r\n\r\n # Restore focus to previous active view\r\n if not previous_active is None:\r\n window.focus_view(previous_active)", "def init_layout(self):\n pass", "def do_layout(self):\n self.define_panel_structure()\n self.layout_selection()\n self.layout_data_list()\n self.layout_batch()\n self.layout_button()", "def clone(self):\n return _libsbml.LayoutExtension_clone(self)", "def _playout(self, state):\n node = self._root", "def clone(self):\n return _libsbml.ListOfLayouts_clone(self)", "def gui_layout_view(self) -> List[List[sg.Element]]:\n return []", "def create_layout( self ):\n\n # XXX: debugging layout\n self.setStyleSheet( \"border: 1px solid black\" )\n\n selection_layout = QVBoxLayout()\n selection_layout.setContentsMargins( 0, 0, 0, 0 )\n 
selection_layout.setSpacing( 0 )\n selection_layout.addWidget( self.selectionView )\n\n selection_type_layout = QHBoxLayout()\n selection_type_layout.setContentsMargins( 0, 0, 0, 0 )\n selection_type_layout.setSpacing( 0 )\n selection_type_layout.addWidget( self.selectionBoxLabel )\n selection_type_layout.addWidget( self.selectionBox )\n selection_type_layout.setStretchFactor( self.selectionBox, 1 )\n\n selection_layout.addLayout( selection_type_layout )\n selection_layout.setStretchFactor( self.selectionView, 1 )\n\n info_layout = QVBoxLayout()\n info_layout.setContentsMargins( 0, 0, 0, 0 )\n info_layout.setSpacing( 0 )\n\n stats_layout = QGridLayout()\n stats_layout.setContentsMargins( 0, 0, 0, 0 )\n stats_layout.setVerticalSpacing( 1 )\n stats_layout.setHorizontalSpacing( 10 )\n\n stats_layout.addWidget( QLabel( \"State:\" ),\n 0, 0 )\n stats_layout.addWidget( self.infoStateLabel,\n 0, 1 )\n\n stats_layout.addWidget( QLabel( \"Art Records:\" ),\n 1, 0 )\n stats_layout.addWidget( self.infoSummaryLabel,\n 1, 1 )\n\n stats_layout.addWidget( QLabel( \"Location:\" ),\n 2, 0 )\n stats_layout.addWidget( self.infoLocationLabel,\n 2, 1 )\n\n stats_layout.addWidget( QLabel( \"Taken:\" ),\n 3, 0 )\n stats_layout.addWidget( self.infoTakenLabel,\n 3, 1 )\n\n stats_layout.addWidget( QLabel( \"Tags:\" ),\n 4, 0 )\n stats_layout.addWidget( self.infoTagsLabel,\n 4, 1 )\n\n stats_layout.setColumnStretch( 1, 1 )\n\n info_layout.addWidget( self.previewArea )\n info_layout.addLayout( stats_layout )\n info_layout.setStretchFactor( self.previewArea, 1 )\n\n self.centralWidget = QWidget()\n self.centralWidget.setLayout( info_layout )\n\n self.selection_dock.widget().setLayout( selection_layout )\n\n self.addDockWidget( Qt.LeftDockWidgetArea, self.selection_dock )\n\n self.setCentralWidget( self.centralWidget )", "def _createLayout(self) -> None:\n # layouts\n self._mainLayout = QVBoxLayout()\n self._fileViewLayout = QHBoxLayout()\n self._sideFileLayout = QVBoxLayout()\n self._addressBarLayout = QHBoxLayout()\n\n # side file view\n self._documentsButton = QPushButton('Go to Documents')\n self._desktopButton = QPushButton('Go to Desktop')\n self._sideFileView = QTreeView()\n self._sideFileView.setFixedWidth(250)\n self._sideModel = QFileSystemModel()\n self._sideModel.setRootPath(str(self._currPath))\n self._sideFileView.setModel(self._sideModel)\n for i in range(1, 4):\n self._sideFileView.hideColumn(i) # leave only directory names on the side file view\n self._sideFileView.setHeaderHidden(True)\n self._sideFileView.setFont(QFont('Consolas'))\n self._sideModel.setFilter(QDir.AllDirs | QDir.NoDotAndDotDot)\n self._sideFileLayout.addWidget(self._documentsButton)\n self._sideFileLayout.addWidget(self._desktopButton)\n self._sideFileLayout.addWidget(self._sideFileView)\n self._fileViewLayout.addLayout(self._sideFileLayout)\n\n # central file view\n self._mainFileView = CustomTreeView()\n self._mainFileView.setEditTriggers(QAbstractItemView.NoEditTriggers)\n self._mainFileView.setAlternatingRowColors(True)\n self._mainFileView.setSortingEnabled(True)\n self._mainFileView.setSelectionMode(QAbstractItemView.ExtendedSelection)\n self._model = QStandardItemModel()\n self._modelHeaders = ['Filename', 'Type', 'Modified On', 'Size', '']\n self._mainFileView.setModel(self._model)\n self._mainFileView.setFont(QFont('Consolas'))\n self._mainFileView.setRootIsDecorated(False)\n self._mainFileView.setItemsExpandable(False)\n self._mainFileView.setDragEnabled(True)\n self._mainFileView.setAcceptDrops(True)\n 
self._mainFileView.setDropIndicatorShown(True)\n self._mainFileView.header().setFirstSectionMovable(True)\n self._mainFileView.setFocusPolicy(Qt.NoFocus)\n self._fileViewLayout.addWidget(self._mainFileView)\n\n # navigation buttons\n self._goBackBtn = QToolButton()\n self._goUpBtn = QToolButton()\n self._goForwardBtn = QToolButton()\n self._addressBarLayout.addWidget(self._goBackBtn)\n self._addressBarLayout.addWidget(self._goUpBtn)\n self._addressBarLayout.addWidget(self._goForwardBtn)\n\n # address bar\n self._addressBar = QLineEdit()\n self._addressBarLayout.addWidget(self._addressBar)\n self._mainLayout.addLayout(self._addressBarLayout)\n self._mainLayout.addLayout(self._fileViewLayout)\n\n # main parent widget\n self._centralWidget = QWidget()\n self._centralWidget.setLayout(self._mainLayout)\n self.setCentralWidget(self._centralWidget)", "def __get_view_structure(self, view_dict):\n if 'view_structure' in view_dict:\n return view_dict['view_structure']\n width = DeviceState.get_view_width(view_dict)\n height = DeviceState.get_view_height(view_dict)\n class_name = DeviceState.__safe_dict_get(view_dict, 'class', \"None\")\n children = {}\n\n root_x = view_dict['bounds'][0][0]\n root_y = view_dict['bounds'][0][1]\n\n child_view_ids = self.__safe_dict_get(view_dict, 'children')\n if child_view_ids:\n for child_view_id in child_view_ids:\n child_view = self.views[child_view_id]\n child_x = child_view['bounds'][0][0]\n child_y = child_view['bounds'][0][1]\n relative_x, relative_y = child_x - root_x, child_y - root_y\n children[\"(%d,%d)\" % (relative_x, relative_y)] = self.__get_view_structure(child_view)\n\n view_structure = {\n \"%s(%d*%d)\" % (class_name, width, height): children\n }\n view_dict['view_structure'] = view_structure\n return view_structure", "def _do_layout(self):\n if self.stack_order == \"bottom_to_top\":\n components = (self.zoomed_plot, self.reference_plot)\n relative_sizes = (4 / 3., 2 / 3.)\n else:\n components = (self.reference_plot, self.zoomed_plot)\n relative_sizes = (2 / 3., 4 / 3.)\n if self.halign == \"left\":\n align = \"min\"\n elif self.halign == \"center\":\n align = \"center\"\n else:\n align = \"max\"\n #import pdb; pdb.set_trace()\n return self._do_stack_layout(components, relative_sizes, align)", "def layout(self, width, height):\n raise NotImplementedError", "def _init_default_layout(self):\n self._main_v_layout_ = QVBoxLayout(self)\n self._main_v_layout_.setContentsMargins(0, 0, 0, 0)\n self.setLayout(self._main_v_layout_)", "def _update_cytoscape_layout(layout):\n\n pass", "def use(self, layout):\n self._wid.setLayout(layout)\n return layout", "def update_vizualization_layout(self, new):\n self.stages[\"Connectome\"].define_inspect_outputs()\n self.stages[\"Connectome\"].config.subject = self.subject", "def do_relayout(self):\n # This method is called whenever a relayout is requested. By\n # default, this is when the layout children change. In that case\n # we just need to update the min and max sizes. We are a top\n # level window, so no one really cares about our size hint. 
\n self.update_minimum_size()\n self.update_maximum_size()", "def _update_view(self):\n NavigationToolbar2._update_view(self)\n\n self._myParent.evt_view_updated()\n\n return", "def update_view(self): \n raise NotImplementedError(\"Widget descendents MUST implement the update_view() method!\")", "def update_layouts(self):\n self.layouttreestore.clear()\n layouts = self.config.list_layouts()\n for layout in sorted(layouts, key=str.lower):\n if layout != \"default\":\n self.layouttreestore.append([layout])\n else:\n self.layouttreestore.prepend([layout])", "def make_layout(self):\n\n for h in range(0, self.num_layout_heads):\n self.set_sliding_window_layout(h)\n self.set_global_layout(h)\n\n self.check_and_propagate_first_head_layout()\n return self.layout", "def clone(self):\n return _libsbml.LayoutModelPlugin_clone(self)", "def make_layout(self):\n\n for h in range(0, self.num_layout_heads):\n self.set_random_layout(h)\n self.set_sliding_window_layout(h)\n self.set_global_layout_itc(h)\n\n self.check_and_propagate_first_head_layout()\n return self.layout" ]
[ "0.5935125", "0.5920447", "0.58474386", "0.5801851", "0.57843786", "0.56659687", "0.5598473", "0.54099834", "0.53438044", "0.53059226", "0.52955204", "0.5243661", "0.5236582", "0.51705223", "0.5160853", "0.5154584", "0.51248074", "0.510902", "0.50980395", "0.50895196", "0.5054778", "0.5040494", "0.5028298", "0.50263715", "0.50248307", "0.5010787", "0.49750158", "0.49680445", "0.49630457", "0.4948586" ]
0.7830946
0
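Taken together, each record above pairs a short natural-language query with its positive code document, a list of hard negatives, their scores, and a rank. As a hedged sketch only, the loader below shows one way such triplet records might be read back; the JSON-lines layout, the file name, and the exact field names ("query", "document", "negatives", "negative_scores") are assumptions about the serialization, not something the records themselves guarantee.

import json
from typing import Dict, Iterator, List, Tuple


def iter_triplets(path: str, max_negatives: int = 4) -> Iterator[Tuple[str, str, List[str]]]:
    # Assumed layout: one JSON object per line with "query", "document",
    # "negatives" and optionally "negative_scores" fields; adjust the names
    # if the corpus is stored differently (e.g. as parquet columns).
    with open(path, encoding="utf-8") as handle:
        for line in handle:
            if not line.strip():
                continue
            record: Dict = json.loads(line)
            query = record["query"]
            positive = record["document"]
            negatives = list(record.get("negatives", []))
            scores = [float(s) for s in record.get("negative_scores", [])]
            if negatives and len(scores) == len(negatives):
                # Put the highest-scoring (hardest) negatives first.
                ranked = sorted(zip(scores, negatives), key=lambda pair: pair[0], reverse=True)
                negatives = [negative for _, negative in ranked]
            yield query, positive, negatives[:max_negatives]


if __name__ == "__main__":
    for query, positive, negatives in iter_triplets("triplets.jsonl"):  # hypothetical file name
        print(query[:60], "->", len(negatives), "negatives kept")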