query (string, 9-9.05k chars) | document (string, 10-222k chars) | metadata (dict) | negatives (list of 30) | negative_scores (list of 30) | document_score (string, 4-10 chars) | document_rank (string, 2 classes)
---|---|---|---|---|---|---
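The columns above describe a retrieval-training dump: each row pairs a natural-language query with a positive code document, thirty mined negative documents, their similarity scores, and the positive document's score and rank. The sketch below shows one way such a dump could be inspected; the dataset identifier and split are placeholders, not taken from this page.

from datasets import load_dataset

# Placeholder dataset path and split; substitute the real identifier.
ds = load_dataset("org/code-retrieval-dump", split="train")

row = ds[0]
print(row["query"])                      # natural-language intent
print(len(row["negatives"]))             # 30 mined hard negatives
positive = float(row["document_score"])  # scores are stored as strings
best_negative = max(float(s) for s in row["negative_scores"])
print(positive, best_negative, positive > best_negative)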
Add a set of nodes to a role. The ROLE argument specifies the role to which the nodes will be added.
|
def role_add(role, nodes, node, node_vars, host_vars, extra):
    role_manager = get_role_manager()
    node += nodes
    nodes, node_vars, host_vars, extra_args = _split_vars(
        node, node_vars, host_vars, extra)
    if not nodes:
        raise ArgumentError('No nodes informed')

    added_nodes = role_manager.add_role(
        role, hosts_node_map=nodes, host_vars=host_vars,
        node_vars=node_vars, extra_args=extra_args)

    print(f"{len(added_nodes)} nodes were added to role {role}: {', '.join(sorted(added_nodes))}")
    return 0
|
{
  "objective": {
    "self": [],
    "paired": [],
    "triplet": [
      [
        "query",
        "document",
        "negatives"
      ]
    ]
  }
}
|
[
"def test_adding_node_multiple_roles(self):\n Nodes().nodes_discovered[0].checkbox.click()\n with RolesPanel() as r:\n r.controller.click()\n r.cinder.click()\n r.ceph_osd.click()\n Nodes().apply_changes.click()\n time.sleep(1)\n with Nodes() as n:\n self.assertTrue(n.env_summary.is_displayed())\n self.assertIn(ROLE_CONTROLLER, n.nodes[0].roles.text,\n 'Node first role')\n self.assertIn(ROLE_CINDER, n.nodes[0].roles.text,\n 'Node second role')\n self.assertIn(ROLE_CEPH, n.nodes[0].roles.text,\n 'Node third role')",
"def add_role(self, role, parents=[]):\r\n self._roles.setdefault(role, set())\r\n self._roles[role].update(parents)",
"async def addRoles(self, ctx: Context, person: Member, roles: Greedy[Role]):\n roles = remove_dupe_roles(roles)\n\n await person.add_roles(*roles)\n await ctx.send(f\"Adding {roles_str(person, roles)}\")",
"def add_user_roles(userid:str, *roles):",
"def put_node_roles(session, node_roles_data):\n # type: (Session, NodeRolesData) -> None\n if not session.network:\n raise ValueError(\"Network must be set to get node roles\")\n url_tail = \"/{}/{}/{}\".format(\n CoordConstsV2.RSC_NETWORKS, session.network, CoordConstsV2.RSC_NODE_ROLES\n )\n return _put_json(session, url_tail, node_roles_data)",
"async def add_roles(self, ctx: commands.Context, *roles: discord.Role):\n if not roles:\n return await ctx.send_help()\n errored = \"\"\n message = \"\"\n added = []\n already_added = []\n for role in roles:\n if role >= ctx.author.top_role:\n errored += (\n \"{role}: You can't set a role equal to or higher than your own.\\n\".format(\n role=role.name\n )\n )\n continue\n if role >= ctx.guild.me.top_role:\n errored += (\n \"{role}: You can't set a role that's equal to or higher than the \"\n \"bot.\\n\".format(role=role.name)\n )\n continue\n async with self.config.guild(ctx.guild).autoroles() as roles_list:\n if role.id not in roles_list:\n roles_list.append(role.id)\n added.append(role.name)\n else:\n already_added.append(role.name)\n message += errored\n if added:\n message += \"\\nAdded role(s): {roles}\".format(roles=humanize_list(added))\n if already_added:\n message += \"\\nRole(s) already added: {roles}\".format(\n roles=humanize_list(already_added)\n )\n if message:\n for line in pagify(message):\n await ctx.send(line)",
"def add_role(self, role):\n if role.name not in [r.name for r in self.roles]:\n return db[self.colNam].find_and_modify(query=dict(_id=self.id), update={'$push': {'roles': role.to_python()}})",
"def add(self, user, role=None, roles=None):\n # TODO(adriant): resolve the roles and users into id's\n # user_id = base.getid(user)\n user_id = user\n # role_id = role\n if role:\n params = {\n 'roles': [role]\n }\n elif roles:\n params = {\n 'roles': roles\n }\n\n route = '/openstack/users/%s/roles'\n url = route % (user_id)\n try:\n self._put(url, json=params, response_key=None)\n except exc.HTTPBadRequest as e:\n print(e.message)\n return False\n\n return True",
"def add_nodes(self, nodes):\n return self.manager.add_nodes(self, nodes)",
"async def addRole(self, ctx, *roles_to_add):\n already_present_roles = [] # roles that will be deleted from \"roles_to_add\"\n\n available_roles = open(\"assets/roles.txt\", \"r\").readlines()\n available_roles = [role.lower().strip() for role in available_roles]\n\n output_msg = \"\"\n\n for role_to_add in roles_to_add:\n for role in available_roles:\n if role_to_add.lower() == role:\n output_msg += f\"Failed to add {role_to_add}: role already exists.\\n\"\n already_present_roles.append(role_to_add)\n break\n\n for role in already_present_roles:\n roles_to_add.remove(role)\n\n if roles_to_add:\n with open(\"assets/roles.txt\", \"a\") as f:\n for role in roles_to_add:\n f.write(f\"{role}\\n\")\n output_msg += f\"{role} has been added successfully.\\n\"\n\n await ctx.send(output_msg)",
"def changeRole(self, node, role):",
"async def add(ctx, *args: commands.clean_content):\r\n if len(args) < 2:\r\n await ctx.send('Add takes 2+ parameters')\r\n return\r\n\r\n tgt_role = args[-1]\r\n if tgt_role.startswith('@'):\r\n tgt_role = tgt_role[1:]\r\n if not discord.utils.get(ctx.guild.roles, name=tgt_role):\r\n await ctx.send(f'Role {args[-1]} does not exist')\r\n return\r\n\r\n roles = list(args[:-1])\r\n\r\n for index, role in enumerate(roles):\r\n if role.startswith('@'):\r\n role = role[1:]\r\n roles[index] = role\r\n print(role)\r\n if not discord.utils.get(ctx.guild.roles, name=role):\r\n await ctx.send(f'Role {role} does not exist')\r\n return\r\n\r\n docid = db.insert({'guild': ctx.guild.id, 'roles': roles, 'target': tgt_role})\r\n await ctx.send(f'Rule {docid} created')\r\n await update_roles(ctx.guild)\r\n await check_guild_rules(ctx.guild)",
"async def add_role(\n client,\n event,\n user: ('user', 'User to add role to'),\n role: ('role', 'The role to give'),\n):\n # Check for permissions\n if not event.user_permissions.can_manage_roles:\n abort('You need `manage roles` permission to invoke this command.')\n \n if not event.guild.cached_permissions_for(client).can_manage_roles:\n abort('I need `manage roles` permission to execute this command.')\n \n if not event.user.has_higher_role_than(role):\n abort('You must have higher role than the role you are trying to give.')\n \n if not client.has_higher_role_than(role):\n abort('I must have higher role than the role you are trying to give.')\n \n # Using `.copy_to` on forms works as well.\n return ADD_ROLE_FORM.copy_with(\n title = f'Add role {role.name} to {user.full_name}',\n custom_id = f'add_role.{user.id}.{role.id}',\n )",
"def grant_role(self, role, principal_ids):",
"async def addroleall(self, ctx, role: discord.Role):\n muted_role = discord.utils.get(ctx.guild.roles, name=\"Muted\")\n punished_role = discord.utils.get(ctx.guild.roles, name=\"Punished\")\n\n if role > ctx.author.top_role:\n return await ctx.send(\n embed=discord.Embed(\n title=\"You don't have permission to add this role\",\n timestamp=datetime.datetime.utcnow(),\n colour=discord.Colour.darker_grey(),\n )\n )\n\n if role == muted_role or role == punished_role:\n return await ctx.send(\n embed=discord.Embed(\n title=f\"Can not assign *{role}* role using this command.\",\n description=\"For more information run ```.help addroleall```\",\n timestamp=datetime.datetime.utcnow(),\n colour=discord.Colour.red(),\n )\n )\n\n for i in ctx.guild.members:\n if not i.bot:\n await i.add_roles(role)\n\n await ctx.send(\n embed=discord.Embed(\n title=f\"*{role}* has been added to **{len(ctx.guild.members)}** members!\",\n timestamp=datetime.datetime.utcnow(),\n colour=discord.Colour.green(),\n )\n )",
"def add_role():\n role = roles.find_or_create_role(request.values.get('role_name', ''))\n user = users.get_or_404(int(request.values.get('user_id', '')))\n if not users.add_role_to_user(user, role):\n return {}, 500\n return {}",
"def test_edit_role_add_new_role(self):\n # Add node with controller role\n Nodes().nodes_discovered[0].checkbox.click()\n RolesPanel().controller.click()\n Nodes().apply_changes.click()\n time.sleep(1)\n # Add cinder role\n with Nodes() as n:\n n.nodes[0].checkbox.click()\n n.edit_roles.click()\n RolesPanel().cinder.click()\n Nodes().apply_changes.click()\n time.sleep(1)\n with Nodes() as n:\n self.assertIn(ROLE_CONTROLLER, n.nodes[0].roles.text,\n 'Controller role')\n self.assertIn(ROLE_CINDER, n.nodes[0].roles.text,\n 'Cinder role')",
"def test_adding_node_single_role(self):\n name = Nodes().nodes_discovered[0].name.text\n Nodes().nodes_discovered[0].checkbox.click()\n RolesPanel().controller.click()\n Nodes().apply_changes.click()\n time.sleep(1)\n with Nodes() as n:\n self.assertTrue(n.env_summary.is_displayed())\n self.assertEqual(len(n.nodes), 1, 'Nodes amount')\n self.assertEqual(n.nodes[0].name.text, name, 'Node name')\n self.assertIn(ROLE_CONTROLLER, n.nodes[0].roles.text, 'Node role')",
"def add_users(caller, role, *users):\r\n _check_caller_authority(caller, role)\r\n role.add_users(*users)",
"def createNodeRoleRelation(_session, _segment, _const):\n return createNode(_session, _segment, _const, \"role_relation\")",
"def addRole(self, role):\n self._client.addRole(role)",
"def _add_users_to_role(self, users, rolename):\n role = Role.objects.get(name=rolename, course_id=self.course.id)\n for user in users:\n role.users.add(user)",
"def roles(*args):\n env.salt_roles.extend(args)",
"def roles(self, roles):\n\n self._roles = roles",
"def roles(self, roles):\n\n self._roles = roles",
"def roles(self, roles):\n\n self._roles = roles",
"async def addrole(self, ctx, user: discord.Member=None, *, role=None):\r\n if user is None or role is None:\r\n return await ctx.send(\"Incorrect usage! *;addrole @user role*\")\r\n r = discord.utils.get(ctx.guild.roles, name=str(role))\r\n if r is None:\r\n return await ctx.send(f'{role} was not found')\r\n try:\r\n await user.add_roles(r)\r\n return await ctx.send(f\"**{str(user)}** has been given the role of **{role}** {self.bot.get_emoji(470063310386233344)}\")\r\n except discord.Forbidden:\r\n return await ctx.send(\"Bot does not have enough permissions to give roles.\")",
"def role_remove(role, nodes, node):\n role_manager = get_role_manager()\n node += nodes\n nodes, node_vars, host_vars, extra_args = _split_vars(node, [], [], [])\n\n if not nodes:\n raise ArgumentError('No nodes informed')\n\n if type(nodes) is list:\n d = defaultdict(list)\n for n in nodes:\n hosts = role_manager.get_role_node_hosts(role, n)\n if not hosts:\n raise NodeRoleError(n, role)\n for hname in hosts:\n d[hname].append(n)\n nodes = defaultdict_to_dict(d)\n else:\n nodes = nodes\n\n if not nodes:\n raise ValueError(f\"No nodes to remove from role {role}\")\n\n result = role_manager.remove_role(role, nodes)\n print(f\"{len(result)} nodes were removed from {role}: {', '.join(sorted(result))}\")\n return 0",
"def manage_addRole(self, role_id, title, description, RESPONSE=None,\n REQUEST=None):\n if not role_id:\n message = 'Please+provide+a+Role+ID'\n else:\n self.addRole(role_id, title, description)\n message = 'Role+added'\n\n if RESPONSE is not None:\n RESPONSE.redirect('%s/manage_roles?manage_tabs_message=%s' %\n (self.absolute_url(), message))",
"async def command_assign_role(self, context, role: str):\n try:\n await context.author.add_roles(discord.utils.get(\n context.guild.roles, name=role))\n await context.message.add_reaction('👍')\n except Exception as e:\n await context.message.add_reaction('👎')\n await context.send('Role could not be assigned')\n print(f'Errored in command_assign_role.', e)"
] |
[
"0.68646795",
"0.6732058",
"0.6718173",
"0.63641965",
"0.6343155",
"0.6333133",
"0.63255256",
"0.6093537",
"0.60662687",
"0.6046124",
"0.5994745",
"0.59777933",
"0.5960202",
"0.595892",
"0.58962196",
"0.58676285",
"0.57872",
"0.57861674",
"0.5776835",
"0.5765455",
"0.5741388",
"0.57406235",
"0.5732833",
"0.5718746",
"0.5718746",
"0.5718746",
"0.5707558",
"0.56732047",
"0.56576735",
"0.5644056"
] |
0.7983592
|
0
|
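The metadata cell of the row above declares a triplet objective over (query, document, negatives). A minimal sketch of expanding one such row into (anchor, positive, negative) training triplets under that objective; the function name is illustrative, not part of the dump.

def expand_triplets(row: dict):
    # One triplet per mined negative: the query anchors the positive
    # document against each of the 30 negative documents.
    for negative in row["negatives"]:
        yield row["query"], row["document"], negative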
Perform a group action on a set of nodes. The ROLE argument specifies the role for which the action will be performed.
|
def role_action(role, action, nodes, node, node_vars, host_vars, extra):
    role_manager = get_role_manager()
    node += nodes
    nodes, node_vars, host_vars, extra_args = _split_vars(
        node, node_vars, host_vars, extra)

    if not nodes:
        nodes = role_manager.get_all_role_nodes_hosts(role)
    else:
        if type(nodes) is list:
            d = defaultdict(list)
            for n in nodes:
                hosts = role_manager.get_role_node_hosts(role, n)
                if not hosts:
                    raise NodeRoleError(n, role)
                for hname in hosts:
                    d[hname].append(n)
            nodes = defaultdict_to_dict(d)
        else:
            nodes = nodes

    all_values = [n for v in nodes.values() for n in v]
    if not all_values:
        raise ValueError(f"No nodes to perform the action '{action}' of role {role}")

    result = role_manager.perform_action(
        role, action, hosts_node_map=nodes, host_vars=host_vars,
        node_vars=node_vars, extra_args=extra_args)

    if not result.ok:
        logger.error(f"Playbook for action {action} of role {role} did not "
                     f"execute successfully...")
        return 1

    print(f"Action {action} from role {role} was successfully performed!")
    return 0
|
{
  "objective": {
    "self": [],
    "paired": [],
    "triplet": [
      [
        "query",
        "document",
        "negatives"
      ]
    ]
  }
}
|
[
"def test_adding_node_multiple_roles(self):\n Nodes().nodes_discovered[0].checkbox.click()\n with RolesPanel() as r:\n r.controller.click()\n r.cinder.click()\n r.ceph_osd.click()\n Nodes().apply_changes.click()\n time.sleep(1)\n with Nodes() as n:\n self.assertTrue(n.env_summary.is_displayed())\n self.assertIn(ROLE_CONTROLLER, n.nodes[0].roles.text,\n 'Node first role')\n self.assertIn(ROLE_CINDER, n.nodes[0].roles.text,\n 'Node second role')\n self.assertIn(ROLE_CEPH, n.nodes[0].roles.text,\n 'Node third role')",
"def execute(self, context):\n global array_nodes\n sub_tree = bpy.data.node_groups.new('Armory group', 'ArmGroupTree') # creating subtree\n sub_tree.use_fake_user = True\n group_node = array_nodes[self.node_index]\n group_node.group_tree = sub_tree # link subtree to group node\n sub_tree.nodes.new('LNGroupInputsNode').location = (-250, 0) # create node for putting data into subtree\n sub_tree.nodes.new('LNGroupOutputsNode').location = (250, 0) # create node for getting data from subtree\n context.space_data.path.append(sub_tree, node=group_node)\n sub_tree.group_node_name = group_node.name\n return {'FINISHED'}",
"def add_node_group_acl(session, node_id=None, group_id=None, allow_read=False,\n allow_install=False, allow_uninstall=False, allow_reboot=False,\n allow_schedule=False, allow_wol=False, allow_snapshot_creation=False,\n allow_snapshot_removal=False, allow_snapshot_revert=False,\n allow_tag_creation=False, allow_tag_removal=False,\n date_created=datetime.now(), date_modified=datetime.now(),\n username='system_user'\n ):\n session = validate_session(session)\n date_created=datetime.now()\n group_for_node_exists = session.query(NodeGroupAccess).\\\n filter_by(group_id=group_id).\\\n filter_by(node_id=node_id).first()\n if node_id and group_id and not group_for_node_exists:\n try:\n add_acl = NodeGroupAccess(node_id, group_id=group_id,\n allow_read=allow_read, allow_install=allow_install,\n allow_uninstall=allow_uninstall, allow_reboot=allow_reboot,\n allow_schedule=allow_schedule, allow_wol=allow_wol,\n allow_snapshot_creation=allow_snapshot_creation,\n allow_snapshot_removal=allow_snapshot_removal,\n allow_snapshot_revert=allow_snapshot_revert,\n allow_tag_creation=allow_tag_creation,\n allow_tag_removal=allow_tag_removal,\n date_created=date_created, date_modified=date_modified\n )\n session.add(add_acl)\n session.commit()\n return({\n 'pass': True,\n 'message': 'Group ACL %s added for Node %s' % \\\n (group_id, node_id)\n })\n except Exception as e:\n session.rollback()\n return({\n 'pass': False,\n 'message': 'Failed to add ACL for Group %s on Node %s:%s' % \\\n (group_id, node_id, e)\n })",
"def test_group_by_roles(self):\n self._test_group_by('Roles', [1, 5])",
"def dictGetHierarchy_granted_via_role(self, node=None):\n\n user_name = f\"user_{getuid()}\"\n role_name = f\"role_{getuid()}\"\n\n if node is None:\n node = self.context.node\n\n with user(node, f\"{user_name}\"), role(node, f\"{role_name}\"):\n\n with When(\"I grant the role to the user\"):\n node.query(f\"GRANT {role_name} TO {user_name}\")\n\n Suite(run=dictGetHierarchy_check,\n examples=Examples(\"privilege on grant_target_name user_name\", [\n tuple(list(row)+[role_name,user_name]) for row in dictGetHierarchy_check.examples\n ], args=Args(name=\"check privilege={privilege}\", format_name=True)))",
"def grant_role(self, role, principal_ids):",
"def changeRole(self, node, role):",
"def cli_run_group(log_level, endpoint, email, password, org_name, grp_name):\n logging.basicConfig(\n level=log_level,\n format='%(levelname)s: %(message)s',\n )\n knex = Knex(endpoint)\n User(knex, email, password).login()\n org = Organization(knex, org_name).get()\n logger.info(f'Loaded {org}')\n grp = org.sample_group(grp_name).get()\n logger.info(f'Loaded {grp}')\n auto_metadata(list(grp.get_samples()), grp)\n logger.info(f'Processed Metadata')\n run_group(grp, lambda x: click.echo(x, err=True))",
"def update_node_group_acl(session, node_id=None, group_id=None,\n allow_install=False, allow_uninstall=False, allow_reboot=False,\n allow_schedule=False, allow_wol=False, allow_snapshot_creation=False,\n allow_snapshot_removal=False, allow_snapshot_revert=False,\n allow_tag_creation=False, allow_tag_removal=False, allow_read=False,\n date_modified=datetime.now(), username='system_user'\n ):\n session = validate_session(session)\n group = None\n if group_id and node_id:\n group = session.query(NodeGroupAccess).\\\n filter(NodeGroupAccess.group_id == group_id).\\\n filter(NodeGroupAccess.node_id == node_id).first()\n if group:\n try:\n group.allow_install = allow_install\n group.allow_uninstall = allow_uninstall\n group.allow_reboot = allow_reboot\n group.allow_schedule = allow_schedule\n group.allow_wol = allow_wol\n group.allow_snapshot_creation = allow_snapshot_creation\n group.allow_snapshot_removal = allow_snapshot_removal\n group.allow_snapshot_revert = allow_snapshot_revert\n group.allow_tag_creation = allow_tag_creation\n group.allow_tag_removal = allow_tag_removal\n group.allow_read = allow_read\n group.date_modified = date_modified\n session.commit()\n return({\n 'pass': True,\n 'message': 'ACL for Group %s was modified for Node %s' % \\\n (group_id, node_id)\n })\n except Exception as e:\n session.rollback()\n return({\n 'pass': False,\n 'message': 'Failed to modify ACL for Group %s on Node %s' % \\\n (group_id, node_id)\n })\n else:\n return({\n 'pass': False,\n 'message': 'Invalid group_id %s and or node_id %s' % \\\n (group_id, node_id)\n })",
"def create_groups(self, role):\n security_group_names = self._get_all_group_names()\n\n cluster_group_name = self.get_cluster_group_name()\n if not cluster_group_name in security_group_names:\n self.ec2Connection.create_security_group(cluster_group_name, \"Hadoop cluster (%s)\" % (self.name))\n self.ec2Connection.authorize_security_group(cluster_group_name, cluster_group_name)\n # Allow SSH from anywhere\n self.ec2Connection.authorize_security_group(cluster_group_name, ip_protocol=\"tcp\", from_port=22, to_port=22, cidr_ip=\"0.0.0.0/0\")\n\n role_group_name = self.group_name_for_role(role)\n if not role_group_name in security_group_names:\n self.ec2Connection.create_security_group(role_group_name, \"Hadoop %s (%s)\" % (role, self.name))",
"def role_add(role, nodes, node, node_vars, host_vars, extra):\n role_manager = get_role_manager()\n node += nodes\n nodes, node_vars, host_vars, extra_args = _split_vars(\n node, node_vars, host_vars, extra)\n if not nodes:\n raise ArgumentError('No nodes informed')\n\n added_nodes = role_manager.add_role(\n role, hosts_node_map=nodes, host_vars=host_vars,\n node_vars=node_vars, extra_args=extra_args)\n\n print(f\"{len(added_nodes)} nodes were added to role {role}: {', '.join(sorted(added_nodes))}\")\n return 0",
"def set_role(userid, role, group, request=None):",
"def __call__(self, *args: FParams.args, **kwargs: FParams.kwargs) -> DAGNode:\n return self._create_task_group(TaskGroup, *args, **kwargs)",
"def pgroup(pynodes, world = False, re = \"\", suffix = \"\"):\n # Initiate return variable\n output = []\n # Filter supplied pynodes, if equal to 0 then return false\n if len(pynodes) == 0:\n return output\n # Group created on each object transformation\n if not world:\n for o in pynodes:\n # Name var\n the_name = o.name()\n # Replace object name if any\n if re != \"\":\n the_name = the_name.replace(re, suffix)\n else:\n the_name = the_name + suffix\n # Create group for each specified PyNode\n grp = pm.group(empty = True, name = the_name)\n # Align the pgroup to each PyNode transformation\n transformation.align(grp, o, mode = 'transform')\n # Get object parent\n parent = o.getParent()\n # If the object have parent,\n # Parent the group to object parent\n if parent:\n grp.setParent(parent)\n # Parent the object to pgroup\n o.setParent(grp)\n # Collect group to output\n output.append(grp)\n\n else:\n # Name var\n the_name = pynodes[0].name()\n # Replace object name if any\n if re != \"\":\n the_name = the_name.replace(re, suffix)\n else:\n the_name = the_name + suffix\n # Create single group\n grp = pm.group(empty = True, name = the_name)\n # Collect group to output\n output.append(grp)\n # Parent all specified PyNodes to pgroup\n pm.parent(pynodes, grp)\n\n return output",
"def add_users_to_groups(output=True):\n\n for group in DEFAULT_GROUPS:\n user = User.objects.get(username=group)\n role_title = Group.objects.get(name=group)\n user.groups.add(role_title)",
"def grant_grp_access ( ec2_conn, incoming_grps, tgt_grp, port, protocol = 'tcp' ) :\n for grp in incoming_grps :\n if not does_grp_rule_exist( tgt_grp, grp, port, port, protocol ) :\n tgt_grp.authorize( ip_protocol = protocol,\n from_port = port,\n to_port = port,\n src_group = grp )\n ec2_conn.authorize_security_group_egress( group_id = grp.id,\n ip_protocol = protocol,\n from_port = port,\n to_port = port,\n src_group_id = tgt_grp.id )",
"def execute(self, context):\n\n # go to subtree, select all except input and output groups and mark nodes to be copied\n group_node = context.active_node\n sub_tree = group_node.group_tree\n\n if len(self.conflicts) > 0:\n self._resolve_conflicts(sub_tree, group_node.get_tree())\n\n bpy.ops.arm.edit_group_tree(node_index=group_node.get_id_str())\n [setattr(n, 'select', False) for n in sub_tree.nodes]\n group_nodes_filter = filter(lambda n: n.bl_idname not in {'LNGroupInputsNode', 'LNGroupOutputsNode'}, sub_tree.nodes)\n for node in group_nodes_filter:\n node.select = True\n node['sub_node_name'] = node.name # this will be copied within the nodes\n\n # the attribute should be empty in destination tree\n tree = context.space_data.path[-2].node_tree\n for node in tree.nodes:\n if 'sub_node_name' in node:\n del node['sub_node_name']\n\n # Frames can't be just copied because they do not have absolute location, but they can be recreated\n frame_names = {n.name for n in sub_tree.nodes if n.select and n.bl_idname == 'NodeFrame'}\n [setattr(n, 'select', False) for n in sub_tree.nodes if n.bl_idname == 'NodeFrame']\n\n if any(n for n in sub_tree.nodes if n.select): # if no selection copy operator will raise error\n # copy and past nodes into group tree\n bpy.ops.node.clipboard_copy()\n context.space_data.path.pop()\n bpy.ops.node.clipboard_paste() # this will deselect all and select only pasted nodes\n\n # move nodes in group node center\n tree_select_nodes = [n for n in tree.nodes if n.select]\n center = reduce(lambda v1, v2: v1 + v2,\n [Vector(ArmLogicTreeNode.absolute_location(n)) for n in tree_select_nodes]) / len(tree_select_nodes)\n [setattr(n, 'location', n.location - (center - group_node.location)) for n in tree_select_nodes]\n\n # recreate frames\n node_name_mapping = {n['sub_node_name']: n.name for n in tree.nodes if 'sub_node_name' in n}\n ArmAddGroupTreeFromSelected.recreate_frames(sub_tree, tree, frame_names, node_name_mapping)\n else:\n context.space_data.path.pop() # should exit from subtree anywhere\n\n # delete group node\n tree.nodes.remove(group_node)\n for node in tree.nodes:\n if 'sub_node_name' in node:\n del node['sub_node_name']\n\n tree.update()\n\n return {'FINISHED'}",
"def request_access_to_groups(self, ceph):\n for ceph_group in (\"volumes\", \"images\", \"vms\"):\n ceph.request_access_to_group(\n name=ceph_group,\n object_prefix_permissions={\"class-read\": [\"rbd_children\"]},\n permission=\"rwx\",\n )",
"def grp(self, grpNode):\n\t\tself._grp = grpNode",
"def delete_groups(self, roles):\n security_group_names = self._get_all_group_names()\n\n for role in roles:\n role_group_name = self.group_name_for_role(role)\n if role_group_name in security_group_names:\n self.ec2Connection.delete_security_group(role_group_name)\n cluster_group_name = self.get_cluster_group_name()\n if cluster_group_name in security_group_names:\n self.ec2Connection.delete_security_group(cluster_group_name)",
"async def command_rolecall(self, context):\n print(self._fetch_category_roles(context))\n print(self._fetch_category_roles(context, COSMETIC_CATEGORY_NAME))",
"def can_set_role(userid, role, group):",
"async def roles(self, ctx, *, role: Fuzzy[Selfrole] = None):\n\n if role:\n await self._toggle_role(ctx, role)\n else:\n await self._list_all_roles(ctx)",
"async def toggle_group(role_title, ctx):\n if ctx.guild is None:\n return 'whisper'\n server_roles = ctx.guild.roles\n #print(\"Server roles\", server_roles)\n user_roles = ctx.author.roles\n #print(\"Author roles\", user_roles)\n\n role_id = \"\"\n\n #Finding the role on the server. If it doesn't exist, we'll let the user know.\n found_role = False\n role_id_index = ''\n for i in server_roles:\n #print(i.name.lower())\n if i.name.lower() == role_title.lower(): #.lower is for consistency\n role_id = i\n found_role = True\n try:\n role_id_index = user_roles.index(i)\n except:\n pass\n\n if not found_role:\n return \"role not found\"\n else:\n if role_id in user_roles:\n # User has this role, need to remove it.\n user_roles.pop(role_id_index)\n await ctx.author.edit(roles=user_roles, reason=\"Automated role removal requested by user\")\n return \"removed\"\n else:\n # User does not have this role\n user_roles.append(role_id)\n await ctx.author.edit(roles=user_roles, reason=\"Automated role add requested by user\")\n return \"added\"",
"def group_list(request, format=None):\n if request.method == 'GET':\n snippets = RoleList.objects.all()\n serializer = GroupSerializer(snippets, many=True)\n return Response(serializer.data)\n elif request.method == 'POST':\n if not request.user.has_perm('ops.change_group'):\n return Response(status=status.HTTP_403_FORBIDDEN)\n serializer = GroupSerializer(data=request.data)\n if serializer.is_valid():\n serializer.save()\n recordAssets.delay(user=str(request.user),\n content=\"添加用户组:{group_name}\".format(group_name=request.data.get(\"name\")), type=\"group\",\n id=serializer.data.get('id'))\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)",
"def test_adding_node_single_role(self):\n name = Nodes().nodes_discovered[0].name.text\n Nodes().nodes_discovered[0].checkbox.click()\n RolesPanel().controller.click()\n Nodes().apply_changes.click()\n time.sleep(1)\n with Nodes() as n:\n self.assertTrue(n.env_summary.is_displayed())\n self.assertEqual(len(n.nodes), 1, 'Nodes amount')\n self.assertEqual(n.nodes[0].name.text, name, 'Node name')\n self.assertIn(ROLE_CONTROLLER, n.nodes[0].roles.text, 'Node role')",
"def menu_grupos(self):\r\n print('=== Gestão de grupos ===')\r\n print('\\nEscolha uma das opções abaixo.')\r\n print('1. Listar grupo')\r\n print('2. Criar grupo')\r\n print('3. Remover grupo')\r\n print('4. Adicionar contato a grupo')\r\n print('5. Remover contato de grupo')\r\n print('6. Voltar')",
"def spawn_clusters(trajgroup, assignment_file, cluspath_dir):\n traj_inds, totalclusters = load_clusteringresults(assignment_file)\n\n all_clusters = []\n\n for i in range(0, totalclusters):\n # Get cluster number and pick out member trajectories\n cluster_num = i + 1\n trajlist = [trajgroup.trajectories[int(j)] for j in traj_inds[i]]\n\n # Get the cluster path\n cluspath_fname = ('C' + str(cluster_num) + '_' +\n str(totalclusters) + 'mean.tdump')\n cluspath_file = os.path.join(cluspath_dir, cluspath_fname)\n data, pathdata, header, datetime, _ = load_hysplitfile(cluspath_file)\n\n # Make sure longitudes are -180 to 180\n pathdata[:, 1] = np.where(pathdata[:, 1] > 180.0,\n pathdata[:, 1] - 360.0,\n pathdata[:, 1])\n\n # Make cluster\n clusterobj = Cluster(data, pathdata, datetime, header, trajlist,\n cluster_num)\n\n all_clusters.append(clusterobj)\n\n clustergroup = ClusterGroup(all_clusters)\n\n return clustergroup",
"def create_groups():\n groups = [\"iDRAC-Administrators\", \"iDRAC-Operators\", \"iDRAC-Readonly\"]\n group_priviledges = [\"0x000001ff\", \"0x000000f9\", \"0x00000001\"]\n for host in online_hosts:\n for index in [1,2,3]:\n print index,\" \", groups[index-1]\n with settings(warn_only=True):\n\n result1 = sudo(\"racadm5 -r \"+host+\" -u root -p \"+PASSWORD+\" config -g cfgStandardSchema -i \"+str(index) +\" -o cfgSSADRoleGroupName \"+groups[index-1])\n if result1.failed:\n logging.error(\"Host: [ \"+host+\" ] : \" + \"Configuration for RoleGroupName failed \")\n\n result2 = sudo(\"racadm5 -r \"+host+\" -u root -p \"+PASSWORD+\" config -g cfgStandardSchema -i \"+str(index) +\" -o cfgSSADRoleGroupDomain corp.inmobi.com\")\n if result2.failed:\n logging.error(\"Host: [ \"+host+\" ] : \" + \"Configuration for RoleGroupDomain failed \")\n\n result3 = sudo(\"racadm5 -r \"+host+\" -u root -p \"+PASSWORD+\" config -g cfgStandardSchema -i \"+str(index) +\" -o cfgSSADRoleGroupPrivilege \"+ group_priviledges[index-1])\n if result3.failed:\n logging.error(\"Host: [ \"+host+\" ] : \" + \"Configuration for RoleGroupPriviledge failed \")",
"async def group(ctx, *, new_group=None):\n if ctx.message.channel.name.lower() not in bot_channels:\n return\n\n # Can't be group-less\n if new_group is None:\n new_group = random.choice(changeable_groups)\n new_group = new_group.lower()\n author = ctx.message.author\n member_roles = author.roles\n server_roles = ctx.message.server.roles\n\n member_allowed = discord.utils.find(lambda r: r.name.lower() == required_role, member_roles)\n\n if not member_allowed:\n need_citizen = \"You must be a member of the {0} role to join a color group\"\n await amor_manager.say(need_citizen.format(required_role.title()))\n return\n\n if new_group in changeable_groups:\n # Remove the old group the user was in\n new_roles = [r for r in member_roles if not r.name.lower() in changeable_groups]\n # Get the proper object for the user's new group\n role = discord.utils.find(lambda r: r.name.lower() == new_group, server_roles)\n if role is not None:\n new_roles.append(role)\n await(amor_manager.replace_roles(author, *new_roles))\n await amor_manager.say('{0} moved to group {1}'.format(author.name, new_group))\n else:\n suggest = random.choice(changeable_groups)\n cant_join = \"`{0}` is not a color group you're allowed to join. Why not try `{1}`\"\n await amor_manager.say(cant_join.format(new_group, suggest))"
] |
[
"0.542813",
"0.54093146",
"0.5217608",
"0.514884",
"0.51022536",
"0.50641006",
"0.5063021",
"0.49907935",
"0.4959319",
"0.49530065",
"0.4937757",
"0.49216166",
"0.4792276",
"0.4791709",
"0.47832346",
"0.47490916",
"0.47359744",
"0.47330654",
"0.4713322",
"0.46839148",
"0.46478587",
"0.4639862",
"0.46322727",
"0.46248925",
"0.46031126",
"0.45905983",
"0.45782554",
"0.45521033",
"0.4540935",
"0.45345825"
] |
0.6100199
|
0
|
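role_action above (and the similar role_remove snippet among the earlier negatives) calls a defaultdict_to_dict helper to turn the host-to-nodes mapping built with collections.defaultdict back into a plain dict. Its implementation is not shown in this dump; a minimal sketch of what such a helper could look like:

from collections import defaultdict

def defaultdict_to_dict(d):
    # Hypothetical helper: materialize a defaultdict into a regular dict.
    return {host: list(nodes) for host, nodes in d.items()}

hosts = defaultdict(list)
hosts["host-1"].append("node-a")
print(defaultdict_to_dict(hosts))  # {'host-1': ['node-a']}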
Print the roles in the role path. The ROLE argument filters for specific roles.
|
def role_list(role, detailed, indent, quiet):
    if quiet and detailed:
        raise ValueError(f"Options `detailed` and `quiet` are mutually exclusive")

    role_manager = get_role_manager()
    roles = role_manager.roles if not role else {
        rname: r
        for rname, r in role_manager.roles.items() if rname in role
    }

    for role_name in sorted(roles.keys()):
        if quiet:
            print(role_name)
            continue
        role_info = roles[role_name]
        if not detailed:
            print(f"* name: {role_name}")
            print(f"  Has {len(role_info.actions)} actions and "
                  f"{len(role_info.hosts)} hosts defined")
            print(f"  actions: {', '.join(sorted(role_info.actions.keys()))}")
            print(f"  hosts: {', '.join(sorted(role_info.hosts))}")
        else:
            print(f"{'-' * 20} ROLE: `{role_name}` {'-' * 20}")
            print(f"{yaml.dump(asdict(role_info), sort_keys=True, indent=indent)}")
            print(f"{'-' * 80}")
        print()

    if not quiet:
        print(f"Listed {len(roles)} roles")
    return 0
|
{
  "objective": {
    "self": [],
    "paired": [],
    "triplet": [
      [
        "query",
        "document",
        "negatives"
      ]
    ]
  }
}
|
[
"def get_roles(role):",
"async def command_rolecall(self, context):\n print(self._fetch_category_roles(context))\n print(self._fetch_category_roles(context, COSMETIC_CATEGORY_NAME))",
"def test_list_roles(self):\n pass",
"async def roles(self, ctx, *, role: Fuzzy[Selfrole] = None):\n\n if role:\n await self._toggle_role(ctx, role)\n else:\n await self._list_all_roles(ctx)",
"def present_roles(self):\n print(\"User\" + str(self.unique_id) + \": roles=\")\n for group in self._roles:\n print(\"\\tGroup\" + str(group) + \" -> [\"\n + self.get_role_from_type(group, roles_influence) + \", \"\n + self.get_role_from_type(group, roles_neighbors) + \", \"\n + self.get_role_from_type(group, roles_activities) + \", \"\n + self.get_role_from_type(group, roles_attitude) + \"]\")\n print('')",
"def main_role_list(\n client: CitusCloudMgmt,\n **opts: tp.Any\n) -> None:\n\n roles = client.list_roles(opts[\"formation\"])\n click.echo(\n tabulate.tabulate(\n [{\"Name\": i.name, \"Id\": i.id_} for i in roles],\n headers=\"keys\",\n ),\n )",
"def test_list_namespaced_role(self):\n pass",
"def list(self, **kwargs):\n # TODO(adriant): Look up user by name/id\n url = '/openstack/users/%s/roles' % kwargs['user']\n return self._list(url, 'roles')",
"def list(self, **kwargs):\n params = {}\n url = '/openstack/roles?%(params)s' % {\n 'params': parse.urlencode(params, True)\n }\n return self._list(url, 'roles')",
"def test_list_role(self):\n pass",
"def list_role(self, **kwargs):\n\n all_params = ['pretty', 'label_selector', 'field_selector', 'watch', 'resource_version', 'timeout_seconds']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method list_role\" % key\n )\n params[key] = val\n del params['kwargs']\n\n\n resource_path = '/oapi/v1/roles'.replace('{format}', 'json')\n path_params = {}\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n if 'label_selector' in params:\n query_params['labelSelector'] = params['label_selector']\n if 'field_selector' in params:\n query_params['fieldSelector'] = params['field_selector']\n if 'watch' in params:\n query_params['watch'] = params['watch']\n if 'resource_version' in params:\n query_params['resourceVersion'] = params['resource_version']\n if 'timeout_seconds' in params:\n query_params['timeoutSeconds'] = params['timeout_seconds']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/yaml'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['*/*'])\n\n # Authentication setting\n auth_settings = []\n\n response = self.api_client.call_api(resource_path, 'GET',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='V1RoleList',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response",
"def process_roles_path(pathname, ctx):\n if not os.path.isdir(pathname) or os.path.islink(pathname):\n return\n for item_name, role_path in os_listdir(pathname):\n if item_name.startswith(\".git\"):\n continue\n if ctx.is_role(role_path):\n ctx.found_role_name = item_name\n process_role(role_path, True, ctx)\n elif ctx.in_collection_integration_tests:\n process_ansible_yml_path(role_path, ctx)\n else:\n logging.warning(f\"Unexpected item {role_path} - not a role\")",
"def test_ipam_roles_list(self):\n pass",
"def roles_str(person: Member, roles: commands.Greedy[Role]) -> str:\n message = \"role\" if len(roles) == 1 else \"roles\"\n roleIds = [role.name for role in roles]\n\n return f\"{message} for {person}: {roleIds}\"",
"async def list_roles(self, ctx: commands.Context):\n all_roles = await self.config.guild(ctx.guild).autoroles()\n maybe_not_found = []\n message = \"\"\n for role in all_roles:\n fetched_role = ctx.guild.get_role(role)\n if not fetched_role:\n maybe_not_found.append(role)\n continue\n message += \"- {name} (`{id}`).\\n\".format(name=fetched_role.name, id=fetched_role.id)\n if maybe_not_found:\n clean_list = list(set(all_roles) - set(maybe_not_found))\n await self.config.guild(ctx.guild).autoroles.set(clean_list)\n message += \"\\nSome roles has been removed since I was unable to find them.\"\n if message:\n for line in pagify(message):\n await ctx.send(line)\n else:\n await ctx.send(\"No role has been added.\")",
"def getRoles(self):",
"def test_list_role_for_all_namespaces(self):\n pass",
"def roles(self) -> pulumi.Output[Optional[Sequence[str]]]:\n return pulumi.get(self, \"roles\")",
"def get_roles_output(filter: Optional[pulumi.Input[Optional[str]]] = None,\n opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetRolesResult]:\n ...",
"def getRoles():\n return jsonify(listRoles(ROLES_DIR))",
"def test03_perm_roles(self):\n print_ln('test16_perm_roles')\n \n try:\n pList = review.find_perms(Perm(obj_name='py-obj*', op_name='*'))\n for perm in pList: \n print_ln(\"Role Perm obj name=\" + perm.obj_name + ', op=' + perm.op_name + ', id=' + perm.obj_id)\n rList = review.perm_roles(perm)\n for role in rList:\n print_ln(\"Assigned role=\" + role, 1)\n except Exception as e:\n self.fail('test16_perm_roles failed, exception=' + e.msg)",
"def get_role_choices(my_role):\n roles = get_all_roles(my_role)\n for r in roles :\n yield ( r.id, u'%s (%s)' % (r.description, r.name) )",
"def listRoleAccess(self, role):\n return self._client.listRoleAccess(role)",
"def list_roles(self, hints):\n raise exception.NotImplemented() # pragma: no cover",
"def getRoleInfo(self, role):",
"def get_roles():\n check_admin()\n roles = Role.query.all()\n\n return render_template('admin/roles/roles.html', roles=roles, title=\"Roles\")",
"def roles(self):\n params = {\n \"f\" : \"json\"\n }\n uURL = self._url + \"/roles\"\n return self._con.get(path=uURL, params=params)",
"def list_roles():\n\tsession = get_session()\n\tresponse = session.get(\"{url}/api/roles\".format(url=get_registry_url()))\n\treturn response.json()[\"results\"]",
"def role_command():",
"async def roles(self, ctx):\n\n pass"
] |
[
"0.6430919",
"0.633462",
"0.61451775",
"0.60598814",
"0.60481584",
"0.5967613",
"0.59049743",
"0.58852696",
"0.5871352",
"0.5838742",
"0.58185387",
"0.5815103",
"0.57983583",
"0.5796137",
"0.5789229",
"0.57704955",
"0.5754722",
"0.5742902",
"0.57375866",
"0.5691435",
"0.5690403",
"0.5646068",
"0.5640352",
"0.5614902",
"0.5601165",
"0.5585905",
"0.55821306",
"0.552848",
"0.55165213",
"0.54911005"
] |
0.6729608
|
0
|
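The detailed branch of role_list above prints each role by converting it with dataclasses.asdict and pretty-printing the result with yaml.dump. A small self-contained sketch of that pattern, with an illustrative RoleInfo dataclass that is not taken from the dump:

from dataclasses import dataclass, field, asdict
import yaml

@dataclass
class RoleInfo:
    actions: dict = field(default_factory=dict)
    hosts: list = field(default_factory=list)

info = RoleInfo(actions={"setup": {}}, hosts=["node-1"])
print(yaml.dump(asdict(info), sort_keys=True, indent=2))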
Explicitly set the identity and claims for the JWT.
|
def add_claims_to_access_token(identity):
    if identity == 'admin':
        roles = 'admin'
    else:
        roles = 'peasant'

    now = datetime.utcnow()
    return {
        'exp': now + current_app.config['JWT_EXPIRES'],
        'iat': now,
        'nbf': now,
        'sub': identity,
        'roles': roles
    }
|
{
  "objective": {
    "self": [],
    "paired": [],
    "triplet": [
      [
        "query",
        "document",
        "negatives"
      ]
    ]
  }
}
|
[
"def add_claims_to_jwt(identity):\n user = UserModel.find_by_id(identity)\n if user.is_admin:\n return {\"is_admin\": True}\n return {\"is_admin\": False}",
"def remember(self, response, request, identity):\n extra_claims = identity.as_dict()\n userid = extra_claims.pop('userid')\n claims_set = self.create_claims_set(userid, extra_claims)\n token = self.encode_jwt(claims_set)\n response.headers['Authorization'] = '%s %s' % (self.auth_header_prefix, token)",
"def _set_jwt_cookies(response, cookie_settings, jwt_header_and_payload, jwt_signature):\n cookie_settings['httponly'] = None\n response.set_cookie(\n jwt_cookies.jwt_cookie_header_payload_name(),\n jwt_header_and_payload,\n **cookie_settings\n )\n\n cookie_settings['httponly'] = True\n response.set_cookie(\n jwt_cookies.jwt_cookie_signature_name(),\n jwt_signature,\n **cookie_settings\n )",
"def set_user_jwt(auth_key, profile):\n try:\n jwt = JWT(jwt=auth_key, key=jwkeys)\n claims = json_loads(jwt.claims)\n except:\n # This is called if the authorization failed (auth key has been forged)\n # We don't really care about providing malicious requests with debug\n # info so this is just a really basic error message\n return render_error('rejected.'), 403\n\n # JWT has a couple of fields (claims) that we care about:\n # - iss: Issuer - Basically provider of open id (google, SE, etc.)\n # - sub: Subject - Who is auth'd by this.\n # Together we will use these to store an auth method.\n issuer = claims.get('iss')\n subject = claims.get('sub')\n\n # Error handle against malformed keys. This should never really happen and\n # the error is not shown to the user so doesn't need to be user-friendly\n if not issuer or not subject:\n return render_error('malformed. Missing iss or sub'), 400\n\n # If we are here that means we have validated a valid login attempt. Now we\n # will delegate to another method\n token = UserJWTToken(identity=subject, issuer=issuer)\n return get_or_set_user(jwt_token=token, profile=profile)",
"def set_jwt_handlers(jwt):\n\n @jwt.authentication_handler\n def authenticate(username, password):\n pass\n\n @jwt.jwt_error_handler\n def error_handler(error):\n return 'Auth Failed: {}'.format(error.description), 400\n\n @jwt.jwt_payload_handler\n def make_payload(user):\n return {\n 'user_id': str(user.id),\n 'exp': (datetime.datetime.utcnow() +\n current_app.config['JWT_EXPIRATION_DELTA']).isoformat()\n }\n\n @jwt.request_handler\n def load_user(payload):\n pass",
"def claims(self, claims):\n\n self._claims = claims",
"def encode_jwt(self, claims_set):\n key = self.master_secret\n private_key = self.private_key\n if self.private_key_file is not None:\n with open(self.private_key_file, 'r') as rsa_priv_file:\n private_key = rsa_priv_file.read()\n if private_key is not None:\n key = private_key\n algorithm = self.algorithm\n token = jwt.encode(claims_set, key, algorithm)\n\n if PY3:\n token = token.decode(encoding='UTF-8')\n return token",
"def __init__(\n self,\n uri,\n audience,\n get_token,\n **kwargs\n ):\n super(JWTTokenAuth, self).__init__(uri, audience, kwargs.pop(\"token_type\", TOKEN_TYPE_JWT), get_token)\n self.get_token = get_token",
"def setIdentity(self) -> None:\n ...",
"def auth_token_provider_default_claims(self, auth_token_provider_default_claims):\n\n self._auth_token_provider_default_claims = auth_token_provider_default_claims",
"def create_claims_set(self, userid, extra_claims=None):\n expiration_delta = self.expiration_delta\n issuer = self.issuer\n userid_claim = self.userid_claim\n claims_set = {userid_claim: userid}\n if expiration_delta is not None:\n claims_set['exp'] = datetime.datetime.utcnow() + expiration_delta\n if issuer is not None:\n claims_set['iss'] = issuer\n if extra_claims is not None:\n claims_set.update(extra_claims)\n return claims_set",
"def __init__(__self__, *,\n always_include_in_token: Optional[pulumi.Input[bool]] = None,\n auth_server_id: Optional[pulumi.Input[str]] = None,\n claim_type: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n scopes: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n status: Optional[pulumi.Input[str]] = None,\n value: Optional[pulumi.Input[str]] = None,\n value_type: Optional[pulumi.Input[str]] = None):\n if always_include_in_token is not None:\n pulumi.set(__self__, \"always_include_in_token\", always_include_in_token)\n if auth_server_id is not None:\n pulumi.set(__self__, \"auth_server_id\", auth_server_id)\n if claim_type is not None:\n pulumi.set(__self__, \"claim_type\", claim_type)\n if name is not None:\n pulumi.set(__self__, \"name\", name)\n if scopes is not None:\n pulumi.set(__self__, \"scopes\", scopes)\n if status is not None:\n pulumi.set(__self__, \"status\", status)\n if value is not None:\n pulumi.set(__self__, \"value\", value)\n if value_type is not None:\n pulumi.set(__self__, \"value_type\", value_type)",
"def create_fake_JWT_token(userEmail):\n pass",
"def __init__(__self__, *,\n auth_type: pulumi.Input[str],\n client_id: pulumi.Input[str],\n subscription_id: pulumi.Input[str]):\n pulumi.set(__self__, \"auth_type\", 'userAssignedIdentity')\n pulumi.set(__self__, \"client_id\", client_id)\n pulumi.set(__self__, \"subscription_id\", subscription_id)",
"def do(self, action_context, view, impersonate_user, *args, **kwargs):\n # If not in the argument check on the view\n jwt = action_context.extra_context.get('jwt')\n user = action_context.extra_context.get('user')\n if not jwt:\n logger.error(\"Not jwt token to set\",\n extra={'request': action_context.request})\n\n raise ValueError(\"Not jwt token to set\")\n\n # Get the host for the cookie\n host = self.get_host(action_context.request)\n\n # Set the response\n action_context.response = jwt_utils.set_jwt_cookie_to_response(\n response=action_context.response,\n cookie_key=settings.JWT_COOKIE_NAME,\n encoded_jwt=jwt,\n expiration_delta_sec=settings.JWT_IMPERSONATE_COOKIE_EXPIRATION_TIME_DELTA,\n domain=host,\n secure=settings.JWT_COOKIE_ONLY_HTTPS)\n\n logger.info(\"JWT token set for user {0} on {1}\".format(impersonate_user, host))\n\n logger.info(\"User '{0}' impersonated user '{1}'\".format(\n user.id,\n impersonate_user.id))\n\n return super().do(action_context, view, impersonate_user, *args, **kwargs)",
"def _create_and_set_jwt_cookies(response, request, cookie_settings, user=None):\n\n # Skip setting JWT cookies for most unit tests, since it raises errors when\n # a login oauth client cannot be found in the database in ``_get_login_oauth_client``.\n # This solution is not ideal, but see https://github.com/edx/edx-platform/pull/19180#issue-226706355\n # for a discussion of alternative solutions that did not work or were halted.\n if settings.FEATURES.get('DISABLE_SET_JWT_COOKIES_FOR_TESTS', False):\n return\n\n expires_in = settings.JWT_AUTH['JWT_IN_COOKIE_EXPIRATION']\n _set_expires_in_cookie_settings(cookie_settings, expires_in)\n\n jwt = _create_jwt(request, user, expires_in)\n jwt_header_and_payload, jwt_signature = _parse_jwt(jwt)\n\n _set_jwt_cookies(\n response,\n cookie_settings,\n jwt_header_and_payload,\n jwt_signature,\n )",
"def create_jwt(self, claims, header):\n token = jwt.encode(\n claims, self.jwt_oidc_test_private_key_pem, headers=header, algorithm='RS256')\n return token",
"def create_jwt(self, audience: List[str], additional_claims=None) -> str:\n iat = time.time()\n exp = iat + self.lifetime\n payload = additional_claims or {}\n payload.update({'iss': self.credentials[\"client_email\"],\n 'sub': self.credentials[\"client_email\"],\n 'aud': audience,\n 'iat': iat,\n 'exp': exp,\n 'scope': ['email', 'openid', 'offline_access'],\n 'email': self.credentials[\"client_email\"]\n })\n additional_headers = {'kid': self.credentials[\"private_key_id\"]}\n token = jwt.encode(\n payload,\n self.credentials[\"private_key\"],\n headers=additional_headers,\n algorithm='RS256').decode()\n return token",
"def do(self, action_context, view, impersonate_user, *args, **kwargs):\n if impersonate_user:\n extra_data = {\n 'impersonate': True,\n 'real_user_id': action_context.extra_context['user'].id\n }\n action_context.extra_context['jwt'] = jwt_utils.create_jwt(\n impersonate_user,\n expiration_time=settings.JWT_IMPERSONATE_EXPIRATION_TIME_DELTA,\n extra_data=extra_data)\n\n return super().do(action_context, view, impersonate_user, *args, **kwargs)",
"def __init__(self, app=None, well_known_config=None, well_known_obj_cache=None, algorithms='RS256', jwks_uri=None, issuer=None, audience=None, client_secret=None, cache=None, caching_enabled=False, jwt_oidc_test_mode=False, jwt_oidc_test_keys=None, jwt_role_callback=None, jwt_oidc_test_private_key_pem=None):\n \n self.app = app\n self.well_known_config = well_known_config\n self.well_known_obj_cache = well_known_obj_cache\n self.algorithms = algorithms\n self.jwks_uri = jwks_uri\n self.issuer = issuer\n self.audience = audience\n self.client_secret = client_secret\n self.cache = cache\n self.caching_enabled = caching_enabled\n\n self.jwt_oidc_test_mode = jwt_oidc_test_mode\n self.jwt_oidc_test_keys = jwt_oidc_test_keys\n self.jwt_oidc_test_private_key_pem = jwt_oidc_test_private_key_pem\n self.jwt_role_callback = jwt_role_callback\n\n print(\"Running constructor\")\n if app is not None:\n self.init_app(app)",
"async def mock_login(auth: MyBMWAuthentication) -> None:\n auth.access_token = \"SOME_ACCESS_TOKEN\"",
"def jwt_auth(client):\n return JwtAuthActions(client)",
"def authenticate(self, request):\n auth_data = super().authenticate(request)\n if not auth_data:\n return auth_data\n\n user, auth = auth_data\n\n if amr_claim := auth.data.get(\"amr\"):\n user.token_amr_claim = amr_claim\n\n return user, auth",
"def set_jwt_file(self, filename):\n self.jwtfile = filename",
"def set_auth_headers(self, access_token, client_id):\n\t\tself.headers['X-Udemy-Bearer-Token'] = access_token\n\t\tself.headers['X-Udemy-Client-Id'] = client_id\n\t\tself.headers['Authorization'] = \"Bearer \" + access_token\n\t\tself.headers['X-Udemy-Authorization'] = \"Bearer \" + access_token",
"def encode_auth_token(self, id):\n payload = {\n \"exp\": datetime.utcnow()\n + timedelta(\n days=current_app.config.get(\"TOKEN_EXPIRATION_DAYS\"),\n seconds=current_app.config.get(\"TOKEN_EXPIRATION_SECONDS\"),\n ),\n \"iat\": datetime.utcnow(),\n \"sub\": id,\n }\n return jwt.encode(\n payload, current_app.config.get(\"SECRET_KEY\"), algorithm=\"HS256\"\n )",
"def set_auth_token_header(self):\n\n username = 'test-user'\n passwd = 'testuserpass1234'\n user = User.objects.create(username=username)\n user.set_password(passwd)\n user.save()\n\n assert Account.objects.get(user=user) is not None\n url = reverse('token_obtain_pair')\n res = self.client.post(url,\n data={'username': username, 'password': passwd})\n self.client.credentials(HTTP_AUTHORIZATION=\n f\"Bearer {res.data['access']}\")\n return user",
"def setUp(self):\n self.token = AuthToken()\n self.token[api_settings.USER_ID_CLAIM] = 33\n self.token[\"username\"] = \"daron\"\n self.token[\"first_name\"] = \"Daron\"\n self.token[\"last_name\"] = \"Malakian\"\n self.token[\"email\"] = \"[email protected]\"\n\n self.user = TokenUser(self.token)",
"def set_issuer(self, claim=ISSUER):\n if api_settings.ISSUER is not None:\n self.payload[claim] = api_settings.ISSUER",
"def __call__(self, r):\n r.headers[\"x-aims-auth-token\"] = self._token\n return r"
] |
[
"0.65983707",
"0.65081966",
"0.61748314",
"0.6009898",
"0.59909314",
"0.5906171",
"0.58335584",
"0.5824592",
"0.58182263",
"0.57838553",
"0.5780166",
"0.55363196",
"0.55278736",
"0.55009097",
"0.5463044",
"0.54460394",
"0.5442401",
"0.54184085",
"0.5409769",
"0.5373524",
"0.5342139",
"0.5341208",
"0.53274393",
"0.53271854",
"0.5306851",
"0.53048176",
"0.52939695",
"0.52811056",
"0.52759105",
"0.52558404"
] |
0.6664418
|
0
|
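add_claims_to_access_token above builds a full claims payload (exp, iat, nbf, sub, roles). A minimal sketch of signing such a payload with PyJWT; the secret and the fifteen-minute expiry stand in for the app's JWT_EXPIRES config and are not taken from the dump:

from datetime import datetime, timedelta
import jwt  # PyJWT

claims = {
    'sub': 'admin',
    'roles': 'admin',
    'iat': datetime.utcnow(),
    'nbf': datetime.utcnow(),
    'exp': datetime.utcnow() + timedelta(minutes=15),  # stand-in for JWT_EXPIRES
}
token = jwt.encode(claims, 'example-secret', algorithm='HS256')  # illustrative secret
print(token)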
Search the device list for this device's name and retrieve its address.
|
def get_address(self, device_list):
    for device in device_list:
        if device.name == self.NAME:
            self.ADDR = device.address
            return True
    return False
|
{
  "objective": {
    "self": [],
    "paired": [],
    "triplet": [
      [
        "query",
        "document",
        "negatives"
      ]
    ]
  }
}
|
[
"def search(self,name=None):\n\t\taddresses = discover_devices()\n\t\t#if len(addresses) == 0:\n\t\t#\treturn None\n\t\tnames = []\n\t\tfor adr in addresses:\n\t\t\tnames.append(lookup_name(adr))\n\t\t\tif name != None and name == names[-1]:\n\t\t\t\treturn adr\n\n\t\treturn zip(addresses,names)",
"def _find_device(self):\n found_device = False\n nearby_devices = None\n try:\n nearby_devices = self._adapter.scan()\n except Exception:\n pass\n\n if nearby_devices is not None:\n for device in nearby_devices:\n name = device['name']\n if name is not None and name.startswith(self._search_name):\n self._address = device['address']\n print(f'Found device named: {name} at {self._address}')\n found_device = True\n break\n\n return found_device",
"def find_devices (devicelist):\n vprint(\"\\nFind known devices:\")\n for device in devicelist:\n if find_device(device) is not None :\n vprint(\"\\tFound :\", device)\n else:\n vprint(\"\\tNOT found:\", device )\n vprint(\"..........\") \n return",
"def find_target_device(ble_device, name):\r\n scan_report = ble_device.scanner.start_scan().wait()\r\n\r\n for report in scan_report.advertising_peers_found:\r\n if report.advertise_data.local_name == name:\r\n return report.peer_address",
"def discover(bt_addr):\n print \"performing inquiry...\"\n nearby_devices = bluetooth.discover_devices(lookup_names = True)\n print \"Found %d devices\" % len(nearby_devices)\n \n for addr, name in neaby_devices:\n print \" %s - %s\" % (addr, name)",
"def find(ctx, name):\n conf = settings.devices.get(name, dict())\n if conf.get('type') == 'command':\n return conf, name, name\n\n uuids = ctx.obj['uuids']\n context = Context()\n for dev in iter(context.list_devices()):\n if 'ID_FS_TYPE' in dev:\n if name == uuids.get(dev.get('ID_FS_UUID')):\n return (settings.devices[name], dev['DEVNAME'],\n settings.devices[name].get('label',\n dev.get('ID_FS_LABEL')))\n\n print('Device \"%s\" not found.' % name)\n sys.exit(1)",
"async def find_devices() -> List[DeviceInfo]:\n return await Discovery.search_devices()",
"def get_devices():\n global managed_objects\n global devices_by_adr\n \n devices_by_adr = {}\n \n r = re.compile(\"\\/org\\/bluez\\/hci\\d*\\/dev\\_(.*)\")\n # e.g., match a string like this:\n # /org/bluez/hci0/dev_58_C9_35_2F_A1_EF\n \n for key, value in managed_objects.items():\n # print(\"key=\", key)\n m = r.match(key)\n if m is not None:\n dev_str = m.group(1) # we have a device string!\n # print(\"dev_str=\", dev_str)\n # let's flatten that dict a bit\n devices_by_adr[dev_str] = value[\"org.bluez.Device1\"]",
"def connect_magic():\n nearby_devices = bluetooth.discover_devices(lookup_names = True, duration=5)\n\n for addr, name in nearby_devices:\n print(name)\n if name == \"MindWave Mobile\":\n print \"found\"\n return (connect_bluetooth_addr(addr), addr)\n return (None, \"\")",
"def scan(backend, timeout=10):\n result = []\n for (mac, name) in backend.scan_for_devices(timeout):\n print(mac + \" \" + name)\n return result",
"def test_get_device_address_from_dbus_path(self):\n test_data = [\n ['/org/bluez/hci0/dev_EB_F6_95_27_84_A0', 'EB:F6:95:27:84:A0'],\n ['/org/bluez/hci0', ''],\n ['/org/bluez/hci0/dev_EB_F6_95_27_84_A0/player0',\n 'EB:F6:95:27:84:A0']\n ]\n for i in range(0, len(test_data)):\n with self.subTest(i=i):\n self.assertEqual(\n test_data[i][1],\n self.module_under_test.get_device_address_from_dbus_path(\n test_data[i][0]\n ))",
"def bt_scan():\n print(\"Searching for nearby devices...\")\n explore_devices = []\n if explorepy._bt_interface == 'sdk':\n device_manager = explorepy.exploresdk.ExploreSDK_Create()\n nearby_devices = device_manager.PerformDeviceSearch()\n for bt_device in nearby_devices:\n if \"Explore\" in bt_device.name:\n print(\"Device found: %s - %s\" % (bt_device.name, bt_device.address))\n explore_devices.append((bt_device.name, bt_device.address))\n else:\n import bluetooth\n nearby_devices = bluetooth.discover_devices(lookup_names=True)\n for address, name in nearby_devices:\n if \"Explore\" in name:\n print(\"Device found: %s - %s\" % (name, address))\n explore_devices.append((address, name))\n\n if not nearby_devices:\n print(\"No Devices found\")\n\n return explore_devices",
"def find(vps, sn = None):\n devices = UsbTools.find_all(vps)\n # do we have any devices?\n if len(devices) == 0:\n return None, 'no device found'\n if sn is not None:\n # filter using the serial number\n devices_sn = [d for d in devices if d[2] == sn]\n if len(devices_sn) == 0:\n # we have devices, but none with this serial number\n s = []\n s.append('no device with this serial number')\n s.append('devices found:')\n for d in devices:\n s.append('%04x:%04x sn %r' % (d[0], d[1], d[2]))\n return None, '\\n'.join(s)\n else:\n devices = devices_sn\n # no devices\n if len(devices) == 0:\n return None, 'no device found'\n # multiple devices\n if len(devices) > 1:\n s = []\n s.append('multiple devices found:')\n for d in devices:\n s.append('%04x:%04x sn %r' % (d[0], d[1], d[2]))\n return None, '\\n'.join(s)\n # 1 device\n return devices[0], None",
"def findDeviceDescriptor(self, string: str) -> cern.japc.core.DeviceDescriptor:\n ...",
"def list_devices():\r\n DeviceManagerCLI.BuildDeviceList()\r\n return DeviceManagerCLI.GetDeviceList()",
"def getDevices():\n \n scannedDevices = list()\n \n proc = subprocess.Popen('bluetoothctl scan on', shell=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, bufsize=8192, universal_newlines=True)\n \n time.sleep(10)\n \n proc.stdin.write('scan off')\n \n try:\n stdout, stderr = proc.communicate()\n except subprocess.TimeoutExpired:\n proc.kill()\n stdout, stderr = proc.communicate()\n\n ansiEscapePattern = re.compile(r'\\x1B[@-_][0-?]*[ -/]*[@-~]')\n stdout = ansiEscapePattern.sub('', stdout)\n \n #deviceNamePattern = re.compile('^\\[NEW\\] Device [A-F0-9]{2}:[A-F0-9]{2}:[A-F0-9]{2}:[A-F0-9]{2}:[A-F0-9]{2}:[A-F0-9]{2} ')\n \n for line in stdout.split('\\n'):\n if '[NEW] Device' in line:\n device = list()\n device.append(line[13:31])\n device.append(line[31:])\n scannedDevices.append(device)\n \n return scannedDevices",
"async def device_info(request):\n textx = await request.get_reply_message()\n codename = request.pattern_match.group(1)\n if codename:\n pass\n elif textx:\n codename = textx.text\n else:\n await edit_or_reply(request, \"`Usage: .device <codename> / <model>`\")\n return\n data = json.loads(\n get(\n \"https://raw.githubusercontent.com/androidtrackers/\"\n \"certified-android-devices/master/by_device.json\"\n ).text\n )\n results = data.get(codename)\n if results:\n reply = f\"**Search results for {codename}**:\\n\\n\"\n for item in results:\n reply += (\n f\"**Brand**: `{item['brand']}`\\n\"\n f\"**Name**: `{item['name']}`\\n\"\n f\"**Model**: `{item['model']}`\\n\\n\"\n )\n else:\n reply = f\"`Couldn't find info about {codename}!`\\n\"\n await edit_or_reply(request, reply)",
"def ret_device_list():\n token = get_auth_token() # Get Token\n url = \"https://sandboxdnac.cisco.com/api/v1/network-device\"\n hdr = {'x-auth-token': token, 'content-type' : 'application/json'}\n resp = requests.get(url, headers=hdr) # Make the Get Request\n device_list = resp.json()\n return device_list",
"def device_info(device_id):\n device_info_map = listall.device_raw_info()[\"devices\"]\n for operating_system in device_info_map.keys():\n devices = device_info_map[operating_system]\n for device in devices:\n if device[\"udid\"].lower() == device_id.lower():\n return device\n return None",
"def _get_entry_address(device_serial: str) -> str:\n comports = list_ports.comports()\n for com_port in comports:\n if com_port.serial_number == device_serial:\n return com_port.device.replace('/cu.', '/tty.')\n return ''",
"def scan_devices(self):\n self._update_info()\n return [client[\"mac\"] for client in self.last_results]",
"def scan_devices(self):\n self._update_info()\n\n return [client['mac'] for client in self.last_results]",
"def device_list():\n click.echo(\"\\nRetrieving the devices.\")\n\n url = base_url + \"/device\"\n\n response = requests.get(url=url, headers=header,verify=False)\n if response.status_code == 200:\n items = response.json()['data']\n else:\n print(\"Failed to get list of devices \" + str(response.text))\n exit()\n\n headers = [\"Host-Name\", \"Device Type\", \"Device ID\", \"System IP\", \"Site ID\", \"Version\", \"Device Model\"]\n table = list()\n\n for item in items:\n tr = [item.get('host-name'), item.get('device-type'), item.get('uuid'), item.get('system-ip'), item.get('site-id'), item.get('version'), item.get('device-model')]\n table.append(tr)\n try:\n click.echo(tabulate.tabulate(table, headers, tablefmt=\"fancy_grid\"))\n except UnicodeEncodeError:\n click.echo(tabulate.tabulate(table, headers, tablefmt=\"grid\"))",
"def do_list(self, _):\n devices = []\n for source in self._target.devices:\n devices.append({\n 'name': source.device['name'],\n 'path': source.device['path'],\n })\n return devices",
"def update(self):\n self._devices_list = []\n self.sendto(\"FIND%-8s\" % (self.search_password,))\n\n start = time.time()\n while start + self.timeout > time.time():\n rfds, _, _ = select.select([self.device_s], [], [], 0.5)\n\n for sock in rfds:\n data = self.recvfrom()\n if data[0:4] in (\"IMIN\", \"SETC\"):\n try:\n dev = WizSearch.DEVICE_TYPES[self.device_type](data[4:])\n # devices.append(self.extract_IMIN(data, wiztype))\n if not self.allowed_mac or dev.mac in self.allowed_mac:\n self._devices_list.append(dev)\n except:\n logger.exception(\"parsing error.\")\n\n if not self._devices_list:\n logger.error(\"Timeout, no devices found\")\n return self._devices_list",
"def find_dev_by_prefix(devices, prefix):\n result = []\n for dev in devices.values():\n if dev.unique_id.startswith(prefix):\n result.append(dev)\n return result",
"def find(cls, device_name):\n return cls.query(cls.device_name == device_name).fetch(1)",
"def getDevices(i):\n devices = Account['KTFLR'].devices('monpressprod')\n device = devices[i]\n return device",
"async def get_discovered_device_names(self):\n json = self._api_call(\"app/monitors/%s/devices\" % self.sense_monitor_id)\n self._devices = await [entry[\"name\"] for entry in json]\n return self._devices",
"def get_devices():\n devices, errors = [], []\n\n for path in hookenv.action_get('devices').split(' '):\n path = path.strip()\n if not os.path.isabs(path):\n errors.append('{}: Not absolute path.'.format(path))\n elif not os.path.exists(path):\n errors.append('{}: Device does not exist.'.format(path))\n else:\n devices.append(path)\n\n if errors:\n raise ZapDiskError(\", \".join(errors))\n\n return devices"
] |
[
"0.753403",
"0.6893208",
"0.6690935",
"0.6621304",
"0.6455121",
"0.64122295",
"0.63889885",
"0.6303021",
"0.61488307",
"0.6101672",
"0.60946697",
"0.6088623",
"0.606284",
"0.6058179",
"0.60379183",
"0.6000065",
"0.5980085",
"0.592712",
"0.59264034",
"0.5920494",
"0.59175026",
"0.5895252",
"0.58901924",
"0.5886396",
"0.5881307",
"0.5857086",
"0.58527464",
"0.58461386",
"0.5835645",
"0.5822747"
] |
0.7269385
|
1
|
Connect button press callback, retrieves device name from text box and sets flag
|
def get_device(self):
self.connect_button = 1
self.device_name = self.deviceEntry.text()
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def device_connect(self):\n pass",
"def connect(self):\n def f():\n com_port = self.ports_listbox.get(tk.ACTIVE)\n ser = serial.Serial(com_port)\n print(\"connected to :\" + ser.name)\n self.connect_msg = tk.Label(self.connect_frame, text=\"Connected to :\" + ser.name).grid(row=1)\n self.parent.on_connect(ser, self.model)\n self.ports_listbox.destroy()\n self.connect_button.destroy()\n return f",
"def device_event(observer, device):\n if (device.action == \"add\"):\n print(\"conectado\")\n name = device.sys_name\n print(name)\n print(name[len(name) - 4])\n if(name[len(name) - 4] == \":\"):\n print(\"device mala\")\n else:\n time.sleep(5)\n try:\n with open(\"/media/usb0/LABSD.txt\", \"r\") as f:\n data = f.readlines()\n except IOError:\n print('cannot open')\n else:\n dataprocess(data)\n f.close()\n elif (device.action == \"remove\"):\n print(\"desconectado\")\n else:\n print(\"error\")",
"def on_connected_cbk(_):\n cprint(\"\\nDevice connected.\", color=\"green\", flush=True)",
"def device_event(observer, action, device):\n if (device.action == \"add\"):\n print(\"conectado\")\n name = device.sys_name\n print(name)\n print(name[len(name) - 4])\n if(name[len(name) - 4] == \":\"):\n print(\"Duplicado\")\n else:\n time.sleep(5)\n try:\n with open(\"/media/usb0/LABSD.txt\", \"r\") as f:\n data = f.readlines()\n except IOError:\n print('cannot open')\n else:\n dataprocess(data)\n f.close()\n elif (device.action == \"remove\"):\n print(\"desconectado\")\n else:\n print(\"error\")",
"def search_device(self,event):\n bi = wx.BusyInfo(\"Searching for Device, please wait...\", self)\n # **add code here to establish connection with the device over wifi\n time.sleep(3)\n bi2 = wx.BusyInfo(\"Device found!\", self)\n bi.Destroy()\n bi2.Destroy()",
"def on_open_sensor_com_btn_clicked(self):\n if not self.com_sensor_box.currentText() == \"\":\n # self.ser.port = self.video_com_box.currentText()\n port = self.com_sensor_box.currentText()\n self.sensor1.sensor.initialize_sensor(port, 9600)\n self.sensor1.resume()\n self.open_sensor_com_btn.setEnabled(False)\n self.close_sensor_com_btn.setEnabled(True)\n self.set_status_txt(\"openning sensor board port \" + port)\n else:\n QMessageBox.information(self,\n \"Warning\",\n \"No port detected, pls check your port and open again!\",\n QMessageBox.Yes | QMessageBox.No)",
"def change_device(self):\n if self.state.ser:\n UsbHost.close_port(self.state.ser)\n device = self.CBDevices.currentText()\n if device:\n comport = self.devices[int(device)]\n self.state.ser = UsbHost.open_port(comport)\n if not self.state.ser:\n self.statusbar.showMessage(\"Выбранный порт более недоступен. Произведите повторный поиск\")\n return\n answer: str = self.UsbHost.send_command(self.state.ser, \"ping\", device)\n if answer in wrong_answers:\n error_message(\"Выбранный девайс не отвечает\")\n self.statusbar.showMessage(\"Выбранный порт более недоступен. Произведите повторный поиск\")\n return\n self.state.device_id = int(device)\n self.state.comport = comport\n self.create_message()\n self.set_controls_state(True)\n self.BtnL1.click()\n self.BtnAttenuate.click()\n self.SpinDACValue.setValue(35000)\n self.BtnSetDACValue.click()\n self.set_sw(\"0 1\")",
"def ask_for_device():\n return input(\"Device name or ip address : \")",
"def toggle_connect( self ):\n # show waiting for communication\n self.lbl_status.setText( 'Waiting...' )\n self.lbl_statusLight.setPixmap( self.img_yellowLight )\n self.repaint()\n \n # create laser controller if doesn't already exist, connect\n if self.inst is None:\n try:\n self.inst = pac.Ammeter( self.port, timeout = 30 )\n self.inst.connect()\n \n except Exception as err:\n self.update_connected_ui( False )\n \n warning = QMessageBox()\n warning.setWindowTitle( 'Picoammeter Controller Error' )\n warning.setText( 'Could not connect\\n{}'.format( err ) )\n warning.exec()\n \n else:\n self.delete_controller()\n \n # update ui\n if self.inst is not None:\n self.update_connected_ui( self.inst.connected )\n \n else:\n self.update_connected_ui( False )",
"def connect():\n \n print(\"*****Starting connection*****\")\n \n ssid = id_key.network_id #hidden ssid\n key = id_key.network_key #hidden key\n \n station = network.WLAN(network.STA_IF)\n \n if station.isconnected() == True:\n print(\"*****Already connected*****\")\n return\n \n station.active(True)\n station.connect(ssid, key)\n \n while station.isconnected() == False:\n pass\n \n print(\"*****Connection successful*****\")\n print(station.ifconfig())",
"def connect_device(self):\n # check for valid device number\n \n usb_device_file = '/dev/ttyUSB{}'.format(self.dev_id)\n try:\n self.boardcon = serial.Serial(port=usb_device_file,\n baudrate=9600, bytesize=8,\n parity='N', stopbits=1,\n timeout=2\n )\n # org stopbits = 1\n status = True\n except Exception as e:\n status = False\n logger.error(\"Serial connection with /dev/ttyUSB{0} failed\"\n .format(self.dev_id)\n )\n return status",
"def connection(self):\n if (self.symbol.type == self.scanner.NAME):\n device_name = self.names.get_name_string(self.symbol.id)\n first_device_id = self.names.query(device_name)\n self.old_symbol = self.symbol # for undeclared device names\n self.symbol = self.scanner.get_symbol()\n\n if (self.symbol.type == self.scanner.PERIOD):\n self.symbol = self.scanner.get_symbol()\n\n if(self.symbol.type == self.scanner.OUT_PIN):\n pin_name = self.names.get_name_string(self.symbol.id)\n first_port_id = self.names.query(pin_name)\n self.symbol = self.scanner.get_symbol()\n else:\n # Error: Output pin has to be 'Q' or 'QBAR'\n # Stopping symbols: ';', '}' , '=', 'MONITOR' or 'END'\n # KEYWORD\n self.error(self.OUTPUT_PIN,\n [self.scanner.KEYWORD,\n self.scanner.SEMICOLON, self.scanner.EQUALS,\n self.scanner.RIGHT_CURLY],\n [self.scanner.MONITOR_ID,\n self.scanner.END_ID])\n else:\n # Device with only a single output port\n first_port_id = None\n\n if (self.symbol.type == self.scanner.EQUALS):\n self.symbol = self.scanner.get_symbol()\n\n if (self.symbol.type == self.scanner.NAME):\n device_name = self.names.get_name_string(self.symbol.id)\n second_device_id = self.names.query(device_name)\n self.symbol = self.scanner.get_symbol()\n\n if (self.symbol.type == self.scanner.PERIOD):\n self.symbol = self.scanner.get_symbol()\n\n if(self.symbol.type == self.scanner.IN_PIN):\n pin_name = self.names.get_name_string(\n self.symbol.id)\n second_port_id = self.names.query(pin_name)\n self.symbol = self.scanner.get_symbol()\n\n if(self.symbol.type == self.scanner.SEMICOLON):\n self.symbol = self.scanner.get_symbol()\n else:\n # Error: Connection has to be terminated by ';'\n # Stopping symbols: NAME, ';', '}' , 'MONITOR'\n # or 'END' KEYWORD\n self.error(\n self.NO_CONNECT_SEMICOLON, [\n self.scanner.KEYWORD,\n self.scanner.SEMICOLON,\n self.scanner.NAME,\n self.scanner.RIGHT_CURLY], [\n self.scanner.MONITOR_ID,\n self.scanner.END_ID])\n else:\n # Error: Valid input pin required\n # Stopping symbols: ';' , '}', 'MONITOR' or 'END'\n # KEYWORD\n self.error(\n self.INPUT_PIN, [\n self.scanner.KEYWORD,\n self.scanner.SEMICOLON,\n self.scanner.RIGHT_CURLY], [\n self.scanner.MONITOR_ID,\n self.scanner.END_ID])\n else:\n # Error: Period required to specify input pin\n # Stopping symbols: ';' , '}', 'MONITOR' or 'END'\n # KEYWORD\n self.error(\n self.PERIOD_INPUT_PIN, [\n self.scanner.KEYWORD, self.scanner.SEMICOLON,\n self.scanner.RIGHT_CURLY],\n [self.scanner.MONITOR_ID, self.scanner.END_ID])\n else:\n # Error: Name string of input device required\n # Stopping symbols: ';' , '}', 'MONITOR' or 'END' KEYWORD\n self.error(\n self.NAME_INPUT, [\n self.scanner.KEYWORD, self.scanner.SEMICOLON,\n self.scanner.RIGHT_CURLY],\n [self.scanner.MONITOR_ID, self.scanner.END_ID])\n else:\n # Error: '=' Assignment operator requried\n # Stopping symbols: ';' , '}', 'MONITOR' or 'END' KEYWORD\n self.error(\n self.ASSIGNMENT, [\n self.scanner.KEYWORD, self.scanner.SEMICOLON,\n self.scanner.RIGHT_CURLY], [self.scanner.MONITOR_ID,\n self.scanner.END_ID])\n else:\n # Error: Valid string name required\n # Stopping symbols: ';' , '}', 'MONITOR' or 'END' KEYWORD\n self.error(\n self.NAME_STRING, [\n self.scanner.KEYWORD, self.scanner.SEMICOLON,\n self.scanner.RIGHT_CURLY], [self.scanner.MONITOR_ID,\n self.scanner.END_ID])\n\n # Check for Connection Semantic errors\n if self.error_count == 0:\n # Only check for semantic errors if no errors so far\n err = self.network.make_connection(\n first_device_id, first_port_id,\n second_device_id, 
second_port_id)\n if err != self.network.NO_ERROR:\n # Stopping symbols: ';' , '}', 'MONITOR' or 'END' KEYWORD\n self.error(\n err, [\n self.scanner.KEYWORD, self.scanner.SEMICOLON,\n self.scanner.RIGHT_CURLY], [self.scanner.MONITOR_ID,\n self.scanner.END_ID])",
"def connectAdapter(self):\n self.canusb = pycanusb.CanUSB(bitrate='500')\n print('CanUSB: ',self.canusb)\n Msg = Switch_to_Operational_State_Msg()\n QTimer.singleShot(50,lambda msg = Msg : self.initialization(Msg))",
"def set_name(self, set_device_name):\n is_device_name_set = False\n if self.phone_info.phone_type == PhoneType.IOS:\n is_general_visible = False\n try:\n try:\n # verify that General Button\n self.find_element(self.driver.appium_driver,\n 'self.ios_locators.GENERAL_NAVIGATION_BUTTON_ByXPATH',\n 1).is_displayed()\n is_general_visible = True\n except:\n logger.debug(\"General Button is currently not visible \")\n\n if is_general_visible:\n pass\n else:\n self.driver.appium_driver.close_app()\n self.driver.appium_driver.launch_app()\n logger.error('Navigate to general and about in settings')\n self.find_element(self.driver.appium_driver,\n self.general_button_settings).click()\n self.wait_till_element_to_be_visible(\n self.driver.appium_driver, self.status_button, 10)\n self.find_element(self.driver.appium_driver,\n self.status_button).click()\n\n self.find_element(self.driver.appium_driver,\n self.device_name).click()\n text_field = self.find_element(self.driver.appium_driver,\n self.device_name_text_box).clear()\n\n self.driver.appium_driver.set_value(text_field,\n set_device_name)\n self.find_element(self.driver.appium_driver,\n self.set_name_button).click()\n is_device_name_set = True\n\n logger.debug('Set New Name of Mobile Phone - ',\n set_device_name)\n except Exception as e:\n logger.warning(\"Bluetooth Device name is not Set\")\n logger.debug(repr(e))\n elif self.phone_info.phone_type == PhoneType.ANDROID:\n name_text_box = False\n is_bluetooth_button__visible = self.__verify_current_screen()\n try:\n try:\n self.find_element(self.driver.appium_driver,\n self.device_name_text_box,\n 1).is_displayed()\n name_text_box = True\n except:\n logger.debug(\"Device name text box is not visible\")\n if name_text_box is True:\n device_name = self.find_element(self.driver.appium_driver,\n self.device_name_text_box).clear()\n self.driver.appium_driver.set_value(device_name,\n set_device_name)\n self.find_element(self.driver.appium_driver,\n self.set_name_button).click()\n is_device_name_set = True\n logger.debug(\n ':Set New Name of Mobile Phone - ' + set_device_name)\n elif name_text_box is False:\n if is_bluetooth_button__visible:\n pass\n else:\n self.testcase_action = 'STEP -: Go to Bluetooth ' \\\n 'option from settings - '\n self._go_to_bluetooth_button()\n\n bluetooth_element = self.find_element(\n self.driver.appium_driver,\n self.bluetooth_button_on_off_button, 1)\n # if bluetooth is OFF then throw Exception\n if bluetooth_element.text is False or \\\n bluetooth_element.text == 'OFF':\n bluetooth_element.click()\n logger.debug(\n \"Bluetooth is turned on in device with name \" +\n self.phone_info.bluetooth_name)\n\n else:\n logger.debug(\n \"Bluetooth is already on \" +\n self.phone_info.bluetooth_name)\n self.find_element(self.driver.appium_driver,\n self.bluetooth_more_options_button).click()\n self.wait_till_element_to_be_visible(\n self.driver.appium_driver, self.device_name, 10)\n self.find_element(self.driver.appium_driver,\n self.device_name).click()\n self.wait_till_element_to_be_visible(\n self.driver.appium_driver, self.device_name_text_box,\n 10)\n device_name = self.find_element(self.driver.appium_driver,\n self.device_name_text_box).clear()\n self.driver.appium_driver.set_value(device_name,\n set_device_name)\n self.find_element(self.driver.appium_driver,\n self.set_name_button).click()\n is_device_name_set = True\n logger.debug(\n ':Set New Name of Mobile Phone - ' + set_device_name)\n except Exception as e:\n logger.warning(\"Bluetooth Device name is not Set\")\n 
logger.debug(repr(e))\n return is_device_name_set",
"def on_connect(client, userdata, flags, rc):\n\t# subscribe to the LEDs topic when connected\n\tclient.subscribe(\"SNHU/IT697/leds\")",
"def on_connect_btn(self):\n ip = self.ip_edit.text()\n port = int(self.port_edit.text())\n try:\n self.rtsp_socket.connect((ip, port))\n QMessageBox.information(self, 'success', 'Connection Success\\n' + str(self.ip_edit.text()) + '\\n' +\n str(self.port_edit.text()), QMessageBox.Yes | QMessageBox.No, QMessageBox.Yes)\n except:\n QMessageBox.critical(self, \"Connection Error\", \"Failed to connect \\n\" + str(self.ip_edit.text()) + '\\n' +\n str(self.port_edit.text()), QMessageBox.Yes | QMessageBox.No, QMessageBox.Yes)",
"def on_connect():\n print('User connected!')\n return'connected'",
"def on_connect():\n print(\"User connected!\")",
"def __connect(self):\n try:\n self._device = usb.core.find(idVendor=self.usb_vendor_id, idProduct=self.usb_product_id)\n self._configuration = self._device.get_active_configuration()\n except Exception as e:\n return False\n self.reset()\n return True",
"def open_device_dialog(self):\n res, device = DeviceDialog.get_device(self.indexer)\n if res and device:\n self.serial = device.serial\n if self.serial:\n caps_str = None\n self.open_device(self.serial, caps_str)",
"def deviceConnected(self, deviceName):\n if not deviceName:\n return False\n\n for driver in self.drivers:\n if not self.scanValid(driver=driver, deviceName=deviceName):\n continue\n\n self.drivers[driver]['uiDropDown'].setStyleSheet(self.BACK_GREEN)\n self.deviceStat[driver] = True\n # self.app.message.emit(f'{driver} connected', 0)\n return True",
"def test_device_on(self):\n self.ms.add_response({'\\x14081031031E226410\\x0D': 'PA\\x0D'})\n # Network / Device ID\n response = self.upb.on((49, 3))\n self.assertTrue(response)",
"def connect_serial(self):\n if self.ser == 0:\n try:\n self.ser = serial.Serial(PORT, BAUDRATE, BYTESIZE, PARITY, STOPBITS, TIMEOUT)\n #self.ser = serial.Serial('/dev/ttyUSB0', 115200, timeout=10)\n except:\n self.textBrowser.append(QtGui.QApplication.translate(\"MainWindow\", \"\\n\\t----Failed to connect to the device----\\n\", None, QtGui.QApplication.UnicodeUTF8))\n return 0\n # this code shouldn't be never executed\n else:\n try:\n self.ser.open()\n except:\n self.textBrowser.append(QtGui.QApplication.translate(\"MainWindow\", \"\\n\\t----Failed to connect to the device----\\n\", None, QtGui.QApplication.UnicodeUTF8))\n return 0\n return 1",
"def connect_to_device(self):\n result = self._lib.NRFJPROG_connect_to_device()\n if result != NrfjprogdllErr.SUCCESS:\n raise APIError(result)",
"def connect_magic():\n nearby_devices = bluetooth.discover_devices(lookup_names = True, duration=5)\n\n for addr, name in nearby_devices:\n print(name)\n if name == \"MindWave Mobile\":\n print \"found\"\n return (connect_bluetooth_addr(addr), addr)\n return (None, \"\")",
"def get_name(self):\n device_name = ''\n if self.phone_info.phone_type == PhoneType.IOS:\n is_general_visible = False\n try:\n try:\n # verify that General Button\n self.find_element(self.driver.appium_driver,\n 'self.ios_locators.GENERAL_NAVIGATION_BUTTON_ByXPATH',\n 1).text\n is_general_visible = True\n except:\n logger.debug(\"General Button is currently not visible \")\n\n if is_general_visible:\n pass\n else:\n self.driver.appium_driver.close_app()\n self.driver.appium_driver.launch_app()\n logger.debug(\n 'STEP -: Navigate to general and about in settings '\n '- ')\n self.find_element(self.driver.appium_driver,\n self.general_button_settings).click()\n self.wait_till_element_to_be_visible(\n self.driver.appium_driver, self.status_button, 10)\n self.find_element(self.driver.appium_driver,\n self.status_button).click()\n\n device_name = self.find_element(self.driver.appium_driver,\n self.device_name).text\n logger.debug(\"Bluetooth Device Name:{}\".format(device_name))\n except Exception as e:\n logger.warning('Device name is not Visible')\n logger.debug(repr(e))\n elif self.phone_info.phone_type == PhoneType.ANDROID:\n name_text_box = False\n is_bluetooth_button__visible = self.__verify_current_screen()\n try:\n if not is_bluetooth_button__visible:\n logger.debug(\n 'STEP -: Navigate to bluetooth screen in settings - ')\n # On my Nexus 6P + Android 8.0.0, found that wasn't\n # going to the Bluetooth settings menu correctly,\n # So using the below function instead. I think it's what\n # was intended, (but maybe didn't get\n # switched in everywhere once it was created?).\n self._go_to_bluetooth_button()\n\n bluetooth_element = self.find_element(\n self.driver.appium_driver,\n self.bluetooth_button_on_off_button)\n # if bluetooth is OFF then throw Exception\n if bluetooth_element.text is False or bluetooth_element.text == 'OFF':\n bluetooth_element.click()\n logger.debug(\"Bluetooth turned on in device with device_name \" +\n self.phone_info.bluetooth_name)\n else:\n logger.debug(\"Bluetooth is already on \" + self.phone_info.bluetooth_name)\n try:\n self.find_element(self.driver.appium_driver,\n self.device_name_text_box, 1).is_displayed()\n name_text_box = True\n except:\n logger.debug(\"Device name text box is not visible\")\n if name_text_box is True:\n device_name = self.find_element(self.driver.appium_driver,\n self.device_name_text_box).text\n else:\n\n self.find_element(self.driver.appium_driver,\n self.bluetooth_more_options_button, 1).click()\n self.wait_till_element_to_be_visible(self.driver.appium_driver,\n self.device_name, 10)\n self.find_element(self.driver.appium_driver, self.device_name).click()\n self.wait_till_element_to_be_visible(self.driver.appium_driver,\n self.device_name_text_box, 10)\n device_name = self.find_element(self.driver.appium_driver,\n self.device_name_text_box).text\n logger.debug(\"Bluetooth Device Name:\" + device_name)\n except Exception as e:\n logger.warning('Bluetooth Device name is not Visible')\n logger.debug(repr(e))\n return device_name",
"def got_wemo(device):\n bridgectl.register(device)\n device.register_callback(\"statechange\", bridgectl.status_changed)",
"def QuickClient():\n window = Toplevel(root)\n window.title(\"Connection options\")\n window.grab_set()\n Label(window, text=\"Server IP:\").grid(row=0)\n destination = Entry(window)\n destination.grid(row=0, column=1)\n go = Button(window, text=\"Connect\", command=lambda:\n client_options_go(destination.get(), \"9999\", window))\n go.grid(row=1, column=1)",
"def testCheckDeviceName(self):\n device = config.devices[self.driver.desired_capabilities.get(\"deviceName\")][\"name\"]\n print(\"Device : \", device)"
] |
[
"0.66811657",
"0.6662639",
"0.6400079",
"0.63588834",
"0.6315138",
"0.62960154",
"0.62502056",
"0.62403375",
"0.60962975",
"0.606808",
"0.60387564",
"0.59946436",
"0.5988977",
"0.59868294",
"0.59591913",
"0.5915835",
"0.58743566",
"0.5867381",
"0.5817054",
"0.581208",
"0.58104753",
"0.5809723",
"0.5808493",
"0.57820404",
"0.57715553",
"0.57590294",
"0.5728845",
"0.57239956",
"0.5682172",
"0.5665773"
] |
0.7825053
|
0
|
Check that the options and value positional arguments are working for RadioItems.
|
def test_dbpa002_radio_items(dash_duo):
app = Dash()
options = {
"OptionA": "Option 1",
"OptionB": "Option 2",
"OptionC": "Option 3",
}
value = "OptionB"
with_keywords = RadioItems(
options=options,
value=value,
id="with-keywords",
)
without_keywords = RadioItems(options, value, id="without-keywords")
app.layout = html.Div([with_keywords, without_keywords])
dash_duo.start_server(app)
# Check values
assert [
a.get_attribute("value")
for a in dash_duo.wait_for_element(
"#with-keywords"
).find_elements_by_tag_name("input")
] == [
a.get_attribute("value")
for a in dash_duo.wait_for_element(
"#without-keywords"
).find_elements_by_tag_name("input")
]
# Check labels
assert [
a.text
for a in dash_duo.wait_for_element(
"#with-keywords"
).find_elements_by_tag_name("label")
] == [
a.text
for a in dash_duo.wait_for_element(
"#without-keywords"
).find_elements_by_tag_name("label")
]
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def test_radioselect_field():",
"def check_argument(self, struct_class, item, keyword, value):\n pass",
"def _validate_options(self):\r\n valid_choices = ('correct', 'partially-correct', 'incorrect')\r\n for option in self.options:\r\n choice = option['choice']\r\n if choice is None:\r\n raise ValueError('Missing required choice attribute.')\r\n elif choice not in valid_choices:\r\n raise ValueError('Invalid choice attribute: {0}. Must be one of: {1}'.format(\r\n choice, ', '.join(valid_choices)))",
"def test_radiotextgroup(self):\r\n self.check_group('radiotextgroup', 'choice', 'radio')",
"def _check_value(item, allowed_values, item_name=None, extra=None):\n if item not in allowed_values:\n item_name = \"\" if item_name is None else \" '%s'\" % item_name\n extra = \"\" if extra is None else \" \" + extra\n msg = (\n \"Invalid value for the{item_name} parameter{extra}. \"\n \"{options}, but got {item!r} instead.\"\n )\n allowed_values = tuple(allowed_values) # e.g., if a dict was given\n if len(allowed_values) == 1:\n options = \"The only allowed value is %s\" % repr(allowed_values[0])\n elif len(allowed_values) == 2:\n options = \"Allowed values are %s and %s\" % (\n repr(allowed_values[0]),\n repr(allowed_values[1]),\n )\n else:\n options = \"Allowed values are \"\n options += \", \".join([f\"{repr(v)}\" for v in allowed_values[:-1]])\n options += f\", and {repr(allowed_values[-1])}\"\n raise ValueError(\n msg.format(\n item_name=item_name, extra=extra, options=options, item=item\n )\n )\n\n return item",
"def __check_args(self):\n self.__check_args_type()\n self.__check_args_val()",
"def _check_args(self, args_):\n\n pass",
"def test_invalid_options(self):\n\n wf._items = []\n\n sys.argv = ['drive.py', '> not here']\n main(None)\n self.assertEqual(len(wf._items), 1)\n self.assertEqual(wf._items[0].title, ERRORS['InvalidOption']['title'])\n self.assertFalse(wf._items[0].valid)\n self.assertFalse(wf._items[0].arg)\n wf._items = []",
"def test_validate_choices_ok(self, choices, value):\n opt = scheme.Option('test-option', choices=choices)\n opt.validate('foo', value)",
"def check_arguments(self):\n ## only four test operation is permitted, if given anything apart from this, then it should print error message\n if (self.args.snap is False and self.args.snapcheck is False and self.args.check is False and self.args.diff is False and self.args.version is False):\n self.logger.error(colorama.Fore.RED +\n \"Arguments not given correctly, Please refer help message\", extra=self.log_detail)\n self.parser.print_help()\n sys.exit(1)\n\n if(((self.args.snap is True and (self.args.pre_snapfile is None or self.args.file is None)) or\n (self.args.snapcheck is True and self.args.file is None) or\n (self.args.check is True and self.args.file is None)) and \n (self.args.testfiles is None or self.args.hostname is None)\n ):\n self.logger.error(colorama.Fore.RED +\n \"Arguments not given correctly, Please refer help message\", extra=self.log_detail)\n self.parser.print_help()\n sys.exit(1)\n if self.args.diff is True:\n if (self.args.pre_snapfile is not None and os.path.isfile(self.args.pre_snapfile)) and (\n self.args.post_snapfile is not None and os.path.isfile(self.args.post_snapfile)):\n comp = Comparator()\n comp.compare_diff(\n self.args.pre_snapfile,\n self.args.post_snapfile,\n None)\n sys.exit(1)\n else:\n if (self.args.file is None) and (\n self.args.testfiles is None or self.args.hostname is None):\n self.parser.print_help()\n sys.exit(1)",
"def __init__(self, name, title, items, show_icon=True, desc=None, prop=None, style=None, attr=None,\n onclick_callback=None, app=None, css_cls=None):\n RadioButtonGroup.__init__(self, name, title, items, show_icon=show_icon, desc=desc, prop=prop,\n style=style, attr=attr, css_cls=css_cls, onclick_callback=onclick_callback,\n app=app)\n self._value = {}\n for item in items:\n record = items.get(item)\n is_checked = record[1]\n self._value[item] = is_checked",
"def test_set_arguments_for_widget_view_when_riskiqassettype_is_valid():\n\n # set argument for command\n indicator_data = {\n 'indicator_type': 'RiskIQAsset',\n 'value': 'dummy domain',\n 'CustomFields': {\n 'riskiqassettype': 'Domain'\n }\n }\n # set expected output\n expected_arguments = {\n 'name': 'dummy domain',\n 'type': 'Domain'\n }\n # Execute\n arguments = RiskIQDigitalFootprintAssetDetailsWidgetScript.set_arguments_for_widget_view(indicator_data)\n # Assert\n assert expected_arguments == arguments",
"def assert_argument_vals(args, valid_voices):\n assert (\n args.volume >= 0 and args.volume <= 100\n ), \"Please provide a volume between [0, 100].\"\n\n assert not args.memos or (\n args.memos and os.path.exists(args.memos)\n ), \"File path to memos does not exist.\"\n\n assert not args.after_num_plays or (\n args.after_num_plays >= 1\n ), \"Cannot play more than after every completed song.\"\n\n assert args.voice.lower() in valid_voices, (\n \"Voice passed is not considered valid. Please consult:\"\n \"\\n\\t'say -v \\\"?\\\"'\"\n \"\\nfor a list of valid voices and corresponding languages\"\n )",
"def validate(self):\r\n for opt in self.required:\r\n if not getattr(self, opt):\r\n print \"Error: %s is not specified.\" % opt\r\n self.optp.print_help()\r\n sys.exit(1)",
"def _check_vals(self):\n\n try:\n self.is_set = True\n self.pack()\n except Exception as err:\n # Set default values again\n raise ValueError(\"Invalid arguments. Could not packed since: {}\".format(err))\n self.__init__()",
"def test_options(self):\n\n wf._items = []\n\n sys.argv = ['drive.py', '']\n main(None)\n self.assertEqual(len(wf._items), 2)\n self.assertEqual(wf._items[0].title, OPTIONS[0]['title'])\n self.assertEqual(wf._items[1].title, OPTIONS[1]['title'])\n wf._items = []",
"def check_inputs(self, item_data):\n if not item_data[0] in self.data['pizza']:\n print('Error: ' + item_data[0] + ' pizza does not exist.')\n return False\n\n if not item_data[1] in self.data['pizza'][item_data[0]]:\n print('Error: ' + item_data[1] + ' size does not exist for '\n + item_data[0] + ' pizza.')\n return False\n\n for topping in item_data[2]:\n if not topping in self.data['topping']:\n print('Error: Pizza topping ' + topping + ' does not exist.')\n return False\n return True",
"def test_validate_available_choice_1(self):\n self.assertRaises(\n InvalidStatusOperationError,\n validate_available_choice,\n *(BeerStyle, \"Not an int\")\n )",
"def check_args(self):\n parser = get_base_arguments(get_parser())\n parser = get_tc_arguments(parser)\n # Disable \"Do not use len(SEQ) as condition value\"\n # pylint: disable=C1801\n if len(sys.argv) < 2:\n self.logger.error(\"Icetea called with no arguments! \")\n parser.print_help()\n return False\n elif not self.args.ignore_invalid_params and self.unknown:\n self.logger.error(\"Unknown parameters received, exiting. \"\n \"To ignore this add --ignore_invalid_params flag.\")\n self.logger.error(\"Following parameters were unknown: {}\".format(self.unknown))\n parser.print_help()\n return False\n return True",
"def test_checkParameters(self):\n self.failUnlessEqual(self.nice.opts['long'], \"Alpha\")\n self.failUnlessEqual(self.nice.opts['another'], \"Beta\")\n self.failUnlessEqual(self.nice.opts['longonly'], \"noshort\")\n self.failUnlessEqual(self.nice.opts['shortless'], \"Gamma\")",
"def radioButton(*args, align: Union[AnyStr, bool]=\"\", annotation: Union[AnyStr, bool]=\"\",\n backgroundColor: Union[List[float, float, float], bool]=None, changeCommand:\n Script=None, collection: AnyStr=\"\", data: Union[int, bool]=0, defineTemplate:\n AnyStr=\"\", docTag: Union[AnyStr, bool]=\"\", dragCallback: Script=None,\n dropCallback: Script=None, editable: bool=True, enable: bool=True,\n enableBackground: bool=True, enableKeyboardFocus: bool=True, exists: bool=True,\n fullPathName: bool=True, height: Union[int, bool]=0, highlightColor:\n Union[List[float, float, float], bool]=None, isObscured: bool=True, label:\n Union[AnyStr, bool]=\"\", manage: bool=True, noBackground: bool=True,\n numberOfPopupMenus: bool=True, offCommand: Script=None, onCommand: Script=None,\n parent: Union[AnyStr, bool]=\"\", popupMenuArray: bool=True, preventOverride:\n bool=True, recomputeSize: bool=True, select: bool=True, statusBarMessage:\n AnyStr=\"\", useTemplate: AnyStr=\"\", visible: bool=True, visibleChangeCommand:\n Union[Script, bool]=None, width: Union[int, bool]=0, q=True, query=True, e=True,\n edit=True, **kwargs)->Union[AnyStr, Any]:\n pass",
"def _validate_options(self, rules, operator_name):\n values = []\n option_values = []\n for argument in rules[operator_name]:\n if isinstance(argument, dict) and argument.get(\"source\") == \"answers\":\n option_values = (\n self.questionnaire_schema.answer_id_to_option_values_map.get(\n argument[\"identifier\"]\n )\n )\n else:\n values = argument if isinstance(argument, list) else [argument]\n\n if values and option_values:\n for value in values:\n # Null values are allowed and will not exist in answer options\n if value and value not in option_values:\n self.add_error(\n self.VALUE_DOESNT_EXIST_IN_ANSWER_OPTIONS,\n value=value,\n answer_options=list(option_values),\n )",
"def validate_options(subscription_key, text):\n if not subscription_key or len(subscription_key) == 0:\n print 'Error: Warning the option subscription_key should contain a string.'\n print USAGE\n sys.exit(3)\n if not text or len(text) == 0:\n print 'Error: Warning the option text should contain a string.'\n print USAGE\n sys.exit(3)",
"def accept(self):\n \n self.key = self.keyEdit.text()\n if self.key == '':\n print(\"Must set parameter name!\")\n QDialog.reject(self)\n return\n\n try:\n self.weight = float(self.weightEdit.text())\n except ValueError:\n print(\"Must set weight!\")\n QDialog.reject(self)\n return\n\n if self.rangeRadioButton.isChecked():\n # Return min and max as float values\n self.type = 'range'\n self.val1, self.val2 = self.minEdit.text(), self.maxEdit.text()\n\n # If either is blank, use + or - inf\n self.val1 = -np.inf if self.val1 == '' else float(self.val1)\n self.val2 = np.inf if self.val2 == '' else float(self.val2)\n if self.val1 >= self.val2:\n print(\"Max must be greater than min!\")\n QDialog.reject(self)\n return\n if np.isinf(self.val1) and np.isinf(self.val2):\n print(\"Must set either min or max!\")\n QDialog.reject(self)\n return\n \n elif self.valueRadioButton.isChecked():\n # Return the value as-is\n self.type = 'value'\n self.val1 = self.valueEdit.text()\n if self.val1 == '':\n print(\"Must set comparison value!\")\n QDialog.reject(self)\n return\n\n elif self.boolRadioButton.isChecked():\n # Return True or False as a bool value\n self.type = 'bool'\n self.val1 = self.boolComboBox.currentText().upper() == 'TRUE'\n\n QDialog.accept(self)",
"def _check_kwargs(self):\n valid_kw = {\n 'hf_type': 'str',\n 'hierarchy': 'bool',\n 'smooth': 'bool',\n 'water_level': 'float',\n # Object modifier kw\n 'no_shadow': 'bool',\n 'no_image': 'bool',\n 'no_reflection': 'bool',\n 'inverse': 'bool',\n 'double_illuminate': 'bool',\n 'hollow': 'bool'\n }\n\n self._validate_kwargs(valid_kw)\n\n valid_types = [\n 'gif', 'tga', 'pot', 'png', 'pgm',\n 'ppm', 'jpeg', 'tiff', 'sys', 'function'\n ]\n self._checkKwargValue('hf_type', valid_types)",
"def check(input, options):\r\n expected = [(o, o) for o in options]\r\n self.assertEqual(f(input), expected)",
"def add_options(self, options):\n if options and self.check_options(options):\n self._options = options\n self._alternatives = widgets.RadioButtons(options=options,\n description='',\n disabled=False,\n layout=widgets.Layout(width='100%'))\n\n self.options_status = 'OK'\n else:\n self._options = None\n self._alternatives = None\n self.options_status = 'X'",
"def choices(self, key, *args):\n return And(lambda n: n in args, error='%s should be in [%s]!' % (key, str(args)))",
"def _check_args(self, args):\n if len(args) == 0:\n print(\"No parameters provided.\")\n return False\n else:\n return True",
"def check_arguments(self):\n # only four test operation is permitted, if given anything apart from this,\n # then it should print error message.\n if not (\n (self.args.file is None)\n and ((self.args.testfiles is None or self.args.hostname is None))\n ):\n action = None\n if self.set_action_cmd(action) is not None:\n # the operation is checked in above function\n return None\n\n self.logger.error(\n colorama.Fore.RED\n + \"Arguments not given correctly, Please refer help message\",\n extra=self.log_detail,\n )\n self.parser.print_help()\n sys.exit(1)"
] |
[
"0.63287705",
"0.59502095",
"0.5938727",
"0.5835922",
"0.5808001",
"0.57984513",
"0.5779106",
"0.56867117",
"0.56511545",
"0.5647488",
"0.5631126",
"0.551938",
"0.5463903",
"0.54584664",
"0.5452936",
"0.54190683",
"0.5414275",
"0.54088724",
"0.539896",
"0.539767",
"0.5382266",
"0.53762203",
"0.5367883",
"0.53390414",
"0.5329774",
"0.5323693",
"0.53176504",
"0.53119755",
"0.53093594",
"0.5309168"
] |
0.6168521
|
1
|
Test draw two (nondeterministic).
|
def test_draw_two(self):
f = txn_oracle.draw_two
for _ in range(1000):
max_n = random.randint(4, 20)
i, j = f(max_n)
assert i != j
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def test_simulate_draw():\n # TODO: Use a proper testing framework\n TESTS = [\n ([], []),\n (['A'], []),\n (['A', 'B', 'C', 'D'], [('A', 'C'), ('D', 'B'), ('A', 'B'), ('C', 'D'), ('A', 'D'), ('B', 'C')]),\n (['A', 'B', 'C', 'D', 'E'], [('A', 'E'), ('B', 'C'), ('A', 'D'), ('E', 'C'), ('A', 'C'), ('D', 'B'), ('A', 'B'), ('D', 'E'), ('B', 'E'), ('C', 'D')]),\n ]\n for teams, expected_out in TESTS:\n # print(teams)\n ret = simulate_draw(teams)\n assert ret == expected_out\n\n #test_simulate_draw()\n #displays_simulated_draws(['1', '2', '3', '4', '5'])",
"def testDraw():\n deleteMatches()\n deletePlayers()\n registerPlayer(\"Pikachu\")\n registerPlayer(\"Charmander\")\n registerPlayer(\"Bulbasaur\")\n registerPlayer(\"Squirtle\")\n standings = playerStandings()\n [id1, id2, id3, id4] = [row[0] for row in standings]\n reportMatch(id1, id2, True)\n reportMatch(id3, id4, True)\n standings = playerStandings()\n if not (standings[0][2]==standings[1][2]==standings[2][2]==standings[3][2]):\n raise ValueError(\n \"Players should have the same number of points after drawing\"\n )\n\n print \"3. Draw is recorded properly.\"",
"async def simulate_draw(teams):\n if len(teams) % 2 == 0:\n return await simulate_even_draw(teams)\n else:\n return await simulate_odd_draw(teams)",
"def testDraw(self):\n self.initDeck([1,2,3,4,5,6,7,8,9,10])\n\n card = self.testDeck.draw()\n assert len(self.testDeck.deck) == 9, \"Test Deck did not reduce\"\n assert len(self.testDeck.discardPile) == 1, \"card was not added to the discard pile\" \n assert self.testDeck.discardPile[0] == card, \"Card in the discardPile is not the same card that was drawned\"\n card = self.testDeck.draw()\n assert len(self.testDeck.deck) == 8, \"Test Deck did not reduce\"\n assert len(self.testDeck.discardPile) == 2, \"card was not added to the discard pile\"",
"def test_failed_draw_case():\n hands = draw_hands(4)[0]\n start_tile = pick_start_tile(hands)\n\n\n # testing that I'm handling weird start players right\n fail_draws = 0\n good_draws = 0\n for i in range(10000):\n hands = draw_hands(4)[0]\n start_tile = pick_start_tile(hands)[0]\n if start_tile is None:\n fail_draws += 1\n else:\n good_draws += 1\n\n return fail_draws, good_draws",
"def test_single_game_works(self):\n sim = ss.Simulation(seed=154)\n game1 = sim.single_game()\n sim = ss.Simulation(seed=79)\n game2 = sim.single_game()\n assert game1 != game2, 'Your method single_game is not working.'",
"def test_draw(self):\r\n deck_size = 3\r\n d = Deck(deck_size)\r\n for _ in range(deck_size):\r\n d.draw()\r\n self.assertRaises(ValueError, d.draw)\r\n d.shuffle_in([1])\r\n d.draw()\r\n self.assertRaises(ValueError, d.draw)",
"def test_rectangle_yank(self):\n before_b = \"\"\"\\\n before\n aaaxxxbbb\n aaaxxxbbb\n aaaxxxbbb\n aaaxxxbbb\n after\n \"\"\"\n after_b = \"\"\"\\\n before\n aaaY1Ybbb\n aaaY2Ybbb\n aaaY3Ybbb\n aaaY4Ybbb\n after\n \"\"\"\n # A hack. The command tests for g.app.unitTesting.\n g.app.unitTesting = True\n try:\n self.run_test(\n before_b=before_b,\n after_b=after_b,\n before_sel=(\"2.3\", \"5.6\"),\n after_sel=(\"2.3\", \"5.6\"),\n command_name=\"rectangle-yank\",\n )\n finally:\n g.app.unitTesting = False",
"def test_equal6():\n x = randtool(\"float\", -10, 10, [3, 3, 3, 1])\n y = randtool(\"float\", -10, 10, [3, 3, 1])\n res = np.equal(x, y)\n obj.run(res=res, x=x, y=y)",
"def test_no_ops_draws(self):\r\n dev = qml.device(\"default.qubit\", wires=3)\r\n\r\n @qml.beta.qnode(dev)\r\n def qnode():\r\n return qml.expval(qml.PauliX(wires=[0]) @ qml.PauliX(wires=[1]) @ qml.PauliX(wires=[2]))\r\n\r\n res = qml.draw(qnode)()\r\n expected = [\r\n \" 0: ──╭┤ ⟨X ⊗ X ⊗ X⟩ \\n\",\r\n \" 1: ──├┤ ⟨X ⊗ X ⊗ X⟩ \\n\",\r\n \" 2: ──╰┤ ⟨X ⊗ X ⊗ X⟩ \\n\",\r\n ]\r\n\r\n assert res == \"\".join(expected)",
"def displays_simulated_draws(teams):\n for gm in simulate_draw(teams):\n a, b = random.sample(gm, len(gm))\n print(a + ' plays ' + b)",
"async def simulate_even_draw(teams):\n half_len = int(len(teams)/2)\n arr1 = [i for i in range(half_len)]\n arr2 = [i for i in range(half_len, len(teams))][::-1]\n matches = []\n for i in range(len(teams)-1):\n arr1.insert(1, arr2.pop(0))\n arr2.append(arr1.pop())\n for a, b in zip(arr1, arr2):\n matches.append((teams[a], teams[b]))\n return matches",
"def test_single_quadrant(self):",
"def test_random_state_transfer(self):\r\n class Graph:\r\n def __init__(self, seed=123):\r\n self.rng = RandomStreams(seed)\r\n self.y = self.rng.uniform(size=(1,))\r\n g1 = Graph(seed=123)\r\n f1 = function([], g1.y)\r\n g2 = Graph(seed=987)\r\n f2 = function([], g2.y)\r\n\r\n for (su1, su2) in zip(g1.rng.state_updates, g2.rng.state_updates):\r\n su2[0].set_value(su1[0].get_value())\r\n\r\n numpy.testing.assert_array_almost_equal(f1(), f2(), decimal=6)",
"def comparison_test_2():\n for pose in SE2.interesting_points():\n se2a = se2_from_SE2(pose)\n se2b = se2_from_SE2_slow(pose)\n # printm('pose', pose, 'se2a', se2a, 'se2b', se2b)\n assert_allclose(se2a, se2b, atol=1e-8)",
"def testDrawEdge(self):\n (w,h) = self.im8_1.getSize()\n \n for thick in range(10):\n self.im8_1.reset()\n drawEdge(self.im8_1, thick)\n self.im8_3.fill(255)\n drawSquare(self.im8_3, (thick, thick, w-1-thick, h-1-thick), 0)\n (x,y) = compare(self.im8_1, self.im8_3, self.im8_2)\n self.assertTrue(x<0)",
"def expected(x, y):",
"def expected(x, y):",
"def expected(x, y):",
"def test_two_game(self):\n self.choice.side_effect = [\"ant\", \"baboon\"]\n self.input.side_effect = list(\"ant\" \"y\" \"babon\" \"n\")\n\n gallows.main()\n\n self.xprint.assert_any_call('Yes! The secret word is \"ant\"! '\n 'You have won!')\n self.xprint.assert_any_call('Yes! The secret word is \"baboon\"! '\n 'You have won!')",
"async def simulate_odd_draw(teams):\n half_len = int((len(teams)+1)/2)\n arr1 = [i for i in range(half_len)]\n arr2 = [i for i in range(half_len, len(teams)+1)][::-1]\n matches = []\n for i in range(len(teams)):\n arr1.insert(1, arr2.pop(0))\n arr2.append(arr1.pop())\n for a, b in zip(arr1, arr2):\n if len(teams) not in (a, b):\n matches.append((teams[a], teams[b]))\n return matches",
"def comparison_test():\n for pose in SE2.interesting_points():\n se2 = se2_from_SE2(pose)\n SE2a = SE2_from_se2_slow(se2)\n SE2b = SE2_from_se2(se2)\n # printm('pose', pose, 'se2', se2)\n # printm('SE2a', SE2a, 'SE2b', SE2b)\n SE2.assert_close(SE2a, pose)\n # print('SE2a = pose Their distance is %f' % d)\n SE2.assert_close(SE2b, pose)\n # print('SE2b = pose Their distance is %f' % d)\n assert_allclose(SE2a, SE2b, atol=1e-8, err_msg=\"SE2a != SE2b\")\n assert_allclose(SE2a, pose, atol=1e-8, err_msg=\"SE2a != pose\")\n assert_allclose(SE2b, pose, atol=1e-8, err_msg=\"SE2b != pose\")",
"def test_equal7():\n x = randtool(\"float\", -10, 10, [3, 3, 1])\n y = randtool(\"float\", -10, 10, [3, 3, 3, 1])\n res = np.equal(x, y)\n obj.run(res=res, x=x, y=y)",
"def test_equal(self):\r\n\r\n a_players = [ZeroPlayer(1), ZeroPlayer(2)]\r\n a_x_dist = 3\r\n a_y_dist = 3\r\n a_num_to_win = 1\r\n a_game = Game(a_players, a_x_dist, a_y_dist, a_num_to_win)\r\n\r\n b_players = [ZeroPlayer(1), ZeroPlayer(2)]\r\n b_x_dist = 3\r\n b_y_dist = 3\r\n b_num_to_win = 1\r\n b_game = Game(b_players, b_x_dist, b_y_dist, b_num_to_win)\r\n\r\n c_players = [ZeroPlayer(1), ZeroPlayer(2)]\r\n c_x_dist = 3\r\n c_y_dist = 3\r\n c_num_to_win = 1\r\n c_game = Game(c_players, c_x_dist, c_y_dist, c_num_to_win)\r\n\r\n self.assertTrue(b_game == a_game == c_game)\r\n\r\n a_game.play_game()\r\n b_game.play_game()\r\n\r\n self.assertTrue(a_game == b_game)\r\n self.assertFalse(c_game == a_game)\r\n\r\n c_game.play_game()\r\n\r\n self.assertTrue(b_game == a_game == c_game)",
"def test_equal12():\n x = np.array([[True, False, True], [True, False, True], [True, False, True]])\n y = np.array([[True, False, True], [False, False, False], [True, True, False]])\n res = np.equal(x, y)\n obj.run(res=res, x=x, y=y)",
"def test_GenerateDigraphOrder(self):\n\n src = EmptyImage()\n tgt = EmptyImage()\n block_image_diff = BlockImageDiff(tgt, src)\n\n transfers = block_image_diff.transfers\n t0 = Transfer(\n \"t1\", \"t1\", RangeSet(\"10-15\"), RangeSet(\"0-5\"), \"move\", transfers)\n t1 = Transfer(\n \"t2\", \"t2\", RangeSet(\"20-25\"), RangeSet(\"0-7\"), \"move\", transfers)\n t2 = Transfer(\n \"t3\", \"t3\", RangeSet(\"30-35\"), RangeSet(\"0-4\"), \"move\", transfers)\n t3 = Transfer(\n \"t4\", \"t4\", RangeSet(\"0-10\"), RangeSet(\"40-50\"), \"move\", transfers)\n\n block_image_diff.GenerateDigraph()\n t3_goes_after_copy = t3.goes_after.copy()\n\n # Elements in the set must be in the transfer evaluation order.\n elements = list(t3_goes_after_copy)\n self.assertEqual(t0, elements[0])\n self.assertEqual(t1, elements[1])\n self.assertEqual(t2, elements[2])\n\n # Now switch the order of t0, t1 and t2.\n transfers[0], transfers[1], transfers[2] = (\n transfers[2], transfers[0], transfers[1])\n t3.goes_after.clear()\n t3.goes_before.clear()\n block_image_diff.GenerateDigraph()\n\n # The goes_after must be different from last run.\n self.assertNotEqual(t3_goes_after_copy, t3.goes_after)\n\n # Assert that each element must agree with the transfer order.\n elements = list(t3.goes_after)\n self.assertEqual(t2, elements[0])\n self.assertEqual(t0, elements[1])\n self.assertEqual(t1, elements[2])",
"def test_draw(self):\n pen = m.Pen('yo')\n\n self.assertEqual(pen.draw(), 'Drawing with a pen')",
"def testPreferFewerSwaps(self):\n data = (((1, 'C'), (5, 'B')),\n ((2, 'A'), (6, 'A')),\n ((3, 'C'), (7, 'C')),\n ((4, 'B'), (8, 'D')))\n result = [(1, 5), (2, 7), (3, 6), (4, 8)]\n self.assertEqual(result, self.draw(data))\n return self.draw(data)",
"def test_correct_output_provided(self):\n # Create a subTest for current circuit drawer\n for draw_method in self.draw_methods:\n with self.subTest('Test calling of the {} draw method'.format(draw_method),\n draw_method=draw_method):\n\n # Patch function corresponding to the current circuit drawer such that\n # it does nothing\n with patch.object(_cv, self.draw_methods[draw_method], return_value=None)\\\n as mock_draw_method:\n\n # Check that corresponding function was called once with the correct arguments\n circuit_drawer(None, output=draw_method)\n mock_draw_method.assert_called_once_with(None, **self.calls[draw_method])",
"def test_single_game_seed_works(self):\n sim = ss.Simulation(seed=23)\n game1 = sim.single_game()\n sim = ss.Simulation(seed=23)\n game2 = sim.single_game()\n assert game1 == game2, 'Your seed in Simulation class is not working.'"
] |
[
"0.701766",
"0.6821074",
"0.64297235",
"0.637359",
"0.6368454",
"0.62849975",
"0.61145186",
"0.60920817",
"0.608275",
"0.6082548",
"0.60510737",
"0.60502326",
"0.5988041",
"0.5981341",
"0.5977884",
"0.5976905",
"0.5951073",
"0.5951073",
"0.5951073",
"0.5949259",
"0.592096",
"0.58393764",
"0.58208716",
"0.5814637",
"0.5811475",
"0.57905364",
"0.57870126",
"0.578273",
"0.5771091",
"0.5744093"
] |
0.76903397
|
0
|
Checks for differences between ActivityType instance and upstream version
|
def _diff(self) -> ModelDiff:
try:
description = self.connection.describe_activity_type(self.domain.name, self.name, self.version)
except SWFResponseError as err:
if err.error_code == "UnknownResourceFault":
raise DoesNotExistError("Remote ActivityType does not exist")
raise ResponseError(err.body["message"])
info = description["typeInfo"]
config = description["configuration"]
return ModelDiff(
("name", self.name, info["activityType"]["name"]),
("version", self.version, info["activityType"]["version"]),
("status", self.status, info["status"]),
("description", self.description, info["description"]),
("creation_date", self.creation_date, info["creationDate"]),
("deprecation_date", self.deprecation_date, info["deprecationDate"]),
("task_list", self.task_list, config["defaultTaskList"]["name"]),
(
"task_heartbeat_timeout",
self.task_heartbeat_timeout,
config["defaultTaskHeartbeatTimeout"],
),
(
"task_schedule_to_close_timeout",
self.task_schedule_to_close_timeout,
config["defaultTaskScheduleToCloseTimeout"],
),
(
"task_schedule_to_start_timeout",
self.task_schedule_to_start_timeout,
config["defaultTaskScheduleToStartTimeout"],
),
(
"task_start_to_close_timeout",
self.task_start_to_close_timeout,
config["defaultTaskStartToCloseTimeout"],
),
)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def test__ActivityMetadataBase__eq():\n activity_metadata = ActivityMetadataBase()\n \n vampytest.assert_eq(activity_metadata, activity_metadata)\n vampytest.assert_ne(activity_metadata, object())",
"def type(self, type):\n allowed_values = [\"android\", \"ios\"]\n if type.lower() not in map(str.lower, allowed_values):\n # print(\"Invalid value for type -> \" + type)\n self._type = \"outdated_sdk_version\"\n else:\n self._type = type",
"def _validate_type(self):\n # TODO: add transformation logic so that we don't have to transform outputs at every place they are used, including v1 back compat support\n if not spec_type_is_parameter(self.type):\n type_utils.validate_bundled_artifact_type(self.type)",
"def __validate_upgrade_type(self):\n if self.upgrade_type not in self.upgrade_function.keys():\n self.fail(\"Unsupported upgrade_type: %s\" % self.upgrade_type)",
"def checkDiffTypes(self):\n count = 0\n for t in self.types:\n if t > 0:\n count = count + 1\n return count",
"def _validate_type(self) -> None:\n # TODO: add transformation logic so that we don't have to transform inputs at every place they are used, including v1 back compat support\n if not spec_type_is_parameter(self.type):\n type_utils.validate_bundled_artifact_type(self.type)",
"def test_superType(self):\n self.assertTrue(ChangeType().superType is not None)",
"def check_unstaged_changes(self):\n pass",
"def validate_types(self):\n for req in self.requests:\n required_types = req.get_required_types()\n available_types = self.substrate.get_types()\n if not (required_types <= available_types):\n print required_types - available_types, ' missing'\n return False\n return True",
"def test_do_check_event_type(self):\n self.assertEqual(self.a.get_type(), None)\n self.assertEqual(self.b.get_type(), None)\n self.assertTrue(self.a.do_check_event_type(self.a))\n\n self.a = +self.a\n self.assertFalse(self.a.do_check_event_type(self.b))",
"def exists(self) -> bool:\n self.connection.describe_activity_type(self.domain.name, self.name, self.version)\n return True",
"def ongeza(self, type_):\n switch = {\n 'm': semver.bump_major,\n 'n': semver.bump_minor,\n 'p': semver.bump_patch,\n 'major': semver.bump_major,\n 'minor': semver.bump_minor,\n 'patch': semver.bump_patch}\n\n new_version = switch.get(type_)(self.version)\n\n if new_version in set(self.versions):\n self.logger.error('version `%s` already present', new_version)\n new_version = None\n\n return new_version",
"def CheckType(self, *args, **kwargs):\n pass",
"def test_update_activity(self):\n pass",
"def _check_type_compatibility(self, type_name1, type_name2,\n operation):\n if type_name1 != type_name2:\n raise TypeCompatibilityError(type_name1, type_name2, operation)",
"def test_updated_type(self):\n\n base_model = BaseModel()\n self.assertTrue(base_model.updated_at, datetime.datetime)",
"def verify_activity(activity_text):\n activity = ActivityParser13.parse_string_in_scope(\n activity_text, verify.Activity().scope, 'activity')\n assert activity\n verifier = verify.Verifier()\n verifier.verify_activity_instance(activity, 'test')",
"def _check_type(self, new_value):\n raise NotImplementedError",
"def is_valid_version(self):\n pass",
"def is_legacy(self):\n return not self.xmlnode.hasProp(\"type\")",
"def check_stability(self):",
"def test_update_activity_occurrence_status(self):\n pass",
"def test_issubclass(self):\n self.assertTrue(issubclass(self.rev.__class__, BaseModel), True)",
"def test_equality(self):\n tools.eq_(self.old_manifest, load_manifest(StringIO(old_manifest)))",
"def test_ticket_type_change_error_bad_type(self):\n test_name = sys._getframe().f_code.co_name\n rv, output = self._execute('ticket_type change bad_type changed_type')\n self.assertEqual(2, rv)\n self.assertEqual(self.expected_results[test_name], output)",
"def alter(self, instance, activity, **kwargs):\n return activity",
"def check_type(self):\n return True",
"def test_difference_id(self):\n self.assertFalse(\n self.factory.create_type('iphone') is self.factory.create_type(\n 'iphone'))",
"def test_change_asset_type_assignment_rule(self):\n pass",
"def test_a(self):\n v1 = versions.Version(version='1.2.3', name='foo')\n v2 = versions.Version(version='1.2.3', name='bar')\n\n self.assertFalse(v1 < v2)\n self.assertFalse(v2 < v1)"
] |
[
"0.5724631",
"0.55525714",
"0.5368454",
"0.5284736",
"0.5281542",
"0.5276441",
"0.52265596",
"0.5200576",
"0.5189388",
"0.5187152",
"0.5167561",
"0.5157312",
"0.51186454",
"0.5118532",
"0.5112942",
"0.5107356",
"0.5081232",
"0.50169253",
"0.4989162",
"0.49855864",
"0.49466124",
"0.49344927",
"0.4913271",
"0.49122784",
"0.49121815",
"0.49103966",
"0.49086797",
"0.49012864",
"0.48984054",
"0.48919526"
] |
0.58819556
|
0
|
Checks if the ActivityType exists Amazon-side
|
def exists(self) -> bool:
self.connection.describe_activity_type(self.domain.name, self.name, self.version)
return True
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def exists_intent_action(self, intent_keyword):\n pass",
"def is_start(self, activity) -> bool:\n return activity == self.activity_concept_name(TRACE_START)",
"def is_asset_based_activity(self):\n return bool(self._my_map['assetIds'])",
"def check_action_type_exists(\n action_type_id: int\n) -> None:\n if not db.session.query(db.exists().where(models.ActionType.id == action_type_id)).scalar():\n raise errors.ActionTypeDoesNotExistError()",
"def get_type(self):\n types = dict(ACTIVITY_TYPES)\n return types.get(self.activity_type, \"N/A\")",
"def check_activity_existence(self, searched_activity_id):\n self.__load_activities_from_file_into_memory()\n return super().check_activity_existence(searched_activity_id)",
"def get_activityType(activity_task):\n try:\n return activity_task[\"activityType\"][\"name\"]\n except KeyError:\n # No activityType found\n return None",
"def is_assessment_based_activity(self):\n return 'assessmentIds' in self._my_map and bool(self._my_map['assessmentIds'])",
"def _check_asset_apk(asset):\n return all(\n {\n asset['name'].endswith('.apk'),\n asset['content_type'] == 'application/vnd.android.package-archive',\n asset['state'] == 'uploaded',\n }\n )",
"def has_type(self, item_type):\n raise NotImplementedError()",
"def is_extension_activity(self, activity) -> bool:\n return self.is_start(activity) or self.is_end(activity)",
"def _is_valid_entity_type(self, entity_type):\n return entity_type in [\"artist\", \"song\", \"genre\"]",
"def is_activity_only(self):\n return self._tag == 'activity_only'",
"def _get_activity_type(self, video):\n\n activity_type = \"https://w3id.org/xapi/video/activity-type/video\"\n\n # When the video is a live we change the activity to webinar\n if video.is_live:\n activity_type = \"http://id.tincanapi.com/activitytype/webinar\"\n\n return activity_type",
"def convert_strava_activity_type(self, x):\n\n if x == \"Run\":\n return ActivityType.RUN\n elif x == \"Ride\":\n return ActivityType.BIKE\n else:\n return ActivityType.OTHER",
"def is_type(self, ent_type):\n # type: (str) -> bool\n # its always an entity ...\n if ent_type.lower() in ('entity', self.settings['_type'].lower()):\n return True\n else:\n return False",
"def activity_type(self, type_id):\r\n return activities.ActivityType(self, type_id)",
"def is_course_based_activity(self):\n return False",
"def _match_entry_type_tuple(code_entry, type_tuple):\n entry_type = code_entry['type']\n return entry_type in type_tuple",
"def validate_type(type):\n\n types_upper = [i.upper() for i in officeTypes]\n if type.upper() in types_upper:\n return True\n return False",
"def is_my_case(self, type_):\n return (\n isinstance(self.__apply_sequence(type_), self.declaration_class)\n )",
"def activity_inclusion(trace:Trace, activity1:str, activity2:str, parameters=None):\n\n if parameters is None:\n parameters = {}\n\n activity_key = parameters[PARAMETER_CONSTANT_ACTIVITY_KEY] if PARAMETER_CONSTANT_ACTIVITY_KEY in parameters else DEFAULT_NAME_KEY\n\n activity1_found = False\n activity2_found = False\n\n for event in trace:\n if(not activity1_found and event[activity_key] == activity1):\n activity1_found = True\n elif(not activity2_found and event[activity_key] == activity2):\n activity2_found = True\n\n if(activity1_found and activity2_found):\n return True\n\n return False",
"def is_applicable(self, context: Any) -> bool:\n pass",
"def filter_intent(self, intent: Intent):\n return True",
"async def activity(self, ctx:utils.Context, activity_type:str, *, name:str=None):\n\n if name:\n activity = discord.Activity(name=name, type=getattr(discord.ActivityType, activity_type.lower()))\n else:\n await self.bot.set_default_presence()\n return\n await self.bot.change_presence(activity=activity, status=self.bot.guilds[0].me.status)",
"def activitySearch (listAct,activity):\n \n for act in listAct:\n if (act.name == activity.name): \n return True",
"def _is_valid_account_type(account_type):\n return account_type in ACCOUNT_TYPE_FORMS.keys()",
"def assets(self, asset_type=None):\n assets = self.V().is_asset()\n if asset_type is None:\n return assets\n return assets.has('type', asset_type)",
"def check_status(item):\n if item[2] == 'annotation':\n return True\n else:\n return False",
"def __is_type_instance( self, instance_type ):\n for index, instance in enumerate(INSTANCE_TYPES):\n if instance == instance_type:\n return True\n return False"
] |
[
"0.62097484",
"0.56517756",
"0.55855805",
"0.55750483",
"0.55431366",
"0.5327626",
"0.5315846",
"0.5300829",
"0.5234638",
"0.52015376",
"0.51873296",
"0.5181711",
"0.5171451",
"0.5164309",
"0.51637644",
"0.5154588",
"0.5144253",
"0.51424676",
"0.51396215",
"0.512979",
"0.5112852",
"0.51005954",
"0.5072072",
"0.5070741",
"0.50454116",
"0.49795076",
"0.4954513",
"0.49493852",
"0.49492815",
"0.49207792"
] |
0.6902263
|
0
|
Tests updating aesthetic fields on Tenant
|
def test_tenant_update(sample_identity):
access_token, tenant, tenant_user, tc = sample_identity
tenant.name = "ilovebeansllc"
headers = {"Authorization": "Bearer " + access_token}
updated_tenant_request = id_schemas.TenantSchema().dump(tenant)
updated_tenant = tc.put(
f"api/v1/identity/tenant/{tenant.id}",
json=updated_tenant_request,
headers=headers,
)
assert updated_tenant.status_code == 200, "Tenant could not be updated"
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def test_tenant_user_aesthetic_update(sample_identity):\n access_token, tenant, tenant_user, tc = sample_identity\n headers = {\"Authorization\": \"Bearer \" + access_token}\n new_email = f\"{uuid.uuid4()}@c1.com\"\n new_first_name = str(uuid.uuid4())\n new_last_name = str(uuid.uuid4())\n updated_tenant_user = {\"first_name\": new_first_name, \"last_name\": new_last_name}\n update_request = tc.put(\n f\"api/v1/identity/tenant-user/{tenant_user.id}\",\n json=updated_tenant_user,\n headers=headers,\n )\n assert update_request.status_code == 200, \"Update Failed with non 200 error code\"\n assert update_request.json[\"data\"][\"first_name\"] == new_first_name\n assert update_request.json[\"data\"][\"last_name\"] == new_last_name",
"def test_tenant_user_change_tenant(sample_identity):\n access_token, tenant, tenant_user, tc = sample_identity\n # Create a new Tenant\n new_tenant = identity.Tenant()\n new_tenant.name = \"Aperture Science\"\n db.session.add(new_tenant)\n db.session.commit()\n # Create a Tenant Specific admin role\n new_special_role = authorization.Role()\n # Assign ability to create a user on specific tenant to the new role but\n # not the ability to create user on ANY tenant\n can_create_tenant_user = authorization.Permission.query.filter_by(\n name=authorization.PermissionType.CAN_CREATE_TENANT_USER.value\n ).first()\n new_special_role.permissions.append(can_create_tenant_user)\n db.session.add(new_special_role)\n db.session.commit()\n # Create a new TenantUser assigned to new Tenant\n new_tenant_user = identity.TenantUser()\n new_tenant_user.username = \"gordonfreeman\"\n new_tenant_user.tenant_id = new_tenant.id\n new_tenant_user.password = \"1234\"\n new_tenant_user.roles.append(new_special_role)\n db.session.add(new_tenant_user)\n db.session.commit()\n # Login new user\n new_access_token = tc.post(\n \"api/v1/authentication/login\",\n json={\"username\": new_tenant_user.username, \"password\": \"1234\"},\n ).json[\"data\"][\"access_token\"]\n\n # Try to re-assign original tenant_user to new tenant\n headers = {\"Authorization\": \"Bearer \" + new_access_token}\n tenant_user_json = id_schemas.TenantUserSchema().dump(tenant_user)\n tenant_user_json[\"tenant_id\"] = new_tenant_user.tenant_id\n response = tc.put(\n f\"api/v1/identity/tenant-user/{tenant_user.id}\",\n json=tenant_user_json,\n headers=headers,\n )\n # Assert that permission is blocked\n assert response.status_code == 403, \"Tenant Permission assignment not blocking\"\n\n # Login with admin user\n new_access_token = tc.post(\n \"api/v1/authentication/login\",\n json={\"username\": tenant_user.username, \"password\": \"1234\"},\n ).json[\"data\"][\"access_token\"]\n # Attempt to Change tenant of new_tenant_user\n headers = {\"Authorization\": \"Bearer \" + new_access_token}\n tenant_user_json = id_schemas.TenantUserSchema().dump(new_tenant_user)\n tenant_user_json[\"tenant_id\"] = tenant.id\n response = tc.put(\n f\"api/v1/identity/tenant-user/{new_tenant_user.id}\",\n json=tenant_user_json,\n headers=headers,\n )\n assert response.status_code == 200, \"Tenant change permission blocking\"",
"def test_update(self):\n tz = pytz.timezone(settings.TIME_ZONE)\n self.assertFalse(self.user1.o365_licence)\n url = '/api/users/{}/'.format(self.user1.ad_guid)\n data = {\n 'Surname': 'Lebowski',\n 'title': 'Bean Counter',\n 'o365_licence': True,\n\n 'email' : '[email protected]' ,\n 'name' : 'Mike' ,\n 'username' : 'MikeLebowski' ,\n 'ad_guid' : '123',\n 'expiry_date' : '2019-03-12',\n 'given_name' : 'Mike',\n #'Enabled' :'True',\n 'active' : True,\n 'deleted' : False,\n\n\n\n }\n response = self.client.put(url, json.dumps(data), content_type='application/json')\n self.assertEqual(response.status_code, 202)\n user = DepartmentUser.objects.get(pk=self.user1.pk) # Refresh from db\n self.assertEqual(user.surname, data['Surname'])\n self.assertEqual(user.title, data['title'])\n\n self.assertEqual(user.name , data['name'])\n self.assertEqual(user.email, data['email'])\n self.assertEqual(user.username, data['username'])\n\n #self.assertEqual(user.expiry_date, data['expiry_date'])\n\n self.assertEqual(user.ad_guid, data['ad_guid'])\n\n self.assertEqual(user.expiry_date, tz.localize(parse(data['expiry_date'])))\n\n self.assertEqual(user.given_name, data['given_name'])\n #self.assertEqual(user.active, data['Enabled'])\n self.assertEqual(user.active, data['active'])\n self.assertEqual(user.ad_deleted, data['deleted'])\n\n self.assertTrue(user.o365_licence)\n self.assertTrue(user.in_sync)",
"def test_client_tax_information_update(self):\n pass",
"def test_update_scenario(self):\n pass",
"def test_update_case(self):\n pass",
"def test_update_one(self):\n pass",
"def test_update(self):\n pass",
"def test_update(self):\n pass",
"def test_update(self):\n pass",
"def test_update_user(self):\n pass",
"def test_updating_dietitian_account(self):\n \n form_data = {\"fname\": \"Jill\", \"lname\": \"Jones\", \n \"email\": \"[email protected]\", \"street-address\": \"33 Blue St\", \n \"city\": \"San Francisco\", \"state\": \"CA\", \"zipcode\": \"43223\"}\n\n update_dietitian_account(1, form_data)\n\n dietitian = Dietitian.query.get(1)\n self.assertEqual(\"Jill\", dietitian.fname)",
"def test_update(self):\n user = self.custodian_1_user\n user_client = self.custodian_1_client\n urls = [reverse('api:user-detail', kwargs={'pk': user.pk})]\n new_first_name = \"New First Name\"\n data = {\n \"first_name\": new_first_name,\n }\n access = {\n \"forbidden\": [self.anonymous_client, self.readonly_client, self.custodian_2_client],\n \"allowed\": [self.admin_client, user_client]\n }\n\n for client in access['forbidden']:\n for url in urls:\n self.assertIn(\n client.patch(url, data, format='json').status_code,\n [status.HTTP_401_UNAUTHORIZED, status.HTTP_403_FORBIDDEN]\n )\n\n for client in access['allowed']:\n for url in urls:\n new_first_name += '1'\n data['first_name'] = new_first_name\n self.assertEqual(\n client.patch(url, data, format='json').status_code,\n status.HTTP_200_OK\n )\n user.refresh_from_db()\n self.assertEqual(user.first_name, new_first_name)",
"def test_update(self):\n doctor = DoctorFactory.create(id=21)\n data = {'name': 'Joe'}\n self.assertNotEqual(doctor.name, data['name'])\n\n response = self.unath_client.put(reverse('doctor-detail', args=[21]), data=data)\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)\n\n response = self.client.put(reverse('doctor-detail', args=[21]), data=data)\n self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)",
"def test_user_update_request(self):\n pass",
"def test_post_update_admin(self):\n url = reverse('post-detail', kwargs={'pk': self.post.id})\n user_url = reverse('user-detail', kwargs={'pk': self.superuser.id})\n self.client.force_authenticate(user=self.superuser)\n title = 'Random New Title'\n body = 'Random New Body'\n response = self.client.put(url, {'title': title, 'body': body, 'user': user_url}, format='json')\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertIn(title, response.content)\n self.assertIn(body, response.content)\n self.assertIn(user_url, response.content)",
"def test_dashboards_v2_update(self):\n pass",
"def test_company_patch_permissions(self):\n companyPK = Company.objects.get(name=self.admin.profile.company.name).pk\n url = reverse('Company-detail', kwargs={'pk': companyPK + 1})\n data = {'name': 'NewTestCompany'}\n response = self.client.put(url, data, format='json')\n #This is 404 instead of 403 because there is no way to view a company\n #that you arent an employee of.\n self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)\n self.assertNotEqual(Company.objects.get(pk=companyPK).name,\n 'NewTestCompany')",
"def test_user_update_permissions(self):\n userPK = User.objects.get(username='c2e1').pk\n url = reverse('User-detail', kwargs={'pk': userPK})\n data = {'username': 'company1NewTest'}\n response = self.client.put(url, data, format='json')\n #This is 404 instead of 403 because there is no way to view a company\n #that you arent an employee of.\n self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)\n self.assertNotEqual(User.objects.get(pk=userPK).username,\n 'company1NewTest')\n data = {'username': 'company1NewTest2'}\n response = self.client.patch(url, data, format='json')\n #This is 404 instead of 403 because there is no way to view a company\n #that you arent an employee of.\n self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)\n self.assertNotEqual(User.objects.get(pk=userPK).username,\n 'company1NewTest2')",
"def test_client_bank_account_update(self):\n pass",
"def test_update_attribute_data(self):\n pass",
"def test_update_virtual_account_by_id(self):\n pass",
"def test_dietitian_edit_account(self):\n\n data = {\"fname\": \"Jill\", \"lname\": \"Jones\", \n \"email\": \"[email protected]\", \"street-address\": \"33 Blue St\", \n \"city\": \"San Francisco\", \"state\": \"CA\", \"zipcode\": \"43223\"}\n\n result = self.client.post(\"/dietitian/1/account/edit\", data=data,\n follow_redirects=True)\n\n self.assertEqual(result.status_code, 200)\n self.assertIn(b\"successfully updated\", result.data)",
"def test_update_record(self):\n pass",
"def test_update(self):\n self.client.force_authenticate(user=self.admin)\n\n data = {\n 'retreat': reverse(\n 'retreat:retreat-detail', args=[self.retreat.id]\n ),\n 'user': reverse('user-detail', args=[self.user2.id]),\n }\n\n response = self.client.put(\n reverse(\n 'retreat:waitqueuenotification-detail',\n kwargs={'pk': 1},\n ),\n data,\n format='json',\n )\n\n self.assertEqual(\n response.status_code,\n status.HTTP_405_METHOD_NOT_ALLOWED\n )",
"def test_website_companies_update(self):\n pass",
"def test_company_put_permissions(self):\n companyPK = Company.objects.get(name=self.admin.profile.company.name).pk\n url = reverse('Company-detail', kwargs={'pk': companyPK + 1})\n data = {'name': 'NewTestCompany', 'address': {'address1': '123 fake st',\n 'address2': 'fake address 2',\n 'city': 'nowhere', 'state': 'IN', 'zip': '90210'}}\n response = self.client.put(url, data, format='json')\n #This is 404 instead of 403 because there is no way to view a company\n #that you arent an employee of.\n self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)\n self.assertNotEqual(Company.objects.get(pk=companyPK).name,\n 'NewTestCompany')",
"def test_update(self):\n # this is tested graphically, as it is UI\n pass",
"def test_client_tax_information_partial_update(self):\n pass",
"def test_beneficiaries_update_content_validation_that_will_pass(self):\n print('the test function name: {}'.format(sys._getframe().f_code.co_name))\n post_body = {\n 'lastname': 'Dora',\n 'firstname': 'Jane1',\n 'nationality_country_iso_code': 'FRA',\n 'date_of_birth': '1970-07-01',\n 'country_of_birth_iso_code': 'FRA',\n 'gender': 'MALE',\n 'address': '42 Rue des fleurs',\n 'postal_code': '75000',\n 'city': 'Paris',\n 'country_iso_code': 'FRA',\n 'msisdn': '1123131413',\n 'email': '[email protected]',\n 'id_type': 'PASSPORT',\n 'id_country_iso_code': 'FRA',\n 'id_number': '1123131413',\n 'occupation': 'Teacher'\n }\n url = reverse('beneficiary:beneficiary-entity-by-id-update', kwargs={'pk': 1})\n response = self.client.put(url, data=post_body, content_type='application/json')\n\n # serialize all model object data\n beneficiaries = Beneficiary.objects.get(pk=1)\n serializer = BeneficiarySerializer(beneficiaries, many=False)\n self.assertEqual(response.json(), serializer.data)\n self.assertEqual(response.status_code, 200)"
] |
[
"0.74706066",
"0.70539707",
"0.70173454",
"0.6784828",
"0.66443217",
"0.6637554",
"0.6588656",
"0.6583764",
"0.6583764",
"0.6583764",
"0.6574935",
"0.6535326",
"0.6534278",
"0.65183234",
"0.6483002",
"0.64679134",
"0.6450146",
"0.6431829",
"0.6415476",
"0.64035213",
"0.6397389",
"0.6381455",
"0.63752556",
"0.63671255",
"0.6361323",
"0.63528377",
"0.6349959",
"0.6348803",
"0.63358426",
"0.6332224"
] |
0.7749024
|
0
|
Tests aesthetic updates for tenant_user
|
def test_tenant_user_aesthetic_update(sample_identity):
access_token, tenant, tenant_user, tc = sample_identity
headers = {"Authorization": "Bearer " + access_token}
new_email = f"{uuid.uuid4()}@c1.com"
new_first_name = str(uuid.uuid4())
new_last_name = str(uuid.uuid4())
updated_tenant_user = {"first_name": new_first_name, "last_name": new_last_name}
update_request = tc.put(
f"api/v1/identity/tenant-user/{tenant_user.id}",
json=updated_tenant_user,
headers=headers,
)
assert update_request.status_code == 200, "Update Failed with non 200 error code"
assert update_request.json["data"]["first_name"] == new_first_name
assert update_request.json["data"]["last_name"] == new_last_name
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def test_update_user(self):\n pass",
"def test_tenant_update(sample_identity):\n access_token, tenant, tenant_user, tc = sample_identity\n tenant.name = \"ilovebeansllc\"\n headers = {\"Authorization\": \"Bearer \" + access_token}\n updated_tenant_request = id_schemas.TenantSchema().dump(tenant)\n updated_tenant = tc.put(\n f\"api/v1/identity/tenant/{tenant.id}\",\n json=updated_tenant_request,\n headers=headers,\n )\n assert updated_tenant.status_code == 200, \"Tenant could not be updated\"",
"def test_tenant_user_change_tenant(sample_identity):\n access_token, tenant, tenant_user, tc = sample_identity\n # Create a new Tenant\n new_tenant = identity.Tenant()\n new_tenant.name = \"Aperture Science\"\n db.session.add(new_tenant)\n db.session.commit()\n # Create a Tenant Specific admin role\n new_special_role = authorization.Role()\n # Assign ability to create a user on specific tenant to the new role but\n # not the ability to create user on ANY tenant\n can_create_tenant_user = authorization.Permission.query.filter_by(\n name=authorization.PermissionType.CAN_CREATE_TENANT_USER.value\n ).first()\n new_special_role.permissions.append(can_create_tenant_user)\n db.session.add(new_special_role)\n db.session.commit()\n # Create a new TenantUser assigned to new Tenant\n new_tenant_user = identity.TenantUser()\n new_tenant_user.username = \"gordonfreeman\"\n new_tenant_user.tenant_id = new_tenant.id\n new_tenant_user.password = \"1234\"\n new_tenant_user.roles.append(new_special_role)\n db.session.add(new_tenant_user)\n db.session.commit()\n # Login new user\n new_access_token = tc.post(\n \"api/v1/authentication/login\",\n json={\"username\": new_tenant_user.username, \"password\": \"1234\"},\n ).json[\"data\"][\"access_token\"]\n\n # Try to re-assign original tenant_user to new tenant\n headers = {\"Authorization\": \"Bearer \" + new_access_token}\n tenant_user_json = id_schemas.TenantUserSchema().dump(tenant_user)\n tenant_user_json[\"tenant_id\"] = new_tenant_user.tenant_id\n response = tc.put(\n f\"api/v1/identity/tenant-user/{tenant_user.id}\",\n json=tenant_user_json,\n headers=headers,\n )\n # Assert that permission is blocked\n assert response.status_code == 403, \"Tenant Permission assignment not blocking\"\n\n # Login with admin user\n new_access_token = tc.post(\n \"api/v1/authentication/login\",\n json={\"username\": tenant_user.username, \"password\": \"1234\"},\n ).json[\"data\"][\"access_token\"]\n # Attempt to Change tenant of new_tenant_user\n headers = {\"Authorization\": \"Bearer \" + new_access_token}\n tenant_user_json = id_schemas.TenantUserSchema().dump(new_tenant_user)\n tenant_user_json[\"tenant_id\"] = tenant.id\n response = tc.put(\n f\"api/v1/identity/tenant-user/{new_tenant_user.id}\",\n json=tenant_user_json,\n headers=headers,\n )\n assert response.status_code == 200, \"Tenant change permission blocking\"",
"def test_user_update_request(self):\n pass",
"def test_update(self):\n tz = pytz.timezone(settings.TIME_ZONE)\n self.assertFalse(self.user1.o365_licence)\n url = '/api/users/{}/'.format(self.user1.ad_guid)\n data = {\n 'Surname': 'Lebowski',\n 'title': 'Bean Counter',\n 'o365_licence': True,\n\n 'email' : '[email protected]' ,\n 'name' : 'Mike' ,\n 'username' : 'MikeLebowski' ,\n 'ad_guid' : '123',\n 'expiry_date' : '2019-03-12',\n 'given_name' : 'Mike',\n #'Enabled' :'True',\n 'active' : True,\n 'deleted' : False,\n\n\n\n }\n response = self.client.put(url, json.dumps(data), content_type='application/json')\n self.assertEqual(response.status_code, 202)\n user = DepartmentUser.objects.get(pk=self.user1.pk) # Refresh from db\n self.assertEqual(user.surname, data['Surname'])\n self.assertEqual(user.title, data['title'])\n\n self.assertEqual(user.name , data['name'])\n self.assertEqual(user.email, data['email'])\n self.assertEqual(user.username, data['username'])\n\n #self.assertEqual(user.expiry_date, data['expiry_date'])\n\n self.assertEqual(user.ad_guid, data['ad_guid'])\n\n self.assertEqual(user.expiry_date, tz.localize(parse(data['expiry_date'])))\n\n self.assertEqual(user.given_name, data['given_name'])\n #self.assertEqual(user.active, data['Enabled'])\n self.assertEqual(user.active, data['active'])\n self.assertEqual(user.ad_deleted, data['deleted'])\n\n self.assertTrue(user.o365_licence)\n self.assertTrue(user.in_sync)",
"def test_update_useruser_uuid_put(self):\n pass",
"def test_can_update_user_profile(self):\n self.update_user()\n self.assertEqual(self.user.first_name, self.updated_data['first_name'])\n self.assertEqual(self.user.last_name, self.updated_data['last_name'])\n self.assertEqual(self.user.email, self.updated_data['email'])",
"def test_update(self):\n user = self.custodian_1_user\n user_client = self.custodian_1_client\n urls = [reverse('api:user-detail', kwargs={'pk': user.pk})]\n new_first_name = \"New First Name\"\n data = {\n \"first_name\": new_first_name,\n }\n access = {\n \"forbidden\": [self.anonymous_client, self.readonly_client, self.custodian_2_client],\n \"allowed\": [self.admin_client, user_client]\n }\n\n for client in access['forbidden']:\n for url in urls:\n self.assertIn(\n client.patch(url, data, format='json').status_code,\n [status.HTTP_401_UNAUTHORIZED, status.HTTP_403_FORBIDDEN]\n )\n\n for client in access['allowed']:\n for url in urls:\n new_first_name += '1'\n data['first_name'] = new_first_name\n self.assertEqual(\n client.patch(url, data, format='json').status_code,\n status.HTTP_200_OK\n )\n user.refresh_from_db()\n self.assertEqual(user.first_name, new_first_name)",
"def test_user_update_permissions(self):\n userPK = User.objects.get(username='c2e1').pk\n url = reverse('User-detail', kwargs={'pk': userPK})\n data = {'username': 'company1NewTest'}\n response = self.client.put(url, data, format='json')\n #This is 404 instead of 403 because there is no way to view a company\n #that you arent an employee of.\n self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)\n self.assertNotEqual(User.objects.get(pk=userPK).username,\n 'company1NewTest')\n data = {'username': 'company1NewTest2'}\n response = self.client.patch(url, data, format='json')\n #This is 404 instead of 403 because there is no way to view a company\n #that you arent an employee of.\n self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)\n self.assertNotEqual(User.objects.get(pk=userPK).username,\n 'company1NewTest2')",
"def test_patch_user(self):\n pass",
"def test_update_user(self):\n\n update_dict = dict(\n username='test_another_username',\n role='test_new_role',\n department='test_new_department'\n )\n\n # Update non-existing user\n updated = self.user_api.update_user(MAGEN_USER['user_uuid'], update_dict)\n self.assertTrue(updated.success)\n self.assertEqual(updated.count, 0)\n\n # Insert user in Database\n inserted = self.user_api.insert_user(MAGEN_USER)\n self.assertTrue(inserted.success)\n\n # Update existing user\n updated = self.user_api.update_user(MAGEN_USER['user_uuid'], update_dict)\n self.assertTrue(updated.success)\n self.assertEqual(updated.count, 1)\n # Verify that data was updated\n selected = self.user_api.get_user(MAGEN_USER['user_uuid'])\n self.assertTrue(selected.success)\n self.assertEqual(selected.documents['username'], update_dict['username'])\n self.assertEqual(selected.documents['role'], update_dict['role'])\n self.assertEqual(selected.documents['department'], update_dict['department'])",
"def test_user_is_really_updated():\n response = api_helper.get_user(user_id=pytest.test_user.id)\n check_user_data_in_response(response.json()[\"data\"][0])",
"def test_update(self):\n\n user = CustomUser.objects.get(email=\"[email protected]\")\n user.update(first_name=\"UpdatedName\", second_name=\"UpdatedSecondName\")\n\n self.assertEqual(user.first_name, \"UpdatedName\")\n self.assertEqual(user.second_name, \"UpdatedSecondName\")",
"def test_user_update(self):\n userPK = self.testUser.pk\n url = reverse('User-detail', kwargs={'pk': userPK})\n data = {'username': 'company1NewTest'}\n response = self.client.put(url, data, format='json')\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(User.objects.get(pk=userPK).username,\n 'company1NewTest')\n data = {'username': 'company1NewTest2'}\n response = self.client.patch(url, data, format='json')\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(User.objects.get(pk=userPK).username,\n 'company1NewTest2')",
"def test_user_update(self):\n self.client.login(username=self.teacher.username,\n password='1234')\n post = {'email': '[email protected]', 'first_name': 'Tim',\n 'last_name': 'Teacher'}\n response = self.client.post(self.update_url, post)\n updated_teacher = SchoolUser.objects.get(\n username=self.teacher.username)\n self.assertEqual(updated_teacher.email, post['email'])",
"def test_client_bank_account_update(self):\n pass",
"def testUpdateAccessAllowed(self):\n for user in (self.contributor, self.delegate, self.owner, self.root):\n response = self.runPut(user, sequencer=self.hiseq2000.sodar_uuid, data=self.post_data)\n self.response_200(response)\n data = json.loads(response.content.decode(\"utf-8\"))\n self.assertEqual(data[\"vendor_id\"], self.post_data[\"vendor_id\"])",
"def test_update_the_created_user():\n pytest.test_user.name += \"Updated\"\n response = api_helper.update_user(pytest.test_user)\n assert response.status_code == 200",
"def test_update(self, client, users):\n user = users[0]\n data = factory.build(dict, FACTORY_CLASS=UserFactory)\n url = reverse('users:update', args=(user.pk,))\n response = client.post(url, data)\n assert response.status_code == 302\n assert response.url == reverse('users:list')\n\n user.refresh_from_db()\n assert user.username == data['username']\n assert user.birthday == datetime.datetime.strptime(data['birthday'], '%Y-%m-%d').date()",
"def test_anonymize_user_data(api_client, resource_in_unit, user):\n user.first_name = 'testi_ukkeli'\n user.save()\n original_uuid = user.uuid\n original_email = user.email\n user_pk = user.pk\n\n SocialAccount.objects.create(user=user, uid=original_uuid, provider='helsinki')\n EmailAddress.objects.create(user=user, email=original_email)\n\n Reservation.objects.create(\n resource=resource_in_unit,\n begin='2015-04-04T09:00:00+02:00',\n end='2015-04-04T10:00:00+02:00',\n user=user,\n reserver_name='John Smith',\n event_subject='John\\'s welcome party',\n state=Reservation.CONFIRMED\n )\n # anonymize_user_data expects a queryset instead of single object\n test_user = get_user_model().objects.filter(first_name='testi_ukkeli')\n anonymize_user_data(modeladmin=None, request=None, queryset=test_user)\n assert get_user_model().objects.filter(first_name='testi_ukkeli').count() == 0\n reservation = Reservation.objects.get(resource=resource_in_unit)\n assert reservation.event_description == 'Sensitive data of this reservation has been anonymized by a script.'\n changed_user = get_user_model().objects.get(pk=user_pk)\n assert changed_user.uuid != original_uuid\n assert reservation.state == Reservation.CANCELLED\n assert not SocialAccount.objects.filter(user=user, uid=original_uuid).exists()\n assert not EmailAddress.objects.filter(user=user, email=original_email).exists()",
"def test_update_virtual_account_by_id(self):\n pass",
"def test_update_account_user(self):\n self._require_login()\n\n response = self.client.put('/v1/users/' +str(self.user.id)+'/',\n {\"username\": 'toni@malucao', \"password\": 'cidadeeee'},\n format='json')\n\n self.assertEqual(response.status_code, 200,\n 'Expected Response Code 200, received {0} instead.'.format(response.status_code))",
"def test_update_user_profile(setup_client, setup_user):\n client = setup_client\n user = setup_user\n payload = {\n \"name\": \"New name\",\n \"role\": \"Purchaser\",\n \"password\": \"New password\"\n }\n res = client.patch(ME_URL, payload)\n user.refresh_from_db()\n assert res.status_code == status.HTTP_200_OK\n assert user.name == payload[\"name\"]\n assert user.role == payload[\"role\"]\n assert user.check_password(payload[\"password\"])\n assert res.status_code == status.HTTP_200_OK",
"def test_update_user(self):\n token = self.authenticate_user(self.auth_user_data).data[\"token\"]\n response = self.client.put(self.user_url,\n self.user_data,\n HTTP_AUTHORIZATION=f'token {token}',\n format='json')\n self.assertEqual(response.status_code, status.HTTP_200_OK)",
"def test_api_user_put(self):\n pass",
"def test_resource_user_resource_change_user_patch(self):\n pass",
"def test_admin_update_user(self):\n resp = self.admin_create_user()\n reply = self.admin_login()\n token = reply['token']\n user = dict(\n name='Summer Lover',\n username='lover',\n password='Andela8',\n role='attendant'\n )\n resp = self.client.put(\n '/api/v1/users/2',\n content_type='application/json',\n data=json.dumps(user),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'user updated!')\n self.assertEqual(resp.status_code, 200)",
"def updateTenant(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')",
"def update_user():",
"def test_patch_user(self):\n new_user = self.make_user('new_user')\n url = reverse(\n 'projectroles:api_role_update',\n kwargs={'roleassignment': self.update_as.sodar_uuid},\n )\n patch_data = {'user': str(new_user.sodar_uuid)}\n response = self.request_knox(url, method='PATCH', data=patch_data)\n self.assertEqual(response.status_code, 400, msg=response.content)"
] |
[
"0.73280764",
"0.73153883",
"0.72682774",
"0.71767485",
"0.6941097",
"0.6702267",
"0.6653942",
"0.6599848",
"0.6599473",
"0.6586029",
"0.6533386",
"0.65045184",
"0.6487314",
"0.64579034",
"0.6445055",
"0.64442503",
"0.64317745",
"0.64147985",
"0.6383129",
"0.63338864",
"0.63273656",
"0.63238716",
"0.6300858",
"0.62806493",
"0.6258515",
"0.6247653",
"0.6230438",
"0.62153363",
"0.62074965",
"0.6201871"
] |
0.8055346
|
0
|
Tests authorization around limiting reassignment of TenantUsers by non-administrators
|
def test_tenant_user_change_tenant(sample_identity):
access_token, tenant, tenant_user, tc = sample_identity
# Create a new Tenant
new_tenant = identity.Tenant()
new_tenant.name = "Aperture Science"
db.session.add(new_tenant)
db.session.commit()
# Create a Tenant Specific admin role
new_special_role = authorization.Role()
# Assign ability to create a user on specific tenant to the new role but
# not the ability to create user on ANY tenant
can_create_tenant_user = authorization.Permission.query.filter_by(
name=authorization.PermissionType.CAN_CREATE_TENANT_USER.value
).first()
new_special_role.permissions.append(can_create_tenant_user)
db.session.add(new_special_role)
db.session.commit()
# Create a new TenantUser assigned to new Tenant
new_tenant_user = identity.TenantUser()
new_tenant_user.username = "gordonfreeman"
new_tenant_user.tenant_id = new_tenant.id
new_tenant_user.password = "1234"
new_tenant_user.roles.append(new_special_role)
db.session.add(new_tenant_user)
db.session.commit()
# Login new user
new_access_token = tc.post(
"api/v1/authentication/login",
json={"username": new_tenant_user.username, "password": "1234"},
).json["data"]["access_token"]
# Try to re-assign original tenant_user to new tenant
headers = {"Authorization": "Bearer " + new_access_token}
tenant_user_json = id_schemas.TenantUserSchema().dump(tenant_user)
tenant_user_json["tenant_id"] = new_tenant_user.tenant_id
response = tc.put(
f"api/v1/identity/tenant-user/{tenant_user.id}",
json=tenant_user_json,
headers=headers,
)
# Assert that permission is blocked
assert response.status_code == 403, "Tenant Permission assignment not blocking"
# Login with admin user
new_access_token = tc.post(
"api/v1/authentication/login",
json={"username": tenant_user.username, "password": "1234"},
).json["data"]["access_token"]
# Attempt to Change tenant of new_tenant_user
headers = {"Authorization": "Bearer " + new_access_token}
tenant_user_json = id_schemas.TenantUserSchema().dump(new_tenant_user)
tenant_user_json["tenant_id"] = tenant.id
response = tc.put(
f"api/v1/identity/tenant-user/{new_tenant_user.id}",
json=tenant_user_json,
headers=headers,
)
assert response.status_code == 200, "Tenant change permission blocking"
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def test_user_can_change_superuser(self):\n self.assertTrue(self.story.user_can_change(self.superuser))",
"def test_user_can_change_admin(self):\n self.assertTrue(self.story.user_can_change(self.admin_user))",
"def test_admin_get(self, *args, **kwargs):\n self.request.user.is_active = True\n self.request.user.is_registered = True\n self.request.user.is_superuser = False\n self.request.user.is_staff = True\n res = self.view.get(self.request, *args, **kwargs)\n nt.assert_equal(res.status_code, 200)",
"def test_admin_get(self, *args, **kwargs):\n self.request.user.is_active = True\n self.request.user.is_registered = True\n self.request.user.is_superuser = False\n self.request.user.is_staff = True\n res = self.view.get(self.request, *args, **kwargs)\n nt.assert_equal(res.status_code, 200)",
"def test_specify_non_default_tenant():\n pass",
"def test_super_admin_get(self, *args, **kwargs):\n self.request.user.is_active = True\n self.request.user.is_registered = True\n self.request.user.is_superuser = True\n self.request.user.is_staff = True\n res = self.view.get(self.request, *args, **kwargs)\n nt.assert_equal(res.status_code, 200)",
"def test_super_admin_get(self, *args, **kwargs):\n self.request.user.is_active = True\n self.request.user.is_registered = True\n self.request.user.is_superuser = True\n self.request.user.is_staff = True\n res = self.view.get(self.request, *args, **kwargs)\n nt.assert_equal(res.status_code, 200)",
"def test_access_levels(self):\n resource_id = 'protected-data'\n arn_prefix = \"arn:dcp:fus:us-east-1:dev:\"\n user = 'user_test_access_levels'\n group = 'group_test_access_levels'\n\n # create the resource to control\n resp = self.app.post(\n f'/v1/resource/{self.test_resource}/id/{resource_id}',\n headers=admin_headers)\n self.assertEqual(resp.status_code, 201)\n\n with self.subTest(\"Check that no one has access by listing who has access\"):\n resp = self.app.get(\n f'/v1/resource/{self.test_resource}/id/{resource_id}/members',\n headers=admin_headers)\n self.assertEqual(resp.status_code, 200)\n self.assertJSONEqual(resp.body, {'members': []})\n\n with self.subTest(\"Toggle user access.\"):\n # create a user\n resp = self.app.post(\n f'/v1/user',\n data=json.dumps({'user_id': user}),\n headers=admin_headers)\n self.assertEqual(resp.status_code, 201)\n\n # give a user access\n request_body = [\n {'member': user,\n 'member_type': 'user',\n 'access_level': 'read'}\n ]\n resp = self.app.put(\n f'/v1/resource/{self.test_resource}/id/{resource_id}/members',\n data=json.dumps(request_body),\n headers=admin_headers\n )\n self.assertEqual(resp.status_code, 200)\n\n # Check that the user has access by listing who has access\n resp = self.app.get(\n f'/v1/resource/{self.test_resource}/id/{resource_id}/members',\n headers=admin_headers)\n self.assertEqual(resp.status_code, 200)\n self.assertJSONEqual(resp.body, {'members': request_body})\n\n # Remove access for the user\n request_body = [\n {'member': user,\n 'member_type': 'user'}\n ]\n resp = self.app.put(\n f'/v1/resource/{self.test_resource}/id/{resource_id}/members',\n data=json.dumps(request_body),\n headers=admin_headers\n )\n self.assertEqual(resp.status_code, 200)\n\n # Check that the user does not have access by listing who has access\n resp = self.app.get(\n f'/v1/resource/{self.test_resource}/id/{resource_id}/members',\n headers=admin_headers)\n self.assertEqual(resp.status_code, 200)\n self.assertJSONEqual(resp.body, {'members': []})\n\n with self.subTest(\"Toggle group access.\"):\n # create a group\n resp = self.app.post(\n f'/v1/group',\n data=json.dumps({'group_id': group}),\n headers=admin_headers)\n self.assertEqual(resp.status_code, 201)\n\n # give a group access\n request_body = [\n {'member': group,\n 'member_type': 'group',\n 'access_level': 'read'}\n ]\n resp = self.app.put(\n f'/v1/resource/{self.test_resource}/id/{resource_id}/members',\n data=json.dumps(request_body),\n headers=admin_headers\n )\n self.assertEqual(resp.status_code, 200)\n\n # Check that the group has access by listing who has access\n resp = self.app.get(\n f'/v1/resource/{self.test_resource}/id/{resource_id}/members',\n headers=admin_headers)\n self.assertEqual(resp.status_code, 200)\n self.assertJSONEqual(resp.body, {'members': request_body})\n\n # Remove access for the group\n request_body = [\n {'member': group,\n 'member_type': 'group'}\n ]\n resp = self.app.put(\n f'/v1/resource/{self.test_resource}/id/{resource_id}/members',\n data=json.dumps(request_body),\n headers=admin_headers\n )\n self.assertEqual(resp.status_code, 200)\n\n # Check that the group does not have access by listing who has access\n resp = self.app.get(\n f'/v1/resource/{self.test_resource}/id/{resource_id}/members',\n headers=admin_headers)\n self.assertEqual(resp.status_code, 200)\n self.assertJSONEqual(resp.body, {'members': []})",
"def testUpdateAccessAllowed(self):\n for user in (self.contributor, self.delegate, self.owner, self.root):\n response = self.runPut(user, sequencer=self.hiseq2000.sodar_uuid, data=self.post_data)\n self.response_200(response)\n data = json.loads(response.content.decode(\"utf-8\"))\n self.assertEqual(data[\"vendor_id\"], self.post_data[\"vendor_id\"])",
"def test_change_permission(self):\r\n self.assertTrue(self.creator_admin.has_change_permission(self.request))\r\n\r\n self.request.user = self.user\r\n self.assertFalse(self.creator_admin.has_change_permission(self.request))",
"def test_allowed_if_superuser(self):\n\n @task_or_superuser_only\n def view(request):\n return HttpResponse(\"Hello\")\n\n class User(object):\n is_superuser = True\n is_authenticated = True\n\n request = self.factory.get(\"/\")\n request.user = None\n response = view(request)\n self.assertEqual(response.status_code, 403)\n\n request.user = User()\n response = view(request)\n self.assertEqual(response.status_code, 200)",
"def test_admin_login(self):\n self.request.user.is_active = True\n self.request.user.is_registered = True\n self.request.user.is_superuser = False\n self.request.user.is_staff = True\n nt.assert_true(self.view.test_func())",
"def test_admin_login(self):\n self.request.user.is_active = True\n self.request.user.is_registered = True\n self.request.user.is_superuser = False\n self.request.user.is_staff = True\n nt.assert_true(self.view.test_func())",
"def test_admin_login(self):\n self.request.user.is_active = True\n self.request.user.is_registered = True\n self.request.user.is_superuser = False\n self.request.user.is_staff = True\n nt.assert_true(self.view.test_func())",
"def test_admin_login(self):\n self.request.user.is_active = True\n self.request.user.is_registered = True\n self.request.user.is_superuser = False\n self.request.user.is_staff = True\n nt.assert_true(self.view.test_func())",
"def test_assessor_access_limited(self):\n assessor = get_or_create_default_assessor()\n self.client.login(assessor.email)\n # This assessor doesn't belong to a group\n self.assertTrue(is_assessor(assessor))\n self.assertFalse(get_user_assessor_groups(assessor))\n\n # forbidden\n urls_get_forbidden = [\n reverse('wl_applications:enter_conditions', args=[self.application.pk]),\n reverse('wl_applications:enter_conditions_assessor', args=[self.application.pk, self.assessment.pk]),\n ]\n urls_post_forbidden = [\n {\n 'url': reverse('wl_applications:create_condition', args=[self.application.pk]),\n 'data': {\n 'code': '123488374',\n 'text': 'condition text'\n }\n },\n {\n 'url': reverse('wl_applications:set_assessment_condition_state'),\n 'data': {\n 'assessmentConditionID': self.assessment_condition.pk,\n 'acceptanceStatus': 'accepted',\n }\n },\n {\n 'url': reverse('wl_applications:enter_conditions', args=[self.application.pk]),\n 'data': {\n 'conditionID': [self.condition.pk],\n }\n },\n {\n 'url': reverse('wl_applications:enter_conditions_assessor',\n args=[self.application.pk, self.assessment.pk]),\n 'data': {\n 'conditionID': [self.condition.pk],\n }\n },\n ]\n # Allowed\n urls_get_allowed = [\n reverse('wl_applications:search_conditions')\n ]\n urls_post_allowed = [\n ]\n for url in urls_get_forbidden:\n response = self.client.get(url, follow=True)\n if response.status_code != 403:\n self.assertRedirects(response, reverse('wl_dashboard:tables_assessor'), status_code=302,\n target_status_code=200)\n for url in urls_post_forbidden:\n response = self.client.post(url['url'], url['data'], follow=True)\n if response.status_code != 403:\n self.assertRedirects(response, reverse('wl_dashboard:tables_assessor'), status_code=302,\n target_status_code=200)\n for url in urls_get_allowed:\n response = self.client.get(url, follow=True)\n self.assertEqual(200, response.status_code)\n\n for url in urls_post_allowed:\n response = self.client.post(url['url'], url['data'], follow=True)\n self.assertEqual(200, response.status_code)",
"def test_non_admin_login(self):\n self.request.user.is_active = True\n self.request.user.is_registered = True\n self.request.user.is_superuser = False\n self.request.user.is_staff = False\n nt.assert_equal(self.view.test_func(), False)",
"def test_non_admin_login(self):\n self.request.user.is_active = True\n self.request.user.is_registered = True\n self.request.user.is_superuser = False\n self.request.user.is_staff = False\n nt.assert_equal(self.view.test_func(), False)",
"def test_non_admin_login(self):\n self.request.user.is_active = True\n self.request.user.is_registered = True\n self.request.user.is_superuser = False\n self.request.user.is_staff = False\n nt.assert_equal(self.view.test_func(), False)",
"def test_non_admin_login(self):\n self.request.user.is_active = True\n self.request.user.is_registered = True\n self.request.user.is_superuser = False\n self.request.user.is_staff = False\n nt.assert_equal(self.view.test_func(), False)",
"def testDenyAllowAccess(self):\n self.host.ContinueAuth()\n self.host.SignIn(self.account['username'], self.account['password'])\n self.host.DenyAccess()\n self.host.ContinueAuth()\n self.host.AllowAccess()",
"def test_admin_update_user_taskrun(self):\r\n\r\n with self.flask_app.test_request_context('/'):\r\n user_taskrun = TaskRunFactory.create()\r\n\r\n assert self.mock_admin.id != user_taskrun.user.id\r\n assert_raises(Forbidden,\r\n getattr(require, 'taskrun').update,\r\n user_taskrun)",
"def test_admin(self):\r\n \r\n self.assertEqual(False, self.user.isAdmin)",
"def test_updateview_read_for_wrong_user(self):\n\n for user in self.users:\n updateview = reverse('account_update', args=(user.uuid,))\n other_users = self.users\n other_users.remove(user)\n random_user = random.choice(other_users)\n\n self.client.login(email=random_user.email, password='letmein')\n\n response = self.client.get(updateview)\n\n self.assertEqual(response.status_code, 403)",
"def test_user_belonging_to_more_sites(self):\n self._create_simple_setup()\n foo_site = Site.objects.get(domain='foo.site.com')\n bar_site = Site.objects.get(domain='bar.site.com')\n writer_role = Role.objects.get(name='writer')\n bob = User.objects.get(username='bob')\n foo_master_page = Page.objects.get(\n title_set__title='master',\n site=foo_site)\n writer_role.grant_to_user(bob, foo_site, [foo_master_page])\n news_page = Page.objects.get(\n title_set__title='news',\n parent=foo_master_page)\n PagePermission.objects.create(user=bob, page=news_page)\n writer_role.ungrant_from_user(bob, foo_site)\n writer_users = writer_role.users(foo_site)\n self.assertNotIn(bob, writer_users)\n writer_users = writer_role.users(bar_site)\n self.assertIn(bob, writer_users)",
"def test_user_isnt_admin():\n app = create_ctfd()\n with app.app_context():\n register_user(app)\n client = login_as_user(app)\n for page in ['pages', 'teams', 'scoreboard', 'chals', 'statistics', 'config']:\n r = client.get('/admin/{}'.format(page))\n assert r.location.startswith(\"http://localhost/login?next=\")\n assert r.status_code == 302\n destroy_ctfd(app)",
"def test_auth_sharable_admin(self):\n self.do_sharable(True, 'pattieblack', None, tenant='froggy',\n is_admin=True)",
"def testGetAccessAllowed(self):\n for user in (self.guest, self.contributor, self.delegate, self.owner, self.root):\n response = self.runGet(user, sequencer=self.hiseq2000.vendor_id)\n self.response_200(response)\n data = json.loads(response.content.decode(\"utf-8\"))\n self.assertEqual(data[\"sodar_uuid\"], str(self.hiseq2000.sodar_uuid))",
"def test_user_can_change_inactive(self):\n self.assertTrue(self.story.user_can_change(self.user1))\n self.user1.is_active = False \n self.assertFalse(self.story.user_can_change(self.user1))",
"def testGetAccessAllowed(self):\n for user in (self.guest, self.contributor, self.delegate, self.owner, self.root):\n response = self.runGet(user, sequencer=self.hiseq2000.sodar_uuid)\n self.response_200(response)\n data = json.loads(response.content.decode(\"utf-8\"))\n self.assertEqual(data[\"sodar_uuid\"], str(self.hiseq2000.sodar_uuid))"
] |
[
"0.6481255",
"0.6474486",
"0.642696",
"0.642696",
"0.6413336",
"0.6392763",
"0.6392763",
"0.6359528",
"0.6330002",
"0.6308295",
"0.6306587",
"0.627981",
"0.627981",
"0.627981",
"0.627981",
"0.6235596",
"0.62184274",
"0.62184274",
"0.62184274",
"0.62184274",
"0.62169385",
"0.62070906",
"0.6201333",
"0.61741805",
"0.6174141",
"0.61311466",
"0.61284035",
"0.6107119",
"0.6086318",
"0.60838085"
] |
0.7147109
|
0
|
Get a score in the range [0, 100] from gender, grade, test_name and test_result.
|
def get_score_from_test(gender, grade, test_name, test_result):
score_map = TestSports[test_name.upper()] \
.value \
[Constants.SCORE_MAP] \
[Gender[gender.upper()]] \
[Grade[grade.upper()]]
print(score_map)
score = Student.get_score(score_map, test_result)
print(score)
return score
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def get_score(score_map, test_result):\n if test_result < score_map[20]:\n return int((test_result / score_map[20]) * 20)\n elif test_result < score_map[40]:\n return int(20 + (test_result - score_map[20]) / (score_map[40] - score_map[20]) * 20)\n elif test_result < score_map[60]:\n return int(40 + (test_result - score_map[40]) / (score_map[60] - score_map[40]) * 20)\n elif test_result < score_map[85]:\n return int(60 + (test_result - score_map[60]) / (score_map[85] - score_map[60]) * 20)\n elif test_result < score_map[100]:\n return int(85 + (test_result - score_map[85]) / (score_map[100] - score_map[85]) * 20)\n else:\n return 100",
"def score(self, test_data):\n\n\t\tpass",
"def score(name):\r\n return (sorted(test).index(name)+1)*value(name)",
"def getScore(data):\n return score",
"def get_score(self, a, b):\n ### FILL IN ###",
"def score(sentence, test):\n total = 0\n wrong = []\n i = 0\n while i < len(sentence):\n if sentence[i] == test[i]:\n total += 1\n else:\n # keep track of pos in list\n wrong.append(i)\n i += 1\n percent = float(total)/len(test)*100\n return percent, wrong",
"def score(self,ytest,how='score'):\n scores = []\n #iterate through each pred for each nn value\n for pred in self.ypred:\n sc = np.empty(pred.shape[1]) #need to store the scores\n\n for i in range(pred.shape[1]):\n\n p = pred[:,i]\n\n if how == 'score':\n sc[i] = utilities.score(p, ytest[:,i])\n\n if how == 'corrcoef':\n\n sc[i] = utilities.corrcoef(p, ytest[:,i])\n\n scores.append(sc)\n\n scores = np.vstack(scores)\n return scores",
"def get_score(self, student_answers):\r\n pass",
"def score(self, test_data):\n\n\t\tins, outs = self._split_inputs_outputs(test_data)\n\t\treturn self.model.score(ins, outs)",
"def test_get_score(self):\r\n score_dict = self.combinedoe.get_score()\r\n self.assertEqual(score_dict['score'], 15.0)\r\n self.assertEqual(score_dict['total'], 5.0)",
"def extract_score(results):\n total_score = 0;\n total_possible_score = 0;\n for k in results.keys():\n total_score = total_score + results[k][0]\n total_possible_score = total_possible_score + results[k][1]\n return (total_score, total_possible_score)",
"def evaluate(self, pred, **nargs):\n sse = sum((pred(u, ds=self, **nargs) - (1 if g == \"F\" else 0)) ** 2\n for (u, g) in self.gender_test.items())\n\n ll = 0\n for (u, g) in self.gender_test.items():\n for prn in [pred(u, ds=self, **nargs)]:\n if g == 'F' and prn == 0:\n ll = math.inf\n break\n elif g == 'F':\n ll += math.log(prn, 2)\n elif g == 'M' and prn == 1:\n ll = math.inf\n break\n else:\n ll += math.log(1 - prn, 2)\n\n return (sse, -ll, sse / len(self.gender_test), -ll / len(self.gender_test))",
"def test_get_score(self):\r\n score_dict = self.combinedoe.get_score()\r\n self.assertEqual(score_dict['score'], 0)\r\n self.assertEqual(score_dict['total'], 1)",
"def calculate_score(result):\n sample1=result['Sample1']\n sample2=result['Sample2']\n string1=paragraph_to_list(sample1)\n string2=paragraph_to_list(sample2)\n \n return round( strings_similarity(string1, string2), 2)\n #method_dict=strings_count_compare(string1, string2)/ max(len(string1), len(string2))\n #return round(0.5*(method_difflab+method_dict), 2)",
"def test_score():\n print(\"Tests for 'score' function\")\n test_suite = TestSuite()\n\n # Testing with empty hand\n result = score([])\n test_suite.run_test(result, 0, '0')\n # Testing with non-empty hand\n result = score([1, 3])\n test_suite.run_test(result, 3, '1')\n # Testing with non-empty hand\n result = score([1, 3, 1, 1])\n test_suite.run_test(result, 3, '2')\n # Testing with non-empty hand\n result = score([4, 3, 4, 3, 3])\n test_suite.run_test(result, 9, '3')\n\n # Show report\n test_suite.report_results()",
"def score(self, test_data):\n\n\t\tins, outs = self._split_inputs_outputs(test_data)\n\n\t\t# One hot encode the input/labels\n\t\tencoder = LabelEncoder()\n\t\tencoder.fit(outs)\n\t\tenc_labels = encoder.transform(outs)\n\t\tenc_labels = np_utils.to_categorical(enc_labels)\n\n\t\t_, score = self.model.evaluate(ins, enc_labels, verbose=2)\n\n\t\treturn score",
"def score_method(pairs_true, pairs_test):\n \n set_true = {tuple(e) for e in pairs_true}\n set_test = {tuple(e) for e in pairs_test}\n true_pos, false_pos, false_neg = confusion_stats(set_true, set_test)\n \n total = true_pos + false_pos + false_neg\n true_pos_rate = true_pos / total\n false_pos_rate = false_pos / total\n false_neg_rate = false_neg / total\n \n return true_pos_rate, false_pos_rate, false_neg_rate",
"def _score_to_decision(self, score):",
"def GetTestScoreAndDisplayValue(self, test_key, raw_scores):\n #logging.info('Cookies.GetScoreAndDisplayValue '\n # 'test: %s, median: %s, medians: %s' % (self.key, median, \n # len(medians)))\n\n #TODO(eric): change this method\n median = raw_scores[test_key]\n score = 0\n if 'hostconn' == test_key:\n if median > 2:\n score = 100\n elif median == 2:\n score = 50\n else:\n score = 0\n\n elif 'maxconn' == test_key:\n if median > 20:\n score = 100\n elif median >= 10:\n score = 50\n else:\n score = 0\n return score, str(median)",
"def score(self, X, y, predict_results=None, style=\"accuracy\"):\n results = predict_results\n if results is None:\n results = np.reshape(self.predict(X)[0], np.shape(y))\n if style=='accuracy':\n correct = 0\n for scored, expected in zip(results, y):\n if scored == expected:\n correct += 1\n return 0 if len(results) == 0 else (correct / len(results)) * 100.0\n if style=='mse':\n summer = 0\n count = 0\n for scored, expected in zip(results, y):\n summer = summer + ((scored - expected) ** 2)\n count = count + 1\n return summer / count",
"def get_scores(self) -> tuple:\n return (self.get_score(), self.p2_score)",
"def evaluate_success_rate(number_of_user_guesses):\n if number_of_user_guesses < 10:\n success_rate = 'Awesome'\n elif number_of_user_guesses >= 10 and number_of_user_guesses < 20:\n success_rate = 'Good'\n elif number_of_user_guesses >= 20 and number_of_user_guesses < 30:\n success_rate = 'I quess it is good'\n elif number_of_user_guesses >= 30:\n success_rate = 'Not so good'\n return success_rate",
"def __calculate_gender_diversity_score(project: dict, student: dict) -> int:\n # project_name = project[\"fields\"][PROJECT_NAME_FIELD]\n # student_name = student[\"fields\"][SURVEY_STUDENT_NAME_FIELD][0]\n\n # print(\"Calculating gender pairing score for: Project({}) - Student({})\".format(project_name, student_name))\n\n # Get the gender specified by the student\n student_gender = student[\"fields\"].get(SURVEY_GENDER_FIELD, None)\n if not student_gender:\n # The student didn't provide a gender, so we can't calculate a score\n return 0\n\n # Get the list of current assignments for the project team\n team_assignments = __get_team_assignments(project)\n\n # This list will hold the list of genders on the team\n team_gender_values = []\n for assignment in team_assignments:\n assigned_student_gender = assignment.student[\"fields\"].get(SURVEY_GENDER_FIELD, None)\n\n if assigned_student_gender:\n team_gender_values.append(assigned_student_gender)\n\n # ================================================================================================================\n # Get the count genders for the already assigned students\n gender_counter = __get_gender_counter()\n gender_counter.update(team_gender_values)\n\n # Get the count of the particular gender that matches the student\n matching_gender_count = gender_counter.get(student_gender)\n\n if matching_gender_count == 0:\n # This is good, as it will make the team more diverse\n return SURVEY_GENDER_BASE_WEIGHT\n elif matching_gender_count == 1:\n # This is better, as it will pair students with like genders\n return SURVEY_GENDER_BASE_WEIGHT * 2\n else:\n # There are already at least 2 student with this gender identity, so we won't\n # prefer this\n return 0",
"def _get_scores(target, predicted):\n recall = scoring(target, predicted, metric=\"recall\")\n precision = scoring(target, predicted, metric=\"precision\")\n accuracy = scoring(target, predicted, metric=\"accuracy\")\n f_score = scoring(target, predicted, metric=\"f1\")\n\n return [recall, precision, accuracy, f_score]",
"def get_score(self):\r\n if self.is_complete():\r\n score = 1\r\n elif self.is_half_complete():\r\n score = 0.5\r\n else:\r\n score = 0\r\n return {'score': score,\r\n 'total': self.max_score()}",
"def get_score(self, red_score, blue_score):\n if red_score < blue_score:\n return 0\n elif red_score > blue_score:\n return 1\n else:\n return 0.5",
"def evaluate(self):\n scores = []\n scores.append(self.word_analogy())\n print(\"Word Analogy (acc): \", scores[0])\n scores.append(self.word_similarity())\n print(\"Word Similarity (MSE): \", scores[1])\n scores.append(self.concept_categorization())\n print(\"Concept Categorization (purity): \", scores[2])\n scores.append(self.sentiment_analysis())\n print(\"Sentiment Analysis (acc): \", scores[3])\n return scores",
"def disp_score():",
"def retrieve_scores(num_letters, language, folder, min_score, max_score):\n if (num_letters == 1):\n return retrieve_letter_scores(1, language, folder, min_score, max_score)\n elif (num_letters == 2):\n return retrieve_syllable_scores(num_letters, 1, language, folder, min_score, max_score)\n elif (num_letters == 3):\n return retrieve_syllable_scores(num_letters, 3, language, folder, min_score, max_score)\n else:\n print(\"Error: incorrect number of letters. Value ranges from 1 to 3.\\n\")",
"def calculate_gpa(score):\n if score < 60:\n return 0\n elif 60 <= score < 70:\n return 1\n elif 70 <= score < 80:\n return 2\n elif 80 <= score < 90:\n return 3\n elif score >= 90:\n return 4"
] |
[
"0.68614393",
"0.6568922",
"0.64610434",
"0.61989254",
"0.6156589",
"0.6151049",
"0.6018301",
"0.60071886",
"0.6000255",
"0.59969515",
"0.5904228",
"0.58880126",
"0.5869166",
"0.5790369",
"0.5775762",
"0.5764602",
"0.57628024",
"0.575975",
"0.5730284",
"0.5727392",
"0.57141775",
"0.570529",
"0.5695025",
"0.56871074",
"0.56834924",
"0.56777704",
"0.5645826",
"0.56374586",
"0.5635053",
"0.5628804"
] |
0.7776665
|
0
|
Calculate the overlay polygon based on the selection and the location of the source and destination plots.
|
def calculate_points(self, component):
# find selection range on source plot
x_start, x_end = self._get_selection_screencoords()
if x_start > x_end:
x_start, x_end = x_end, x_start
y_end = self.source.y
y_start = self.source.y2
left_top = np.array([x_start, y_start])
left_mid = np.array([x_start, y_end])
right_top = np.array([x_end, y_start])
right_mid = np.array([x_end, y_end])
# Offset y because we want to avoid overlapping the trapezoid with the topmost
# pixels of the destination plot.
y = self.destination.y2 + 1
left_end = np.array([self.destination.x, y])
right_end = np.array([self.destination.x2, y])
polygon = np.array((left_top, left_mid, left_end,
right_end, right_mid, right_top))
left_line = np.array((left_top, left_mid, left_end))
right_line = np.array((right_end, right_mid, right_top))
return left_line, right_line, polygon
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def calculate_points(self, component):\n # find selection range on source plot\n x_start, x_end = self._get_selection_screencoords()\n if x_start > x_end:\n x_start, x_end = x_end, x_start\n\n y_end = self.source.y\n y_start = self.source.y2\n\n left_top = np.array([x_start, y_end])\n left_mid = np.array([x_start, y_start])\n right_top = np.array([x_end, y_end])\n right_mid = np.array([x_end, y_start])\n\n # Offset y because we want to avoid overlapping the trapezoid with the topmost\n # pixels of the destination plot.\n y = self.destination.y - 1\n\n left_end = np.array([self.destination.x, y])\n right_end = np.array([self.destination.x2, y])\n\n polygon = np.array((left_end, left_mid, left_top,\n right_top, right_mid, right_end))\n left_line = np.array((left_top, left_mid, left_end))\n right_line = np.array((right_end, right_mid, right_top))\n\n return left_line, right_line, polygon",
"def overlay(self, component, gc, view_bounds=None, mode=\"normal\"):\n\n tmp = self._get_selection_screencoords()\n if tmp is None:\n return\n\n left_line, right_line, polygon = self.calculate_points(component)\n\n gc.save_state()\n try:\n gc.translate_ctm(*component.position)\n gc.set_alpha(self.alpha)\n gc.set_fill_color(self.fill_color_)\n gc.set_line_width(self.border_width)\n gc.set_stroke_color(self.border_color_)\n gc.begin_path()\n gc.lines(polygon)\n gc.fill_path()\n\n gc.begin_path()\n gc.lines(left_line)\n gc.lines(right_line)\n gc.stroke_path()\n finally:\n gc.restore_state()\n return",
"def _select_polygons(self):\r\n selected_polys_layer = \"SelectedPolys_\" + self.job_id\r\n if self.facility_id is None:\r\n facility_query = arcpy.AddFieldDelimiters(self.time_lapse_polygons, FACILITY_ID_FIELD) + \" IS NULL\"\r\n else:\r\n facility_query = arcpy.AddFieldDelimiters(self.time_lapse_polygons, FACILITY_ID_FIELD) + \" = \" + \\\r\n str(self.facility_id)\r\n query = facility_query + \" AND \" + \\\r\n arcpy.AddFieldDelimiters(self.time_lapse_polygons, FROM_BREAK_FIELD) + \" = \" + str(self.from_break) + \\\r\n \" AND \" + \\\r\n arcpy.AddFieldDelimiters(self.time_lapse_polygons, TO_BREAK_FIELD) + \" = \" + str(self.to_break)\r\n arcpy.management.MakeFeatureLayer(self.time_lapse_polygons, selected_polys_layer, where_clause=query)\r\n self.logger.info(\r\n f\"{int(arcpy.management.GetCount(selected_polys_layer).getOutput(0))} time lapse polygons selected.\")\r\n return selected_polys_layer",
"def poly_to_list_with_overlap(self, polygon):\n added = 0\n polygon_item = polygon.polygon()\n polygon_item.translate(polygon.x(), polygon.y())\n\n # Comparator to determine which x value of two points is the highest\n def compare_x(item1, item2):\n if item1.x() < item2.x():\n return -1\n elif item1.x() > item2.x():\n return 1\n else:\n return 0\n\n # Comparator to determine which y value of two points is the highest\n def compare_y(item1, item2):\n if item1.y() < item2.y():\n return -1\n elif item1.y() > item2.y():\n return 1\n else:\n return 0\n\n # Create two lists, one sorted by ascending x-values, one by ascending y-values\n x_list = sorted(self.potential_edge_splitters, key=cmp_to_key(compare_x))\n y_list = sorted(self.potential_edge_splitters, key=cmp_to_key(compare_y))\n\n # Loop over all children to the polygon\n for item in polygon.childItems():\n # Look only at edges (overlapping of points is handled elsewhere)\n if isinstance(item, PyQt5.QtWidgets.QGraphicsLineItem):\n edge = item\n\n p1 = edge.line().p1()\n p2 = edge.line().p2()\n added_this = 0\n\n # Choose the direction with the largest disparity (to avoid scenario of straight lines)\n # then use the sorted list for that direction\n if abs(p1.x() - p2.x()) > abs(p1.y() - p2.y()):\n mode = \"X\"\n circ_list = x_list\n else:\n mode = \"Y\"\n circ_list = y_list\n\n for circ in circ_list:\n poly = circ.parentItem()\n p = circ.scenePos()\n\n # temp_p needed since edge.contains does not account for the edge being moved in the canvas\n temp_p = circ.scenePos()\n temp_p.setX(temp_p.x() - edge.scenePos().x())\n temp_p.setY(temp_p.y() - edge.scenePos().y())\n\n # Find the edges to split which contain temp_p, if the edge contains decide the orientation (in x-\n # or y-direction decided earlier) of p1 and p2, based on this insert the new point in the polygon\n # in the correct position\n if edge.contains(temp_p):\n if edge in poly.childItems():\n pass # Ignore if the edge is in the same polygon as the point\n else:\n if temp_p == p1 or temp_p == p2:\n pass # Don't compare if it contains an edge point, instead handled later by the overlapping points\n elif mode == \"Y\":\n if p1.y() < p2.y(): # Left to right\n index = abs(edge.localIndex)\n polygon_item.insert(index + added, p)\n added += 1\n elif p1.y() > p2.y(): # Right to left\n index = abs(edge.localIndex)\n polygon_item.insert(index + added - added_this, p)\n added_this += 1\n added += 1\n else:\n if p1.x() < p2.x(): # Left to right\n index = abs(edge.localIndex)\n polygon_item.insert(index + added, p)\n added += 1\n elif p1.x() > p2.x(): # Right to left\n index = abs(edge.localIndex)\n polygon_item.insert(index + added - added_this, p)\n added_this += 1\n added += 1\n\n return self.poly_to_list(polygon_item, \"Global\")",
"def _draw_polygon(self):\n xs, ys = zip(*self._xys) if self._xys else ([], [])\n self._selection_artist.set_data(xs, ys)\n self._update_box()\n # Only show one tool handle at the start and end vertex of the polygon\n # if the polygon is completed or the user is locked on to the start\n # vertex.\n if (self._selection_completed\n or (len(self._xys) > 3\n and self._xys[-1] == self._xys[0])):\n self._polygon_handles.set_data(xs[:-1], ys[:-1])\n else:\n self._polygon_handles.set_data(xs, ys)\n self.update()",
"def _update_selection_poly(self, vmin, vmax):\n # The vertices are positioned\n # 1 ------ 2\n # | |\n # 0, 4 ---- 3\n verts = self.poly.xy\n if self.orientation == \"vertical\":\n verts[0] = verts[4] = .25, vmin\n verts[1] = .25, vmax\n verts[2] = .75, vmax\n verts[3] = .75, vmin\n else:\n verts[0] = verts[4] = vmin, .25\n verts[1] = vmin, .75\n verts[2] = vmax, .75\n verts[3] = vmax, .25",
"def draw_overlay(self):\n pass",
"def SH_FindOverlap(xcenter, ycenter, xlength, ylength, xp_corner, yp_corner):\n\n areaClipped = 0.0\n top = ycenter + 0.5 * ylength\n bottom = ycenter - 0.5 * ylength\n\n left = xcenter - 0.5 * xlength\n right = xcenter + 0.5 * xlength\n\n nVertices = 4 # input detector pixel vertices\n MaxVertices = 9\n # initialize xPixel, yPixel to the detector pixel corners.\n # xPixel,yPixel will become the clipped polygon vertices inside the cube pixel\n # xnew,ynew xpixel and ypixel of size MaxVertices\n\n xPixel = []\n yPixel = []\n\n xnew = []\n ynew = []\n\n for j in range(0, 9):\n xnew.append(0.0)\n ynew.append(0.0)\n xPixel.append(0.0)\n yPixel.append(0.0)\n\n\n # Xpixel, YPixel closed (5 corners)\n for i in range(0, 4):\n xPixel[i] = xp_corner[i]\n yPixel[i] = yp_corner[i]\n xPixel[4] = xp_corner[0]\n yPixel[4] = yp_corner[0]\n\n\n for i in range(0, 4): # 0:left, 1: right, 2: bottom, 3: top\n nVertices2 = 0\n for j in range(0, nVertices):\n x1 = xPixel[j]\n y1 = yPixel[j]\n x2 = xPixel[j + 1]\n y2 = yPixel[j + 1]\n condition = calcCondition(i, x1, y1, x2, y2, left, right, top, bottom)\n x = 0\n y = 0\n\n if condition == 1:\n x, y = solveIntersection(i, x1, y1, x2, y2,\n left, right, top, bottom)\n nVertices2 = addpoint(x, y, xnew, ynew, nVertices2);\n nVertices2 = addpoint(x2, y2, xnew, ynew, nVertices2)\n\n elif condition == 2:\n nVertices2 = addpoint(x2, y2, xnew, ynew, nVertices2)\n elif condition == 3:\n x, y = solveIntersection(i, x1, y1, x2, y2,\n left, right, top, bottom)\n nVertices2 = addpoint(x, y, xnew, ynew, nVertices2)\n\n#\tcondition == 4: points outside\n# Done looping over J corners\n nVertices2 = addpoint(xnew[0], ynew[0], xnew, ynew, nVertices2) # close polygon\n\n if nVertices2 > MaxVertices:\n raise Error2DPolygon(\" Failure in finding the clipped polygon, nVertices2 > 9 \")\n\n\n nVertices = nVertices2 - 1;\n\n for k in range(0, nVertices2):\n xPixel[k] = xnew[k]\n yPixel[k] = ynew[k]\n\n# done loop over top,bottom,left,right\n nVertices = nVertices + 1\n\n\n if nVertices > 0:\n areaClipped = FindAreaPoly(nVertices, xPixel, yPixel);\n\n\n return areaClipped;",
"def on_action_merge(self):\n ignore_warning = False\n if self.mode == \"Draw Poly\":\n self.remove_drawing_poly()\n elif self.mode == \"Draw Rect\":\n self.remove_drawing_rect()\n\n # Loop over all polygons and compare to all other, if two polygons are merged they are removed from the list\n for poly_outer in self.poly_list:\n for poly_inner in self.poly_list:\n if poly_outer == poly_inner:\n continue # Ignore comparison to self\n\n contain_list = self.polygon_contains(poly_outer, poly_inner)\n\n if all(contain_list):\n # If all points are inside the outer polygon do not merge (this would remove the inner one)\n pass\n elif any(contain_list):\n # If some but not all points are inside the outer polygon the two polygons overlap and should be\n # merged\n\n # Ignore holes\n if poly_inner in self.hole_list or poly_outer in self.hole_list:\n pass\n else:\n # Warning message that merging will remove any markers on the polygons\n # If return is chosen cancel the merge, else proceed and ignore the warning message\n # for the continuation of the loop\n for child in poly_inner.childItems():\n if child.childItems():\n if isinstance(child.childItems()[0], QGraphicsTextItem):\n if not ignore_warning:\n user_choice = self.marker_removal_warning()\n if user_choice == \"Cancel\":\n return\n elif user_choice == \"Ignore\":\n ignore_warning = True\n else:\n self.point_marker_list.remove(child)\n elif child.childItems()[0].childItems():\n if isinstance(child.childItems()[0].childItems()[0], QGraphicsTextItem):\n if not ignore_warning:\n user_choice = self.marker_removal_warning()\n if user_choice == \"Cancel\":\n return\n elif user_choice == \"Ignore\":\n ignore_warning = True\n self.line_marker_list.remove(child.childItems()[0])\n else:\n self.line_marker_list.remove(child.childItems()[0])\n\n for child in poly_outer.childItems():\n if child.childItems():\n if isinstance(child.childItems()[0], QGraphicsTextItem):\n if not ignore_warning:\n user_choice = self.marker_removal_warning()\n if user_choice == \"Cancel\":\n return\n elif user_choice == \"Ignore\":\n ignore_warning = True\n else:\n self.point_marker_list.remove(child)\n\n elif child.childItems()[0].childItems():\n if not ignore_warning:\n if isinstance(child.childItems()[0].childItems()[0], QGraphicsTextItem):\n user_choice = self.marker_removal_warning()\n if user_choice == \"Cancel\":\n return\n elif user_choice == \"Ignore\":\n self.line_marker_list.remove(child.childItems()[0])\n ignore_warning = True\n else:\n self.line_marker_list.remove(child.childItems()[0])\n\n # Move the QPolygonF items to the global coordinates and unite them (merge)\n p1 = poly_outer.polygon().translated(poly_outer.x(), poly_outer.y())\n p2 = poly_inner.polygon().translated(poly_inner.x(), poly_inner.y())\n uni = p1.united(p2)\n\n # Unite adds the starting point again as endpoint so we have to remove this duplicate point\n # to avoid future problems\n uni = self.poly_to_list(uni, \"Global\")\n uni = uni[:-1]\n\n # Add the new merged polygon, remove the old polygons from the view and lists\n self.add_poly_to_scene(QPolygonF(uni))\n self.delete_polygon(poly_inner, True)\n self.delete_polygon(poly_outer, True)\n # break",
"def get_geometry(self, selection_name):",
"def updateSelectionArea(self):\n self.machine.setSelectionArea(self.points,\n fill='hatch',\n color=self.machine.color)\n eventDict = prepareDrawingSignal('drawingProgress',\n 'polygon',\n self.points,\n self.machine.parameters)\n self.machine.plot.notify(**eventDict)",
"def __draw_polygon(self, event, klick):\n global creating_polygon\n curX = self.canvas.canvasx(event.x)\n curY = self.canvas.canvasy(event.y)\n if not klick and len(self.polygon_points) >= 2:\n c_r_x, c_r_y = self.get_canvas_relative_coords((self.polygon_points[-2], self.polygon_points[-1]))\n distanceX = curX - c_r_x\n distanceY = curY - c_r_y\n if pow(pow(distanceX, 2) + pow(distanceY, 2), 1 / 2) <= 15:\n return\n image_relative_x, image_relative_y = self.get_image_relative_coords((curX, curY))\n self.polygon_points.extend((image_relative_x, image_relative_y))\n self.polygon_groundstructure.append(self.canvas.create_rectangle(curX - 2, curY - 2, curX + 2, curY + 2,\n outline='magenta', width=1,\n activewidth=2))\n if not creating_polygon: # start with a new polygon\n creating_polygon = True\n return\n else: # draw a line between the last points\n c_r_x1, c_r_y1 = self.get_canvas_relative_coords((self.polygon_points[-4], self.polygon_points[-3]))\n c_r_x2, c_r_y2 = self.get_canvas_relative_coords((self.polygon_points[-2], self.polygon_points[-1]))\n self.polygon_groundstructure.append(self.canvas.create_line([c_r_x1, c_r_y1, c_r_x2, c_r_y2],\n fill='magenta', width=2))",
"def polySelectEditCtx(*args, adjustEdgeFlow: Union[float, bool]=1.0, divisions: Union[int,\n bool]=2, exists: bool=True, fixQuads: bool=False, image1: Union[AnyStr,\n bool]=\"\", image2: Union[AnyStr, bool]=\"\", image3: Union[AnyStr, bool]=\"\",\n insertWithEdgeFlow: bool=False, smoothingAngle: Union[float, bool]=0.0,\n splitType: Union[int, bool]=0, useEqualMultiplier: bool=True,\n absoluteOffset: bool=True, autoComplete: bool=True, deleteEdge: bool=True,\n endVertexOffset: Union[float, bool]=0.0, mode: Union[int, bool]=0,\n startVertexOffset: Union[float, bool]=0.0, q=True, query=True, e=True,\n edit=True, **kwargs)->Union[AnyStr, Any]:\n pass",
"def event_click_polygon(self, event):\n\n event_x_pos = self.canvasx(event.x)\n event_y_pos = self.canvasy(event.y)\n drag_lims = self.get_vector_object(self.variables.current_shape_id).image_drag_limits\n if drag_lims:\n canvas_lims = self.image_coords_to_canvas_coords(drag_lims)\n if event_x_pos < canvas_lims[0]:\n event_x_pos = canvas_lims[0]\n elif event_x_pos > canvas_lims[2]:\n event_x_pos = canvas_lims[2]\n if event_y_pos < canvas_lims[1]:\n event_y_pos = canvas_lims[1]\n elif event_y_pos > canvas_lims[3]:\n event_y_pos = canvas_lims[3]\n\n if self.variables.actively_drawing_shape:\n old_coords = self.get_shape_canvas_coords(self.variables.current_shape_id)\n new_coords = list(old_coords) + [event_x_pos, event_y_pos]\n self.modify_existing_shape_using_canvas_coords(self.variables.current_shape_id, new_coords)\n # re-initialize shape if we're not actively drawing\n else:\n new_coords = (event.x, event.y, event_x_pos+1, event_y_pos+1)\n self.modify_existing_shape_using_canvas_coords(self.variables.current_shape_id, new_coords)\n self.variables.actively_drawing_shape = True",
"def createImgOverlay(sli, graph, drawing_params, notAnEmptySlice):\n \n (Y, X) = sli.shape\n \n # Creation of the superposition file (3 dimensions because RGB for the graph drawing)\n sli_ovl = np.zeros([Y, X, 3], np.uint8)\n sli_ovl[:, :, 0] = sli\n sli_ovl[:, :, 1] = sli\n sli_ovl[:, :, 2] = sli\n \n if notAnEmptySlice:\n line = drawing_params[0]\n line_color = drawing_params[1]\n line_size = drawing_params[2]\n apex_color = drawing_params[3]\n apex_size = drawing_params[4]\n node_color = drawing_params[5]\n node_size = drawing_params[6]\n body_color = drawing_params[7]\n body_size = drawing_params[8]\n \n graph = nx.convert_node_labels_to_integers(graph, first_label=0, \n ordering='default', \n label_attribute=None)\n \n # Creation of arrays from graph elements \n x_node = np.fromiter(nx.get_node_attributes(graph, 'x').values(), \n dtype=int) \n y_node = np.fromiter(nx.get_node_attributes(graph, 'y').values(), \n dtype=int) \n degrees = np.array([degree for node, degree in nx.degree(graph)], \n dtype=int) \n edges = np.array(graph.edges())\n \n # Lists of edges coordinates\n x1 = x_node[edges[:, 0]]\n y1 = y_node[edges[:, 0]]\n x2 = x_node[edges[:, 1]]\n y2 = y_node[edges[:, 1]]\n \n # Edges drawing\n if line:\n for i in range(len(x1)):\n cv2.line(sli_ovl, (x1[i], y1[i]), (x2[i], y2[i]), \n line_color, line_size)\n \n # Nodes drawing\n for i in range(len(x_node)):\n if degrees[i] == 1: # apex points\n color = apex_color\n size = apex_size\n elif degrees[i] == 2: # body/hypha points\n color = body_color\n size = body_size\n else: # branching/node points\n color = node_color\n size = node_size\n cv2.circle(sli_ovl, (x_node[i], y_node[i]), size, color, \n thickness=-1)\n\n return sli_ovl",
"def sf_dotset():\n global fig,ax,ss,statusL,statusM,axh,axv\n #get the handle of figure and axis\n fig=plt.gcf()\n ax=plt.gca()\n statusL,statusM=None,None\n ss=[]\n print 'Drawing a line, you should select at least 2 points'\n #\n def onmouse(event):\n global fig,ax,ss,statusL,statusM,axh,axv\n if statusL==None:\n if event.button==1:\n x,y=event.xdata,event.ydata\n print 'Coordinate of current point', x, y\n ss.append([x,y])\n axv=ax.axvline(event.xdata)\n axh=ax.axhline(event.ydata)\n fig.canvas.draw()\n #remove the vertical and horizontal lines\n axh.remove()\n axv.remove()\n #print ss\n statusL=None\n elif event.button==2:\n #polygon=np.array([ss],dtype=np.int32)\n #cv2.fillPoly(res['map'],polygon,0)\n #ax.imshow(res['map'],interpolation='nearest',cmap=cmapp)\n #polygondraw=plt.Polygon(ss,fill=None,edgecolor='b')\n #convert ss to to two-col array\n sxy=np.asarray(ss)\n sx,sy=sxy[:,0],sxy[:,1]\n plt.plot(sx,sy)\n #ax.add_patch(polygondraw)\n #show the patch\n fig.canvas.draw()\n print 'Press the middle button to remove the bad points'\n print 'Press the right button to quit and return the res'\n statusL=1\n statusM=1\n elif event.button==3:\n plt.close()\n #returen res\n elif statusM==1:\n if event.button==2:\n res=deepcopy(img)\n #ax.imshow(res['map'],interpolation='nearest',cmap=cmapp)\n #show the original pattern\n polygondraw.remove()\n fig.canvas.draw()\n statusL=None\n ss=[]\n print 'Press the left button to select a new point'\n elif event.button==3:\n plt.close()\n\n cid=fig.canvas.mpl_connect('button_press_event',onmouse)\n plt.show(block=True)\n #remove the polygon area\n #tmp=np.ones((res['height'],res['width']))\n line=np.asarray(ss,dtype=np.float)\n return line",
"def event_drag_multipoint_polygon(self, event):\n\n if self.variables.current_shape_id:\n event_x_pos = self.canvasx(event.x)\n event_y_pos = self.canvasy(event.y)\n drag_lims = self.get_vector_object(self.variables.current_shape_id).image_drag_limits\n if drag_lims:\n canvas_lims = self.image_coords_to_canvas_coords(drag_lims)\n if event_x_pos < canvas_lims[0]:\n event_x_pos = canvas_lims[0]\n elif event_x_pos > canvas_lims[2]:\n event_x_pos = canvas_lims[2]\n if event_y_pos < canvas_lims[1]:\n event_y_pos = canvas_lims[1]\n elif event_y_pos > canvas_lims[3]:\n event_y_pos = canvas_lims[3]\n\n self.show_shape(self.variables.current_shape_id)\n coords = self.coords(self.variables.current_shape_id)\n new_coords = list(coords[0:-2]) + [event_x_pos, event_y_pos]\n self.modify_existing_shape_using_canvas_coords(self.variables.current_shape_id, new_coords)\n else:\n pass",
"def make_percent_access_polygons(self):\r\n self.logger.info(\r\n f\"Processing FacilityID {self.facility_id}, FromBreak {self.from_break}, ToBreak {self.to_break}...\")\r\n self.scratch_gdb = self._create_output_gdb()\r\n selected_polygons = self._select_polygons()\r\n joined_polygons = self._join_polygons(selected_polygons)\r\n dissolved_polygons = self._dissolve_cells(joined_polygons)\r\n self.job_result[\"polygons\"] = dissolved_polygons",
"def _set_poly_roi(self, event, x, y, flags, params):\r\n if event == cv2.EVENT_LBUTTONDOWN:\r\n self.tpoly.append((x, y))\r\n self.ix = x\r\n self.iy = y\r\n self.drawing = True\r\n if event == cv2.EVENT_MOUSEMOVE:\r\n if self.drawing:\r\n self.img = self.current_frame.copy()\r\n _poly = self.tpoly + [(x, y), self.tpoly[0]]\r\n for p1, p2 in zip(_poly[:-1], _poly[1:]):\r\n cv2.line(self.img, p1, p2, (0, 0, 255), 3)\r\n if event == cv2.EVENT_LBUTTONDBLCLK:\r\n self.poly = self.tpoly\r\n self.tpoly = []\r\n self.drawing = False",
"def generatePolygons():",
"def polyclip(i, j, pol_x, pol_y, area=False):\n n = len(pol_x)\n nout = n + 4\n px_out, py_out = [0] * nout, [0] * nout\n clip_vals = [i, i + 1, j + 1, j]\n\n for ctype in range(4):\n cv = clip_vals[ctype]\n if ctype == 0:\n inside = [px > i for px in pol_x]\n elif ctype == 1:\n inside = [(px < i + 1) for px in pol_x]\n elif ctype == 2:\n inside = [(py < j + 1) for py in pol_y]\n else:\n inside = [py > j for py in pol_y]\n if all(inside):\n continue\n\n shiftp1 = inside.copy()\n shiftp1.insert(0, shiftp1.pop(-1))\n crosses = [i1 != i2 for (i1, i2) in zip(inside, shiftp1)]\n pind = 0\n for k in range(n):\n px, py = pol_x[k], pol_y[k]\n if crosses[k]: # out->in or in->out, add intersection\n ind = n - 1 if k == 0 else k - 1\n sx, sy = pol_x[ind], pol_y[ind]\n try:\n if ctype <= 1: # left or right\n px_out[pind] = cv\n py_out[pind] = sy + ((py - sy) / (px - sx)) * (cv - sx)\n else: # top or bottom\n px_out[pind] = sx + ((px - sx) / (py - sy)) * (cv - sy)\n py_out[pind] = cv\n except ZeroDivisionError: # pragma: no cover\n px_out[pind] = np.nan\n py_out[pind] = np.nan\n pind += 1\n\n if inside[k]: # out->in or in->in, add 2nd point\n px_out[pind] = px\n py_out[pind] = py\n pind += 1\n\n if pind >= nout - 2:\n nout *= 2\n px_out = px_out + [0] * nout\n py_out = py_out + [0] * nout\n nout *= 2\n\n if pind == 0: # polygon is entirely outside this line\n return None, None\n n = pind\n pol_x = px_out[:n].copy()\n pol_y = py_out[:n].copy()\n\n if area:\n if pol_x is None: # pragma: no cover\n return 0.0\n shiftx = pol_x.copy()\n shifty = pol_y.copy()\n shiftx.append(shiftx.pop(0))\n shifty.append(shifty.pop(0))\n a1 = [p[0] * p[1] for p in zip(pol_x, shifty)]\n a2 = [p[0] * p[1] for p in zip(pol_y, shiftx)]\n a = [p[0] - p[1] for p in zip(a1, a2)]\n return abs(sum(a)) / 2\n\n return pol_x, pol_y",
"def layer_coords(label_lst): #full path\n \n #if a fundus then do this block\n gyrus_check = all(i.__contains__(\"fundus\") for i in label_lst)\n if gyrus_check:\n for layer in label_lst:\n #read data\n df_layer = pd.read_csv(layer)\n df_layer = df_layer.iloc[1:,0]\n df_layer = pd.DataFrame( [list(map(float, i)) for i in [list(i.split()) for i in \\\n df_layer.values]], columns=['idk1', 'X', 'Y', 'Z', 'idk2'])[['X', 'Y', 'Z']]\n\n #compute slope\n yvals = [(y2 - y1) for y1, y2 in zip(df_layer['Y'], df_layer['Y'][1:])]\n xvals = [(x2 - x1) for x1, x2 in zip(df_layer['X'], df_layer['X'][1:])]\n layer_slope = [round(i,2) for i in np.divide(yvals, xvals)]\n\n #split lam label into three\n split = math.floor(len(df_layer['X'].values)/3)\n df_layer_right = df_layer[0:split]\n df_layer_left = df_layer[-split:]\n df_layer_middle = df_layer[split:-split]\n\n plt.plot(df_layer['X'], df_layer['Y'], lw=3) #color='#000000'\n # plt.plot(df_layer['X'], df_layer['Y'], linewidth=1, marker='o', markersize=5)\n plt.axis('off')\n plt.savefig('layer_contour.png')\n # plt.show()\n plt.close()\n\n #read, convert to grayscale, find edges\n layer_img = cv2.imread('layer_contour.png')\n layer_img_grey = cv2.cvtColor(layer_img, cv2.COLOR_BGR2GRAY)\n layer_edges = cv2.Canny(layer_img_grey, 30, 200)\n\n #find contours\n contours, hierachy = cv2.findContours(layer_edges, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)\n # cv2.imshow('contour', layer_edges)\n # cv2.waitKey(0)\n # cv2.destroyAllWindows()\n \n #order contours\n contours = [np.squeeze(i) for i in contours]\n df_contours = pd.DataFrame(contours)\n contours_ord = df_contours.loc[6].values, df_contours.loc[4].values, \\\n df_contours.loc[3].values, df_contours.loc[2].values, \\\n df_contours.loc[1].values, df_contours.loc[0].values, \\\n df_contours.loc[5].values\n contours_ord = np.squeeze(contours_ord)\n \n\n #plot all layers and add coordinate data to dict \n lay_coords_dict = {}\n for laycon, i in zip(contours_ord, list(range(len(contours)))): \n #split coordinates into top and bottom edge\n if i == 0: #0 == pial\n c_idx = int(np.floor(len(laycon)/2))\n coords_top = np.array(list(reversed(laycon[:c_idx])))\n lay_coords_dict[i] = coords_top[10:]\n # print(coords_top)\n\n else:\n c_idx = int(np.floor(len(laycon)/2))\n coords_top = np.array(list(reversed(laycon[c_idx:])))\n lay_coords_dict[i] = coords_top[5:-7]\n\n \n #plot coords\n # for key, val in lay_coords_dict.items():\n # plt.plot([i[0] for i in val], [i[1] for i in val], lw=1.75)\n # plt.gca().invert_yaxis()\n # plt.show()\n # plt.close()\n # sys.exit()\n\n #delete edge detect image and return dict\n rm_img_cmd = \"rm layer_contour.png\"\n os.system(rm_img_cmd)\n return(lay_coords_dict)\n \n\n #for crown data do this block\n else:\n for layer in label_lst:\n #read data\n df_layer = pd.read_csv(layer)\n df_layer = df_layer.iloc[1:,0]\n df_layer = pd.DataFrame( [list(map(float, i)) for i in [list(i.split()) for i in \\\n df_layer.values]], columns=['idk1', 'X', 'Y', 'Z', 'idk2'])[['X', 'Y', 'Z']]\n\n #compute slope\n yvals = [(y2 - y1) for y1, y2 in zip(df_layer['Y'], df_layer['Y'][1:])]\n xvals = [(x2 - x1) for x1, x2 in zip(df_layer['X'], df_layer['X'][1:])]\n layer_slope = [round(i,2) for i in np.divide(yvals, xvals)]\n\n #split lam label into three\n split = math.floor(len(df_layer['X'].values)/3)\n df_layer_right = df_layer[0:split]\n df_layer_left = df_layer[-split:]\n df_layer_middle = df_layer[split:-split]\n\n plt.plot(df_layer['X'], df_layer['Y'], lw=3) #color='#000000', lw=5\n # 
plt.plot(df_layer['X'], df_layer['Y'], linewidth=1, marker='o', markersize=5)\n plt.axis('off')\n plt.savefig('layer_contour.png')\n # plt.show()\n plt.close()\n\n #read, convert to grayscale, find edges\n layer_img = cv2.imread('layer_contour.png')\n layer_img_grey = cv2.cvtColor(layer_img, cv2.COLOR_BGR2GRAY)\n layer_edges = cv2.Canny(layer_img_grey, 30, 200)\n\n #find contours\n contours, hierachy = cv2.findContours(layer_edges, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)\n # cv2.imshow('contour', layer_edges)\n # cv2.waitKey(0)\n # cv2.destroyAllWindows()\n \n\n #plot all layers and add coordinate data to dict\n lay_coords_dict = {}\n for laycon, i in zip(contours, list(range( len(contours) ) )[::-1] ):#7\n #split coordinates into top and bottom edge\n # print(laycon)\n coords_lst = [list(ii) for i in laycon for ii in i] # 0 == GWB\n # print(coords_lst)\n\n c_split = math.floor(len(coords_lst)/4)\n coords_top = coords_lst[:c_split][::-1] + coords_lst[-c_split:][::-1]\n lay_coords_dict[i] = coords_top\n df_coords = pd.DataFrame(coords_top, columns=['X', 'Y'])\n # print(df_coords)\n\n #plot using all coordinates\n plt.plot(df_coords['X'].values, df_coords['Y'].values, lw=3)\n plt.gca().invert_yaxis()\n # plt.show()\n plt.close()\n\n\n # use k means to get rid of extra coords on short lines\n for i in list(range(1,6)):\n # kMEANS clustering, separate short line bottom half\n df_short = pd.DataFrame(lay_coords_dict[i], columns=['X', 'Y']) #1=L1,\n # plt.scatter( df_short['X'].values, df_short['Y'].values, s=5 )\n # plt.gca().invert_yaxis()\n # plt.show()\n\n #scale data\n scaler = StandardScaler()\n scaler.fit( df_short[['X', 'Y']].values )\n short_scale = scaler.transform( df_short[['X', 'Y']].values )\n\n init = np.array([[0.514, -0.629], [-1.101, 1.344]])\n\n #predict\n # kmeans_classifier = KMeans(n_clusters=2, init=init) #fixed centroids\n kmeans_classifier = KMeans(n_clusters=2) \n\n y_kmeans = kmeans_classifier.fit_predict(short_scale)\n centroids = kmeans_classifier.cluster_centers_\n inertia = kmeans_classifier.inertia_\n\n\n #update df\n df_short.insert(2, column='kClass', value=y_kmeans)\n\n #df scaled\n df_short_scale = pd.DataFrame(short_scale, columns=['X', 'Y'])\n df_short_scale.insert(2, column='kClass', value=y_kmeans)\n \n\n \"\"\"\n #plot data points for k means, clusters\n colmap = {0: '#029386', 1: '#D2691E', 2: '#A52A2A'}\n for i in range(2):\n new_df = df_short_scale[df_short_scale['kClass']==i]\n plt.scatter(new_df['X'].values, new_df['Y'].values, s=20, \\\n label='cluster' + str(i+1), color=colmap[i])\n\n #plot centroids\n for i in range (2):\n plt.scatter(centroids[i][0], centroids[i][1], marker='x', s=500, \\\n label='centroid' + str(i+1), color=colmap[i])\n \n plt.legend()\n plt.gca().invert_yaxis()\n plt.show()\n \"\"\"\n\n\n #new df for clean data, take centroid with more data points\n num_class0 = len(df_short[df_short['kClass']==0])\n num_class1 = len(df_short[df_short['kClass']==1])\n\n if num_class0 > num_class1:\n \n df_short_clean = df_short[df_short['kClass']==0]\n lay_coords_dict[i] = [[i,j] for i,j in zip(df_short_clean['X'].values,\\\n df_short_clean['Y'].values)]\n else:\n df_short_clean = df_short[df_short['kClass']==1]\n lay_coords_dict[i] = [[i,j] for i,j in zip(df_short_clean['X'].values,\\\n df_short_clean['Y'].values)]\n\n #plot clean short line\n # plt.scatter(df_short_clean['X'].values, df_short_clean['Y'].values, s=20)\n # plt.gca().invert_yaxis()\n # plt.show()\n\n #delete edge detect image and return dict\n rm_img_cmd = \"rm 
layer_contour.png\"\n os.system(rm_img_cmd)\n return(lay_coords_dict)",
"def select_vert(img):\n\n # Local variable which breaks loop if area of interest is selected well\n OK = False\n\n # Main while-loop\n while OK == False:\n\n # Plot image\n fig, ax = plt.subplots(figsize=(10, 10))\n ax.imshow(img, cmap=\"gray\")\n\n # Let user specify points\n coord = np.asarray(plt.ginput(4, show_clicks=True))\n p = Polygon(coord, linewidth=1, edgecolor='r', facecolor='none')\n plt.gca().add_artist(p)\n # Include area of interest in plot\n plt.draw()\n plt.show()\n\n # Ask user to accept or reject the proposed area of interest\n val = input(\"Is the region correct ([Y]/n)?\\n\")\n\n # Break if OK, re-do if not\n if val == \"Y\" or val == \"\":\n OK = True\n\n \"\"\"\n Creates a mask which marks the vertical line based on the coordinates given by the user.\n \"\"\"\n \n x, y = np.meshgrid(np.arange(img.shape[0]), np.arange(img.shape[1]), indexing='xy')\n x, y = x.flatten(), y.flatten()\n pts = np.vstack((x,y)).T\n pts_t = tuple(map(tuple, pts))\n mask = np.ones((img.shape[0],img.shape[1]))\n for (x,y) in pts_t:\n if p.get_path().contains_point((x,y)):\n mask[y][x] = 0\n\n # Return mask which is the area of interest with value 1, 0 else\n return mask",
"def lasso(image, save=True):\n if image.dtype == np.uint8:\n image = image / 255.\n\n TITLE = 'Press ENTER when satisfied with your selection.'\n fig = plt.figure()\n plt.tick_params(axis='both', which='both', bottom='off', top='off',\n labelbottom='off', right='off', left='off', labelleft='off')\n ax = fig.add_subplot(111)\n ax.imshow(image)\n ax.set_title(TITLE)\n\n height, width, _ = image.shape\n x, y = np.meshgrid(np.arange(width), np.arange(height))\n pix = np.vstack((x.flatten(), y.flatten())).T\n output = None\n\n def onselect(verts):\n # Select elements in original array bounded by selector path.\n verts = np.array(verts)\n p = Path(verts)\n ind = p.contains_points(pix, radius=1)\n selected = np.copy(image)\n selected[:, :, 0].flat[ind] = image[:, :, 0].flat[ind] * 0.8\n selected[:, :, 1].flat[ind] = image[:, :, 1].flat[ind] * 0.8\n selected[:, :, 2].flat[ind] = image[:, :, 2].flat[ind] * 0.8\n\n nonlocal output\n b = path_bbox(verts)\n ymin, ymax = int(min(b[:, 1])), int(max(b[:, 1])) + 1\n xmin, xmax = int(min(b[:, 0])), int(max(b[:, 0])) + 1\n alpha_mask = np.zeros((height, width))\n alpha_mask.flat[ind] = 1.0\n alpha_mask = alpha_mask[ymin:ymax, xmin:xmax]\n output = np.dstack((image[ymin:ymax, xmin:xmax], alpha_mask))\n\n ax.clear()\n ax.imshow(selected)\n ax.set_title(TITLE)\n ax.plot(*p.vertices.T, scalex=False, scaley=False)\n fig.canvas.draw_idle()\n\n def quit_figure(event):\n # Source: https://github.com/matplotlib/matplotlib/issues/830/.\n if event.key == 'enter':\n plt.close(event.canvas.figure)\n\n cid = plt.gcf().canvas.mpl_connect('key_press_event', quit_figure)\n lasso = LassoSelector(ax, onselect)\n plt.show()\n if save:\n plt.imsave('source.png', output)\n return output",
"def draw_polygon(left_x, right_x, left_y, right_y, img_):\n pts_left = np.array([np.flipud(np.transpose(np.vstack([left_x, left_y])))])\n pts_right = np.array([np.transpose(np.vstack([right_x, right_y]))])\n pts = np.hstack((pts_left, pts_right))\n img_ = cv2.polylines(img_, np.int_([pts]), isClosed=False, color=(60, 200, 60), thickness=10, lineType=cv2.LINE_AA)\n img_ = cv2.fillPoly(img_, np.int_(pts), (50, 90, 50))\n return img_",
"def add_overlay(self, data, vertices=None, to_overlay=None, mask_data=None,\n **kwargs):\n # Check input variables :\n if vertices is None:\n vertices = np.ones((len(self),), dtype=bool)\n if not len(vertices):\n logger.warning('Vertices array is empty. Abandoning.')\n return\n\n data = np.asarray(data)\n to_overlay = self._n_overlay if to_overlay is None else to_overlay\n data_lim = (data.min(), data.max())\n if len(self._data_lim) < to_overlay + 1:\n self._data_lim.append(data_lim)\n else:\n self._data_lim[to_overlay] = data_lim\n # -------------------------------------------------------------\n # TEXTURE COORDINATES\n # -------------------------------------------------------------\n need_reshape = to_overlay >= self._xrange.shape[1]\n if need_reshape:\n # Add column of zeros :\n z_ = np.zeros((len(self),), dtype=np.float32)\n z_text = np.zeros((1, LUT_LEN, 4), dtype=np.float32)\n self._xrange = np.c_[self._xrange, z_]\n self._alphas = np.c_[self._alphas, z_]\n self._text2d_data = np.concatenate((self._text2d_data, z_text))\n # (x, y) coordinates of the overlay for the texture :\n self._xrange[vertices, to_overlay] = normalize(data)\n # Transparency :\n self._alphas[vertices, to_overlay] = 1. # transparency level\n\n # -------------------------------------------------------------\n # TEXTURE COLOR\n # -------------------------------------------------------------\n # Colormap interpolation (if needed):\n colormap = Colormap(**kwargs)\n vec = np.linspace(data_lim[0], data_lim[1], LUT_LEN)\n self._text2d_data[to_overlay, ...] = colormap.to_rgba(vec)\n # Send data to the mask :\n if isinstance(mask_data, np.ndarray) and len(mask_data) == len(self):\n self._bgd_data[mask_data] = .5\n self._bgd_buffer.set_data(self._bgd_data)\n # -------------------------------------------------------------\n # BUFFERS\n # -------------------------------------------------------------\n if need_reshape:\n # Re-define buffers :\n self._xrange_buffer = gloo.VertexBuffer(self._xrange)\n self._text2d = gloo.Texture2D(self._text2d_data)\n self._alphas_buffer = gloo.VertexBuffer(self._alphas)\n # Send buffers to vertex shader :\n self.shared_program.vert['u_range'] = self._xrange_buffer\n self.shared_program.vert['u_alphas'] = self._alphas_buffer\n self.shared_program.vert['u_over_text'] = self._text2d\n else:\n self._xrange_buffer.set_data(self._xrange)\n self._text2d.set_data(self._text2d_data)\n self._alphas_buffer.set_data(self._alphas)\n # Update the number of overlays :\n self._n_overlay = to_overlay + 1\n self.shared_program.vert['u_n_overlays'] = self._n_overlay",
"def drawShapes(self):\n self.draw_polygon(self.poly3.get_points() , color = \"#000\")\n self.draw_polygon(self.poly2.get_points() , color = \"#000\")\n self.draw_polygon(self.poly1.get_points() , color = \"#000\")\n self.draw_rect(0, 0, self.width, self.height, color= \"#000\")\n \"\"\"These statements are used to determine if a point is inside any of the\n 3 polygons and if so changes the point's color\"\"\"\n if (self.poly2.point_inside_polygon(self.p1) or self.poly1.point_inside_polygon(self.p1)\n or self.poly3.point_inside_polygon(self.p1)):\n color = \"#0F0\"\n else:\n color = \"#F00\"\n self.fill_oval(self.p1.x, self.p1.y, 7, 7, color)\n\n if (self.poly2.point_inside_polygon(self.p2) or self.poly1.point_inside_polygon(self.p2)\n or self.poly3.point_inside_polygon(self.p2)):\n color = \"#0F0\"\n else:\n color = \"#F00\"\n self.fill_oval(self.p2.x, self.p2.y, 7, 7, color)\n if (self.poly2.point_inside_polygon(self.p3) or self.poly1.point_inside_polygon(self.p3)\n or self.poly3.point_inside_polygon(self.p3)):\n color = \"#0F0\"\n else:\n color = \"#F00\"\n self.fill_oval(self.p3.x, self.p3.y, 7, 7, color)",
"def clip_polygon(subject, clipper, operation = 'difference'):\n Subject = Polygon()\n Clipper = Polygon()\n\n for s in subject:\n Subject.add(Vertex(s))\n\n for c in clipper:\n Clipper.add(Vertex(c))\n\n clipped = Clipper.difference(Subject)\\\n if operation == 'reversed-diff'\\\n else Subject.__getattribute__(operation)(Clipper)\n\n clipped = [(ext.points,[hole.points for hole in holes]) for ext,holes in clipped]\n return clipped",
"def _finish_polygon(self):\n global undo_stack, choose_polygon\n if len(self.polygon_points) < 6:\n messagebox.showinfo(title='Info', message='Too few points for a polygon')\n return 'too_few_points'\n relative_poly_points = []\n for p in range(0, len(self.polygon_points), 2):\n relative_poly_points.extend(self.get_canvas_relative_coords((self.polygon_points[p],\n self.polygon_points[p + 1])))\n if choose_polygon:\n undo_stack.append('p')\n self.polygons.append(self.canvas.create_polygon(relative_poly_points,\n outline='blue', activewidth=3, width=1,\n fill='magenta', stipple='gray50'))\n self.canvas.tag_bind(self.polygons[-1], '<ButtonPress-1>', self.callback_click_polygon)\n self.canvas.tag_bind(self.polygons[-1], '<ButtonRelease-1>', self.callback_release_polygon)\n self.canvas.tag_bind(self.polygons[-1], '<B1-Motion>', self.callback_move_polygon)\n for p in self.polygon_groundstructure:\n self.canvas.delete(p)\n self.polygon_points_history[self.polygons[-1]] = np.reshape(np.asarray(self.polygon_points),\n (round(len(self.polygon_points) / 2),\n 2))\n self.polygon_points.clear()\n self.polygon_groundstructure.clear()\n self.parent_class.activate_save_bt()",
"def generatePolygons(self, *args, **kwargs): \n return 'var PloneMapPolygons = [' + \\\n ''.join([\"{ 'id': '%s', 'path' : %s,'title':'%s'},\" % (object.id, object.polygon, object.Title()) \n for object in self.context.objectValues() \n if hasattr(object, 'polygon') and len(object.polygon) > 0 ])[:-1] \\\n + '];'"
] |
[
"0.66302127",
"0.6113158",
"0.5918895",
"0.5812843",
"0.5737684",
"0.57315946",
"0.55679476",
"0.5533929",
"0.55014855",
"0.54703",
"0.5465126",
"0.54649836",
"0.5447356",
"0.54321885",
"0.5431229",
"0.5395844",
"0.53884107",
"0.53771347",
"0.5351133",
"0.5337138",
"0.53348064",
"0.5261546",
"0.52424693",
"0.51665395",
"0.5166439",
"0.51546925",
"0.5140716",
"0.5138022",
"0.5135545",
"0.5134518"
] |
0.66462404
|
0
|
Returns a tuple of (x1, x2) screen space coordinates of the start and end selection points. If there is no current selection, then returns None.
|
def _get_selection_screencoords(self):
selection = self.source.index.metadata["selections"]
if (selection is not None) and (len(selection) == 2):
mapper = self.source.index_mapper
return mapper.map_screen(np.array(selection))
else:
return None
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def get_selection(self):\n if not len(self.GetSelectionBlockTopLeft()):\n selected_columns = self.GetSelectedCols()\n selected_rows = self.GetSelectedRows()\n if selected_columns:\n start_col = selected_columns[0]\n end_col = selected_columns[-1]\n start_row = 0\n end_row = self.GetNumberRows() - 1\n elif selected_rows:\n start_row = selected_rows[0]\n end_row = selected_rows[-1]\n start_col = 0\n end_col = self.GetNumberCols() - 1\n else:\n start_row = end_row = self.GetGridCursorRow()\n start_col = end_col = self.GetGridCursorCol()\n elif len(self.GetSelectionBlockTopLeft()) > 1:\n wx.MessageBox(\"Multiple selections are not supported\", \"Warning\")\n return []\n else:\n start_row, start_col = self.GetSelectionBlockTopLeft()[0]\n end_row, end_col = self.GetSelectionBlockBottomRight()[0]\n return [start_row, start_col, end_row, end_col]",
"def selection_pos(self):\n buff = self._vim.current.buffer\n beg = buff.mark('<')\n end = buff.mark('>')\n return beg, end",
"def startAndEnd(self):\n upperRow = 0\n upperCol = 0\n lowerRow = 0\n lowerCol = 0\n if self.selectionMode == kSelectionNone:\n upperRow = self.penRow\n upperCol = self.penCol\n lowerRow = self.penRow\n lowerCol = self.penCol\n elif self.selectionMode == kSelectionAll:\n upperRow = 0\n upperCol = 0\n lowerRow = self.parser.rowCount() - 1\n lowerCol = self.parser.rowWidth(-1)\n elif self.selectionMode == kSelectionBlock:\n upperRow = min(self.markerRow, self.penRow)\n upperCol = min(self.markerCol, self.penCol)\n lowerRow = max(self.markerRow, self.penRow)\n lowerCol = max(self.markerCol, self.penCol)\n elif (self.selectionMode == kSelectionCharacter or\n self.selectionMode == kSelectionLine or\n self.selectionMode == kSelectionWord):\n upperRow = self.markerRow\n upperCol = self.markerCol\n lowerRow = self.penRow\n lowerCol = self.penCol\n if upperRow == lowerRow and upperCol > lowerCol:\n upperCol, lowerCol = lowerCol, upperCol\n elif upperRow > lowerRow:\n upperRow, lowerRow = lowerRow, upperRow\n upperCol, lowerCol = lowerCol, upperCol\n #app.log.detail('start and end', upperRow, upperCol, lowerRow, lowerCol)\n return (upperRow, upperCol, lowerRow, lowerCol)",
"def get_pick_position(self):\n x0 = int(self.GetPickX1())\n x1 = int(self.GetPickX2())\n y0 = int(self.GetPickY1())\n y1 = int(self.GetPickY2())\n return x0, y0, x1, y1",
"def cursorPosGL(self):\n globalPos = QtGui.QCursor.pos()\n pos = self.mapFromGlobal(globalPos)\n y = self.size().height() - pos.y()\n return pos.x(), y",
"def cursorPosGL(self):\n globalPos = QtGui.QCursor.pos()\n pos = self.mapFromGlobal(globalPos)\n y = self.size().height() - pos.y()\n return pos.x(), y",
"def extendSelection(self):\n if self.selectionMode == kSelectionNone:\n return (0, 0, -self.markerRow, -self.markerCol, 0)\n elif self.selectionMode == kSelectionAll:\n lowerRow = self.parser.rowCount() - 1\n lowerCol = self.parser.rowWidth(-1)\n return (lowerRow - self.penRow,\n lowerCol - self.penCol, -self.markerRow,\n -self.markerCol, 0)\n elif self.selectionMode == kSelectionLine:\n return (0, -self.penCol, 0, -self.markerCol, 0)\n elif self.selectionMode == kSelectionWord:\n if self.penRow > self.markerRow or (self.penRow == self.markerRow\n and\n self.penCol > self.markerCol):\n upperCol, lowerCol = self.__extendWords(\n self.markerRow, self.markerCol, self.penRow, self.penCol)\n return (0, lowerCol - self.penCol, 0, upperCol - self.markerCol,\n 0)\n else:\n upperCol, lowerCol = self.__extendWords(\n self.penRow, self.penCol, self.markerRow, self.markerCol)\n return (0, upperCol - self.penCol, 0, lowerCol - self.markerCol,\n 0)\n return (0, 0, 0, 0, 0)",
"def last_pos(self) -> tuple[int, int]:\n if not self.actions:\n return (self.start_x, self.start_y)\n else:\n box = self.get_hitbox_at(self.time_consumed)\n return box.pos_x, box.pos_y",
"def getPosition(self):\n\tleft = self.getLeft()\n\ttop = self.getTop()\n\treturn (left,top)",
"def cursor_coordinates(self):\n text = self.getText()\n lines = text.split(\"\\n\")\n pos = self.getCursorPos()\n if pos == 0:\n return (0, 0)\n i = 0\n cursor_row = -1\n cursor_col = -1\n for row, line in enumerate(lines):\n i += len(line) + 1 # we need to include \"\\n\"\n if pos < i:\n cursor_row = row\n cursor_col = pos - i + len(line) + 1\n break\n return (cursor_col, cursor_row)",
"def get_selection_first_coord(sel, name=\"(sel)\"):\n\n #print \"--> selection %(name)s: %(sel)s\" % vars()\n cmd.select(name, sel)\n try:\n return numpy.array(cmd.get_model(name).atom[0].coord)\n except IndexError:\n print \"--> empty selection: %(sel)s\" % vars()\n raise",
"def select(self):\n\n return self.p[0], self.p[1]",
"def get_cursor_pos(self):\n return (self.text_maker.pos[0] + 9, self.text_maker.pos[1] + 120 + 8)",
"def getMousePosition(self):\n return (self.mouseData.x, self.mouseData.y)",
"def get_pos(self):\n return (self.x, self.y)",
"def midpoint(self) -> Tuple[int, int]:\n minx, miny, maxx, maxy = self.substrates.bounds\n return ((minx + maxx) // 2, (miny + maxy) // 2)",
"def dimscr(self):\n return (self.startx, self.starty, self.endx - self.startx, self.endy - self.starty)",
"def get_current_position(self) -> Tuple[int, int]:\n return self.__row_position, self.__col_position",
"def getCoordinates(p):\n if p[0] == 'p': # minimum bounding rectangle for point\n return (int(p[1]), int(p[2]), int(p[1]), int(p[2]))\n elif p[0] == 'c': # minimum bounding rectangle for circle\n x = int(p[1])\n y = int(p[2])\n r = int(p[3])\n return (x - r, y - r, x + r, y + r)\n elif p[0] == 'l': # minimum bounding rectangle for line segment\n x1 = int(p[1])\n y1 = int(p[2])\n x2 = int(p[3])\n y2 = int(p[4])\n if y2 > y1:\n if x1 < x2:\n return (x1, y1, x2, y2)\n else:\n return (x2, y1, x1, y2)\n else:\n if x1 < x2:\n return (x1, y2, x2, y1)\n else:\n return (x2, y2, x1, y1)",
"def getPosition(self):\n return self.target, min(self.points), max(self.points)",
"def get_point(self):\n return self._x, self._y",
"def fetchbounds(self):\n pnts = [x for x in [self.out_start, self.start, self.in_start, \\\n self.in_end, self.end, self.out_end] \\\n if x is not None]\n return min(pnts), max(pnts)",
"def coordinates(self) -> Tuple[int, int]:\n return self.x, self.y",
"def GetSelection(self):\n # STC HELL\n # Translate the UTF8 byte offsets to unicode\n start, end = super(EditraBaseStc, self).GetSelection()\n utf8_txt = self.GetTextUTF8()\n if start != 0:\n start = len(ed_txt.DecodeString(utf8_txt[0:start], 'utf-8'))\n if end != 0:\n end = len(ed_txt.DecodeString(utf8_txt[0:end], 'utf-8'))\n del utf8_txt\n return start, end",
"def cursorPosQt(self):\n globalPos = QtGui.QCursor.pos()\n pos = self.mapFromGlobal(globalPos)\n return pos.x(), pos.y()",
"def cursorPosQt(self):\n globalPos = QtGui.QCursor.pos()\n pos = self.mapFromGlobal(globalPos)\n return pos.x(), pos.y()",
"def get_position(self):\n return (self.x_pos, self.y_pos)",
"def mousePos():\n data = display.Display().screen().root.query_pointer()._data\n return data[\"root_x\"], data[\"root_y\"]",
"def get_from_to(self):\n pfrom = self.cursors['y1']\n pto = self.cursors['y2']\n\n return pfrom, pto",
"def position(self):\n return self.x, self.y"
] |
[
"0.7141307",
"0.69176507",
"0.689422",
"0.6854982",
"0.65557784",
"0.65557784",
"0.655157",
"0.6543182",
"0.6430244",
"0.6377898",
"0.6370149",
"0.6288393",
"0.6245805",
"0.62029105",
"0.6114205",
"0.60794985",
"0.60788393",
"0.6052775",
"0.6035539",
"0.6032607",
"0.6031879",
"0.6028135",
"0.6023028",
"0.5981974",
"0.5978457",
"0.5978457",
"0.5942773",
"0.5934941",
"0.5929953",
"0.5901546"
] |
0.7626898
|
0
|
Calculate the overlay polygon based on the selection and the location of the source and destination plots.
|
def calculate_points(self, component):
# find selection range on source plot
x_start, x_end = self._get_selection_screencoords()
if x_start > x_end:
x_start, x_end = x_end, x_start
y_end = self.source.y
y_start = self.source.y2
left_top = np.array([x_start, y_end])
left_mid = np.array([x_start, y_start])
right_top = np.array([x_end, y_end])
right_mid = np.array([x_end, y_start])
# Offset y because we want to avoid overlapping the trapezoid with the topmost
# pixels of the destination plot.
y = self.destination.y - 1
left_end = np.array([self.destination.x, y])
right_end = np.array([self.destination.x2, y])
polygon = np.array((left_end, left_mid, left_top,
right_top, right_mid, right_end))
left_line = np.array((left_top, left_mid, left_end))
right_line = np.array((right_end, right_mid, right_top))
return left_line, right_line, polygon
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def calculate_points(self, component):\n # find selection range on source plot\n x_start, x_end = self._get_selection_screencoords()\n if x_start > x_end:\n x_start, x_end = x_end, x_start\n\n y_end = self.source.y\n y_start = self.source.y2\n\n left_top = np.array([x_start, y_start])\n left_mid = np.array([x_start, y_end])\n right_top = np.array([x_end, y_start])\n right_mid = np.array([x_end, y_end])\n\n # Offset y because we want to avoid overlapping the trapezoid with the topmost\n # pixels of the destination plot.\n y = self.destination.y2 + 1\n\n left_end = np.array([self.destination.x, y])\n right_end = np.array([self.destination.x2, y])\n\n polygon = np.array((left_top, left_mid, left_end,\n right_end, right_mid, right_top))\n left_line = np.array((left_top, left_mid, left_end))\n right_line = np.array((right_end, right_mid, right_top))\n\n return left_line, right_line, polygon",
"def overlay(self, component, gc, view_bounds=None, mode=\"normal\"):\n\n tmp = self._get_selection_screencoords()\n if tmp is None:\n return\n\n left_line, right_line, polygon = self.calculate_points(component)\n\n gc.save_state()\n try:\n gc.translate_ctm(*component.position)\n gc.set_alpha(self.alpha)\n gc.set_fill_color(self.fill_color_)\n gc.set_line_width(self.border_width)\n gc.set_stroke_color(self.border_color_)\n gc.begin_path()\n gc.lines(polygon)\n gc.fill_path()\n\n gc.begin_path()\n gc.lines(left_line)\n gc.lines(right_line)\n gc.stroke_path()\n finally:\n gc.restore_state()\n return",
"def _select_polygons(self):\r\n selected_polys_layer = \"SelectedPolys_\" + self.job_id\r\n if self.facility_id is None:\r\n facility_query = arcpy.AddFieldDelimiters(self.time_lapse_polygons, FACILITY_ID_FIELD) + \" IS NULL\"\r\n else:\r\n facility_query = arcpy.AddFieldDelimiters(self.time_lapse_polygons, FACILITY_ID_FIELD) + \" = \" + \\\r\n str(self.facility_id)\r\n query = facility_query + \" AND \" + \\\r\n arcpy.AddFieldDelimiters(self.time_lapse_polygons, FROM_BREAK_FIELD) + \" = \" + str(self.from_break) + \\\r\n \" AND \" + \\\r\n arcpy.AddFieldDelimiters(self.time_lapse_polygons, TO_BREAK_FIELD) + \" = \" + str(self.to_break)\r\n arcpy.management.MakeFeatureLayer(self.time_lapse_polygons, selected_polys_layer, where_clause=query)\r\n self.logger.info(\r\n f\"{int(arcpy.management.GetCount(selected_polys_layer).getOutput(0))} time lapse polygons selected.\")\r\n return selected_polys_layer",
"def poly_to_list_with_overlap(self, polygon):\n added = 0\n polygon_item = polygon.polygon()\n polygon_item.translate(polygon.x(), polygon.y())\n\n # Comparator to determine which x value of two points is the highest\n def compare_x(item1, item2):\n if item1.x() < item2.x():\n return -1\n elif item1.x() > item2.x():\n return 1\n else:\n return 0\n\n # Comparator to determine which y value of two points is the highest\n def compare_y(item1, item2):\n if item1.y() < item2.y():\n return -1\n elif item1.y() > item2.y():\n return 1\n else:\n return 0\n\n # Create two lists, one sorted by ascending x-values, one by ascending y-values\n x_list = sorted(self.potential_edge_splitters, key=cmp_to_key(compare_x))\n y_list = sorted(self.potential_edge_splitters, key=cmp_to_key(compare_y))\n\n # Loop over all children to the polygon\n for item in polygon.childItems():\n # Look only at edges (overlapping of points is handled elsewhere)\n if isinstance(item, PyQt5.QtWidgets.QGraphicsLineItem):\n edge = item\n\n p1 = edge.line().p1()\n p2 = edge.line().p2()\n added_this = 0\n\n # Choose the direction with the largest disparity (to avoid scenario of straight lines)\n # then use the sorted list for that direction\n if abs(p1.x() - p2.x()) > abs(p1.y() - p2.y()):\n mode = \"X\"\n circ_list = x_list\n else:\n mode = \"Y\"\n circ_list = y_list\n\n for circ in circ_list:\n poly = circ.parentItem()\n p = circ.scenePos()\n\n # temp_p needed since edge.contains does not account for the edge being moved in the canvas\n temp_p = circ.scenePos()\n temp_p.setX(temp_p.x() - edge.scenePos().x())\n temp_p.setY(temp_p.y() - edge.scenePos().y())\n\n # Find the edges to split which contain temp_p, if the edge contains decide the orientation (in x-\n # or y-direction decided earlier) of p1 and p2, based on this insert the new point in the polygon\n # in the correct position\n if edge.contains(temp_p):\n if edge in poly.childItems():\n pass # Ignore if the edge is in the same polygon as the point\n else:\n if temp_p == p1 or temp_p == p2:\n pass # Don't compare if it contains an edge point, instead handled later by the overlapping points\n elif mode == \"Y\":\n if p1.y() < p2.y(): # Left to right\n index = abs(edge.localIndex)\n polygon_item.insert(index + added, p)\n added += 1\n elif p1.y() > p2.y(): # Right to left\n index = abs(edge.localIndex)\n polygon_item.insert(index + added - added_this, p)\n added_this += 1\n added += 1\n else:\n if p1.x() < p2.x(): # Left to right\n index = abs(edge.localIndex)\n polygon_item.insert(index + added, p)\n added += 1\n elif p1.x() > p2.x(): # Right to left\n index = abs(edge.localIndex)\n polygon_item.insert(index + added - added_this, p)\n added_this += 1\n added += 1\n\n return self.poly_to_list(polygon_item, \"Global\")",
"def _draw_polygon(self):\n xs, ys = zip(*self._xys) if self._xys else ([], [])\n self._selection_artist.set_data(xs, ys)\n self._update_box()\n # Only show one tool handle at the start and end vertex of the polygon\n # if the polygon is completed or the user is locked on to the start\n # vertex.\n if (self._selection_completed\n or (len(self._xys) > 3\n and self._xys[-1] == self._xys[0])):\n self._polygon_handles.set_data(xs[:-1], ys[:-1])\n else:\n self._polygon_handles.set_data(xs, ys)\n self.update()",
"def _update_selection_poly(self, vmin, vmax):\n # The vertices are positioned\n # 1 ------ 2\n # | |\n # 0, 4 ---- 3\n verts = self.poly.xy\n if self.orientation == \"vertical\":\n verts[0] = verts[4] = .25, vmin\n verts[1] = .25, vmax\n verts[2] = .75, vmax\n verts[3] = .75, vmin\n else:\n verts[0] = verts[4] = vmin, .25\n verts[1] = vmin, .75\n verts[2] = vmax, .75\n verts[3] = vmax, .25",
"def draw_overlay(self):\n pass",
"def SH_FindOverlap(xcenter, ycenter, xlength, ylength, xp_corner, yp_corner):\n\n areaClipped = 0.0\n top = ycenter + 0.5 * ylength\n bottom = ycenter - 0.5 * ylength\n\n left = xcenter - 0.5 * xlength\n right = xcenter + 0.5 * xlength\n\n nVertices = 4 # input detector pixel vertices\n MaxVertices = 9\n # initialize xPixel, yPixel to the detector pixel corners.\n # xPixel,yPixel will become the clipped polygon vertices inside the cube pixel\n # xnew,ynew xpixel and ypixel of size MaxVertices\n\n xPixel = []\n yPixel = []\n\n xnew = []\n ynew = []\n\n for j in range(0, 9):\n xnew.append(0.0)\n ynew.append(0.0)\n xPixel.append(0.0)\n yPixel.append(0.0)\n\n\n # Xpixel, YPixel closed (5 corners)\n for i in range(0, 4):\n xPixel[i] = xp_corner[i]\n yPixel[i] = yp_corner[i]\n xPixel[4] = xp_corner[0]\n yPixel[4] = yp_corner[0]\n\n\n for i in range(0, 4): # 0:left, 1: right, 2: bottom, 3: top\n nVertices2 = 0\n for j in range(0, nVertices):\n x1 = xPixel[j]\n y1 = yPixel[j]\n x2 = xPixel[j + 1]\n y2 = yPixel[j + 1]\n condition = calcCondition(i, x1, y1, x2, y2, left, right, top, bottom)\n x = 0\n y = 0\n\n if condition == 1:\n x, y = solveIntersection(i, x1, y1, x2, y2,\n left, right, top, bottom)\n nVertices2 = addpoint(x, y, xnew, ynew, nVertices2);\n nVertices2 = addpoint(x2, y2, xnew, ynew, nVertices2)\n\n elif condition == 2:\n nVertices2 = addpoint(x2, y2, xnew, ynew, nVertices2)\n elif condition == 3:\n x, y = solveIntersection(i, x1, y1, x2, y2,\n left, right, top, bottom)\n nVertices2 = addpoint(x, y, xnew, ynew, nVertices2)\n\n#\tcondition == 4: points outside\n# Done looping over J corners\n nVertices2 = addpoint(xnew[0], ynew[0], xnew, ynew, nVertices2) # close polygon\n\n if nVertices2 > MaxVertices:\n raise Error2DPolygon(\" Failure in finding the clipped polygon, nVertices2 > 9 \")\n\n\n nVertices = nVertices2 - 1;\n\n for k in range(0, nVertices2):\n xPixel[k] = xnew[k]\n yPixel[k] = ynew[k]\n\n# done loop over top,bottom,left,right\n nVertices = nVertices + 1\n\n\n if nVertices > 0:\n areaClipped = FindAreaPoly(nVertices, xPixel, yPixel);\n\n\n return areaClipped;",
"def on_action_merge(self):\n ignore_warning = False\n if self.mode == \"Draw Poly\":\n self.remove_drawing_poly()\n elif self.mode == \"Draw Rect\":\n self.remove_drawing_rect()\n\n # Loop over all polygons and compare to all other, if two polygons are merged they are removed from the list\n for poly_outer in self.poly_list:\n for poly_inner in self.poly_list:\n if poly_outer == poly_inner:\n continue # Ignore comparison to self\n\n contain_list = self.polygon_contains(poly_outer, poly_inner)\n\n if all(contain_list):\n # If all points are inside the outer polygon do not merge (this would remove the inner one)\n pass\n elif any(contain_list):\n # If some but not all points are inside the outer polygon the two polygons overlap and should be\n # merged\n\n # Ignore holes\n if poly_inner in self.hole_list or poly_outer in self.hole_list:\n pass\n else:\n # Warning message that merging will remove any markers on the polygons\n # If return is chosen cancel the merge, else proceed and ignore the warning message\n # for the continuation of the loop\n for child in poly_inner.childItems():\n if child.childItems():\n if isinstance(child.childItems()[0], QGraphicsTextItem):\n if not ignore_warning:\n user_choice = self.marker_removal_warning()\n if user_choice == \"Cancel\":\n return\n elif user_choice == \"Ignore\":\n ignore_warning = True\n else:\n self.point_marker_list.remove(child)\n elif child.childItems()[0].childItems():\n if isinstance(child.childItems()[0].childItems()[0], QGraphicsTextItem):\n if not ignore_warning:\n user_choice = self.marker_removal_warning()\n if user_choice == \"Cancel\":\n return\n elif user_choice == \"Ignore\":\n ignore_warning = True\n self.line_marker_list.remove(child.childItems()[0])\n else:\n self.line_marker_list.remove(child.childItems()[0])\n\n for child in poly_outer.childItems():\n if child.childItems():\n if isinstance(child.childItems()[0], QGraphicsTextItem):\n if not ignore_warning:\n user_choice = self.marker_removal_warning()\n if user_choice == \"Cancel\":\n return\n elif user_choice == \"Ignore\":\n ignore_warning = True\n else:\n self.point_marker_list.remove(child)\n\n elif child.childItems()[0].childItems():\n if not ignore_warning:\n if isinstance(child.childItems()[0].childItems()[0], QGraphicsTextItem):\n user_choice = self.marker_removal_warning()\n if user_choice == \"Cancel\":\n return\n elif user_choice == \"Ignore\":\n self.line_marker_list.remove(child.childItems()[0])\n ignore_warning = True\n else:\n self.line_marker_list.remove(child.childItems()[0])\n\n # Move the QPolygonF items to the global coordinates and unite them (merge)\n p1 = poly_outer.polygon().translated(poly_outer.x(), poly_outer.y())\n p2 = poly_inner.polygon().translated(poly_inner.x(), poly_inner.y())\n uni = p1.united(p2)\n\n # Unite adds the starting point again as endpoint so we have to remove this duplicate point\n # to avoid future problems\n uni = self.poly_to_list(uni, \"Global\")\n uni = uni[:-1]\n\n # Add the new merged polygon, remove the old polygons from the view and lists\n self.add_poly_to_scene(QPolygonF(uni))\n self.delete_polygon(poly_inner, True)\n self.delete_polygon(poly_outer, True)\n # break",
"def get_geometry(self, selection_name):",
"def updateSelectionArea(self):\n self.machine.setSelectionArea(self.points,\n fill='hatch',\n color=self.machine.color)\n eventDict = prepareDrawingSignal('drawingProgress',\n 'polygon',\n self.points,\n self.machine.parameters)\n self.machine.plot.notify(**eventDict)",
"def __draw_polygon(self, event, klick):\n global creating_polygon\n curX = self.canvas.canvasx(event.x)\n curY = self.canvas.canvasy(event.y)\n if not klick and len(self.polygon_points) >= 2:\n c_r_x, c_r_y = self.get_canvas_relative_coords((self.polygon_points[-2], self.polygon_points[-1]))\n distanceX = curX - c_r_x\n distanceY = curY - c_r_y\n if pow(pow(distanceX, 2) + pow(distanceY, 2), 1 / 2) <= 15:\n return\n image_relative_x, image_relative_y = self.get_image_relative_coords((curX, curY))\n self.polygon_points.extend((image_relative_x, image_relative_y))\n self.polygon_groundstructure.append(self.canvas.create_rectangle(curX - 2, curY - 2, curX + 2, curY + 2,\n outline='magenta', width=1,\n activewidth=2))\n if not creating_polygon: # start with a new polygon\n creating_polygon = True\n return\n else: # draw a line between the last points\n c_r_x1, c_r_y1 = self.get_canvas_relative_coords((self.polygon_points[-4], self.polygon_points[-3]))\n c_r_x2, c_r_y2 = self.get_canvas_relative_coords((self.polygon_points[-2], self.polygon_points[-1]))\n self.polygon_groundstructure.append(self.canvas.create_line([c_r_x1, c_r_y1, c_r_x2, c_r_y2],\n fill='magenta', width=2))",
"def polySelectEditCtx(*args, adjustEdgeFlow: Union[float, bool]=1.0, divisions: Union[int,\n bool]=2, exists: bool=True, fixQuads: bool=False, image1: Union[AnyStr,\n bool]=\"\", image2: Union[AnyStr, bool]=\"\", image3: Union[AnyStr, bool]=\"\",\n insertWithEdgeFlow: bool=False, smoothingAngle: Union[float, bool]=0.0,\n splitType: Union[int, bool]=0, useEqualMultiplier: bool=True,\n absoluteOffset: bool=True, autoComplete: bool=True, deleteEdge: bool=True,\n endVertexOffset: Union[float, bool]=0.0, mode: Union[int, bool]=0,\n startVertexOffset: Union[float, bool]=0.0, q=True, query=True, e=True,\n edit=True, **kwargs)->Union[AnyStr, Any]:\n pass",
"def event_click_polygon(self, event):\n\n event_x_pos = self.canvasx(event.x)\n event_y_pos = self.canvasy(event.y)\n drag_lims = self.get_vector_object(self.variables.current_shape_id).image_drag_limits\n if drag_lims:\n canvas_lims = self.image_coords_to_canvas_coords(drag_lims)\n if event_x_pos < canvas_lims[0]:\n event_x_pos = canvas_lims[0]\n elif event_x_pos > canvas_lims[2]:\n event_x_pos = canvas_lims[2]\n if event_y_pos < canvas_lims[1]:\n event_y_pos = canvas_lims[1]\n elif event_y_pos > canvas_lims[3]:\n event_y_pos = canvas_lims[3]\n\n if self.variables.actively_drawing_shape:\n old_coords = self.get_shape_canvas_coords(self.variables.current_shape_id)\n new_coords = list(old_coords) + [event_x_pos, event_y_pos]\n self.modify_existing_shape_using_canvas_coords(self.variables.current_shape_id, new_coords)\n # re-initialize shape if we're not actively drawing\n else:\n new_coords = (event.x, event.y, event_x_pos+1, event_y_pos+1)\n self.modify_existing_shape_using_canvas_coords(self.variables.current_shape_id, new_coords)\n self.variables.actively_drawing_shape = True",
"def createImgOverlay(sli, graph, drawing_params, notAnEmptySlice):\n \n (Y, X) = sli.shape\n \n # Creation of the superposition file (3 dimensions because RGB for the graph drawing)\n sli_ovl = np.zeros([Y, X, 3], np.uint8)\n sli_ovl[:, :, 0] = sli\n sli_ovl[:, :, 1] = sli\n sli_ovl[:, :, 2] = sli\n \n if notAnEmptySlice:\n line = drawing_params[0]\n line_color = drawing_params[1]\n line_size = drawing_params[2]\n apex_color = drawing_params[3]\n apex_size = drawing_params[4]\n node_color = drawing_params[5]\n node_size = drawing_params[6]\n body_color = drawing_params[7]\n body_size = drawing_params[8]\n \n graph = nx.convert_node_labels_to_integers(graph, first_label=0, \n ordering='default', \n label_attribute=None)\n \n # Creation of arrays from graph elements \n x_node = np.fromiter(nx.get_node_attributes(graph, 'x').values(), \n dtype=int) \n y_node = np.fromiter(nx.get_node_attributes(graph, 'y').values(), \n dtype=int) \n degrees = np.array([degree for node, degree in nx.degree(graph)], \n dtype=int) \n edges = np.array(graph.edges())\n \n # Lists of edges coordinates\n x1 = x_node[edges[:, 0]]\n y1 = y_node[edges[:, 0]]\n x2 = x_node[edges[:, 1]]\n y2 = y_node[edges[:, 1]]\n \n # Edges drawing\n if line:\n for i in range(len(x1)):\n cv2.line(sli_ovl, (x1[i], y1[i]), (x2[i], y2[i]), \n line_color, line_size)\n \n # Nodes drawing\n for i in range(len(x_node)):\n if degrees[i] == 1: # apex points\n color = apex_color\n size = apex_size\n elif degrees[i] == 2: # body/hypha points\n color = body_color\n size = body_size\n else: # branching/node points\n color = node_color\n size = node_size\n cv2.circle(sli_ovl, (x_node[i], y_node[i]), size, color, \n thickness=-1)\n\n return sli_ovl",
"def sf_dotset():\n global fig,ax,ss,statusL,statusM,axh,axv\n #get the handle of figure and axis\n fig=plt.gcf()\n ax=plt.gca()\n statusL,statusM=None,None\n ss=[]\n print 'Drawing a line, you should select at least 2 points'\n #\n def onmouse(event):\n global fig,ax,ss,statusL,statusM,axh,axv\n if statusL==None:\n if event.button==1:\n x,y=event.xdata,event.ydata\n print 'Coordinate of current point', x, y\n ss.append([x,y])\n axv=ax.axvline(event.xdata)\n axh=ax.axhline(event.ydata)\n fig.canvas.draw()\n #remove the vertical and horizontal lines\n axh.remove()\n axv.remove()\n #print ss\n statusL=None\n elif event.button==2:\n #polygon=np.array([ss],dtype=np.int32)\n #cv2.fillPoly(res['map'],polygon,0)\n #ax.imshow(res['map'],interpolation='nearest',cmap=cmapp)\n #polygondraw=plt.Polygon(ss,fill=None,edgecolor='b')\n #convert ss to to two-col array\n sxy=np.asarray(ss)\n sx,sy=sxy[:,0],sxy[:,1]\n plt.plot(sx,sy)\n #ax.add_patch(polygondraw)\n #show the patch\n fig.canvas.draw()\n print 'Press the middle button to remove the bad points'\n print 'Press the right button to quit and return the res'\n statusL=1\n statusM=1\n elif event.button==3:\n plt.close()\n #returen res\n elif statusM==1:\n if event.button==2:\n res=deepcopy(img)\n #ax.imshow(res['map'],interpolation='nearest',cmap=cmapp)\n #show the original pattern\n polygondraw.remove()\n fig.canvas.draw()\n statusL=None\n ss=[]\n print 'Press the left button to select a new point'\n elif event.button==3:\n plt.close()\n\n cid=fig.canvas.mpl_connect('button_press_event',onmouse)\n plt.show(block=True)\n #remove the polygon area\n #tmp=np.ones((res['height'],res['width']))\n line=np.asarray(ss,dtype=np.float)\n return line",
"def event_drag_multipoint_polygon(self, event):\n\n if self.variables.current_shape_id:\n event_x_pos = self.canvasx(event.x)\n event_y_pos = self.canvasy(event.y)\n drag_lims = self.get_vector_object(self.variables.current_shape_id).image_drag_limits\n if drag_lims:\n canvas_lims = self.image_coords_to_canvas_coords(drag_lims)\n if event_x_pos < canvas_lims[0]:\n event_x_pos = canvas_lims[0]\n elif event_x_pos > canvas_lims[2]:\n event_x_pos = canvas_lims[2]\n if event_y_pos < canvas_lims[1]:\n event_y_pos = canvas_lims[1]\n elif event_y_pos > canvas_lims[3]:\n event_y_pos = canvas_lims[3]\n\n self.show_shape(self.variables.current_shape_id)\n coords = self.coords(self.variables.current_shape_id)\n new_coords = list(coords[0:-2]) + [event_x_pos, event_y_pos]\n self.modify_existing_shape_using_canvas_coords(self.variables.current_shape_id, new_coords)\n else:\n pass",
"def make_percent_access_polygons(self):\r\n self.logger.info(\r\n f\"Processing FacilityID {self.facility_id}, FromBreak {self.from_break}, ToBreak {self.to_break}...\")\r\n self.scratch_gdb = self._create_output_gdb()\r\n selected_polygons = self._select_polygons()\r\n joined_polygons = self._join_polygons(selected_polygons)\r\n dissolved_polygons = self._dissolve_cells(joined_polygons)\r\n self.job_result[\"polygons\"] = dissolved_polygons",
"def _set_poly_roi(self, event, x, y, flags, params):\r\n if event == cv2.EVENT_LBUTTONDOWN:\r\n self.tpoly.append((x, y))\r\n self.ix = x\r\n self.iy = y\r\n self.drawing = True\r\n if event == cv2.EVENT_MOUSEMOVE:\r\n if self.drawing:\r\n self.img = self.current_frame.copy()\r\n _poly = self.tpoly + [(x, y), self.tpoly[0]]\r\n for p1, p2 in zip(_poly[:-1], _poly[1:]):\r\n cv2.line(self.img, p1, p2, (0, 0, 255), 3)\r\n if event == cv2.EVENT_LBUTTONDBLCLK:\r\n self.poly = self.tpoly\r\n self.tpoly = []\r\n self.drawing = False",
"def generatePolygons():",
"def polyclip(i, j, pol_x, pol_y, area=False):\n n = len(pol_x)\n nout = n + 4\n px_out, py_out = [0] * nout, [0] * nout\n clip_vals = [i, i + 1, j + 1, j]\n\n for ctype in range(4):\n cv = clip_vals[ctype]\n if ctype == 0:\n inside = [px > i for px in pol_x]\n elif ctype == 1:\n inside = [(px < i + 1) for px in pol_x]\n elif ctype == 2:\n inside = [(py < j + 1) for py in pol_y]\n else:\n inside = [py > j for py in pol_y]\n if all(inside):\n continue\n\n shiftp1 = inside.copy()\n shiftp1.insert(0, shiftp1.pop(-1))\n crosses = [i1 != i2 for (i1, i2) in zip(inside, shiftp1)]\n pind = 0\n for k in range(n):\n px, py = pol_x[k], pol_y[k]\n if crosses[k]: # out->in or in->out, add intersection\n ind = n - 1 if k == 0 else k - 1\n sx, sy = pol_x[ind], pol_y[ind]\n try:\n if ctype <= 1: # left or right\n px_out[pind] = cv\n py_out[pind] = sy + ((py - sy) / (px - sx)) * (cv - sx)\n else: # top or bottom\n px_out[pind] = sx + ((px - sx) / (py - sy)) * (cv - sy)\n py_out[pind] = cv\n except ZeroDivisionError: # pragma: no cover\n px_out[pind] = np.nan\n py_out[pind] = np.nan\n pind += 1\n\n if inside[k]: # out->in or in->in, add 2nd point\n px_out[pind] = px\n py_out[pind] = py\n pind += 1\n\n if pind >= nout - 2:\n nout *= 2\n px_out = px_out + [0] * nout\n py_out = py_out + [0] * nout\n nout *= 2\n\n if pind == 0: # polygon is entirely outside this line\n return None, None\n n = pind\n pol_x = px_out[:n].copy()\n pol_y = py_out[:n].copy()\n\n if area:\n if pol_x is None: # pragma: no cover\n return 0.0\n shiftx = pol_x.copy()\n shifty = pol_y.copy()\n shiftx.append(shiftx.pop(0))\n shifty.append(shifty.pop(0))\n a1 = [p[0] * p[1] for p in zip(pol_x, shifty)]\n a2 = [p[0] * p[1] for p in zip(pol_y, shiftx)]\n a = [p[0] - p[1] for p in zip(a1, a2)]\n return abs(sum(a)) / 2\n\n return pol_x, pol_y",
"def layer_coords(label_lst): #full path\n \n #if a fundus then do this block\n gyrus_check = all(i.__contains__(\"fundus\") for i in label_lst)\n if gyrus_check:\n for layer in label_lst:\n #read data\n df_layer = pd.read_csv(layer)\n df_layer = df_layer.iloc[1:,0]\n df_layer = pd.DataFrame( [list(map(float, i)) for i in [list(i.split()) for i in \\\n df_layer.values]], columns=['idk1', 'X', 'Y', 'Z', 'idk2'])[['X', 'Y', 'Z']]\n\n #compute slope\n yvals = [(y2 - y1) for y1, y2 in zip(df_layer['Y'], df_layer['Y'][1:])]\n xvals = [(x2 - x1) for x1, x2 in zip(df_layer['X'], df_layer['X'][1:])]\n layer_slope = [round(i,2) for i in np.divide(yvals, xvals)]\n\n #split lam label into three\n split = math.floor(len(df_layer['X'].values)/3)\n df_layer_right = df_layer[0:split]\n df_layer_left = df_layer[-split:]\n df_layer_middle = df_layer[split:-split]\n\n plt.plot(df_layer['X'], df_layer['Y'], lw=3) #color='#000000'\n # plt.plot(df_layer['X'], df_layer['Y'], linewidth=1, marker='o', markersize=5)\n plt.axis('off')\n plt.savefig('layer_contour.png')\n # plt.show()\n plt.close()\n\n #read, convert to grayscale, find edges\n layer_img = cv2.imread('layer_contour.png')\n layer_img_grey = cv2.cvtColor(layer_img, cv2.COLOR_BGR2GRAY)\n layer_edges = cv2.Canny(layer_img_grey, 30, 200)\n\n #find contours\n contours, hierachy = cv2.findContours(layer_edges, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)\n # cv2.imshow('contour', layer_edges)\n # cv2.waitKey(0)\n # cv2.destroyAllWindows()\n \n #order contours\n contours = [np.squeeze(i) for i in contours]\n df_contours = pd.DataFrame(contours)\n contours_ord = df_contours.loc[6].values, df_contours.loc[4].values, \\\n df_contours.loc[3].values, df_contours.loc[2].values, \\\n df_contours.loc[1].values, df_contours.loc[0].values, \\\n df_contours.loc[5].values\n contours_ord = np.squeeze(contours_ord)\n \n\n #plot all layers and add coordinate data to dict \n lay_coords_dict = {}\n for laycon, i in zip(contours_ord, list(range(len(contours)))): \n #split coordinates into top and bottom edge\n if i == 0: #0 == pial\n c_idx = int(np.floor(len(laycon)/2))\n coords_top = np.array(list(reversed(laycon[:c_idx])))\n lay_coords_dict[i] = coords_top[10:]\n # print(coords_top)\n\n else:\n c_idx = int(np.floor(len(laycon)/2))\n coords_top = np.array(list(reversed(laycon[c_idx:])))\n lay_coords_dict[i] = coords_top[5:-7]\n\n \n #plot coords\n # for key, val in lay_coords_dict.items():\n # plt.plot([i[0] for i in val], [i[1] for i in val], lw=1.75)\n # plt.gca().invert_yaxis()\n # plt.show()\n # plt.close()\n # sys.exit()\n\n #delete edge detect image and return dict\n rm_img_cmd = \"rm layer_contour.png\"\n os.system(rm_img_cmd)\n return(lay_coords_dict)\n \n\n #for crown data do this block\n else:\n for layer in label_lst:\n #read data\n df_layer = pd.read_csv(layer)\n df_layer = df_layer.iloc[1:,0]\n df_layer = pd.DataFrame( [list(map(float, i)) for i in [list(i.split()) for i in \\\n df_layer.values]], columns=['idk1', 'X', 'Y', 'Z', 'idk2'])[['X', 'Y', 'Z']]\n\n #compute slope\n yvals = [(y2 - y1) for y1, y2 in zip(df_layer['Y'], df_layer['Y'][1:])]\n xvals = [(x2 - x1) for x1, x2 in zip(df_layer['X'], df_layer['X'][1:])]\n layer_slope = [round(i,2) for i in np.divide(yvals, xvals)]\n\n #split lam label into three\n split = math.floor(len(df_layer['X'].values)/3)\n df_layer_right = df_layer[0:split]\n df_layer_left = df_layer[-split:]\n df_layer_middle = df_layer[split:-split]\n\n plt.plot(df_layer['X'], df_layer['Y'], lw=3) #color='#000000', lw=5\n # 
plt.plot(df_layer['X'], df_layer['Y'], linewidth=1, marker='o', markersize=5)\n plt.axis('off')\n plt.savefig('layer_contour.png')\n # plt.show()\n plt.close()\n\n #read, convert to grayscale, find edges\n layer_img = cv2.imread('layer_contour.png')\n layer_img_grey = cv2.cvtColor(layer_img, cv2.COLOR_BGR2GRAY)\n layer_edges = cv2.Canny(layer_img_grey, 30, 200)\n\n #find contours\n contours, hierachy = cv2.findContours(layer_edges, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)\n # cv2.imshow('contour', layer_edges)\n # cv2.waitKey(0)\n # cv2.destroyAllWindows()\n \n\n #plot all layers and add coordinate data to dict\n lay_coords_dict = {}\n for laycon, i in zip(contours, list(range( len(contours) ) )[::-1] ):#7\n #split coordinates into top and bottom edge\n # print(laycon)\n coords_lst = [list(ii) for i in laycon for ii in i] # 0 == GWB\n # print(coords_lst)\n\n c_split = math.floor(len(coords_lst)/4)\n coords_top = coords_lst[:c_split][::-1] + coords_lst[-c_split:][::-1]\n lay_coords_dict[i] = coords_top\n df_coords = pd.DataFrame(coords_top, columns=['X', 'Y'])\n # print(df_coords)\n\n #plot using all coordinates\n plt.plot(df_coords['X'].values, df_coords['Y'].values, lw=3)\n plt.gca().invert_yaxis()\n # plt.show()\n plt.close()\n\n\n # use k means to get rid of extra coords on short lines\n for i in list(range(1,6)):\n # kMEANS clustering, separate short line bottom half\n df_short = pd.DataFrame(lay_coords_dict[i], columns=['X', 'Y']) #1=L1,\n # plt.scatter( df_short['X'].values, df_short['Y'].values, s=5 )\n # plt.gca().invert_yaxis()\n # plt.show()\n\n #scale data\n scaler = StandardScaler()\n scaler.fit( df_short[['X', 'Y']].values )\n short_scale = scaler.transform( df_short[['X', 'Y']].values )\n\n init = np.array([[0.514, -0.629], [-1.101, 1.344]])\n\n #predict\n # kmeans_classifier = KMeans(n_clusters=2, init=init) #fixed centroids\n kmeans_classifier = KMeans(n_clusters=2) \n\n y_kmeans = kmeans_classifier.fit_predict(short_scale)\n centroids = kmeans_classifier.cluster_centers_\n inertia = kmeans_classifier.inertia_\n\n\n #update df\n df_short.insert(2, column='kClass', value=y_kmeans)\n\n #df scaled\n df_short_scale = pd.DataFrame(short_scale, columns=['X', 'Y'])\n df_short_scale.insert(2, column='kClass', value=y_kmeans)\n \n\n \"\"\"\n #plot data points for k means, clusters\n colmap = {0: '#029386', 1: '#D2691E', 2: '#A52A2A'}\n for i in range(2):\n new_df = df_short_scale[df_short_scale['kClass']==i]\n plt.scatter(new_df['X'].values, new_df['Y'].values, s=20, \\\n label='cluster' + str(i+1), color=colmap[i])\n\n #plot centroids\n for i in range (2):\n plt.scatter(centroids[i][0], centroids[i][1], marker='x', s=500, \\\n label='centroid' + str(i+1), color=colmap[i])\n \n plt.legend()\n plt.gca().invert_yaxis()\n plt.show()\n \"\"\"\n\n\n #new df for clean data, take centroid with more data points\n num_class0 = len(df_short[df_short['kClass']==0])\n num_class1 = len(df_short[df_short['kClass']==1])\n\n if num_class0 > num_class1:\n \n df_short_clean = df_short[df_short['kClass']==0]\n lay_coords_dict[i] = [[i,j] for i,j in zip(df_short_clean['X'].values,\\\n df_short_clean['Y'].values)]\n else:\n df_short_clean = df_short[df_short['kClass']==1]\n lay_coords_dict[i] = [[i,j] for i,j in zip(df_short_clean['X'].values,\\\n df_short_clean['Y'].values)]\n\n #plot clean short line\n # plt.scatter(df_short_clean['X'].values, df_short_clean['Y'].values, s=20)\n # plt.gca().invert_yaxis()\n # plt.show()\n\n #delete edge detect image and return dict\n rm_img_cmd = \"rm 
layer_contour.png\"\n os.system(rm_img_cmd)\n return(lay_coords_dict)",
"def select_vert(img):\n\n # Local variable which breaks loop if area of interest is selected well\n OK = False\n\n # Main while-loop\n while OK == False:\n\n # Plot image\n fig, ax = plt.subplots(figsize=(10, 10))\n ax.imshow(img, cmap=\"gray\")\n\n # Let user specify points\n coord = np.asarray(plt.ginput(4, show_clicks=True))\n p = Polygon(coord, linewidth=1, edgecolor='r', facecolor='none')\n plt.gca().add_artist(p)\n # Include area of interest in plot\n plt.draw()\n plt.show()\n\n # Ask user to accept or reject the proposed area of interest\n val = input(\"Is the region correct ([Y]/n)?\\n\")\n\n # Break if OK, re-do if not\n if val == \"Y\" or val == \"\":\n OK = True\n\n \"\"\"\n Creates a mask which marks the vertical line based on the coordinates given by the user.\n \"\"\"\n \n x, y = np.meshgrid(np.arange(img.shape[0]), np.arange(img.shape[1]), indexing='xy')\n x, y = x.flatten(), y.flatten()\n pts = np.vstack((x,y)).T\n pts_t = tuple(map(tuple, pts))\n mask = np.ones((img.shape[0],img.shape[1]))\n for (x,y) in pts_t:\n if p.get_path().contains_point((x,y)):\n mask[y][x] = 0\n\n # Return mask which is the area of interest with value 1, 0 else\n return mask",
"def lasso(image, save=True):\n if image.dtype == np.uint8:\n image = image / 255.\n\n TITLE = 'Press ENTER when satisfied with your selection.'\n fig = plt.figure()\n plt.tick_params(axis='both', which='both', bottom='off', top='off',\n labelbottom='off', right='off', left='off', labelleft='off')\n ax = fig.add_subplot(111)\n ax.imshow(image)\n ax.set_title(TITLE)\n\n height, width, _ = image.shape\n x, y = np.meshgrid(np.arange(width), np.arange(height))\n pix = np.vstack((x.flatten(), y.flatten())).T\n output = None\n\n def onselect(verts):\n # Select elements in original array bounded by selector path.\n verts = np.array(verts)\n p = Path(verts)\n ind = p.contains_points(pix, radius=1)\n selected = np.copy(image)\n selected[:, :, 0].flat[ind] = image[:, :, 0].flat[ind] * 0.8\n selected[:, :, 1].flat[ind] = image[:, :, 1].flat[ind] * 0.8\n selected[:, :, 2].flat[ind] = image[:, :, 2].flat[ind] * 0.8\n\n nonlocal output\n b = path_bbox(verts)\n ymin, ymax = int(min(b[:, 1])), int(max(b[:, 1])) + 1\n xmin, xmax = int(min(b[:, 0])), int(max(b[:, 0])) + 1\n alpha_mask = np.zeros((height, width))\n alpha_mask.flat[ind] = 1.0\n alpha_mask = alpha_mask[ymin:ymax, xmin:xmax]\n output = np.dstack((image[ymin:ymax, xmin:xmax], alpha_mask))\n\n ax.clear()\n ax.imshow(selected)\n ax.set_title(TITLE)\n ax.plot(*p.vertices.T, scalex=False, scaley=False)\n fig.canvas.draw_idle()\n\n def quit_figure(event):\n # Source: https://github.com/matplotlib/matplotlib/issues/830/.\n if event.key == 'enter':\n plt.close(event.canvas.figure)\n\n cid = plt.gcf().canvas.mpl_connect('key_press_event', quit_figure)\n lasso = LassoSelector(ax, onselect)\n plt.show()\n if save:\n plt.imsave('source.png', output)\n return output",
"def draw_polygon(left_x, right_x, left_y, right_y, img_):\n pts_left = np.array([np.flipud(np.transpose(np.vstack([left_x, left_y])))])\n pts_right = np.array([np.transpose(np.vstack([right_x, right_y]))])\n pts = np.hstack((pts_left, pts_right))\n img_ = cv2.polylines(img_, np.int_([pts]), isClosed=False, color=(60, 200, 60), thickness=10, lineType=cv2.LINE_AA)\n img_ = cv2.fillPoly(img_, np.int_(pts), (50, 90, 50))\n return img_",
"def add_overlay(self, data, vertices=None, to_overlay=None, mask_data=None,\n **kwargs):\n # Check input variables :\n if vertices is None:\n vertices = np.ones((len(self),), dtype=bool)\n if not len(vertices):\n logger.warning('Vertices array is empty. Abandoning.')\n return\n\n data = np.asarray(data)\n to_overlay = self._n_overlay if to_overlay is None else to_overlay\n data_lim = (data.min(), data.max())\n if len(self._data_lim) < to_overlay + 1:\n self._data_lim.append(data_lim)\n else:\n self._data_lim[to_overlay] = data_lim\n # -------------------------------------------------------------\n # TEXTURE COORDINATES\n # -------------------------------------------------------------\n need_reshape = to_overlay >= self._xrange.shape[1]\n if need_reshape:\n # Add column of zeros :\n z_ = np.zeros((len(self),), dtype=np.float32)\n z_text = np.zeros((1, LUT_LEN, 4), dtype=np.float32)\n self._xrange = np.c_[self._xrange, z_]\n self._alphas = np.c_[self._alphas, z_]\n self._text2d_data = np.concatenate((self._text2d_data, z_text))\n # (x, y) coordinates of the overlay for the texture :\n self._xrange[vertices, to_overlay] = normalize(data)\n # Transparency :\n self._alphas[vertices, to_overlay] = 1. # transparency level\n\n # -------------------------------------------------------------\n # TEXTURE COLOR\n # -------------------------------------------------------------\n # Colormap interpolation (if needed):\n colormap = Colormap(**kwargs)\n vec = np.linspace(data_lim[0], data_lim[1], LUT_LEN)\n self._text2d_data[to_overlay, ...] = colormap.to_rgba(vec)\n # Send data to the mask :\n if isinstance(mask_data, np.ndarray) and len(mask_data) == len(self):\n self._bgd_data[mask_data] = .5\n self._bgd_buffer.set_data(self._bgd_data)\n # -------------------------------------------------------------\n # BUFFERS\n # -------------------------------------------------------------\n if need_reshape:\n # Re-define buffers :\n self._xrange_buffer = gloo.VertexBuffer(self._xrange)\n self._text2d = gloo.Texture2D(self._text2d_data)\n self._alphas_buffer = gloo.VertexBuffer(self._alphas)\n # Send buffers to vertex shader :\n self.shared_program.vert['u_range'] = self._xrange_buffer\n self.shared_program.vert['u_alphas'] = self._alphas_buffer\n self.shared_program.vert['u_over_text'] = self._text2d\n else:\n self._xrange_buffer.set_data(self._xrange)\n self._text2d.set_data(self._text2d_data)\n self._alphas_buffer.set_data(self._alphas)\n # Update the number of overlays :\n self._n_overlay = to_overlay + 1\n self.shared_program.vert['u_n_overlays'] = self._n_overlay",
"def drawShapes(self):\n self.draw_polygon(self.poly3.get_points() , color = \"#000\")\n self.draw_polygon(self.poly2.get_points() , color = \"#000\")\n self.draw_polygon(self.poly1.get_points() , color = \"#000\")\n self.draw_rect(0, 0, self.width, self.height, color= \"#000\")\n \"\"\"These statements are used to determine if a point is inside any of the\n 3 polygons and if so changes the point's color\"\"\"\n if (self.poly2.point_inside_polygon(self.p1) or self.poly1.point_inside_polygon(self.p1)\n or self.poly3.point_inside_polygon(self.p1)):\n color = \"#0F0\"\n else:\n color = \"#F00\"\n self.fill_oval(self.p1.x, self.p1.y, 7, 7, color)\n\n if (self.poly2.point_inside_polygon(self.p2) or self.poly1.point_inside_polygon(self.p2)\n or self.poly3.point_inside_polygon(self.p2)):\n color = \"#0F0\"\n else:\n color = \"#F00\"\n self.fill_oval(self.p2.x, self.p2.y, 7, 7, color)\n if (self.poly2.point_inside_polygon(self.p3) or self.poly1.point_inside_polygon(self.p3)\n or self.poly3.point_inside_polygon(self.p3)):\n color = \"#0F0\"\n else:\n color = \"#F00\"\n self.fill_oval(self.p3.x, self.p3.y, 7, 7, color)",
"def clip_polygon(subject, clipper, operation = 'difference'):\n Subject = Polygon()\n Clipper = Polygon()\n\n for s in subject:\n Subject.add(Vertex(s))\n\n for c in clipper:\n Clipper.add(Vertex(c))\n\n clipped = Clipper.difference(Subject)\\\n if operation == 'reversed-diff'\\\n else Subject.__getattribute__(operation)(Clipper)\n\n clipped = [(ext.points,[hole.points for hole in holes]) for ext,holes in clipped]\n return clipped",
"def _finish_polygon(self):\n global undo_stack, choose_polygon\n if len(self.polygon_points) < 6:\n messagebox.showinfo(title='Info', message='Too few points for a polygon')\n return 'too_few_points'\n relative_poly_points = []\n for p in range(0, len(self.polygon_points), 2):\n relative_poly_points.extend(self.get_canvas_relative_coords((self.polygon_points[p],\n self.polygon_points[p + 1])))\n if choose_polygon:\n undo_stack.append('p')\n self.polygons.append(self.canvas.create_polygon(relative_poly_points,\n outline='blue', activewidth=3, width=1,\n fill='magenta', stipple='gray50'))\n self.canvas.tag_bind(self.polygons[-1], '<ButtonPress-1>', self.callback_click_polygon)\n self.canvas.tag_bind(self.polygons[-1], '<ButtonRelease-1>', self.callback_release_polygon)\n self.canvas.tag_bind(self.polygons[-1], '<B1-Motion>', self.callback_move_polygon)\n for p in self.polygon_groundstructure:\n self.canvas.delete(p)\n self.polygon_points_history[self.polygons[-1]] = np.reshape(np.asarray(self.polygon_points),\n (round(len(self.polygon_points) / 2),\n 2))\n self.polygon_points.clear()\n self.polygon_groundstructure.clear()\n self.parent_class.activate_save_bt()",
"def generatePolygons(self, *args, **kwargs): \n return 'var PloneMapPolygons = [' + \\\n ''.join([\"{ 'id': '%s', 'path' : %s,'title':'%s'},\" % (object.id, object.polygon, object.Title()) \n for object in self.context.objectValues() \n if hasattr(object, 'polygon') and len(object.polygon) > 0 ])[:-1] \\\n + '];'"
] |
[
"0.66459286",
"0.6113643",
"0.5918995",
"0.581146",
"0.5736627",
"0.57313544",
"0.55649126",
"0.5532528",
"0.5501249",
"0.5472025",
"0.5466082",
"0.5463649",
"0.54483116",
"0.543107",
"0.5429053",
"0.5395462",
"0.5387459",
"0.53762674",
"0.53500515",
"0.5334074",
"0.5333486",
"0.5261257",
"0.5242383",
"0.51676404",
"0.5163463",
"0.5152799",
"0.5138941",
"0.5138438",
"0.51342595",
"0.5131651"
] |
0.662988
|
1
|
Display error messages and exit if no lore environment can be found.
|
def validate():
if not os.path.exists(os.path.join(ROOT, APP, '__init__.py')):
message = ansi.error() + ' Python module not found.'
if os.environ.get('LORE_APP') is None:
message += ' $LORE_APP is not set. Should it be different than "%s"?' % APP
else:
message += ' $LORE_APP is set to "%s". Should it be different?' % APP
sys.exit(message)
if exists():
return
if len(sys.argv) > 1:
command = sys.argv[1]
else:
command = 'lore'
sys.exit(
ansi.error() + ' %s is only available in lore '
'app directories (missing %s)' % (
ansi.bold(command),
ansi.underline(VERSION_PATH)
)
)
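
Editor's illustration only (not part of the dataset record): a minimal sketch of how a startup guard like validate() is typically wired into a CLI entry point. The lore.env module path and the dispatch step are assumptions for illustration.

# Hypothetical usage sketch; lore.env and the dispatch step are assumptions.
import sys

def cli():
    from lore import env   # assumed to expose validate() as defined above
    env.validate()         # exits with an ansi error message outside a lore app
    command = sys.argv[1] if len(sys.argv) > 1 else 'console'
    print('dispatching %s inside a valid lore app' % command)

if __name__ == '__main__':
    cli()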
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def verify_environment():\n\n if \"MROPATH\" not in os.environ:\n raise EnvironmentError(\n \"MROPATH is not in the environment. You probably need to source \"\n \"sourceme.bash in cellranger before running this tool.\")\n\n try:\n import martian\n except ImportError:\n print sys.path\n traceback.print_exc()\n raise ImportError(\n \"Could not import martian. You probably need to source \"\n \"sourceme.bash in cellranger before running this tool.\")",
"def launch():\n if launched():\n check_version()\n os.chdir(ROOT)\n return\n\n if not os.path.exists(BIN_LORE):\n missing = ' %s virtualenv is missing.' % APP\n if '--launched' in sys.argv:\n sys.exit(ansi.error() + missing + ' Please check for errors during:\\n $ lore install\\n')\n else:\n print(ansi.warning() + missing)\n import lore.__main__\n lore.__main__.install(None, None)\n\n reboot('--env-launched')",
"def _check_env():\n\tif os.getenv(_DATA_DIRECTORY_ENV_KEY) is None:\n\t\texit_everything(ERROR_DATA_DIRECTORY_NOT_SET, f'{_DATA_DIRECTORY_ENV_KEY} env var not set')\n\t\n\tif os.getenv(_FRONTEND_URL_ENV_KEY) is None:\n\t\texit_everything(ERROR_FRONTEND_NOT_SET, f'{_FRONTEND_URL_ENV_KEY} env var not set')",
"def check_environment() -> None:\n for item in ['IB_USER', 'IB_PASSWORD', 'IB_URL']:\n if os.getenv(item) is None:\n raise click.UsageError(f'{item} environment variable must be set before using ib.')",
"def verify_environment():\n reqs = ['NAME', 'RECIPIENT', 'SUBJECT', 'MESSAGE',\n 'MAILGUN_API_KEY', 'MAILGUN_DOMAIN']\n for req in reqs:\n if not os.getenv(req):\n logging.error('Environment variable ' + req + ' is not set')\n sys.exit(2)",
"def exit_error():\n if SPDK_ERROR_ENV in os.environ.keys():\n ret_val = SPDK_ERR_TBL[os.environ[SPDK_ERROR_ENV]]\n sys.exit(ret_val)\n else:\n sys.exit(0)",
"def env_check(self):\n b_status : bool = True\n str_error : str = \"no error\"\n if not os.path.exists(self.str_inputDir):\n b_status = False\n if self.toConsole():\n error.warn(self, 'inputDirFail', exitToOS = True, drawBox = True)\n str_error = 'error captured while accessing input directory'\n return {\n 'status' : b_status,\n 'error' : str_error\n }",
"def _perform_environment_check(check_auth=True):\n correct, errors = verify_environment(check_auth)\n\n if not correct:\n print_error(\n \"Cannot execute command because of problem(s) with environment:\")\n for error in errors:\n print_error(\" - \" + error)\n sys.exit(1)",
"def error_exit():\n print(\"Invalid arguments!\")\n print(\"Type -h to get help.\")\n exit(0)",
"def test_check_environment(monkeypatch):\n monkeypatch.delenv(\"PYTHONPATH\")\n with pytest.raises(SystemExit):\n check_environment()",
"def cli(ctx: click.Context):\n try:\n # Ensure the necessary environment variables are set before proceeding.\n all(environ[env_var] for env_var in Env.values())\n\n except KeyError as exc:\n ctx.fail(f\"Missing environment variable: {exc}\")",
"def check_errors():\n\n for error in errors:\n ERROR('%s' % str(error))\n\n if len(errors) != 0:\n sys.exit(1)",
"def main() -> None:\n try:\n run()\n except errors.BaseError as e:\n sys.stderr.write(f'{str(e)}\\n')\n sys.exit(e.code)",
"def check_environment():\n if 'OS_USERNAME' not in os.environ:\n print \"Error gathering facts! Please ensure that the openstack\" +\\\n \" credentials of an admin user are set as environment\" + \\\n \" variables.\"\n sys.exit(-1)\n if not find_executable('nova'):\n return False\n if not find_executable('openstack'):\n return False\n if not find_executable('glance'):\n return False\n if not find_executable('cinder'):\n return False\n return True",
"def exit_on_error(self) -> None:\n if self.errors:\n sys.exit(1)",
"def check_version():\n if sys.version_info[0:3] == PYTHON_VERSION_INFO[0:3]:\n return\n\n sys.exit(\n ansi.error() + ' your virtual env points to the wrong python version. '\n 'This is likely because you used a python installer that clobbered '\n 'the system installation, which breaks virtualenv creation. '\n 'To fix, check this symlink, and delete the installation of python '\n 'that it is brokenly pointing to, then delete the virtual env itself '\n 'and rerun lore install: ' + os.linesep + os.linesep + BIN_PYTHON +\n os.linesep\n )",
"def test_main_succeeds_in_production_env(runner: CliRunner) -> None:\n result = runner.invoke(console.main)\n assert result.exit_code == 0",
"def runmain():\n\n if roboapps.Unchecked():\n roboapps.Exit()\n else:\n ReversePrograms()",
"def getenv_check(e):\n res = os.getenv(e)\n if res == None:\n print(e, 'environment variable not set - stopping.')\n exit(1)\n else:\n return res",
"def entry_point():\n raise SystemExit(main(sys.argv))",
"def entry_point():\n raise SystemExit(main(sys.argv))",
"def entry_point():\n raise SystemExit(main(sys.argv))",
"def bail( msg ):\n # Terminate, with helpful error message:\n print(\"ERROR: \" + msg + \"... exiting.\", file=sys.stderr)\n exit(1)",
"def troubleshoot():\n libraries = (sys, pd, openpyxl, matplotlib, pip)\n for i in libraries:\n try:\n print(str(i), 'version:', i.__version__)\n except AttributeError:\n pass\n except ModuleNotFoundError:\n print('You do not have', str(i), 'installed.')\n print('You can do so via your interpreter or:')\n print('py -m pip install', '-' + str(i))\n print('in command prompt')",
"def testFailure():\n run(\"chariot-me\") #Start management-engine without initial deplflag\n egress()",
"def main():\n\n options = get_options()\n\n cf.use_style(\"solarized\")\n if options[\"nocolor\"]:\n cf.disable()\n\n newline()\n header(\"Thumbor v%s (of %s)\" % (__version__, __release_date__))\n\n newline()\n print(\n \"Thumbor doctor will analyze your install and verify if everything is working as expected.\"\n )\n\n errors = check_modules()\n errors += check_compiled_extensions()\n errors += check_filters()\n errors += check_extensions()\n\n newline()\n\n if errors:\n print(cf.bold_red(\"😞 Oh no! We found some things that could improve... 😞\"))\n newline()\n print(\"\\n\".join([\"* %s\" % str(err) for err in errors]))\n newline()\n newline()\n print(\n cf.cyan(\n \"If you don't know how to fix them, please open an issue with thumbor.\"\n )\n )\n print(\n cf.cyan(\n \"Don't forget to copy this log and add it to the description of your issue.\"\n )\n )\n print(\"Open an issue at https://github.com/thumbor/thumbor/issues/new\")\n sys.exit(1)\n return\n\n print(cf.bold_green(\"🎉 Congratulations! No errors found! 🎉\"))",
"def main():\n try:\n merge_envs(parse_args())\n except MergeError:\n return 1",
"def precond_failed(e):\n envs = environments()\n return render_template('412.html', envs=envs), 412",
"def display_help_screen():\r\n\tsys.exit(0)",
"def finalize_error():\n print('')\n exit(-1)"
] |
[
"0.61916924",
"0.6157401",
"0.60019594",
"0.57859117",
"0.5725944",
"0.5690237",
"0.56561863",
"0.5636664",
"0.5628953",
"0.5622101",
"0.5586993",
"0.558566",
"0.5548352",
"0.5535722",
"0.552629",
"0.5431093",
"0.54161894",
"0.541161",
"0.5392171",
"0.5378731",
"0.5378731",
"0.5378731",
"0.53786314",
"0.53697354",
"0.536775",
"0.536476",
"0.5352128",
"0.53509927",
"0.53353614",
"0.5298796"
] |
0.6898733
|
0
|
Reboot python in the Lore virtualenv
|
def reboot(*args):
args = list(sys.argv) + list(args)
if args[0] == 'python' or not args[0]:
args[0] = BIN_PYTHON
elif os.path.basename(sys.argv[0]) in ['lore', 'lore.exe']:
args[0] = BIN_LORE
try:
os.execv(args[0], args)
except Exception as e:
if args[0] == BIN_LORE and args[1] == 'console' and JUPYTER_KERNEL_PATH:
print(ansi.error() + ' Your jupyter kernel may be corrupt. Please remove it so lore can reinstall:\n $ rm ' + JUPYTER_KERNEL_PATH)
raise e
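
As an aside, a stripped-down sketch of the same re-exec pattern in isolation; bin_python is an assumed path to the virtualenv interpreter (e.g. ".venv/bin/python"), not something taken from the record above.

# Minimal standalone sketch of the re-exec pattern; bin_python is assumed.
import os
import sys

def reexec_under(bin_python, *extra_args):
    args = [bin_python] + sys.argv[1:] + list(extra_args)
    os.execv(bin_python, args)  # replaces the current process; only returns on failure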
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def launch():\n if launched():\n check_version()\n os.chdir(ROOT)\n return\n\n if not os.path.exists(BIN_LORE):\n missing = ' %s virtualenv is missing.' % APP\n if '--launched' in sys.argv:\n sys.exit(ansi.error() + missing + ' Please check for errors during:\\n $ lore install\\n')\n else:\n print(ansi.warning() + missing)\n import lore.__main__\n lore.__main__.install(None, None)\n\n reboot('--env-launched')",
"def reboot():\n sudo('/mnt/apps/bin/restart-all-apache.sh')",
"def stop_and_restart():\n updater.stop()\n os.execl(sys.executable, sys.executable, *sys.argv)",
"def stop_and_restart():\n updater.stop()\n os.execl(sys.executable, sys.executable, *sys.argv)",
"def stop_and_restart():\n U.stop()\n os.execl(sys.executable, sys.executable, *sys.argv)",
"def RebootInstance(self, instance):\n raise HypervisorError(\"The chroot manager doesn't implement the\"\n \" reboot functionality\")",
"def reboot(self):\n raise NotImplementedError",
"def reboot(self, node):",
"def reboot():\n query = {\n \"type\": \"op\",\n \"cmd\": \"<request><restart><system></system></restart></request>\",\n }\n\n return __proxy__[\"panos.call\"](query)",
"def _restart(self):\n pass",
"def restart_with_reloader():\n while True:\n print(f'Restarting with reloader')\n args = [sys.executable] + ['-W%s' % o for o in sys.warnoptions] + sys.argv\n new_environ = os.environ.copy()\n new_environ[\"RUN_MAIN\"] = 'true'\n exit_code = os.spawnve(os.P_WAIT, sys.executable, args, new_environ)\n if exit_code != 3:\n return exit_code",
"def restart_salt():\n stop_salt()\n start_salt()",
"def restart():\n require('PROJECT_NAME')\n\n sudo('supervisorctl restart {0}'.format(env.PROJECT_NAME))",
"def sudo_restart ( self, ):\r\n pass\r\n \"sudo reboot\"",
"def reload_test(test_name):\n sudo(\"restart %s\" % test_name)",
"def restart():\n with cd('/apps/sharejs-rethinkdb-example'):\n run('fig -f prod.yml stop')\n run('fig -f prod.yml up -d')",
"def restart():\n with cd(env.directory):\n sudo('./bin/supervisorctl restart all', user=env.deploy_user)",
"def restart():\n with cd(env.directory):\n sudo('./bin/supervisorctl restart all', user=env.deploy_user)",
"def restart():\n log.info('restart')\n samuraix.restarting = True\n samuraix.app.stop()",
"def restart_treesheets():\n # The restart command in my init.d script fails for some reason.\n # But stop and start works.\n # TODO(eob): Fix the restart init.d script.\n sudo('/etc/init.d/treesheets stop')\n sudo('/etc/init.d/treesheets start')",
"def bootstrap():\n local('virtualenv fabric_factory/ve')",
"async def reboot(self, ctx):\n restart_land = discord.Embed(\n title=\"Restarting\", description=\"Please wait...\", colour=0x690E8\n )\n re_msg = await ctx.send(embed=restart_land)\n pm2_id = os.environ.get(\"pm_id\")\n if_systemd = os.environ.get(\"systemd_supervised\")\n if pm2_id:\n await re_msg.edit(content=\"pm2: :wave: bye!\")\n await self.bot.session.close()\n await self.bot.logout()\n await run_cmd(f\"pm2 restart {pm2_id}\")\n elif if_systemd:\n await re_msg.edit(content=\"systemd: :wave: bye!\")\n await self.bot.session.close()\n await run_cmd(\"systemctl --user restart lolbot\")\n await self.bot.logout()\n else:\n await re_msg.edit(content=\":warning: No supervisor; invoking\" \" `shutdown`\")\n await ctx.invoke(self.bot.get_command(\"shutdown\"))",
"def stop_and_restart():\n logging.info(\"Restarting eduzen_bot...\\n\")\n bot.updater.stop()\n os.execl(sys.executable, sys.executable, *sys.argv)",
"def reboot(*args, **kwargs):\n try:\n master.main_exit()\n except Exception:\n log.error(\"main_exit error\")\n with open('/tmp/reboot', 'w+') as f:\n f.write(\"REBOOT\")\n log.info(\"Reboot ...\")",
"def restart():\n run_commands('python manage.py supervisor restart all')",
"def rebuild():\n try:\n cmd = 'rm -rf %s' % VENV\n if VENVWRAPPER:\n cmd = 'rmvirtualenv %s' % VENV\n _do_virtualenvwrapper_command(cmd)\n except Exception as e:\n print(unicode(e))\n\n cmd = 'virtualenv --no-site-packages -p /usr/bin/python{major}.{minor} {v}'\\\n .format(\n major=sys.version_info[0],\n minor=sys.version_info[1],\n v=VENV,\n )\n if VENVWRAPPER:\n cmd = 'mkvirtualenv --no-site-packages -p /usr/bin/python{major}.{minor} {v}'\\\n .format(\n major=sys.version_info[0],\n minor=sys.version_info[1],\n v=VENV,\n )\n _do_virtualenvwrapper_command(cmd)\n\n # Do two things here:\n # - remove all *.pyc that exist in srcdir.\n # - remove all data/templates dirs that exist (mako caches).\n for base, dirs, files in os.walk(os.getcwd()):\n for fname in files:\n if fname.endswith(\".pyc\"):\n os.remove(os.path.sep.join([base, fname]))\n\n if base.endswith('data/templates'):\n shutil.rmtree(base)",
"def Reboot():\n # This envrionment is defined only when testing the slave on a dev machine.\n is_testing = 'TESTING_MASTER' in os.environ\n\n should_reboot = False\n try:\n import config_bootstrap\n master = getattr(config_bootstrap.Master, 'active_master', None)\n should_reboot = getattr(master, 'reboot_on_step_timeout', True)\n Log('Reboot: reboot_on_step_timeout = %r (from master_site_config: %r)'\n % (should_reboot, master))\n except: # pylint: disable=W0702\n Log('Reboot: failed to read master config: %s' % str(sys.exc_info()[0]))\n return\n\n if should_reboot:\n if not is_testing:\n Log('Reboot: Issuing Reboot...')\n ReallyReboot()\n else:\n Log('Reboot: Testing mode enabled, skipping the actual reboot')",
"def restart_with_root():\n if platform.system() == 'Windows':\n os.execv('cmd', join_path(os.path.pardir(os.path.pardir(__file__)), 'start_root.cmd'))\n else:\n os.execv(sys.executable, ['sudo python'] + sys.argv)",
"def restart(self):\n print \"Restarting \" + executable + \" \" + str(argv) \n execl(executable, *([executable]+argv))",
"def _graceful_restart(self, wait):\n\n self._sut.shutdown(True)\n self._sut.start()\n\n if wait:\n sleep(BespokeGlobals.VM_BOOT_WAIT)"
] |
[
"0.7200766",
"0.6681993",
"0.64902824",
"0.6444445",
"0.6389589",
"0.6350412",
"0.6226529",
"0.620856",
"0.6193457",
"0.61873376",
"0.6184521",
"0.6169208",
"0.61558735",
"0.61380976",
"0.6134002",
"0.61229736",
"0.61192197",
"0.61192197",
"0.6106537",
"0.61041933",
"0.60894567",
"0.60856986",
"0.60587764",
"0.6026833",
"0.6011501",
"0.60029507",
"0.600207",
"0.59819555",
"0.5979123",
"0.5964972"
] |
0.7048641
|
1
|
Sanity check version information for corrupt virtualenv symlinks
|
def check_version():
if sys.version_info[0:3] == PYTHON_VERSION_INFO[0:3]:
return
sys.exit(
ansi.error() + ' your virtual env points to the wrong python version. '
'This is likely because you used a python installer that clobbered '
'the system installation, which breaks virtualenv creation. '
'To fix, check this symlink, and delete the installation of python '
'that it is brokenly pointing to, then delete the virtual env itself '
'and rerun lore install: ' + os.linesep + os.linesep + BIN_PYTHON +
os.linesep
)
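
For illustration, the same sanity check can be expressed on its own; the pinned version tuple below is a placeholder assumption, not a value from the record.

# Standalone sketch of the same guard; the pinned tuple is a placeholder value.
import sys

def assert_interpreter(pinned=(3, 8, 10)):
    if sys.version_info[:3] == pinned:
        return
    sys.exit('virtualenv python %r does not match pinned %r -- recreate the env'
             % (tuple(sys.version_info[:3]), pinned))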
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def virtualenvwrapper_check():\n compare_result = False\n try:\n output = subprocess.check_output(shlex.split(\"virtualenv --version\"), stderr=subprocess.STDOUT)\n lines = output.split(\"\\n\")\n version_string = re.compile(\"(\\d)+.(\\d)+.(\\d)\")\n line0_result = version_string.match(lines[0])\n if not line0_result:\n compare_result = False\n else:\n compare_result = True\n except OSError as ose:\n # Assume not installed\n compare_result = False\n return compare_result",
"def check_venv():\n if not args.novenv:\n assert not sys.prefix == sys.base_prefix, 'Please use pip-upgrade in a virtualenv. If you would like to surpass this use pip-upgrade --novenv'",
"def _check_virtualenv(venv_command):\n try:\n command = \"%s --version\" % (venv_command)\n logger.debug(\"Checking virtualenv with command: %s\" % (command))\n subprocess.call(command, stdout=open(os.devnull, 'wb'), shell=True)\n except:\n return False\n return True",
"def validate_configurator_version():\n if settings.CONFIGURATOR_MODULE == \"bootmachine.contrib.configurators.salt\":\n pkgver = settings.SALT_AUR_PKGVER\n pkgrel = settings.SALT_AUR_PKGREL\n response = urllib2.urlopen(\"https://aur.archlinux.org/packages/sa/salt/PKGBUILD\")\n for line in response:\n if line.startswith(\"pkgver=\") and not pkgver in line:\n abort(\"The requested Salt 'pkgrel={0}' in the AUR was updated to '{1}'.\".format(\n pkgver, line.strip()))\n if line.startswith(\"pkgrel=\") and not pkgrel in line:\n abort(\"The requested Salt 'pkgrel={0}' in the AUR was updated to '{1}'.\".format(\n pkgrel, line.strip()))",
"def check_venv():\n\n # Check if virtualenv directory exists in current directory\n if not fs.exists(VENV_DIR):\n \n cprint(\"%sNo virtual-envrionment detected\" % (OUT_PRFX), OUT_STD_COLOR)\n \n # Create a new virtualenv\n create_venv()",
"def _check_version () -> None:\n py_version_info: typing.Tuple = sys.version_info[:2]\n\n if py_version_info < MIN_PY_VERSION:\n error_msg = \"This version of pytextrank requires Python {} or later ({} detected)\\n\"\n raise RuntimeError(error_msg.format(_versify(MIN_PY_VERSION), _versify(py_version_info)))",
"def check_versions(ctx, show=False):\n sys.path.insert(0, os.path.join(ROOT_DIR, '_tools'))\n import versions\n versions.main()",
"def check_all():\n for package, version in required_versions.items():\n try:\n module = importlib.import_module(package)\n except ImportError:\n return\n else:\n if StrictVersion(version) > StrictVersion(module.__version__):\n raise RuntimeError(\"Your version of %s is too old - it must be at least %s\" % (\n package,\n version,\n ))",
"def test_robert_match_does_not_resolve_symlinks(self):\n with tempfile.TemporaryDirectory() as d:\n matches = self.__parse_gitignore_string([\"*.venv\"], mock_base_path=d)\n os.makedirs(f\"{d}/.venv/bin\")\n os.symlink(sys.executable, f\"{d}/.venv/bin/python\")\n for is_dir in (False, True):\n with self.subTest(i=is_dir):\n self.assertTrue(matches(f\"{d}/.venv\", is_dir=is_dir))\n self.assertTrue(matches(f\"{d}/.venv/\", is_dir=is_dir))\n self.assertTrue(matches(f\"{d}/.venv/bin\", is_dir=is_dir))\n self.assertTrue(matches(f\"{d}/.venv/bin/\", is_dir=is_dir))\n self.assertTrue(matches(f\"{d}/.venv/bin/python\", is_dir=is_dir))\n self.assertFalse(matches(f\"{d}/.venv2\", is_dir=is_dir))\n self.assertFalse(matches(f\"{d}/.venv2/\", is_dir=is_dir))\n self.assertFalse(matches(f\"{d}/.venv2/bin\", is_dir=is_dir))\n self.assertFalse(matches(f\"{d}/.venv2/bin/\", is_dir=is_dir))\n self.assertFalse(matches(f\"{d}/.venv2/bin/python\", is_dir=is_dir))\n self.assertTrue(matches(f\"{d}/a.venv\", is_dir=is_dir))\n self.assertTrue(matches(f\"{d}/a.venv/\", is_dir=is_dir))\n self.assertTrue(matches(f\"{d}/a.venv/bin\", is_dir=is_dir))\n self.assertTrue(matches(f\"{d}/a.venv/bin/\", is_dir=is_dir))\n self.assertTrue(matches(f\"{d}/a.venv/bin/python\", is_dir=is_dir))",
"def test_robert_match_does_not_resolve_symlinks(self):\n with tempfile.TemporaryDirectory() as d:\n matches = self.__parse_gitignore_string([\"*.venv\"], mock_base_path=d)\n os.makedirs(f\"{d}/.venv/bin\")\n os.symlink(sys.executable, f\"{d}/.venv/bin/python\")\n for is_dir in (False, True):\n with self.subTest(i=is_dir):\n self.assertTrue(matches(f\"{d}/.venv\", is_dir=is_dir))\n self.assertTrue(matches(f\"{d}/.venv/\", is_dir=is_dir))\n self.assertTrue(matches(f\"{d}/.venv/bin\", is_dir=is_dir))\n self.assertTrue(matches(f\"{d}/.venv/bin/\", is_dir=is_dir))\n self.assertTrue(matches(f\"{d}/.venv/bin/python\", is_dir=is_dir))\n self.assertFalse(matches(f\"{d}/.venv2\", is_dir=is_dir))\n self.assertFalse(matches(f\"{d}/.venv2/\", is_dir=is_dir))\n self.assertFalse(matches(f\"{d}/.venv2/bin\", is_dir=is_dir))\n self.assertFalse(matches(f\"{d}/.venv2/bin/\", is_dir=is_dir))\n self.assertFalse(matches(f\"{d}/.venv2/bin/python\", is_dir=is_dir))\n self.assertTrue(matches(f\"{d}/a.venv\", is_dir=is_dir))\n self.assertTrue(matches(f\"{d}/a.venv/\", is_dir=is_dir))\n self.assertTrue(matches(f\"{d}/a.venv/bin\", is_dir=is_dir))\n self.assertTrue(matches(f\"{d}/a.venv/bin/\", is_dir=is_dir))\n self.assertTrue(matches(f\"{d}/a.venv/bin/python\", is_dir=is_dir))",
"def test_3x_only_python_versions_deploy():\n pass",
"def test_version(self):\n v = version('/no/such/executable')\n self.assertEqual(v, '0.0.1.dev0')\n v = version('false')\n self.assertEqual(v, '0.0.1.dev0')\n v = version('echo')\n self.assertEqual(v, 'describe .devrev-list --count HEAD')",
"def package_version_check(args, parser):\n if (args.build or args.check) and args.package_version:\n parser.error('--package-version works only with --create')",
"def _checkUpdateNeeded(self):\n try:\n currentVersionLine = str(subprocess.run(['pacman', '-Q', '-i', self._name],\n stdout=subprocess.PIPE, stderr=subprocess.STDOUT, check=True).stdout)\n currentVersion = re.sub(r'.*Version\\s*: ([\\d|\\.]*)-.*', r'\\1', currentVersionLine).split('.')\n newVersion = self._version.split('.')\n for i in range(0, min(len(currentVersion), len(newVersion))):\n if currentVersion[i].isdigit():\n # TODO: test if new version is only digits too, two of them should be the same anyway\n if int(newVersion[i]) > int(currentVersion[i]):\n return True\n if int(newVersion[i]) < int(currentVersion[i]):\n return False\n return len(newVersion) > len(currentVersion)\n except subprocess.CalledProcessError:\n # Package not found: to be installed then\n return True",
"def test_2x_only_python_version_deploy():\n pass",
"def verify_python(self, app):\n output = self.tools[app].app_context.check_output(\n [\n f\"python{app.python_version_tag}\",\n \"-c\",\n (\n \"import sys; \"\n \"print(f'{sys.version_info.major}.{sys.version_info.minor}')\"\n ),\n ]\n )\n # Update the python version tag with the *actual* python version.\n app.python_version_tag = output.split(\"\\n\")[0]\n target_python_version = tuple(int(v) for v in app.python_version_tag.split(\".\"))\n\n if target_python_version < self.briefcase_required_python_version:\n briefcase_min_version = \".\".join(\n str(v) for v in self.briefcase_required_python_version\n )\n raise BriefcaseCommandError(\n f\"The system python3 version provided by {app.target_image} \"\n f\"is {app.python_version_tag}; Briefcase requires a \"\n f\"minimum Python3 version of {briefcase_min_version}.\"\n )\n elif target_python_version != (\n self.tools.sys.version_info.major,\n self.tools.sys.version_info.minor,\n ):\n self.logger.warning(\n f\"\"\"\n*************************************************************************\n** WARNING: Python version mismatch! **\n*************************************************************************\n\n The system python3 provided by {app.target_image} is {app.python_version_tag}.\n This is not the same as your local system ({self.python_version_tag}).\n\n Ensure you have tested for Python version compatibility before\n releasing this app.\n\n*************************************************************************\n\"\"\"\n )",
"def verify_inputs(self):\n if self.has_source():\n raise Exception(\"Installation from source is only available for \"\n \"`virtualenv` manager\")\n if self.has_extras():\n raise Exception(\"Installation of extras only possible for \"\n \"`virtualenv` manager\")",
"def test_version_exists():\n assert ztm.__version__",
"def _check_python_version(self):\n python_exe = tools.which(\"python\")\n if not python_exe:\n msg = (\"Python must be available in PATH \"\n \"in order to build v8\")\n raise ConanInvalidConfiguration(msg)\n # In any case, check its actual version for compatibility\n from six import StringIO # Python 2 and 3 compatible\n version_buf = StringIO()\n cmd_v = \"{} --version\".format(python_exe)\n self.run(cmd_v, output=version_buf)\n p = re.compile(r'Python (\\d+\\.\\d+\\.\\d+)')\n verstr = p.match(version_buf.getvalue().strip()).group(1)\n if verstr.endswith('+'):\n verstr = verstr[:-1]\n version = tools.Version(verstr)\n # >= 2.7.5 & < 3\n py2_min = \"2.7.5\"\n py2_max = \"3.0.0\"\n py3_min = \"3.8.0\"\n if (version >= py2_min) and (version < py2_max):\n msg = (\"Found valid Python 2 required for v8:\"\n \" version={}, path={}\".format(version_buf.getvalue().strip(), python_exe))\n self.output.success(msg)\n elif version >= py3_min:\n msg = (\"Found valid Python 3 required for v8:\"\n \" version={}, path={}\".format(version_buf.getvalue().strip(), python_exe))\n self.output.success(msg)\n else:\n msg = (\"Found Python in path, but with invalid version {}\"\n \" (v8 requires >= {} and < \"\n \"{} or >= {})\".format(verstr, py2_min, py2_max, py3_min))\n raise ConanInvalidConfiguration(msg)",
"def check_update():\n try:\n raw_version = urllib.urlopen(VERSIONFILE)\n except IOError as e:\n print UPDATE_FAIL + \"can't fetch version file: \" + str(e)\n else:\n if raw_version.getcode() == 200:\n remote_version = raw_version.read().rstrip()\n if remote_version != VERSION:\n print(UPDATE_WARN + \"version \" + remote_version + \" is available, you have version \"\n + VERSION + \"\\n\\t\" + \"to update run: \" + UPDATECOMMAND)\n else:\n print UPDATE_FAIL + \"can't fetch version file\"",
"def test_version():\n with open(\"pyproject.toml\") as f:\n tomllines = f.read().splitlines()\n tomlversion = set([l for l in tomllines if \"version =\" in l])\n initversion = set([f'version = \"{mei2volpiano.__version__}\"'])\n # set is there to catch any duplicate/additional entries\n assert initversion == tomlversion",
"def _verify_patchelf() -> None:\n if not find_executable(\"patchelf\"):\n raise ValueError(\"Cannot find required utility `patchelf` in PATH\")\n try:\n version = check_output([\"patchelf\", \"--version\"]).decode(\"utf-8\")\n except CalledProcessError:\n raise ValueError(\"Could not call `patchelf` binary\")\n\n m = re.match(r\"patchelf\\s+(\\d+(.\\d+)?)\", version)\n if m and tuple(int(x) for x in m.group(1).split(\".\")) >= (0, 14):\n return\n raise ValueError(\n f\"patchelf {version} found. auditwheel repair requires \" \"patchelf >= 0.14.\"\n )",
"def test_version():\n versions = ((2, 7, 16), (3, 5, 7), (3, 6, 8), (3, 7, 3))\n assert sys.version_info[:3] in versions",
"def test_subversion_binary_exists(host):\n assert host.file(PACKAGE_BINARY).exists",
"def test_require_virtualenv():\n\n from fabtools.require.python import virtualenv\n\n try:\n virtualenv('/tmp/venv')\n\n assert is_dir('/tmp/venv')\n assert is_file('/tmp/venv/bin/python')\n\n finally:\n run('rm -rf /tmp/venv')",
"def test_package_version():\n coverage_version = package_version('coverage')\n pytest_version = package_version('pytest')\n\n assert coverage_version is not None\n assert coverage_version < (1000, 0, 0)\n assert pytest_version is not None\n assert pytest_version > (5, 0)",
"def check_shell_program_deps(deps):\n for dep in deps:\n success = shell_command([dep, \"--version\"])\n if not success:\n print(dep, \"är nog inte installerat :(\")\n return False\n print(\"Alla skal-kommandon finns!\")\n return True",
"def test_version(self) -> None:\n with open(\"pyproject.toml\") as f:\n for line in f:\n if \"version\" in line:\n version = line.split()[-1].replace('\"', \"\")\n break\n self.assertEqual(__version__, version)",
"def should_check_for_binary_versions(self):\n explicitly_asked_for_binaries_check = 'CHECK_BINARIES_VERSIONS' in config_vars\n update_was_requested = \"__UPDATE_INSTALLED_ITEMS__\" in config_vars.get(\"MAIN_INSTALL_TARGETS\", []).list()\n retVal = explicitly_asked_for_binaries_check or update_was_requested\n return retVal",
"def is_valid_version(self):\n pass"
] |
[
"0.7584612",
"0.7228096",
"0.6507939",
"0.6470493",
"0.6291816",
"0.6286967",
"0.62677675",
"0.6260375",
"0.6246277",
"0.6246277",
"0.62108517",
"0.61992663",
"0.6196206",
"0.6093963",
"0.6093267",
"0.60822755",
"0.60623604",
"0.59810627",
"0.59753",
"0.59671587",
"0.5955721",
"0.5949849",
"0.5910995",
"0.5906389",
"0.58942527",
"0.58777124",
"0.5873961",
"0.5844609",
"0.58311707",
"0.58184725"
] |
0.7871301
|
0
|
Attempts to read a Python version string from a runtime.txt file
|
def read_version(path):
    # Read a runtime.txt-style file (e.g. "python-3.11.4") and return just the version number.
    version = None
    if os.path.exists(path):
        # Use a context manager so the file handle is closed promptly.
        with open(path, 'r', encoding='utf-8') as handle:
            version = handle.read().strip()
    if version:
        # Strip the leading "python-" prefix, leaving only the version number.
        return re.sub(r'^python-', '', version)
    return version
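A minimal usage sketch for the helper above; the temporary file and its contents are illustrative assumptions, not part of the original record.

import os
import re
import tempfile

# Write a sample runtime.txt in the "python-X.Y.Z" form (hypothetical content).
with tempfile.TemporaryDirectory() as tmp:
    runtime_path = os.path.join(tmp, 'runtime.txt')
    with open(runtime_path, 'w', encoding='utf-8') as handle:
        handle.write('python-3.11.4\n')
    print(read_version(runtime_path))  # expected output: 3.11.4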
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def read_version():\n # code parts were taken from here https://stackoverflow.com/a/67692\n\n path2setup = os.path.dirname(__file__)\n version_file = os.path.abspath(\n os.path.join(path2setup, \"diffusion_maps\", \"version.py\"))\n\n spec = importlib.util.spec_from_file_location(\"version\", version_file)\n version = importlib.util.module_from_spec(spec)\n spec.loader.exec_module(version)\n return version.version.v_short",
"def read_release_version():\n with open(\"RELEASE-VERSION\", \"r\") as f:\n return f.readline().strip()",
"def get_version(rel_path: str) -> str:\n for line in read(rel_path).splitlines():\n if line.startswith(\"VERSION\"):\n delim = '\"' if '\"' in line else \"'\"\n return line.split(delim)[1]\n raise RuntimeError(\"Unable to find version string.\")",
"def read_version():\n filename = os.path.join(source_root_dir(), 'pyitlib/pyitlib_version.py')\n with open(filename) as fin:\n namespace = {}\n exec(fin.read(), namespace) # pylint: disable=exec-used\n return namespace['__version__']",
"def get_version(rel_path):\n for line in read(rel_path).splitlines():\n if line.startswith(\"__version__\"):\n delim = '\"' if '\"' in line else \"'\"\n return line.split(delim)[1]\n raise RuntimeError(f\"Unable to find a valid __version__ string in {rel_path}.\")",
"def versionRead():\n xuvtop = os.environ['XUVTOP']\n vFileName = os.path.join(xuvtop, 'VERSION')\n vFile = open(vFileName)\n versionStr = vFile.readline()\n vFile.close()\n return versionStr.strip()",
"def read_version(self, fname):\n version = 'unknown'\n lines = open(fname).readlines()\n for line in lines:\n if \" Version\" in line:\n version = line.split()[-2]\n break\n return version",
"def find_version(*file_paths):\n version_file = read(*file_paths)\n version_match = re.search(\n r\"^__version__ = ['\\\"]([^'\\\"]*)['\\\"]\", version_file, re.M\n )\n if version_match:\n return version_match.group(1)\n raise RuntimeError(\"Unable to find version string.\")",
"def find_version(*file_paths):\n version_file = read(*file_paths)\n version_match = re.search(r\"^__version__ = ['\\\"]([^'\\\"]*)['\\\"]\", version_file, re.M)\n if version_match:\n return version_match.group(1)\n raise RuntimeError(\"Unable to find version string.\")",
"def get_version(*file_paths):\n filename = os.path.join(os.path.dirname(__file__), *file_paths)\n version_file = open(filename).read()\n version_match = re.search(r\"^__version__ = ['\\\"]([^'\\\"]*)['\\\"]\",\n version_file, re.M)\n if version_match:\n return version_match.group(1)\n raise RuntimeError('Unable to find version string.')",
"def get_version(*file_paths):\n filename = os.path.join(os.path.dirname(__file__), *file_paths)\n version_file = open(filename).read()\n version_match = re.search(r\"^__version__ = ['\\\"]([^'\\\"]*)['\\\"]\",\n version_file, re.M)\n if version_match:\n return version_match.group(1)\n raise RuntimeError('Unable to find version string.')",
"def get_version(*file_paths):\n filename = os.path.join(os.path.dirname(__file__), *file_paths)\n version_file = open(filename).read()\n version_match = re.search(r\"^__version__ = ['\\\"]([^'\\\"]*)['\\\"]\",\n version_file, re.M)\n if version_match:\n return version_match.group(1)\n raise RuntimeError('Unable to find version string.')",
"def find_version(*file_paths):\n version_file = read(*file_paths)\n version_match = re.search(r\"^__version__ = ['\\\"]([^'\\\"]*)['\\\"]\",\n version_file, re.M)\n if version_match:\n return version_match.group(1)\n raise RuntimeError(\"Unable to find version string.\")",
"def version(path):\n here = os.path.abspath(os.path.dirname(__file__))\n with open(os.path.join(here, path), encoding='utf-8') as f:\n version_file = f.read()\n version_match = re.search(r\"\"\"^__version__ = ['\"]([^'\"]*)['\"]\"\"\",\n version_file, re.M)\n if version_match:\n return version_match.group(1)\n raise RuntimeError(\"Unable to find version string.\")",
"def find_version(fname):\n version = ''\n with open(fname, 'r') as fp:\n reg = re.compile(r'__version__ = [\\'\"]([^\\'\"]*)[\\'\"]')\n for line in fp:\n m = reg.match(line)\n if m:\n version = m.group(1)\n break\n if not version:\n raise RuntimeError('Cannot find version information')\n return version",
"def get_version():\n path = CWD / \"pettingzoo\" / \"__init__.py\"\n content = path.read_text()\n\n for line in content.splitlines():\n if line.startswith(\"__version__\"):\n return line.strip().split()[-1].strip().strip('\"')\n raise RuntimeError(\"bad version data in __init__.py\")",
"def get_version():\r\n try:\r\n with open('version', 'r') as version_file:\r\n return str(version_file.readline())\r\n except:\r\n return False",
"def get_version():\n version = \"unknown\"\n try:\n version_file = open(VERSIONFILE, \"r\")\n for line in version_file:\n if line.startswith('__version__'):\n version = line.split(\"'\")[1]\n break\n except EnvironmentError:\n pass # Okay, there is no version file.\n return version",
"def find_version():\n regex = r\"^ATRAM_VERSION = ['\\\"]v?([^'\\\"]*)['\\\"]\"\n with open(\"./lib/db.py\", 'r') as f:\n match = re.search(regex, f.read(), re.M)\n if match:\n return match.group(1)\n\n raise RuntimeError(\"Unable to find version string.\")",
"def get_version():\n version_file = Path(__file__).resolve().parent / \"clinker\" / \"__init__.py\"\n version_match = re.search(\n r\"^__version__ = ['\\\"]([^'\\\"]*)['\\\"]\", version_file.read_text(), re.M\n )\n if version_match:\n return version_match.group(1)\n raise RuntimeError(\"Failed to find version string\")",
"def find_version(*file_paths):\n version_file = Path(__file__).parent.joinpath(*file_paths)\n with open(str(version_file), 'r') as openf:\n data = openf.read()\n version_match = re.search(\n r\"^__version__ = ['\\\"]([^'\\\"]*)['\\\"]\",\n data,\n re.M,\n )\n if version_match:\n return version_match.group(1)\n\n raise RuntimeError(\"Unable to find version string.\")",
"def get_version():\n init = read(\"src\", \"{{cookiecutter.module_name}}\", \"__init__.py\")\n return VERSION_RE.search(init).group(1)",
"def parse_version(module_file):\n f = open(module_file)\n s = f.read()\n f.close()\n match = re.findall(\"__version__ = '([^']+)'\", s)\n return match[0]",
"def find_version(*file_paths):\n with open(os.path.join(abs_base_dir, *file_paths), 'r') as fp:\n version_file = fp.read()\n version_match = re.search(\n r\"^__version__ = ['\\\"]([^'\\\"]*)['\\\"]\", version_file, re.M\n )\n if version_match:\n return version_match.group(1)\n raise RuntimeError(\"Unable to find version string.\")",
"def get_version():\n\n with open('u2fval/__init__.py', 'r') as f:\n match = VERSION_PATTERN.search(f.read())\n return match.group(1)",
"def get_version():\n found = None\n with open(os.path.join(PATH, \"pyproject.toml\"), \"rt\") as setup_file:\n for line in setup_file:\n line = line.strip()\n if line.startswith(\"version\"):\n found = line\n break\n\n if found is None:\n raise ValueError(\"Unable to detect version\")\n\n return found.split(\"=\")[-1].replace('\"', \"\").strip()",
"def find_version():\n version_file = read_file('__init__.py')\n version_match = re.search(r'^__version__ = [\"\\']([^\"\\']*)[\"\\']',\n version_file, re.M)\n if version_match:\n return version_match.group(1)\n raise RuntimeError('Unable to find version string.')",
"def ReadVersion():\n return _ReadNumericFile(pathutils.JOB_QUEUE_VERSION_FILE)",
"def get_version():\n\n with open('yubico/yubico_version.py', 'r') as f:\n match = VERSION_PATTERN.search(f.read())\n return match.group(1)",
"def load_version_information() -> None:\n to_update = {\"VERSION_MAJOR\", \"VERSION_MINOR\", \"VERSION_PATCH\", \"VERSION_SUFFIX\"}\n with VERSION_FILE.open(\"r\", encoding=\"utf-8\") as fp:\n for line in fp:\n name, _, value = line.strip().partition(\"=\")\n # Don't overwrite random variables by trusting an external file.\n var = name.strip()\n if var in to_update:\n globals()[var] = value.strip()"
] |
[
"0.7306961",
"0.6943205",
"0.6914566",
"0.6862325",
"0.6823303",
"0.6819166",
"0.6711935",
"0.67099226",
"0.6701405",
"0.66575587",
"0.66575587",
"0.66575587",
"0.66559863",
"0.66173005",
"0.6604965",
"0.65997416",
"0.6592785",
"0.6542442",
"0.65181917",
"0.64456487",
"0.6434404",
"0.6424184",
"0.64240015",
"0.63977736",
"0.6387894",
"0.63270193",
"0.6317274",
"0.63155514",
"0.628866",
"0.628372"
] |
0.73625326
|
0
|
Idempotently caches the list of packages installed in the virtualenv. Can be run safely before the virtualenv is created, and will be rerun afterwards.
|
def set_installed_packages():
global INSTALLED_PACKAGES, REQUIRED_VERSION
if INSTALLED_PACKAGES:
return
if os.path.exists(BIN_PYTHON):
pip = subprocess.Popen(
(BIN_PYTHON, '-m', 'pip', 'freeze'),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE
)
(stdout, stderr) = pip.communicate()
pip.wait()
INSTALLED_PACKAGES = [normalize_package_name(r.decode().split('==')[0].lower()) for r in stdout.split()]
REQUIRED_VERSION = next((package for package in INSTALLED_PACKAGES if re.match(r'^lore[!<>=]', package)), None)
if REQUIRED_VERSION:
REQUIRED_VERSION = re.split(r'[!<>=]', REQUIRED_VERSION)[-1]
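A brief usage sketch for the caching helper above; BIN_PYTHON, INSTALLED_PACKAGES, REQUIRED_VERSION and normalize_package_name come from the surrounding module, and the package name checked here is purely illustrative.

set_installed_packages()  # no-op until BIN_PYTHON exists; idempotent once the cache is filled

if INSTALLED_PACKAGES and 'requests' in INSTALLED_PACKAGES:
    print('requests is already installed in the virtualenv')
if REQUIRED_VERSION:
    print('lore version pinned in the virtualenv:', REQUIRED_VERSION)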
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def get_installed_packages():\n global INSTALLED_PACKAGES\n chk = Popen(\"{} -m pip freeze\".format(sys.executable),\n shell=True, stdout=PIPE)\n installed = chk.communicate()[0].decode().splitlines()\n for pkg in installed:\n item = pkg.split(\"==\")\n INSTALLED_PACKAGES[item[0]] = item[1]",
"def getInstalledPackages():\n reqs = subprocess.check_output([sys.executable,\n '-m', 'pip', 'freeze'])\n installed_packages = [r.decode().split('==')[0]\n for r in reqs.split()]\n return installed_packages",
"def freeze():\n proc = subprocess.run(['pip', 'freeze'], stdout=subprocess.PIPE)\n with open('requirements.txt', 'wb') as fout:\n fout.write(proc.stdout)",
"def install_deps():\n pipenv_dev = run('pipenv install --dev'.split(), check=True)\n print('Installed dependencies and virtual environment. Type `pipenv shell` to activate later.')",
"def sync_virtualenv(ctx):\n if not path.isfile(\"./pyenv/bin/pip\"):\n ctx.run(\"virtualenv --no-site-packages --python=/usr/bin/python2.7 pyenv\")\n ctx.run(\"PIP_DOWNLOAD_CACHE=/var/tmp/ ./pyenv/bin/pip install -r requirements.txt\")\n print(\"\"\"\n Installation completed. Please check any error messages above.\n\n If you are going to use `openstack` or ansible directly on the command line, run\n\n . ./pyenv/bin/activate\n\n or even add it to your ~/.bashrc\n \"\"\")",
"def build_wheel_cache():\n pkg_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))\n pyvers = ['2.6', '2.7', '3.3', '3.4', '3.5']\n old_python_path = os.environ['PYTHONPATH']\n for pyver in pyvers:\n pycmd = which('python{0}'.format(pyver))\n if not pycmd:\n print('Python {0} not found'.format(pyver))\n continue\n pipcmd = which('pip{0}'.format(pyver))\n if not pipcmd:\n print('pip {0} not found'.format(pyver))\n continue\n os.environ['PYTHONPATH'] = ''\n lines = load_requirements(pkg_dir, pyver)\n for line in lines:\n if 'numpy' in line:\n numpy_line = line\n break\n else:\n raise RuntimeError('Numpy dependency could not be found')\n for line in lines:\n print(\n _pcolor(\n 'Building {0} wheel cache for Python {1}'.format(\n line,\n pyver\n ),\n 'cyan'\n )\n )\n if 'scipy' in line:\n # Install numpy before scipy otherwise pip throws an exception\n pobj = subprocess.Popen(\n [\n 'pip{0}'.format(pyver),\n 'install',\n '--upgrade',\n '--force-reinstall',\n numpy_line.strip()\n ],\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT\n )\n stdout, _ = pobj.communicate()\n print(stdout)\n pobj = subprocess.Popen(\n ['pip{0}'.format(pyver), 'wheel', line],\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT\n )\n stdout, _ = pobj.communicate()\n print(stdout)\n os.environ['PYTHONPATH'] = old_python_path",
"def prod_server():\n sh(\"bin/pip freeze -r requirements.txt production/requirements.txt\")",
"def get_installed_packages(cache=False,\n output_dir='.',\n output_filename='installed.pkgs.txt'):\n output = os.path.join(output_dir, output_filename)\n cmd = '''aptitude search '~i !~M' -F '%%p' | sort -u > %r''' % (\n output)\n ensure_file(cmd, output, shell=True, overwrite=not(cache))\n installed = list(read_lines(output))\n return installed",
"def test_lock_missing_cache_entries_gets_all_hashes(PipenvInstance, tmpdir):\n\n with temp_environ():\n os.environ[\"PIPENV_CACHE_DIR\"] = str(tmpdir.strpath)\n with PipenvInstance(chdir=True) as p:\n p._pipfile.add(\"pathlib2\", \"*\")\n assert \"pathlib2\" in p.pipfile[\"packages\"]\n c = p.pipenv(\"install\")\n assert c.return_code == 0, (c.err, (\"\\n\".join([\"{0}: {1}\\n\".format(k, v) for k, v in os.environ.items()])))\n c = p.pipenv(\"lock --clear\")\n assert c.return_code == 0, c.err\n assert \"pathlib2\" in p.lockfile[\"default\"]\n assert \"scandir\" in p.lockfile[\"default\"]\n assert isinstance(p.lockfile[\"default\"][\"scandir\"][\"hashes\"], list)\n assert len(p.lockfile[\"default\"][\"scandir\"][\"hashes\"]) > 1",
"def test_in_virtualenv(self):\n new_executor = self.executor.in_virtualenv('/appenv')\n output, _err = new_executor.pip.install('a-local-package').batch()\n self.assertEqual(output, 'a-local-package installed')\n new_executor_one = self.executor.patch_env(PATH='/bin')\n new_executor_two = new_executor_one.in_virtualenv('/appenv')\n output, _err = new_executor_two.pip.install('a-local-package').batch()\n self.assertEqual(output, 'a-local-package installed')",
"def get_all_packages(self):\n return self._package_cache.values()",
"def install_deps_temp(self):\n if self.distribution.install_requires:\n self.distribution.fetch_build_eggs(\n self.distribution.install_requires)\n if self.distribution.tests_require:\n self.distribution.fetch_build_eggs(self.distribution.tests_require)",
"def update_requirements():\n\n with virtualenv(VIRTUALENV_PATH):\n cmd = ['pip install']\n cmd += ['--requirement %s' % os.path.join(CODE_DIR,'requirements.txt')]\n run(' '.join(cmd))",
"def update_dependencies():\n pip = env.virtualenv.child('bin', 'pip')\n reqs = env.code_dir.child('deploy-requirements.txt')\n sudo('%s -q install -U pip' % pip)\n sudo('%s -q install -r %s' % (pip, reqs))",
"def install_packages():\n with open(\"requirements.txt\", \"w\") as requirements_file:\n subprocess.run([\"pipenv\", \"lock\", \"-r\"], stdout=requirements_file)\n\n subprocess.run(\n [\"pip\", \"install\", \"-r\", \"requirements.txt\", \"--no-deps\", \"-t\", BUILD_DIR]\n )",
"def get_package_list():\n pip_freeze = subprocess.check_output(('pip', 'freeze')).decode('utf8')\n package_list = [x.strip().split('==') for x in pip_freeze.split('\\n') if x.find('==') != -1]\n package_list = [(x[0].lower(), x[1]) for x in package_list]\n return package_list",
"def update_requirements():\n\n require('code_root', provided_by=env.environments)\n requirements = os.path.join(env.code_root, 'requirements')\n sdists = os.path.join(requirements, 'sdists')\n base_cmd = ['pip install']\n base_cmd += ['-q -E %(virtualenv_root)s' % env]\n base_cmd += ['--no-index --find-links=file://%s' % sdists]\n # install GDAL by hand, before anything else that might depend on it\n cmd = base_cmd + ['--no-install \"GDAL==1.6.1\"']\n sudo(' '.join(cmd), user=env.deploy_user)\n # this directory won't exist if GDAL was already installed\n if files.exists('%(virtualenv_root)s/build/GDAL' % env):\n sudo('rm -f %(virtualenv_root)s/build/GDAL/setup.cfg' % env, user=env.deploy_user)\n with cd('%(virtualenv_root)s/build/GDAL' % env):\n sudo('%(virtualenv_root)s/bin/python setup.py build_ext '\n '--gdal-config=gdal-config '\n '--library-dirs=/usr/lib '\n '--libraries=gdal1.6.0 '\n '--include-dirs=/usr/include/gdal '\n 'install' % env, user=env.deploy_user)\n # force reinstallation of OpenBlock every time\n with settings(warn_only=True):\n sudo('pip uninstall -y -E %(virtualenv_root)s ebpub ebdata obadmin' % env)\n for file_name in ['ebpub.txt', 'ebdata.txt', 'obadmin.txt', 'openrural.txt']:\n apps = os.path.join(requirements, file_name)\n cmd = base_cmd + ['--requirement %s' % apps]\n sudo(' '.join(cmd), user=env.deploy_user)",
"def log_installed_python_prereqs():\n sh(\"pip freeze > {}\".format(Env.GEN_LOG_DIR + \"/pip_freeze.log\"))",
"def freeze():\n dependencies = sh('pip freeze', capture=True).split(os.linesep)\n\n with open('requirements.txt', 'w') as file:\n for dep in dependencies:\n if not dep.startswith('bones-testing'):\n file.write(dep+'\\n')",
"def build_env_wheels() -> Iterable[Path]:\n return []",
"def pip_packages():\n packages = reduce(lambda a, x: \"%s %s\" % (a, x), PIP_PACKAGES, '')\n sudo(\"pip install %s &> /dev/null\" % packages)",
"def get_installed_versions(cls) -> list[str]:\n\n pyenv_root = os.getenv(\"PYENV_ROOT\")\n if pyenv_root is None:\n raise Failure(\"PYENV_ROOT is not configured\")\n\n root_dir = Path(pyenv_root)\n version_dir = root_dir / \"versions\"\n\n return [i.name for i in version_dir.iterdir() if i.is_dir()]",
"def getsitepackages():\n\tpass",
"def getAllInstalledPackages(installedPkgPath):\n allPkgVers = []\n if os.path.exists(installedPkgPath):\n for pkg in os.listdir(installedPkgPath):\n pkgVersions = os.listdir(os.path.join(installedPkgPath, pkg))\n for pkgVersion in pkgVersions:\n pkgPath = os.path.join(installedPkgPath, pkg)\n if not fnmatch.fnmatch(pkgVersion, '*.inprogress'):\n allPkgVers.append(os.path.join(pkgPath, pkgVersion))\n return allPkgVers",
"def avoid_pip_isolation(env: Mapping[str, str]) -> dict[str, str]:\n new_env = {k: v for k, v in env.items() if k != \"PYTHONNOUSERSITE\"}\n if \"PYTHONPATH\" not in new_env:\n return new_env\n\n new_env[\"PYTHONPATH\"] = os.pathsep.join(\n [\n path\n for path in new_env[\"PYTHONPATH\"].split(os.pathsep)\n if \"pip-build-env-\" not in path\n ]\n )\n return new_env",
"def getusersitepackages():\n\tpass",
"def install_requirements():\r\n if env.hosts:\r\n run ('cd %(path)s %(command_join)s env/bin/pip install -r current-release/requirements.txt' % env)\r\n else:\r\n local('%spip install -r requirements.txt' % virtualenv_bin, capture=False)",
"def freeze():\n do('export FLASK_CONFIG=config/dev.py && %s/bin/python manage.py freeze' % venv_path)",
"def get_installed_files(packagename, venv_pip, temp_dir):\n result = check_output(venv_pip + ['show', '-f', packagename])\n result = (result.decode()).split('\\n')\n files = []\n\n for line in result:\n # this line contains path to venv directory\n if line.startswith('Location:'):\n line = line[len('Location: '):]\n prefix = '/' + line.replace(temp_dir, 'usr') + '/'\n if line.startswith(' '*2):\n path = os.path.abspath(prefix + line.strip())\n if os.path.isdir(path):\n path += \"/\"\n files.append(path)\n return files",
"def complete_env() -> Python:\n return Python([\n 'click==0.0.1',\n 'googleapis-common-protos==0.0.1',\n 'numpy==0.0.1',\n 'pandas==0.0.1',\n 'Pillow==0.0.1',\n 'requests==0.0.1',\n 'scikit-learn==0.0.1',\n 'torch==0.0.1',\n 'urllib3==0.0.1',\n 'PyYAML==0.0.1',\n ]) # `verta` and `cloudpickle` included by default"
] |
[
"0.688422",
"0.64512485",
"0.612748",
"0.61209935",
"0.61056507",
"0.60841095",
"0.60402966",
"0.60189337",
"0.5927029",
"0.5900896",
"0.58980316",
"0.587823",
"0.58747333",
"0.587424",
"0.5825978",
"0.581681",
"0.57706964",
"0.5763765",
"0.56364876",
"0.5628184",
"0.56278956",
"0.56237453",
"0.5623741",
"0.5613599",
"0.56078774",
"0.5604394",
"0.56037873",
"0.55963266",
"0.5595803",
"0.5581105"
] |
0.72177386
|
0
|
Given two matching sets of coordinates on detector and sky, compute the WCS. Fits a WCS object to a matched set of input detector and sky coordinates. Optionally, a SIP can be fit to account for geometric distortion. Returns an `~astropy.wcs.WCS` object with the best-fit parameters for mapping between input pixel and sky coordinates. The projection type (default 'TAN') can be passed in as a string, one of the valid three-letter projection codes, or as a WCS object with projection keywords already set. Note that if an input WCS has any non-polynomial distortion, this will be applied and reflected in the fit terms and coefficients. Passing in a WCS object in this way essentially allows it to be refit based on the matched input coordinates and projection point, but take care when using this option, as non-projection-related keywords in the input might cause unexpected behavior. Notes: The fiducial point for the spherical projection can be set to 'center' to use the mean position of input sky coordinates, or as an `~astropy.coordinates.SkyCoord` object. Units in all output WCS objects will always be in degrees. If the coordinate frame differs between `~astropy.coordinates.SkyCoord` objects passed in for ``world_coords`` and ``proj_point``, the frame for ``world_coords`` will override as the frame for the output WCS. If a WCS object is passed in to ``projection``, the CD/PC matrix will be used as an initial guess for the fit. If this is known to be significantly off and may throw off the fit, set it to the identity matrix (for example, by doing wcs.wcs.pc = [(1., 0.,), (0., 1.)])
|
def fit_wcs_from_points(xy, world_coords, proj_point='center',
projection='TAN', sip_degree=None): # pragma: no cover
from astropy.coordinates import SkyCoord # here to avoid circular import
import astropy.units as u
from astropy.wcs import Sip
from scipy.optimize import least_squares
xp, yp = xy
try:
lon, lat = world_coords.data.lon.deg, world_coords.data.lat.deg
except AttributeError:
unit_sph = world_coords.unit_spherical
lon, lat = unit_sph.lon.deg, unit_sph.lat.deg
# verify input
if (proj_point != 'center') and (type(proj_point) != type(world_coords)):
raise ValueError("proj_point must be set to 'center', or an" +
"`~astropy.coordinates.SkyCoord` object with " +
"a pair of points.")
if proj_point != 'center':
assert proj_point.size == 1
proj_codes = [
'AZP', 'SZP', 'TAN', 'STG', 'SIN', 'ARC', 'ZEA', 'AIR', 'CYP',
'CEA', 'CAR', 'MER', 'SFL', 'PAR', 'MOL', 'AIT', 'COP', 'COE',
'COD', 'COO', 'BON', 'PCO', 'TSC', 'CSC', 'QSC', 'HPX', 'XPH'
]
if type(projection) == str:
if projection not in proj_codes:
raise ValueError("Must specify valid projection code from list of "
+ "supported types: ", ', '.join(proj_codes))
# empty wcs to fill in with fit values
wcs = celestial_frame_to_wcs(frame=world_coords.frame,
projection=projection)
else: #if projection is not string, should be wcs object. use as template.
wcs = copy.deepcopy(projection)
wcs.cdelt = (1., 1.) # make sure cdelt is 1
wcs.sip = None
# Change PC to CD, since cdelt will be set to 1
if wcs.wcs.has_pc():
wcs.wcs.cd = wcs.wcs.pc
wcs.wcs.__delattr__('pc')
if (type(sip_degree) != type(None)) and (type(sip_degree) != int):
raise ValueError("sip_degree must be None, or integer.")
# set pixel_shape to span of input points
wcs.pixel_shape = (xp.max()+1-xp.min(), yp.max()+1-yp.min())
# determine CRVAL from input
close = lambda l, p: p[np.argmin(np.abs(l))]
if str(proj_point) == 'center': # use center of input points
sc1 = SkyCoord(lon.min()*u.deg, lat.max()*u.deg)
sc2 = SkyCoord(lon.max()*u.deg, lat.min()*u.deg)
pa = sc1.position_angle(sc2)
sep = sc1.separation(sc2)
midpoint_sc = directional_offset_by(sc1, pa, sep/2)
wcs.wcs.crval = ((midpoint_sc.data.lon.deg, midpoint_sc.data.lat.deg))
wcs.wcs.crpix = ((xp.max()+xp.min())/2., (yp.max()+yp.min())/2.)
    elif proj_point is not None:  # convert units, initial guess for crpix
        # SkyCoord.transform_to returns a new object, so keep the converted point
        proj_point = proj_point.transform_to(world_coords)
        wcs.wcs.crval = (proj_point.data.lon.deg, proj_point.data.lat.deg)
        wcs.wcs.crpix = (close(lon - wcs.wcs.crval[0], xp),
                         close(lat - wcs.wcs.crval[1], yp))
# fit linear terms, assign to wcs
    # the template CD matrix and CRPIX serve as the initial guess; the bounds
    # below keep the fitted reference pixel within the span of the input points.
p0 = np.concatenate([wcs.wcs.cd.flatten(), wcs.wcs.crpix.flatten()])
xpmin, xpmax, ypmin, ypmax = xp.min(), xp.max(), yp.min(), yp.max()
if xpmin==xpmax: xpmin, xpmax = xpmin-0.5, xpmax+0.5
if ypmin==ypmax: ypmin, ypmax = ypmin-0.5, ypmax+0.5
fit = least_squares(_linear_wcs_fit, p0,
args=(lon, lat, xp, yp, wcs),
bounds=[[-np.inf,-np.inf,-np.inf,-np.inf, xpmin, ypmin],
[ np.inf, np.inf, np.inf, np.inf, xpmax, ypmax]])
wcs.wcs.crpix = np.array(fit.x[4:6])
wcs.wcs.cd = np.array(fit.x[0:4].reshape((2, 2)))
# fit SIP, if specified. Only fit forward coefficients
if sip_degree:
degree = sip_degree
if '-SIP' not in wcs.wcs.ctype[0]:
wcs.wcs.ctype = [x + '-SIP' for x in wcs.wcs.ctype]
coef_names = ['{0}_{1}'.format(i, j) for i in range(degree+1)
for j in range(degree+1) if (i+j) < (degree+1) and
(i+j) > 1]
p0 = np.concatenate((np.array(wcs.wcs.crpix), wcs.wcs.cd.flatten(),
np.zeros(2*len(coef_names))))
fit = least_squares(_sip_fit, p0,
args=(lon, lat, xp, yp, wcs, degree, coef_names))
coef_fit = (list(fit.x[6:6+len(coef_names)]),
list(fit.x[6+len(coef_names):]))
# put fit values in wcs
wcs.wcs.cd = fit.x[2:6].reshape((2, 2))
wcs.wcs.crpix = fit.x[0:2]
a_vals = np.zeros((degree+1, degree+1))
b_vals = np.zeros((degree+1, degree+1))
for coef_name in coef_names:
a_vals[int(coef_name[0])][int(coef_name[2])] = coef_fit[0].pop(0)
b_vals[int(coef_name[0])][int(coef_name[2])] = coef_fit[1].pop(0)
wcs.sip = Sip(a_vals, b_vals, np.zeros((degree+1, degree+1)),
np.zeros((degree+1, degree+1)), wcs.wcs.crpix)
return wcs
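A small, hedged example of how a fit like the one above might be invoked; the coordinate values are synthetic, and the import assumes the public astropy.wcs.utils.fit_wcs_from_points, which exposes the same signature.

import numpy as np
import astropy.units as u
from astropy.coordinates import SkyCoord
from astropy.wcs.utils import fit_wcs_from_points

# Four synthetic detector/sky matches spanning roughly a 500x500 pixel frame.
xp = np.array([10.0, 460.0, 25.0, 440.0])
yp = np.array([15.0, 30.0, 470.0, 455.0])
world = SkyCoord(ra=[150.02, 149.98, 150.02, 149.98] * u.deg,
                 dec=[1.98, 1.98, 2.02, 2.02] * u.deg)

# Fit a TAN projection with the projection point at the mean sky position.
wcs = fit_wcs_from_points((xp, yp), world, proj_point='center', projection='TAN')
print(wcs.wcs.crval)  # fitted reference sky position, in degrees
print(wcs.wcs.crpix)  # fitted reference pixel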
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def WCS(imname, outname, astronet=False, timeout=None):\n\n print_cmd_line(\"STAP_WCS.py\", imname, outname,\n astronet=astronet, timeout=timeout)\n\n if astronet:\n cmd = \"solve-mosaic_single.py {0} {1}\".format(imname,outname)\n else:\n cmd = \"{0}/bin/SM-WCS-perchip.py {1} --outname {2}\".format(\n os.environ['BRIANWCS'],imname,outname)\n\n status, stdoutstr = STAP_callexternal(cmd, combinestderr=True,\n getstdout=True, timeout=timeout)\n print stdoutstr\n if status != 0:\n os.system('rm -fr {0}'.format(outname))\n raise ExternalFailure(cmd=cmd, exit_code=status)\n\n # Check to ensure the solution makes sense\n print \"Initiating sanity checks...\"\n with pyfits.open(outname, mode='update') as hdulist:\n hdr = hdulist[0].header\n cd11, cd12 = hdr['CD1_1'], hdr['CD1_2']\n cd21, cd22 = hdr['CD2_1'], hdr['CD2_2']\n # Is the plate scale right?\n psx = 3600*math.sqrt(cd11**2 + cd12**2)\n psy = 3600*math.sqrt(cd21**2 + cd22**2)\n print \" psx, psy =\", psx, psy, \"arcsec/pix\"\n if (abs(1.0-psx/Imager.pixel_scale) > 0.05 or\n abs(1.0-psx/Imager.pixel_scale) > 0.05):\n os.system('rm -fr {0}'.format(outname))\n raise TrackableException(\"WCS solution doesn't make sense\")\n # Are the axes orthogonal?\n ctheta = (cd11*cd21 + cd12*cd22)/(psx*psy)\n theta = math.acos(ctheta)*180/math.pi\n print \" ctheta =\", ctheta, \"theta =\", theta, \"deg\"\n if abs(ctheta) > 0.01:\n os.system('rm -fr {0}'.format(outname))\n raise TrackableException(\"WCS solution doesn't make sense\")\n # What's the position angle?\n if not astronet:\n pa = math.atan2(cd12, cd11)\n print \" pa =\", pa*180/math.pi, \"deg\"\n if abs(math.sin(pa)) > 0.02:\n os.system('rm -fr {0}'.format(outname))\n raise TrackableException(\"WCS solution doesn't make sense\")\n print \"All checks done, WCS makes sense.\"",
"def test_wcs_fit():\n import astropy.units as u\n rng = np.random.default_rng(57721)\n camera = imsim.get_camera()\n\n for _ in range(30):\n # Random spherepoint for boresight\n z = rng.uniform(-1, 1)\n th = rng.uniform(0, 2*np.pi)\n x = np.sqrt(1-z**2) * np.cos(th)\n y = np.sqrt(1-z**2) * np.sin(th)\n boresight = galsim.CelestialCoord(\n np.arctan2(y, x) * galsim.radians,\n np.arcsin(z) * galsim.radians\n )\n\n # Random obstime. No attempt to make sky dark.\n obstime = Time(\"J2020\") + rng.uniform(0, 1)*u.year\n\n # Rotator\n rotTelPos = rng.uniform(-np.pi/2, np.pi/2)\n\n # Ambient conditions\n temperature = rng.uniform(270, 300)\n pressure = rng.uniform(66, 72)\n H2O_pressure = rng.uniform(0.1, 10)\n\n wavelength = 620. # nm\n telescope = imsim.load_telescope(\n \"LSST_r.yaml\", rotTelPos=rotTelPos*galsim.radians\n )\n\n factory = imsim.BatoidWCSFactory(\n boresight, obstime, telescope, wavelength,\n camera, temperature, pressure, H2O_pressure\n )\n\n aob, zob, hob, dob, rob, eo = factory._ICRF_to_observed(\n boresight.ra.rad, boresight.dec.rad, all=True\n )\n\n # If zenith angle > 70 degrees, try again\n if np.rad2deg(zob) > 70:\n continue\n\n # Pick a few detectors randomly\n for idet in rng.choice(len(camera), 3):\n det = camera[idet]\n wcs = factory.getWCS(det, order=3)\n\n # center of detector:\n xc, yc = det.getCenter(cameraGeom.PIXELS)\n x = xc + rng.uniform(-2000, 2000, 100)\n y = yc + rng.uniform(-2000, 2000, 100)\n rc, dc = wcs.xyToradec(x, y, units='radians')\n rc1, dc1 = factory.pixel_to_ICRF(x, y, det)\n\n dist = sphere_dist(rc, dc, rc1, dc1)\n np.testing.assert_allclose( # sphere dist < 1e-5 arcsec\n 0,\n np.rad2deg(np.max(np.abs(dist)))*3600,\n rtol=0,\n atol=1e-5\n )\n print(\n \"ICRF dist (arcsec) \",\n np.rad2deg(np.mean(dist))*3600,\n np.rad2deg(np.max(np.abs(dist)))*3600,\n np.rad2deg(np.std(dist))*3600\n )\n x, y = wcs.radecToxy(rc, dc, units='radians')\n x1, y1 = factory.ICRF_to_pixel(rc, dc, det)\n np.testing.assert_allclose( # pix dist < 2e-3\n 0,\n np.max(np.abs(x-x1)),\n rtol=0,\n atol=2e-3\n )\n np.testing.assert_allclose(\n 0,\n np.max(np.abs(y-y1)),\n rtol=0,\n atol=2e-3\n )\n print(\n \"x-x1 (pixel) \",\n np.mean(x-x1),\n np.max(np.abs(x-x1)),\n np.std(x-x1)\n )\n print(\n \"y-y1 (pixel) \",\n np.mean(y-y1),\n np.max(np.abs(y-y1)),\n np.std(y-y1)\n )\n print(\"\\n\")\n\n if __name__ != '__main__':\n # In regular unit testing, just do one of these.\n break",
"def makeWcs(projName, destCtrInd, skyOffset, rotAng, scaleFac, srcWcs, srcCtrInd):\n ps = dafBase.PropertySet()\n srcCtrPix = lsst.geom.Point2D(*[float(val) for val in srcCtrInd])\n destCtrFitsPix = lsst.geom.Point2D(*[ind + 1.0 for ind in destCtrInd])\n srcCtrFitsPix = lsst.geom.Point2D(*[ind + 1.0 for ind in srcCtrInd])\n # offset 1 pixel in x to compute orientation\n srcOffFitsPix = srcCtrFitsPix + lsst.geom.Extent2D(1.0, 0.0)\n srcCtrSkyPos = srcWcs.pixelToSky(srcCtrFitsPix)\n srcOffSkyPos = srcWcs.pixelToSky(srcOffFitsPix)\n srcAngleRad = srcCtrSkyPos.bearingTo(srcOffSkyPos).asRadians()\n destAngleRad = srcAngleRad + (rotAng / DegPerRad)\n srcScale = srcWcs.getPixelScale(srcCtrPix).asDegrees()\n destScale = srcScale / scaleFac\n for i in range(2):\n ip1 = i + 1\n ctypeStr = (\"%-5s%3s\" % ((\"RA\", \"DEC\")[i], projName)).replace(\" \", \"-\")\n ps.add(\"CTYPE%1d\" % (ip1,), ctypeStr)\n ps.add(\"CRPIX%1d\" % (ip1,), destCtrFitsPix[i])\n ps.add(\"CRVAL%1d\" % (ip1,), srcCtrSkyPos[i].asDegrees() + skyOffset[i])\n ps.add(\"RADESYS\", \"ICRS\")\n ps.add(\"EQUINOX\", 2000)\n ps.add(\"CD1_1\", -destScale * math.cos(destAngleRad))\n ps.add(\"CD2_1\", destScale * math.sin(destAngleRad))\n ps.add(\"CD1_2\", destScale * math.sin(destAngleRad))\n ps.add(\"CD2_2\", destScale * math.cos(destAngleRad))\n return lsst.afw.geom.makeSkyWcs(ps)",
"def refitWcs(self, exposure, sources, matches):\n sip = None\n if self.config.solver.calculateSip:\n self.log.info(\"Refitting WCS\")\n origMatches = matches\n wcs = exposure.getWcs()\n\n import lsstDebug\n display = lsstDebug.Info(__name__).display\n frame = lsstDebug.Info(__name__).frame\n pause = lsstDebug.Info(__name__).pause\n\n def fitWcs(initialWcs, title=None):\n \"\"\"Do the WCS fitting and display of the results\"\"\"\n sip = makeCreateWcsWithSip(matches, initialWcs, self.config.solver.sipOrder)\n resultWcs = sip.getNewWcs()\n if display:\n showAstrometry(exposure, resultWcs, origMatches, matches, frame=frame,\n title=title, pause=pause)\n return resultWcs, sip.getScatterOnSky()\n\n numRejected = 0\n try:\n for i in range(self.config.rejectIter):\n wcs, scatter = fitWcs(wcs, title=\"Iteration %d\" % i)\n\n ref = numpy.array([wcs.skyToPixel(m.first.getCoord()) for m in matches])\n src = numpy.array([m.second.getCentroid() for m in matches])\n diff = ref - src\n rms = diff.std()\n trimmed = []\n for d, m in zip(diff, matches):\n if numpy.all(numpy.abs(d) < self.config.rejectThresh*rms):\n trimmed.append(m)\n else:\n numRejected += 1\n if len(matches) == len(trimmed):\n break\n matches = trimmed\n\n # Final fit after rejection iterations\n wcs, scatter = fitWcs(wcs, title=\"Final astrometry\")\n\n except LsstCppException as e:\n if not isinstance(e.message, LengthErrorException):\n raise\n self.log.warn(\"Unable to fit SIP: %s\" % e)\n\n self.log.info(\"Astrometric scatter: %f arcsec (%s non-linear terms, %d matches, %d rejected)\" %\n (scatter.asArcseconds(), \"with\" if wcs.hasDistortion() else \"without\",\n len(matches), numRejected))\n exposure.setWcs(wcs)\n\n # Apply WCS to sources\n for index, source in enumerate(sources):\n sky = wcs.pixelToSky(source.getX(), source.getY())\n source.setCoord(sky)\n else:\n self.log.warn(\"Not calculating a SIP solution; matches may be suspect\")\n \n self.display('astrometry', exposure=exposure, sources=sources, matches=matches)\n\n return sip",
"def fitWcs(initialWcs, title=None):\n sip = makeCreateWcsWithSip(matches, initialWcs, self.config.solver.sipOrder)\n resultWcs = sip.getNewWcs()\n if display:\n showAstrometry(exposure, resultWcs, origMatches, matches, frame=frame,\n title=title, pause=pause)\n return resultWcs, sip.getScatterOnSky()",
"def test_create_fitswcs(tmpdir, create_model_3d):\n im = create_model_3d\n w3d = pointing.create_fitswcs(im)\n gra, gdec, glam = w3d(1, 1, 1)\n\n path = str(tmpdir.join(\"fitswcs.fits\"))\n im.save(path)\n with fits.open(path) as hdulist:\n hdu = hdulist[\"SCI\"]\n w = wcs.WCS(hdu.header)\n wcel = w.sub(['celestial'])\n ra, dec = wcel.all_pix2world(1, 1, 0)\n\n # Check that astropy.wcs.WCS and gwcs.WCS give same result\n assert_allclose((ra, dec), (gra, gdec))",
"def reproject(wcs1, wcs2):\n\n if isinstance(wcs1, fitswcs.WCS):\n forward_transform = wcs1.all_pix2world\n elif isinstance(wcs1, gwcs.WCS):\n forward_transform = wcs1.forward_transform\n elif issubclass(wcs1, Model):\n forward_transform = wcs1\n else:\n raise TypeError(\"Expected input to be astropy.wcs.WCS or gwcs.WCS \"\n \"object or astropy.modeling.Model subclass\")\n\n if isinstance(wcs2, fitswcs.WCS):\n backward_transform = wcs2.all_world2pix\n elif isinstance(wcs2, gwcs.WCS):\n backward_transform = wcs2.backward_transform\n elif issubclass(wcs2, Model):\n backward_transform = wcs2.inverse\n else:\n raise TypeError(\"Expected input to be astropy.wcs.WCS or gwcs.WCS \"\n \"object or astropy.modeling.Model subclass\")\n\n def _reproject(x, y):\n sky = forward_transform(x, y)\n flat_sky = []\n for axis in sky:\n flat_sky.append(axis.flatten())\n # Filter out RuntimeWarnings due to computed NaNs in the WCS\n warnings.simplefilter(\"ignore\")\n det = backward_transform(*tuple(flat_sky))\n warnings.resetwarnings()\n det_reshaped = []\n for axis in det:\n det_reshaped.append(axis.reshape(x.shape))\n return tuple(det_reshaped)\n return _reproject",
"def get_wcs(self, hdr, slits, platescale, wave0, dwv, spatial_scale=None):\n msgs.info(\"Calculating the WCS\")\n # Get the x and y binning factors, and the typical slit length\n binspec, binspat = parse.parse_binning(self.get_meta_value([hdr], 'binning'))\n\n # Get the pixel and slice scales\n pxscl = platescale * binspat / 3600.0 # Need to convert arcsec to degrees\n slscl = self.get_meta_value([hdr], 'slitwid')\n if spatial_scale is not None:\n if pxscl > spatial_scale / 3600.0:\n msgs.warn(\"Spatial scale requested ({0:f}'') is less than the pixel scale ({1:f}'')\".format(spatial_scale, pxscl*3600.0))\n # Update the pixel scale\n pxscl = spatial_scale / 3600.0 # 3600 is to convert arcsec to degrees\n\n # Get the typical slit length (this changes by ~0.3% over all slits, so a constant is fine for now)\n slitlength = int(np.round(np.median(slits.get_slitlengths(initial=True, median=True))))\n\n # Get RA/DEC\n raval = self.get_meta_value([hdr], 'ra')\n decval = self.get_meta_value([hdr], 'dec')\n\n # Create a coordinate\n coord = SkyCoord(raval, decval, unit=(units.deg, units.deg))\n\n # Get rotator position\n msgs.warn(\"HACK FOR MAAT SIMS --- NEED TO FIGURE OUT RPOS and RREF FOR MAAT FROM HEADER INFO\")\n if 'ROTPOSN' in hdr:\n rpos = hdr['ROTPOSN']\n else:\n rpos = 0.\n if 'ROTREFAN' in hdr:\n rref = hdr['ROTREFAN']\n else:\n rref = 0.\n # Get the offset and PA\n rotoff = 0.0 # IFU-SKYPA offset (degrees)\n skypa = rpos + rref # IFU position angle (degrees)\n crota = np.radians(-(skypa + rotoff))\n\n # Calculate the fits coordinates\n cdelt1 = -slscl\n cdelt2 = pxscl\n if coord is None:\n ra = 0.\n dec = 0.\n crota = 1\n else:\n ra = coord.ra.degree\n dec = coord.dec.degree\n # Calculate the CD Matrix\n cd11 = cdelt1 * np.cos(crota) # RA degrees per column\n cd12 = abs(cdelt2) * np.sign(cdelt1) * np.sin(crota) # RA degrees per row\n cd21 = -abs(cdelt1) * np.sign(cdelt2) * np.sin(crota) # DEC degress per column\n cd22 = cdelt2 * np.cos(crota) # DEC degrees per row\n # Get reference pixels (set these to the middle of the FOV)\n crpix1 = 11 # i.e. see get_datacube_bins (11 is used as the reference point - somewhere in the middle of the FOV)\n crpix2 = slitlength / 2.\n crpix3 = 1.\n # Get the offset\n msgs.warn(\"HACK FOR MAAT SIMS --- Need to obtain offset from header?\")\n off1 = 0.\n off2 = 0.\n off1 /= binspec\n off2 /= binspat\n crpix1 += off1\n crpix2 += off2\n\n # Create a new WCS object.\n msgs.info(\"Generating MAAT WCS\")\n w = wcs.WCS(naxis=3)\n w.wcs.equinox = hdr['EQUINOX']\n w.wcs.name = 'MAAT'\n w.wcs.radesys = 'FK5'\n # Insert the coordinate frame\n w.wcs.cname = ['MAAT RA', 'MAAT DEC', 'MAAT Wavelength']\n w.wcs.cunit = [units.degree, units.degree, units.Angstrom]\n w.wcs.ctype = [\"RA---TAN\", \"DEC--TAN\", \"WAVE\"]\n w.wcs.crval = [ra, dec, wave0] # RA, DEC, and wavelength zeropoints\n w.wcs.crpix = [crpix1, crpix2, crpix3] # RA, DEC, and wavelength reference pixels\n w.wcs.cd = np.array([[cd11, cd12, 0.0], [cd21, cd22, 0.0], [0.0, 0.0, dwv]])\n w.wcs.lonpole = 180.0 # Native longitude of the Celestial pole\n w.wcs.latpole = 0.0 # Native latitude of the Celestial pole\n\n return w",
"def define_wcs(skypos,skyrange,width=False,height=False,verbose=0,\n\t\t\t pixsz=0.000416666666666667):\n\tif verbose:\n\t\tprint_inline('Defining World Coordinate System (WCS).')\n\twcs = pywcs.WCS(naxis=2) # NAXIS = 2\n\timsz = gxt.deg2pix(skypos,skyrange)\n\twcs.wcs.cdelt = np.array([-pixsz,pixsz])\n\twcs.wcs.ctype = ['RA---TAN','DEC--TAN']\n\twcs.wcs.crpix = [(imsz[1]/2.)+0.5,(imsz[0]/2.)+0.5]\n\twcs.wcs.crval = skypos\n\treturn wcs",
"def _sip_fit(params, lon, lat, u, v, w_obj, order, coeff_names): # pragma: no cover\n\n from astropy.modeling.models import SIP, InverseSIP # here to avoid circular import\n\n # unpack params\n crpix = params[0:2]\n cdx = params[2:6].reshape((2, 2))\n a_params = params[6:6+len(coeff_names)]\n b_params = params[6+len(coeff_names):]\n\n # assign to wcs, used for transfomations in this function\n w_obj.wcs.cd = cdx\n w_obj.wcs.crpix = crpix\n\n a_coeff, b_coeff = {}, {}\n for i in range(len(coeff_names)):\n a_coeff['A_' + coeff_names[i]] = a_params[i]\n b_coeff['B_' + coeff_names[i]] = b_params[i]\n\n sip = SIP(crpix=crpix, a_order=order, b_order=order,\n a_coeff=a_coeff, b_coeff=b_coeff)\n fuv, guv = sip(u, v)\n\n xo, yo = np.dot(cdx, np.array([u+fuv-crpix[0], v+guv-crpix[1]]))\n\n # use all pix2world in case `projection` contains distortion table\n x, y = w_obj.all_world2pix(lon, lat, 0)\n x, y = np.dot(w_obj.wcs.cd, (x-w_obj.wcs.crpix[0], y-w_obj.wcs.crpix[1]))\n\n resids = np.concatenate((x-xo, y-yo))\n # to avoid bad restuls if near 360 -> 0 degree crossover\n resids[resids > 180] = 360 - resids[resids > 180]\n resids[resids < -180] = 360 + resids[resids < -180]\n\n return resids",
"def __call__(self, sphere_or_coeffs):\n if self.input_representation == \"spectral\":\n ell_max = sphere_or_coeffs.shape[1] - 1\n resolution = 2 * (ell_max + 1)\n if sphere_or_coeffs.shape[2] != 2*ell_max + 1:\n raise ValueError(\"Axes 1 and 2 must have dimensions \"\n \"(ell_max+1, 2*ell_max+1).\")\n elif self.input_representation == \"spatial\":\n resolution = sphere_or_coeffs.shape[1]\n ell_max = sphere_utils.ell_max_from_resolution(resolution)\n if sphere_or_coeffs.shape[2] != resolution:\n raise ValueError(\"Axes 1 and 2 must have the same dimensions!\")\n else:\n raise ValueError(\"`input_representation` must be either \"\n \"'spectral' or 'spatial'.\")\n\n if sphere_or_coeffs.shape[3] != len(list(self.spins_in)):\n raise ValueError(\"Input axis 3 (spins_in) doesn't match layer's.\")\n\n if self.spectral_pooling and self.spectral_upsampling:\n raise ValueError(\"`spectral_pooling` and `spectral_upsampling` \"\n \"should not be both True.\")\n\n if self.spectral_pooling:\n resolution //= 2\n ell_max = sphere_utils.ell_max_from_resolution(resolution)\n\n # Make sure constants contain all spins for input resolution.\n for spin in set(self.spins_in).union(self.spins_out):\n if not self.transformer.validate(resolution, spin):\n raise ValueError(\"Constants are invalid for given input!\")\n\n num_channels_in = sphere_or_coeffs.shape[-1]\n if self.num_filter_params is None:\n kernel = self._get_kernel(ell_max, num_channels_in)\n else:\n kernel = self._get_localized_kernel(ell_max, num_channels_in)\n\n # Map over the batch dimension.\n vmap_convolution = jax.vmap(\n _spin_spherical_convolution,\n in_axes=(None, 0, None, None, None, None, None, None, None))\n return vmap_convolution(self.transformer,\n sphere_or_coeffs, kernel,\n self.spins_in,\n self.spins_out,\n self.spectral_pooling,\n self.spectral_upsampling,\n self.input_representation,\n self.output_representation)",
"def make_output_wcs(input_models, pscale_ratio=1.0):\n wcslist = [i.meta.wcs for i in input_models]\n for w, i in zip(wcslist, input_models):\n if w.bounding_box is None:\n w.bounding_box = wcs_bbox_from_shape(i.data.shape)\n naxes = wcslist[0].output_frame.naxes\n\n if naxes == 2:\n output_wcs = wcs_from_footprints(input_models, pscale_ratio=pscale_ratio)\n output_wcs.data_size = shape_from_bounding_box(output_wcs.bounding_box)\n else:\n raise RuntimeError(\"Output WCS needs 2 spatial axes. \"\n f\"{wcslist[0]} has {naxes}.\")\n\n # Check that the output data shape has no zero length dimensions\n if not np.product(output_wcs.data_size):\n raise ValueError(\"Invalid output frame shape: \"\n \"{}\".format(output_wcs.data_size))\n\n return output_wcs",
"def calc_wcs(fxn_dict2, call_graph1, parents):\n\n # If the wcs is already known, then nothing to do\n if 'wcs' in fxn_dict2:\n return\n\n # Check for pointer calls\n if fxn_dict2['has_ptr_call']:\n fxn_dict2['wcs'] = 'unbounded'\n return\n\n # Check for recursion\n if fxn_dict2 in parents:\n fxn_dict2['wcs'] = 'unbounded'\n return\n\n # Calculate WCS\n call_max = 0\n for call_dict in fxn_dict2['r_calls']:\n\n # Calculate the WCS for the called function\n parents.append(fxn_dict2)\n calc_wcs(call_dict, call_graph1, parents)\n parents.pop()\n\n # If the called function is unbounded, so is this function\n if call_dict['wcs'] == 'unbounded':\n fxn_dict2['wcs'] = 'unbounded'\n return\n\n # Keep track of the call with the largest stack use\n call_max = max(call_max, call_dict['wcs'])\n\n # Propagate Unresolved Calls\n for unresolved_call in call_dict['unresolved_calls']:\n fxn_dict2['unresolved_calls'].add(unresolved_call)\n\n fxn_dict2['wcs'] = call_max + fxn_dict2['local_stack']",
"def make_gwcs(shape, galactic=False):\n from gwcs import coordinate_frames as cf\n from gwcs import wcs as gwcs_wcs\n\n rho = np.pi / 3.0\n scale = 0.1 / 3600.0 # 0.1 arcsec/pixel in deg/pix\n\n shift_by_crpix = (models.Shift((-shape[1] / 2) + 1)\n & models.Shift((-shape[0] / 2) + 1))\n\n cd_matrix = np.array([[-scale * np.cos(rho), scale * np.sin(rho)],\n [scale * np.sin(rho), scale * np.cos(rho)]])\n\n rotation = models.AffineTransformation2D(cd_matrix, translation=[0, 0])\n rotation.inverse = models.AffineTransformation2D(\n np.linalg.inv(cd_matrix), translation=[0, 0])\n\n tan = models.Pix2Sky_TAN()\n celestial_rotation = models.RotateNative2Celestial(197.8925, -1.36555556,\n 180.0)\n\n det2sky = shift_by_crpix | rotation | tan | celestial_rotation\n det2sky.name = 'linear_transform'\n\n detector_frame = cf.Frame2D(name='detector', axes_names=('x', 'y'),\n unit=(u.pix, u.pix))\n\n if galactic:\n sky_frame = cf.CelestialFrame(reference_frame=coord.Galactic(),\n name='galactic', unit=(u.deg, u.deg))\n else:\n sky_frame = cf.CelestialFrame(reference_frame=coord.ICRS(),\n name='icrs', unit=(u.deg, u.deg))\n\n pipeline = [(detector_frame, det2sky), (sky_frame, None)]\n\n return gwcs_wcs.WCS(pipeline)",
"def _linear_wcs_fit(params, lon, lat, x, y, w_obj): # pragma: no cover\n cd = params[0:4]\n crpix = params[4:6]\n\n w_obj.wcs.cd = ((cd[0], cd[1]), (cd[2], cd[3]))\n w_obj.wcs.crpix = crpix\n lon2, lat2 = w_obj.wcs_pix2world(x, y, 0)\n\n resids = np.concatenate((lon-lon2, lat-lat2))\n resids[resids > 180] = 360 - resids[resids > 180]\n resids[resids < -180] = 360\t+ resids[resids < -180]\n\n return resids",
"def test_sip_hst():\n\n test_file = get_pkg_data_filename(os.path.join(\"data\", \"hst_sip.hdr\"))\n hdr = fits.Header.fromtextfile(test_file)\n crpix1 = hdr[\"CRPIX1\"]\n crpix2 = hdr[\"CRPIX2\"]\n wobj = wcs.WCS(hdr)\n a_pars = dict(**hdr[\"A_*\"])\n b_pars = dict(**hdr[\"B_*\"])\n a_order = a_pars.pop(\"A_ORDER\")\n b_order = b_pars.pop(\"B_ORDER\")\n sip = SIP([crpix1, crpix2], a_order, b_order, a_pars, b_pars)\n coords = [1, 1]\n rel_coords = [1 - crpix1, 1 - crpix2]\n astwcs_result = wobj.sip_pix2foc([coords], 1)[0] - rel_coords\n assert_allclose(sip(1, 1), astwcs_result)\n\n # Test changing of inputs and calling it with keyword argumenrts.\n sip.inputs = (\"r\", \"t\")\n assert_allclose(sip(r=1, t=1), astwcs_result)\n assert_allclose(sip(1, t=1), astwcs_result)\n\n # Test representations\n assert (\n repr(sip) == \"<SIP([<Shift(offset=-2048.)>, <Shift(offset=-1024.)>, \"\n \"<_SIP1D(4, 'A', A_2_0=0.00000855, A_3_0=-0., A_4_0=0., A_0_2=0.00000217, \"\n \"A_0_3=0., A_0_4=0., A_1_1=-0.0000052, A_1_2=-0., A_1_3=-0., \"\n \"A_2_1=-0., A_2_2=0., A_3_1=0.)>, \"\n \"<_SIP1D(4, 'B', B_2_0=-0.00000175, B_3_0=0., B_4_0=-0., B_0_2=-0.00000722, \"\n \"B_0_3=-0., B_0_4=-0., B_1_1=0.00000618, B_1_2=-0., B_1_3=0., \"\n \"B_2_1=-0., B_2_2=-0., B_3_1=-0.)>])>\"\n )\n with conf.set_temp(\"max_width\", 80):\n # fmt: off\n assert str(sip) == (\n \"Model: SIP\\n\"\n \" Model: Shift\\n\"\n \" Inputs: ('x',)\\n\"\n \" Outputs: ('y',)\\n\"\n \" Model set size: 1\\n\"\n \" Parameters:\\n\"\n \" offset\\n\"\n \" -------\\n\"\n \" -2048.0\\n\"\n \"\\n\"\n \" Model: Shift\\n\"\n \" Inputs: ('x',)\\n\"\n \" Outputs: ('y',)\\n\"\n \" Model set size: 1\\n\"\n \" Parameters:\\n\"\n \" offset\\n\"\n \" -------\\n\"\n \" -1024.0\\n\"\n \"\\n\"\n \" Model: _SIP1D\\n\"\n \" Inputs: ('x', 'y')\\n\"\n \" Outputs: ('z',)\\n\"\n \" Model set size: 1\\n\"\n \" Order: 4\\n\"\n \" Coeff. Prefix: A\\n\"\n \" Parameters:\\n\"\n \" A_2_0 A_3_0 ... A_3_1 \\n\"\n \" --------------------- ---------------------- ... ---------------------\\n\"\n \" 8.551277582556502e-06 -4.730444829222791e-10 ... 1.971022971660309e-15\\n\"\n \"\\n\"\n \" Model: _SIP1D\\n\"\n \" Inputs: ('x', 'y')\\n\"\n \" Outputs: ('z',)\\n\"\n \" Model set size: 1\\n\"\n \" Order: 4\\n\"\n \" Coeff. Prefix: B\\n\"\n \" Parameters:\\n\"\n \" B_2_0 B_3_0 ... B_3_1 \\n\"\n \" ---------------------- --------------------- ... ----------------------\\n\"\n \" -1.746491877058669e-06 8.567635427816317e-11 ... -3.779506805487476e-15\\n\"\n )\n # fmt: on\n\n # Test get num of coeffs\n assert sip.sip1d_a.get_num_coeff(1) == 6\n # Test error\n MESSAGE = \"Degree of polynomial must be 2< deg < 9\"\n sip.sip1d_a.order = 1\n with pytest.raises(ValueError, match=MESSAGE):\n sip.sip1d_a.get_num_coeff(1)\n sip.sip1d_a.order = 10\n with pytest.raises(ValueError, match=MESSAGE):\n sip.sip1d_a.get_num_coeff(1)",
"def test_as_multi_wcs(self):\n from .. import builder, collection, multi_wcs, pyramid\n\n reproject_function = reproject.reproject_interp\n outdir = self.work_path(\"as_multi_wcs\")\n\n pio = pyramid.PyramidIO(outdir, default_format=\"fits\")\n bld = builder.Builder(pio)\n coll = collection.SimpleFitsCollection(\n [test_path(\"wcs512.fits.gz\")], hdu_index=0\n )\n proc = multi_wcs.MultiWcsProcessor(coll)\n proc.compute_global_pixelization(bld)\n proc.tile(pio, reproject_function, cli_progress=False, parallel=1)\n bld.write_index_rel_wtml()\n\n args = [\n \"cascade\",\n \"--start\",\n \"1\",\n self.work_path(\"as_multi_wcs\"),\n ]\n cli.entrypoint(args)\n\n self.maybe_test_barycenter(\n self.work_path(\"as_multi_wcs\", \"0\", \"0\", \"0_0.fits\"), self.WCS512_BARYDATA\n )",
"def _check_wcs_structure(self, wcs):\n if wcs is None:\n return False, \"WCS cannot be None.\"\n\n if not wcs.is_celestial:\n return False, \"WCS must be exclusively a celestial WCS.\"\n\n wcs = wcs.deepcopy()\n naxis1, naxis2 = wcs.pixel_shape\n\n # check mapping of corners and CRPIX:\n pts = np.array([[1.0, 1.0], [1.0, naxis2], [naxis1, 1.0],\n [naxis1, naxis2], wcs.wcs.crpix])\n\n sky_all = wcs.all_pix2world(pts, 1)\n foc_all = wcs.pix2foc(pts, 1)\n\n # strip all *known* distortions:\n wcs.cpdis1 = None\n wcs.cpdis2 = None\n wcs.det2im1 = None\n wcs.det2im2 = None\n wcs.sip = None\n\n # check that pix2foc includes no other distortions besides the ones\n # that we have turned off above:\n if not np.allclose(pts, wcs.pix2foc(pts, 1)):\n False, \"'pix2foc' contains unknown distortions\"\n\n wcs.wcs.set()\n\n # check that pix2foc contains all known distortions:\n if not np.allclose(wcs.all_world2pix(sky_all, 1), foc_all, atol=1e-3,\n rtol=0):\n return False, \"'WCS.pix2foc()' does not include all distortions.\"\n\n return True, ''",
"def testWcsFailure(self):\n self.exposure.setWcs(self.tanWcs)\n config = AstrometryTask.ConfigClass()\n config.wcsFitter.order = 2\n config.wcsFitter.maxScatterArcsec = 0.0 # To ensure a WCS failure\n sourceSchema = afwTable.SourceTable.makeMinimalSchema()\n measBase.SingleFrameMeasurementTask(schema=sourceSchema) # expand the schema\n # schema must be passed to the solver task constructor\n solver = AstrometryTask(config=config, refObjLoader=self.refObjLoader, schema=sourceSchema)\n sourceCat = self.makeSourceCat(self.tanWcs, sourceSchema=sourceSchema, doScatterCentroids=True)\n with self.assertLogs(level=logging.WARNING) as cm:\n results = solver.run(\n sourceCat=sourceCat,\n exposure=self.exposure,\n )\n logOutput = \";\".join(cm.output)\n self.assertIn(\"WCS fit failed.\", logOutput)\n self.assertIn(\"Setting exposure's WCS to None and coord_ra & coord_dec cols in sourceCat to nan.\",\n logOutput)\n # Check that matches is set to None, the sourceCat coord cols are all\n # set to nan and that the WCS attached to the exposure is set to None.\n self.assertTrue(results.matches is None)\n self.assertTrue(np.all(np.isnan(sourceCat[\"coord_ra\"])))\n self.assertTrue(np.all(np.isnan(sourceCat[\"coord_dec\"])))\n self.assertTrue(self.exposure.getWcs() is None)\n self.assertTrue(results.scatterOnSky is None)\n self.assertTrue(results.matches is None)",
"def reproject(wcs1, wcs2, origin=0):\n\n def _reproject(x, y):\n sky = wcs1.forward_transform(x, y)\n return wcs2.backward_transform(*sky)\n return _reproject",
"def parse_coordinates(self):\n header = self.header\n wcs = WCS()\n try:\n wcs.crval = header['crval1'], header['crval2']\n wcs.crpix = header['crpix1'] - 1, header['crpix2'] - 1\n wcs.cdelt = header['cdelt1'], header['cdelt2']\n except KeyError:\n msg = \"Coordinate system not specified in FITS\"\n logger.error(msg)\n raise TypeError(msg)\n try:\n wcs.ctype = header['ctype1'], header['ctype2']\n except KeyError:\n wcs.ctype = 'unknown', 'unknown'\n try:\n wcs.crota = float(header['crota1']), float(header['crota2'])\n except KeyError:\n wcs.crota = 0., 0.\n try:\n wcs.cunit = header['cunit1'], header['cunit2']\n except KeyError:\n # The \"Definition of the Flexible Image Transport System\", version\n # 3.0, tells us that \"units for celestial coordinate systems defined\n # in this Standard must be degrees\", so we assume that if nothing else\n # is specifiedj\n msg = \"WCS units unknown; using degrees\"\n logger.warning(msg)\n wcs.cunit = 'deg', 'deg'\n return wcs",
"def convertToWCS(x, y, wcs_hdr):\n w = WCS(wcs_hdr)\n xy_coords = np.column_stack([x, y])\n \n # FITS convention, so use Fortran-like 1-based origin\n world = w.all_pix2world(xy_coords, 1)\n ra, dec = world[:, 0], world[:, 1]\n \n return ra, dec",
"def init_compact_source(\n sky_coord, frame, observations,\n):\n\n # get PSF-corrected center pixel spectrum\n spectrum = get_pixel_spectrum(sky_coord, observations, correct_psf=True)\n\n # position in frame coordinates\n center = frame.get_pixel(sky_coord)\n center_index = np.round(center).astype(np.int)\n\n # morphology initialized as a point source\n morph_ = frame.psf.get_model().mean(axis=0)\n origin = (\n center_index[0] - (morph_.shape[0] // 2),\n center_index[1] - (morph_.shape[1] // 2),\n )\n bbox_ = Box(morph_.shape, origin=origin)\n\n # adjust box size to conform with extended sources\n size = max(morph_.shape)\n boxsize = get_minimal_boxsize(size)\n morph = np.zeros((boxsize, boxsize))\n origin = (\n center_index[0] - (morph.shape[0] // 2),\n center_index[1] - (morph.shape[1] // 2),\n )\n bbox = Box(morph.shape, origin=origin)\n\n slices = overlapped_slices(bbox, bbox_)\n morph[slices[0]] = morph_[slices[1]]\n\n # apply max normalization\n morph_max = morph.max()\n morph /= morph_max\n spectrum *= morph_max\n\n # expand to full bbox\n bbox = frame.bbox[0] @ bbox\n\n return spectrum, morph, bbox",
"def make_wcs_from_hpx(self, sum_ebins=False, proj='CAR', oversample=2,\n normalize=True):\n self._wcs_proj = proj\n self._wcs_oversample = oversample\n self._wcs_2d = self.hpx.make_wcs(2, proj=proj, oversample=oversample)\n self._hpx2wcs = HpxToWcsMapping(self.hpx, self._wcs_2d)\n wcs, wcs_data = self.convert_to_cached_wcs(self.counts, sum_ebins,\n normalize)\n return wcs, wcs_data",
"def check_crs(input_crs, return_rasterio=False):\n if not isinstance(input_crs, pyproj.CRS) and input_crs is not None:\n out_crs = pyproj.CRS(input_crs)\n else:\n out_crs = input_crs\n\n if return_rasterio:\n if LooseVersion(rasterio.__gdal_version__) >= LooseVersion(\"3.0.0\"):\n out_crs = rasterio.crs.CRS.from_wkt(out_crs.to_wkt())\n else:\n out_crs = rasterio.crs.CRS.from_wkt(out_crs.to_wkt(\"WKT1_GDAL\"))\n\n return out_crs",
"def wcs(self):\n model = self.model\n return gwcs.WCS(forward_transform=model,\n input_frame=_generate_generic_frame(model.n_inputs, u.pix),\n output_frame=self.frame)",
"def find_sky_transform(cr):\n\n assert isinstance(cr, pycrates.IMAGECrate)\n\n axes = cr.get_axisnames()\n if axes == []:\n # Assume that if there's no axis names then there's no\n # transform data.\n return None\n\n # The simple case is if there's a \"vector\" column (e.g. SKY)\n # with a 2D transform.\n #\n try:\n tr = cr.get_transform(axes[0])\n except KeyError:\n # For now return None, but could try something like\n # iterating through the other axis names, if there\n # are any.\n return None\n\n if isinstance(tr, pytransform.LINEAR2DTransform):\n return tr\n\n elif not isinstance(tr, pytransform.LINEARTransform):\n # For now return None, but could try something like\n # iterating through the other axis names, if there\n # are any.\n return None\n\n # Assume that the second component is the second\n # axis.\n xtr = tr\n try:\n ytr = cr.get_transform(axes[1])\n except KeyError:\n return None\n\n # Create a 2D transform based on the two 1D transforms.\n #\n trs = [xtr, ytr]\n scales = [itr.get_parameter('SCALE').get_value()\n for itr in trs]\n offsets = [itr.get_parameter('OFFSET').get_value()\n for itr in trs]\n out = pytransform.LINEAR2DTransform()\n out.get_parameter('ROTATION').set_value(0)\n out.get_parameter('SCALE').set_value(scales)\n out.get_parameter('OFFSET').set_value(offsets)\n return out",
"def test_wcs_extras():\n data = np.ones([6, 6], dtype=np.float64)\n header = {'CRVAL1': 0,\n 'CRVAL2': 0,\n 'CRPIX1': 5,\n 'CRPIX2': 5,\n 'CDELT1': 10,\n 'CDELT2': 10,\n 'CUNIT1': 'arcsec',\n 'CUNIT2': 'arcsec',\n 'PC1_1': 0,\n 'PC1_2': -1,\n 'PC2_1': 1,\n 'PC2_2': 0,\n 'NAXIS1': 6,\n 'NAXIS2': 6,\n 'CTYPE1': 'HPLN-TAN',\n 'CTYPE2': 'HPLT-TAN',\n 'date-obs': '1970-01-01T00:00:00',\n 'obsrvtry': 'Foo',\n 'detector': 'bar',\n 'wavelnth': 10,\n 'waveunit': 'm',\n 'hglt_obs': 0,\n 'hgln_obs': 0,\n 'dsun_obs': 10,\n 'rsun_ref': 690000000}\n generic_map = sunpy.map.Map((data, header))\n\n wcs = generic_map.wcs\n\n assert wcs.heliographic_observer.lat.value == 0\n assert wcs.heliographic_observer.lon.value == 0\n assert wcs.heliographic_observer.radius.value == 10\n assert wcs.rsun.value == header['rsun_ref']\n\n result = solar_wcs_frame_mapping(wcs)\n\n assert isinstance(result, Helioprojective)\n assert result.observer.lat.value == 0\n assert result.observer.lon.value == 0\n assert result.observer.radius.value == 10\n assert result.rsun.value == header['rsun_ref']",
"def include_wcs_in_masks(input_images):\n img_list = [astroim.Astroim(im_name, memmap=True) for im_name in input_images]\n mask_names = [im.primary_header.get(\"MASK\") for im in img_list]\n output = []\n for im_object, mask_name in zip(img_list, mask_names):\n with fits.open(mask_name, 'readonly') as mask:\n mask_header = im_object.chips[0].header.hdr\n mask_data = mask[0].data.copy()\n mask_data[mask_data>0] = 1\n _, path = tempfile.mkstemp(suffix=\".fits\")\n fits.writeto(path, mask_data * 1., mask_header, clobber=True)\n output.append(path)\n return output",
"def setup_input_srs(input_dataset, options):\n\n input_srs = None\n input_srs_wkt = None\n\n if options.s_srs:\n input_srs = osr.SpatialReference()\n input_srs.SetFromUserInput(options.s_srs)\n input_srs_wkt = input_srs.ExportToWkt()\n else:\n input_srs_wkt = input_dataset.GetProjection()\n if not input_srs_wkt and input_dataset.GetGCPCount() != 0:\n input_srs_wkt = input_dataset.GetGCPProjection()\n if input_srs_wkt:\n input_srs = osr.SpatialReference()\n input_srs.ImportFromWkt(input_srs_wkt)\n\n return input_srs, input_srs_wkt"
] |
[
"0.5859561",
"0.5762514",
"0.56438285",
"0.56424916",
"0.5291895",
"0.5234622",
"0.5210285",
"0.5084179",
"0.50814235",
"0.50549525",
"0.49854365",
"0.49420464",
"0.49419275",
"0.4907917",
"0.49040973",
"0.48516577",
"0.48470798",
"0.48208737",
"0.4797842",
"0.47903037",
"0.47534636",
"0.4751714",
"0.47516257",
"0.47470832",
"0.46625125",
"0.46132857",
"0.45513272",
"0.45449418",
"0.45326382",
"0.4470431"
] |
0.7024653
|
0
|
The function creates a new dictionary entry 'AUX' that includes the starting values of each parameter and the number of covariates.
|
def auxiliary(dict_):
dict_['AUX'] = {}
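    # A model is deterministic when every distributional coefficient is zero.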
    is_deterministic = dict_['DIST']['coeff'] == [0.0] * len(dict_['DIST']['coeff'])
    for key_ in ['UNTREATED', 'TREATED', 'COST', 'DIST']:
        dict_[key_]['all'] = np.array(dict_[key_]['coeff'])
# Number of covariates
num_covars_out = len(dict_['TREATED']['all'])
num_covars_cost = len(dict_['COST']['all'])
dict_['AUX']['num_covars_out'] = num_covars_out
dict_['AUX']['num_covars_cost'] = num_covars_cost
# Number of parameters
dict_['AUX']['num_paras'] = 2 * num_covars_out + num_covars_cost + 2 + 2
# Starting values
dict_['AUX']['init_values'] = []
for key_ in ['TREATED', 'UNTREATED', 'COST', 'DIST']:
dict_['AUX']['init_values'] += dict_[key_]['all'].tolist()
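        # Keep only the consolidated 'all' array (and 'types'); drop the raw keys.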
for j in sorted(dict_[key_].keys()):
if j in ['all', 'types']:
pass
else:
del dict_[key_][j]
dict_['DETERMINISTIC'] = is_deterministic
dict_ = check_types(dict_)
return dict_
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def _defineNAAuxVars(self, aux_vars):\n # Initialise aux var itesms as empty lists unless already defined when\n # setting up independent variables\n for item in (\"ANAME\", \"AMISS\", \"ASCAL\", \"A\"):\n if not item in self.na_dict:\n self.na_dict[item] = [] \n\n for var in aux_vars:\n name = xarray_utils.getBestName(var)\n self.na_dict[\"ANAME\"].append(name)\n miss = xarray_utils.getMissingValue(var)\n miss = self._resolve_float(miss)\n\n self.na_dict[\"AMISS\"].append(miss)\n self.na_dict[\"ASCAL\"].append(1)\n # Populate the variable list with the array\n self.na_dict[\"A\"].append(xarray_utils.getArrayAsList(var, missing_value=miss))\n\n self.na_dict[\"NAUXV\"] = len(self.na_dict[\"A\"])",
"def build_coeff_dict(l):\n coeff_dict = {}\n for triplet in l:\n coeff_dict[triplet[0]] = triplet[2]\n return coeff_dict",
"def _init_dict(self):\n dict_ord = self.MIN_VALID\n\n for da in self.train_das:\n for dai in da:\n if dai.name not in self.dict_slot:\n self.dict_slot[dai.name] = dict_ord\n dict_ord += 1\n if dai.value not in self.dict_value:\n self.dict_value[dai.value] = dict_ord\n dict_ord += 1\n\n for tree in self.train_trees:\n for t_lemma, formeme in tree.nodes:\n if t_lemma not in self.dict_t_lemma:\n self.dict_t_lemma[t_lemma] = dict_ord\n dict_ord += 1\n if formeme not in self.dict_formeme:\n self.dict_formeme[formeme] = dict_ord\n dict_ord += 1\n\n self.dict_size = dict_ord",
"def createDict( self ):\n self.d = {}\n self.d['comp1'] = compensation_channel('comp1', 0, (-479.0, -10.0))\n self.d['comp2'] = compensation_channel('comp2', 1, (-479.0, -10.0))\n self.addCalibration()",
"def getInitParams(self):\n paramDict = BoostDistribution.getInitParams(self)\n paramDict['apex' ] = self.apex\n paramDict['min' ] = self.min\n paramDict['max' ] = self.max\n return paramDict",
"def gen_params(self) -> Dict:\n param_dict: Dict = {}\n\n gX_name: List[str] = ['g_leak', 'g_kvhh', 'g_cav', 'g_kca', 'g_nap']\n gX_log: np.ndarray = 4 * np.random.rand(5) - 2 # from -2 to 2\n gX: np.ndarray = (10 * np.ones(5)) ** gX_log # 0.01 ~ 100\n gX_itr: Iterator = zip(gX_name, gX)\n\n tCa_log: float = 2 * np.random.rand(1) + 1 # from 1 to 3\n tCa: float = 10 ** tCa_log # 10 ~ 1000\n tCa_dict: Dict = {'t_ca': tCa}\n\n param_dict.update(gX_itr)\n param_dict.update(tCa_dict)\n return param_dict",
"def init_dict(self, train_das, dict_ord=None):\n\n if dict_ord is None:\n dict_ord = self.MIN_VALID\n\n for da in train_das:\n for dai in da:\n if dai.slot not in self.dict_slot:\n self.dict_slot[dai.slot] = dict_ord\n dict_ord += 1\n if dai.value not in self.dict_value:\n self.dict_value[dai.value] = dict_ord\n dict_ord += 1\n\n return dict_ord",
"def __init__(self):\r\n\r\n self.maxid = 0\r\n self.alphas = { 0 : {}, 1 : {}, 2 : {} }\r\n self.positions = {}",
"def _make_observation(self) -> Dict[str, np.ndarray]:\n return {\n \"cur_pos\": np.array([self.cur_pos], dtype=int),\n }",
"def createIndivitual(self) -> Dict[str, Any]:\n ind = {\n \"genome\": {\n key: numpy.random.randint(0, len(value), size=self.ref_count[key]) for (\n key, value) in self.grammar.items()\n },\n \"fitness\": None,\n \"fenotype\": None,\n }\n return ind",
"def getInitParams(self):\n paramDict = BoostDistribution.getInitParams(self)\n paramDict['low' ] = self.low\n paramDict['high' ] = self.high\n paramDict['alpha'] = self.alpha\n paramDict['beta' ] = self.beta\n return paramDict",
"def getInitParams(self):\n paramDict = BoostDistribution.getInitParams(self)\n paramDict['lambda'] = self.lambdaVar\n paramDict['k' ] = self.k\n paramDict['low' ] = self.low\n return paramDict",
"def __init__(self,n):\n\t\tself._dictOut={}\n\t\tself._dictIn = {}\n\t\tfor i in range(n):\n\t\t\tself._dictOut[i]=[]\n\t\t\tself._dictIn[i] = []",
"def getInitParams(self):\n paramDict = BoostDistribution.getInitParams(self)\n paramDict['low'] = self.low\n paramDict['alpha'] = self.alpha\n paramDict['beta'] = self.beta\n return paramDict",
"def getInitParams(self):\n paramDict = {}\n paramDict['upperBoundUsed' ] = self.upperBoundUsed\n paramDict['lowerBoundUsed' ] = self.lowerBoundUsed\n paramDict['hasInfiniteBound'] = self.hasInfiniteBound\n paramDict['upperBound' ] = self.upperBound\n paramDict['lowerBound' ] = self.lowerBound\n paramDict['adjustmentType' ] = self.__adjustmentType\n paramDict['dimensionality' ] = self.dimensionality\n return paramDict",
"def getInitParams(self):\n paramDict = BoostDistribution.getInitParams(self)\n paramDict['mu' ] = self.mu\n return paramDict",
"def getCrowDistDict(self):\n retDict = Distribution.getCrowDistDict(self)\n retDict['lambda'] = self.lambdaVar\n retDict['k'] = self.k\n retDict['low'] = self.low\n return retDict",
"def getInitParams(self):\n paramDict = Distribution.getInitParams(self)\n paramDict['mapping'] = self.mapping\n paramDict['values'] = self.values\n return paramDict",
"def prepare_set(N_ITEMS, N_FEATURES, dict_data, sol_cont):\n\tX = np.zeros((N_ITEMS, N_FEATURES))\n\tfor i in range(N_ITEMS):\n\t\t\tpositive_syn, negative_syn = countSynergies(str(i), dict_data['polynomial_gains'])\n\t\t\tX[i, 0] = sol_cont[i]\n\t\t\tX[i, 1] = dict_data['profits'][i]\n\t\t\tX[i, 2] = dict_data['costs'][i][0]/dict_data['budget']\n\t\t\tX[i, 3] = dict_data['costs'][i][1]/dict_data['budget']\n\t\t\tX[i, 4] = positive_syn\n\t\t\tX[i, 5] = negative_syn\n\treturn X",
"def initDictionnary(self):\n partitions = self.vocabulary.getPartitions()\n for partition in partitions:\n for mod in partition.modalities:\n self.summaryDict[partition.getAttName() + \" : \" + mod] = 0.0\n self.summaryFilteredDict[partition.getAttName() + \" : \" + mod] = 0.0",
"def _readCharAuxVariablesHeaderSection(self):\n self.NAUXV = nappy.utils.text_parser.readItemFromLine(self.file.readline(), int)\n self.NAUXC = nappy.utils.text_parser.readItemFromLine(self.file.readline(), int)\n nonCharAuxVars = self.NAUXV - self.NAUXC\n if self.NAUXV > 0:\n self.ASCAL = nappy.utils.text_parser.readItemsFromUnknownLines(self.file, nonCharAuxVars, float)\n self.AMISS = nappy.utils.text_parser.readItemsFromUnknownLines(self.file, nonCharAuxVars, float)\n self.LENA = nappy.utils.text_parser.readItemsFromUnknownLines(self.file, self.NAUXC, int)\n for i in range(nonCharAuxVars):\n self.LENA.insert(0, None)\n self.AMISS = self.AMISS + nappy.utils.text_parser.readItemsFromUnknownLines(self.file, self.NAUXC, str) \n self.ANAME = nappy.utils.text_parser.readItemsFromLines(self._readLines(self.NAUXV), self.NAUXV, str)",
"def _build_param_dict(self, **kwargs):\n \n if 'correlation_strength' in kwargs.keys():\n\n correlation_strength = kwargs['correlation_strength']\n if custom_len(correlation_strength) > 1:\n try:\n self.correlation_strength_abcissa = kwargs['correlation_strength_abcissa']\n except KeyError:\n msg = (\"If correlation_strength keyword is passed to the constructor, \\n\" + \n \"you must also pass a correlation_strength_abcissa keyword argument \" + \n \"storing an array of the same length as correlation_strength.\")\n raise(msg)\n else:\n self.correlation_strength_abcissa = [0]\n correlation_strength = [correlation_strength]\n\n self._param_dict_keys = ['correlation_param' + str(i+1) for i in range(len(correlation_strength))]\n self.param_dict = {key:value for key, value in zip(self._param_dict_keys, correlation_strength)}\n else:\n self.param_dict = {'correlation_param1': 1.0}\n self._set_correlation_strength()",
"def getInitParams(self):\n paramDict = BoostDistribution.getInitParams(self)\n paramDict['n' ] = self.n\n paramDict['p' ] = self.p\n return paramDict",
"def prime_error_rate_dic(aa_order):\n aa_error_rate_dic = {}\n for i in aa_order:\n #first element of definitions are the from mutation rate\n #and the second element is the to mutation rate\n aa_error_rate_dic[i] = [0.0, 0.0]\n return aa_error_rate_dic",
"def getInitParams(self):\n paramDict = super().getInitParams()\n paramDict['method'] = self.method\n paramDict['dimension'] = self.dimension\n paramDict['rank'] = self.rank\n paramDict['mu'] = self.mu\n paramDict['covariance'] = self.covariance\n return paramDict",
"def variables(self) -> OrderedDict:\n return OrderedDict({'mu': self.mu, 'sig': self.sig})",
"def gen_params(self) -> Dict:\n param_dict: Dict = {}\n\n gX_name: List[str] = ['g_leak', 'g_nav', 'g_kvhh', 'g_kva', 'g_kvsi', \n 'g_cav', 'g_kca', 'g_nap', 'g_kir']\n gX_log: np.ndarray = 4 * np.random.rand(9) - 2 # from -2 to 2\n gX: np.ndarray = (10 * np.ones(9)) ** gX_log # 0.01 ~ 100\n gX_itr: Iterator = zip(gX_name, gX)\n\n gR_name: List[str] = ['g_ampar', 'g_nmdar', 'g_gabar']\n gR_log: np.ndarray = 4 * np.random.rand(3) - 3 # from -3 to 1\n gR: np.ndarray = (10 * np.ones(3)) ** gR_log # 0.001 ~ 10\n gR_itr: Iterator = zip(gR_name, gR)\n\n tCa_log: float = 2 * np.random.rand(1) + 1 # from 1 to 3\n tCa: float = 10 ** tCa_log # 10 ~ 1000\n tCa_dict: Dict = {'t_ca': tCa}\n\n param_dict.update(gX_itr)\n param_dict.update(gR_itr)\n param_dict.update(tCa_dict)\n return param_dict",
"def oxy_dict(calib, P, K, T, S, V):\n\n \"\"\"Assumes all are arrays, or none are arrays. Need way to test for them. \"\"\"\n try:\n oxygen = []\n for P_x, K_x, T_x, S_x, V_x in zip(P, K, T, S, V):\n temp = (calib['Soc'] * (V_x + calib['offset'])\n * (1.0 + calib['A'] * T_x + calib['B'] * math.pow(T_x,2) + calib['C'] * math.pow(T_x,3) )\n * OxSol(T_x,S_x)\n * math.exp(calib['E'] * P_x / K_x)) #foo\n temp = round(temp,4)\n oxygen.append(temp)\n #Single mode.\n except:\n oxygen = (calib['Soc'] * (V + calib['offset'])\n * (1.0 + calib['A'] * T + calib['B'] * math.pow(T,2) + calib['C'] * math.pow(T,3) )\n * OxSol(T,S)\n * math.exp(calib['E'] * P / K))\n return oxygen",
"def append_counting(dict):\n row_c = []\n # for nuc in NUC: #Scans all the elements and adds it to the table.\n # row_c.append(dict[nuc])\n for mot in MOT:\n row_c.append(dict[mot])\n for nuc_nr in NUC_NR :\n row_c.append(dict[nuc_nr + \"_NR\"])\n # #row.extend([dict[\"AA_NR\"], dict[\"TT_NR\"], dict[\"CC_NR\"], dict[\"GG_NR\"]])\n return row_c",
"def init_dict(self, train_das, dict_ord=None):\n if dict_ord is None:\n dict_ord = self.MIN_VALID\n\n for da in train_das:\n for dai in da:\n if dai.da_type not in self.dict_act:\n self.dict_act[dai.da_type] = dict_ord\n dict_ord += 1\n if dai.slot not in self.dict_slot:\n self.dict_slot[dai.slot] = dict_ord\n dict_ord += 1\n if dai.value not in self.dict_value:\n self.dict_value[dai.value] = dict_ord\n dict_ord += 1\n\n return dict_ord"
] |
[
"0.6057564",
"0.5864776",
"0.5754266",
"0.5638249",
"0.5591831",
"0.5581381",
"0.5527932",
"0.54958975",
"0.547379",
"0.54407537",
"0.5427459",
"0.5399545",
"0.53982776",
"0.5394155",
"0.5391664",
"0.5377814",
"0.536098",
"0.53573656",
"0.53436345",
"0.53409773",
"0.53394157",
"0.53355145",
"0.5322089",
"0.5322028",
"0.5312958",
"0.5308927",
"0.5301541",
"0.52996945",
"0.5283027",
"0.528188"
] |
0.7014374
|
0
|
Train the hotel cluster embeddings model on data saved in ../processed.
|
def main(input_filepath, output_model_filepath):
logger = logging.getLogger(__name__)
logger.info('training hotel cluster embeddings models')
input_file = os.path.join(input_filepath, 'sentences.pkl')
output_model_file = os.path.join(output_model_filepath, 'hotelcluster2vec.bin')
train(input_file, output_model_file)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def load_data_and_embedding():\n\n # Load data\n df_data = pd.read_csv('../new_data/train_ids_and_labels_1400.txt',nrows=10000)\n y = df_data['class'] - 1 # class (0 ~ 18)\n X = df_data.drop(['class'], axis=1).values\n\n # Transform to binary class matrix\n y = to_categorical(y.values)\n\n # Randomly shuffle data\n np.random.seed(10)\n\n shuffle_indices = np.random.permutation(range(len(y)))\n X_shuffled = X[shuffle_indices]\n y_shuffled = y[shuffle_indices]\n\n # Split to train/test set\n # TODO: This is very crude, should use cross validation\n val_sample_index = -1 * int(0.2 * len(y))\n X_train, X_val = X_shuffled[:val_sample_index], X_shuffled[val_sample_index:]\n y_train, y_val = y_shuffled[:val_sample_index], y_shuffled[val_sample_index:]\n\n del df_data, X, y, X_shuffled, y_shuffled\n\n embedding_matrix = np.load(\"../embedding/word-embedding-200d-mc5.npy\")\n\n return X_train, y_train, X_val, y_val,embedding_matrix",
"def train(self, epoches, batch_size):\n def _padding_batch(x_inputs, y_inputs):\n # x_inputs is 2-d array\n max_length = max([len(x) for x in x_inputs])\n real_length = [len(x) for x in x_inputs]\n\n x_outputs = []\n for x in x_inputs:\n padding_size = max_length - len(x)\n x_outputs.append(\n np.concatenate([np.array(x), np.zeros(padding_size,\n dtype=\"int32\")]))\n return np.array(x_outputs), np.array(y_inputs), real_length\n\n\n x_inputs, y_inputs = self.convert_data_to_model_input(self.train_data)\n test_x_inputs, test_y_inputs = self.convert_data_to_model_input(self.test_data, add_unknow_words=False)\n test_x_inputs, test_y_inputs, test_real_length = _padding_batch(test_x_inputs,\n test_y_inputs)\n\n self.save_lexicon(\"log/lexicon\")\n\n train_x_inputs = x_inputs[0:11000]\n train_y_inputs = y_inputs[0:11000]\n\n validate_x_inputs = x_inputs[11000:]\n validate_y_inputs = y_inputs[11000:]\n validate_x_inputs, validate_y_inputs, validate_real_length = _padding_batch(\n validate_x_inputs, validate_y_inputs)\n\n assert len(train_y_inputs) == len(train_x_inputs)\n assert len(validate_y_inputs) == len(validate_x_inputs)\n print(\"train {} validate {} test {}\".format(len(train_y_inputs),\n len(validate_y_inputs), len(test_y_inputs)))\n assert len(self.vocab) == len(self.embeddings)\n\n # do training\n batches = len(train_y_inputs) // batch_size\n\n rnn_model = RnnTextClassifyModel(\n class_number=len(self.labels), learning_rate=0.01,\n gradients_norm=5, keep_rate=0.5, vocab_size=len(self.vocab),\n embedding_size=self.embedding_size, hidden_units_size=128)\n\n with tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n\n # must assigned\n embedding_input = tf.constant(np.array(self.embeddings), dtype=tf.float32)\n assign_embedding_op = tf.assign(rnn_model.embeddings, embedding_input)\n embedding_in_graph = sess.run(assign_embedding_op);\n\n global_step = 0\n for epoch in range(epoches):\n print(\"training @ epoch \", epoch)\n for i in range(batches):\n\n x_inputs_batch = train_x_inputs[i * batch_size:(i+1) *\n batch_size]\n y_inputs_batch = train_y_inputs[i * batch_size:(i+1) *\n batch_size]\n x_inputs_batch, y_inputs_batch, real_length = _padding_batch(\n x_inputs_batch, y_inputs_batch)\n\n loss_val, _ = sess.run(\n [rnn_model.loss, rnn_model.train_op],\n {rnn_model.x_holder:x_inputs_batch,\n rnn_model.y_holder:y_inputs_batch,\n rnn_model.sequence_length: real_length})\n\n print(\"loss {} @ step {}\".format(loss_val, global_step))\n\n #saver.save(sess, \"log/cnn_model\", global_step=global_step)\n global_step += 1\n\n if global_step % 100 == 0:\n print(\"______validating\")\n accuracy_val = sess.run(rnn_model.accuracy,\n {rnn_model.x_holder: validate_x_inputs,\n rnn_model.y_holder: validate_y_inputs,\n rnn_model.sequence_length: validate_real_length,\n rnn_model.keep_rate : 1.0})\n print(\"______valiation_accuracy {} at step {}\".format(accuracy_val,\n global_step))\n\n accuracy_val = sess.run(rnn_model.accuracy,\n {rnn_model.x_holder: x_inputs_batch,\n rnn_model.y_holder: y_inputs_batch,\n rnn_model.sequence_length: real_length,\n rnn_model.keep_rate : 1.0})\n print(\"______train_accuracy {} at step {}\".format(accuracy_val,\n global_step))\n\n\n if batches * batch_size < len(train_y_inputs):\n x_inputs_batch = train_x_inputs[batches * batch_size:]\n y_inputs_batch = train_y_inputs[batches * batch_size:]\n x_inputs_batch, y_inputs_batch, real_length = _padding_batch(\n x_inputs_batch, y_inputs_batch)\n\n loss_val, _ = sess.run(\n [rnn_model.loss, rnn_model.train_op],\n 
{rnn_model.x_holder:x_inputs_batch,\n rnn_model.y_holder:y_inputs_batch,\n rnn_model.sequence_length: real_length})\n\n print(\"loss {} @ step {}\".format(loss_val, global_step))\n global_step += 1\n\n # do evaluate on test data\n print(\"______test accuracy on epoch \", epoch)\n accuracy_val = sess.run(rnn_model.accuracy,\n {rnn_model.x_holder: test_x_inputs,\n rnn_model.y_holder: test_y_inputs,\n rnn_model.sequence_length: test_real_length,\n rnn_model.keep_rate : 1.0})\n\n print(\"______test_accuracy {} at epoch {}\".format(accuracy_val,\n epoch))",
"def train_routine(training_file, output_folder):\n if output_folder[-1] != '/':\n output_folder += '/'\n\n svm_file = output_folder + 'svm.txt'\n centroid_file = output_folder + 'centroids.txt'\n ids_file = output_folder + 'ids.txt'\n\n surf = cv2.SURF(250, extended=False)\n categories = dict()\n ids = dict()\n id = 1\n features = list()\n\n print \"Extracting features\"\n for line in open(training_file):\n try:\n category, path = line.split(';')\n except:\n print \"Error: File not in proper format. Ensure: <category/class name>; <path to image of said category>\"\n sys.exit(0)\n path = path.strip()\n\n try:\n img = cv2.imread(path)\n #img = cv2.resize(img, (500, 500))\n except Exception as e:\n print e\n continue\n\n keypoints, descriptors = surf.detectAndCompute(img, None)\n\n if not category in categories:\n categories[category] = Category(label=category)\n ids[category] = id\n id += 1\n categories[category].add_feature(descriptors)\n\n #for category in categories:\n #f = categories[category].yield_features()\n ##features.extend(f)\n #for i in f:\n #features.extend(i)\n\n print \"Calculating centroids\"\n #np_features = numpy.array(features)\n #print \"Features: \", np_features.shape\n #centroids, labels = kmeans2(np_features, FEATURE_TYPES)\n centroids = helpers.loadObject(output_folder + 'centroids.txt')\n print centroids.shape\n\n print \"Forming bag of words\"\n X, Y = [], []\n for category in categories:\n categories[category].calc_bagofwords(centroids)\n for bow in categories[category].bagofwords:\n X.append(bow)\n Y.append(ids[category])\n print \"Fitting linear SVMs onto the bag of words\"\n lin_clf = svm.LinearSVC()\n lin_clf.fit(X, Y)\n\n helpers.saveObject(lin_clf, svm_file)\n helpers.saveObject(centroids, centroid_file)\n helpers.saveObject(ids, ids_file)",
"def train(self, x_train, y_train):\n\n # convert input to format for classifier\n list_of_embeddings = list(x_train[self.embeddings_col])\n x_train = np.array([[float(i) for i in embedding.strip('[]').split()] for embedding in list_of_embeddings])\n\n # discard fold ID column from labels\n review_groups = [col for col in y_train.columns if not col=='k']\n\n for review_group in tqdm(review_groups, desc='Train Review Groups'):\n\n # pull label column\n labels = y_train[review_group]\n\n # logistic classifier\n classifier = SGDClassifier(loss=\"log\", alpha=self.alpha,\n l1_ratio = self.l1_ratio, penalty=\"elasticnet\").fit(x_train, labels)\n\n # save the model in dictionary of models\n self.models[review_group] = classifier",
"def train(self):\n\t\tself.model.fit(self.training_data, self.training_labels)",
"def train(self):\n params = self.params\n self.embedder.train()\n self.proj.train()\n\n # training variables\n losses = []\n ns = 0 # number of sentences\n nw = 0 # number of words\n t = time.time()\n\n iterator = self.get_iterator('train')\n lang_id = params.lang2id['en']\n\n while True:\n\n # batch\n try:\n batch = next(iterator)\n except StopIteration:\n break\n if self.n_sent == 1:\n (x, lengths), idx = batch\n x, lengths = truncate(x, lengths, params.max_len, params.eos_index)\n else:\n (sent1, len1), (sent2, len2), idx = batch\n sent1, len1 = truncate(sent1, len1, params.max_len, params.eos_index)\n sent2, len2 = truncate(sent2, len2, params.max_len, params.eos_index)\n x, lengths, _, _ = concat_batches(sent1, len1, lang_id, sent2, len2, lang_id, params.pad_index, params.eos_index, reset_positions=False)\n y = self.data['train']['y'][idx]\n bs = len(lengths)\n\n # cuda\n x, y, lengths = to_cuda(x, y, lengths)\n\n # loss\n output = self.proj(self.embedder.get_embeddings(x, lengths, positions=None, langs=None))\n if self.is_classif:\n loss = F.cross_entropy(output, y, weight=self.weights)\n else:\n loss = F.mse_loss(output.squeeze(1), y.float())\n\n # backward / optimization\n self.optimizer_e.zero_grad()\n self.optimizer_p.zero_grad()\n loss.backward()\n self.optimizer_e.step()\n self.optimizer_p.step()\n\n # update statistics\n ns += bs\n nw += lengths.sum().item()\n losses.append(loss.item())\n\n # log\n if ns != 0 and ns % (10 * bs) < bs:\n logger.info(\n \"GLUE - %s - Epoch %s - Train iter %7i - %.1f words/s - %s Loss: %.4f\"\n % (self.task, self.epoch, ns, nw / (time.time() - t), 'XE' if self.is_classif else 'MSE', sum(losses) / len(losses))\n )\n nw, t = 0, time.time()\n losses = []\n\n # epoch size\n if params.epoch_size != -1 and ns >= params.epoch_size:\n break",
"def create_train_model(self):\n st = LancasterStemmer()\n with open(self.data_path, encoding='utf8') as f_name:\n sentences = [[st.stem(w) for w, t in pos_tag(line.lower().split()) if 'N' in t] for line in f_name]\n sentences = [filter(lambda x: len(x) > 2, (word.strip(punctuation) for word in sentences)) for sent in sentences]\n model = Word2Vec(sentences,\n min_count=self.min_count,\n size=self.size,\n window=self.window,\n workers=4)\n model.save(self.model_path)",
"def _add_pre_trained_embedding(self):\n\n if self.embedding_type['type'] == 'glove':\n self.logging.info('use pre-trained glove word2vec')\n # a. load pre trained glove\n GLOVE_DIR = '../data/glove_pretrained/glove.6B'\n glove_suffix_name = 'glove.6B.' + str(self.embedding_size) + 'd.txt'\n import os\n import numpy as np\n\n embeddings_index = {}\n f = open(os.path.join(GLOVE_DIR, glove_suffix_name)) # 'glove.6B.100d.txt'))\n for line in f:\n values = line.split()\n word = values[0]\n coefs = np.asarray(values[1:], dtype='float32')\n embeddings_index[word] = coefs\n f.close()\n\n self.logging.info('')\n self.logging.info('Found %s word vectors.' % len(embeddings_index))\n\n # b. compute embedding matrix\n embedding_matrix = np.zeros((len(self.word_index) + 1, self.embedding_size))\n cnt = 0\n for word, i in self.word_index.items():\n embedding_vector = embeddings_index.get(word)\n if embedding_vector is not None:\n embedding_matrix[i] = embedding_vector # words not found in embedding index will be all-zeros.\n else:\n # self.logging.info('token in train missing in word2vec: ' + str(word))\n cnt += 1\n self.logging.info('total tokens missing: ' + str(cnt) + ' / ' + str(len(self.word_index)))\n\n # c. build embedding layer\n from keras.layers import Embedding\n embedding_layer = Embedding(len(self.word_index) + 1,\n self.embedding_size,\n weights=[embedding_matrix],\n input_length=self.maxlen,\n trainable=False)\n\n elif self.embedding_type['type'] == 'gensim':\n self.logging.info('use pre-trained gensim word2vec')\n\n import gzip\n import gensim\n from keras.layers import Embedding\n import numpy as np\n\n # fname = '../data/word2vec_pretrained/motors/d_300_k_712904_w_6_e_60_v_motors'\n # fname = '../data/word2vec_pretrained/fashion/d_300_k_1341062_w_6_e_70_v_fashion'\n\n self.logging.info('load word2vec path: ' + str(self.embedding_type['path']))\n model = gensim.models.Word2Vec.load(self.embedding_type['path'])\n pretrained_weights = model.wv.syn0\n vocab_size, vector_dim = pretrained_weights.shape\n\n method = 3\n if method == 1:\n self.logging.info('word2vec attempt to fit into embedding layer - middle complex')\n # convert the wv word vectors into a numpy matrix that is suitable for insertion\n # into our TensorFlow and Keras models\n\n embedding_matrix = np.zeros((len(model.wv.vocab), vector_dim))\n for i in range(len(model.wv.vocab)):\n embedding_vector = model.wv[model.wv.index2word[i]]\n if embedding_vector is not None:\n embedding_matrix[i] = embedding_vector\n\n embedding_layer = Embedding(input_dim=embedding_matrix.shape[0],\n output_dim=embedding_matrix.shape[1],\n # input_length=self.maxlen,\n weights=[embedding_matrix],\n trainable=False)\n elif method == 2:\n self.logging.info('word2vec simple embedding matching - simple complex')\n embedding_layer = Embedding(input_dim=vocab_size,\n output_dim=vector_dim,\n input_length=self.maxlen,\n weights=[pretrained_weights],\n trainable=False)\n elif method == 3:\n\n self.logging.info('word2vec match using word_index from keras tokenizer - as used in glove match above')\n # b. 
compute embedding matrix\n\n # sd = 1 / np.sqrt(len(self.word_index) + 1)\n # embedding_matrix = np.random.normal(0, scale=sd, size=(len(self.word_index) + 1, self.embedding_size))\n\n embedding_matrix = np.zeros((len(self.word_index) + 1, self.embedding_size))\n cnt = 0\n for word, i in self.word_index.items():\n if word in model.wv:\n embedding_vector = model.wv[word]\n embedding_matrix[i] = embedding_vector\n else:\n # self.logging.info('token in train missing in word2vec: ' + str(word))\n cnt += 1\n self.logging.info('total tokens missing: ' + str(cnt))\n\n\n # c. build embedding layer\n from keras.layers import Embedding\n embedding_layer = Embedding(len(self.word_index) + 1,\n self.embedding_size,\n weights=[embedding_matrix],\n input_length=self.maxlen,\n trainable=False)\n else:\n raise ValueError('unknown method value')\n\n else:\n raise ValueError('unknown embedding type')\n self.logging.info('create glove pre-trained embedding: ' + str(self.embedding_size))\n return embedding_layer",
"def train(self, data):\n \n logger('[.] Training with whole dataset ...')\n \n datalist = self.unpack_data(data)\n self.knn_model.fit(datatuple['features'], datatuple['labels'])",
"def train(self):\n # >>> YOUR ANSWER HERE\n\n fake_docs = []\n fake_words = []\n fake_words_freq = {}\n real_docs = []\n real_words = []\n real_words_freq = {}\n\n # load fake data of the training dataset, store the docs and words\n fake_data = open(self.train_data['fake']).readlines()\n for sentence in fake_data:\n preprocess_sentence = sentence.strip()\n fake_docs.append(preprocess_sentence)\n fake_words.extend(preprocess_sentence.split())\n\n # load real data of the training dataset, store the docs, words and word frequencies.\n real_data = open(self.train_data['real']).readlines()\n for sentence in real_data:\n preprocess_sentence = sentence.strip()\n real_docs.append(preprocess_sentence)\n real_words.extend(preprocess_sentence.split())\n\n # remove stop words if necessary\n if self.REMOVE_STOPWORDS:\n fake_words = [word for word in fake_words if word not in self.stopwords]\n real_words = [word for word in real_words if word not in self.stopwords]\n\n # calculate all words' frequency\n for word in fake_words:\n self.vocabulary.add(word)\n fake_words_freq[word] = fake_words_freq.get(word, 0) + 1\n for word in real_words:\n self.vocabulary.add(word)\n real_words_freq[word] = real_words_freq.get(word, 0) + 1\n\n # pre-calculate the number of all docs, the number of docs per class and words frequency per class for\n # calculation in the training loop.\n n_doc = len(fake_docs) + len(real_docs)\n n_class = {'fake': len(fake_docs), 'real': len(real_docs)}\n big_doc_dict = {'fake': fake_words_freq, 'real': real_words_freq}\n fake_words_num = 0\n real_words_num = 0\n for w in self.vocabulary:\n fake_words_num += fake_words_freq.get(w, 0)\n real_words_num += real_words_freq.get(w, 0)\n words_frequency_per_class = {'fake': fake_words_num, 'real': real_words_num}\n\n # Training\n for c in self.classes:\n self.logprior[c] = math.log(n_class[c] / n_doc)\n for w in self.vocabulary:\n count_w_c = big_doc_dict[c].get(w, 0)\n log_likelihood = math.log((count_w_c + 1) / (len(self.vocabulary) + words_frequency_per_class[c]))\n self.loglikelihood[(w, c)] = log_likelihood\n # >>> END YOUR ANSWER",
"def train(self):\n\n print \"==> Running Kmeans on data set of shape: {}\".format(self.data.shape)\n km = KMeans(n_clusters = self.n_clusters)\n km.fit(self.data.values)\n self.labels = km.labels_\n self.inertia = km.inertia_",
"def train(self, input_vects):\n \n #Training iterations\n for iter_no in range(self._n_iterations):\n #Train with each vector one by one\n if iter_no % 20 == 0:\n print(iter_no)\n for input_vect in input_vects:\n self._sess.run(self._training_op,\n feed_dict={self._vect_input: input_vect,\n self._iter_input: iter_no})\n \n #Store a centroid grid for easy retrieval later on\n centroid_grid = [[] for i in range(self._m)]\n self._weightages = list(self._sess.run(self._weightage_vects))\n self._locations = list(self._sess.run(self._location_vects))\n for i, loc in enumerate(self._locations):\n centroid_grid[loc[0]].append(self._weightages[i])\n self._centroid_grid = centroid_grid\n \n self._trained = True",
"def train(self):\n params = self.params\n self.encoder.train()\n self.proj.train()\n\n # training variables\n losses = []\n ns = 0 # number of sentences\n nw = 0 # number of words\n t = time.time()\n\n iterator = self.get_iterator('train', 'en')\n lang, lang_id = 'en', params.lang2id['en']\n while True:\n # batch\n try:\n batch = next(iterator)\n except StopIteration:\n break\n (sent1, len1), idx = batch\n x, lengths = truncate(sent1, len1, params.max_len, params.eos_index)\n lang_ids = x.clone().fill_(lang_id)\n\n y = self.data['en']['train']['y'][idx]\n bs = len(len1)\n\n # cuda\n x, y, lengths, lang_ids = to_cuda(x, y, lengths, lang_ids)\n\n # loss\n output = self.proj(self.encoder.get_embeddings(x, lengths, langs=lang_ids))\n loss = F.cross_entropy(output, y)\n\n # backward / optimization\n self.optimizer_e.zero_grad()\n self.optimizer_p.zero_grad()\n loss.backward()\n self.optimizer_e.step()\n self.optimizer_p.step()\n\n # update statistics\n ns += bs\n nw += lengths.sum().item()\n losses.append(loss.item())\n\n # log\n if ns % (100 * bs) < bs:\n logger.info(\"CLF - Epoch %i - Train iter %7i - %.1f words/s - Loss: %.4f\" % (\n self.epoch, ns, nw / (time.time() - t), sum(losses) / len(losses)))\n nw, t = 0, time.time()\n losses = []\n\n # epoch size\n if params.epoch_size != -1 and ns >= params.epoch_size:\n break",
"def preprocess(data_path, glove_path, embed_size):\n train_data = read_imdb(data_path, 'train')\n test_data = read_imdb(data_path, 'test')\n\n train_tokenized = []\n test_tokenized = []\n for review, _ in train_data:\n train_tokenized.append(tokenizer(review))\n for review, _ in test_data:\n test_tokenized.append(tokenizer(review))\n\n vocab = set(chain(*train_tokenized))\n vocab_size = len(vocab)\n print(\"vocab_size: \", vocab_size)\n\n word_to_idx = {word: i + 1 for i, word in enumerate(vocab)}\n word_to_idx['<unk>'] = 0\n\n train_features = np.array(pad_samples(encode_samples(train_tokenized, word_to_idx))).astype(np.int32)\n train_labels = np.array([score for _, score in train_data]).astype(np.int32)\n test_features = np.array(pad_samples(encode_samples(test_tokenized, word_to_idx))).astype(np.int32)\n test_labels = np.array([score for _, score in test_data]).astype(np.int32)\n\n weight_np = collect_weight(glove_path, vocab, word_to_idx, embed_size)\n return train_features, train_labels, test_features, test_labels, weight_np, vocab_size",
"def main():\r\n # Prepare the data and the pretrained embedding matrix\r\n if FRESH_START:\r\n print(\"Preprocessing all data from scratch....\")\r\n train, dev, test = utils.get_data(DATA_FN)\r\n # train_data includes .word2idx and .label_enc as fields if you would like to use them at any time\r\n train_generator, dev_generator, test_generator, embeddings, train_data = utils.vectorize_data(train, dev, test, BATCH_SIZE, EMBEDDING_DIM)\r\n print(\"Saving DataLoaders and embeddings so you don't need to create them again; you can set FRESH_START to \"\r\n \"False to load them from file....\")\r\n with open(TEMP_FILE, \"wb+\") as f:\r\n pickle.dump((train_generator, dev_generator, test_generator, embeddings, train_data), f)\r\n else:\r\n try:\r\n with open(TEMP_FILE, \"rb\") as f:\r\n print(\"Loading DataLoaders and embeddings from file....\")\r\n train_generator, dev_generator, test_generator, embeddings, train_data = pickle.load(f)\r\n except FileNotFoundError:\r\n raise FileNotFoundError(\"You need to have saved your data with FRESH_START=True once in order to load it!\")\r\n \r\n\r\n # Use this loss function in your train_model() and test_model()\r\n loss_fn = nn.CrossEntropyLoss()\r\n\r\n ########## YOUR CODE HERE ##########\r\n HIDDEN_DIM = 64\r\n ########## Base DNN ################\r\n # # TODO: for each of the two models, you should 1) create it,\r\n print(\"train and test on DNN!\")\r\n dnn = models.DenseNetwork(EMBEDDING_DIM, NUM_CLASSES, HIDDEN_DIM, embeddings)\r\n optimizer = optim.Adam(dnn.parameters())\r\n # TODO 2) run train_model() to train it, and\r\n #trained_dnn = train_model(dnn, loss_fn, optimizer, train_generator, dev_generator)\r\n DNN_PATH = 'dense.pth'\r\n #torch.save(trained_dnn, DNN_PATH)\r\n # TODO: 3) run test_model() on the result\r\n print(\"Test on the saved Dense Network\")\r\n dnn_test = torch.load(DNN_PATH)\r\n test_model(dnn_test, loss_fn, test_generator)\r\n \"\"\"\r\n Output:\r\n Test loss: tensor([25.7230])\r\n F-score: 0.4399188910197242\r\n \"\"\"\r\n\r\n ########## Base RNN ################\r\n # TODO: for each of the two models, you should 1) create it,\r\n print(\"train and test on RNN!\")\r\n SENTENCE_LEN = 91\r\n rnn = models.RecurrentNetwork(SENTENCE_LEN, NUM_CLASSES, HIDDEN_DIM, embeddings)\r\n optimizer = optim.Adam(rnn.parameters())\r\n # TODO 2) run train_model() to train it, and\r\n #trained_rnn = train_model(rnn, loss_fn, optimizer, train_generator, dev_generator)\r\n RNN_PATH = 'recurrent.pth'\r\n #torch.save(trained_rnn, RNN_PATH)\r\n # TODO: 3) run test_model() on the result\r\n print(\"Test on the saved Recurrent Network\")\r\n rnn_test = torch.load(RNN_PATH)\r\n test_model(rnn_test, loss_fn, test_generator)\r\n \"\"\"\r\n Output:\r\n Test loss: tensor([25.7136])\r\n F-score: 0.42172967869116373\r\n \"\"\"\r\n\r\n # extension-grading: Extension 1, changes to the preprocessing of the data - Tweets tokenizers.\r\n # Major changes are in the utils.py labeled by \"extension-grading\"\r\n Extension1 = False\r\n if Extension1:\r\n print(\"Train and test dnn with Extension 1: Tweets tokenizers\")\r\n train, dev, test = utils.get_data(DATA_FN)\r\n train_generator, dev_generator, test_generator, embeddings,train_data = utils.vectorize_data(train, dev, test, BATCH_SIZE, EMBEDDING_DIM, extension=True)\r\n # try on DNN\r\n dnn = models.DenseNetwork(EMBEDDING_DIM, NUM_CLASSES, HIDDEN_DIM, embeddings)\r\n optimizer = optim.Adam(dnn.parameters())\r\n trained_dnn = train_model(dnn, loss_fn, optimizer, train_generator, dev_generator)\r\n 
test_model(trained_dnn, loss_fn, test_generator)\r\n \"\"\"\r\n Output:\r\n Test loss: tensor([25.5987])\r\n F-score: 0.4465511728425936\r\n # Compared with original tokenizer, F-score increased by 1.6%.\r\n \"\"\"\r\n\r\n # extension-grading: Extension 2, architecture changes - flattening embeddings using the average of unpadded sentence words other than sum. \r\n # Major changes are in the models.py labeled by \"extension-grading\"\r\n Extension2 = False\r\n if Extension2:\r\n print(\"Train and test dnn with Extension 2: Architecture changes - flattening embeddings\")\r\n # initialize the experimental model\r\n exp = models.ExperimentalNetwork(EMBEDDING_DIM, NUM_CLASSES, HIDDEN_DIM, embeddings)\r\n optimizer = optim.Adam(exp.parameters())\r\n # run train_model() to train it\r\n trained_exp = train_model(exp, loss_fn, optimizer, train_generator, dev_generator)\r\n # run test_model() on the result\r\n test_model(trained_exp, loss_fn, test_generator)\r\n \"\"\"\r\n Output:\r\n Test loss: tensor([29.4298])\r\n F-score: 0.22199231332724553\r\n # Compared with original architecture, F-score decreased by half.\r\n \"\"\"",
"def train(self, training_data):\n pass",
"def train(self, input_vects):\n \n #Training iterations\n for iter_no in range(self._n_iterations):\n print(iter_no)\n if (iter_no % 1==0) & (iter_no>0) :\n \n self.map_plot(iter_no)\n centroid_grid = [[] for i in range(self._m)]\n self._weightages = list(self._sess.run(self._weightage_vects))\n self._locations = list(self._sess.run(self._location_vects))\n \n for i, loc in enumerate(self._locations):\n centroid_grid[loc[0]].append(self._weightages[i])\n self._centroid_grid = centroid_grid \n \n #Train with each vector one by one\n for input_vect in input_vects:\n self._sess.run(self._training_op,\n feed_dict={self._vect_input: input_vect,\n self._iter_input: iter_no})\n print(iter_no)\n self.map_plot(iter_no) \n self._trained = True\n gif.build_gif(imgs, saveto='exoplaneta005s6 .gif')",
"def training(training_data):\n\n # get the embeddings and labels to process further for SVM model training\n df = get_embeddings_label_dataframe(training_data)\n\n # converting labels into int\n le = LabelEncoder()\n y = le.fit_transform(df[128])\n # print(y)\n print(\"Training for {} classes.\".format(len(le.classes_)))\n X = df.drop(128, axis=1)\n print(\"Training with {} pictures.\".format(len(X)))\n\n # training\n clf = SVC(C=2, kernel='linear', probability=True)\n clf.fit(X, y)\n\n # dumping model\n print(\"Saving classifier to '{}'\".format(svm_classifier_filename))\n with open(svm_classifier_filename, 'wb') as f:\n pickle.dump((le, clf), f)",
"def train(self):\r\n for class_ in set(self.train_classes):\r\n data = map(lambda (ind, datum): datum, filter(lambda (ind, datum): self.train_classes[ind] == class_, enumerate(self.train_data)))\r\n self.distribution.index_data(data, class_)",
"def embedding_train(total_corpus,emoteonly_corpus,textonly_corpus,save_fname_emote,save_fname_text,save_fname_intersect):\n wv_model = Word2Vec(min_count=100,size=100,negative=0.75,sg=0,hs=1,window=60)\n wv_model.build_vocab(sentences=total_corpus())\n wv_model2 = copy.deepcopy(wv_model)\n \n # train emoteonly\n wv_model.train(sentences=emoteonly_corpus(),epochs=10,total_examples=wv_model.corpus_count)\n wv_model.save(save_fname_emote)\n # train_textonly\n wv_model2.train(sentences=textonly_corpus(),epochs=10,total_examples=wv_model.corpus_count)\n wv_model2.save(save_fname_text)\n \n src_model = Word2Vec.load(save_fname_emote)\n dest_model = Word2Vec.load(save_fname_text)\n \n src_model.wv.save_word2vec_format(save_fname_intersect)\n dest_model.intersect_word2vec_format(save_fname_intersect, lockf=1.0, binary=False)\n\n dest_model.train(sentences=train_corpus(), total_examples=dest_model.corpus_count, epochs=20)\n dest_model.save(save_fname_intersect)\n return",
"def preprocess(path):\n \"\"\"Load the dictionary and the tokenizer.\"\"\"\n with open(('aux_files/enc_dic_%s_%d_%d_%s.pkl' % (FLAGS.data, MAX_VOCAB_SIZE, FLAGS.sn, FLAGS.sigma)), 'rb') as f:\n enc_dic = pickle.load(f)\n with open(('aux_files/tokenizer_%s_%d.pkl' % (FLAGS.data, MAX_VOCAB_SIZE)), 'rb') as f:\n tokenizer = pickle.load(f)\n\n \"\"\"We only use the original sequence `train_seq_o` and `test_seq_o`\"\"\"\n train_seq, train_seq_o, train_labels = encode_utils.text_encode(tokenizer, enc_dic, FLAGS.data+'/train', MAX_VOCAB_SIZE)\n test_seq, test_seq_o, test_labels = encode_utils.text_encode(tokenizer, enc_dic, FLAGS.data+'/test', MAX_VOCAB_SIZE)\n\n \"\"\"If use adversarial training method, add the adversarial samples to the original data:\"\"\"\n if FLAGS.adv:\n # Load adversarial samples.\n adv_train_seq_o, adv_train_labels = encode_utils.adv_text_encode(tokenizer, enc_dic, FLAGS.data, FLAGS.nn_type, MAX_VOCAB_SIZE)\n train_seq_o.extend(adv_train_seq_o)\n train_labels.extend(adv_train_labels)\n print('Adversarial Training, and extend the data.')\n\n \"\"\"Load the embedding matrix, and pad sequence to the same length\"\"\"\n embedding_matrix = np.load(('aux_files/embeddings_glove_%s_%d.npy' %(FLAGS.data, MAX_VOCAB_SIZE)))\n max_len = 250\n x_train = pad_sequences(train_seq_o, maxlen=max_len, padding='post')\n y_train = np.array(train_labels)\n x_test = pad_sequences(test_seq_o, maxlen=max_len, padding='post')\n y_test = np.array(test_labels)\n\n print('Training data: %d' % len(y_train))\n\n return x_train, y_train, x_test, y_test, embedding_matrix",
"def trainModel( self, featureTrain, classTrain):",
"def augment_train_data(self):\n print(\"Augmenting train data.\")\n elastic_flag = 1\n read_path: Path = Path(os.environ[\"DATA_PATH\"]) / \"characters\" / \"train\"\n for letter_dir in read_path.iterdir():\n original_images = list(letter_dir.iterdir())\n length = len(original_images)\n max_kernel = (240 - length) / 2 / length + 2\n if max_kernel >= 2.6:\n max_kernel = min(round(max_kernel), 5)\n for j in original_images:\n img_path = str(j)\n self.augmenter.dilate_image(img_path, 3, max_kernel)\n self.augmenter.erosion_image(img_path, 3, max_kernel)\n new_len = len(\n list(letter_dir.iterdir())\n ) # Length after regular augmentation\n if elastic_flag == 1:\n try: # to make the program runnable if you are not on linux\n if new_len < 160:\n reps = 4 - new_len // 50\n self.augmenter.elastic_morphs(letter_dir, reps)\n except:\n print(\"Continuing without elastic morph\")\n elastic_flag = 0\n continue",
"def train(self):\n # self.recognizer.train()\n self.detector.train()\n self.shared_conv.train()",
"def train(self, sentences):\n\n dictionary = Dictionary(sentences)\n\n ft = Word2Vec(sentences, workers=cpu_count(), min_count=5, size=300, seed=12345)\n\n index = WordEmbeddingSimilarityIndex(ft.wv)\n matrix = SparseTermSimilarityMatrix(index, dictionary)\n\n self.dictionary = dictionary\n self.ft = ft\n self.matrix = matrix",
"def train_test_model_batch():\n train=learning.Train_kmer_clf()\n train.run()",
"def retrain_model(self, new_sentences, with_punctiations):\n if with_punctiations:\n model_ = Word2Vec.load('./model/model_word2vec.bin')\n else:\n model_ = Word2Vec.load('./model/model_no_punctuation_word2vec.bin')\n\n model_.build_vocab(new_sentences, update=True)\n model_.train(new_sentences, total_examples=model_.corpus_count, epochs=model_.iter)\n\n if with_punctiations:\n model_.save('./model/model_word2vec.bin')\n else:\n model_.save('./model/model_no_punctuation_word2vec.bin')\n\n\n pass",
"def load_data():\n # Load and preprocess data\n x_text_train1, x_text_train2, x_text_dev1, x_text_dev2, y_train, y_dev = load_data_and_labels_without_shuffled()\n\n x_text_train1 = split_sentence(x_text_train1)\n x_text_train2 = split_sentence(x_text_train2)\n x_text_dev1 = split_sentence(x_text_dev1)\n x_text_dev2 = split_sentence(x_text_dev2)\n\n x_text_train1 = pad_sentences(x_text_train1)\n x_text_train2 = pad_sentences(x_text_train2)\n x_text_dev1 = pad_sentences(x_text_dev1)\n x_text_dev2 = pad_sentences(x_text_dev2)\n\n # sentences = x_text_train1 + x_text_train2 + x_text_dev1 + x_text_dev2\n # vocabulary, vocabulary_inv = build_vocab(sentences)\n # x_text_train1 = build_input_data(x_text_train1, vocabulary)\n # x_text_train2 = build_input_data(x_text_train2, vocabulary)\n # x_text_dev1 = build_input_data(x_text_dev1, vocabulary)\n # x_text_dev2 = build_input_data(x_text_dev2, vocabulary)\n\n x_train1 = sentence_word2vec(x_text_train1)\n x_train2 = sentence_word2vec(x_text_train2)\n x_dev1 = sentence_word2vec(x_text_dev1)\n x_dev2 = sentence_word2vec(x_text_dev2)\n\n y_train = np.array(y_train)\n y_dev = np.array(y_dev)\n # return [x_text_train1, x_text_train2, x_text_dev1, x_text_dev2, y_train, y_dev, vocabulary, vocabulary_inv]\n\n return [x_train1, x_train2, x_dev1, x_dev2, y_train, y_dev]",
"def train(self, examples):\n print(examples)\n # first we will do gensim to get word embeddings\n tokens = []\n for example in examples:\n for tuple in example:\n tokens.append([tuple[0]])\n self.model = Word2Vec(tokens, min_count=1, size=100).wv\n # shuffle the examples so that they are gone through 'randomly'\n #print(examples)\n random.shuffle(examples)\n #print(examples)\n # iterate through our examples\n for j in range(len(examples)):\n # the stored label for the previous token\n prev_label = None\n prev_word = None\n # iterate through our tokens for the example\n for i in range(len(examples[j])):\n # store our token and its label\n token = examples[j][i][0]\n y = examples[j][i][1]\n # get the features for our current token\n next_word = None\n if i <= (len(examples)-1):\n next_word = examples[j][i+1][0]\n features = self.featurize(prev_label, prev_word, token, next_word)\n # set our previous label to our current since\n # we are done featurizing and need to store it for\n # the next iteration\n prev_label = y\n # a dictionary that will store our z values\n z = {}\n # calculate our z value for every state for\n # the example we are on\n # z(state) = features * weights\n # z[state] = np.dot(features, weights[state])\n for state in self.states:\n z[state] = np.dot(features, self.weights[state])\n # store our max\n max = -1\n # store our y_hat\n y_hat = None\n # store our probabilities\n prob = {}\n # this runs softmax on our z's\n # y_hat = softmax(z)\n denom = sum(np.exp(np.array(list(z.values()))))\n for state in self.states:\n # softmax = p(state) = e^z[state] / (sum[e^z for all z's)\n # making sure this works the way I want it to, should\n # be three values\n #print(np.array(list(z.values())))\n #print(np.exp(np.array(list(z.values()))))\n prob[state] = np.exp(z[state]) / denom\n # if our current prob is greater than the others then it is our boy\n if prob[state] > max:\n # save the new prob as the max\n max = prob[state]\n # save the state as our prediction y_hat\n y_hat = state\n # this will hold our gradients for all the states\n gradients = {}\n for state in self.states:\n # gradient[state] = ((y_hat == state) - prob[state]) * features\n gradients[state] = ((y_hat == state) - prob[state]) * features\n # weights[state] -= loss * gradients\n self.weights[state] -= self.loss * gradients[state]",
"def main(data_file,\n epochs=20,\n max_vocab_size=50000,\n embedding_file_path='data/glove/glove.6B.300d.txt',\n max_length=50,\n embedding_dim=300,\n validation_split=5000, # Number of Pairs in Validation set\n test_split=5000, # Number of Pairs in test set\n data_pickle_file='data/data.pkl',\n use_pickled_data=True,\n data_aug=False,\n model_save_filepath='data/model.h5',\n tokenizer_path='data/tokenizer.json'\n ):\n\n word_index = None\n if use_pickled_data and os.path.exists(data_pickle_file):\n print(\"Loading Pickled Data...\")\n word_index, train_question1, train_question2, y_train,\\\n valid_question1, valid_question2, y_valid,\\\n test_question1, test_question2, y_test, test_df = pickle.load(open(data_pickle_file, 'rb'))\n else:\n # Load and process all the data\n word_index, train_question1, train_question2, y_train,\\\n valid_question1, valid_question2, y_valid,\\\n test_question1, test_question2, y_test, test_df = _load_and_process_data(\n data_file=data_file,\n max_length=max_length,\n max_vocab_size=max_vocab_size,\n validation_split=validation_split,\n test_split=test_split,\n tokenizer_path=tokenizer_path\n )\n pickle.dump(\n [word_index, train_question1, train_question2,\n y_train, valid_question1, valid_question2,\n y_valid, test_question1, test_question2,\\\n y_test, test_df],\n open(data_pickle_file, \"wb\")\n )\n\n # Limit word Vocab to Max Vocab\n word_index = {k:v for k, v in word_index.items() if v < max_vocab_size}\n\n # Case to handle when len(word_index) < max_vocab_size\n max_vocab_size = len(word_index)+1\n\n # Embedding Loader\n embedding_weight = load_embedding(embedding_file_path,\n word_index,\n embedding_dim)\n\n # Define the model\n model = get_model(max_length=max_length,\n max_vocab_size=max_vocab_size,\n embedding_dim=embedding_dim,\n embedding_weight=embedding_weight)\n\n if data_aug:\n # Augumentation: reverse Q1 and Q2 ordering \n train_question_aug1 = np.concatenate([train_question1, train_question2], axis=0)\n train_question_aug2 = np.concatenate([train_question2, train_question1], axis=0)\n y_train = np.concatenate([y_train, y_train], axis=0)\n train_question1, train_question2 = train_question_aug1, train_question_aug2\n\n callbacks = [EarlyStopping(monitor='val_acc', patience=2),\n ModelCheckpoint(model_save_filepath, monitor='val_acc', save_best_only=True)]\n\n model.compile(optimizer='adam', loss=\"binary_crossentropy\", metrics=[f1, 'acc'])\n model.summary()\n model.fit([train_question1, train_question2], y_train,\\\n epochs=epochs,\n batch_size=64,\n validation_data=([valid_question1, valid_question2], y_valid),\n callbacks=callbacks)\n\n test_pred = model.predict([test_question1, test_question2])\n test_df[\"prediction\"] = test_pred\n test_df.to_csv(\"predictions.csv\")"
] |
[
"0.6813423",
"0.67908955",
"0.6574832",
"0.65220845",
"0.65132666",
"0.64946496",
"0.64639014",
"0.6463797",
"0.6428264",
"0.6420494",
"0.6386705",
"0.6360645",
"0.6349112",
"0.6337634",
"0.6322277",
"0.6321008",
"0.6292582",
"0.6291619",
"0.6290588",
"0.62819844",
"0.6276907",
"0.62515783",
"0.62487257",
"0.62265503",
"0.6180531",
"0.6171606",
"0.61617273",
"0.616112",
"0.6127575",
"0.6115261"
] |
0.7133405
|
0
|
Create an unresolved Entry for this builder. Calls `_CONTEXT.on_entry`, if set, to register the entry and check dependencies.
|
def __call__(self, config):
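    # Build an unresolved entry: the key is derived from the config, result fields stay None.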
entry = Entry(self.name, make_key(config), config, None, None, None)
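    # If no dependency-tracking context is installed, just return the bare entry.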
if not hasattr(_CONTEXT, "on_entry"):
return entry
on_entry = _CONTEXT.on_entry
if on_entry:
on_entry(entry)
return entry
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def create_entry(entry):\n Entry.create(**entry)\n return entry",
"def get_entry(self):\n # Filter out any fields that are invalid for the type of a new entry.\n properties = {\n field: value\n for field, value in self.properties.items()\n if field in self.type_cls.entry_fields\n }\n\n return self.type_cls.from_proxy(self.name, self.description,\n self.updated, self.notes, properties)",
"def create_entry(validator):\n entry = ValidationEntry()\n entry.setValidator(validator.build(entry))\n return entry",
"def create_entry(self, entry_group_name, entry_id, entry):\n try:\n entry = self.__datacatalog.create_entry(parent=entry_group_name,\n entry_id=entry_id,\n entry=entry)\n self.__log_entry_operation('created', entry=entry)\n return entry\n except (exceptions.FailedPrecondition,\n exceptions.PermissionDenied) as e:\n entry_name = '{}/entries/{}'.format(entry_group_name, entry_id)\n self.__log_entry_operation('was not created',\n entry_name=entry_name)\n raise e",
"def add_entry(self, *args, **kwargs):\n entry = Entry(*args, **kwargs) # NOTE: not sure this is good\n self._entries[entry.uuid] = entry\n return entry",
"def __call__(self, entry):\n return self",
"def init_new_entry(args, page=False):\n\n buildingfor = \"posts\"\n if (page):\n buildingfor = \"pages\"\n\n def _remove_temporary_entries(entries):\n result = {}\n for key, value in processed_entries.items():\n if (not \"_\" in key):\n result[key] = value\n\n return result\n\n def _get_new_entry(final_header):\n default_entry = \"---\\n\" + yaml.dump(final_header, allow_unicode=True,\n default_flow_style=False) + \"---\"\n return default_entry\n\n # Get configs\n user_config = configurator.get_config(os.path.join(args.src, paths.CFG_FILE))\n if (not user_config):\n logging.error(\"Error, could not find user config at {}\".format(\n os.path.join(args.src, paths.CFG_FILE)))\n return\n\n theme_headers = defaults.DEFAULT_THEME_HEADERS\n theme_headers_file = os.path.join(args.src, paths.THEMES_PATH,\n user_config[\"theme\"], paths.THEME_HEADERS_FILE)\n if (os.path.isfile(theme_headers_file)):\n tmp = configurator.get_yaml(theme_headers_file)\n # theme headers file might only define entries for posts/pages\n if (tmp[buildingfor]):\n theme_headers = tmp\n\n # Parse remainder (header content)\n processed_entries = _process_header_dict(theme_headers[buildingfor], args.header_content)\n final_entries = _remove_temporary_entries(processed_entries)\n\n # Generate entry file name from user / default template\n file_name = _get_new_entry_path(args, user_config, processed_entries, page)\n\n logging.debug(\"Creating new entry file at \" + file_name)\n\n with open(file_name, 'w+') as stream:\n stream.write(_get_new_entry(final_entries))\n\n logging.debug(\"Done creating entry.\")",
"def _initNewEntryDocument(self, atomDoc): #@UnusedVariable #$NON-NLS-1$\r\n pass",
"def test_bundle_entry_instanciation() -> None:\n issue = r4.OperationOutcomeIssue(code=\"not-found\", severity=\"warning\")\n outcome = r4.OperationOutcome(issue=[issue])\n entry = r4.BundleEntry(resource=outcome)\n assert entry.resource.issue[0].code == \"not-found\"",
"def creating_entry(self):\n response = \"\"\n today = str(date.today())\n curent_time = str(datetime.time(datetime.now()))\n entry = Diary(self.entry_id, self.title, self.body)\n lst = {}\n lst[\"entry_id\"] = entry.entry_id\n lst[\"title\"] = entry.title\n lst[\"date\"] = today\n lst[\"time\"] = curent_time\n lst[\"body\"] = entry.body\n lst[\"updated\"] = entry.updated\n if Validate.validate_entry(Diary.entries, entry):\n response = jsonify({\"message\": \"Duplicate data,Try again\"})\n response.status_code = 409\n else:\n Diary.entries.append(lst)\n response = jsonify({\"message\": \"Entry saved\", \"data\": lst})\n response.status_code = 201\n return response",
"def create_base_entry(vin=\"INVALID\", time_unix=None):\n\t\treturn LogEntry(vin=vin, app_id=\"INVALID\", time_unix=time_unix)",
"def create_and_add_entry(self, **attrs):\n return self.add_entry(self.create_entry(**attrs))",
"def createAtomEntry(self, postLink, atomNewEntry): #$NON-NLS-1$\r\n atomRequest = self._createNewEntryRequest(postLink, atomNewEntry)\r\n self._sendAtomEntry(atomRequest, atomNewEntry)\r\n atomEntry = atomRequest.getEntry()\r\n del atomRequest\r\n return atomEntry",
"def create_entry_for_topic(cls, topic, entry_id, content_hash):\n\t\tkey = cls.create_key(topic, entry_id)\n\t\treturn cls(key_name=key.name(),\n\t\t\t\t\t\t\t parent=key.parent(),\n\t\t\t\t\t\t\t entry_id=entry_id,\n\t\t\t\t\t\t\t entry_id_hash=utils.sha1_hash(entry_id),\n\t\t\t\t\t\t\t entry_content_hash=content_hash)",
"def build(self, name, opened, entry):\n raise NotImplementedError()",
"def _new_entry(self, home_coordinates, feature, global_data):\n return NswRuralFireServiceFeedEntry(home_coordinates, feature)",
"def add_item_entry(self, the_spec):\n debug(\"Adding entry {}\".format(the_spec))\n entry = tk.Entry(self.current_parent)\n self.entries[the_spec.value] = entry\n if not self.parent_is_grid:\n entry.pack()\n return entry",
"def _create_entry():\r\n entry_widget = tk.Entry(password_window, bd=0, font=('Helvetica', 16), width=40,\r\n bg='gray15', fg='white', insertbackground='white')\r\n entry_widget.place(x=10, y=105)\r\n\r\n entry_widget.focus()\r\n\r\n return entry_widget",
"def build_entry(var, win):\n\tif var.type.is_range():\n\t\tentry = build_range_entry(var, win)\n\telif var.type.is_enum():\n\t\tentry = build_enum_entry(var, win)\n\telse:\n\t\tentry = Gtk.Label(var.label)\n\tif var.help != \"\":\n\t\tentry.set_tooltip_text(var.help)\n\treturn entry",
"def _create_failure_entry(self):\r\n # view task entry for task failure\r\n progress = {'message': TEST_FAILURE_MESSAGE,\r\n 'exception': TEST_FAILURE_EXCEPTION,\r\n }\r\n return self._create_entry(task_state=FAILURE, task_output=progress)",
"def from_gsx_entry(entry: Dict[str, Dict[str, str]]) -> Optional[\"Resource\"]:\n if not entry:\n return None\n\n main_title = get_gsx_entry_value(entry, \"title\")\n if not main_title:\n return None\n\n title = Title.get_or_create(main_title)\n date_display = get_gsx_entry_value(entry, \"year\")\n\n if date_display:\n resource, _ = Resource.objects.get_or_create(\n _is_paratext=False, title=title, date__date_display=date_display\n )\n\n date = Date.from_date_display(date_display)\n resource.date = date\n else:\n resource, _ = Resource.objects.get_or_create(\n _is_paratext=False, title=title\n )\n\n Contribution.from_gsx_entry(resource, entry, \"authors\", \"author\")\n\n Resource.languages_from_gsx_entry(resource, entry)\n\n Resource.subjects_from_gsx_entry(resource, entry)\n\n Classification.get_or_create(resource, get_gsx_entry_value(entry, \"status\"))\n\n value = get_gsx_entry_value(entry, \"editionnumber\")\n if value:\n resource.edition_enumeration = value\n\n value = get_gsx_entry_value(entry, \"location\")\n if value:\n for name in value.split(\"; \"):\n place = get_geonames_place_from_gsx_place(name)\n if place:\n ResourcePlace.objects.get_or_create(resource=resource, place=place)\n\n Contribution.from_gsx_entry(resource, entry, \"organisation\", \"publisher\")\n\n value = get_gsx_entry_value(entry, \"notes\")\n if value:\n resource.notes = value\n\n Resource.paratext_from_gsx_entry(entry, resource)\n\n libraries = get_gsx_entry_value(entry, \"libraries\")\n if libraries:\n for library in libraries.split(\"; \"):\n library = library.strip()\n if library:\n org, _ = Organisation.objects.get_or_create(name=library)\n resource.held_by.add(org)\n\n url = get_gsx_entry_value(entry, \"url\")\n if url:\n resource.electronic_locator = url\n\n resource.save()\n\n return resource",
"def info_from_entry(self, entry):\n info = super().info_from_entry(entry)\n return info",
"def get_page_from_entry(self, entry):\n page = Page()\n if '_uri' not in entry or entry['_uri'] is None:\n prefix = '' if '_list_id' not in entry or entry['_list_id'] is None else ('/' + entry['_list_id'])\n entry['_uri'] = prefix + \"/\" + self.get_slug(entry['_headline'])\n page.load(**entry)\n hydrate(page)\n page.static = True\n return page",
"def __init__(self, name: unicode, entry: ghidra.program.model.address.Address, body: ghidra.program.model.address.AddressSetView, source: ghidra.program.model.symbol.SourceType, findEntryPoint: bool, recreateFunction: bool):\n ...",
"def add_entry(self, number: int, entry: Entry) -> None:\n raise NotImplementedError",
"def _async_create_entry_from_vars(self):\n return self.async_create_entry(\n title=TITLE,\n data={\n CONF_USB_PATH: self.usb_path,\n CONF_NETWORK_KEY: self.network_key,\n CONF_USE_ADDON: self.use_addon,\n CONF_INTEGRATION_CREATED_ADDON: self.integration_created_addon,\n },\n )",
"def _create_uefi_entry(self, target, psci_enable, entry_name):\n self._wait_for_vemsd_mount(target)\n try:\n selection_pattern = '\\[([0-9]+)\\] *'\n\n # Identify and select boot manager menu item.\n target.expect(selection_pattern + 'Boot Manager', timeout=15)\n bootmanager_item = target.match.group(1)\n target.sendline(bootmanager_item)\n\n # Identify and select 'add new entry'.\n target.expect(selection_pattern + 'Add Boot Device Entry', timeout=15)\n new_entry_item = target.match.group(1)\n target.sendline(new_entry_item)\n\n # Identify and select BootMonFs.\n target.expect(selection_pattern + 'NOR Flash .*', timeout=15)\n BootMonFs_item = target.match.group(1)\n target.sendline(BootMonFs_item)\n\n # Specify the parameters of the new entry.\n target.expect('.+the kernel', timeout=5)\n target.sendline(self.config.kernel) # kernel path\n target.expect('Has FDT support\\?.*\\[y\\/n\\].*', timeout=5)\n time.sleep(0.5)\n target.sendline('y') # Has Fdt support? -> y\n target.expect('Add an initrd.*\\[y\\/n\\].*', timeout=5)\n time.sleep(0.5)\n target.sendline('y') # add an initrd? -> y\n target.expect('.+the initrd.*', timeout=5)\n time.sleep(0.5)\n target.sendline(self.config.initrd) # initrd path\n target.expect('.+to the binary.*', timeout=5)\n time.sleep(0.5)\n _slow_sendline(target, self.config.kernel_arguments + psci_enable) # arguments to pass to binary\n time.sleep(0.5)\n target.expect('.+new Entry.+', timeout=5)\n _slow_sendline(target, entry_name) # Entry name\n target.expect('Choice.+', timeout=15)\n time.sleep(2)\n except pexpect.TIMEOUT:\n raise DeviceError('Timed out while creating UEFI entry.')\n self._perform_uefi_reboot(target)",
"def __init__(self, owner, entries=None):\n\n self.owner = owner\n # self.entries = EntriesDict({})\n self.entries = EntriesDict(self)\n\n if entries is None:\n return\n\n # self.add_entries(entries)",
"def createNewBlogEntry(self): #$NON-NLS-1$\r\n atomdoc = self._createNewEntryDocument()\r\n self._initNewEntryDocument(atomdoc)\r\n return ZAtomNewBlogEntry(atomdoc)",
"def new_entry(self, entry=\"entry\", program_name=\"pyFAI\",\n title=\"description of experiment\",\n force_time=None, force_name=False):\n\n if not force_name:\n nb_entries = len(self.get_entries())\n entry = \"%s_%04i\" % (entry, nb_entries)\n entry_grp = self.h5.require_group(entry)\n entry_grp.attrs[\"NX_class\"] = numpy.string_(\"NXentry\")\n entry_grp[\"title\"] = numpy.string_(title)\n entry_grp[\"program_name\"] = numpy.string_(program_name)\n if force_time:\n entry_grp[\"start_time\"] = numpy.string_(force_time)\n else:\n entry_grp[\"start_time\"] = numpy.string_(get_isotime())\n self.to_close.append(entry_grp)\n return entry_grp"
] |
[
"0.62241614",
"0.57561404",
"0.57318985",
"0.5663275",
"0.56416744",
"0.55154335",
"0.53690875",
"0.5359811",
"0.53573525",
"0.52931273",
"0.5256571",
"0.52439976",
"0.5211755",
"0.52079546",
"0.51699185",
"0.5136273",
"0.5114029",
"0.50949085",
"0.5077293",
"0.50508136",
"0.5043724",
"0.50355244",
"0.50231814",
"0.50124425",
"0.5006169",
"0.49813443",
"0.4965388",
"0.4954249",
"0.49535614",
"0.49492192"
] |
0.5899941
|
1
|
Convert from MJD (UTC) to ET using the tt2tdb output of tempo2
|
def mjd2et(mjd, tt2tdb):
    mjdJ2000 = mp.mpf("51544.5")
    secDay = mp.mpf("86400.0")
    # Convert MJD(UTC) to MJD(TT) using the HP time-convert library
    mjdTT = tc.mjd2tdt(mp.mpf(mjd))
    et = (mp.mpf(mjdTT) - mjdJ2000) * secDay + mp.mpf(tt2tdb)
    return et
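A minimal, self-contained sketch of the same arithmetic with mpmath, assuming the UTC-to-TT step has already been done (the tempo2 'tc' helper is not available here); the MJD and tt2tdb values are made up for illustration:

from mpmath import mp, mpf

mp.dps = 30                      # high precision, matching the mpf usage above
mjd_j2000 = mpf("51544.5")       # MJD of the J2000.0 epoch
sec_per_day = mpf("86400.0")

mjd_tt = mpf("55555.0")          # hypothetical MJD already converted to TT
tt2tdb = mpf("0.00123")          # hypothetical TT-TDB correction in seconds

et = (mjd_tt - mjd_j2000) * sec_per_day + tt2tdb
print(et)                        # seconds past J2000.0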
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def mjd2et(self, time):\n time=time+2400000.5\n return (sp.str2et('JD '+repr(time)))",
"def jd_ut2tt(jd_ut):\n return jd_ut + (tt_utc_diff(jd_ut) / SECS_PER_DAY)",
"def mjd2et(self, time):\n\n time=time+2400000.5\n return (sp.str2et('JD '+repr(time)))",
"def tt(self):\n return self.MJD + self.tt_ut1 + 2400000.5",
"def mjdToUT(mjd=None, use_metool=True, prec=6):\n if mjd is None:\n mjdsec = getCurrentMJDSec()\n else:\n mjdsec = mjd*86400\n utstring = mjdSecondsToMJDandUT(mjdsec, use_metool, prec=prec)[1]\n return(utstring)",
"def mjd_to_met(mjd):\n return (mjd-MET.mjd_ref)*86400",
"def _write_antti_datetime(DT, dt_file):\n if dt_file.split('.')[-1] == 'gz':\n ff = gzip.open(dt_file, 'w')\n else:\n ff = open(dt_file, 'w')\n\n ff.write(\"%% Date and time of the geoelectric field distribution. \" +\n \" Data produced on %s\\n\"%(dt.datetime.utcnow()))\n ff.write(\"%% \\n\")\n ff.write(\"%% This data comes together with files BX.txt, BY.txt, LatLon.txt\" +\n \" and Stations.txt. \\n\")\n ff.write(\"%% \\n\")\n ff.write(\"%% Contact: \\n\")\n ff.write(\"%% \\n\")\n ff.write(\"%% The format of the data is as follows:\\n\")\n ff.write(\"%% \\n\")\n ff.write(\"%% year1 month1 day1 hour1 minute1 second1 \\n\")\n ff.write(\"%% year2 month2 day2 hour2 minute2 second2 \\n\")\n ff.write(\"%% . . . . . . \\n\")\n ff.write(\"%% . . . . . . \\n\")\n ff.write(\"%% . . . . . . \\n\")\n ff.write(\"%% \\n\")\n ff.write(\"\\n\")\n\n for d in DT:\n ff.write(\"%02.0f %02.0f %02.0f %02.0f %02.0f %02.0f\\n\"%\n (d.year, d.month, d.day, d.hour, d.minute, d.second))\n\n ff.close()",
"def jd_ut2_j2000(jd_ut):\n return jd_ut2tt(jd_ut) - JD_AT_1_JAN_2000",
"def tt_ut1(self):\n # return the delta time for the input date converted to days\n return interpolate_delta_time(_delta_file, self.tide)",
"def OPCtimetransformOld(data, to):\n outtimes = []\n \n times = {\n 'ms':[],\n 'SS':[],\n 'MM':[],\n 'HH':[]\n }\n for i in range(0, len(data)):\n item = data[i]\n try: \n times['HH'].append(int(item[0:2]))\n times['MM'].append(int(item[2:4]))\n times['SS'].append(int(item[4:6]))\n times['ms'].append(int(item[7:9]))\n except ValueError:\n # strange value 2319010.00 in 201129 file...\n olditem = item\n newitem = item[:4] + item[4+1:]\n print( ('Repairing strange value %s into %s')%(olditem, newitem) )\n try:\n times['HH'].append(int(newitem[0:2]))\n times['MM'].append(int(newitem[2:4]))\n times['SS'].append(int(newitem[4:6]))\n times['ms'].append(int(newitem[7:9]))\n except ValueError:\n print(newitem)\n\n # OPC times go up to 60 minutes. This is corrected by moving one minute\n times['MM'] = [max(0,x-1) for x in times['MM']]\n times['SS'] = [max(0,x-1) for x in times['SS']]\n\n for i in range(0, len(data)):\n md = dt.datetime(1900,1,1,times['HH'][i], times['MM'][i], times['SS'][i]) \n outtimes.append( dt.datetime.strftime(md, to))\n\n return outtimes",
"def convert_time(t):\n return datetime.fromtimestamp(t / 1e7 - 11644473600)",
"def _MET2MJD(self, met): \n return (met - 10676868.60)/86400. + 52033.575",
"def ConvertTime( self ) :\n \n # modules:\n import logging\n import datetime\n import netCDF4\n import numpy\n \n #\n # Original 'Time' units and description:\n #\n # title = \"Time at Start of Scan (s, UNIX time / POSIX time), number of seconds that have elapsed since midnight Coordinated Universal Time (UTC), 1 January 1970.\"\n # units = \"s\"\n #\n # Create new field 'Datetime' field with units:\n # units = \"Seconds since 1970-01-01 00:00'\n #\n # select:\n varid = self.swaths[self.component]['Geolocation Fields']['Time']\n # values:\n tvalues = varid['data']\n # extract description:\n long_name = varid['long_name'].decode('latin-1')\n # check ...\n key = 'Time at Start of Scan (s, UNIX time / POSIX time), number of seconds that have elapsed since midnight Coordinated Universal Time (UTC),'\n if long_name.startswith(key) :\n # remove leading description:\n time0 = long_name.replace(key,'').replace('.','').strip()\n # extract datetime object:\n t0 = datetime.datetime.strptime(time0,'%d %B %Y')\n # convert:\n var = {}\n var['units' ] = t0.strftime('seconds since %Y-%m-%d %H:%M:%H')\n var['long_name'] = long_name\n if 'mask' in dir(tvalues) :\n values1d = netCDF4.num2date( tvalues.data, var['units'] )\n else :\n values1d = netCDF4.num2date( tvalues , var['units'] )\n #endif\n # alternative:\n # \"Time at Start of Scan (s, TAI93)\"\n elif 'TAI' in long_name :\n # find start:\n i0 = long_name.index('TAI')\n # extract:\n year = int(long_name[i0+3:].replace(')',''))\n # convert to 4-digits if necessary:\n if year < 100 :\n if year > 50 :\n year = 1900 + year\n else :\n year = 2000 + year\n #endif\n #endif\n # reference time:\n t0 = datetime.datetime(year,1,1,0,0,0)\n # convert:\n var = {}\n var['units' ] = t0.strftime('seconds since %Y-%m-%d %H:%M:%H')\n var['long_name'] = long_name\n values1d = netCDF4.num2date( tvalues, var['units'] )\n else :\n self.logger.error( 'could not convert time units \"%s\"' % long_name )\n self.logger.error( 'first value : %f' % tvalues[0] )\n raise Exception\n #endif\n \n # expand to 2D:\n var['data'] = numpy.zeros( (self.ntime,self.np), values1d.dtype )\n for ip in range(self.np) :\n var['data'][:,ip] = values1d\n #endfor\n \n # set dim names:\n var['dimnames'] = ('time','pixel')\n \n # store:\n self.swaths[self.component]['Geolocation Fields']['Datetime'] = var",
"def to_eitime(self):\n ts_type = self.ts_types['eitime']\n try:\n dt_obj = duparser.parse(self.timestamp)\n if hasattr(dt_obj.tzinfo, '_offset'):\n dt_tz = dt_obj.tzinfo._offset.total_seconds()\n dt_obj = duparser.parse(self.timestamp, ignoretz=True)\n else:\n dt_tz = 0\n unix_time = int((dt_obj - self.epoch_1970).total_seconds() + int(dt_tz))\n unix_hex = struct.pack(\"<L\", unix_time)\n urlsafe_encode = base64.urlsafe_b64encode(unix_hex)\n self.out_eitime = urlsafe_encode.decode(encoding=\"UTF-8\").strip(\"=\")\n ts_output = str(\"{}\\t\\t\\t{}\".format(ts_type, self.out_eitime))\n except Exception:\n exc_type, exc_obj, exc_tb = sys.exc_info()\n print(str(exc_type) + \" - \" + str(exc_obj) + \" - line \" + str(exc_tb.tb_lineno))\n self.out_eitime = ts_output = False\n return self.out_eitime, ts_output",
"def mjdSecondsToMJDandUT(mjdsec, debug=False, prec=6, delimiter='-'):\n myme = createCasaTool(metool)\n today = myme.epoch('utc','today')\n mjd = np.array(mjdsec) / 86400.\n today['m0']['value'] = mjd\n hhmmss = call_qa_time(today['m0'], prec=prec)\n date = qa.splitdate(today['m0'])\n myme.done()\n utstring = \"%s%s%02d%s%02d %s UT\" % (date['year'],delimiter,date['month'],delimiter,\n date['monthday'],hhmmss)\n return(mjd, utstring)",
"def OPCtimetransform(data, to):\n \n remove_times = []\n outtimes = []\n times = {'ms':[],'SS':[],'MM':[],'HH':[]}\n\n for i in range(0, len(data)):\n times['HH'] = 0\n times['MM'] = 0\n times['SS'] = 0\n times['ms'] = 0\n\n item = data[i]\n \n try:\n if len(item.split('.')[1]) < 2:\n item += '0'\n except IndexError:\n item += '.00'\n if len(item) < 9:\n item = item.zfill(9)\n if int(item[:2]) > 23:\n item = '0' + item\n \n # remove items with extra zero (2319010.00 to 231910)\n if len(item) > 9:\n olditem = item\n newitem = item[:4] + item[5:]\n print( ('Repairing strange value %s into %s')%(olditem, newitem) )\n item = newitem\n else:\n pass\n try:\n md = dt.datetime.strptime(item, \"%H%M%S.%f\")\n \n # round off items which exceed 59 minutes or 59 seconds \n # (i.e. 146001 to 150001.)\n except ValueError:\n \n try:\n times['HH'] = int(item[0:2])\n times['MM'] = int(item[2:4])\n times['SS'] = int(item[4:6])\n times['ms'] = int(item[7:9])\n except ValueError:\n print(i, item)\n\n if times['SS'] > 59:\n times['MM'] += 1\n times['SS'] = 0\n if times['MM'] > 59:\n times['HH'] += 1\n times['MM'] = 0\n # discard items which exceed 23 hours\n if times['HH'] > 23:\n times['HH'] = 23\n print( ('resetting value %s')%(item) )\n \n\n md = dt.datetime(1900,1,1,times['HH'], times['MM'], times['SS']) \n\n \n outtimes.append( dt.datetime.strftime(md, to) )\n\n return outtimes",
"def make_outstr_bp( mjd, utc_tstart_dt, utc_tend_dt, zenith_midtime, airmass, \\\n trtype, moonpos, moondist, moonphase ):\n\n mjd_str = '{0:.2f}'.format( mjd ).center( 8 )\n \n utc_tstart_str = '{0:04d}:{1:02d}:{2:02d}:{3:02d}:{4:02d}:{5:02d}'\\\n .format( utc_tstart_dt.year, \\\n utc_tstart_dt.month, \\\n utc_tstart_dt.day, \\\n utc_tstart_dt.hour, \\\n utc_tstart_dt.minute, \\\n utc_tstart_dt.second )\n utc_tstart_str = utc_tstart_str.center( 19 )\n \n utc_tend_str = '{0:04d}:{1:02d}:{2:02d}:{3:02d}:{4:02d}:{5:02d}'\\\n .format( utc_tend_dt.year, \\\n utc_tend_dt.month, \\\n utc_tend_dt.day, \\\n utc_tend_dt.hour, \\\n utc_tend_dt.minute, \\\n utc_tend_dt.second )\n utc_tend_str = utc_tend_str.center( 19 )\n\n zenith_str = '{0:d}'.format( int( np.round( zenith_midtime ) ) ).center( 6 )\n airmass_str = '{0:.2f}'.format( airmass ).center( 4 )\n trtype_str = trtype.center( 21 )\n moonpos_str = moonpos.center( 12 )\n moondist_str = moondist.center( 9 )\n moonphase_str = moonphase.center( 10 )\n outstr = ' {0} {1} {2} {3} {4} {5} {6} {7} {8}\\n'\\\n .format( mjd_str, \\\n utc_tstart_str, \\\n utc_tend_str, \\\n zenith_str, \\\n airmass_str, \\\n trtype_str, \\\n moonpos_str, \\\n moondist_str, \\\n moonphase_str )\n \n return outstr",
"def met_to_mjd(met):\n return float(met)/86400+MET.mjd_ref",
"def write_db_times(table,data):\n\n duration = -999\n\n # Define Sudbury time zone\n sudburytimezone = pytz.timezone('US/Eastern')\n\n monthname = ('Jan','Feb','Mar','Apr','May','Jun','Jul','Aug','Sep','Oct','Nov','Dec')\n monthid = ('01','02','03','04','05','06','07','08','09','10','11','12')\n\n rows = data['rows']\n\n for value in rows:\n\n timestartstring = value['doc']['sudbury_time_start']\n\n timeendstring = value['doc']['sudbury_time_end']\n\n timestampstartstring = value['doc']['timestamp_start']\n \n timestampendstring = value['doc']['timestamp_end']\n\n if not (timestartstring or timestampstartstring):\n table.write('start_time: \"no start-of-run time information in the run document\"\\n')\n duration = -9\n return duration\n else:\n\n # Put the start time in correct format\n timestartsplit = timestartstring.split(\" \")\n\n startyear = timestartsplit[3]\n\n for i in range(0,12):\n if monthname[i] == timestartsplit[2]:\n startmonth = monthid[i]\n\n startday = timestartsplit[1]\n\n starthour = timestartsplit[4]\n\n starttimezone = timestartsplit[5]\n\n # Create an ISO 8601 compliant string for startdate\n timestampstart = \"{0}-{1}-{2}T{3}{4}\".format(startyear,startmonth,startday,starthour,starttimezone)\n\n # Convert the string to a datetime object\n timestampstart_obj = dateutil.parser.parse(timestampstart)\n\n # If there was no time zone information in the string, assume it's UTC\n if timestampstart_obj.tzinfo is None:\n timestampstart_obj = (pytz.timezone('UTC')).localize(timestampstart_obj)\n\n starttime = timestampstart_obj.astimezone(sudburytimezone).isoformat()\n start_time = \"\\\"start_time\\\": \\\"{0}\\\",\\n\".format(starttime)\n table.write(start_time)\n\n if not (timeendstring or timestampendstring):\n table.write('end_time: \"no end-of-run time information in the run document\"\\n')\n duration = -99\n return duration\n else:\n\n # Put the end time in correct format\n timeendsplit = timeendstring.split(\" \")\n\n endyear = timestartsplit[3]\n\n for i in range(0,12):\n if monthname[i] == timeendsplit[2]:\n endmonth = monthid[i]\n\n endday = timeendsplit[1]\n\n endhour = timeendsplit[4]\n\n endtimezone = timeendsplit[5]\n\n # Create an ISO 8601 compliant string for enddate\n timestampend = \"{0}-{1}-{2}T{3}{4}\".format(endyear,endmonth,endday,endhour,endtimezone)\n\n # Convert the string to a datetime object\n timestampend_obj = dateutil.parser.parse(timestampend)\n\n # If there was no time zone information in the string, assume it's UTC\n if timestampend_obj.tzinfo is None:\n timestampend_obj = (pytz.timezone('UTC')).localize(timestampend_obj)\n\n endtime = timestampend_obj.astimezone(sudburytimezone).isoformat()\n end_time = \"\\\"end_time\\\": \\\"{0}\\\",\\n\".format(endtime)\n table.write(end_time)\n\n timestampdiff = timestampendstring - timestampstartstring\n\n duration = int(round(timestampdiff))\n\n duration_sec = \"\\\"duration_seconds\\\": {0},\\n\".format(int(round(timestampdiff)))\n table.write(duration_sec)\n\n table.write(\"\\n\")\n\n return duration",
"def make_outstr_ch( target, mjd, utc_tstart_dt, utc_tend_dt, zenith_midtime, airmass, \\\n trtype, moonpos, moondist, moonphase ):\n\n target_str = target.replace( ' ', '' ).rjust( 11 )\n mjd_str = '{0:.2f}'.format( mjd ).center( 8 )\n \n utc_tstart_str = '{0:04d}:{1:02d}:{2:02d}:{3:02d}:{4:02d}:{5:02d}'\\\n .format( utc_tstart_dt.year, \\\n utc_tstart_dt.month, \\\n utc_tstart_dt.day, \\\n utc_tstart_dt.hour, \\\n utc_tstart_dt.minute, \\\n utc_tstart_dt.second )\n utc_tstart_str = utc_tstart_str.center( 19 )\n \n utc_tend_str = '{0:04d}:{1:02d}:{2:02d}:{3:02d}:{4:02d}:{5:02d}'\\\n .format( utc_tend_dt.year, \\\n utc_tend_dt.month, \\\n utc_tend_dt.day, \\\n utc_tend_dt.hour, \\\n utc_tend_dt.minute, \\\n utc_tend_dt.second )\n utc_tend_str = utc_tend_str.center( 19 )\n\n zenith_str = '{0:d}'.format( int( np.round( zenith_midtime ) ) ).center( 6 )\n airmass_str = '{0:.2f}'.format( airmass ).center( 4 )\n trtype_str = trtype.center( 21 )\n moonpos_str = moonpos.center( 12 )\n moondist_str = moondist.center( 9 )\n moonphase_str = moonphase.center( 10 )\n outstr = '{0} {1} {2} {3} {4} {5} {6} {7} {8} {9}\\n'\\\n .format( target_str, \\\n mjd_str, \\\n utc_tstart_str, \\\n utc_tend_str, \\\n zenith_str, \\\n airmass_str, \\\n trtype_str, \\\n moonpos_str, \\\n moondist_str, \\\n moonphase_str )\n \n return outstr",
"def convertEPGTime(p_time=\"\", dt_obj=False, epg_fmt=False):\n est_dt = pd.to_datetime(p_time).tz_convert('US/Eastern')\n if dt_obj:\n return est_dt\n if epg_fmt:\n return est_dt.strftime(\"%Y%m%d%H%M%S %z\")\n return est_dt.strftime(\"%Y-%m-%d %I:%M:%S %p\")",
"def fromTdTtoUtc(tdt):\r\n \r\n # Correction lookup table has entry for every even year between TBLfirst and TBLlast\r\n firstCorrectionYear = 1620\r\n lastCorrectionYear = 2002\r\n correctionInSeconds = [\r\n 121, 112, 103, 95, 88, 82, 77, 72, 68, 63, 60, 56, 53, 51, 48, 46, 44, 42, 40, 38, # from 1620\r\n 35, 33, 31, 29, 26, 24, 22, 20, 18, 16, 14, 12, 11, 10, 9, 8, 7, 7, 7, 7, # from 1660 \r\n 7, 7, 8, 8, 9, 9, 9, 9, 9, 10, 10, 10, 10, 10, 10, 10, 10, 11, 11, 11, # from 1700\r\n 11, 11, 12, 12, 12, 12, 13, 13, 13, 14, 14, 14, 14, 15, 15, 15, 15, 15, 16, 16, # from 1740\r\n 16, 16, 16, 16, 16, 16, 15, 15, 14, 13, # from 1780\r\n 13.1, 12.5, 12.2, 12.0, 12.0, 12.0, 12.0, 12.0, 12.0, 11.9, 11.6, 11.0, 10.2, 9.2, 8.2, # from 1800\r\n 7.1, 6.2, 5.6, 5.4, 5.3, 5.4, 5.6, 5.9, 6.2, 6.5, 6.8, 7.1, 7.3, 7.5, 7.6, # from 1830\r\n 7.7, 7.3, 6.2, 5.2, 2.7, 1.4, -1.2, -2.8, -3.8, -4.8, -5.5, -5.3, -5.6, -5.7, -5.9, # from 1860\r\n -6.0, -6.3, -6.5, -6.2, -4.7, -2.8, -0.1, 2.6, 5.3, 7.7, 10.4, 13.3, 16.0, 18.2, 20.2, # from 1890\r\n 21.1, 22.4, 23.5, 23.8, 24.3, 24.0, 23.9, 23.9, 23.7, 24.0, 24.3, 25.3, 26.2, 27.3, 28.2, # from 1920\r\n 29.1, 30.0, 30.7, 31.4, 32.2, 33.1, 34.0, 35.0, 36.5, 38.3, 40.2, 42.2, 44.5, 46.5, 48.5, # from 1950\r\n 50.5, 52.5, 53.8, 54.9, 55.8, 56.9, 58.3, 60.0, 61.6, 63.0, 63.8, 64.3 # from 1980 to 2002\r\n ]\r\n\r\n # Values for Delta T for 2000 thru 2002 from NASA\r\n deltaT = 0 # deltaT = TDT - UTC (in Seconds)\r\n year = tdt.year\r\n t = (year - 2000) / 100.0 # Centuries from the epoch 2000.0\r\n\r\n if year >= firstCorrectionYear and year <= lastCorrectionYear:\r\n # Find correction in table\r\n if year % 2 != 0:\r\n # Odd year - interpolate\r\n deltaT = (correctionInSeconds[(year - firstCorrectionYear - 1) / 2] + correctionInSeconds[(year - firstCorrectionYear + 1) / 2]) / 2\r\n else:\r\n # Even year - direct table lookup\r\n deltaT = correctionInSeconds[(year - firstCorrectionYear) / 2]\r\n elif year < 948:\r\n deltaT = 2177 + 497 * t + 44.1 * (t * t)\r\n elif year >= 948:\r\n deltaT = 102 + 102 * t + 25.3 * (t * t)\r\n if year >= 2000 and year <= 2100:\r\n # Special correction to avoid discontinurity in 2000\r\n deltaT += 0.37 * (year - 2100);\r\n else:\r\n raise ValueError(\"Error: TDT to UTC correction not computed\")\r\n \r\n return tdt - timedelta(seconds = deltaT)",
"def utc_to_unix_time(self, t):\n (y,m,d) = str(t).split('-')\n return datetime(int(y), int(m), int(d)).strftime('%s')",
"def to_gsm(self):\n ts_type = self.ts_types['gsm']\n try:\n dt_obj = duparser.parse(self.timestamp)\n if hasattr(dt_obj.tzinfo, '_offset'):\n dt_tz = dt_obj.tzinfo._offset.total_seconds()\n else:\n dt_tz = 0\n if dt_tz == 0:\n hex_tz = '{:02d}'.format(0)\n elif dt_tz < 0:\n dt_tz = dt_tz / 3600\n conversion = str('{:02d}'.format(int(abs(dt_tz)) * 4))\n conversion_list = []\n for char in range(len(conversion)):\n conversion_list.append(conversion[char])\n high_order = '{0:04b}'.format(int(conversion_list[0]))\n low_order = '{0:04b}'.format(int(conversion_list[1]))\n high_order = '{0:04b}'.format(int(high_order, 2) + 8)\n hex_tz = hex(int((high_order + low_order), 2)).lstrip('0x').upper()\n else:\n dt_tz = dt_tz / 3600\n conversion = str(int(dt_tz) * 4)\n conversion_list = []\n for char in range(len(conversion)):\n conversion_list.append(conversion[char])\n high_order = '{0:04b}'.format(int(conversion_list[0]))\n low_order = '{0:04b}'.format(int(conversion_list[1]))\n hex_tz = hex(int((high_order + low_order), 2)).lstrip('0x').upper()\n date_list = [str(dt_obj.year - 2000),\n '{:02d}'.format(dt_obj.month),\n '{:02d}'.format(dt_obj.day),\n '{:02d}'.format(dt_obj.hour),\n '{:02d}'.format(dt_obj.minute),\n '{:02d}'.format(dt_obj.second), hex_tz]\n date_value_swap = []\n for value in date_list[:]:\n be = value[::-1]\n date_value_swap.append(be)\n self.out_gsm = ''.join(date_value_swap)\n ts_output = str(\"{}\\t\\t\\t{}\".format(ts_type, self.out_gsm))\n except Exception:\n exc_type, exc_obj, exc_tb = sys.exc_info()\n print(str(exc_type) + \" - \" + str(exc_obj) + \" - line \" + str(exc_tb.tb_lineno))\n self.out_gsm = ts_output = False\n return self.out_gsm, ts_output",
"def format_data(self, raw_data):\n opz = raw_data.copy()\n opz['datetime'] = pd.to_datetime(opz['Datum-tijd'], format='%Y-%m-%dT%H:%M:%SZ')\n opz.drop(['Datum-tijd'],axis=1, inplace=True)\n opz['dag']=opz['datetime'].dt.day\n opz['tijd'] = opz['datetime'].dt.time\n #voeg open/dicht data toe en bepaal momenten waarop dit wisselt\n opz['Opzetstuk Noord (°)'] = opz['Opzetstuk Noord (°)'].str.replace(',', '.').astype(float)\n opz['Opzetstuk Zuid (°)'] = opz['Opzetstuk Zuid (°)'].str.replace(',', '.').astype(float)\n opz['Opzetstuk Noord (°)'].fillna(opz['Opzetstuk Zuid (°)'], inplace=True)\n opz['Opzetstuk Zuid (°)'].fillna(opz['Opzetstuk Noord (°)'], inplace=True)\n return opz",
"def convert_time(slog_time_str):\n \n base_time = datetime.datetime(2007, 1, 1)\n delta = datetime.timedelta(0, float(slog_time_str))\n \n timestamp = base_time + delta\n taml_dtg = timestamp.strftime('%Y-%m-%dT%H:%M:%S')\n return taml_dtg",
"def _datetime2et(time: datetime) -> float:\n if isinstance(time, float):\n return time\n if not isinstance(time, datetime):\n raise TypeError(\"Time must be a float or a datetime object.\")\n return spy.str2et(time.isoformat())",
"def unixTime2JD(ts, tu):\n\n return date2JD(*unixTime2Date(ts, tu))",
"def _filetime_to_dt(ft):\r\n # Get seconds and remainder in terms of Unix epoch\r\n s, ns100 = divmod(ft - EPOCH_AS_FILETIME, HUNDREDS_OF_NANOSECONDS)\r\n # Convert to datetime object\r\n dt = datetime.utcfromtimestamp(s)\r\n # Add remainder in as microseconds. Python 3.2 requires an integer\r\n dt = dt.replace(microsecond=(ns100 // 10))\r\n return dt",
"def entries_from_goes_ts_file2(file, default_waveunit=None):\n\n headers = fits.get_header(file)\n if isinstance(file, (str, six.text_type)):\n filename = file\n else:\n filename = getattr(file, 'name', None)\n\n statinfo = os.stat(file)\n #print('a header')\n entry = DatabaseEntry(path=filename)\n size = statinfo.st_size\n\n # Add/tweak start/end entries for GOES\n if headers[0].get('TELESCOP','') != '':\n #header['INSTRUME'] = header['TELESCOP']# So E.G. 'GOES 6' instead 'X-ray Detector'\n entry.instrument = headers[0]['TELESCOP']\n if (headers[0].get('DATE-OBS','') != ''):\n if is_time_in_given_format(headers[0]['DATE-OBS'], '%d/%m/%Y'):\n start_time = datetime.strptime(headers[0]['DATE-OBS'], '%d/%m/%Y')\n elif is_time_in_given_format(headers[0]['DATE-OBS'], '%d/%m/%y'):\n start_time = datetime.strptime(headers[0]['DATE-OBS'], '%d/%m/%y')\n else:\n start_time = parse_time(headers[0]['DATE-OBS'])\n elif (headers[1].get('DATE-OBS','') != ''):\n if is_time_in_given_format(headers[1]['DATE-OBS'], '%d/%m/%Y'):\n start_time = datetime.strptime(headers[1]['DATE-OBS'], '%d/%m/%Y')\n elif is_time_in_given_format(headers[1]['DATE-OBS'], '%d/%m/%y'):\n start_time = datetime.strptime(headers[1]['DATE-OBS'], '%d/%m/%y')\n else:\n start_time = parse_time(headers[1]['DATE-OBS'])\n\n if (headers[0].get('DATE-END','') != ''):\n if is_time_in_given_format(headers[0]['DATE-END'], '%d/%m/%Y'):\n end_time = datetime.strptime(headers[0]['DATE-END'], '%d/%m/%Y')\n elif is_time_in_given_format(headers[0]['DATE-END'], '%d/%m/%y'):\n end_time = datetime.strptime(headers[0]['DATE-END'], '%d/%m/%y')\n else:\n end_time = parse_time(headers[0]['DATE-END'])\n elif (headers[1].get('DATE-END','') != ''):\n if is_time_in_given_format(headers[1]['DATE-END'], '%d/%m/%Y'):\n end_time = datetime.strptime(headers[1]['DATE-END'], '%d/%m/%Y')\n elif is_time_in_given_format(headers[1]['DATE-END'], '%d/%m/%y'):\n end_time = datetime.strptime(headers[1]['DATE-END'], '%d/%m/%y')\n else:\n end_time = parse_time(headers[1]['DATE-END'])\n else:\n end_time = start_time + timedelta(days=1,seconds=-1)\n\n # Add these to the entry\n observation_time_start = start_time\n observation_time_end = end_time\n\n wavemax = 0.8 # XRSB '1.0--8.0 $\\AA$'\n wavemin = 0.05 # XRSA '0.5--4.0 $\\AA$'\n\n metadata = MetaDict(headers[1])\n #entry.tags = sunpy.database.attrs.Tag('raw')\n\n entry = DatabaseEntry(observation_time_start=start_time,\n observation_time_end = end_time,\n instrument='EIT',\n wavemin=wavemin,\n wavemax=wavemax,\n metadata=metadata,\n size=size)\n\n return entry"
] |
[
"0.6171598",
"0.6124459",
"0.6097692",
"0.60189444",
"0.58888805",
"0.5768944",
"0.5681068",
"0.56782854",
"0.56636965",
"0.5663579",
"0.5656989",
"0.5644563",
"0.5634823",
"0.5623353",
"0.5617294",
"0.5596455",
"0.55814743",
"0.55583227",
"0.5466163",
"0.53965133",
"0.53805786",
"0.53609616",
"0.53599703",
"0.5342653",
"0.5338269",
"0.53326416",
"0.53229463",
"0.5258835",
"0.52570695",
"0.52535605"
] |
0.7648814
|
0
|
Suggest a default file name for a results (dac) file. If the file is in a '/femdata' folder, the respective '/results' folder is suggested. Otherwise the file is simply renamed from '.fem' to '.dac'.
|
def suggest_dac_filename(self, relative=False):
fempath = os.path.abspath(self.doc.getProblemPath())
folders = os.path.dirname(fempath).split(os.path.sep)
if folders[-1] == "femdata":
folders[-1] = "results"
folder = os.path.sep.join(folders)
dacpath = folder + os.path.sep + os.path.basename(fempath).replace(".fem", ".dac")
else:
dacpath = fempath.replace(".fem", ".dac")
if relative:
return os.path.relpath(dacpath)
else:
return os.path.abspath(dacpath)
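The same path mapping as a standalone sketch, without the document wrapper; the example paths are hypothetical and POSIX-style:

import os

def _suggest_dac(fempath):
    # Mirror of the logic above: swap a trailing 'femdata' folder for 'results'.
    folders = os.path.dirname(os.path.abspath(fempath)).split(os.path.sep)
    if folders[-1] == "femdata":
        folders[-1] = "results"
        folder = os.path.sep.join(folders)
        return folder + os.path.sep + os.path.basename(fempath).replace(".fem", ".dac")
    return os.path.abspath(fempath).replace(".fem", ".dac")

print(_suggest_dac("/project/femdata/model.fem"))   # -> /project/results/model.dac
print(_suggest_dac("/project/other/model.fem"))     # -> /project/other/model.dac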
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def get_default_file_path(file_name: str) -> str:\n return join(SOURCE_PATH, 'data', file_name)",
"def set_result_file(self, file_name):\n if file_name is not None:\n self.opts[\"result_file_name\"] = file_name\n else:\n self.opts[\"result_file_name\"] = \"\"",
"def guess(filename):\n for marker in [\".stem\",\"stem.\",\".seed\",\"seed.\"]:\n if filename.find(marker)>-1: \n return (filename.replace(marker,\"\"))\n\n if \"/\" in filename:\n index = filename.rfind(\"/\")\n return ( filename[:index+1]+\"generated_\"+filename[index+1:])\n else:\n return ( \"generated_\"+filename )",
"def select_results_file(self) -> None:\n from PyQt5.QtWidgets import QFileDialog\n from PyQt5.QtCore import QUrl, QDir\n \n \n # Get results file\n fname, _ = QFileDialog.getOpenFileName(self, caption='Choose KVFinder Results File', directory=os.getcwd(), filter=\"KVFinder Results File (*.KVFinder.results.toml)\")\n\n if fname:\n fname = QDir.toNativeSeparators(fname)\n if os.path.exists(fname):\n self.vis_results_file_entry.setText(fname)\n\n return",
"def _get_default_path(self):\n return os.path.join(cfg.DATA_DIR, 'visual_genome')",
"def getDefaultOutputFileName(inputFileName):\n baseName = os.path.basename(inputFileName)\n rootName = os.path.splitext(baseName)[0]\n return string.join([rootName, \"xls\"], '.')",
"def getDefaultOutputFileName(inputFileName):\n\tbaseName = os.path.basename(inputFileName)\n\trootName = os.path.splitext(baseName)[0]\n\treturn string.join([rootName, \"xls\"], '.')",
"def getDefaultDataSearchPath():\n return FileSearchPath(os.path.dirname(__file__))",
"def fuzzy_search_datafile(\n self, data_store, datafile_name, datafile_type, privacy, change_id\n ):\n datafiles = data_store.session.query(data_store.db_classes.Datafile).all()\n completer = [datafile.reference for datafile in datafiles]\n choice = create_menu(\n \"Please start typing to show suggested values\",\n cancel=\"datafile search\",\n choices=[],\n completer=FuzzyWordCompleter(completer),\n )\n if datafile_name and choice in completer:\n new_choice = create_menu(\n f\"Do you wish to keep {datafile_name} as synonym for {choice}?\",\n [\"Yes\", \"No\"],\n )\n if new_choice == str(1):\n datafile = (\n data_store.session.query(data_store.db_classes.Datafile)\n .filter(data_store.db_classes.Datafile.reference == choice)\n .first()\n )\n # Add it to synonyms and return existing datafile\n data_store.add_to_synonyms(\n constants.DATAFILE, datafile_name, datafile.datafile_id, change_id\n )\n print(f\"'{datafile_name}' added to Synonyms!\")\n return datafile\n elif new_choice == str(2):\n return self.add_to_datafiles(\n data_store, datafile_name, datafile_type, privacy, change_id\n )\n elif new_choice == \".\":\n print(\"-\" * 61, \"\\nReturning to the previous menu\\n\")\n return self.fuzzy_search_datafile(\n data_store, datafile_name, datafile_type, privacy, change_id\n )\n elif choice == \".\":\n print(\"-\" * 61, \"\\nReturning to the previous menu\\n\")\n return self.resolve_datafile(\n data_store, datafile_name, datafile_type, privacy, change_id\n )\n elif choice not in completer:\n print(f\"'{choice}' could not found! Redirecting to adding a new datafile..\")\n return self.add_to_datafiles(\n data_store, choice, datafile_type, privacy, change_id\n )",
"def test_answerfile_default(tmpdir):\n\n _test_answerfile(tmpdir, \"cgcs_config.default\")",
"def handle_file_name(self):\r\n self.tmp_name = (os.path.basename(self.source_file_name)).split('.')[0]\r\n result_name = self.tmp_name + '_result_'\r\n log_name = self.tmp_name + '_log.csv'\r\n \r\n self.result_file_name = os.path.join(self.save_path , result_name) \r\n self.log_file_name = os.path.join(self.log_path , log_name)",
"def _getResultsFileName(self, toilPath):\n return os.path.join(toilPath, \"results.txt\")",
"def FindDataFile(filename):\n filename = os.path.expanduser(filename)\n if os.path.exists(filename):\n return filename\n\n # If it's not a relative path, we can't do anything useful.\n if os.path.isabs(filename):\n return filename\n\n other_places = [os.getcwd(),\n os.path.join(os.path.dirname(os.path.dirname(sys.argv[0])), 'Contents', 'Resources'),\n os.path.join(os.getcwd(), 'namebench.app', 'Contents', 'Resources'),\n os.path.join(os.getcwd(), '..'),\n os.path.join(sys.prefix, 'namebench'),\n '/usr/local/share/namebench'\n '/usr/local/etc/namebench',\n '/usr/local/namebench',\n '/etc/namebench',\n '/usr/share/namebench',\n '/usr/namebench']\n for directory in reversed(sys.path):\n other_places.append(directory)\n other_places.append(os.path.join(directory, 'namebench'))\n\n for place in other_places:\n path = os.path.join(place, filename)\n if os.path.exists(path):\n return path\n\n print 'I could not find \"%s\". Tried:' % filename\n for path in other_places:\n print ' %s' % path\n return filename",
"def _get_default_path(self):\n # return os.path.join(datasets.ROOT_DIR, 'data', 'MSRC21')\n # set local path\n return u'/Users/danilonunes/workspace/datasets/msrc21/'",
"def get_default_data_dir(self):\n data_dir_path = os.path.join(self.comicsite.short_name,self.folder_prefix,self.cleantitle)\n return data_dir_path",
"def autodetect(data_file, filename=None):\n\n if not filename:\n if type(data_file) == str:\n filename = data_file\n elif hasattr(data_file, 'name'):\n filename = data_file.name\n elif hasattr(data_file, 'filename'):\n filename = data_file.filename\n\n file_ext = filename.split('.')[-1].lower()\n\n if file_ext and file_ext == 'xls':\n temp_csv_path, xls_read_mode = util.xls_to_csv(data_file)\n fid = open(temp_csv_path, 'rb')\n lines = [fid.readline() for i in range(3)]\n fid.close()\n os.remove(temp_csv_path)\n\n else:\n if type(data_file) == str:\n fid = open(data_file, 'r')\n else:\n fid = data_file\n\n file_initial_location = fid.tell()\n fid.seek(0)\n lines = [fid.readline() for i in range(3)]\n fid.seek(file_initial_location)\n\n\n if lines[0].lower().find('greenspan') != -1:\n return 'greenspan'\n if lines[0].lower().find('macroctd') != -1:\n return 'macroctd'\n if lines[0].lower().find('minisonde4a') != -1:\n return 'hydrotech'\n if lines[0].lower().find('data file for datalogger.') != -1:\n return 'solinst'\n if lines[0].find('Serial_number:')!= -1 and lines[2].find('Project ID:')!= -1:\n return 'solinst'\n if lines[0].lower().find('log file name') != -1:\n return 'hydrolab'\n if lines[0].lower().find('pysonde csv format') != -1:\n return 'generic'\n # possible binary junk in first line of hydrotech file\n if lines[1].lower().find('log file name') != -1:\n return 'hydrotech'\n if lines[0].lower().find('the following data have been') != -1:\n return 'lcra'\n\n # ascii files for ysi in brazos riv.\n if lines[0].find('espey') != -1:\n return 'espey'\n\n #check for ysi:\n # binary\n if lines[0][0] == 'A':\n return 'ysi_binary'\n # txt file\n if lines[0].find('=') != -1:\n return 'ysi_text'\n if lines[0].find('##YSI ASCII Datafile=') != -1:\n return 'ysi_ascii'\n # cdf file\n if file_ext and file_ext == 'cdf':\n return 'ysi_cdf'\n if lines[0].find(\"Date\") > -1 and lines[1].find(\"M/D/Y\") > -1:\n return 'ysi_csv'\n\n #eureka try and detect degree symbol\n if lines[1].find('\\xb0') > -1 or lines[2].find('Manta') > -1 or \\\n lines[0].find('Start time : ') > -1:\n return 'eureka'\n\n # files from various intruments processed by an old script.\n if lines[0].lower().find('request date') != -1:\n return 'midgewater'\n else:\n return False",
"def _get_default_path(self):\n return os.path.join(cfg.DATA_DIR, 'vehicles_dataset_v{}'.format(self._version))",
"def data_abex_results_dir(experiment_name: str) -> Path: # pragma: no cover\n return experiment_dir(experiment_name) / \"Results\"",
"def update_filename(instance, filename):\n path = os.path.join(\"documents_analizer\", \"documents\")\n name = \"{}{}\".format(highly_random_name(),\n os.path.splitext(filename)[1])\n return os.path.join(path, name)",
"def defaultFile(self):\n filename = _odb.getCurrentFrame().filename\n if filename == '<string>' and self.mainpyfile:\n filename = self.mainpyfile\n return filename",
"def get_result_file_type(file):\n if file.endswith('.res'):\n file_type = 'results'\n print('\\n--------------\\nResults file detected!\\n--------------')\n elif file.endswith('.req'):\n file_type = 'request'\n print('\\n--------------\\nRequst file detected!\\n--------------')\n else:\n raise AviewError('Files must be .res or .req files.')\n return file_type",
"def SaveAsAnyFile(default_dir=None, default_file=None):\n\n if default_dir is None:\n default_dir = os.path.dirname(paths.sppas)\n\n if default_file is None:\n default_file = \"newfile.txt\"\n\n save_file = None\n wildcard = create_wildcard(\"All files\", ['*', '*.*'])\n dlg = wx.FileDialog(\n None,\n message=\"Choose a file name...\",\n defaultDir=default_dir,\n defaultFile=default_file,\n wildcard=wildcard,\n style=wx.FD_SAVE | wx.FD_CHANGE_DIR )\n\n if dlg.ShowModal() == wx.ID_OK:\n save_file = dlg.GetPath()\n\n dlg.Destroy()\n\n return save_file",
"def SaveAsAnnotationFile(default_dir=None, default_file=None):\n\n if default_dir is None:\n default_dir = os.path.dirname(paths.sppas)\n\n if default_file is None:\n default_file = \"newfile.xra\"\n\n save_file = None\n\n wildcard = create_wildcard(\"All files\", sppas.src.anndata.aio.extensions_out)\n wildcard += '|'+create_wildcard(\"SPPAS\", sppas.src.anndata.aio.ext_sppas)\n wildcard += '|'+create_wildcard(\"Praat\", sppas.src.anndata.aio.ext_praat)\n wildcard += '|'+create_wildcard(\"ELAN\", sppas.src.anndata.aio.ext_elan)\n wildcard += '|'+create_wildcard(\"Phonedit\", sppas.src.anndata.aio.ext_phonedit)\n wildcard += '|'+create_wildcard(\"ASCII\", sppas.src.anndata.aio.ext_ascii)\n wildcard += '|'+create_wildcard(\"AnnotationPro\", sppas.src.anndata.aio.ext_annotationpro)\n wildcard += '|'+create_wildcard(\"Subtitles\", sppas.src.anndata.aio.ext_subtitles)\n\n dlg = wx.FileDialog(\n None, message=\"Choose a file name...\",\n defaultDir=default_dir,\n defaultFile=default_file,\n wildcard=wildcard,\n style=wx.FD_SAVE | wx.FD_CHANGE_DIR )\n\n if dlg.ShowModal() == wx.ID_OK:\n save_file = dlg.GetPath()\n\n dlg.Destroy()\n\n return save_file",
"def find_default(self, fs_path):\n if os.path.isdir(fs_path):\n default = None\n for name in self.defaults:\n _path = os.path.join(fs_path, name)\n if os.path.isfile(_path):\n default = _path\n break\n if default is None:\n raise Response(403)\n fs_path = default\n return fs_path",
"def GetResultFile(self):\n\n file_path = self.configfile.map['ResultFilePath']\n\n # Check if several entrie\n if file_path is not None:\n if len(file_path) > 1:\n warning(\n 'Many path for the result file are setted ({}), I will take the first one'\n .format(file_path))\n file_path = file_path[0]\n\n # If the storing file is elsewhere\n if file_path != \"#\":\n sys.path.insert(0, file_path)\n base = DBASE.open('Anna')\n\n if base is not None:\n return base\n else:\n error(\n 'Cannot find Anna file in {}'\n .format(file_path))\n return None\n\n else:\n base = DBASE.open('Anna')\n if base is not None:\n return base\n else:\n error(\n 'Cannot find Anna file in {}'\n .format(file_path))\n return None",
"def _guess_log_path(hint: Union[str, pathlib.Path]=None) -> pathlib.Path:\n path = pathlib.Path(hint) if hint else None\n home = os.environ.get('HOME', os.environ.get('USERPROFILE'))\n\n if path and path.exists():\n if path.is_file():\n return path\n elif not path.is_dir():\n return None\n elif home:\n path = pathlib.Path(home) / 'Documents'\n else:\n return None\n\n results = list(path.glob('PwrData*.csv'))\n\n if results:\n return sorted(results)[-1]",
"def default():\n raise NotImplementedError(\"Pvwattsv7 default file no longer exists!\")",
"def prompt_report_file_name(self):\n while True:\n report_file = input(\"Enter name for your report file: \")\n if os.path.isfile(report_file):\n print(\"'{}' is already exist!\".format(report_file))\n else:\n break\n return report_file",
"def fullpath(data_folder, name):\n return os.path.join(data_folder, f\"{alias(name)}.json\")",
"def fixfile(dir, file):\n # First check the existance of common capitalized ways to avoid searching\n # the directory if possible\n names = get_basic_case_combos(file)\n for name in names:\n if os.path.exists(os.path.join(dir, name)):\n return name\n # Failing that, compare with each file in the directory.\n names = os.listdir(dir)\n for name in names:\n if name.upper() == file.upper():\n return name\n\n return None"
] |
[
"0.5992798",
"0.56903106",
"0.5443243",
"0.54410255",
"0.54301697",
"0.5357587",
"0.5340983",
"0.53233373",
"0.5294057",
"0.524858",
"0.5244192",
"0.52249396",
"0.51978284",
"0.5184426",
"0.51652",
"0.51495284",
"0.5128286",
"0.512668",
"0.50775504",
"0.5075263",
"0.5066384",
"0.5054406",
"0.5047603",
"0.5039177",
"0.5026485",
"0.50214016",
"0.5009379",
"0.4997139",
"0.4972362",
"0.49711"
] |
0.73746794
|
0
|
Load the first time step after the point in time provided by 'time'
|
def load_first_ts_after(self, time):
    # get time step list
    df_ts = self.doc.c.sim.df.time_steps()
    if type(time) in [float, int]:
        if len(df_ts[df_ts.simulation_time > time]) == 0:
            raise RuntimeError("{} contains no timestep after {} d".format(self.doc.c.original_filename, time))
        else:
            ts_no = int(df_ts[df_ts.simulation_time > time].reset_index().iloc[0].file_index)
            self.doc.loadTimeStep(ts_no)
            return df_ts[df_ts.simulation_time > time].reset_index().iloc[0]
    elif type(time) == datetime:
        if len(df_ts[df_ts.simulation_date > time]) == 0:
            raise RuntimeError("{} contains no timestep after {}".format(self.doc.c.original_filename, time))
        else:
            ts_no = int(df_ts[df_ts.simulation_date > time].reset_index().iloc[0].file_index)
            self.doc.loadTimeStep(ts_no)
            return df_ts[df_ts.simulation_date > time].reset_index().iloc[0]
    else:
        raise ValueError("parameter 'time' must be of type float (simulation time in days) or datetime.datetime")
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def set_first_machine_time_step(self, first_machine_time_step):",
"def onTimeStepStart(self, timeStep):\n pass",
"def next_step(self):\n if self.time_point + 1 >= len(self.data):\n print(\"Error: at last time point\")\n else:\n self.time_point = self.time_point + 1\n self.load_frame()",
"def first_tick(self, time):\n pass",
"def test_first_timestep(self):\n m = build_model({}, \"simple_supply,two_hours,investment_costs\")\n m.run(build_only=True)\n timestep_0 = \"2005-01-01 00:00\"\n assert m._backend_model.timesteps.ord(timestep_0) == 1",
"def onTimeStepStart(self, timeStep):\n self.queuedInTimeStep = 0\n self.dequeuedInTimeStep = 0\n \n pass",
"def pre_step(self,status):\n self.t0 = time.time()\n pass",
"def observe_first(self, timestep: dm_env.TimeStep) -> None:\n self._last_timestep = timestep\n if self._logger is not None:\n self._logger.info('START')\n self._reset_deck()",
"def onTimeStep(self, timeStep):\n pass",
"def start(self, step=None):\n\n if step is None:\n while True:\n next_event = self._pop_next_event()\n if next_event:\n self.current_time = next_event.datetime\n next_event.call()\n else:\n break\n else:\n # TODO: this is not right...\n while True:\n run_to = self.current_time + step\n while True:\n next_event = self._pop_next_event(run_to)\n if next_event:\n next_event.call()\n else:\n break\n print \"{time} Simulation Finished\".format(time=self.current_time)",
"def before(self, time: float) -> 'Trajectory':\n return self.split(time)[0]",
"def _timestep_before_hook(self, *args, **kwargs):\n pass",
"def step_time(self, timestep=None):\n if self._plumb is not None:\n self._plumb.step(timestep)\n self.update_conditions()\n\n # TODO(jacob): Consider if this function should return anything\n # about the state of the system: plumbing engine state, current\n # time, etc.",
"def prepare_for_model_step(self, model_time):\n model_time = date_to_sec(model_time)\n self.grid.set_interval(model_time)",
"def step(self, value):\n self.real_time += pd.DateOffset(**{self.time_unit: value})\n self.simu_time += value\n logger.debug(\"NEW TIME\")",
"def step(self):\n self.schedule.step()",
"def reset(self, time):\n for key in self.data['step']:\n self.data['step'][key] = None\n\n self.time = time",
"def get_next_known_start_time(self, current_time):\n raise NotImplementedError()",
"def time(self, step: int) -> float:\n return self._start_time + self._parameters.dt*(step - self._start_step)",
"def initialTime(self):\n return self.params['t0']",
"def dt(self, _):\n raise NotImplementedError(\n \"We do not support setting dt/ time step except during setup\")",
"def _map_timestep2timeind(self, timestep):\n if not self.simulation and timestep not in self.timemap:\n # for steady state computation include year 0 or first 12 months\n if self.md.litter_mode=='monthly':\n incl = range(1, 13)\n infall = self.md.monthly_litter\n elif self.md.litter_mode=='yearly':\n incl = [0]\n infall = self.md.yearly_litter\n for ind in range(len(infall)):\n if infall[ind].timestep in incl:\n self.timemap[timestep].append(ind)\n if timestep not in self.timemap and self.md.litter_mode=='yearly':\n # if no year 0 specification, use the one for year 1\n for ind in range(len(infall)):\n if infall[ind].timestep==1:\n self.timemap[timestep].append(ind)\n if self.simulation and timestep not in self.timemap:\n # now for the simulation run\n now, end = self._get_now_and_end(timestep)\n if self.md.duration_unit=='month':\n dur = relativedelta(months=self.timestep_length)\n elif self.md.duration_unit=='year':\n dur = relativedelta(years=self.timestep_length)\n end = now + dur - relativedelta(days=1)\n if self.md.litter_mode=='monthly':\n inputdur = relativedelta(months=1)\n infall = self.md.monthly_litter\n elif self.md.litter_mode=='yearly':\n inputdur = relativedelta(years=1)\n infall = self.md.yearly_litter\n # the first mont/year will have index number 1, hence deduce 1 m/y\n start = STARTDATE - inputdur\n for ind in range(len(infall)):\n incl = self._test4inclusion(ind, infall, now, start, end)\n if incl:\n self.timemap[timestep].append(ind)\n # check for possible area reductions to be mapped\n areachange = self.md.area_change\n for ind in range(len(areachange)):\n incl = self._test4inclusion(ind, areachange, now, start, end)\n if incl:\n self.area_timemap[timestep].append(ind)\n if timestep not in self.timemap:\n self.timemap[timestep] = []\n if timestep not in self.area_timemap:\n self.area_timemap[timestep] = []\n return self.timemap[timestep]",
"def _step(self, action: types.NestedArray) -> ts.TimeStep:",
"def __firstRun(self, t1, t2):\n # this is the first run - initialize the timestep manager of metafor\n tsm = self.metafor.getTimeStepManager()\n dt = t2-t1 # time-step size\n dt0 = dt # initial time step\n dtmax = dt # maximum size of the time step\n tsm.setInitialTime(t1, dt0)\n tsm.setNextTime(t2, 1, dtmax)\n # launches metafor from t1 to t2\n #meta() # use toolbox.utilities\n log = LogFile(\"resFile.txt\")\n self.runOK = self.metafor.getTimeIntegration().integration()\n # at this stage, 2 archive files have been created in the workspace",
"def start_time(self) -> float:\r\n ...",
"def __init__(self, step_time, step=None):\n self.step_vector = step\n self.step_time = step_time\n self.ref_timer = None",
"def readFirst(self):\n return self.models[0].time_next",
"def compute_time_step():\n\n dt = Hydro.compute_time_step()\n\n return dt",
"def first_order_posint(self, timestep):\n self.prev_pos = self.position\n self.position = self.position + (self.velocity * timestep)",
"def step(self, action: types.NestedArray) -> ts.TimeStep:\n if self._current_time_step is None or self.should_reset(\n self._current_time_step\n ):\n return self.reset()\n\n self._current_time_step = self._step(action)\n return self._current_time_step"
] |
[
"0.71698123",
"0.6915467",
"0.6753831",
"0.661643",
"0.6487823",
"0.6473755",
"0.64649963",
"0.6445686",
"0.6356552",
"0.6292669",
"0.609172",
"0.607975",
"0.59767365",
"0.5927678",
"0.58358914",
"0.58277494",
"0.58078086",
"0.5761383",
"0.57262194",
"0.5708094",
"0.5700845",
"0.56679344",
"0.56639886",
"0.5643964",
"0.56354713",
"0.5626885",
"0.56252044",
"0.5622874",
"0.5605731",
"0.5597102"
] |
0.76748043
|
0
|
Converts a calendar time (datetime) to simulation time (in days since the reference time). Requires that the Reference Time is set in the model.
|
def calendar_to_simtime(self, calendar_time):
time_ref = self.doc.getReferenceTime()
if time_ref is None:
raise (RuntimeError("Reference time not set in model"))
return (calendar_time - time_ref).total_seconds() / (24 * 60 * 60)
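The same conversion without the model document, using an assumed reference time; the result is a simulation time in days:

from datetime import datetime

time_ref = datetime(2020, 1, 1)                 # assumed reference time
calendar_time = datetime(2020, 1, 2, 12, 0)     # 1.5 days later
sim_time = (calendar_time - time_ref).total_seconds() / (24 * 60 * 60)
print(sim_time)   # 1.5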
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def simtime_to_calendar(self, sim_time):\n time_ref = self.doc.getReferenceTime()\n if time_ref is None:\n raise (RuntimeError(\"Reference time not set in model\"))\n\n return time_ref + timedelta(sim_time)",
"def getAbsoluteSimulationTimeCalendar(self):\n if self.doc.getReferenceTime() is None:\n raise ValueError(\"Reference Time not set in FEFLOW model.\")\n\n self.doc.getReferenceTime() + datetime.timedelta(days=self.doc.getAbsoluteSimulationTime())",
"def calculate_hours(time):\n return int(time / 3600)",
"def get_chime_time(self):\n actual_time = datetime(year=self.start_time.year, month=self.start_time.month, day=self.start_time.day,\n hour=self.start_time.hour, minute=0, second=0, microsecond=0)\n if self.start_time.minute > 30:\n actual_time = actual_time + timedelta(hours=1)\n return actual_time",
"def time(self, the_datetime=None):\n if the_datetime:\n if self._request(\n 'S1',\n the_datetime.strftime('%y'), str(the_datetime.month), str(the_datetime.day),\n str(the_datetime.hour), str(the_datetime.minute), str(the_datetime.second)\n )[0]:\n return the_datetime\n else:\n done, data = self._request('GT')\n if done:\n if data == ['165', '165', '165', '165', '165', '85']:\n raise NoClock\n return datetime.datetime(\n year=int(data[0])+2000, month=int(data[1]), day=int(data[2]),\n hour=int(data[3]), minute=int(data[4]), second=int(data[5])\n )\n\n raise EvseError",
"def to_hours(self, timesteps, to_label=False):\n out = timesteps*self.dt/(60*60)\n if to_label:\n out = [ '{:.2f} hours'.format(el) for el in out ]\n return out",
"def to_hours(self, timesteps, to_label=False):\n out = timesteps*self.dt/(60*60)\n if to_label:\n out = [ '{:.2f} hours'.format(el) for el in out ]\n return out",
"def to_hours(self, timesteps, to_label=False):\n out = timesteps*self.dt/(60*60)\n if to_label:\n out = [ '{:.2f} hours'.format(el) for el in out ]\n return out",
"def to_hours(self, timesteps, to_label=False):\n out = timesteps*self.dt/(60*60)\n if to_label:\n out = [ '{:.2f} hours'.format(el) for el in out ]\n return out",
"def _clock_time(self):\n return self._shifted_time % (24*3600)",
"def get_datetime(hours):\n return datetime.datetime.utcfromtimestamp(hours * 60 * 60)",
"def seconds2hours(time_in_seconds):\n seconds_since_midnight = np.mod(time_in_seconds, SECONDS_PER_DAY)\n fraction_hour = seconds_since_midnight/SECONDS_PER_HOUR\n if fraction_hour[-1] == 0:\n fraction_hour[-1] = 24\n return fraction_hour",
"def _get_number_of_hours(self):\n if self.date_to:\n DATETIME_FORMAT = \"%Y-%m-%d %H:%M:%S\"\n from_dt = datetime.strptime(self.date_from, DATETIME_FORMAT)\n to_dt = datetime.strptime(self.date_to, DATETIME_FORMAT)\n timedelta = to_dt - from_dt\n diff_day =(float(timedelta.seconds) / 3600) - self.break_hour\n self.number_of_hours_temp = diff_day",
"def duration(self):\n delta = self.occurrence.end - self.occurrence.start\n real_hours = delta.days * 24 + delta.seconds / (60.0 * 60.0)\n\n adjusted_hours = attendance_settings.HOUR_MULTIPLIER * real_hours\n\n return adjusted_hours",
"def get_time(self) -> int:\n t = str(self.eval(\"pyb.RTC().datetime()\").encode(\"utf-8\"))[1:-1].split(\", \")\n return int(t[4]) * 3600 + int(t[5]) * 60 + int(t[6])",
"def remaintime_hour(self):\n return self._get_time_info([\"Remain_Time_H\", \"remainTimeHour\"])",
"def hours(self):\n return int(self.minutes / 60)",
"def _get_time(self) -> None:\n self.data[\"time\"] = np.zeros(len(self.data[\"yyyymmdd\"]), dtype=object)\n \n for idx, (yyyymmdd, hhmmss) in enumerate(zip(self.data[\"yyyymmdd\"], self.data[\"hhmmss\"])):\n year, month, day = yyyymmdd.split(\"/\")\n hour, minute, second = hhmmss.split(\":\")\n self.data[\"time\"][idx] = datetime(int(year), int(month), int(day), int(hour), int(minute), int(second))\n \n del self.data[\"yyyymmdd\"]\n del self.data[\"hhmmss\"]",
"def datetime(self):\n\n time = self.time()\n if time is None or time < 0 or time >= 24:\n time = 0\n\n try:\n d = datetime(self.year(), self.month(), self.day()) + timedelta(hours=time)\n return d\n except:\n return",
"def datetime(self):\n time = self.time()\n if time is None or time < 0 or time >= 24:\n time = 0\n\n try:\n d = datetime(self.year(), self.month(), self.day()) + \\\n timedelta(hours=time)\n return d\n except:\n return",
"def unit_hr(self):\n return self.time_base * 60.0",
"def Time(row):\r\n try:\r\n timeadd = dt.datetime.strptime(row['TickIssueTime'], '%H:%M').time()\r\n except:\r\n timeadd = dt.datetime.strptime('00:00', '%H:%M').time()\r\n\r\n newtime = dt.datetime.combine(dt.datetime.strptime(row['TickIssueDate'], '%Y-%m-%d %H:%M:%S') , timeadd)\r\n return newtime",
"def hours(input=None):\n return get(input).hours",
"def time_detected(self) -> datetime:\n return datetime.fromtimestamp(\n self.properties[DBUS_ATTR_TIME_DETECTED] * 10**-6\n ).astimezone(timezone.utc)",
"def initialtime_hour(self):\n return self._get_time_info([\"Initial_Time_H\", \"initialTimeHour\"])",
"def test_emp_man_hours(self):\n start = timezone.make_aware(dt.datetime(2016, 6, 3, 6, 30))\n stop = timezone.make_aware(dt.datetime(2016, 6, 3, 10, 30))\n emp_hours = 0\n\n expected_emp_hours = 20.95\n\n # getting employee objects that are clocked in\n clocked_in_emp = get_clocked_in(start)\n emp_that_left = get_emp_who_left_during_shift(start, stop)\n emp_that_breaked = get_emp_who_left_on_break(start, stop)\n\n # testing return of number of hours\n for employee in clocked_in_emp:\n print(\"EMP= \", employee.PRSN_NBR_TXT)\n emp_hour = get_emp_man_hours(employee, start, stop)\n print(\"EMP HOUR= \", emp_hour)\n emp_hours += emp_hour\n\n for employee in emp_that_left:\n print(\"EMP= \", employee.PRSN_NBR_TXT)\n emp_hour = get_emp_man_hours(employee, start, stop)\n print(\"EMP HOUR= \", emp_hour)\n emp_hours += emp_hour\n\n for employee in emp_that_breaked:\n print(\"EMP= \", employee.PRSN_NBR_TXT)\n emp_hour = get_emp_man_hours(employee, start, stop)\n print(\"EMP HOUR= \", emp_hour)\n emp_hours += emp_hour\n\n self.assertAlmostEqual(emp_hours, expected_emp_hours)",
"def release_time(date_time):\n\n time_hour = int(date_time.strftime('%H'))\n\n quotient = int(time_hour / 4)\n\n if quotient == 5:\n date_time = datetime.combine(date_time.date()+timedelta(1), time(0,0))\n else:\n date_time = datetime.combine(date_time.date(), time((quotient+1)*4,0))\n \n return date_time",
"def scaledTime():\n #return (time.gmtime().tm_wday, time.gmtime().tm_hour)\n epoch = time.strptime(\"2013-02-21 11:30:00\", \"%Y-%m-%d %H:%M:%S\")\n timeInSec = time.mktime(time.gmtime()) - time.mktime(epoch)\n hourSince = timeInSec / Predictor.hourScale\n day = int(hourSince / 24 % 7)\n hour = int(hourSince % 24)\n return (day, hour)",
"def hours_in(sec):\r\n return int(sec//3600)",
"def hourly(self, start_time: str = \"now\", end_time: Optional[str] = None,\n fields: List[str] = list()) -> dict:\n end_time = end_time or str(pendulum.parse(start_time).add(hours=108))\n query = {\n \"start_time\": start_time,\n \"end_time\": end_time,\n \"fields\": fields or self.fields\n }\n return self.call(\"weather/forecast/hourly\", query)"
] |
[
"0.68510634",
"0.56945",
"0.56266",
"0.5587167",
"0.5559629",
"0.54573816",
"0.54573816",
"0.54573816",
"0.54573816",
"0.5355372",
"0.531853",
"0.5231836",
"0.52008826",
"0.519405",
"0.5153251",
"0.5136625",
"0.5122477",
"0.5098299",
"0.5098221",
"0.503388",
"0.5017425",
"0.4986636",
"0.49634442",
"0.4955079",
"0.49406913",
"0.493276",
"0.49101692",
"0.4895211",
"0.4885453",
"0.4877579"
] |
0.66940224
|
1
|
Converts a simulation time (in units of days since the reference time) into a calendar date (datetime.datetime). Requires that the Reference Time is set in the model.
|
def simtime_to_calendar(self, sim_time):
    time_ref = self.doc.getReferenceTime()
    if time_ref is None:
        raise (RuntimeError("Reference time not set in model"))
    return time_ref + timedelta(sim_time)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def getAbsoluteSimulationTimeCalendar(self):\n if self.doc.getReferenceTime() is None:\n raise ValueError(\"Reference Time not set in FEFLOW model.\")\n\n self.doc.getReferenceTime() + datetime.timedelta(days=self.doc.getAbsoluteSimulationTime())",
"def calendar_to_simtime(self, calendar_time):\n time_ref = self.doc.getReferenceTime()\n\n if time_ref is None:\n raise (RuntimeError(\"Reference time not set in model\"))\n\n return (calendar_time - time_ref).total_seconds() / (24 * 60 * 60)",
"def date(self):\n try:\n return datetime.date.fromordinal(self.round)\n except ValueError:\n raise ValueError(\"you need to run ABCE in calendar mode, use simulation.declare_calendar(2000, 1, 1)\")",
"def get_ref_time(self):\n from datetime import datetime, timedelta\n\n ref_time = datetime(2010, 1, 1, 0, 0, 0)\n ref_time += timedelta(seconds=int(self.fid['/PRODUCT/time'][0]))\n return ref_time",
"def datetime(self):\n\n time = self.time()\n if time is None or time < 0 or time >= 24:\n time = 0\n\n try:\n d = datetime(self.year(), self.month(), self.day()) + timedelta(hours=time)\n return d\n except:\n return",
"def datetime(self):\n time = self.time()\n if time is None or time < 0 or time >= 24:\n time = 0\n\n try:\n d = datetime(self.year(), self.month(), self.day()) + \\\n timedelta(hours=time)\n return d\n except:\n return",
"def dreset(self):\n if self.calc:\n return dt.datetime.combine(self.calc(self.vardef), dt.time(hour=0, minute=0))\n else:\n val = dt.date.today()\n if self.period in [\"minute\", \"hour\", \"day\"]:\n return dt.datetime.combine(val, dt.time(hour=0, minute=0))\n param = {}\n if self.period == \"week\":\n return dt.datetime.combine(\n val - dt.timedelta(days=val.weekday()),\n dt.time(hour=0, minute=0))\n for x, y in {\"month\": \"day\", \"year\": \"month\"}.items():\n param[y] = 1\n if self.period == x:\n return dt.datetime.combine(val.replace(**param).date(), dt.time(hour=0, minute=0))\n\n raise Exception()",
"def datetime(self):\n return datetime.datetime(self.year, self.month, self.day,\n self.hour, self.min, self.sec)",
"def Make(year, month, day, hour, minute, second):\n micro = round(math.fmod(second, 1.0) * 1000000)\n second = math.floor(second - micro/1000000)\n d = datetime.datetime(year, month, day, hour, minute, second, micro)\n ut = (d - _EPOCH).total_seconds() / 86400\n return Time(ut)",
"def todate(self):\n return date(self.year, self.month, self.day)",
"def reference_time(self):\n if hasattr(self, '_reference_time') is False:\n self._reference_time = self.midtime\n\n return self._reference_time",
"def get_simulate_date(start, end):\n start_year, start_month, start_day = parse_string_datetime(start)\n end_year, end_month, end_day = parse_string_datetime(end)\n if simulatedate_checkinput(start, end) == 0:\n start_time = datetime.datetime(start_year, start_month, start_day)\n end_time = datetime.datetime(end_year, end_month, end_day)\n return start_time, end_time",
"def _getRelTime(self, obsData):\n\n # get unix time for start of data sample (midnight) as ref point\n dt = datetime.datetime(self.obsStart[0], self.obsStart[1], self.obsStart[2], 0, 0)\n startOfDay = int(time.mktime(dt.timetuple()))\n\n # strip date string\n dateString = [x.strip() for x in obsData[self._getFIdx('Date')].split('-')]\n\n # get unix time for start of observation date\n obsStartOfDay = int(time.mktime(datetime.datetime( \\\n int(dateString[0]), int(dateString[1]), int(dateString[2]), 0, 0).timetuple()))\n\n # calculate relative time (hours)\n relTime = int((obsStartOfDay + (int(obsData[self._getFIdx('Time since midnight')])*60) \\\n - startOfDay)/3600.)\n\n return str(relTime)",
"def to_pydatetime(self) -> npt.NDArray[np.object_]:\n return ints_to_pydatetime(self.asi8, tz=self.tz, reso=self._creso)",
"def make_simulation(self):\n if self.skip_reference:\n self.units = 'time steps per second'\n else:\n self.units = 'calls per second'\n\n self.reference_sim, self.compare_sim = self.make_simulations()\n return self.reference_sim",
"def date(self):\n return Date(self.year, self.month, self.day)",
"def to_datetime(self):\n # convert Modified Julian Day epoch to datetime variable\n epoch = np.datetime64(datetime.datetime(*_mjd_epoch))\n # use nanoseconds to keep as much precision as possible\n delta_time = np.atleast_1d(self.MJD*self.day*1e9).astype(np.int64)\n # return the datetime array\n return np.array(epoch + delta_time.astype('timedelta64[ns]'))",
"def test_0_turn_time(self):\n test_time = dt.datetime(2021, 6, 18, 15, 0, 0)\n test_turn_time = 0\n result = CalculateDueDate.add(test_time, test_turn_time)\n self.assertEqual(dt.datetime(2021, 6, 18, 15, 0, 0), result)",
"def jd2Date(jd, UT_corr=0, dt_obj=False):\n\n \n dt = timedelta(days=jd)\n\n try:\n date = dt + JULIAN_EPOCH - J2000_JD + timedelta(hours=UT_corr) \n\n # If the date is out of range (i.e. before year 1) use year 1. This is the limitation in the datetime\n # library. Time handling should be switched to astropy.time\n except OverflowError:\n date = datetime(MINYEAR, 1, 1, 0, 0, 0)\n\n\n # Return a datetime object if dt_obj == True\n if dt_obj:\n return date\n\n return date.year, date.month, date.day, date.hour, date.minute, date.second, date.microsecond/1000.0",
"def generate_datetime(self):\n min_year = 1980\n max_year = 2021\n start = datetime.datetime(min_year, 1, 1, 00, 00, 00)\n years = max_year - min_year + 1\n end = start + datetime.timedelta(days=365 * years)\n random_datetime = start + (end - start) * random.random()\n assert isinstance(random_datetime, datetime.datetime)\n return random_datetime",
"def time(self, the_datetime=None):\n if the_datetime:\n if self._request(\n 'S1',\n the_datetime.strftime('%y'), str(the_datetime.month), str(the_datetime.day),\n str(the_datetime.hour), str(the_datetime.minute), str(the_datetime.second)\n )[0]:\n return the_datetime\n else:\n done, data = self._request('GT')\n if done:\n if data == ['165', '165', '165', '165', '165', '85']:\n raise NoClock\n return datetime.datetime(\n year=int(data[0])+2000, month=int(data[1]), day=int(data[2]),\n hour=int(data[3]), minute=int(data[4]), second=int(data[5])\n )\n\n raise EvseError",
"def get_next_trading_day_schedule(reference_day: dt):\n reference_day = reference_day.date()\n schedule = get_trading_calendar(reference_day, reference_day)\n while schedule.empty:\n reference_day += timedelta(days=1)\n schedule = get_trading_calendar(reference_day, reference_day)\n return schedule",
"def get_data(self):#id in db\n\t\tarr = self.startTime.split(\"\\/\")\n\t\treturn new Date(arr[0],arr[1]-1,arr[2].split(\" \")[0])",
"def hydrate_date(days):\n return Date.from_ordinal(unix_epoch_date_ordinal + days)",
"def make_date(year, month, day):\n return datetime64(datetime.datetime(year, month, day))",
"def actionDate():\n \n year = 2010\n month = 1\n day = 1\n hour=random.randint(0, 23)\n minute=random.randint(0, 59)\n second=random.randint(0, 59)\n action_date = datetime(year, month, day,hour,minute,second)\n return action_date",
"def link2date(self, component, rel, y, m, d):\n date_node = self.calendar.date(y, m, d).day\n self.create_relation(component, rel, date_node)\n return",
"def from_datetime(self, dtime: np.ndarray):\n # convert delta time array from datetime object\n # to days relative to 1992-01-01T00:00:00\n self.MJD = convert_datetime(dtime, epoch=_mjd_epoch)/self.day\n return self",
"def datedev_py(matlab_datenum):\n python_datetime = datetime.datetime.fromordinal(int(matlab_datenum)) + datetime.timedelta(days=matlab_datenum%1) - datetime.timedelta(days = 366)\n return python_datetime",
"def asdatetime(self):\n tznaive = self.timezoneNaive()\n if tznaive:\n tzinfo = None\n else:\n tzinfo = _TZINFO[self._tz].tzinfo\n second = int(self._second)\n microsec = self.micros() % 1000000\n dt = datetime(self._year, self._month, self._day, self._hour,\n self._minute, second, microsec, tzinfo)\n return dt"
] |
[
"0.715675",
"0.6405978",
"0.55148673",
"0.54061246",
"0.5035797",
"0.5000875",
"0.49441096",
"0.49226648",
"0.49099126",
"0.48785847",
"0.4797319",
"0.47609946",
"0.47559208",
"0.47478265",
"0.4734923",
"0.47134128",
"0.47104707",
"0.47017756",
"0.46849194",
"0.46693674",
"0.46639422",
"0.46608347",
"0.46057153",
"0.4581168",
"0.4562146",
"0.45618305",
"0.45598298",
"0.45414275",
"0.45333484",
"0.4525845"
] |
0.7452497
|
0
|
Augments the image by rotating it by max_angle in the axial plane in both directions. Also flips the image from left to right and rotates by max_angle in both directions.
|
def augment_image(image, max_angle):
    angles = [-max_angle, max_angle]
    axes = [(0, 1), (0, 2), (1, 2)]
    images_aug = [image, image[::-1]]
    for angle in angles:
        for axis in axes:
            images_aug.append(aug.rotate(image, angle, axes=axis, reshape=False, order=0))
            images_aug.append(aug.rotate(image[::-1], angle, axes=axis, reshape=False, order=0))
    return images_aug
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def image_augmentation(img):\n return np.fliplr(img)",
"def _augment(img):\r\n return flip(img, axis=2)",
"def _augment(img):\n return flip(img, axis=2)",
"def get_opt_rotate(obj_img, back_img,\n back_center_x, back_center_y,\n obj_center_x, obj_center_y,\n prev_rot_angle=0.,\n is_erosion=False):\n width = obj_img.shape[0]\n rot_img = ndimage.rotate(obj_img, prev_rot_angle, reshape=False)\n induce_x, induce_y = int(back_center_x - obj_center_x), int(back_center_y - obj_center_y)\n combine_img = back_img.copy()\n combine_img[induce_y:induce_y + width, induce_x:induce_x + width] -= rot_img\n neg_count = len(np.argwhere(combine_img < 0))\n if is_erosion:\n angle_amount = 4.\n else:\n angle_amount = 16.\n # check combine_img.dtype; rot_img.dtype; back_img\n curr_angle = prev_rot_angle\n while angle_amount > 0.5:\n angle_amount /= 2.\n\n rotate_1 = ndimage.rotate(obj_img, curr_angle + angle_amount, reshape=False)\n combine_img = back_img.copy()\n combine_img[induce_y:induce_y+width, induce_x:induce_x+width] -= rotate_1\n neg_count_1 = len(np.argwhere(combine_img < 0))\n\n rotate_2 = ndimage.rotate(obj_img, curr_angle - angle_amount, reshape=False)\n combine_img = back_img.copy()\n combine_img[induce_y:induce_y + width, induce_x:induce_x + width] -= rotate_2\n neg_count_2 = len(np.argwhere(combine_img < 0))\n\n if neg_count_1 < neg_count_2:\n if neg_count_1 < neg_count:\n neg_count = neg_count_1\n curr_angle = curr_angle + angle_amount\n else:\n if neg_count_2 < neg_count:\n neg_count = neg_count_2\n curr_angle = curr_angle - angle_amount\n # print(curr_angle)\n # print(neg_count, neg_count_1, neg_count_2)\n # print('Negative Pix Count Rotation: %d.' % neg_count)\n # print('Optimal Rotation: ', curr_angle)\n return curr_angle, neg_count",
"def rotate_augmentation():\n rand_rotate = np.random.randint(180)\n return lambda image: rotate_with_extension(image, rand_rotate)",
"def rotate_and_wrap_image(self, image, degree_of_rotation):\n\n image_center = tuple(np.array(image.shape[1::-1]) / 2)\n rot_mat = cv2.getRotationMatrix2D(image_center, degree_of_rotation, 1.0)\n # borderMode (constant) and borderValue are important for maintaiing consistency \n ri = cv2.warpAffine(image, rot_mat, image.shape[1::-1], flags=cv2.INTER_LINEAR, borderMode = cv2.BORDER_CONSTANT,borderValue = (255,255,255))\n return ri",
"def rotate(self, angle):\n image_center = np.array(self.img.shape[1::-1]) / 2\n rot_mat = cv2.getRotationMatrix2D(tuple(image_center), angle, 1.0)\n\n self.img = cv2.warpAffine(\n self.img, rot_mat, self.img.shape[1::-1], flags=cv2.INTER_LINEAR\n )\n\n self.edits.append(f\"rotate:{angle}\")\n return self",
"def augment_image(im):\n # First crop out the face to save reduce computation load\n bb = im.landmarks['bb'].lms\n bb_vec = bb.as_vector()\n bb_ul = (np.array([bb_vec[0], bb_vec[1]]) - bb.centre()) * 2\n bb_lr = (np.array([bb_vec[4], bb_vec[5]]) - bb.centre()) * 2\n ul = bb_ul + bb.centre()\n lr = bb_lr + bb.centre()\n im = im.crop(ul, lr, constrain_to_boundary=True)\n if im.pixels.shape[0] == 1:\n pix = np.zeros((3, im.pixels.shape[1], im.pixels.shape[2]))\n pix[:,] = im.pixels\n im.pixels = pix\n\n beta = 0.3\n cx = np.random.uniform(-beta, beta)\n cy = np.random.uniform(-beta, beta)\n fx = 1.0\n fy = np.random.uniform(0.6, 1.4)\n max_rotation = 30\n theta = np.random.uniform(-max_rotation, max_rotation)\n\n rotation = menpo.transform.Rotation.init_from_2d_ccw_angle(theta)\n shear = menpo.transform.Affine(np.array([[1, cx, 0],[cy, 1, 0], [0,0,1]]))\n scale = menpo.transform.Affine(np.array([[fx, 0, 0],[0, fy, 0], [0,0,1]]))\n T = scale.compose_after(shear).compose_after(rotation)\n\n t_im = im.transform_about_centre(T)\n\n t_im = add_color_jetting(t_im)\n t_im = add_occlusion(t_im)\n\n\n new_bb = t_im.landmarks['PTS'].lms.bounding_box()\n\n #new_bb contains the gt bounding box\n augmented_bb = add_bb_noise(new_bb)\n augmented_bb = augmented_bb.reshape((4,2))\n augmented_bb = menpo.shape.PointCloud(augmented_bb)\n t_im.landmarks['bb'] = menpo.landmark.LandmarkGroup.init_with_all_label(augmented_bb)\n\n return t_im",
"def flip_augmentation():\n return lambda image: ImageOps.flip(image)",
"def rotate(img, angle, resample=False, expand=False, center=None):\r\n \r\n return img.rotate(angle, resample, expand, center)",
"def img_rotate(img, angle, center, fillval=0):\n rows, cols = img.shape[:2]\n M = cv2.getRotationMatrix2D(center, angle, 1)\n return cv2.warpAffine(img, M, (cols, rows), borderValue=fillval)",
"def rotate(self):\n\n last_center = self.rect.center\n self.image = pg.transform.rotate(self.image_copy,self.angle)\n self.rect = self.image.get_rect()\n self.rect.center = last_center\n self.angle +=self.rotate_by",
"def align(image):\n angle = find_angle(image)\n image = rotate(image, angle)\n return image",
"def visualize_augmentation(image, angle):\n\n # Create a copy of the image to prevent changing the original\n img = np.copy(image)\n\n cols = 2\n rows = 6\n fig_size = (7 * cols, 4 * rows) # Figure width and height, in inches\n\n fig, ax = plt.subplots(rows, cols, figsize=fig_size)\n # Plot original images in the left column\n for idx in range(rows):\n ax[idx, 0].imshow(img)\n ax[idx, 0].set_title(\"Original, Angle = \" + str(round(angle, 3)))\n # Horizontal Flip\n tmp_img, tmp_angle = random_horizontal_flip(img, angle, 1.0)\n ax[0, 1].imshow(tmp_img)\n ax[0, 1].set_title(\"Horizontal Flip, Angle = \" + str(round(tmp_angle, 3)))\n # Translation\n tmp_img, tmp_angle = random_translation(img, angle)\n ax[1, 1].imshow(tmp_img)\n ax[1, 1].set_title(\"Translation, Angle = \" + str(round(tmp_angle, 3)))\n # Gaussian Noise\n tmp_img = random_gaussian(img)\n ax[2, 1].imshow(tmp_img)\n ax[2, 1].set_title(\"Gaussian Noise, Angle = \" + str(round(angle, 3)))\n # Shadows\n tmp_img = random_shadows(img, 1.0, 0.9)\n ax[3, 1].imshow(tmp_img)\n ax[3, 1].set_title(\"Shadows, Angle = \" + str(round(angle, 3)))\n # Brightness\n tmp_img = random_brightness(img)\n ax[4, 1].imshow(tmp_img)\n ax[4, 1].set_title(\"Brightness, Angle = \" + str(round(angle, 3)))\n # All Augmentation\n tmp_img, tmp_angle = random_all(img, angle)\n ax[5, 1].imshow(tmp_img)\n ax[5, 1].set_title(\"All Randomization, Angle = \" +\n str(round(tmp_angle, 3)))\n\n return fig",
"def rotatedView(img, angle, enlarge=True, extend=Views.extendBorder):\n cx = img.dimension(0) / 2.0\n cy = img.dimension(1) / 2.0\n toCenter = AffineTransform2D()\n toCenter.translate(-cx, -cy)\n rotation = AffineTransform2D()\n # Step 1: place origin of rotation at the center of the image\n rotation.preConcatenate(toCenter)\n # Step 2: rotate around the Z axis\n rotation.rotate(radians(angle))\n # Step 3: undo translation to the center\n rotation.preConcatenate(toCenter.inverse())\n rotated = RV.transform(Views.interpolate(extend(img),\n NLinearInterpolatorFactory()), rotation)\n if enlarge:\n # Bounds:\n bounds = repeat((sys.maxint, 0)) # initial upper- and lower-bound values \n # for min, max to compare against \n transformed = zeros(2, 'f')\n for corner in product(*zip(repeat(0), Intervals.maxAsLongArray(img))):\n rotation.apply(corner, transformed)\n bounds = [(min(vmin, int(floor(v))), max(vmax, int(ceil(v))))\n for (vmin, vmax), v in zip(bounds, transformed)]\n minC, maxC = map(list, zip(*bounds)) # transpose list of 2 pairs\n # into 2 lists of 2 values\n imgRot = Views.zeroMin(Views.interval(rotated, minC, maxC))\n else:\n imgRot = Views.interval(rotated, img)\n return imgRot",
"def rotate_image(image, angle):\n\n image_size = (image.shape[1], image.shape[0])\n image_center = tuple(array(image_size) / 2)\n\n rot_mat = vstack([cv2.getRotationMatrix2D(image_center, angle, 1.0), [0, 0, 1]])\n trans_mat = identity(3)\n\n w2 = image_size[0] * 0.5\n h2 = image_size[1] * 0.5\n\n rot_mat_notranslate = matrix(rot_mat[0:2, 0:2])\n\n tl = (array([-w2, h2]) * rot_mat_notranslate).A[0]\n tr = (array([w2, h2]) * rot_mat_notranslate).A[0]\n bl = (array([-w2, -h2]) * rot_mat_notranslate).A[0]\n br = (array([w2, -h2]) * rot_mat_notranslate).A[0]\n\n x_coords = [pt[0] for pt in [tl, tr, bl, br]]\n x_pos = [x for x in x_coords if x > 0]\n x_neg = [x for x in x_coords if x < 0]\n\n y_coords = [pt[1] for pt in [tl, tr, bl, br]]\n y_pos = [y for y in y_coords if y > 0]\n y_neg = [y for y in y_coords if y < 0]\n\n right_bound = max(x_pos)\n left_bound = min(x_neg)\n top_bound = max(y_pos)\n bot_bound = min(y_neg)\n\n new_w = int(abs(right_bound - left_bound))\n new_h = int(abs(top_bound - bot_bound))\n new_image_size = (new_w, new_h)\n\n new_midx = new_w * 0.5\n new_midy = new_h * 0.5\n\n dx = int(new_midx - w2)\n dy = int(new_midy - h2)\n\n trans_mat = getTranslationMatrix2d(dx, dy)\n affine_mat = (matrix(trans_mat) * matrix(rot_mat))[0:2, :]\n result = cv2.warpAffine(image, affine_mat, new_image_size, flags=cv2.INTER_LINEAR)\n\n return result",
"def Rotate(angle):\n def rotate_img(img, angle=angle):\n img = Ft.rotate(img, angle, resample=BILINEAR)\n return img\n return rotate_img",
"def azizen(self):\n # x0,y0 array pixel coordinates relative to cx,cy\n# ndy0,ndx0=img.shape\n ndy0=self.ndy0\n ndx0=self.ndx0\n x0,y0=np.meshgrid(np.linspace(0,ndx0-1,ndx0)-self.cx,np.linspace(0,ndy0-1,ndy0)-self.cy)\n r0=np.sqrt(x0**2+y0**2)/self.pr0 # fractional radial distance from 0,0\n# self.roi=np.s_[ystart:ystart+self.ny0,xstart:xstart+self.nx0]\n # why not model the zenith angle dependence with polynomial directly\n # rather than linear interpolation between roots.\n roots=np.zeros(51)\n rr=np.arange(51)/100.0\n for i,ref in enumerate(rr):\n roots[i]=np.real(np.roots([self.c3,0,self.c2,0,self.c1,-ref])[-1])\n theta0 = np.interp(r0/2,rr,roots)\n \n phi0 = np.arctan2(x0,y0) - self.rot ####phi (i.e., azimuth) is reckoned with -pi corresponding to north, increasing clockwise, NOTE: pysolar use sub-standard definition\n phi0 = phi0%(2*np.pi)\n\n #####correction for the tilt of the camera\n k=np.array((np.sin(self.azm),np.cos(self.azm),0))\n a=np.array([np.sin(theta0)*np.cos(phi0),np.sin(theta0)*np.sin(phi0),np.cos(theta0)]); \n a = np.transpose(a,[1,2,0])\n b=np.cos(self.beta)*a + np.sin(self.beta)*np.cross(k,a,axisb=2) \\\n + np.reshape(np.outer(np.dot(a,k),k),(self.ndy0,self.ndx0,3))*(1-np.cos(self.beta))\n theta0=np.arctan(np.sqrt(b[:,:,0]**2+b[:,:,1]**2)/b[:,:,2])\n phi0=np.arctan2(b[:,:,1],b[:,:,0])%(2*np.pi)\n# max_theta *= deg2rad \n# valid0 = (theta0<max_theta) & (theta0>0); \n# theta0[valid0]=np.nan;\n self.theta0,self.phi0=theta0,phi0",
"def data_augmentation(image, mode):\n if mode == 0:\n # original\n out = image\n elif mode == 1:\n # flip up and down\n out = np.flipud(image)\n elif mode == 2:\n # rotate counterwise 90 degree\n out = np.rot90(image)\n elif mode == 3:\n # rotate 90 degree and flip up and down\n out = np.rot90(image)\n out = np.flipud(out)\n elif mode == 4:\n # rotate 180 degree\n out = np.rot90(image, k=2)\n elif mode == 5:\n # rotate 180 degree and flip\n out = np.rot90(image, k=2)\n out = np.flipud(out)\n elif mode == 6:\n # rotate 270 degree\n out = np.rot90(image, k=3)\n elif mode == 7:\n # rotate 270 degree and flip\n out = np.rot90(image, k=3)\n out = np.flipud(out)\n else:\n raise Exception('Invalid choice of image transformation')\n\n return out",
"def data_augmentation(image, mode):\n if mode == 0:\n # original\n return image\n elif mode == 1:\n # flip up and down\n return np.flipud(image)\n elif mode == 2:\n # rotate counter-clockwise 90 degree\n return np.rot90(image)\n elif mode == 3:\n # rotate 90 degree and flip up and down\n image = np.rot90(image)\n return np.flipud(image)\n elif mode == 4:\n # rotate 180 degree\n return np.rot90(image, k=2)\n elif mode == 5:\n # rotate 180 degree and flip\n image = np.rot90(image, k=2)\n return np.flipud(image)\n elif mode == 6:\n # rotate 270 degree\n return np.rot90(image, k=3)\n elif mode == 7:\n # rotate 270 degree and flip\n image = np.rot90(image, k=3)\n return np.flipud(image)",
"def rotate_no_clip(self, angle):\n # Calculate the size the expanded image needs to be to contain rotated image\n x, y = self.width, self.height\n w = abs(x*math.cos(angle)) + abs(y*math.sin(angle))\n h = abs(x*math.sin(angle)) + abs(y*math.cos(angle))\n\n # Paste the image into a larger frame and rotate\n img = Image.blank(w, h, 4, 0)\n img.paste(self, w/2-x/2, h/2-y/2)\n rotated = img.rotate(angle, (w/2, h/2))\n\n return rotated",
"def rotate(\n img,\n angle,\n interpolation='nearest',\n expand=False,\n center=None,\n fill=None,\n data_format='CHW',\n):\n\n angle = -angle % 360\n img = img.unsqueeze(0)\n\n # n, c, h, w = img.shape\n w, h = _get_image_size(img, data_format=data_format)\n\n img = img if data_format.lower() == 'chw' else img.transpose((0, 3, 1, 2))\n\n post_trans = [0, 0]\n\n if center is None:\n rotn_center = [0, 0]\n else:\n rotn_center = [(p - s * 0.5) for p, s in zip(center, [w, h])]\n\n if paddle.in_dynamic_mode():\n angle = math.radians(angle)\n matrix = [\n math.cos(angle),\n math.sin(angle),\n 0.0,\n -math.sin(angle),\n math.cos(angle),\n 0.0,\n ]\n matrix = paddle.to_tensor(matrix, place=img.place)\n\n matrix[2] += (\n matrix[0] * (-rotn_center[0] - post_trans[0])\n + matrix[1] * (-rotn_center[1] - post_trans[1])\n + rotn_center[0]\n )\n matrix[5] += (\n matrix[3] * (-rotn_center[0] - post_trans[0])\n + matrix[4] * (-rotn_center[1] - post_trans[1])\n + rotn_center[1]\n )\n else:\n angle = angle / 180 * math.pi\n matrix = paddle.concat(\n [\n paddle.cos(angle),\n paddle.sin(angle),\n paddle.zeros([1]),\n -paddle.sin(angle),\n paddle.cos(angle),\n paddle.zeros([1]),\n ]\n )\n matrix = paddle.static.setitem(\n matrix,\n 2,\n matrix[2]\n + matrix[0] * (-rotn_center[0] - post_trans[0])\n + matrix[1] * (-rotn_center[1] - post_trans[1])\n + rotn_center[0],\n )\n matrix = paddle.static.setitem(\n matrix,\n 5,\n matrix[5]\n + matrix[3] * (-rotn_center[0] - post_trans[0])\n + matrix[4] * (-rotn_center[1] - post_trans[1])\n + rotn_center[1],\n )\n\n matrix = matrix.reshape((1, 2, 3))\n\n if expand:\n # calculate output size\n if paddle.in_dynamic_mode():\n corners = paddle.to_tensor(\n [\n [-0.5 * w, -0.5 * h, 1.0],\n [-0.5 * w, 0.5 * h, 1.0],\n [0.5 * w, 0.5 * h, 1.0],\n [0.5 * w, -0.5 * h, 1.0],\n ],\n place=matrix.place,\n ).astype(matrix.dtype)\n else:\n corners = paddle.assign(\n [\n [-0.5 * w, -0.5 * h, 1.0],\n [-0.5 * w, 0.5 * h, 1.0],\n [0.5 * w, 0.5 * h, 1.0],\n [0.5 * w, -0.5 * h, 1.0],\n ],\n ).astype(matrix.dtype)\n\n _pos = (\n corners.reshape((1, -1, 3))\n .bmm(matrix.transpose((0, 2, 1)))\n .reshape((1, -1, 2))\n )\n _min = _pos.min(axis=-2).floor()\n _max = _pos.max(axis=-2).ceil()\n\n npos = _max - _min\n nw = npos[0][0]\n nh = npos[0][1]\n\n if paddle.in_dynamic_mode():\n ow, oh = int(nw), int(nh)\n else:\n ow, oh = nw.astype(\"int32\"), nh.astype(\"int32\")\n\n else:\n ow, oh = w, h\n\n grid = _affine_grid(matrix, w, h, ow, oh)\n\n out = _grid_transform(img, grid, mode=interpolation, fill=fill)\n\n out = out if data_format.lower() == 'chw' else out.transpose((0, 2, 3, 1))\n\n return out.squeeze(0)",
"def rotateImage(image, angle):\n image_center = tuple(np.array(image.shape[1::-1]) / 2)\n rot_mat = cv2.getRotationMatrix2D(image_center, angle, 1.0)\n result = cv2.warpAffine(image, rot_mat, image.shape[1::-1], flags=cv2.INTER_NEAREST)\n return result",
"def rotate(self, image, angle):\n # Get the image size\n # No that's not an error - NumPy stores image matricies backwards\n image_size = (image.shape[1], image.shape[0])\n image_center = tuple(np.array(image_size) / 2)\n\n # Convert the OpenCV 3x2 rotation matrix to 3x3\n rot_mat = np.vstack(\n [cv2.getRotationMatrix2D(image_center, angle, 1.0), [0, 0, 1]]\n )\n\n rot_mat_notranslate = np.matrix(rot_mat[0:2, 0:2])\n\n # Shorthand for below calcs\n image_w2 = image_size[0] * 0.5\n image_h2 = image_size[1] * 0.5\n\n # Obtain the rotated coordinates of the image corners\n rotated_coords = [\n (np.array([-image_w2, image_h2]) * rot_mat_notranslate).A[0],\n (np.array([image_w2, image_h2]) * rot_mat_notranslate).A[0],\n (np.array([-image_w2, -image_h2]) * rot_mat_notranslate).A[0],\n (np.array([image_w2, -image_h2]) * rot_mat_notranslate).A[0]\n ]\n\n # Find the size of the new image\n x_coords = [pt[0] for pt in rotated_coords]\n x_pos = [x for x in x_coords if x > 0]\n x_neg = [x for x in x_coords if x < 0]\n\n y_coords = [pt[1] for pt in rotated_coords]\n y_pos = [y for y in y_coords if y > 0]\n y_neg = [y for y in y_coords if y < 0]\n\n right_bound = max(x_pos)\n left_bound = min(x_neg)\n top_bound = max(y_pos)\n bot_bound = min(y_neg)\n\n new_w = int(abs(right_bound - left_bound))\n new_h = int(abs(top_bound - bot_bound))\n\n # We require a translation matrix to keep the image centred\n trans_mat = np.matrix([\n [1, 0, int(new_w * 0.5 - image_w2)],\n [0, 1, int(new_h * 0.5 - image_h2)],\n [0, 0, 1]\n ])\n\n # Compute the tranform for the combined rotation and translation\n affine_mat = (np.matrix(trans_mat) * np.matrix(rot_mat))[0:2, :]\n\n # Apply the transform\n result = cv2.warpAffine(\n image,\n affine_mat,\n (new_w, new_h),\n flags=cv2.INTER_LINEAR\n )\n\n return result",
"def data_augmentation(image, mode):\n out = np.transpose(image, (1, 2, 0))\n if mode == 0:\n # original\n out = out\n elif mode == 1:\n # flip up and down\n out = np.flipud(out)\n elif mode == 2:\n # rotate counterwise 90 degree\n out = np.rot90(out)\n elif mode == 3:\n # rotate 90 degree and flip up and down\n out = np.rot90(out)\n out = np.flipud(out)\n elif mode == 4:\n # rotate 180 degree\n out = np.rot90(out, k=2)\n elif mode == 5:\n # rotate 180 degree and flip\n out = np.rot90(out, k=2)\n out = np.flipud(out)\n elif mode == 6:\n # rotate 270 degree\n out = np.rot90(out, k=3)\n elif mode == 7:\n # rotate 270 degree and flip\n out = np.rot90(out, k=3)\n out = np.flipud(out)\n else:\n raise Exception('Invalid choice of image transformation')\n return np.transpose(out, (2, 0, 1))",
"def orient(self,Y):\r\n self.orientation[Y]+=1\r\n if self.orientation[Y]>3:\r\n self.orientation[Y]=0\r\n if self.orientation[Y]<0:\r\n self.orientation[Y]=3\r\n self.can.delete(self.image_bateau[Y])\r\n self.image_bateau[Y]=self.create_image(self.img[self.orientation[Y]][Y],0,0)\r\n self.affichage(Y)",
"def rotateImage(self, img, angle=90):\n if (angle == 90) :\n return(cv2.flip(cv2.transpose(img),flipCode=0))\n elif (angle == -90) :\n return(cv2.flip(cv2.transpose(img),flipCode=1))\n else :\n center = (img.shape[1]/2.0,img.shape[0]/2.0)\n rotate = cv2.getRotationMatrix2D(center, angle, 1.0)\n return cv2.warpAffine(img, rotate, (img.shape[1], img.shape[0]))",
"def augment_image_angle_pair(img_in, angle_in):\n flip_mode = random.randint(1, 2)\n shade_mode = random.randint(1,3)\n\n if flip_mode == 1:\n image_fl = img_in\n angle_fl = angle_in\n elif flip_mode == 2:\n image_fl, angle_fl = augm.flip_image_angle_pair(img_in, angle_in)\n\n if shade_mode == 1:\n img_out = image_fl\n elif shade_mode == 2:\n img_out = augm.change_brightness(image_fl)\n elif shade_mode == 3:\n img_out = augm.add_random_shadow(image_fl)\n\n angle_out = augm.gaussian_angle(angle_fl)\n\n return img_out, angle_out",
"def rotate_right_90(img):\r\n #reading image\r\n im = Image.open(\"filename\")\r\n \r\n #flipping image 90 degrees\r\n newimg = im.transpose(PIL.Image.ROTATE_90)\r\n \r\n return img",
"def rotate_image(image, angle):\n\n # Get the image size\n # No that's not an error - NumPy stores image matricies backwards\n image_size = (image.shape[1], image.shape[0])\n image_center = tuple(np.array(image_size) / 2)\n\n ''' Calculate Rotation Matrix '''\n # Convert the OpenCV 3x2 rotation matrix to 3x3\n rot_mat = np.vstack(\n [cv2.getRotationMatrix2D(image_center, angle, 1.0), [0, 0, 1]]\n )\n\n rot_mat_notranslate = np.matrix(rot_mat[0:2, 0:2])\n\n # Shorthand for below calcs\n image_w2 = image_size[0] * 0.5\n image_h2 = image_size[1] * 0.5\n\n # Obtain the rotated coordinates of the image corners\n rotated_coords = [\n (np.array([-image_w2, image_h2]) * rot_mat_notranslate).A[0],\n (np.array([image_w2, image_h2]) * rot_mat_notranslate).A[0],\n (np.array([-image_w2, -image_h2]) * rot_mat_notranslate).A[0],\n (np.array([image_w2, -image_h2]) * rot_mat_notranslate).A[0]\n ]\n\n # Find the size of the new image\n x_coords = [pt[0] for pt in rotated_coords]\n x_pos = [x for x in x_coords if x > 0]\n x_neg = [x for x in x_coords if x < 0]\n\n y_coords = [pt[1] for pt in rotated_coords]\n y_pos = [y for y in y_coords if y > 0]\n y_neg = [y for y in y_coords if y < 0]\n\n right_bound = max(x_pos)\n left_bound = min(x_neg)\n top_bound = max(y_pos)\n bot_bound = min(y_neg)\n\n new_w = int(abs(right_bound - left_bound))\n new_h = int(abs(top_bound - bot_bound))\n\n ''' Calculate Translation Matrix '''\n # We require a translation matrix to keep the image centred\n trans_mat = np.matrix([\n [1, 0, int(new_w * 0.5 - image_w2)],\n [0, 1, int(new_h * 0.5 - image_h2)],\n [0, 0, 1]\n ])\n\n # Compute the transform for the combined rotation and translation\n affine_mat = (np.matrix(trans_mat) * np.matrix(rot_mat))[0:2, :]\n\n # Apply the transform\n result = cv2.warpAffine(\n image,\n affine_mat,\n (new_w, new_h),\n flags=cv2.INTER_LINEAR\n )\n\n return result"
] |
[
"0.68872035",
"0.6749438",
"0.6689302",
"0.65078604",
"0.6465042",
"0.63611543",
"0.63405776",
"0.63356954",
"0.6331301",
"0.62980765",
"0.6275488",
"0.624571",
"0.6139461",
"0.6121177",
"0.6092224",
"0.60787594",
"0.60602623",
"0.60586965",
"0.6043328",
"0.60368246",
"0.60284185",
"0.60254097",
"0.5984994",
"0.59557325",
"0.59544337",
"0.59381175",
"0.59359425",
"0.59338015",
"0.59318715",
"0.5885015"
] |
0.83135
|
0
|
Creates a survival class label list from a list of survival days. Categories are 0 for under 10 months survival, 1 for 10-15 months, and 2 for 15+ months.
|
def categorize(y):
    y_out = []
    for yi in y:
        if int(yi) < (365 * 10.0) / 12.0:
            y_out.append(0)
        elif int(yi) < (365 * 15.0) / 12.0:
            y_out.append(1)
        else:
            y_out.append(2)
    return np.array(y_out)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def cvpr2018_labels():\n\n return {\n 0: 'others',\n 33: 'car',\n 34: 'motorcycle',\n 35: 'bicycle',\n 36: 'pedestrian',\n 38: 'truck',\n 39: 'bus',\n 40: 'tricycle'\n }",
"def _labels_for_historical_data(days: int, lang: str) -> List[str]:\n labels = []\n for days_diff in range(days, -1, -1):\n date_begin = date.today() - timedelta(days=days_diff)\n timestamp = pretty_print_timestamp(date_begin, lang)\n labels.append(timestamp)\n return labels",
"def get_labels(labels_name):\n labels = {\n \"labels_num\":['Blogs - Change', 'Customer Activity - Change', 'Days Since Last Login - Change', \n 'Happiness Index - Change', 'Happiness Index - Current Month', 'Happiness Index - Monthly', \n 'Logins - Change', 'Longevity - Modulo 12', 'Longevity - Modulo 18', 'Longevity - Modulo 24', \n 'Longevity - Months', 'Views - Change'],\n \"labels_cat\":['Longevity - Modulo 6', 'Support Cases - Change', 'Support Cases - Current Month', 'Support Priority - Change',\n 'Support Priority - Current Month'],\n \"target\":\"Churn\",\n \"labels_pca\":['Happiness Index - Monthly', 'Longevity - Modulo 12', 'Happiness Index - Change', \n 'Blogs - Change', 'Happiness Index - Current Month', 'Longevity - Modulo 24', \n 'Customer Activity - Change', 'Logins - Change', 'Longevity - Modulo 18', \n 'Days Since Last Login - Change']\n }\n return labels[labels_name]",
"def get_class_labels(\n tuning_level: list, query_list: ProcessedQueryList\n ) -> List[str]:\n if TuneLevel.INTENT.value in tuning_level:\n return [\n f\"{d}.{i}\" for d, i in zip(query_list.domains(), query_list.intents())\n ]\n else:\n return [f\"{d}\" for d in query_list.domains()]",
"def categorize(cleaned_news):\r\n _ret = ''\r\n relative_categories = ['robbery', 'mugging', 'murder', 'sexual harrasment', 'theft', 'road accident', 'drugs']\r\n classification = nv.newsClassifier.classify(cleaned_news)\r\n for cat in classification:\r\n if cat[1] > 0 and cat[0] in relative_categories:\r\n _ret += cat[0] + ', '\r\n return _ret",
"def classlist_from_intervals(the_intervals):\n\n return pd.Series(pd.IntervalIndex.from_breaks(np.array(the_intervals))).astype(str)",
"def classlist_from_intervals(the_intervals):\n\n return pd.Series(pd.IntervalIndex.from_breaks(np.array(the_intervals))).astype(str)",
"def get_categories_from_labels(labels):\n cats = []\n for cat in label_dict:\n for label in labels: \n if label in label_dict[cat]:\n cats.append(cat)\n return cats",
"def to_class_id(y):\n ret_val = []\n for y_id in range(len(y)):\n if y[y_id] > 3: ret_val.append(2)\n if y[y_id] < 3: ret_val.append(0)\n if y[y_id] == 3: ret_val.append(1)\n return ret_val",
"def clean_labels(Y):\n Y_cleaned = []\n for i in range(Y.shape[0]):\n Y_cleaned.append([Y.iloc[i][0].total_seconds()])\n return Y_cleaned",
"def transform_labels(self, categs):\n label_encoder = LabelEncoder()\n enc_label = label_encoder.fit_transform(categs)\n\n self._label_to_int = { categ: label_encoder.transform([categ])[0] for categ in categs }\n self._int_to_label = { label_encoder.transform([categ])[0]: categ for categ in categs }\n\n y = np_utils.to_categorical(enc_label)\n\n return y",
"def add_incident_count_class_label(data, count_col=\"incidents\", num_classes=6, one_hot=True):\n def add_plus(x, value=num_classes - 1):\n if int(x) == value:\n return str(x) + \"+\"\n return x\n\n data = data.copy()\n data[\"class\"] = np.minimum(data[count_col].values, num_classes - 1)\n data[\"class\"] = data[\"class\"].astype(int).astype(str)\n data[\"class\"] = data[\"class\"].map(add_plus)\n\n # to onehot\n if one_hot:\n classes = np.sort(data[\"class\"].unique())\n data = pd.concat([data, data[\"class\"].str.get_dummies()], axis=1, ignore_index=False)\n class_labels = [\"class_{}\".format(x) for x in classes]\n data = data.rename(columns={x: \"class_{}\".format(x) for x in classes})\n \n return data, class_labels\n\n else:\n return data",
"def classify(self, features):\n \n class_labels = []\n # TODO: finish this.\n features = np.array(features)\n feat_shape = features.shape\n for indx in range(feat_shape[0]):\n# print list(features[indx,:]), features[indx,:]\n decision = self.root.decide(list(features[indx,:]))\n class_labels.append(decision)\n return class_labels",
"def make_labels(self, ilines):\n\n llist = []\n for lind, lstr in enumerate(ilines):\n # get label and value list\n rv, label, vals = self.get_label_vals(lstr)\n if rv < 1: continue\n\n nvals = len(vals)\n\n # label = self.find_parent_label(label)\n\n if self.verb > 2: print('++ label: %s, %d val(s)' % (label, nvals))\n\n llist.append(label)\n self.maxcounts[label] = nvals\n self.subjcounts[label] = 0\n\n if not UTIL.vals_are_unique(llist):\n print('** warning: labels are not unique, will use only last values')\n llist = UTIL.get_unique_sublist(llist)\n\n return 0, llist",
"def find_labels(df_in, period):\n \n #create regression label\n \n #make a dataframe to hold the last cycle for each enginge in the dataset\n df_max_cycle = pd.DataFrame(df_in.groupby(['id.engine.id','id.maintenanceIndex'])['id.cycle'].max())\n df_max_cycle.reset_index(inplace=True)\n df_max_cycle.columns = ['id.engine.id','id.maintenanceIndex', 'lastCycle']\n \n #add time-to-failure ttf as a new column - regression label\n df_in = pd.merge(df_in, df_max_cycle, on=['id.engine.id','id.maintenanceIndex'])\n df_in['labels.ttf'] = df_in['lastCycle'] - df_in['id.cycle']\n #df_in.drop(['lastCycleReached'], axis=1, inplace=True)\n \n #create binary classification label\n df_in['labels.bnc'] = df_in['labels.ttf'].apply(lambda x: 1 if x <= period else 0)\n \n #create multi-class classification label\n df_in['labels.mcc'] = df_in['labels.ttf'].apply(lambda x: 2 if x <= period/2 else 1 if x <= period else 0)\n \n return df_in",
"def label_data(data):\n if data == 'cat': return [1, 0]\n elif data == 'dog': return [0, 1]",
"def get_labels(self):\n return [\"00:00\", \"04:00\", \"08:00\", \"12:00\", \"16:00\", \"20:00\", \"00:00\"]",
"def get_labels(self):\n return [\"00:00\", \"04:00\", \"08:00\", \"12:00\", \"16:00\", \"20:00\", \"00:00\"]",
"def get_labels(self):\n return [\"00:00\", \"04:00\", \"08:00\", \"12:00\", \"16:00\", \"20:00\", \"00:00\"]",
"def encode_ST_labels(labels):\n return np.array([1 if sentiment == 'bullish' else 0 for sentiment in labels])",
"def findCumLabel(row, cv=5):\n labels = [row[\"Label_{}\".format(x)] for x in range(cv) if\n row[\"top_stat_{}\".format(x)] == 1]\n countTSG = labels.count(\"TSG\")\n countOG = labels.count(\"OG\")\n if countTSG > countOG:\n return \"TSG\"\n elif countOG > countTSG:\n return \"OG\"\n else:\n return \"Unlabelled\"",
"def group_classes(vec):\n grouped = []\n current = 'init'\n counter = 0\n for n, label in enumerate(vec):\n if label != current:\n if current != 'init':\n initial = n - counter\n grouped.append((current, counter, initial))\n counter = 0\n current = label\n counter += 1\n initial = (n + 1) - counter\n grouped.append((current, counter, initial))\n return grouped",
"def create_categories(values: List[str]) -> Tuple[List[Tuple[int, str]], List[int]]:\r\n string_map = {}\r\n for i, value in enumerate(sorted(set(values))):\r\n string_map[value] = i\r\n\r\n legend_labels = []\r\n for key, value in string_map.items():\r\n legend_labels.append((value, key))\r\n\r\n return (legend_labels, [string_map[s] for s in values])",
"def get_labels(self):\n return [\"A轮\", \"B轮\",\"C轮\",\"天使轮\",\"战略融资\"]",
"def add_labels(df, binary=True, DELAY_THRESHOLD=20, categorical=False):\n\n def delay_class(minutes):\n if minutes <= 5:\n return 0\n if 5 < minutes <= 20:\n return 1\n if 20 < minutes <= 60:\n return 2\n if 60 < minutes <= 120:\n return 3\n if 120 < minutes:\n return 4\n else:\n return None\n\n if binary and not categorical:\n # add the target label \"binary: delayed (positive) not-delayed (negative)\" based on the threshold in minutes\n df['DELAYED'] = df['DEP_DELAY'].apply(lambda x: 1 if x >= DELAY_THRESHOLD else 0)\n\n # balance the data (same number of samples for delayed / not delayed flights)\n delayed = df[df['DELAYED'] == 1].copy()\n no_delay = df[df['DELAYED'] == 0][:delayed.shape[0]].copy()\n\n # concat into one dateframe\n data = delayed.append(no_delay, ignore_index=True)\n # logging\n percentage = delayed_percentage(df, DELAY_THRESHOLD)\n print('{:.2f}% of the total flights were delayed {} minutes or more.'.format(percentage, DELAY_THRESHOLD))\n\n del delayed, no_delay, df # release some memory\n\n elif categorical:\n df['DELAY_CLASS'] = df['DEP_DELAY'].apply(lambda row: delay_class(row))\n counts = df['DELAY_CLASS'].value_counts()\n m = min(counts)\n c0 = df[df['DELAY_CLASS'] == 0][:m].copy()\n c1 = df[df['DELAY_CLASS'] == 1][:m].copy()\n c2 = df[df['DELAY_CLASS'] == 2][:m].copy()\n c3 = df[df['DELAY_CLASS'] == 3][:m].copy()\n c4 = df[df['DELAY_CLASS'] == 4][:m].copy()\n data = c0.append([c1, c2, c3, c4])\n data['DELAY_CLASS'] = data['DELAY_CLASS'].astype(int)\n del c0, c1, c2, c3, c4 # release memory\n else:\n raise('either of binary or categorical must be true')\n\n # shuffle dataframe\n data = data.sample(frac=1).reset_index(drop=True)\n\n return data",
"def get_categories(race_name, event_discipline):\n # FIXME - need to handle pro/elite (cat 0) for MTB\n # FIXME - MTB categories are a disaster and probably need a completely different set of patterns\n cat_match = CATEGORY_RE.search(race_name)\n age_match = AGE_RANGE_RE.search(race_name)\n if age_match:\n return []\n elif cat_match:\n cats = cat_match.group(1).lower().replace('pro', '1')\n if cats in ['beginner', 'novice']:\n cats = '5'\n elif cats == 'c':\n cats = '4'\n elif cats == 'b':\n cats = '3'\n elif cats == 'a':\n cats = '1/2'\n elif cats == 'a/b':\n cats = '1/2/3'\n elif cats == 'b/c':\n cats = '3/4'\n return list(set(int(c) for c in cats.split('/')))\n else:\n return []",
"def get_fashion_mnist_labels(labels): #@save\n text_labels = ['t-shirt', 'trouser', 'pullover', 'dress', 'coat',\n 'sandal', 'shirt', 'sneaker', 'bag', 'ankle boot']\n return [text_labels[int(i)] for i in labels]",
"def get_causal_labels(posX, posY, Xrange=32, Yrange=32, nclasses=8):\n x_label = posX // (Xrange/nclasses)\n y_label = posY // (Yrange/nclasses)\n xy_class = nclasses * x_label + y_label\n return xy_class",
"def to_categorical(data, categories=[\"Silencer\", \"Inactive\", \"Weak enhancer\", \"Strong enhancer\"]):\n data = pd.Categorical(data, categories=categories, ordered=True)\n return data",
"def svm_classify(train_image_feats, train_labels, test_image_feats, kernel_type):\r\n\r\n categories = np.unique(train_labels)\r\n # [Desc] make 15 different SVM solver (one(each category) vs. the other(14 other category))\r\n svc_list = []\r\n num_categories = len(categories)\r\n for cat_i in tqdm(range(num_categories)):\r\n category = categories[cat_i]\r\n if kernel_type == 'RBF':\r\n svc = svm.SVC(kernel='rbf', probability=True)\r\n elif kernel_type == 'linear':\r\n svc = svm.SVC(kernel='linear', probability=True)\r\n new_label_for_svm = np.where(train_labels == category, 1, 0)\r\n\r\n svc.fit(train_image_feats, new_label_for_svm)\r\n svc_list.append(svc)\r\n\r\n # [Desc] get test images' class using trained svm\r\n probability_list = []\r\n for cat_i in range(num_categories):\r\n svc = svc_list[cat_i]\r\n logit = svc.decision_function(test_image_feats)\r\n probability = logit\r\n probability_list.append(probability)\r\n probability_mat = np.array(probability_list)\r\n probability_mat = np.transpose(probability_mat)\r\n # [Desc] get each class to argmax each logit value.\r\n argmax_class = np.argmax(probability_mat, axis=1)\r\n\r\n return categories[argmax_class]"
] |
[
"0.58232903",
"0.54263556",
"0.5314535",
"0.5298069",
"0.52912474",
"0.52161705",
"0.52161705",
"0.5207141",
"0.51999825",
"0.5147364",
"0.51438856",
"0.5126135",
"0.511763",
"0.51065487",
"0.50985366",
"0.50954306",
"0.5069807",
"0.5069807",
"0.5069807",
"0.50318503",
"0.50317323",
"0.50303096",
"0.50301856",
"0.5020958",
"0.5004512",
"0.49989635",
"0.4949794",
"0.49429923",
"0.4940973",
"0.4932965"
] |
0.587338
|
0
|
returns all provas >>> self._getAllProvas()
|
def _getAllProvas(self):
    return self.execSql("select_all_provas")
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def paquetes(self):\n\n paquetes = []\n\n for propietario in self.propietarios:\n for casa in propietario.casas:\n for paquete_de_casa in casa.paquetes_de_casa:\n paquetes.append(paquete_de_casa)\n for dormitorio in casa.dormitorios:\n for paquete_de_dormitorio in casa.paquetes_de_dormitorio:\n paquetes.append(paquete_de_dormitorio)\n\n return paquetes",
"def _getConteudoProvas(self, id_conteudo):\n return self.execSql(\"select_conteudo_provas\",\n id_conteudo=int(id_conteudo))",
"def get_all_drawables(self): \n drawables = []\n if len(self.component_list) > 0:\n for c in self.component_list:\n drawables.append(c.get_drawables())\n return drawables",
"def propietarios(self):\n return self.expedientepersona_set.filter(propietario=True)",
"def graphicsItems(self):\n return self.ctrl.getGraphicsItems()",
"def pruebas(self):\n self.gestor_pca.pruebas()\n return None",
"def get_procedures(self):\n return self.procs[:]",
"def draw_particles(self):\n for particle in self.particles:\n particle.draw()",
"def pais(self):\n return self._pais",
"def GetAllPanes(self):\r\n \r\n return self._panes",
"def create_plasma(self) -> list:\n\n self.plasma = paramak.Plasma(\n major_radius=6.2e2,\n minor_radius=2e2,\n elongation=1.7,\n triangularity=0.33,\n vertical_displacement=5.7e1,\n configuration=\"single-null\",\n rotation_angle=self.rotation_angle,\n )\n\n return [self.plasma]",
"def casas(self):\n casas = []\n for propietario in self.propietarios:\n for casa in propietario.casas:\n casas.append(casa)\n return casas",
"def pens(self):\n return self._pencils[:]",
"def get_components_drawables(self):\n # print self.component_list\n print len(self.component_list)\n for c in self.component_list:\n return c.get_drawables()",
"def get_background_drawables(self):\n return [background.get_drawables() for background in self.background]",
"def read_all_pram(self):\n return self.PRAM",
"def getProcs(**options):\n procSeq = search.ProcSearch.byOptions(**options).procs\n return [Proc(p) for p in procSeq.procs]",
"def get_background_drawables(self):\n return self.background.get_drawables()",
"def getProperties(self, prop_colour):\n props = database_creator.db.query(\n \"SELECT name FROM main_property_deck WHERE property_colour = :prop_colour\", prop_colour=prop_colour)\n properties = []\n for i in props:\n properties.append(i[\"name\"])\n return properties",
"def monitoredProcs(self):\n return self._pidToProcess.itervalues()",
"def _all_children(self) -> list[Container]:\n\n def get() -> list[Container]:\n result: list[Container] = []\n\n # Padding Top.\n if self.align in (VerticalAlign.CENTER, VerticalAlign.BOTTOM):\n result.append(Window(width=Dimension(preferred=0)))\n\n # The children with padding.\n for child in self.children:\n result.append(child)\n result.append(\n Window(\n height=self.padding,\n char=self.padding_char,\n style=self.padding_style,\n )\n )\n if result:\n result.pop()\n\n # Padding right.\n if self.align in (VerticalAlign.CENTER, VerticalAlign.TOP):\n result.append(Window(width=Dimension(preferred=0)))\n\n return result\n\n return self._children_cache.get(tuple(self.children), get)",
"def dormitorios(self):\n\n dormitorios = []\n\n for propietario in self.propietarios:\n #Un propietario puede tener muchas casas\n for casa in propietario.casas:\n #En una casa pueden haber 0 o varios dormitorios\n for dormitorio in casa.dormitorios:\n dormitorios.append(dormitorio)\n\n return dormitorios",
"def evasion(self):\n return self.rpc.call(MsfRpcMethod.ModuleEvasion)['modules']",
"def get_drawables(self):\n to_draw = []\n for k,v in self._to_draw.items():\n if isinstance(v,Iterable):\n for i in v:\n to_draw.append(i)\n else:\n to_draw.append(v)\n return to_draw",
"def buscaPalavras(self):\n dataSet=self.stemmerAplay()\n todasPalavras =[]\n for (notice, clazz) in dataSet:\n todasPalavras.extend(notice)\n return todasPalavras",
"def getPropertiesAll():",
"def get_provisions(self, obj):\n serializer = ProvisionOfTheActSerializer(\n obj.provisions.order_by('display_order'),\n many=True,\n read_only=True\n )\n\n return serializer.data",
"def get_plane_drawables(self):\n return self.plane.get_drawables()",
"def drawables(self):\n\treturn self._Widget__w['drawables']",
"def printAllPion(self):\n idx = 0\n for pion in self.arrayPion:\n print(\"ID = \", idx, end=\" --> \")\n pion.printPion()\n idx += 1"
] |
[
"0.6424439",
"0.61300254",
"0.5836489",
"0.5585282",
"0.5564049",
"0.544887",
"0.5441835",
"0.5334608",
"0.5263509",
"0.5240838",
"0.5235551",
"0.52304024",
"0.52212775",
"0.5186363",
"0.518535",
"0.5140931",
"0.5085166",
"0.5077748",
"0.5069945",
"0.5028898",
"0.49414274",
"0.49229604",
"0.49214742",
"0.49181134",
"0.49117395",
"0.4906736",
"0.4887643",
"0.4880896",
"0.4879792",
"0.4878873"
] |
0.8105716
|
0
|
returns all provas from conteudo >>> self._getConteudoProvas()
|
def _getConteudoProvas(self, id_conteudo):
    return self.execSql("select_conteudo_provas",
                        id_conteudo=int(id_conteudo))
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def _getAllProvas(self):\n return self.execSql(\"select_all_provas\")",
"def paquetes(self):\n\n paquetes = []\n\n for propietario in self.propietarios:\n for casa in propietario.casas:\n for paquete_de_casa in casa.paquetes_de_casa:\n paquetes.append(paquete_de_casa)\n for dormitorio in casa.dormitorios:\n for paquete_de_dormitorio in casa.paquetes_de_dormitorio:\n paquetes.append(paquete_de_dormitorio)\n\n return paquetes",
"def _getConteudoCadastros(self, id_conteudo):\n return self.execSql(\"select_conteudo_cadastros\",\n id_conteudo=int(id_conteudo))",
"def pruebas(self):\n self.gestor_pca.pruebas()\n return None",
"def _getConteudoCadastrosSelecionados(self, id_conteudo):\n return self.execSql(\"select_conteudo_cadastros_selecionados\",\n id_conteudo=int(id_conteudo))",
"def propietarios(self):\n return self.expedientepersona_set.filter(propietario=True)",
"def casas(self):\n casas = []\n for propietario in self.propietarios:\n for casa in propietario.casas:\n casas.append(casa)\n return casas",
"def parametros_conteudo(self):\n return self._parametros_conteudo",
"def getCostoPromedioCompras(self, codigo):\n return self.conexion.ejecutarSQL(\"select sum(valor_total)/sum(cantidad) from productosXcompras where codigo_producto = '%s'\"%(codigo))[0][0]",
"def returnIdDadosPessoais(self):\r\n self.cursor.execute(\"SELECT DOCUMENTO FROM DADOS_PESSOAIS;\")\r\n self.__result = self.cursor.fetchall()\r\n self.__lista = []\r\n try:\r\n for self.__i in self.__result:\r\n self.__lista.append(self.__i[0])\r\n return self.__lista\r\n except:\r\n return []",
"def print_pessoas() -> None:\n lista_pessoas = select_todos_registros('pessoa')\n\n print_colunas('pessoa')\n for pessoa in lista_pessoas:\n print(*pessoa, sep=', ')",
"def buscaPalavras(self):\n dataSet=self.stemmerAplay()\n todasPalavras =[]\n for (notice, clazz) in dataSet:\n todasPalavras.extend(notice)\n return todasPalavras",
"def mostrar_podio(self):\n participantes = self.__puntuacion_total()\n podio = self.__armar_podio(participantes)\n podio.reverse()\n for i in range(len(podio)):\n print(\n f\"\"\"\n ===================================\n ========== PUESTO Nº: {i+1} ==========\n ===================================\n id disparo: {podio[i]['idDisparo']},\n Disparos: {podio[i]['disparos']},\n Numero participante: {podio[i]['nroParticipante']},\n Nombre: {podio[i]['nombre']},\n Apellido: {podio[i]['apellido']},\n Edad: {podio[i]['edad']},\n sexo: {podio[i]['sexo']},\n Puntaje: {podio[i]['puntaje_total']}\n ===================================\n ===================================\n \"\"\"\n )",
"def listar_gabarito():\n return GabaritoProva.listar(gabarito)",
"def dormitorios(self):\n\n dormitorios = []\n\n for propietario in self.propietarios:\n #Un propietario puede tener muchas casas\n for casa in propietario.casas:\n #En una casa pueden haber 0 o varios dormitorios\n for dormitorio in casa.dormitorios:\n dormitorios.append(dormitorio)\n\n return dormitorios",
"def listar_proyectos(request):\n proyectos = Proyecto.objects.all()\n PROYECTOS_USUARIO= CantProyectos(request)\n cant = len(PROYECTOS_USUARIO)\n context={\n 'proyectos':proyectos,###### TODOS LOS PROYECTOS\n 'list': PROYECTOS_USUARIO,##PROYECTOS DEL USUARIO LOS CUAL SE DEBE MOSTRAR, SOLO ID\n 'cant': cant####CANTIDAD DE PROYECTOS QUE POSEE\n }\n return render(request, 'Menu/listar_proyectos.html', context)",
"def carregar_contatos(self):\r\n self.clientes = []\r\n fornecedor = SistemaFornecedor()\r\n fornecedor.carregar_arquivo('')\r\n for contato in fornecedor.fornecedores:\r\n self.adicionar_cliente(contato.nome, '', [contato.telefone], [contato.email], empresa='')\r\n return len(self.clientes)",
"def list_classificacao_pilotoss_cmd():\n return ListClassificacaoPilotosCommand()",
"def get_contenu(self):\n return self.objets",
"def mostrar_promedio_disparo(self):\n participantes = self.__disparos.copy()\n promedios = self.__calcular_promedio_disparo(participantes)\n for promedio in promedios:\n print(\n f\"\"\"\n =================================\n ====== PARTICIPANTE Nº: {promedio['nroParticipante']} ======\n =================================\n Disparos: {promedio['disparos']},\n Nombre: {promedio['nombre']},\n Apellido: {promedio['apellido']},\n Promedio: {promedio['promedio']}\n =================================\n =================================\n \"\"\"\n )",
"def get_partidos_con_campos_para_forzar(fase):\n\tpartidos_espera_list = get_partidos_espera_list(fase)\n\tcampos_para_forzar_list = CampoController.get_campos_para_forzar(fase)\n\tpartido_campo = []\n\tfor x in range(0, len(partidos_espera_list)):\n\t\taux = [partidos_espera_list[x], campos_para_forzar_list[x]]\n\t\tpartido_campo.append(aux)\n\treturn partido_campo",
"def listaProfesion():\n prof = ProfesionModel()\n\n return prof.listarTodos()",
"def get_pasos_proceso(request, proceso):\n del request\n tra = Paso.objects.filter(\n proceso_id=proceso).order_by('numero').values()\n tra = [dict(t) for t in tra]\n return JsonResponse(tra, safe=False)",
"def carregarDadosPessoais(self, documento):\r\n self.__id = int(documento)\r\n self.cursor.execute(\"SELECT * FROM DADOS_PESSOAIS WHERE DOCUMENTO = %s;\" %(self.__id))\r\n if self.cursor.rowcount == 1:\r\n return self.cursor.fetchone()\r\n else:\r\n return None",
"def _getCadastroCursos(self, id_cadastro):\n return self.execSql(\"select_cadastro_cursos\",\n id_cadastro=int(id_cadastro))",
"def get_procedures(self):\n return self.procs[:]",
"def getFichas_disponibles(self):\n \n lista = copy.deepcopy(self.__fichas_disponibles)\n return lista #EJ. [\"T\", \"O\", \"P\", \"O\"]",
"def generar_poblacion():\n poblacion = []\n ind = Arbol()\n for i in range(size_pop):\n poblacion.append(generar_individuo_recursivo(ind))\n return poblacion",
"def get_convos():\n file_path = os.path.join(config.DATA_PATH, config.CONVO_FILE)\n convos = []\n with open(file_path, 'rb') as f:\n for line in f.readlines():\n parts = line.split(b' +++$+++ ')\n if len(parts) == 4:\n convo = []\n for line in parts[3][1:-2].split(b', '):\n convo.append(line[1:-1])\n convos.append(convo)\n\n return convos",
"def get_escobas(self):\n return self.escobas"
] |
[
"0.7646942",
"0.66558737",
"0.62366885",
"0.6154832",
"0.59434545",
"0.5830311",
"0.5807141",
"0.5712288",
"0.55908936",
"0.55568075",
"0.55074483",
"0.54849684",
"0.54467976",
"0.54265547",
"0.5366095",
"0.5328654",
"0.5267592",
"0.525496",
"0.5244163",
"0.5236238",
"0.5230633",
"0.521121",
"0.52028644",
"0.519842",
"0.5197241",
"0.51775724",
"0.5172398",
"0.5165784",
"0.51306456",
"0.51262474"
] |
0.8148251
|
0
|
returns all cadastros from conteudo >>> self._getConteudoCadastros()
|
def _getConteudoCadastros(self, id_conteudo):
return self.execSql("select_conteudo_cadastros",
id_conteudo=int(id_conteudo))
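A minimal usage sketch, assuming execSql is a thin DAO helper that maps a named query such as "select_conteudo_cadastros" to parameterized SQL and returns the fetched rows; the class name, query text, and connection handling below are illustrative assumptions, not taken from the source.

class ConteudoDao:
    # queries maps a query name to its SQL text, for example:
    # {"select_conteudo_cadastros":
    #  "SELECT * FROM cadastro WHERE id_conteudo = %(id_conteudo)s"}
    def __init__(self, connection, queries):
        self._connection = connection
        self._queries = queries

    def execSql(self, query_name, **params):
        # Look up the named query, bind the keyword parameters, fetch all rows.
        with self._connection.cursor() as cursor:
            cursor.execute(self._queries[query_name], params)
            return cursor.fetchall()

    def _getConteudoCadastros(self, id_conteudo):
        return self.execSql("select_conteudo_cadastros",
            id_conteudo=int(id_conteudo))

The sibling accessors further down (_getConteudoCadastrosSelecionados, _getCadastroCursos, _getCadastroIdiomas, _getCadastroEstagios, _getCadastroEmpregos) follow the same one-line pattern, differing only in the query name passed to execSql.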
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def listar_cadastros():\n return cadastro_alunos.listar_aluno()",
"def _getConteudoCadastrosSelecionados(self, id_conteudo):\n return self.execSql(\"select_conteudo_cadastros_selecionados\",\n id_conteudo=int(id_conteudo))",
"def carregar_contatos(self):\r\n self.clientes = []\r\n fornecedor = SistemaFornecedor()\r\n fornecedor.carregar_arquivo('')\r\n for contato in fornecedor.fornecedores:\r\n self.adicionar_cliente(contato.nome, '', [contato.telefone], [contato.email], empresa='')\r\n return len(self.clientes)",
"def _getCadastroCursos(self, id_cadastro):\n return self.execSql(\"select_cadastro_cursos\",\n id_cadastro=int(id_cadastro))",
"def obtener_comentarios(idActividad):\n comentarios = Comentario.objects.filter(idactcomentario=idActividad)\n lista = []\n for elem in comentarios:\n lista.append(elem)\n return lista",
"def get(self, idconta):\n transacoes = Transacoes.get_all_transacoes_conta(idconta)\n if not transacoes:\n api.abort(404, 'Transacao not found')\n return transacoes",
"def get_escobas(self):\n return self.escobas",
"def _getConteudoProvas(self, id_conteudo):\n return self.execSql(\"select_conteudo_provas\",\n id_conteudo=int(id_conteudo))",
"def _getCadastroIdiomas(self, id_cadastro):\n return self.execSql(\"select_cadastro_idiomas\",\n id_cadastro=int(id_cadastro))",
"def escobas(self):\n return self._escobas",
"def casas(self):\n casas = []\n for propietario in self.propietarios:\n for casa in propietario.casas:\n casas.append(casa)\n return casas",
"def obtenerCuentas(self, unaSeccion):\n #return self.cuentasManager.searchBy(Cuenta.seccion, unaSeccion.nombre)\n return [obj for obj in self.cuentasManager.almacen.find(Cuenta, Cuenta.seccion_id == unaSeccion.ide)]",
"def _getCadastroCursosSuperiores(self, id_cadastro):\n return self.execSql(\"select_cadastro_cursos_superiores\",\n id_cadastro=int(id_cadastro))",
"def obtenerCodigos(self):\n\t\tlistado = []\n\t\tcolumnaCodigos = self.Hoja.col(1)\n\t\tfor i, c in enumerate(columnaCodigos):\n\t\t\tif self.patronCodigo.match(c.value.strip()) != None:\n\t\t\t\tcontenido = {}\n\t\t\t\tfila = self.Hoja.row(i)\n\t\t\t\tcontenido[\"codigo\"] = fila[1].value.strip()\n\t\t\t\tcontenido[\"localidad\"] = fila[2].value.strip()\n\t\t\t\tcontenido[\"campania\"] = str(int(fila[3].value))\n\t\t\t\tcontenido[\"deuda\"] = fila[4].value\n\n\t\t\t\tlistado.append(contenido)\n\t\treturn listado",
"def _getCadastroEmpregos(self, id_cadastro):\n return self.execSql(\"select_cadastro_empregos\",\n id_cadastro=int(id_cadastro))",
"def getDatosBasicosCliente(self, idCliente):\n return self.conexion.ejecutarSQL(\"select id_tipoIdentificacion, id, nombres, primer_apellido from clientes where id = '%s' and activo='SI' \"%(idCliente))",
"def listarTodos(self):\n # SQL\n consulta = \"SELECT * FROM referenciales.nacionalidad\"\n\n try:\n \n conexion = Conexion()\n con = conexion.getConexion()\n cur = con.cursor()\n cur.execute(consulta)\n\n return cur.fetchall()\n\n except con.Error as e:\n print(e.pgerror)\n return False\n finally:\n if con is not None:\n cur.close()\n con.close()",
"def get_datos_jugadores(self):\n return self.__partido.get_datos_jugadores()",
"def get_resultados(self):\n return self.__resultados",
"def _getCadastroEstagios(self, id_cadastro):\n return self.execSql(\"select_cadastro_estagios\",\n id_cadastro=int(id_cadastro))",
"def cargarObras(self):\n self.cargarObjetos(self.tableOs,\n ObraSocialModel.buscarTodos(\"razon_social\", self.sesion).all(),\n (\"razon_social\", \"cuit\", \"direccion\")\n )",
"def returnIdDadosPessoais(self):\r\n self.cursor.execute(\"SELECT DOCUMENTO FROM DADOS_PESSOAIS;\")\r\n self.__result = self.cursor.fetchall()\r\n self.__lista = []\r\n try:\r\n for self.__i in self.__result:\r\n self.__lista.append(self.__i[0])\r\n return self.__lista\r\n except:\r\n return []",
"def get_all_by_id_vaga(id_vaga):\n return Candidatura.query.filter_by(\n id_vaga=id_vaga\n ).all()",
"async def get_all_cards():\n card_tuple = await ex.conn.fetch(\"SELECT id FROM blackjack.cards\")\n all_cards = []\n for card in card_tuple:\n all_cards.append(card[0])\n return all_cards",
"def listar(self):\n conn = None\n\n try:\n params = config()\n conn = psycopg2.connect(**params)\n cur = conn.cursor()\n cur.execute(\"SELECT id_aluno, nome_aluno, cpf_aluno, data_nasc_aluno, telefone_aluno FROM Alunos\")\n\n # Imprime o número de alunos cadastrados.\n print(f\"\\nHá {cur.rowcount} aluno(s) cadastrado(s): \")\n row = cur.fetchone()\n\n while row is not None:\n print(f\"\\nID: {row[0]}\\nNome: {row[1]}\\nCPF: {row[2]}\\nData de Nascimento: {row[3].strftime('%d/%m/%Y')}\\nTelefone: {row[4]}\\n\")\n row = cur.fetchone()\n \n cur.close()\n\n except (Exception, psycopg2.DatabaseError) as error:\n print(error)\n \n finally:\n if conn is not None:\n conn.close()",
"def read_all():\n # Create the list of CIs from our data\n ci = db.session.query(CI).order_by(CI.id).all()\n app.logger.debug(pformat(ci))\n # Serialize the data for the response\n ci_schema = CISchema(many=True)\n data = ci_schema.dump(ci)\n return data",
"def listaNacionalidades():\n nac = NacionalidadModel()\n\n return nac.listarTodos()",
"def get_all_content(self):\n return self._get_all_content()",
"def all_dicoms(self):\n return [dcm_ctr_pair[1:] for dcm_ctr_pair in self.data]",
"def obter_lista_arquivos(self):\n if os.path.exists(self.caminho):\n return [arq for arq in self.obter_lista_conteudo() \\\n if os.path.isfile(arq)]\n else:\n return []"
] |
[
"0.78284174",
"0.7825673",
"0.7200778",
"0.7105828",
"0.67812115",
"0.6602772",
"0.6459946",
"0.6330621",
"0.63059235",
"0.6274711",
"0.6226249",
"0.6196035",
"0.61312944",
"0.59542704",
"0.5948513",
"0.5909978",
"0.5889182",
"0.57821816",
"0.57669735",
"0.5753354",
"0.56854796",
"0.5678534",
"0.5654553",
"0.56362325",
"0.55907714",
"0.55897456",
"0.558595",
"0.5556935",
"0.55521667",
"0.55374855"
] |
0.8525138
|
0
|
returns all cadastros selecionados from conteudo >>> self._getConteudoCadastrosSelecionados()
|
def _getConteudoCadastrosSelecionados(self, id_conteudo):
return self.execSql("select_conteudo_cadastros_selecionados",
id_conteudo=int(id_conteudo))
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def _getConteudoCadastros(self, id_conteudo):\n return self.execSql(\"select_conteudo_cadastros\",\n id_conteudo=int(id_conteudo))",
"def listar_cadastros():\n return cadastro_alunos.listar_aluno()",
"def _getCadastroCursos(self, id_cadastro):\n return self.execSql(\"select_cadastro_cursos\",\n id_cadastro=int(id_cadastro))",
"def carregar_contatos(self):\r\n self.clientes = []\r\n fornecedor = SistemaFornecedor()\r\n fornecedor.carregar_arquivo('')\r\n for contato in fornecedor.fornecedores:\r\n self.adicionar_cliente(contato.nome, '', [contato.telefone], [contato.email], empresa='')\r\n return len(self.clientes)",
"def _getCadastroCursosSuperiores(self, id_cadastro):\n return self.execSql(\"select_cadastro_cursos_superiores\",\n id_cadastro=int(id_cadastro))",
"def obtenerCuentas(self, unaSeccion):\n #return self.cuentasManager.searchBy(Cuenta.seccion, unaSeccion.nombre)\n return [obj for obj in self.cuentasManager.almacen.find(Cuenta, Cuenta.seccion_id == unaSeccion.ide)]",
"def obtener_comentarios(idActividad):\n comentarios = Comentario.objects.filter(idactcomentario=idActividad)\n lista = []\n for elem in comentarios:\n lista.append(elem)\n return lista",
"def returnIdDadosPessoais(self):\r\n self.cursor.execute(\"SELECT DOCUMENTO FROM DADOS_PESSOAIS;\")\r\n self.__result = self.cursor.fetchall()\r\n self.__lista = []\r\n try:\r\n for self.__i in self.__result:\r\n self.__lista.append(self.__i[0])\r\n return self.__lista\r\n except:\r\n return []",
"def getDatosBasicosCliente(self, idCliente):\n return self.conexion.ejecutarSQL(\"select id_tipoIdentificacion, id, nombres, primer_apellido from clientes where id = '%s' and activo='SI' \"%(idCliente))",
"def get_resultados(self):\n return self.__resultados",
"def get_escobas(self):\n return self.escobas",
"def escobas(self):\n return self._escobas",
"def _getConteudoProvas(self, id_conteudo):\n return self.execSql(\"select_conteudo_provas\",\n id_conteudo=int(id_conteudo))",
"def _getCadastroIdiomas(self, id_cadastro):\n return self.execSql(\"select_cadastro_idiomas\",\n id_cadastro=int(id_cadastro))",
"def casas(self):\n casas = []\n for propietario in self.propietarios:\n for casa in propietario.casas:\n casas.append(casa)\n return casas",
"def select_todos_registros(nome_tabela: str) -> list:\n query = f'SELECT * FROM {nome_tabela};'\n\n lista_registros = banco_operacoes(query)\n\n return lista_registros",
"def getDiputados(self):\n\n prog = re.compile('(sr.|sra.).*,*(\\.-)', re.IGNORECASE)\n result = prog.finditer(self.dialogo)\n\n lista_diputados = []\n for diputado in result:\n lista_diputados.append((self.dialogo[diputado.span()[0]:diputado.span()[1]], diputado.span()))\n\n return lista_diputados",
"def _getCadastroEstagios(self, id_cadastro):\n return self.execSql(\"select_cadastro_estagios\",\n id_cadastro=int(id_cadastro))",
"def resultadosDiarios(self):\r\n self.checkingConnection()\r\n self.model = QSqlQueryModel()\r\n self.model.setQuery('''SELECT date1, ingresos, compras, gastos,\r\n (ingresos - compras - gastos) AS Saldo FROM (SELECT date1,\r\n ingresos, compras, gastos FROM ((SELECT Clients.date AS date1,\r\n SUM(Clients.value) AS ingresos FROM Clients GROUP BY Clients.date)\r\n JOIN (SELECT Compras.date AS date2, SUM(Compras.value) AS compras\r\n FROM Compras GROUP BY Compras.date) JOIN (SELECT Gastos.date AS date3,\r\n SUM(Gastos.value) AS gastos FROM Gastos GROUP BY Gastos.date)\r\n ON date1 = date2 AND date2 = date3))''', self.db)\r\n self.setModel(self.model)",
"def get_queryset(self):\n queryset = Cliente.objects.all()\n id = self.request.query_params.get('id')\n cedula = self.request.query_params.get('cedula')\n \n if id is not None:\n queryset = queryset.filter(id=id)\n return queryset\n if cedula is not None:\n queryset = queryset.filter(cedula=cedula)\n return queryset\n return queryset",
"def comitentes(self):\n return self.expedientepersona_set.filter(comitente=True)",
"def obtenerCodigos(self):\n\t\tlistado = []\n\t\tcolumnaCodigos = self.Hoja.col(1)\n\t\tfor i, c in enumerate(columnaCodigos):\n\t\t\tif self.patronCodigo.match(c.value.strip()) != None:\n\t\t\t\tcontenido = {}\n\t\t\t\tfila = self.Hoja.row(i)\n\t\t\t\tcontenido[\"codigo\"] = fila[1].value.strip()\n\t\t\t\tcontenido[\"localidad\"] = fila[2].value.strip()\n\t\t\t\tcontenido[\"campania\"] = str(int(fila[3].value))\n\t\t\t\tcontenido[\"deuda\"] = fila[4].value\n\n\t\t\t\tlistado.append(contenido)\n\t\treturn listado",
"def comprueba_casos_seleccionados(self):\r\n \r\n if self.lst_casos_carga_grupo.count() == 0:\r\n \r\n msg = MessageBox('Warning', 'Seleccionar al menos un caso de carga para el grupo.')\r\n msg.show_message_box()\r\n \r\n self.nuevo_grupo.setFocus()\r\n \r\n else:\r\n \r\n self.comprueba_nombre_grupo()",
"def buscarOs(self):\n\n if self.lineRazon.isEnabled():\n self.filtrarObra()\n\n elif not self.lineRazon.isEnabled() and (self.tableNC.rowCount() != 0 or self.tableFactura.rowCount() != 0):\n QtGui.QMessageBox.information(self,\"Aviso\",\"Imposible cambiar de Obra Social. Ya se ha seleccionado\\\n una\")\n else:\n self.gbNotaCredito.setEnabled(False)\n self.gbFactura.setEnabled(False)\n self.lineRazon.clear()\n self.lineRazon.setEnabled(True)\n self.lineCuit.clear()\n self.lineCuit.setEnabled(True)\n self.tableOs.setEnabled(True)",
"def _getCadastroEmpregos(self, id_cadastro):\n return self.execSql(\"select_cadastro_empregos\",\n id_cadastro=int(id_cadastro))",
"def get_selects(self):\r\n return self.board.get_selects()",
"def reservas(self):\n reservas = []\n for cliente in self.clientes:\n for reserva in cliente.reservas:\n reservas.append(reserva)\n return reservas",
"def get_all_by_id_vaga(id_vaga):\n return Candidatura.query.filter_by(\n id_vaga=id_vaga\n ).all()",
"def get(self, idconta):\n transacoes = Transacoes.get_all_transacoes_conta(idconta)\n if not transacoes:\n api.abort(404, 'Transacao not found')\n return transacoes",
"def sucursales(self):\n\n return Sucursal.objects.filter(id__in=self.horarios_atencion.values_list('sucursal'))"
] |
[
"0.7286904",
"0.7140278",
"0.674005",
"0.6571689",
"0.65568024",
"0.6380306",
"0.6226275",
"0.6122682",
"0.6118822",
"0.6110704",
"0.60445625",
"0.603889",
"0.59635156",
"0.5894455",
"0.58356327",
"0.58334166",
"0.5807106",
"0.5766607",
"0.5716018",
"0.570755",
"0.56913656",
"0.5638996",
"0.5621243",
"0.5616881",
"0.55922014",
"0.5589493",
"0.55223364",
"0.5514848",
"0.55095",
"0.5481416"
] |
0.8558676
|
0
|
returns all cursos superiores from cadastro >>> self._getCadastroCursosSuperiores()
|
def _getCadastroCursosSuperiores(self, id_cadastro):
return self.execSql("select_cadastro_cursos_superiores",
id_cadastro=int(id_cadastro))
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def super_categories(self):\n R = self.base().base_ring()\n category = GradedHopfAlgebrasWithBasis(R)\n return [Realizations(self.base()), category.Quotients()]",
"def sucursales(self):\n\n return Sucursal.objects.filter(id__in=self.horarios_atencion.values_list('sucursal'))",
"def _getCadastroCursos(self, id_cadastro):\n return self.execSql(\"select_cadastro_cursos\",\n id_cadastro=int(id_cadastro))",
"def obtenerCuentas(self, unaSeccion):\n #return self.cuentasManager.searchBy(Cuenta.seccion, unaSeccion.nombre)\n return [obj for obj in self.cuentasManager.almacen.find(Cuenta, Cuenta.seccion_id == unaSeccion.ide)]",
"def escobas(self):\n return self._escobas",
"def get_escobas(self):\n return self.escobas",
"def get_socios(self):\n return self.__socios",
"def cellules(self): # itérateur rendu safe\n cellule_courante = self.tete\n while cellule_courante is not None:\n cellule_suivante = cellule_courante.suivant # sauvegarde\n yield cellule_courante\n cellule_courante = cellule_suivante # récupération de la sauvegarde",
"def super_categories(self):\n return [Sets()]",
"def getCubes():",
"def super_categories(self):\n return [Sets().Metric(), Realizations(self.base())]",
"def superposiciones(materias):\n\n combinaciones = []\n for materia in materias:\n\n nuevas = []\n for curso in materia['cursos']:\n\n for combinacion in combinaciones:\n if _cursos_compatibles(combinacion, curso):\n nuevas.append(combinacion + [curso])\n\n # cuando no hay combinaciones agregar el curso directamente\n if not combinaciones:\n nuevas.append([curso])\n\n combinaciones = nuevas\n\n return combinaciones",
"def get_contenu(self):\n return self.objets",
"def _per_supercls_summarize(self):\n\n per_super_class_result = {}\n superCats = self.FPParams.catSuperClsName\n\n for superCatId, superCat in enumerate(superCats):\n superCatKey = \"supercls-\" + superCat\n\n resultDet = self._summarize_with_cat(\n f1=False, catSuperIdx=superCatId)\n per_super_class_result[superCatKey] = {\"iou\": resultDet}\n if superCatId != 1:\n results = self._summarize_with_cat(catSuperIdx=superCatId)\n resultF1 = self._summarize_with_cat(\n iou=False, catSuperIdx=superCatId)\n\n per_super_class_result[superCatKey][\"f1\"] = resultF1\n per_super_class_result[superCatKey][\"iou + f1\"] = results\n\n return per_super_class_result",
"def getBaseScenesInCategory(self):\n logger.debug(\"Func: getBaseScenesInCategory\")\n\n self.scanBaseScenes()\n # return sorted(self._baseScenesInCategory.keys())\n return self._baseScenesInCategory",
"def listar_cadastros():\n return cadastro_alunos.listar_aluno()",
"def extra_super_categories(self):\n return [self.base_category()]",
"def extra_super_categories(self):\n return [Semigroups()]",
"def extra_super_categories(self):\n return [Semigroups()]",
"def carregar_contatos(self):\r\n self.clientes = []\r\n fornecedor = SistemaFornecedor()\r\n fornecedor.carregar_arquivo('')\r\n for contato in fornecedor.fornecedores:\r\n self.adicionar_cliente(contato.nome, '', [contato.telefone], [contato.email], empresa='')\r\n return len(self.clientes)",
"def all_subconstituents(self, compute=False):\n out = {}\n for i in range(self._.d+1):\n try:\n out[i] = self.subconstituent(i, compute=compute)\n except IndexError:\n pass\n return out",
"def getCVTerms(self):\n return _libsbml.SBase_getCVTerms(self)",
"def get(self):\n return get_clientes()",
"def casas(self):\n casas = []\n for propietario in self.propietarios:\n for casa in propietario.casas:\n casas.append(casa)\n return casas",
"def get_resultados(self):\n return self.__resultados",
"def comitentes(self):\n return self.expedientepersona_set.filter(comitente=True)",
"def get_cursos(request):\n if request.method == 'GET':\n cursos = Curso.nodes.all()\n cursos_list = []\n for i in range(0, len(cursos)):\n cursos_list.append(cursos[i].__dict__[\"nombre\"])\n return JsonResponse({\"cursos\": cursos_list})",
"def sulcus(self):\n pass\n # return self._sulcus",
"def _get_subobjects(self) -> Iterable[SymbolicObject]:\n\n return self._subobjects",
"def scatters(self):\n\t\treturn self._scatter"
] |
[
"0.624646",
"0.5991194",
"0.5920284",
"0.5828413",
"0.58202296",
"0.57951957",
"0.5728039",
"0.57278585",
"0.56486577",
"0.5621824",
"0.5589727",
"0.5585636",
"0.54832244",
"0.54572535",
"0.5445204",
"0.53770113",
"0.5349885",
"0.527543",
"0.527543",
"0.5256881",
"0.52321404",
"0.52217215",
"0.5198283",
"0.5178089",
"0.5164421",
"0.5136546",
"0.50846434",
"0.50817555",
"0.50654966",
"0.5053707"
] |
0.8686882
|
0
|
returns all idiomas from cadastro >>> self._getCadastroIdiomas()
|
def _getCadastroIdiomas(self, id_cadastro):
return self.execSql("select_cadastro_idiomas",
id_cadastro=int(id_cadastro))
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def _getCadastroEmpregos(self, id_cadastro):\n return self.execSql(\"select_cadastro_empregos\",\n id_cadastro=int(id_cadastro))",
"def _getCadastroCursos(self, id_cadastro):\n return self.execSql(\"select_cadastro_cursos\",\n id_cadastro=int(id_cadastro))",
"def returnIdAluno(self):\r\n self.cursor.execute(\"SELECT MATRICULA FROM ALUNO;\")\r\n self.__result = self.cursor.fetchall()\r\n self.__lista = []\r\n try:\r\n for self.__i in self.__result:\r\n self.__lista.append(self.__i[0])\r\n return self.__lista\r\n except:\r\n return []",
"def returnIdDadosPessoais(self):\r\n self.cursor.execute(\"SELECT DOCUMENTO FROM DADOS_PESSOAIS;\")\r\n self.__result = self.cursor.fetchall()\r\n self.__lista = []\r\n try:\r\n for self.__i in self.__result:\r\n self.__lista.append(self.__i[0])\r\n return self.__lista\r\n except:\r\n return []",
"def listar_cadastros():\n return cadastro_alunos.listar_aluno()",
"def obtenerCodigos(self):\n\t\tlistado = []\n\t\tcolumnaCodigos = self.Hoja.col(1)\n\t\tfor i, c in enumerate(columnaCodigos):\n\t\t\tif self.patronCodigo.match(c.value.strip()) != None:\n\t\t\t\tcontenido = {}\n\t\t\t\tfila = self.Hoja.row(i)\n\t\t\t\tcontenido[\"codigo\"] = fila[1].value.strip()\n\t\t\t\tcontenido[\"localidad\"] = fila[2].value.strip()\n\t\t\t\tcontenido[\"campania\"] = str(int(fila[3].value))\n\t\t\t\tcontenido[\"deuda\"] = fila[4].value\n\n\t\t\t\tlistado.append(contenido)\n\t\treturn listado",
"def returnIdDisciplina(self):\r\n self.cursor.execute(\"SELECT ID FROM DISCIPLINA ORDER BY NOME;\")\r\n self.__result = self.cursor.fetchall()\r\n self.__lista = []\r\n try:\r\n for self.__i in self.__result:\r\n self.__lista.append(self.__i[0])\r\n return self.__lista\r\n except:\r\n return []",
"def _getCadastroEstagios(self, id_cadastro):\n return self.execSql(\"select_cadastro_estagios\",\n id_cadastro=int(id_cadastro))",
"def _getConteudoCadastrosSelecionados(self, id_conteudo):\n return self.execSql(\"select_conteudo_cadastros_selecionados\",\n id_conteudo=int(id_conteudo))",
"def _getCadastroCursosSuperiores(self, id_cadastro):\n return self.execSql(\"select_cadastro_cursos_superiores\",\n id_cadastro=int(id_cadastro))",
"def _getConteudoCadastros(self, id_conteudo):\n return self.execSql(\"select_conteudo_cadastros\",\n id_conteudo=int(id_conteudo))",
"def returnIdResponsavel(self):\r\n self.cursor.execute(\"SELECT ID FROM RESPONSAVEL;\")\r\n self.__result = self.cursor.fetchall()\r\n self.__lista = []\r\n try:\r\n for self.__i in self.__result:\r\n self.__lista.append(self.__i[0])\r\n return self.__lista\r\n except:\r\n return []",
"def obtener_comentarios(idActividad):\n comentarios = Comentario.objects.filter(idactcomentario=idActividad)\n lista = []\n for elem in comentarios:\n lista.append(elem)\n return lista",
"def dormitorios(self):\n\n dormitorios = []\n\n for propietario in self.propietarios:\n #Un propietario puede tener muchas casas\n for casa in propietario.casas:\n #En una casa pueden haber 0 o varios dormitorios\n for dormitorio in casa.dormitorios:\n dormitorios.append(dormitorio)\n\n return dormitorios",
"def returnIdEndereco(self):\r\n self.cursor.execute(\"SELECT ID FROM ENDERECO;\")\r\n self.__result = self.cursor.fetchall()\r\n self.__lista = []\r\n try:\r\n for self.__i in self.__result:\r\n self.__lista.append(self.__i[0])\r\n return self.__lista\r\n except:\r\n return []",
"def listaNacionalidades():\n nac = NacionalidadModel()\n\n return nac.listarTodos()",
"def returnIdTurma(self):\r\n self.cursor.execute(\"SELECT ID FROM TURMA;\")\r\n self.__result = self.cursor.fetchall()\r\n self.__lista = []\r\n try:\r\n for self.__i in self.__result:\r\n self.__lista.append(self.__i[0])\r\n return self.__lista\r\n except:\r\n return []",
"def returnIdData(self):\r\n self.cursor.execute(\"SELECT ID FROM DATAS;\")\r\n self.__result = self.cursor.fetchall()\r\n self.__lista = []\r\n try:\r\n for self.__i in self.__result:\r\n self.__lista.append(self.__i[0])\r\n return self.__lista\r\n except:\r\n return []",
"def getIntervencionesDiputados(self):\n prog_indices = re.compile('(sr.|sra.).*', re.IGNORECASE)\n prog_nombre = re.compile('(sr.|sra.).*,*(\\.-)', re.IGNORECASE)\n\n result = prog_indices.finditer(self.dialogo)\n\n indices = []\n for i in result:\n indices.append(i.span()[0])\n\n dips = []\n for indice in range(len(indices) - 1):\n inicio, final = prog_nombre.match(self.dialogo[indices[indice]:indices[indice + 1]]).span()\n\n discurso = self.dialogo[indices[indice]:indices[indice + 1]]\n\n nombre = discurso[inicio:final]\n dips.append(nombre)\n self.intervenciones.append([nombre, discurso])\n\n dips_unicos = list(set(dips))\n\n for dip in dips_unicos:\n temp_dip = []\n for entrada in self.intervenciones:\n if dip == entrada[0]:\n temp_dip.append(entrada[1])\n\n self.intervenciones_por_diputado[dip] = temp_dip",
"def get_datos_jugadores(self):\n return self.__partido.get_datos_jugadores()",
"def getAminos(self):\n\t\treturn self.aminos",
"def eula_ids(self) -> pulumi.Output[Sequence[str]]:\n return pulumi.get(self, \"eula_ids\")",
"def get_all_incidents():\n allIncidents = Incident.get_all()\n #allCops = get_all_cops()\n incidents = []\n for i in allIncidents:\n if(\n (i['operations_center']['id'] in allCops) and\n (inicioAmostragem <= i.reporting_date and i.reporting_date <=terminoAmostragem)\n ):\n \n i['operations_center']['id'] = changeCop(i['operations_center']['id'])\n incidents.append(i)\n \n return incidents",
"def _get_invariom_list(self):\n self.invariom_list = []\n for molecule in self.values():\n for atom in molecule.atoms:\n for invariom in atom.invarioms:\n if not invariom in self.invariom_list:\n self.invariom_list.append(invariom)",
"def carregar_contatos(self):\r\n self.clientes = []\r\n fornecedor = SistemaFornecedor()\r\n fornecedor.carregar_arquivo('')\r\n for contato in fornecedor.fornecedores:\r\n self.adicionar_cliente(contato.nome, '', [contato.telefone], [contato.email], empresa='')\r\n return len(self.clientes)",
"def identer(self) -> List[str]:\n self._populer_identer()\n if self._identer:\n return [str(ident) for ident in self._identer if ident]\n return []",
"def getIDs():",
"def __iter__(self):\n return _Iter_Ciudad_(self.aeropuertos)",
"def get_ids(self) -> List[str]:",
"def todos(self):\n socios = session.query(Socio).all()\n return socios"
] |
[
"0.6638407",
"0.6615875",
"0.6518311",
"0.63684267",
"0.6315263",
"0.62116504",
"0.61529374",
"0.6018137",
"0.5997833",
"0.59039795",
"0.589317",
"0.5857431",
"0.5828872",
"0.5719018",
"0.57138324",
"0.569726",
"0.5658001",
"0.5619028",
"0.55859584",
"0.5571677",
"0.55598164",
"0.5553581",
"0.5517987",
"0.5490287",
"0.53839236",
"0.537958",
"0.5362935",
"0.53574944",
"0.5328134",
"0.53254557"
] |
0.8564768
|
0
|
returns all cursos from cadastro >>> self._getCadastroCursos()
|
def _getCadastroCursos(self, id_cadastro):
return self.execSql("select_cadastro_cursos",
id_cadastro=int(id_cadastro))
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def _getCadastroCursosSuperiores(self, id_cadastro):\n return self.execSql(\"select_cadastro_cursos_superiores\",\n id_cadastro=int(id_cadastro))",
"def listar_cadastros():\n return cadastro_alunos.listar_aluno()",
"def get_cursos(request):\n if request.method == 'GET':\n cursos = Curso.nodes.all()\n cursos_list = []\n for i in range(0, len(cursos)):\n cursos_list.append(cursos[i].__dict__[\"nombre\"])\n return JsonResponse({\"cursos\": cursos_list})",
"def get_socios(self):\n return self.__socios",
"def obtener_comentarios(idActividad):\n comentarios = Comentario.objects.filter(idactcomentario=idActividad)\n lista = []\n for elem in comentarios:\n lista.append(elem)\n return lista",
"def _getConteudoCadastros(self, id_conteudo):\n return self.execSql(\"select_conteudo_cadastros\",\n id_conteudo=int(id_conteudo))",
"def _getConteudoCadastrosSelecionados(self, id_conteudo):\n return self.execSql(\"select_conteudo_cadastros_selecionados\",\n id_conteudo=int(id_conteudo))",
"def get(self) -> list:\n return self.__cogs",
"def get_available_cops():\n allIncidents = Incident.get_all()\n cops = []\n \n for i in allIncidents:\n if(inicioAmostragem <= i.reporting_date and i.reporting_date <=terminoAmostragem):\n cops.append(i['operations_center']['id'])\n \n allReports = RelatoDeSituacao.get_all()\n \n for r in allReports:\n if (\n inicioAmostragem <= r.data_hora and \n r.data_hora <=terminoAmostragem and\n 'cop' in r.relator and # todos tem que ter o COP\n 'id' in r.relator['cop'] # todos tem que ter o id \n ):\n cops.append(r.relator['cop']['id'])\n \n return set(cops)",
"def list():\n\n return {\"cncs\": [{\"id\": id.split(\"/\")[-1]} for id in sorted(flask.current_app.redis.keys(\"/cnc/*\"))]}",
"def get_all_cars(self):\n return self.cars.get_all_cars()",
"def obtenerCuentas(self, unaSeccion):\n #return self.cuentasManager.searchBy(Cuenta.seccion, unaSeccion.nombre)\n return [obj for obj in self.cuentasManager.almacen.find(Cuenta, Cuenta.seccion_id == unaSeccion.ide)]",
"def casas(self):\n casas = []\n for propietario in self.propietarios:\n for casa in propietario.casas:\n casas.append(casa)\n return casas",
"def get_all_cars(self):\n\n all_cars = TheCar.objects.all()\n\n return all_cars",
"def carregar_contatos(self):\r\n self.clientes = []\r\n fornecedor = SistemaFornecedor()\r\n fornecedor.carregar_arquivo('')\r\n for contato in fornecedor.fornecedores:\r\n self.adicionar_cliente(contato.nome, '', [contato.telefone], [contato.email], empresa='')\r\n return len(self.clientes)",
"def getArcs(self):\n return self.getArcsFrom()",
"def get_currencies(self) -> list:\n return self.client.currencies.get_all()",
"def cargarObras(self):\n self.cargarObjetos(self.tableOs,\n ObraSocialModel.buscarTodos(\"razon_social\", self.sesion).all(),\n (\"razon_social\", \"cuit\", \"direccion\")\n )",
"def get_all() -> list:\n categorias = []\n conn = GenericDao.connect()\n cursor = conn.execute(\"SELECT * FROM categorias\")\n for row in cursor:\n categoria = Categoria(row[1], row[0])\n categorias.append(categoria)\n if debug:\n print(str(categoria))\n\n conn.close()\n return categorias",
"def get_all_currencies(self):\r\n method = self.public_endpoints['currency_info']['method']\r\n url = self.base_url + self.public_endpoints['currency_info']['url']\r\n req = requests.request(method, url)\r\n res = req.json()\r\n\r\n if res['success'] == True:\r\n return res[\"result\"]\r\n else:\r\n return res",
"def GetAllCategoryOfCost():\n\n logs.logger.debug(\n \"Start to get back all categories of Cost objects from database.\")\n try:\n searchedCostsItems = session.query(Cost.Cost).all()\n logs.logger.info(\n \"Get back all categories of Cost objects from database.\")\n return [CostItems.category for CostItems in searchedCostsItems]\n except Exception as e:\n logs.logger.error(e, exc_info=True)",
"def getCubes():",
"def getcurso(curso):\n\n dataset = {\n \"curso\": [],\n \"materia\": [],\n \"professor\": [],\n \"horas\": [],\n \"ids\": []\n }\n request_data_get = cursos_collections.find({\"curso\": curso})\n\n for result in request_data_get:\n dataset['curso'].append(result[\"curso\"])\n dataset['materia'].append(result[\"materia\"])\n dataset['professor'].append(result[\"professor\"])\n dataset['horas'].append(result[\"horas\"])\n dataset['ids'].append(str(result[\"_id\"]))\n\n return dataset",
"def get_escobas(self):\n return self.escobas",
"def _getCadastroIdiomas(self, id_cadastro):\n return self.execSql(\"select_cadastro_idiomas\",\n id_cadastro=int(id_cadastro))",
"def get_currencies(self):\n return self.__call__('currencies', 'getcurrencies')",
"def cargar_obras(self):\n self.cargarObjetos(self.tableObra,\n ObraSocialModel.buscarTodos(\"razon_social\", self.sesion).all(),\n (\"razon_social\", \"cuit\", \"direccion\")\n )",
"def concentrations(self):\n with sql_connection.TRN as TRN:\n sql = \"\"\"SELECT quantitated_composition_id, raw_concentration,\n computed_concentration\n FROM qiita.concentration_calculation\n WHERE upstream_process_id = %s\n ORDER BY concentration_calculation_id\"\"\"\n TRN.add(sql, [self._id])\n return [\n (composition_module.Composition.factory(comp_id), r_con, c_con)\n for comp_id, r_con, c_con in TRN.execute_fetchindex()]",
"def cursors(self):\n return self.__cursors",
"def escobas(self):\n return self._escobas"
] |
[
"0.73462015",
"0.6953587",
"0.66569704",
"0.641626",
"0.6390572",
"0.6291141",
"0.61828154",
"0.6040256",
"0.59803355",
"0.5945287",
"0.5939955",
"0.5931112",
"0.59067345",
"0.5881739",
"0.58784807",
"0.58678705",
"0.5852543",
"0.5841695",
"0.58370304",
"0.57984644",
"0.5792945",
"0.5775797",
"0.57633793",
"0.57318413",
"0.57075983",
"0.569259",
"0.5689766",
"0.56868494",
"0.5683736",
"0.5669252"
] |
0.8433589
|
0
|
returns all estagios from cadastro >>> self._getCadastroEstagios()
|
def _getCadastroEstagios(self, id_cadastro):
return self.execSql("select_cadastro_estagios",
id_cadastro=int(id_cadastro))
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def escobas(self):\n return self._escobas",
"def get_escobas(self):\n return self.escobas",
"def get_resultados(self):\n return self.__resultados",
"def getSituacaoAeroporto(self):\n soup = BeautifulSoup(self.getContent(url_direct.get('dash-aero-situacao')))\n list_situation = []\n for aeport in self.list_aeport.keys():\n element = soup.findAll('li', {'id': aeport})[0]\n name_class = element.get('class')[0]\n list_aeport = self.list_aeport.get(aeport)\n list_situation.append({'sigla': aeport,\n 'name': list_aeport.get('name'),\n 'local': list_aeport.get('local'),\n 'codigo': list_aeport.get('codigo'),\n 'status': self.situation_aeport.get(name_class),\n 'name_class': name_class,\n 'site': list_aeport.get('site')})\n return list_situation",
"def listar_cadastros():\n return cadastro_alunos.listar_aluno()",
"def _getCadastroEmpregos(self, id_cadastro):\n return self.execSql(\"select_cadastro_empregos\",\n id_cadastro=int(id_cadastro))",
"def getVentas(self, fechaInicio, fechaFin, usuarioColaborador=\"\"):\n if usuarioColaborador == \"\":\n return self.conexion.ejecutarSQL(\"select v.id, v.fecha, v.hora, v.subtotal, v.totalIVA, v.total, v.estado, v.usuario_Colaborador, \\\n v.id_Cliente, v.id_TipoPago, tP.tipo from ventas v, tipoPagos tP where v.id_tipoPago=tP.id \\\n and v.fecha between '%s' and '%s'\" %(fechaInicio,fechaFin))\n else:\n return self.conexion.ejecutarSQL(\"select v.id, v.fecha, v.hora, v.subtotal, v.totalIVA, v.total, v.estado, v.usuario_Colaborador, \\\n v.id_Cliente, v.id_TipoPago, tP.tipo from ventas v, tipoPagos tP where v.id_tipoPago=tP.id \\\n and v.fecha between '%s' and '%s' \\\n and usuario_colaborador='%s'\" %(fechaInicio,fechaFin,usuarioColaborador))",
"def list_all(self):\n\n url = 'equipamento/list/'\n\n code, xml = self.submit(None, 'GET', url)\n\n return self.response(code, xml)",
"def resultadosDiarios(self):\r\n self.checkingConnection()\r\n self.model = QSqlQueryModel()\r\n self.model.setQuery('''SELECT date1, ingresos, compras, gastos,\r\n (ingresos - compras - gastos) AS Saldo FROM (SELECT date1,\r\n ingresos, compras, gastos FROM ((SELECT Clients.date AS date1,\r\n SUM(Clients.value) AS ingresos FROM Clients GROUP BY Clients.date)\r\n JOIN (SELECT Compras.date AS date2, SUM(Compras.value) AS compras\r\n FROM Compras GROUP BY Compras.date) JOIN (SELECT Gastos.date AS date3,\r\n SUM(Gastos.value) AS gastos FROM Gastos GROUP BY Gastos.date)\r\n ON date1 = date2 AND date2 = date3))''', self.db)\r\n self.setModel(self.model)",
"def _getCadastroCursosSuperiores(self, id_cadastro):\n return self.execSql(\"select_cadastro_cursos_superiores\",\n id_cadastro=int(id_cadastro))",
"def comitentes(self):\n return self.expedientepersona_set.filter(comitente=True)",
"def siniestros_queryset(self):\n qs = self._filtrar_siniestros()\n values = qs.values(*self.campos)\n # Formatear campos\n for sin in values:\n # Evitar campos nulos\n try:\n sin['fecha'] = sin['fecha'].strftime('%d-%m-%Y')\n #sin['participantes'] = ' - '.join([s for s in sin['participantes'] if s])\n except TypeError:\n pass\n except AttributeError:\n pass\n sin['año'] = sin.pop('anio')\n return values",
"def get_all_elections(self) -> list:",
"def data_estagio(self):\n return self._data_estagio",
"def hent_alle_sager(self, aktive=True) -> List[Sag]:\n return self.session.query(Sag).all()",
"def get_all_teas(self):\n self.tView.all_teas_display(self.manyTea)\n self.tView.prompt_display(0)",
"def getAll(self):\n # Get VT\n self.getVT()\n # Process VT data\n self.processVT()\n # Get reverse DNS\n self.getRDNS()\n # Get passivetotal\n self.getPT()\n # Get Geolocation\n self.getGeo()\n # Get Shodan\n self.getShodan()",
"def tiene_epicrisis(historia_id):\n return Epicrisis.objects.filter(historia=historia_id).all()",
"def defineEstadosFinaisAFD(self):\n\n for e in self.estadosFinais:\n for e_AFD in self.afd.estados:\n if e in e_AFD and e_AFD not in self.afd.estadosFinais:\n self.afd.estadosFinais.append(e_AFD)",
"def __iter__(self):\n return _Iter_Ciudad_(self.aeropuertos)",
"def cargarObras(self):\n self.cargarObjetos(self.tableOs,\n ObraSocialModel.buscarTodos(\"razon_social\", self.sesion).all(),\n (\"razon_social\", \"cuit\", \"direccion\")\n )",
"def getStations(self) :\n return self._stations",
"def getArcs(self):\n return self.getArcsFrom()",
"def Tratamentos_dos_dados(self):\r\n self.tabela_clientes[\"TotalGasto\"] = pd.to_numeric(self.tabela_clientes[\"TotalGasto\"], errors=\"coerce\") # transformar coluna que deveria ser número e está como texto em número, errors=\"coerce\" -> se der erro em algo deixa vazio\r\n \r\n self.tabela_clientes = self.tabela_clientes.dropna(how='all', axis=1) # remover as colunas que estam 100% vazia, how='all' -> todas\r\n \r\n self.tabela_clientes = self.tabela_clientes.dropna() # remover a linha que tem algum valor vazio\r\n\r\n print(self.tabela_clientes.info()) # informações sobre a tabela \r",
"def getEstado(self):\n\n dic = copy.deepcopy(self.__estado)\n return dic #EJ. {'A':{'cantidad':2,'valor':1} , 'B':{'cantidad':3,'valor':1}} ",
"def get_all_festivals(self):\n self.cursor.execute(\"select * from festivals\")\n self.connection.commit()\n return self.cursor.fetchall()",
"def get_station_entrances(self):\n station_entrances = []\n for wrapper in self.soup.find_all(\"div\", {\"class\": \"stop-wrapper\"}):\n text = wrapper.find(\"span\").text\n if text == '' or text is None:\n entrance = ''\n else:\n entrance = text.split(',')[0].lstrip().rstrip()\n station_entrances.append(entrance)\n return np.array(station_entrances).T",
"def getIntervencionesDiputados(self):\n prog_indices = re.compile('(sr.|sra.).*', re.IGNORECASE)\n prog_nombre = re.compile('(sr.|sra.).*,*(\\.-)', re.IGNORECASE)\n\n result = prog_indices.finditer(self.dialogo)\n\n indices = []\n for i in result:\n indices.append(i.span()[0])\n\n dips = []\n for indice in range(len(indices) - 1):\n inicio, final = prog_nombre.match(self.dialogo[indices[indice]:indices[indice + 1]]).span()\n\n discurso = self.dialogo[indices[indice]:indices[indice + 1]]\n\n nombre = discurso[inicio:final]\n dips.append(nombre)\n self.intervenciones.append([nombre, discurso])\n\n dips_unicos = list(set(dips))\n\n for dip in dips_unicos:\n temp_dip = []\n for entrada in self.intervenciones:\n if dip == entrada[0]:\n temp_dip.append(entrada[1])\n\n self.intervenciones_por_diputado[dip] = temp_dip",
"def getSituacaoAeroportoVoo(self, list_aeport=[]):\n soup = BeautifulSoup(self.getContent(url_direct.get('dash-aero')))\n aeport = None\n for aeport in list_aeport:\n aeport_status = soup.find('td', text=aeport.get('local'))\n if aeport_status:\n _aeport_status = aeport_status.parent.findAll('span')\n aeport['atrasados'] = str(_aeport_status[0].text)\n aeport['cancelados'] = str(_aeport_status[8].text)\n else:\n aeport['atrasados'] = None\n aeport['cancelados'] = None\n return list_aeport",
"def consulta_estado(request=None):\n\n estado = dict()\n tipo_atenciones = AttentionType.objects.values('name')\n try:\n registros = Registers.objects.filter(start_attention__contains=timezone.now().date()).values('attention_type__name').annotate(Count('attention_number'))\n except Exception:\n registros = None\n\n try:\n atenciones = InitialAttention.objects.filter(created__contains=timezone.now().date()).values('attention_type__name').annotate(Count('attention_number'))\n except Exception:\n atenciones = None\n\n for atencion in atenciones:\n for registro in registros:\n if registro['attention_type__name'] == atencion['attention_type__name']:\n tipo_estado = registro['attention_type__name']\n estado[tipo_estado] = atencion['attention_number__count'] - registro['attention_number__count']\n\n for tipo_atencion in tipo_atenciones:\n if not tipo_atencion['name'] in estado:\n estado[tipo_atencion['name']] = atenciones.get(attention_type__name=tipo_atencion['name'])['attention_number__count'] \\\n if atenciones.filter(attention_type__name=tipo_atencion['name']).exists() else \\\n 0\n\n return JSONResponse(estado, status=201)"
] |
[
"0.6319616",
"0.6259798",
"0.61760014",
"0.59320027",
"0.58920735",
"0.5871899",
"0.57897377",
"0.57393724",
"0.57185704",
"0.57155216",
"0.5697578",
"0.5683173",
"0.5638902",
"0.56373924",
"0.56075317",
"0.556858",
"0.5567548",
"0.5533924",
"0.54867834",
"0.54842824",
"0.5481808",
"0.54419637",
"0.5403309",
"0.53968644",
"0.5383179",
"0.53595674",
"0.5350902",
"0.53349847",
"0.53318584",
"0.5314954"
] |
0.81034905
|
0
|
returns all empregos from cadastro >>> self._getCadastroEmpregos()
|
def _getCadastroEmpregos(self, id_cadastro):
return self.execSql("select_cadastro_empregos",
id_cadastro=int(id_cadastro))
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def get_employees(self):\n from Employee import Employee\n cursor = self.dbconnect.get_cursor()\n cursor.execute('select * from employee')\n\n employees = list()\n for row in cursor:\n employee = Employee(row[0], row[1], row[2], row[3], row[4], row[5], row[6], row[7], row[8])\n employees.append(employee)\n return employees",
"def get_employees(self):\n return self.employees",
"def getEmployees(self):\n return self.employees",
"def get(self):\n resultado = EmployeeModel.query.all()\n return resultado",
"def _getCadastroEstagios(self, id_cadastro):\n return self.execSql(\"select_cadastro_estagios\",\n id_cadastro=int(id_cadastro))",
"def employees(self) -> object:\n return self._employees",
"def listar_cadastros():\n return cadastro_alunos.listar_aluno()",
"def comitentes(self):\n return self.expedientepersona_set.filter(comitente=True)",
"def get_employees(self, active_only):\n cursor = self.dbconnect.get_cursor()\n\n if active_only:\n cursor.execute(\n 'SELECT id, name, email, office, extra_info, picture_location, research_group, title, is_external, '\n 'is_admin, is_active FROM employee WHERE is_active = TRUE')\n else:\n cursor.execute(\n 'SELECT id, name, email, office, extra_info, picture_location, research_group, title, is_external, '\n 'is_admin, is_active FROM employee')\n\n employees = list()\n for row in cursor:\n obj = Employee(row[0], row[1], row[2], row[3], row[4], row[5], row[6], row[7], row[8], row[9], row[10])\n employees.append(obj)\n return employees",
"def list_all(self):\n\n url = 'equipamento/list/'\n\n code, xml = self.submit(None, 'GET', url)\n\n return self.response(code, xml)",
"def escobas(self):\n return self._escobas",
"def get_escobas(self):\n return self.escobas",
"def get_employees(cls, strategy=lazyload):\n cls._check_strategy(strategy)\n\n return db.session.query(Employee).options(\n strategy(Employee.department)\n ).all()",
"def get_emp_list(self):\n\t\tcondition= ''\n\t\temp_list=[]\n\t\tif self.is_for_all==0:\n\t\t\tif not self.selected_employees:\n\t\t\t\tfrappe.throw(_(\"No employees for the mentioned criteria\"))\n\t\t\t#emp_list = [cstr(d.employee) for d in self.selected_employees]\n\t\t\temp_list = frappe.db.sql_list(\"\"\"\n\t\t\t\tselect\n\t\t\t\t\temployee from `tabAttendance Salary Tool Employee`\n\t\t\t\twhere\n\t\t\t\t\tparent = '%(parent)s' \n\t\t\t\"\"\"%{\"parent\": self.name})\n\t\t\tcondition+= \"\"\" and t1.employee IN %(employees)s \"\"\"\n\t\tif self.is_open_period==0:\n\t\t\tif not self.start_date or not self.end_date:\n\t\t\t\tfrappe.throw(_(\"Satart Date and End Date are Mandatories\"))\n\t\t\tcondition= \"\"\" and attendance_date >= %(start_date)s and attendance_date <= %(end_date)s\"\"\"\n\t\temp_list = frappe.db.sql(\"\"\"\n\t\t\tselect\n\t\t\t\tt1.employee as employee, count(*) as attendance_days\n\t\t\tfrom\n\t\t\t\t`tabAttendance` t1\n\t\t\twhere\n\t\t\t\tt1.attendance_salary_tool is null\n\t\t\t\tand t1.docstatus = 1 and t1.status='Present'\n\t\t\t\t{condition} group by t1.employee order by t1.employee asc\n\t\t\"\"\".format(condition=condition),{\"employees\": tuple(emp_list),\"start_date\": self.start_date,\"end_date\": self.end_date}, as_dict=True)\n\t\treturn emp_list",
"def get_resultados(self):\n return self.__resultados",
"def carregar_contatos(self):\r\n self.clientes = []\r\n fornecedor = SistemaFornecedor()\r\n fornecedor.carregar_arquivo('')\r\n for contato in fornecedor.fornecedores:\r\n self.adicionar_cliente(contato.nome, '', [contato.telefone], [contato.email], empresa='')\r\n return len(self.clientes)",
"def get_birthday_employees(self):\n birthday_employees = []\n\n employees = self.search([\n ('birthday_reminders', '=', True),\n ('birthday', '!=', False),\n ])\n if not employees:\n return birthday_employees\n\n return employees.filtered(lambda x: self.check_emp_birthday(x.birthday))",
"def listar(self):\n conn = None\n\n try:\n params = config()\n conn = psycopg2.connect(**params)\n cur = conn.cursor()\n cur.execute(\"SELECT id_aluno, nome_aluno, cpf_aluno, data_nasc_aluno, telefone_aluno FROM Alunos\")\n\n # Imprime o número de alunos cadastrados.\n print(f\"\\nHá {cur.rowcount} aluno(s) cadastrado(s): \")\n row = cur.fetchone()\n\n while row is not None:\n print(f\"\\nID: {row[0]}\\nNome: {row[1]}\\nCPF: {row[2]}\\nData de Nascimento: {row[3].strftime('%d/%m/%Y')}\\nTelefone: {row[4]}\\n\")\n row = cur.fetchone()\n \n cur.close()\n\n except (Exception, psycopg2.DatabaseError) as error:\n print(error)\n \n finally:\n if conn is not None:\n conn.close()",
"def show_all_employees(self):\n try:\n employees = self.admin_repository.show_all_employees()\n if employees:\n for employee in employees:\n print(\"Employee Id : {}\".format(employee[0]))\n print(\"Name : {}\".format(employee[1]))\n print(\"Email : {}\".format(employee[2]))\n print(\"----------------------------\")\n return True\n else:\n print(\"No records found.\")\n return False\n except Exception as e:\n print(\"Some Error occurred.Please try again\")\n return False",
"def returnIdDadosPessoais(self):\r\n self.cursor.execute(\"SELECT DOCUMENTO FROM DADOS_PESSOAIS;\")\r\n self.__result = self.cursor.fetchall()\r\n self.__lista = []\r\n try:\r\n for self.__i in self.__result:\r\n self.__lista.append(self.__i[0])\r\n return self.__lista\r\n except:\r\n return []",
"def listaNacionalidades():\n nac = NacionalidadModel()\n\n return nac.listarTodos()",
"def _getCadastroIdiomas(self, id_cadastro):\n return self.execSql(\"select_cadastro_idiomas\",\n id_cadastro=int(id_cadastro))",
"def list(self):\n # Grupos en los que el usuario formo parte\n curso = self.get_curso_actual()\n entregadores = identity.current.user.get_entregadores(curso)\n r = cls.select(IN(cls.q.entregador, entregadores), orderBy=-Entrega.q.fecha)\n return dict(records=r, name=name, namepl=namepl, limit_to=identity.current.user.paginador)",
"def gather_employee_entries(self):\n user_inputs = [\n self.emp_lname.get(), self.emp_mi.get(), self.emp_fname.get(),\n self.emp_hiredate.get()\n ]\n\n return self.check_input_empty(user_inputs)",
"def get(self):\n employees = self.service.get_employees(strategy=selectinload)\n return self.schema.dump(employees, many=True), 200",
"def returnIdEndereco(self):\r\n self.cursor.execute(\"SELECT ID FROM ENDERECO;\")\r\n self.__result = self.cursor.fetchall()\r\n self.__lista = []\r\n try:\r\n for self.__i in self.__result:\r\n self.__lista.append(self.__i[0])\r\n return self.__lista\r\n except:\r\n return []",
"def propietarios(self):\n return self.expedientepersona_set.filter(propietario=True)",
"def consultar_todos_DB(self):\n registros = db.session.query(ModelConcurso).all()\n for registro in registros:\n print(registro)",
"def list(self, request):\n employee = self.controller.retrieve_all_employees()\n serializer = data_serializers.PresentEmployeeDataSerializer(employee, many=True)\n return Response(serializer.data)",
"def select_todos_registros(nome_tabela: str) -> list:\n query = f'SELECT * FROM {nome_tabela};'\n\n lista_registros = banco_operacoes(query)\n\n return lista_registros"
] |
[
"0.71649987",
"0.69541067",
"0.69509244",
"0.6686057",
"0.6627273",
"0.6490655",
"0.64192116",
"0.6373798",
"0.6273055",
"0.6254344",
"0.61355954",
"0.6130747",
"0.61046684",
"0.60459995",
"0.60410386",
"0.5918352",
"0.5901199",
"0.5899546",
"0.58882326",
"0.58859956",
"0.5810601",
"0.5705442",
"0.5653376",
"0.5636825",
"0.5634932",
"0.56293404",
"0.5625247",
"0.5584601",
"0.55649304",
"0.55603313"
] |
0.8396344
|
0
|
Converts the CSS shortcut string into a valid innertag.
|
def process_shortcut(s):
    # Validate grouping before parsing: bracket counts must match and
    # quotes must come in pairs, otherwise the shortcut is malformed.
    if s.count('[') != s.count(']'):
        raise MismatchedGrouping('Invalid grouping of brackets, %s' % s)
    if s.count('"') % 2 != 0 or s.count("'") % 2 != 0:
        raise MismatchedGrouping('Quotation groupings are mismatched, %s' % s)
    ret_dict = {}
    # find the classes and id; setdefault keeps the first '#id' if several appear
    for match in rgx_class.findall(s):
        if match.startswith('#'):
            ret_dict.setdefault('id', match.strip('#'))
        elif match.startswith('.'):
            classes = ret_dict.setdefault('_classes', [])
            classes.append(match.strip('.'))
    # find all of our named attributes ([name="value"] pairs)
    for key, value in rgx_n_attr.findall(s):
        ret_dict.setdefault(key, value)
    # expose the collected classes under 'class'; always present, possibly empty
    ret_dict['class'] = ret_dict.pop('_classes', [])
    return ret_dict
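process_shortcut depends on module-level names not shown in this snippet (MismatchedGrouping, rgx_class, rgx_n_attr). Below is a minimal sketch of plausible definitions plus a usage example; the exception and regex patterns are hypothetical reconstructions, not the original module's code, and the example assumes they are pasted alongside the function above.

import re

class MismatchedGrouping(Exception):
    # Raised when brackets or quotes in the shortcut are unbalanced.
    pass

# '#id' and '.class' tokens (hypothetical pattern)
rgx_class = re.compile(r'[#.][\w-]+')
# '[name="value"]' attributes; quotes are optional (hypothetical pattern)
rgx_n_attr = re.compile(r'\[([\w-]+)=["\']?([^"\'\]]*)["\']?\]')

print(process_shortcut('#main.article.lead[title="Hello"]'))
# -> {'id': 'main', 'title': 'Hello', 'class': ['article', 'lead']}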
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def as_css_identifier(s):\n return re.sub(r'[^a-z0-9-]', '-', s, flags=re.IGNORECASE)",
"def parse_rule(rule_string):\n count, _, color_bags = rule_string.partition(\" \")\n return color_bags.rpartition(\" bag\")[0], int(count)",
"def category2url(cat):\n return remove_diacritics(cat).replace(\" \", \"_\")",
"def sanitize_css(self, style):\n # disallow urls\n style = re.compile('url\\s*\\(\\s*[^\\s)]+?\\s*\\)\\s*').sub(' ', style)\n\n # gauntlet\n\n # Validate the css in the style tag and if it's not valid, then drop\n # the whole thing.\n parts = style.split(';')\n gauntlet = re.compile(\n r\"\"\"^([-/:,#%.'\"\\sa-zA-Z0-9!]|\\w-\\w|'[\\s\\w]+'\\s*|\"[\\s\\w]+\"|\\([\\d,%\\.\\s]+\\))*$\"\"\"\n )\n\n for part in parts:\n if not gauntlet.match(part):\n return ''\n\n if not re.match(\"^\\s*([-\\w]+\\s*:[^:;]*(;\\s*|$))*$\", style):\n return ''\n\n clean = []\n for prop, value in re.findall('([-\\w]+)\\s*:\\s*([^:;]*)', style):\n if not value:\n continue\n\n if prop.lower() in self.allowed_css_properties:\n clean.append(prop + ': ' + value + ';')\n\n elif prop.lower() in self.allowed_svg_properties:\n clean.append(prop + ': ' + value + ';')\n\n return ' '.join(clean)",
"def _handle_ansi_color_codes(self, s):\r\n def ansi_code_to_css(code):\r\n return ' '.join(['ansi-%s' % c for c in code.split(';')])\r\n return '<span>' +\\\r\n HtmlReporter._ANSI_COLOR_CODE_RE.sub(\r\n lambda m: '</span><span class=\"%s\">' % ansi_code_to_css(m.group(1)), s) +\\\r\n '</span>'",
"def colorize(self, string):\n D = \"(%s)\" % colorize(\"@R{D}\")\n L = \"(%s)\" % colorize(\"@G{L}\")\n DL = \"(%s,%s)\" % (colorize(\"@R{D}\"), colorize(\"@G{L}\"))\n colorized = string.replace(\"(D)\", D)\n colorized = colorized.replace(\"(L)\", L)\n colorized = colorized.replace(\"(D,L)\", DL)\n return colorized",
"def css2dict(css):\n cssdict = {}\n if None == css:\n return cssdict\n for pair in css.split(';'): #TODO: what about escaped separators\n if pair.find(':') >= 0:\n key, value = pair.split(':')\n cssdict[ key.strip() ] = value.strip()\n return cssdict",
"def fancy( _inStr):\r\n return '<head><style type=\"text/css\">td.special{ background-color:aqua;font-size: 100%;margin-left: 20px;font-family: times, sans-serif, arial}</style></head><table><tr><td class=\"special\">' + _inStr + '</td></tr></table>'",
"def _tag_of(entry: _LexiconEntry) -> str:\n return entry[\"tag\"].upper()",
"def _colorify_rule(rule):\n # Colorize normal parts in bold-blue\n # /abc/<def>/ghi/<klm>\n # ^^^ ^^^\n rule = re.sub(r'/([^</]+)',\n r'/\\033[34;1m\\1\\033[39;22m', rule)\n # Colorize dynamic parts in bold-red and <> in gray\n # /abc/<def>/ghi/<klm>\n # ^^^^^ ^^^^^\n rule = re.sub(r'<([^>]*)>',\n r'\\033[90m<\\033[31;1m\\1\\033[90m>\\033[39;22m', rule)\n # Colorize slashes in gray\n # /abc/<def>/ghi/<klm>\n # ^ ^ ^ ^\n rule = rule.replace('/', '\\033[90m/\\033[39m')\n return rule",
"def sub_ansi(self, ansimatch):\n return self.ansi_map_dict.get(ansimatch.group(), \"\")",
"def stylecrunch(stystr):\n return dict(pair.split(\":\") for pair in semicolons.findall(stystr))",
"def parse_unicode_emojis_to_alias(text):\n #return EmojiParser.parseFromUnicode(text, EMOJI_ALIAS_TRANSFORMER)\n return text",
"def _restricted_search_hashtags(val: str):\n try:\n val = str(val).lower()\n except ValueError:\n raise argparse.ArgumentTypeError(f\"{val} could not be parsed to a string\")\n\n if not val.startswith('#'):\n return '#' + val\n return val",
"def format_icon(icon):\n return icon.strip().replace(\" \", \"_\")",
"def crawler_format(keyword):\r\n tmp = re.sub('[^0-9a-zA-Z]', '_', keyword).strip('_')\r\n return re.sub('_+', ' ', tmp).lower()",
"def _process_colors(self, s: str) -> str:\r\n return self._color_regexp.sub(lambda m: self._ansi_equivalent(m.group()), s)",
"def POStag(self, word):\n \t\tif word in (\"'\",\",\",\".\",':',';','.'):\n \t\t\ttag = 'PUNCT'\n \t\telif word == '-':\n \t\t\ttag = 'DASH'\n \t\telse:\n \t\t\ttag = 'NOTAG'\n \t\treturn tag",
"def as_identifier(self, string):\n t = string.lower()\n\n if t in self.keywords:\n return idaapi.COLSTR(string, idaapi.SCOLOR_ASMDIR)\n\n elif t in self.statements:\n return idaapi.COLSTR(string, idaapi.SCOLOR_LOCNAME)\n\n elif t in self.types:\n return idaapi.COLSTR(string, idaapi.SCOLOR_IMPNAME)\n\n else:\n return string",
"def _extract_css_declaration(self, ac, styleClassifier, trg,\n is_for_calltip=False):\n DEBUG = DebugStatus\n #DEBUG = True\n #PERF: Use accessor.gen_chars_and_styles() if possible.\n try:\n ac.resetToPosition(trg.pos)\n p, ch, style = ac.getPrevPosCharStyle()\n if not styleClassifier.is_operator(style, ac):\n if DEBUG:\n print \"Current ch is not an operator, so getting the \" \\\n \"preceeding one, p: %d, ch: %r, style: %d\" % \\\n (p, ch, style, )\n p, ch, style = ac.getPrevPosCharStyle(\n ignore_styles=styleClassifier.ignore_styles)\n except IndexError:\n # This occurs when already at the end of the buffer, so we reset to\n # the last buffer position then\n ac.resetToPosition(trg.pos - 1)\n p, ch, style = ac.getCurrentPosCharStyle()\n if DEBUG:\n print \"\"\"------ _extract_css_declaration -----\"\"\"\n print \" _extract_css_declaration:: Trg.pos: %d\" % (trg.pos)\n #ac._debug = True\n print \" _extract_css_declaration:: pos: %r\" % (p)\n print \" _extract_css_declaration:: ch: %r\" % (ch)\n print \" _extract_css_declaration:: style: %r\" % (style)\n ac.dump()\n # Walk back to ':' operator.\n num_close_parenthesis = 0\n min_pos = max(0, trg.pos - 200) # Lookback up to 200 chars in total\n while p >= min_pos:\n #print \"ch: %r, style: %d\" % (ch, style, )\n if ch == ':' and styleClassifier.is_operator(style, ac):\n break\n elif num_close_parenthesis > 0:\n if ch == \"(\":\n num_close_parenthesis -= 1\n if DEBUG:\n print \"Found matching open paren,\" \\\n \" num_close_parenthesis now: %d\" % (\n num_close_parenthesis)\n elif DEBUG:\n print \"Ignoring everything inside the parenthesis\"\n elif ch == \"(\" and (styleClassifier.is_operator(style) or\n styleClassifier.is_value(style)):\n if DEBUG:\n print \"Already inside a paren, no cpln's then.\"\n #XXX SCSS and Less support arithmetic expressions\n return (None, None, None)\n elif ch == \")\" and (styleClassifier.is_operator(style) or\n styleClassifier.is_value(style)):\n num_close_parenthesis += 1\n if DEBUG:\n print \"Found close paren, need to skip over contents,\" \\\n \" num_close_parenthesis: %d\" % (\n num_close_parenthesis)\n elif styleClassifier.is_operator(style):\n if ch not in \":,%\":\n if DEBUG:\n print \"%s: couldn't find ':' operator, found invalid \" \\\n \"operator: %d %r %d\" % (trg.name, p, ch, style)\n #TODO: SCSS and Less support arithmetic expressions\n return (None, None, None)\n elif styleClassifier.is_string(style):\n # Used to skip over string items in property values\n if DEBUG:\n print \"Found string style, ignoring it\"\n elif not (styleClassifier.is_value(style) or styleClassifier.is_default(style)):\n # old CSS lexer: everything betwee \":\" and ';' used to be a value.\n if DEBUG:\n print \"%s: couldn't find ':' operator, found invalid \" \\\n \"style: pcs: %d %r %d\" % (trg.name, p, ch, style)\n return (None, None, None)\n p, ch, style = ac.getPrevPosCharStyle(\n ignore_styles=styleClassifier.ignore_styles)\n else:\n if DEBUG:\n print \"%s: couldn't find ':' operator within 200 chars, \" \\\n \"giving up\" % (trg.name)\n return (None, None, None)\n\n if DEBUG:\n print \" _extract_css_declaration:: Found ':' at pos: %d\" % (p)\n # Parse out the property name.\n colan_pos = p\n p, ch, style = ac.getPrecedingPosCharStyle(style,\n ignore_styles=styleClassifier.ignore_styles,\n max_look_back=150)\n if style not in styleClassifier.identifier_styles:\n if DEBUG:\n print \" _extract_css_declaration:: No identifier style found\" \\\n \" before ':', found style %d instead\" % (style)\n return (None, None, None)\n 
p, property = ac.getTextBackWithStyle(style)\n property = property.strip()\n\n if is_for_calltip:\n # We have all the info we need\n if DEBUG:\n print \" _extract_css_declaration:: Returning property: %r\" % (\n property)\n return (property, '', [])\n\n # Walk forward parsing the value information, ends when we hit a \";\" or\n # have gone ahead a maximum of 200 chars.\n ac.resetToPosition(colan_pos)\n prev_pos, prev_ch, prev_style = ac.getCurrentPosCharStyle()\n from_pos = prev_pos\n p = colan_pos\n # Value info, list of tuples (pos, text)\n value_info = []\n max_p = p + 200\n try:\n while p < max_p:\n p, ch, style = ac.getNextPosCharStyle(max_look_ahead=100, ignore_styles=styleClassifier.comment_styles)\n if p is None or not styleClassifier.is_css_style(style):\n # Went past max_look_ahead, just use what we've got then\n if DEBUG:\n print \"%s: css value reached max length or end of \" \\\n \"document: trg.pos %d\" % (trg.name, trg.pos)\n value_info.append((from_pos, ac.text_range(from_pos, p)))\n break\n \n # Sass test\n if ch == \"\\n\" and self.lang == \"Sass\" and styleClassifier.is_default(style):\n value_info.append((from_pos, ac.text_range(from_pos, p)))\n break\n if ch in WHITESPACE or styleClassifier.is_string(style):\n if not prev_ch in WHITESPACE and not styleClassifier.is_string(prev_style):\n value_info.append((from_pos, ac.text_range(from_pos, p)))\n from_pos = p+1\n elif styleClassifier.is_operator(style):\n if ch in \";{}\":\n value_info.append((from_pos, ac.text_range(from_pos, p)))\n break\n # Other chars should be okay to collect\n elif not styleClassifier.is_value(style) and \\\n style not in styleClassifier.ignore_styles:\n if DEBUG:\n print \"%s: invalid style found: pos %d, style: %d\" % (\n trg.name, trg.pos, style)\n return (None, None, None)\n prev_pos, prev_ch, prev_style = p, ch, style\n else:\n if DEBUG:\n print \"%s: css value too long: trg.pos %d\" % (trg.name, trg.pos)\n return (None, None, None)\n except IndexError:\n if DEBUG:\n print \"ran out of buffer\"\n\n # Work out the values and the current value\n current_value = None\n values = []\n trg_pos = trg.pos\n for p, value in value_info:\n if value and _isident_first_char(value[0]):\n if DEBUG:\n print \"Is a valid value, p: %d, value: %r\" % (p, value, )\n values.append(value)\n if current_value is None and trg_pos >= p and \\\n trg_pos <= p + len(value):\n current_value = value\n\n if DEBUG:\n print \" _extract_css_declaration:: Returning property: %r, \" \\\n \"current_value: %r, values: %r\" % (property, current_value,\n values)\n return (property, current_value, values)",
"def normalise_tag_id(input_id):\n return input_id.replace(\" \", \"\").replace(\"\\n\", \"\").replace(\"\\r\", \"\")",
"def fixup_css(text):\n return text.replace('\\x00', '\\\\0')",
"def shortURLToId(self, shortURL):\n id = 0\n for i in shortURL: \n val_i = ord(i) \n if(val_i >= ord('a') and val_i <= ord('z')): \n id = id*62 + val_i - ord('a') \n elif(val_i >= ord('A') and val_i <= ord('Z')): \n id = id*62 + val_i - ord('Z') + 26\n else: \n id = id*62 + val_i - ord('0') + 52\n return id",
"def _ansi_equivalent(self, s: str) -> str:\r\n color_id = self._color_id_regexp.search(s).groups()[0]\r\n\r\n # TODO: Replace this with a class the handles dynamic color configuration!\r\n return {\r\n '0': '\\u001b[37m',\r\n '1': '\\u001b[32m',\r\n '2': '\\u001b[31m',\r\n '3': '\\u001b[33m',\r\n '4': '\\u001b[34m',\r\n '5': '\\u001b[36m',\r\n '6': '\\u001b[37m',\r\n '7': '\\u001b[35m',\r\n '8': '\\u001b[30m',\r\n '.': '\\u001b[0m',\r\n }[color_id]",
"def hotdogify(my_string):\n return '{}_hotdog'.format(my_string)",
"def remove_unnecessary_whitespace(css):\n log.debug(\"Removing all unnecessary white spaces.\")\n\n def pseudoclasscolon(css):\n \"\"\"Prevent 'p :link' from becoming 'p:link'.\n\n Translates 'p :link' into 'p ___PSEUDOCLASSCOLON___link'.\n This is translated back again later.\n \"\"\"\n regex = re.compile(r\"(^|\\})(([^\\{\\:])+\\:)+([^\\{]*\\{)\")\n match = regex.search(css)\n while match:\n css = ''.join([\n css[:match.start()],\n match.group().replace(\":\", \"___PSEUDOCLASSCOLON___\"),\n css[match.end():]])\n match = regex.search(css)\n return css\n\n css = pseudoclasscolon(css)\n # Remove spaces from before things.\n css = re.sub(r\"\\s+([!{};:>\\(\\)\\],])\", r\"\\1\", css)\n # If there is a `@charset`, then only allow one, and move to beginning.\n css = re.sub(r\"^(.*)(@charset \\\"[^\\\"]*\\\";)\", r\"\\2\\1\", css)\n css = re.sub(r\"^(\\s*@charset [^;]+;\\s*)+\", r\"\\1\", css)\n # Put the space back in for a few cases, such as `@media screen` and\n # `(-webkit-min-device-pixel-ratio:0)`.\n css = re.sub(r\"\\band\\(\", \"and (\", css)\n # Put the colons back.\n css = css.replace('___PSEUDOCLASSCOLON___', ':')\n # Remove spaces from after things.\n css = re.sub(r\"([!{}:;>\\(\\[,])\\s+\", r\"\\1\", css)\n return css",
"def __replaceStyle(self, match):\n if not match:\n return;\n\n # we got \"\\<\" escaped char\n if ('\\\\' == match.group(1)) :\n return self.__applyCurrentStyle(match.group(0));\n\n\n if not match.group(3) :\n if ('/' == match.group(2)) :\n # we got \"</>\" tag\n self.__styleStack.pop();\n\n return self.__applyCurrentStyle(match.group(4));\n\n\n # we got \"<>\" tag\n return '<>'+self.__applyCurrentStyle(match.group(4));\n\n\n if str(match.group(3)).lower() in self.__styles :\n style = self.__styles[str(match.group(3)).lower()];\n else :\n style = self.__createStyleFromString(match.group(3));\n\n if (False is style) :\n return self.__applyCurrentStyle(match.group(0));\n\n\n\n if ('/' == match.group(2)) :\n self.__styleStack.pop(style);\n else :\n self.__styleStack.push(style);\n\n\n return self.__applyCurrentStyle(match.group(4));",
"def remove_link_icons(t):\n EXTERN_ICON_OLD = u'<span class=\"red\"><strong>»</strong></span> '\n NEW_WINDOW_OLD = u'<span class=\"red\"><strong>··</strong></span> '\n t = t.replace(EXTERN_ICON_OLD + '<a', '<a')\n t = t.replace(EXTERN_ICON + '<a', '<a')\n t = t.replace(NEW_WINDOW_OLD + '<a', '<a')\n t = t.replace(NEW_WINDOW_ICON + '<a', '<a')\n t = t.replace(MAIL_ICON + '<a', '<a')\n t = t.replace('<span class=\"red\"></span>', '')\n return t",
"def _restricted_hashtags(val: str):\n try:\n val = str(val).lower()\n except ValueError:\n raise argparse.ArgumentTypeError(f\"{val} could not be parsed to a string\")\n\n val = re.sub(_REGEX_CHAR_MATCHER_HASHTAGS, \"\", val)\n return val",
"def as_directive(self, string):\n return idaapi.COLSTR(string, idaapi.SCOLOR_KEYWORD)"
] |
[
"0.49875832",
"0.4964536",
"0.48959762",
"0.48508155",
"0.48253295",
"0.47944248",
"0.47622633",
"0.46999708",
"0.46071175",
"0.45776024",
"0.45625073",
"0.44926435",
"0.44823688",
"0.44580403",
"0.4448435",
"0.4416446",
"0.4413485",
"0.4408352",
"0.44072515",
"0.43934834",
"0.43734425",
"0.4362968",
"0.43600926",
"0.4354311",
"0.43472695",
"0.434576",
"0.43412453",
"0.4336011",
"0.43211073",
"0.43190488"
] |
0.5314905
|
0
|
Generates a new password reset code and returns the user
|
def get_reset_code(self, email):
try:
user = self.get(email__iexact=email)
user.reset_code = self.make_random_password(length=20)
user.reset_code_expire = timezone.now() + timedelta(days=2)
user.save()
return user
except get_user_model().DoesNotExist:
raise DoorstepError('We can\'t find that email address, sorry!')
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def request_password_reset():",
"def get_reset_code(self, email):\n\n try:\n user = self.get(email__iexact=email)\n user.reset_code = self.make_random_password(length=20)\n user.reset_code_expire = timezone.now() + timedelta(days=2)\n user.save()\n\n return user\n\n except get_user_model().DoesNotExist:\n raise Exception('We can\\'t find that email address, sorry!')",
"def new_password(self):\n # create new password\n return password_generator.create_password()\n # have password reset",
"def create_reset_code(self, phone, new_password):\r\n code = self.random_code(settings.CODE_LENGTH)\r\n # if phone in [\"+77753721232\", \"+77752470125\", \"+77074443333\"]:\r\n # code = \"4512\"\r\n # else:\r\n # code = \"%0.4d\" % random.randint(0, 9999)\r\n # mobizonproxy.send_sms(phone, text=u\"{} - Код активации для Pillowz365\".format(code))\r\n activation = Activation(phone=phone,\r\n to_reset=True,\r\n password=make_password(new_password),\r\n code=code)\r\n activation.save()\r\n return activation",
"def reset_password():\n pass",
"def passwordGen() :\n\treturn __randomString(12)",
"def change_password(reset_code):\n return dict(reset_code=reset_code)",
"def generate_code(self):\n code = ''.join(\n random.choices(string.ascii_lowercase + string.digits, k=5))\n self.code = '{}{}'.format(self.user.id, code)",
"def request_password_reset_token():\n j = request.get_json(force=True)\n user_requested = j['user'].lower()\n\n # Disabled user accounts can not request for a new password.\n target_user = User.query.filter_by(mail=user_requested).first()\n\n if target_user is None:\n return Errors.UNKNOWN_USER.make_json_response(status.HTTP_400_BAD_REQUEST)\n\n if target_user.state == StateType.DEACTIVATED:\n return Errors.DEACTIVATED_USER.make_json_response(status.HTTP_400_BAD_REQUEST)\n\n target_user.generate_password_request_token()\n\n send_mail(target_user.mail, render_template(\"password/reset_password_mail.txt\",\n greeting=get_opening_greeting(target_user),\n wlink=\"{}/password/reset/{}\".format(\n app.config['BUZZN_BASE_URL'],\n target_user.password_reset_token\n )), 'Passwort zurücksetzen für Buzzn-App')\n\n db.session.commit()\n return '', status.HTTP_201_CREATED",
"def password_token_oracle():\n past_time = int(time.time()) - random.randint(1, 3600)\n return generate_password_reset_token(past_time), past_time",
"def reset_password(newpass, challenge):",
"def generate_password_reset_token(self, expiration=3600):\n app = current_app._get_current_object()\n serializer = Serializer(app.config[\"SECRET_KEY\"], expiration)\n return serializer.dumps({\"reset\": self.id}).decode(\"utf-8\")",
"def post(self):\n args = password_reset.parse_args()\n email = args.get('email')\n new_password = password_generator()\n\n validation_email = email_validation(email)\n if validation_email:\n return validation_email\n\n user = User.query.filter_by(email=email).first()\n if user:\n user.password = new_password\n user.save()\n response = {\n \"message\": \"Password has been reset\",\n \"status\": \"Reset password succesful!\",\n \"new_password\": new_password\n }\n return response, 200\n else:\n response = {\n 'message': 'User email does not exist, Please try again',\n 'status': 'Reset password failed!'\n }\n return response, 400",
"def link(self):\n return f\"https://{DOMAIN}/password-reset/{self.code}\"",
"def create_password_reset_token(self, user_id):\n try:\n self.logger.debug('Password Reset attempt %s', user_id)\n password_reset_token = ''\n nosqldb = self.pers.nosql_db\n db_user_record = nosqldb['users'].find_one(\n {\n '$or': [\n {'username': user_id},\n {'email': user_id}\n ]\n }\n )\n # Confirm the user exists from previous query\n if db_user_record:\n # purge any old requests, even if unrelated\n nosqldb['passwordResets'].delete_many(\n {\n 'requestDate': {'$lt': datetime.datetime.utcnow() -\n datetime.timedelta(minutes=5)}\n }\n )\n\n already_sent = nosqldb['passwordResets'].find_one(\n {\n 'email': user_id\n }\n )\n if not already_sent:\n # create a password reset token\n #password_reset_token = hashlib.sha512('abc'.encode('utf-8')).hexdigest()\n password_reset_token = secrets.token_urlsafe(255)\n #persist the password reset request\n nosqldb['passwordResets'].insert_one(\n {\n 'username': db_user_record['username'],\n 'email': db_user_record['email'],\n 'requestDate': datetime.datetime.utcnow(),\n 'resetToken': password_reset_token,\n }\n )\n else:\n self.logger.debug('Password Reset Email Denied: existing request in flight')\n return password_reset_token\n except Exception as exc:\n self.logger.debug('Unexpected Error %s', str(exc))\n raise",
"def reset(request):\r\n rdict = request.matchdict\r\n params = request.params\r\n\r\n # This is an initial request to show the activation form.\r\n username = rdict.get('username', None)\r\n activation_key = rdict.get('reset_key', None)\r\n user = ActivationMgr.get_user(username, activation_key)\r\n new_username = None\r\n\r\n if user is None:\r\n # just 404 if we don't have an activation code for this user\r\n raise HTTPNotFound()\r\n\r\n if 'code' in params:\r\n # This is a posted form with the activation, attempt to unlock the\r\n # user's account.\r\n username = params.get('username', None)\r\n activation = params.get('code', None)\r\n password = params.get('new_password', None)\r\n new_username = params.get('new_username', None)\r\n error = None\r\n\r\n if new_username:\r\n new_username = new_username.lower()\r\n\r\n # Check whether username exists or not. During signup request , a\r\n # record of current user is created with username as his email id\r\n # which is already checked for uniqueness. So when new_username is\r\n # equal to username ie the email id then no need to check for\r\n # uniqueness , but if new_username is something else it has to be\r\n # verified\r\n\r\n if username != new_username and \\\r\n UserMgr.get(username=new_username) is not None:\r\n # Set an error message to the template.\r\n error = \"Username already exists.\"\r\n elif not UserMgr.acceptable_password(password):\r\n # Set an error message to the template.\r\n error = \"Come on, pick a real password please.\"\r\n else:\r\n res = ActivationMgr.activate_user(username, activation, password)\r\n if res:\r\n # success so respond nicely\r\n AuthLog.reactivate(username, success=True, code=activation)\r\n\r\n # if there's a new username and it's not the same as our\r\n # current username, update it\r\n if new_username and new_username != username:\r\n try:\r\n user = UserMgr.get(username=username)\r\n user.username = new_username\r\n except IntegrityError:\r\n error = 'There was an issue setting your new username'\r\n else:\r\n AuthLog.reactivate(username, success=False, code=activation)\r\n error = ('There was an issue attempting to activate'\r\n 'this account.')\r\n\r\n if error:\r\n return {\r\n 'message': error,\r\n 'user': user\r\n }\r\n else:\r\n # Log the user in and move along.\r\n headers = remember(request, user.id, max_age=60 * 60 * 24 * 30)\r\n user.last_login = datetime.utcnow()\r\n\r\n # log the successful login\r\n AuthLog.login(user.username, True)\r\n\r\n # we're always going to return a user to their own /recent after a\r\n # login\r\n return HTTPFound(\r\n location=request.route_url(\r\n 'user_bmark_recent',\r\n username=user.username),\r\n headers=headers)\r\n\r\n else:\r\n LOG.error(\"CHECKING\")\r\n LOG.error(username)\r\n\r\n if user is None:\r\n # just 404 if we don't have an activation code for this user\r\n raise HTTPNotFound()\r\n\r\n LOG.error(user.username)\r\n LOG.error(user.email)\r\n return {\r\n 'user': user,\r\n }",
"def passwordCode(code):\n #Check if code exists and for the correct purpose. Else abort\n if (hl.checkCode(code,\"Password\")):\n user = hl.getUserFromCode(code)\n else:\n abort(404)\n\n if request.method == 'POST':\n #Get new password and handle\n passwordform(user)\n #Mark code as used\n hl.flagCode(code)\n #return\n return redirect(url_for('confirm', confirmed = 'Changed Password'))\n\n return render_template('password.html')",
"def create_email_signup_code(self, email, password):\r\n code = self.random_code(settings.CODE_LENGTH)\r\n activation = Activation(email=email,\r\n to_reset=False,\r\n password=make_password(password),\r\n code=code)\r\n activation.save()\r\n return activation",
"def generate_password_reset_token(self, expiration=3600):\n s = Serializer(current_app.config['SECRET_KEY'], expiration)\n return s.dumps({'reset': self.id})",
"def reset_password():\n form = ResetPassword()\n if form.validate_on_submit():\n user_email = form.email.data\n mail_exist = db.check_email(user_email)\n if mail_exist is not None:\n new_password = generate_password()\n new_password_hash = generate_password_hash(new_password)\n username = mail_exist['username']\n db.update_password_username(username, new_password_hash)\n flash('Your new password has been sent to your mailbox')\n redirect('login')\n # send_password_reset_email(user_email, new_password)\n return redirect(url_for('login'))\n else:\n flash('This email address is not registered')\n return redirect('reset_password')\n return render_template('resetpassword.html', form=form)",
"def generate_forgot_password_token(self, email):\n payload = {'appkey': self._lr_object._get_api_key(), 'appsecret': self._lr_object._get_api_secret(),\n 'email': email}\n url = SECURE_API_URL + \"raas/v1/account/password/forgot\"\n return self._lr_object._get_json(url, payload)",
"def create(self, data):\n # Make User\n code = (random.randint(1000, 9999))\n user = User.objects.get(pk=self.context['user'].pk)\n new = str(code).strip()\n hs = hashlib.sha1(new.encode()).hexdigest()\n user.password = hs\n user.save()\n send_verification_email.delay(email=data['email'], code=code)\n return user",
"def POST(self):\n session = web.ctx.session\n nav = get_nav_bar(session)\n data = web.input(reset_token = \"\", new_password=\"\")\n \n reset_password_colum = reset_password_form()\n \n # check each field is endered values.\n if not reset_password_colum.validates():\n return render.reset_password(nav, reset_password_form, \"All fields must be valid.\")\n \n try:\n # log ip information\n ip_addr = web.ctx[\"ip\"]\n accessed_path = web.ctx[\"fullpath\"]\n\n # query user's name (username) and token (extra secruity)\n token = data.reset_token\n username = search_for_user(token, ip_addr, accessed_path)\n #print(\"-\"*16)\n #print(username)\n \n #update token to null database\n result_update_token = update_token_to_null(username, token, ip_addr, accessed_path)\n print(\"-\" * 16 + \"updated!\")\n\n # generate new password\n new_salt = generate_salt()\n hashed_password = hashed_value(data.new_password, new_salt)\n hashed_password = new_salt + hashed_password\n\n # update password \n result_update_password = update_user_password(username, hashed_password, ip_addr, accessed_path )\n raise web.seeother(\"/\")\n except Exception as e:\n print(e)\n except:\n print(exit[0])\n return render.login(nav, reset_password_form, \"- Something went wrong!\")",
"def generate_reset_password_token(self, expiration=3600):\n ser = Serializer(current_app.config['SECRET_KEY'], expiration)\n return ser.dumps({'reset_password': self.id}).decode('utf-8')",
"def get_password_reset_token(user):\n return base64.urlsafe_b64encode(_create_security_token(user))",
"def forgot_password():\r\n form = ForgotPasswordForm(request.form)\r\n if form.validate_on_submit():\r\n user = model.user.User.query\\\r\n .filter_by(email_addr=form.email_addr.data)\\\r\n .first()\r\n if user and user.email_addr:\r\n msg = Message(subject='Account Recovery',\r\n recipients=[user.email_addr])\r\n if user.twitter_user_id:\r\n msg.body = render_template(\r\n '/account/email/forgot_password_openid.md',\r\n user=user, account_name='Twitter')\r\n elif user.facebook_user_id:\r\n msg.body = render_template(\r\n '/account/email/forgot_password_openid.md',\r\n user=user, account_name='Facebook')\r\n elif user.google_user_id:\r\n msg.body = render_template(\r\n '/account/email/forgot_password_openid.md',\r\n user=user, account_name='Google')\r\n else:\r\n userdict = {'user': user.name, 'password': user.passwd_hash}\r\n key = signer.signer.dumps(userdict, salt='password-reset')\r\n recovery_url = url_for('.reset_password',\r\n key=key, _external=True)\r\n msg.body = render_template(\r\n '/account/email/forgot_password.md',\r\n user=user, recovery_url=recovery_url)\r\n msg.html = markdown(msg.body)\r\n mail.send(msg)\r\n flash(gettext(\"We've send you email with account \"\r\n \"recovery instructions!\"),\r\n 'success')\r\n else:\r\n flash(gettext(\"We don't have this email in our records. \"\r\n \"You may have signed up with a different \"\r\n \"email or used Twitter, Facebook, or \"\r\n \"Google to sign-in\"), 'error')\r\n if request.method == 'POST' and not form.validate():\r\n flash(gettext('Something went wrong, please correct the errors on the '\r\n 'form'), 'error')\r\n return render_template('/account/password_forgot.html', form=form)",
"def get_password_reset_token(known_plaintext, current_time):\n return encryption_oracle(known_plaintext, current_time)[1].encode('hex')",
"def reset_password():\n if request.method == 'POST':\n email = request.json.get('email')\n new_password = request.json.get('new_password')\n if len(new_password.strip()) < 4:\n return make_response(jsonify(\n {'message': 'password too short'}\n )), 409\n user = User.query.filter_by(email=email).first()\n if user:\n user.password_hash = generate_password_hash(new_password)\n user.save_user()\n return make_response(jsonify(\n {\n 'message': 'password reset successful',\n 'your new password': new_password\n }\n )), 201\n return make_response(jsonify(\n {'message': 'Wrong email, please provide a valid email and try again'}\n )), 401\n return None",
"def generate_authentication_code(user):\n\n salt = 'd9!1l@39#c3'\n\n expire_timestamp = time.time() + EXPIRE_TIME_LIMIT\n # Make a string which depends on restaurant id\n # Same encoding mechanism will be used in seerpod hardware\n\n composite_string = \"%s%s%s\" % (user.id, user.password, salt)\n\n str_hex = hashlib.md5(composite_string).hexdigest()\n decoded_str = str(user.owner_email_id) + str(user.id) + \"_\" + str(expire_timestamp) + \"_\" + str_hex\n\n # Encoded string will be a multiple line string, if it is greater\n # than maximum bin size of 76. Browser strips the newline character\n # in the url.\n encoded = base64.encodestring(decoded_str).strip().replace('\\n', '')\n return encoded",
"def forgot_password():\n if request.method == 'POST':\n if 'username' in request.form:\n username = request.form['username']\n user = Users.query.get(username)\n if user:\n reset_slug = utils.encrypt(username)\n reset_url = request.host_url + 'reset_password' + '/' + reset_slug\n from_email = ('[email protected]', 'TSG Bot')\n to_email = [(user.email, user.name)]\n subject = 'Password reset for Hades account'\n content = f\"Hello {user.name}, please click <a href=\\\"{reset_url}\\\">here</a> to reset your password!\"\n utils.send_mail(from_email, to_email, subject, content)\n return redirect(url_for('login'))\n return render_template('forgot_password.html')"
] |
[
"0.71952367",
"0.71164334",
"0.7102545",
"0.70755523",
"0.68331474",
"0.6816541",
"0.68077534",
"0.67629087",
"0.6724371",
"0.6711852",
"0.6694719",
"0.66374165",
"0.6595471",
"0.6518255",
"0.6481541",
"0.6472153",
"0.645154",
"0.64254034",
"0.64150655",
"0.6381783",
"0.6370532",
"0.6368503",
"0.6304628",
"0.6303712",
"0.6263815",
"0.62601626",
"0.62577516",
"0.6236819",
"0.62221104",
"0.6221258"
] |
0.7360784
|
0
|
Adds a ramp to the specified ramp parameter.
|
def add_ramp(self, parameter, start_time, ramp_duration, delta, clear_existing=False):
    if clear_existing:
        # Discard any ramps previously registered for this parameter.
        self.parameters[parameter] = [[], [], []]
    # Ramps are stored as three parallel lists: start times, durations and deltas.
    self.parameters[parameter][0].append(start_time)
    self.parameters[parameter][1].append(ramp_duration)
    self.parameters[parameter][2].append(delta)
    # Keep a per-ramp (start_time, duration, delta) view of the same data.
    self.parameters['_' + parameter] = zip(*self.parameters[parameter])
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def setRampDuration(self, ramp_duration):\r\n\t\tself.RampDuration = ramp_duration\r\n\t\tself.OutputValueIncrement = (self.MaxValue - self.MinValue)/(self.RampDuration/self.Ts)",
"def setRampDuration(self, ramp_duration):\r\n\t\tself.RampDuration = ramp_duration\r\n\t\tself.OutputValueIncrement = (self.MaxValue - self.MinValue)/(self.RampDuration/self.Ts)",
"def ramp(self,gradient, onset, t_stop, baseline=0.0, time_step=0.125, t_start=0.0):\n if onset > t_start:\n times = np.hstack((np.array((t_start, onset)), # flat part\n np.arange(onset + time_step, t_stop + time_step, time_step))) # ramp part\n else:\n times = np.arange(t_start, t_stop + time_step, time_step)\n amps = baseline + gradient*(times - onset) * (times > onset)\n return times, amps",
"def write_ramp(bram):\n snap.write_int('test',1)\n ramp = np.arange(2**9,dtype='uint64')\n snap.write(bram,struct.pack('>512L',*ramp))",
"def ramp11(params, phase, args=dict(n=3, guess=[1, 0.14, -1.9])):\n # 2013-12-07 14:08 IJMC: Created.\n\n return params[0] * (1. + params[1] * (phase - 0.5) + params[2] * (phase - 0.5)**2)",
"def ramp_up(self):\n value = self.current_event[\"ramp_up\"][\"value\"]\n self.current_value.append(self.current_value[-1] + value)",
"def ramp(length = 10, width1 = 5, width2 = 8, layer = 0):\n if width2 is None: width2 = width1\n xpts = [0, length, length, 0]\n ypts = [width1, width2, 0, 0]\n D = Device('ramp')\n D.add_polygon([xpts, ypts], layer = layer)\n D.add_port(name = 1, midpoint = [0, width1/2],\n width = width1, orientation = 180)\n D.add_port(name = 2, midpoint = [length, width2/2],\n width = width2, orientation = 0)\n return D",
"def ramp10(params, phase, args=dict(n=2, guess=[1, 0.2])):\n # 2013-12-07 14:08 IJMC: Created.\n\n return params[0] * (1. + params[1] * (phase - 0.5))",
"def rampColorPort(*args, annotation: Union[AnyStr, bool]=\"\", backgroundColor: Union[List[float,\n float, float], bool]=None, defineTemplate: AnyStr=\"\", docTag: Union[AnyStr,\n bool]=\"\", dragCallback: Script=None, dropCallback: Script=None, enable:\n bool=True, enableBackground: bool=True, enableKeyboardFocus: bool=True,\n exists: bool=True, fullPathName: bool=True, height: Union[int, bool]=0,\n highlightColor: Union[List[float, float, float], bool]=None, isObscured:\n bool=True, manage: bool=True, noBackground: bool=True, node: name=None,\n numberOfPopupMenus: bool=True, parent: Union[AnyStr, bool]=\"\",\n popupMenuArray: bool=True, preventOverride: bool=True, selectedColorControl:\n AnyStr=\"\", selectedInterpControl: AnyStr=\"\", selectedPositionControl:\n AnyStr=\"\", statusBarMessage: AnyStr=\"\", useTemplate: AnyStr=\"\",\n verticalLayout: bool=True, visible: bool=True, visibleChangeCommand:\n Union[Script, bool]=None, width: Union[int, bool]=0, q=True, query=True,\n e=True, edit=True, **kwargs)->Union[AnyStr, Any]:\n pass",
"def ramp(self, channel, group, rate, target, unitCode=0):\n resp = self.XAPCommand('RAMP', channel, group, rate, target, unitCode=unitCode)\n return int(resp)",
"def __init__(self, ramp_type, ramp):\r\n super(ParameterWidget, self).__init__(None)\r\n self.ramp_type = ramp_type\r\n self.parameters = ramp.required_parameters\r\n self.populate()",
"def ramp_rate(self) -> IMockPin:\n return self[\"ramp_rate\"]",
"def setRamp(self, channel, group, rate, target, unitCode=0):\n resp = self.XAPCommand(\"Ramp\", channel, group, rate, target, unitCode=unitCode)\n return int(resp)",
"def ramp9(params, phase, args=dict(n=6, guess=[1, 0.003, 0.6, 0.009, 0.35, 4e-4])):\n # 2013-12-07 14:08 IJMC: Created.\n\n if params[4]>=phase.min():\n params[4] = phase.min() - np.diff(phase).mean()/1e6\n \n return params[0] * (1. + params[1] * (phase - 0.5) + \\\n params[2] * (phase - 0.5)**2 + \\\n params[3] * np.log(phase - params[4]) + \\\n params[5] * np.log(phase - params[4])**2)",
"def ramp_voltage_job(\n queue,\n resource,\n order,\n voltage_Start,\n voltage_End,\n step,\n wait_time=0.2,\n compliance=100e-6,\n):\n job = {\n \"Measurement\": {\n \"ramp_voltage\": {\n \"Resource\": resource,\n \"Order\": order,\n \"StartVolt\": voltage_Start,\n \"EndVolt\": voltage_End,\n \"Steps\": step,\n \"Wait\": wait_time,\n \"compliance\": compliance,\n }\n }\n }\n queue.put(job)",
"def add_param(self, param):\n self.params.append(param)\n return self",
"def ramp7(params, phase, args=dict(n=5, guess=[1, 0.034, 0.35, 0.005, 0.35])):\n # 2013-12-07 14:08 IJMC: Created.\n\n if params[4]>=phase.min():\n params[4] = phase.min() - np.diff(phase).mean()/1e6\n \n return params[0] * (1. + params[1] * (phase - 0.5) + \\\n params[2] * (phase - 0.5)**2 + \\\n params[3] * np.log(phase - params[4]))",
"def ramp2p(params, phase, args=dict(n=3, guess=[1, -0.16, 4.2])):\n # 2013-12-07 14:08 IJMC: Created.\n\n return params[0] * (1. + np.exp(-params[1]*phase + params[2]))",
"def ramp4p(params, phase, args=dict(n=5, guess=[1, -0.068, 2.33, 0.933, -20.5])):\n # 2013-12-07 14:08 IJMC: Created.\n\n return params[0] * (1. + np.exp(-params[1]*phase + params[2]) + \\\n params[3] * (phase - 0.5) + \\\n params[4] * (phase - 0.5)**2)",
"def from_ramp(\n start: int,\n stop: int,\n step: int,\n period: float,\n peak: Optional[int] = None,\n **kwargs,\n ):\n if start > stop:\n raise ValueError(\"Start power must be less than stop power\")\n\n if stop < start:\n raise ValueError(\"Stop power must be greater than start power\")\n\n if step == 0 or period == 0:\n raise ValueError(\"Step or period cannot be zero\")\n\n if peak and (peak < stop or peak < start):\n raise ValueError(\n \"Peak value if used must be greater than start and stop value\"\n )\n\n if peak:\n intervals = [(power, period) for power in range(start, peak, step)]\n intervals += [(power, period) for power in range(peak, stop, step * -1)]\n else:\n intervals = [(power, period) for power in range(start, stop, step)]\n\n return Workout(intervals=intervals, **kwargs)",
"def add(self, attr):\n self.validate_type(attr)\n value = attr.value\n if not self.range:\n self.range = (value, value)\n else:\n self.range = min(self.range[0], value), max(self.range[1], value)",
"def addParam(self, var: IRVariable):\n self.params[var.name] = var",
"def ramp8(params, phase, args=dict(n=4, guess=[1, 0.0096, 0.35, 5.3e-4])):\n # 2013-12-07 14:08 IJMC: Created.\n\n if params[2]>=phase.min():\n params[2] = phase.min() - np.diff(phase).mean()/1e6\n \n return params[0] * (1. + params[1] * np.log(phase - params[2]) + \\\n params[3] * np.log(phase - params[2])**2)",
"def add(self, p):\n self._pumps.add(p)",
"def ramp_color_rgb(values, feature, parent): \r\n ramp_name = values[0]\r\n ramp_position = values[1]\r\n \r\n ramp = QgsStyleV2.defaultStyle().colorRampRef(ramp_name)\r\n if not ramp:\r\n parent.setEvalErrorString( QObject.tr( '\"{}\" is not a valid color ramp'.format(ramp_name)))\r\n return QColor(0,0,0).name()\r\n \r\n value, error = getFloat(ramp_position)\r\n if error:\r\n parent.setEvalErrorString(error)\r\n \r\n color = ramp.color(value)\r\n return \"{},{},{}\".format(color.red(), color.green(), color.blue())",
"def get_ramp(x0,x1,vmax,a,dt, output='ramp only'):\n # Insure we are dealing with floating point numbers\n x0, x1 = float(x0), float(x1)\n vmax, a = float(vmax), float(a)\n dt = float(dt)\n vmax, a = abs(vmax), abs(a) # Make sure that v_max and a are positive\n\n # Check to see if there is anything to do\n if x0==x1:\n return scipy.array([x0])\n\n # Get distance and sign indicating direction\n dist = abs(x1-x0)\n sign = scipy.sign(x1-x0)\n\n # Determine if we will reach v_max\n t2vmax = vmax/a\n t2halfdist = scipy.sqrt(0.5*dist/a)\n \n if t2vmax > t2halfdist:\n # Trajectory w/o constant velocity segment \n T = scipy.sqrt(dist/a)\n n = int(scipy.round_((1.0/dt)*T))\n \n # Adjust accel and duration for rounding of n (discrete time steps)\n a = dist/(n*dt)**2\n T = scipy.sqrt(dist/a)\n \n # Generate trajectory\n t = scipy.linspace(0.0,2.0*T,2*n+1)\n def f1(t):\n return 0.5*sign*a*(t**2)\n def f2(t):\n s = t-T\n return f1(T)+ sign*a*T*s - 0.5*sign*a*s**2\n func_list = [f1,f2]\n cond_list = [t<=T, t>T]\n ramp = x0+scipy.piecewise(t,cond_list,func_list)\n \n else:\n # Trajectory w/ constant velocity segment\n # Compute acceleration time and adjust acceleration\n T1 = vmax/a \n n = int(scipy.round_(T1/dt))\n a = vmax/(n*dt) # Adjusted acceleration \n T1 = vmax/a # Adjusted acceleration time \n\n # Compute and adjust constant velocity time\n T2 = dist/vmax - T1 \n m = int(scipy.round_(T2/dt))\n vmax = dist/(dt*(n+m)) # Adjusted max velocity \n T2 = dist/vmax - T1 # Adjusted constant velocity time\n\n # Generate trajectory\n t = scipy.linspace(0.0,2.0*T1+T2,2*n+m+1)\n def f1(t):\n return 0.5*sign*a*(t**2)\n def f2(t):\n s = t-T1\n return f1(T1) + sign*vmax*s\n def f3(t):\n s = t-T1-T2\n return f2(T1+T2)+sign*vmax*s-0.5*sign*a*s**2 \n func_list = [f1,f2,f3]\n cond_list = [t<=T1, scipy.logical_and(t>T1,t<=T1+T2), t>T1+T2]\n ramp = x0+scipy.piecewise(t,cond_list,func_list)\n\n if output=='ramp only':\n return ramp\n elif output=='full':\n return ramp, vmax, a\n else:\n raise(ValueError, 'unknown keyword option output=%s'%(output,))",
"def ramp_down(self):\n value = self.current_event[\"ramp_down\"][\"value\"]\n self.current_value.append(self.current_value[-1] - value)",
"def add(self, term):\n self._value = self.accum_param.addInPlace(self._value, term)",
"def add_pump(self, name, start_node_name, end_node_name, pump_type='POWER',\n pump_parameter=50.0, speed=1.0, pattern=None, initial_status='OPEN'):\n self._link_reg.add_pump(name, start_node_name, end_node_name, pump_type, \n pump_parameter, speed, pattern, initial_status)",
"def add_parameter(self,\n name, # The name of the parameter\n scaling=None, # The type of scaling to be used for the parameter\n type=\"int\", # The type of the parameter, such as float\n min=0, # The minimum value of the parameter\n max=100, # The maximum value of the parameter\n significance=1, # The smallest significant step size\n value=None, # The value or value parameters\n distribution=None): # The distribution of the parameter\n config = {\"scaling\" : scaling, \n \"type\": type,\n \"min\": min, \n \"max\": max, \n \"significance\": significance,\n \"value\": value,\n \"distribution\": distribution}\n self.param_names.append(name)\n self.param_settings.append(config)"
] |
[
"0.6292615",
"0.6292615",
"0.618963",
"0.6015142",
"0.58060014",
"0.5775534",
"0.5737288",
"0.5722807",
"0.55971915",
"0.5569704",
"0.5560601",
"0.53862077",
"0.526049",
"0.5222054",
"0.51991385",
"0.51792717",
"0.51688516",
"0.5064401",
"0.5030468",
"0.5009544",
"0.49850935",
"0.49801663",
"0.49277997",
"0.4926527",
"0.49235603",
"0.49002182",
"0.4895447",
"0.4829476",
"0.48176715",
"0.48175612"
] |
0.70723915
|
0
|
Evaluate file cadence to see if it is daily or greater than daily.
|
def is_daily_file_cadence(file_cadence):
    is_daily = True
    if hasattr(file_cadence, 'days'):
        # timedelta-style cadence: more than one day means greater than daily.
        if file_cadence.days > 1:
            is_daily = False
    else:
        # No 'days' attribute: if the cadence has no sub-daily attributes either,
        # it must span more than a day (e.g. a month or year offset).
        if not (hasattr(file_cadence, 'microseconds')
                or hasattr(file_cadence, 'seconds')
                or hasattr(file_cadence, 'minutes')
                or hasattr(file_cadence, 'hours')):
            is_daily = False
    return is_daily
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def check_filedates(config, data, filename):\n\n min_file_date = re.match(config[\"min_file_date_regex\"], filename)\n max_file_date = re.match(config[\"max_file_date_regex\"], filename)\n\n # Try and convert to date time\n if min_file_date and max_file_date:\n min_file_date = pd.to_datetime(\n min_file_date.groups()[0], errors=\"coerce\", format=\"%d%m%y\", exact=True\n )\n max_file_date = pd.to_datetime(\n max_file_date.groups()[0], errors=\"coerce\", format=\"%d%m%y\", exact=True\n )\n\n if pd.isna(min_file_date) or pd.isna(max_file_date):\n logging.error(\"Could not identify dates from filename.\")\n return False\n\n logging.info(\n f\"Date range from the filename is {min_file_date.strftime('%d/%m/%Y')} to {max_file_date.strftime('%d/%m/%Y')}\"\n )\n\n # Check data quality\n if min_file_date != first_of_month(\n min_file_date\n ) or max_file_date != last_of_month(max_file_date):\n logging.error(\"Dates in filename are not first and last of the month.\")\n return False\n else:\n logging.error(\"Could not identify dates from filename.\")\n return False\n\n # Convert the date column to datetime\n data = data.map(get_date)\n # Identify date range in the file\n min_date = data.dropna().min()\n max_date = data.dropna().max()\n\n if not (min_date and max_date):\n logging.error(\"Unable to read dates from the date column.\")\n return False\n else:\n logging.info(\n f\"Date range included in this file is {min_date.strftime('%d/%m/%Y')} to {max_date.strftime('%d/%m/%Y')}\"\n )\n\n # Check whether the intended period matches the file\n grace_days = config[\"grace_days\"]\n if min_date < min_file_date - datetime.timedelta(\n days=grace_days\n ) or max_date > max_file_date + datetime.timedelta(days=grace_days):\n logging.error(\"File dates outside range in filename.\")\n return False\n\n if min_date < min_file_date or max_date > max_file_date:\n logging.warning(\"Some dates slightly outside range in filename.\")\n logging.info(\n \"Dates in the file are within tolerance to dates used in the filename.\"\n )\n return True",
"def compare_daily_data(self):\n return self._compare_daily_data",
"def is_daily_limit_reached(self):\n return self._tag == 'daily_limit_reached'",
"def date_older_than_file_date(date_and_time: str, file: str) -> bool:\n # compare_date: 0 is monthday, 1 is monthname, 2 is year, 3 is time, 4 is GMT\n if file[1:] == \"index.html\":\n compare_date = HttpServer.INDEX_DATE_LAST_MOD.split()\n else:\n compare_date = HttpServer.SEA_DATE_LAST_MOD.split()\n\n # 0 is weekday, 1 is monthday, 2 is monthname, 3 is year, 4 is time, 5 is GMT\n split_date_and_time = date_and_time.split()\n\n if split_date_and_time[3] == compare_date[2]:\n if HttpServer.MONTHS.get(split_date_and_time[2]) == HttpServer.MONTHS.get(compare_date[1]):\n if split_date_and_time[1] == compare_date[0]:\n # 0 is hours, 1 is minutes, 2 is seconds\n split_time = split_date_and_time[4].split(\":\")\n split_compare_time = compare_date[3].split(\":\")\n\n if split_time[0] == split_compare_time[0]:\n if split_time[1] == split_compare_time[1]:\n if split_time[2] == split_compare_time[2]:\n return True\n elif split_time[2] < split_compare_time[2]:\n return True\n else:\n return False\n elif split_time[1] < split_compare_time[1]:\n return True\n else:\n return False\n elif split_time[0] < split_compare_time[0]:\n return True\n else:\n return False\n elif split_date_and_time[1] < compare_date[0]:\n return True\n else:\n return False\n elif HttpServer.MONTHS.get(split_date_and_time[2]) < HttpServer.MONTHS.get(compare_date[1]):\n return True\n else:\n return False\n elif split_date_and_time[3] < compare_date[2]:\n return True\n else:\n return False",
"def check_less_than_one_min_submission(year: int, day: int, session: str) -> bool:\n last_time_file = _join_path(year, day, session, file_type=\"last_time_file\")\n with open(last_time_file, \"r\") as opened_file:\n last_time = float(opened_file.read())\n current_time = time.time()\n early_submission = current_time - last_time < 60.0\n return early_submission",
"def check_date(dates):\n\n # Loads file list from raw and processed data dirs\n data_dir = basedir + '/app/static/data/saved'\n data_files = [f for f in os.listdir(data_dir) if not f.startswith('.DS')]\n\n print data_files\n\n raw_data_dir = basedir + '/app/static/data/raw'\n raw_data_files = [f for f in os.listdir(raw_data_dir) if not f.startswith('.DS')]\n\n # If neither a raw or processed file exists, we haven't collected it\n # Sorts these uncollected files by date for reference\n if data_files or raw_data_files:\n saved_dates = []\n raw_dates = []\n\n if data_files:\n saved_dates = [data_file.split('.')[0] for data_file in data_files]\n saved_dates = sorted(saved_dates, reverse=True)\n if raw_data_files:\n raw_dates = [raw_file.split('.')[0] for raw_file in raw_data_files]\n raw_dates = sorted(raw_dates, reverse=True)\n\n uncrawled_dates = []\n for date in dates:\n if date not in saved_dates and date not in raw_dates:\n uncrawled_dates.append(date)\n else:\n uncrawled_dates = dates\n\n print 'Uncrawled file dates'\n print uncrawled_dates\n print ''\n\n return uncrawled_dates",
"def is_daily_emails(self):\n return self._tag == 'daily_emails'",
"def _evaluate_dates(self):\n if len(self.date_strings)==0: \n return \"\"\n\n # high confidence: filter for date with a time attached, expected only 1 unique time\n high_confidence = self._extract_high_conf_dates(set(self.date_strings))\n if high_confidence: \n return high_confidence\n\n #else pick majority vote using absolute\n majority_vote = self.date_strings.most_common(1)[0][0]\n return majority_vote",
"def check_if_up_to_date():\n last_daily = get_latest_dl_date()\n last_trading_day = get_last_open_trading_day()",
"def needs_refreshing(filepath):\n today = datetime.date.today()\n year = today.year - 2000 # Obviously does not work prior to 2000\n if today.month <= 6:\n current_season = str(year - 1) + str(year)\n else:\n current_season = str(year) + str(year + 1)\n return (current_season in filepath and\n last_modified_date(filepath) != today)",
"def _check_data_continuity(self):\n dates = list(self.data.open_date.unique())\n dates.sort()\n\n f = lambda x : tb.DateConvert(x).datetime\n dates = list(map(f, dates))\n\n delta = []\n for i, _ in enumerate(dates, start=1):\n if i < len(dates):\n delta.append(dates[i]-dates[i-1])\n\n if len(pd.unique(delta)) > 1:\n raise DiscontinuousError(\n 'There appear to be missing dates in the market data.'\n )",
"def check_dataset_dates(self):\n # TODO: graph traverse and date checking\n pass",
"def check_date(date):\r\n try:\r\n d_max = str(datetime.today() + timedelta(days=5))\r\n d_max = d_max[:10]\r\n date_list = date.strip().split(\"-\")[-1]\r\n d_max = d_max.strip().split(\"-\")\r\n for i in range(3):\r\n if int(date_list[i]) > int(d_max[i]):\r\n return False\r\n return True\r\n except Exception as r:\r\n # print(Exception)\r\n return None",
"def daily_avg(dacycle,avg):\n \n if avg not in ['transcom','transcom_extended','olson','olson_extended','country','flux1x1']:\n raise IOError,'Choice of averaging invalid'\n\n analysisdir = dacycle['dir.analysis']\n weekdir = os.path.join(analysisdir , 'data_%s_weekly'%avg)\n daydir = os.path.join(analysisdir , 'data_%s_daily'%avg)\n\n if not os.path.exists(daydir):\n print \"Creating new output directory \" + daydir\n os.makedirs(daydir)\n\n files = os.listdir(weekdir)\n files = [f for f in files if '-' in f and f.endswith('.nc')]\n\n fileinfo = {}\n for filename in files:\n date=datetime.datetime.strptime(filename.split('.')[-2],'%Y-%m-%d')\n fileinfo[filename] = date\n \n dt = dacycle['cyclelength']\n\n for k,v in fileinfo.iteritems():\n cycle_file = os.path.join(weekdir,k)\n for i in range(abs(dt.days)):\n daily_file = os.path.join(daydir,'%s_fluxes.%s.nc'%(avg,(v+datetime.timedelta(days=i)).strftime('%Y-%m-%d')))\n if not os.path.lexists(daily_file):\n os.symlink(cycle_file,daily_file)\n #print daily_file,cycle_file",
"def program_out_of_date(self, stamp_path):\n if not os.path.exists(stamp_path) or self.clean:\n return True\n with open(stamp_path, 'r') as stamp:\n return self.date != stamp.read()",
"def is_timeseries(filepath):\n\n if os.path.isdir(os.path.dirname(filepath)):\n\n if len(os.listdir(os.path.dirname(filepath))) > 1:\n ts = True\n else:\n ts = False\n else:\n ts = None\n\n return ts",
"def periodCheck(data):",
"def find_duration(discharge, enroll_date, discharge_date):\n #pass\n today = datetime.datetime.today()\n if discharge : #True\n return (discharge_date - enroll_date).days\n else:\n return (today - enroll_date).days",
"def check_date(self):\n parse_date = datetime.datetime.strptime(self.json_parsed_file['date'], \"%d %b %Y\")\n current_day = datetime.datetime.now()\n\n # Check that the parsed date is older then the current date.\n if parse_date > current_day:\n self.output_message += \"Issue detected on date of the progress report. Parsed date: {}\\n\".format(parse_date)\n self.is_parsed_pdf_valid = False",
"def test_run(filename='prices.csv'):\n prices = pd.read_csv(filename, parse_dates=['date'])\n print(\"Most volatile stock: {}\".format(get_most_volatile(prices)))",
"def test_run(filename='prices.csv'):\n prices = pd.read_csv(filename, parse_dates=['date'])\n print(\"Most volatile stock: {}\".format(get_most_volatile(prices)))",
"def check(self):\n validity_year = int(self.date[0:4])\n validity_month = int(self.date[5:7])\n validity_day = int(self.date[8:10])\n if datetime.today().year > validity_year:\n self.flag = False\n elif datetime.today().year == validity_year:\n if datetime.today().month > validity_month:\n self.flag = False\n elif datetime.today().month == validity_month:\n if datetime.today().day > validity_day:\n self.flag = False\n else:\n self.flag = True\n else:\n self.flag = True\n else:\n self.flag = True",
"def test_new_items_have_equal_higher_dates(self):\n input_ = [\n self.indicator_record(date=datetime.date(2014, 2, 24),\n end_date=datetime.date(2014, 3, 24),\n value=0.0000),\n self.indicator_record(date=datetime.date(2014, 2, 25),\n end_date=datetime.date(2014, 3, 25),\n value=0.0007),\n ]\n records = self.expander._daily_three_field_indicator_expander(input_)\n increasing_days = [records[index_].date <= record.date and\n records[index_].end_date <= record.end_date\n for index_, record in enumerate(records[1:])]\n\n self.assertTrue(all(increasing_days))",
"def test_divide_csv_daily(self):\n\n with tempfile.TemporaryDirectory() as td:\n filename = \"storage_data.csv\"\n file_path = f\"{td}/{filename}\"\n with patch(\"masu.external.downloader.ocp.ocp_report_downloader.pd\") as mock_pd:\n with patch(\n \"masu.external.downloader.ocp.ocp_report_downloader.utils.detect_type\",\n return_value=(\"storage_usage\", None),\n ):\n dates = [\"2020-01-01 00:00:00 +UTC\", \"2020-01-02 00:00:00 +UTC\"]\n mock_report = {\n \"interval_start\": dates,\n \"persistentvolumeclaim_labels\": [\"label1\", \"label2\"],\n }\n df = pd.DataFrame(data=mock_report)\n mock_pd.read_csv.return_value = df\n daily_files = divide_csv_daily(file_path, self.ocp_manifest_id)\n self.assertNotEqual([], daily_files)\n self.assertEqual(len(daily_files), 2)\n gen_files = [\n f\"storage_usage.2020-01-01.{self.ocp_manifest_id}.0.csv\",\n f\"storage_usage.2020-01-02.{self.ocp_manifest_id}.0.csv\",\n ]\n expected_dates = [datetime.strptime(date[:10], \"%Y-%m-%d\") for date in dates]\n expected = [\n {\"filename\": gen_file, \"filepath\": f\"{td}/{gen_file}\", \"date\": expected_dates[i]}\n for i, gen_file in enumerate(gen_files)\n ]\n for expected_item in expected:\n self.assertIn(expected_item, daily_files)",
"def seasonal(path, date_inf=\"15-05\", date_sup=\"15-10\"):\n with open(os.path.join(path, \"info.json\"), \"r\") as f:\n info = json.load(f)\n\n date_inf = datetime.strptime(date_inf, \"%d-%m\").timetuple().tm_yday\n date_sup = datetime.strptime(date_sup, \"%d-%m\").timetuple().tm_yday\n day_of_year = timestamp_to_datetime(\n info['Sensing start']).timetuple().tm_yday\n\n return (day_of_year > date_inf) and (day_of_year < date_sup)",
"def _atime_op_argdate(self, fn, op, argdate):\n threshold_epoch = self._get_threshold_epoch(argdate)\n\n if dynamic_comparison(os.path.getatime(fn), op, threshold_epoch):\n return True\n else:\n return False",
"def monthly_avg(dacycle,avg):\n \n if avg not in ['transcom','transcom_extended','olson','olson_extended','country','flux1x1']:\n raise IOError,'Choice of averaging invalid'\n\n analysisdir = dacycle['dir.analysis']\n\n daydir = os.path.join(analysisdir , 'data_%s_daily'%avg)\n monthdir = os.path.join(analysisdir,'data_%s_monthly'%avg)\n\n if not os.path.exists(monthdir):\n print \"Creating new output directory \" + monthdir\n os.makedirs(monthdir)\n\n\n files = os.listdir(daydir) # get daily files\n files = [f for f in files if '-' in f and f.endswith('.nc')]\n\n if len(files) < 28:\n print 'No month is yet complete, skipping monthly average'\n return\n\n fileinfo = {}\n for filename in files: # parse date from each of them\n date=datetime.datetime.strptime(filename.split('.')[-2],'%Y-%m-%d')\n fileinfo[filename] = date\n\n years = [d.year for d in fileinfo.values()] # get actual years\n months = set([d.month for d in fileinfo.values()]) # get actual months\n \n sd = datetime.datetime(min(years),1,1)\n ed = datetime.datetime(max(years)+1,1,1)\n\n while sd < ed: \n\n nd = sd + relativedelta(months=+1)\n\n ndays_in_month = (nd-sd).days\n \n avg_files = [os.path.join(daydir,k) for k,v in fileinfo.iteritems() if v < nd and v >= sd]\n \n if len(avg_files) != ndays_in_month: # only once month complete \n #print 'New month (%02d) is not yet complete, skipping monthly average'%(sd.month)\n pass\n else:\n targetfile = os.path.join(monthdir,'%s_fluxes.%s.nc'%(avg,sd.strftime('%Y-%m')))\n if not os.path.exists(targetfile):\n print \"New month (%02d) is complete, I have %d days for the next file\"%(sd.month,ndays_in_month)\n command = ['ncra','-O']+ avg_files + [targetfile]\n status = subprocess.check_call(command)\n else:\n pass\n\n sd = nd",
"def get_file_age_win(path, cur_parsed, cur_datetime):\n\ttry:\n\t\tdate_str = time.ctime(os.path.getctime(path))\n\t\tdate_parsed = parse_date_str(date_str, ' ', 4, 1, 2)\n\t\tprint(\"file crtime: {}\".format(date_parsed))\n\t\tdate_dif = date_dif_precomputed(cur_parsed[0], cur_parsed[1], cur_parsed[2], date_parsed[0], date_parsed[1], date_parsed[2])\n\t\treturn date_dif\n\texcept:\n\t\tprint(\"EXCEPTION! fallback to mtime!\")\n\t\treturn get_file_age_default(path, cur_parsed, cur_datetime)",
"def isEffective( self, date ):\n return 1",
"def _ctime_op_argdate(self, fn, op, argdate):\n threshold_epoch = self._get_threshold_epoch(argdate)\n\n if dynamic_comparison(os.path.getctime(fn), op, threshold_epoch):\n return True\n else:\n return False"
] |
[
"0.6441482",
"0.6316295",
"0.6255487",
"0.5723394",
"0.57095486",
"0.5651001",
"0.55116266",
"0.5480839",
"0.5454888",
"0.5430491",
"0.54286057",
"0.5423096",
"0.5411704",
"0.53888714",
"0.53809935",
"0.53465474",
"0.5339659",
"0.53350204",
"0.53264946",
"0.5325297",
"0.5325297",
"0.5317432",
"0.5315717",
"0.53031474",
"0.52528125",
"0.5252321",
"0.51878387",
"0.51735497",
"0.517276",
"0.51726305"
] |
0.7852443
|
0
|
sum the total of weighted cells in each polygon, compute the unit value (where possible), and write to raster. vect = input vector map; val = vector map attribute with desired value; weights = input raster map with cell weights; out = output raster map; wcol = weight column in vect (default = name of output raster), deleted prior to function return
|
def calcUnitWeight(vect, val, weights, out, wcol=None):
    # Default the temporary weight column to the name of the output raster.
    if not wcol:
        wcol = out
    # Sum the weight-raster cells falling inside each polygon into column wcol.
    grass.run_command('v.rast.sum', zones=vect, _input=weights, column=wcol)
    # Compute the unit value (attribute value divided by the summed weight),
    # only where the summed weight is positive.
    grass.run_command('v.db.update', column=wcol, value=val + '/' + wcol,
                      where=wcol + '>0')
    # Rasterize the per-polygon unit value into the output map.
    grass.run_command('v.to.rast', _input=vect, use='attr',
                      column=wcol, output=out)
    # Drop the temporary weight column before returning.
    grass.run_command('v.db.dropcol', input=vect, column=wcol)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def calcweighted(store):\n nobs = store['yvec'].shape[0]\n store['Upper'].put(-store['rho'], range(0, nobs - 1), range(1, nobs))\n store['Upper'].matvec(store['yvec'], store['yvectil'])\n for i in xrange(store['xmat'].shape[1]):\n store['Upper'].matvec(store['xmat'][:, i], store['xmattil'][:, i])",
"def peridym_compute_weighted_volume(cell_cent, cell_vol, nbr_lst, nbr_beta_lst, horizon, omega_fun):\n\n mw = np.zeros(len(cell_vol), dtype=float) #m is wighted volume\n\n for i in range(len(cell_cent)):\n curr_node_coord = cell_cent[i]\n \n #declare empty lists for current node neighbor\n #attributes like neighbor bond vector, bond len,\n #and influence field \n #refer ch5 algo1 of handbook of peridynamic modelling\n #by silling etal \n\n curr_nbr_lst = nbr_lst[i] \n curr_beta_lst = nbr_beta_lst[i]\n curr_nbr_bnd_vct = cell_cent[curr_nbr_lst] - curr_node_coord\n curr_nbr_bnd_len = la.norm(curr_nbr_bnd_vct, 2, axis=1)\n mw[i] = sum(omega_fun(curr_nbr_bnd_vct, horizon)*curr_nbr_bnd_len**2*cell_vol[curr_nbr_lst]*curr_beta_lst)\n\n return mw",
"def calculate_weighted_results():\n pass",
"def pz_weight_mcal(cat,mask,bins,binnum=100,pdf=False):\n\n if pdf:\n print 'transfer pdf support'\n return\n else:\n if hasattr(cat,'pzstore'):\n nz = cat.pzstore.pz_full\n else:\n nz = cat.pz_full\n mask1=catalog.CatalogMethods.get_cuts_mask(cat,full=True)\n e1,e2,w,m1,m2=lin.linear_methods.get_lin_e_w_ms(cat,mask=mask1,xi=True)\n weights = (m1+m2)/2.*np.ones(len(cat.coadd))\n w0 = []\n for i in range(len(mask1)):\n if i==0:\n mask = mask1[0]\n else:\n mask = np.append(mask1[i],mask1[5])\n h0,b0=np.histogram(nz[mask],bins=binnum,weights=weights[mask])\n w=np.ones(len(nz))\n print 'w0',len(w)\n for j in range(cat.sbins):\n binmask=bins[j]\n h,b=np.histogram(nz[binmask],bins=b0,weights=weights[binmask])\n for k in range(binnum):\n binmask2=(nz>b[k])&(nz<=b[k+1])\n mask_=binmask[np.in1d(binmask,np.where(binmask2)[0])]\n if h[k]<0.01*h0[k]:\n w[mask_]=0.\n else:\n w[mask_]=0.5*h0[k]/h[k]\n w0.append(w)\n\n print 'max/min/mean weight', k,np.max(w),np.min(w),np.mean(w[binmask])\n\n return w0,weights",
"def molecular_weight(elements):\n return (np.array([atomic_mass[i.upper()] for i in elements]).sum())",
"def calc_weight(base):\n return weights[base] + sum([calc_weight(i) for i in leafs[base]])",
"def pz_weight(cat,mask,bins,binnum=100,pdf=False):\n\n if pdf:\n print 'transfer pdf support'\n return\n else:\n if hasattr(cat,'pzstore'):\n nz = cat.pzstore.pz_full\n else:\n nz = cat.pz_full\n mask1=mask\n e1,e2,w,m1,m2=lin.linear_methods.get_lin_e_w_ms(cat,mask=mask1,xi=True)\n if cat.wt:\n weights = w * (m1+m2)/2.\n else:\n weights = (m1+m2)/2.*np.ones(np.sum(mask))\n h0,b0=np.histogram(nz[mask],bins=binnum,weights=weights)\n w=np.ones(len(nz))\n print 'w0',len(w)\n for j in range(cat.sbins):\n binmask=(bins==j)&mask\n h,b=np.histogram(nz[binmask],bins=b0,weights=weights[bins[mask]==j])\n for k in range(binnum):\n binmask2=(nz>b[k])&(nz<=b[k+1])\n mask_=binmask&binmask2\n if h[k]<0.01*h0[k]:\n w[mask_]=0.\n else:\n w[mask_]=0.5*h0[k]/h[k]\n\n print 'max/min/mean weight', k,np.max(w),np.min(w),np.mean(w[binmask])\n\n return w,weights",
"def wsum(self):\n return reduce(operator.add, self.wvalues, 0.0)",
"def __weight_func(self, u: str, _: str, d: Dict):\n # # # Weight of edge\n edge_wt = d.get(\"weight\", 0)\n # # # List of required compounds\n tmp_required_compounds = d.get(\"required_compounds\", None)\n # # # Sum over costs of required compounds.\n # # # Only for edges from compound node to rxn node\n if ';' not in u and tmp_required_compounds is not None:\n required_compound_costs = np.sum([self.compound_costs[n] for n in tmp_required_compounds])\n else:\n required_compound_costs = 0.0\n\n return edge_wt + required_compound_costs",
"def weighted_sum(self):\n return sum(self.wvalues)",
"def compute(self, inputs, outputs):\n #super().compute(inputs, outputs)\n outputs['stuff'] = inputs['widths'] * 2\n outputs['areas'] = inputs['lengths'] * 2\n\n outputs['total_volume'] = np.sum(outputs['areas']) + np.sum(outputs['stuff'])",
"def polys_to_mask_wrt_box_rec(rec_rois_gt_chars, polygon, box, M_HEIGHT, M_WIDTH, shrink = 0.5, weight_wh=False):\n char_map = np.zeros((2, M_HEIGHT, M_WIDTH), dtype=np.float32)\n char_weight = np.ones((M_HEIGHT, M_WIDTH), dtype=np.float32)\n char_box = np.zeros((M_HEIGHT, M_WIDTH, 4), dtype=np.float32)\n char_box_inside_weight = np.zeros((M_HEIGHT, M_WIDTH, 4), dtype=np.float32)\n # char_map_weight = np.zeros((2, M_HEIGHT, M_WIDTH), dtype=np.float32)\n\n xmin = box[0]\n ymin = box[1]\n w = box[2] - box[0]\n h = box[3] - box[1]\n\n w = np.maximum(w, 1)\n h = np.maximum(h, 1)\n\n polygon_norm = np.array(polygon[0], dtype=np.float32)\n polygon_norm[0::2] = (polygon_norm[0::2] - xmin) * M_WIDTH / w\n polygon_norm[1::2] = (polygon_norm[1::2] - ymin) * M_HEIGHT / h\n polygon_reshape = polygon_norm.reshape((-1, 2)).astype(np.int32)\n cv2.fillPoly(char_map[0,:,:], [polygon_reshape], 1)\n\n if rec_rois_gt_chars.size > 0:\n rec_rois_gt_chars[0,:,0:8:2] = (rec_rois_gt_chars[0,:,0:8:2] - xmin) * M_WIDTH / w\n rec_rois_gt_chars[0,:,1:8:2] = (rec_rois_gt_chars[0,:,1:8:2] - ymin) * M_HEIGHT / h\n x_center = np.mean(rec_rois_gt_chars[0,:,0:8:2], axis = 1).astype(np.int32)\n y_center = np.mean(rec_rois_gt_chars[0,:,1:8:2], axis = 1).astype(np.int32)\n for i in range(rec_rois_gt_chars.shape[1]): \n if x_center[i]>=0 and x_center[i]<M_WIDTH and y_center[i]>=0 and y_center[i]<M_HEIGHT:\n gt_poly = rec_rois_gt_chars[0,i,:8]\n box_xmin = max(0,min(gt_poly[0:8:2]))\n box_xmax = min(M_WIDTH - 1, max(gt_poly[0:8:2]))\n box_ymin = max(0,min(gt_poly[1:8:2]))\n box_ymax = min(M_HEIGHT - 1, max(gt_poly[1:8:2]))\n gt_poly_reshape = gt_poly.reshape((4, 2))\n char_cls = int(rec_rois_gt_chars[0,i,8])\n if shrink>0:\n # rpoly = _shrink_poly(gt_poly_reshape.copy(), shrink) ## shrink for regression\n # spoly = _shrink_poly(gt_poly_reshape.copy(), shrink*1.5) ## shrink for classification\n rpoly = _shrink_rect(gt_poly_reshape.copy(), shrink) ## shrink for regression\n spoly = _shrink_rect(gt_poly_reshape.copy(), shrink/2) ## shrink for classification\n # print('gt_poly_reshape', gt_poly_reshape)\n # print('spoly', spoly)\n else:\n rpoly = gt_poly_reshape.copy()\n spoly = gt_poly_reshape.copy()\n rpoly = rpoly.astype(np.int32)\n box_xmin_shrink = max(0, min(rpoly[:,0]))\n box_xmax_shrink = min(M_WIDTH - 1, max(rpoly[:,0]))\n box_ymin_shrink = max(0, min(rpoly[:,1]))\n box_ymax_shrink = min(M_HEIGHT - 1, max(rpoly[:,1]))\n \n if weight_wh:\n char_box_inside_weight[box_ymin_shrink:box_ymax_shrink, box_xmin_shrink:box_xmax_shrink, 0] = 1.0\n char_box_inside_weight[box_ymin_shrink:box_ymax_shrink, box_xmin_shrink:box_xmax_shrink, 1] = (box_ymax - box_ymin)*1.0/(box_xmax - box_xmin)\n char_box_inside_weight[box_ymin_shrink:box_ymax_shrink, box_xmin_shrink:box_xmax_shrink, 2] = 1.0\n char_box_inside_weight[box_ymin_shrink:box_ymax_shrink, box_xmin_shrink:box_xmax_shrink, 3] = (box_ymax - box_ymin)*1.0/(box_xmax - box_xmin)\n else:\n char_box_inside_weight[box_ymin_shrink:box_ymax_shrink, box_xmin_shrink:box_xmax_shrink, :] = 1.0\n tmp_char_box = np.zeros((M_HEIGHT, M_WIDTH))\n tmp_char_box[box_ymin_shrink:box_ymax_shrink, box_xmin_shrink:box_xmax_shrink] = 1.0\n index = np.where(tmp_char_box == 1)\n if weight_wh:\n char_box[box_ymin_shrink:box_ymax_shrink, box_xmin_shrink:box_xmax_shrink, 0] = np.reshape((index[0] - box_ymin) / float(M_HEIGHT), (box_ymax_shrink - box_ymin_shrink, box_xmax_shrink - box_xmin_shrink))\n char_box[box_ymin_shrink:box_ymax_shrink, box_xmin_shrink:box_xmax_shrink, 1] = np.reshape((box_xmax - index[1]) / 
float(M_HEIGHT), (box_ymax_shrink - box_ymin_shrink, box_xmax_shrink - box_xmin_shrink))\n char_box[box_ymin_shrink:box_ymax_shrink, box_xmin_shrink:box_xmax_shrink, 2] = np.reshape((box_ymax - index[0]) / float(M_HEIGHT), (box_ymax_shrink - box_ymin_shrink, box_xmax_shrink - box_xmin_shrink))\n char_box[box_ymin_shrink:box_ymax_shrink, box_xmin_shrink:box_xmax_shrink, 3] = np.reshape((index[1] - box_xmin) / float(M_HEIGHT), (box_ymax_shrink - box_ymin_shrink, box_xmax_shrink - box_xmin_shrink))\n else:\n char_box[box_ymin_shrink:box_ymax_shrink, box_xmin_shrink:box_xmax_shrink, 0] = np.reshape((index[0] - box_ymin) / float(M_HEIGHT), (box_ymax_shrink - box_ymin_shrink, box_xmax_shrink - box_xmin_shrink))\n char_box[box_ymin_shrink:box_ymax_shrink, box_xmin_shrink:box_xmax_shrink, 1] = np.reshape((box_xmax - index[1]) / float(M_WIDTH), (box_ymax_shrink - box_ymin_shrink, box_xmax_shrink - box_xmin_shrink))\n char_box[box_ymin_shrink:box_ymax_shrink, box_xmin_shrink:box_xmax_shrink, 2] = np.reshape((box_ymax - index[0]) / float(M_HEIGHT), (box_ymax_shrink - box_ymin_shrink, box_xmax_shrink - box_xmin_shrink))\n char_box[box_ymin_shrink:box_ymax_shrink, box_xmin_shrink:box_xmax_shrink, 3] = np.reshape((index[1] - box_xmin) / float(M_WIDTH), (box_ymax_shrink - box_ymin_shrink, box_xmax_shrink - box_xmin_shrink))\n \n ## get classification target\n spoly = spoly.astype(np.int32)\n sbox_xmin_shrink = max(0, min(spoly[:,0]))\n sbox_xmax_shrink = min(M_WIDTH - 1, max(spoly[:,0]))\n sbox_ymin_shrink = max(0, min(spoly[:,1]))\n sbox_ymax_shrink = min(M_HEIGHT - 1, max(spoly[:,1]))\n\n ## very small char box\n if sbox_xmax_shrink == sbox_xmin_shrink:\n sbox_xmax_shrink = sbox_xmin_shrink + 1\n if sbox_ymax_shrink == sbox_ymin_shrink:\n sbox_ymax_shrink = sbox_ymin_shrink + 1\n\n char_map[1, sbox_ymin_shrink:sbox_ymax_shrink, sbox_xmin_shrink:sbox_xmax_shrink] = char_cls\n\n ## char_weight \n pos_index = np.where(char_map[1, :, :] > 0)\n pos_num = pos_index[0].size\n if pos_num > 0:\n pos_weight = 1.0 * (M_WIDTH*M_HEIGHT - pos_num)/pos_num\n char_weight[pos_index] = pos_weight\n else: ## for samples without char ann\n char_map[1, :, :].fill(-1)\n\n\n return char_map, char_weight, char_box, char_box_inside_weight",
"def test_weighting_implementation():\n\n # generate two locusts of points\n npts = 100\n epsilon = 0.05\n # cluster 1\n coords1 = generate_locus_of_3d_points(npts, 0.1, 0.1, 0.1, epsilon=epsilon)\n # cluster 2\n coords2 = generate_locus_of_3d_points(npts, 0.9, 0.9, 0.9, epsilon=epsilon)\n\n # generate orientation vectors for cluster 1\n vectors1 = generate_aligned_vectors(len(coords1))\n\n # generate a random index value to check for each cluster\n idx = np.random.randint(npts)\n idx2 = np.random.randint(npts)\n\n # calculate dot product between vectors1 and cluster 2\n r = np.sqrt((0.9 - 0.1) ** 2 + (0.9 - 0.1) ** 2 + (0.9 - 0.1) ** 2)\n # s, vector between coords1 and cluster2\n s = np.zeros((3))\n s[0] = coords2[idx2, 0] - coords1[idx, 0]\n s[1] = coords2[idx2, 1] - coords1[idx, 1]\n s[2] = coords2[idx2, 2] - coords1[idx, 2]\n\n # calculate dot product between orientation and direction between cluster 1 and 2\n angles = angles_between_list_of_vectors(vectors1[idx], s)\n costheta = np.cos(angles) # dot product between vectors\n\n idx_costheta = costheta\n\n # define radial bins\n rbins = np.array([0.0, 0.1, r + 2.0 * epsilon])\n\n # define weights appropiate for weighting function\n weights1 = np.zeros((npts, 4))\n weights1[idx] = 1.0\n weights1[:, 1] = vectors1[:, 0]\n weights1[:, 2] = vectors1[:, 1]\n weights1[:, 3] = vectors1[:, 2]\n weights2 = np.zeros(npts)\n weights2[idx2] = 1.0\n\n # calculate weighted counts\n\n # weighting 1\n # calculate weighted counts\n weighted_counts, counts = positional_marked_npairs_3d(\n coords1,\n coords2,\n rbins,\n period=None,\n weights1=weights1,\n weights2=weights2,\n weight_func_id=1,\n num_threads=1,\n )\n\n msg = \"weighted counts do not match expected result given the weighting function\"\n assert np.isclose(weighted_counts[-1], idx_costheta, rtol=0.01 / npts), msg",
"def map(self, mapunit):\n\n #The number of bands to measure the LF for\n if len(mapunit['luminosity'].shape)>1:\n self.nbands = mapunit['luminosity'].shape[1]\n else:\n mapunit['luminosity'] = np.atleast_2d(mapunit['luminosity']).T\n self.nbands = 1\n\n #If only measuring for centrals, get the appropriate\n #rows of the mapunit\n\n mu = {}\n if self.central_only:\n delete_after_map = True\n for k in mapunit.keys():\n mu[k] = mapunit[k][mapunit['central']==1]\n else:\n delete_after_map = False\n mu = mapunit\n\n #Want to count galaxies in bins of luminosity for\n #self.nbands different bands in self.nzbins\n #redshift bins\n if self.lumcounts is None:\n self.lumcounts = np.zeros((self.njack, len(self.magbins)-1,\n self.nbands, self.nzbins))\n\n #Assume redshifts are provided, and that the\n #mapunit is sorted in terms of them\n \n if self.lightcone:\n for i, z in enumerate(self.zbins[:-1]):\n zlidx = mu['redshift'].searchsorted(self.zbins[i])\n zhidx = mu['redshift'].searchsorted(self.zbins[i+1])\n\n #Count galaxies in bins of luminosity\n for j in range(self.nbands):\n if not self.CMASS:\n c, e = np.histogram(mu['luminosity'][zlidx:zhidx,j],\n bins=self.magbins)\n else:\n cidx = self.selectCMASS(mu['appmag'][zlidx:zhidx])\n c, e = np.histogram(mu['luminosity'][zlidx:zhidx,j][cidx],\n bins=self.magbins)\n \n self.lumcounts[self.jcount,:,j,i] += c\n else:\n for j in range(self.nbands):\n if not self.CMASS:\n c, e = np.histogram(mu['luminosity'][:,j],\n bins=self.magbins)\n else:\n cidx = self.selectCMASS(mu['appmag'][:])\n c, e = np.histogram(mu['luminosity'][:,j][cidx],\n bins=self.magbins)\n \n self.lumcounts[self.jcount,:,j,0] += c\n\n if delete_after_map:\n True",
"def distance_map(self, scaling='sum'):\n\n if scaling not in ['sum', 'mean']:\n raise ValueError(f'scaling should be either \"sum\" or \"mean\" ('\n f'\"{scaling}\" not valid)')\n\n um = nan * zeros((self._weights.shape[0],\n self._weights.shape[1],\n 8)) # 2 spots more for hexagonal topology\n\n ii = [[0, -1, -1, -1, 0, 1, 1, 1]]*2\n jj = [[-1, -1, 0, 1, 1, 1, 0, -1]]*2\n\n if self.topology == 'hexagonal':\n ii = [[1, 1, 1, 0, -1, 0], [0, 1, 0, -1, -1, -1]]\n jj = [[1, 0, -1, -1, 0, 1], [1, 0, -1, -1, 0, 1]]\n\n for x in range(self._weights.shape[0]):\n for y in range(self._weights.shape[1]):\n w_2 = self._weights[x, y]\n e = y % 2 == 0 # only used on hexagonal topology\n for k, (i, j) in enumerate(zip(ii[e], jj[e])):\n if (x+i >= 0 and x+i < self._weights.shape[0] and\n y+j >= 0 and y+j < self._weights.shape[1]):\n w_1 = self._weights[x+i, y+j]\n um[x, y, k] = fast_norm(w_2-w_1)\n\n if scaling == 'mean':\n um = nanmean(um, axis=2)\n if scaling == 'sum':\n um = nansum(um, axis=2)\n\n return um/um.max()",
"def computeWeights(xcorr_km,ycorr_km,Tmask,kmpix,dx):\n\n #create grid z where each point represents radius from center of Titan\n lim = kmpix * len(Tmask[0])/2.\n newxpix = int(kmpix * len(Tmask[0]) / dx)\n newypix = int(kmpix * len(Tmask[1]) / dx)\n \n # Adjust dx to match the new (integer) number of pixels\n dx = kmpix * len(Tmask[0]) / newxpix\n \n # new pixel area (sr)\n Sr_pix = np.arctan(dx/Titan_dist)**2\n \n #Need to apply Gaussian taper to the extraction region (not convolution!), to simulate effect of the telescope beam\n TmaskT = taperMask(Tmask)\n \n #Resample the extraction mask to the new grid\n g = zoom(TmaskT, (newxpix/len(Tmask[0]), newypix/len(Tmask[1])), order=1)\n\n x, y = np.indices(g.shape)\n x = dx * (x-(x.max()-x.min())/2.0)\n y = dx * (y-(y.max()-y.min())/2.0)\n z = np.hypot(x, y)\n \n midpoints = 0.5 * (radii[1:] + radii[:-1])\n \n #Change values of z to the angles corresponding to the mid-point radii\n angles = [np.degrees(np.arcsin(float(r)/float(top_atm))) for r in midpoints] #top of atmosphere emission angle\n for i in range(len(angles)):\n z[np.logical_and(z >= radii[i], z < radii[i+1])] = angles[i]\n \n z[z >= radii[-1]] = float('NaN')\n \n #compute normalized weights, taking account of any blank sky (outside top_atm) inside the aperture\n wts = {}\n for val in angles:\n garr = g[np.where(z == val)]\n wts[val] = sum(garr)\n gnanarr = g[np.where(np.isnan(z))] \n s = sum(wts.values())+sum(gnanarr)\n for key,val in wts.items():\n val = float(val)/float(s)\n wts[key] = val\n\n meanangle = sum([val*key for key,val in wts.items()])\n print('Mean emission angle: %.2f deg' %(meanangle))\n \n ########################################################################\n\n #Now compute mean latitude and longitude of observation\n\n #Finding vector of true north of Titan\n northx = -np.sin(ccw)*np.cos(subobslat)\n northy = np.cos(ccw)*np.cos(subobslat)\n northz = np.sin(subobslat) \n\n with np.errstate(divide='ignore',invalid='ignore'): #We actually want all y > Titan_radius + top_atm to be nans, so the invalid inputs to arcsin are helping here\n zcoord = np.sqrt((top_atm)**2 - x**2 - y**2) #these are the actual z-coordinates (distance from Titan center to observer) at each x,y point\n dprod = (northx*x + northy*y + northz*zcoord)/(top_atm) #dot product of north pole vector and each vector in model planet\n z_lat = 90 - np.degrees(np.arccos(dprod)) #latitude of each point on the 2-d grid\n\n conv = np.multiply(g,z_lat)\n meanlat = np.nansum(conv)/np.nansum(g)\n print('Mean top-of-atmosphere latitude: %.2f deg' %(meanlat))\n\n ########################################################################\n\n #Plots\n \n if showplot:\n # Plot extraction aperture overlaid on Titan with lines of latitude\n\n fig = plt.figure() # a new figure window\n ax = fig.add_subplot(1, 1, 1)\n\n img=ax.imshow(g.transpose(),extent=[x.min(), x.max(), y.min(),y.max()], origin='lower', interpolation='nearest')\n \n titanlimb = plt.Circle((0, 0), 2575, color='k',fill=false)\n titanatm = plt.Circle((0, 0), top_atm, color='w',linestyle='dashed',fill=false)\n ax.add_artist(titanlimb)\n ax.add_artist(titanatm)\n \n #Overlay latitudes as contours\n ctr=ax.contour(z_lat.transpose(),colors='gold',extent=[x.min(), x.max(), y.min(),y.max()],linestyles='dashed')\n ax.clabel(ctr, inline=1, fontsize=12, fmt='%.0f')\n for line in ctr.collections: #Making negative contours solid instead of dashed\n if line.get_linestyle() != [(None, None)]:\n line.set_linestyle([(None, None)])\n \n #Overlay the original 
extraction aperture (interpolated to new grid so it looks a bit pixelated)\n ax.contour(g.transpose(),levels=[0.999],extent=[x.min(), x.max(), y.min(),y.max()],colors='0.75',linestyles=\"dotted\")\n \n ax.set_xlabel('Distance (km)',fontsize=16)\n ax.set_ylabel('Distance (km)',fontsize=16)\n ax.set_title('NEMESIS .spx weights with respect to atmosphere', fontsize=16)\n \n #Colorbar\n cbar = fig.colorbar(img)\n cbar.set_label('Weight',fontsize=14)\n \n fig.show()\n fig.savefig(outimg)\n\n #Other diagnostic plots\n \n ## #Plot Gaussian\n ## fig0 = plt.figure(figsize = (15,15))\n ## ax = fig0.add_subplot(111) \n ## ax.imshow(g,cmap='RdBu',origin='lower')\n ## plt.show()\n ## #Plot z\n ## fig1 = plt.figure(figsize = (15,15))\n ## ax = fig1.add_subplot(111)\n ## ax.imshow(z, cmap='Blues',origin='lower')\n ## plt.show()\n\n ## #Plot convolution\n ## z_flat = np.copy(z)*0 + 1\n ## conv_flat = np.multiply(g,z_flat)\n ## conv = np.multiply(g,z)\n ## fig2,ax = plt.subplots(figsize = (15,15))\n ## im = ax.imshow(conv_flat, cmap='RdBu',origin='lower')\n ## ctr = ax.contour(z,colors='yellow')\n ## ax.clabel(ctr, inline=1, fontsize=14, fmt='%1.1f')\n ## cbar = fig2.colorbar(im,orientation=\"horizontal\")\n ## cbar.ax.set_xlabel('Weighting',fontsize=18)\n \n return wts,meanlat,g,Sr_pix",
"def weighted(df,year,vector_col,level='label') -> pd.Series:\n # 1 over the distance in years\n df['temp_dist'] = (1 / (abs(year - df.year) + 1))\n # normalize, so weights add up to one\n df['temp_dist'] = df['temp_dist'] / sum(df['temp_dist'])\n # time weighted vector (tw_vector) is the product of the vector and the weight\n df['tw_vector'] = df[vector_col] * df['temp_dist']\n # sum vectors by label (sum or mean??)\n return df.groupby(level)['tw_vector'].apply(np.sum,axis=0)",
"def add_character_others(image, weight_map, weight_val, bbox):\n\n\tif not Polygon(bbox.reshape([4, 2]).astype(np.int32)).is_valid:\n\t\treturn image\n\n\ttop_left = np.array([np.min(bbox[:, 0]), np.min(bbox[:, 1])]).astype(np.int32)\n\tif top_left[1] > image.shape[0] or top_left[0] > image.shape[1]:\n\t\treturn image, weight_map\n\tbbox -= top_left[None, :]\n\ttransformed = four_point_transform(gaussian_heatmap.copy(), bbox.astype(np.float32))\n\n\tstart_row = max(top_left[1], 0) - top_left[1]\n\tstart_col = max(top_left[0], 0) - top_left[0]\n\tend_row = min(top_left[1] + transformed.shape[0], image.shape[0])\n\tend_col = min(top_left[0] + transformed.shape[1], image.shape[1])\n\timage[max(top_left[1], 0):end_row, max(top_left[0], 0):end_col] += \\\n\t\ttransformed[\n\t\tstart_row:end_row - top_left[1],\n\t\tstart_col:end_col - top_left[0]]\n\n\tweight_map[max(top_left[1], 0):end_row, max(top_left[0], 0):end_col] += \\\n\t\tnp.float32(transformed[\n\t\t\tstart_row:end_row - top_left[1],\n\t\t\tstart_col:end_col - top_left[0]] != 0)*weight_val\n\n\treturn image, weight_map",
"def compute_weights(self, unprojected_outs, select_single=None):\n if select_single is not None:\n sz = unprojected_outs[0].size()\n ret = maybe_cuda(torch.zeros((sz[0], sz[1], len(unprojected_outs))))\n ret[:, :, select_single] = 1.0\n return ret\n if self.fixed_weights is not None:\n return self.fixed_weights\n logits = self.logit_fn(self.gating_network(torch.cat(unprojected_outs, 2)))\n return torch.clamp(logits / torch.sum(logits, dim=2, keepdim=True), 0.0, 1.0)",
"def get_grid_weights(lookup, u):\n locations = np.arange(\n int(np.floor(u - lookup.W / 2)) + 1,\n int(np.floor(u + lookup.W / 2) + 1))\n nu_C = (u - lookup.W / 2) - np.floor(u - lookup.W / 2)\n rev = (nu_C > 0.5)\n if rev:\n nu_C = 1.0 - nu_C\n loc = lookup.Ms * nu_C\n pt = int(np.floor(loc)) if lookup.degree > 0 else int(np.round(loc))\n ft = loc - pt\n # Perform polynomial interpolation\n weights = lookup.table[0][:, pt].copy()\n factor = 1\n for k in range(lookup.degree):\n factor *= (ft - k) / (k + 1)\n weights += lookup.table[k + 1][:, pt] * factor\n if rev:\n weights = weights[::-1]\n return locations, weights",
"def _eval(self, v):\n return super(weighted_sum_squares, self)._eval(self.weight * v)",
"def molecular_weight(self):\n mw = 0.0\n for a in self.allAtoms:\n try:\n mw += self.atomic_weight[a.element]\n except KeyError as key:\n print(\"Unknown element: %s\" % (key))\n return mw",
"def _buildWeights(self):\r\n # Compute the spatial tree\r\n kd = spatial.cKDTree(self.XYin)\r\n \r\n # Perform query on all of the points in the grid\r\n dist,self.ind=kd.query(self.XYout,distance_upper_bound=self.maxdist,k=self.NNear)\r\n \r\n self.Nc = np.size(self.ind,axis=0)\r\n print '%d interpolation points.'%self.Nc\r\n # Now loop through and get the weights for each point\r\n self.W = np.zeros((self.NNear,self.Nc))\r\n\r\n # Print percentages\r\n p0=0\r\n pstep=5\r\n for ii in range(0,self.Nc):\r\n \r\n if self.verbose:\r\n pfinish = float(ii)/float(self.Nc)*100.0\r\n if pfinish> p0:\r\n print '%3.1f %% complete...'%pfinish\r\n p0+=pstep\r\n \r\n W = self.getWeights(dist[ii,:],self.XYin[self.ind[ii,:],0],self.XYin[self.ind[ii,:],1])\r\n self.W[:,ii] = W.T",
"def compute_W2(elec_sets, districts, min_cand_weights_dict, black_pref_cands_df, hisp_pref_cands_df, \\\n cand_race_dict):\n \n min_cand_black_W2 = np.empty((len(elec_sets),0), float)\n min_cand_hisp_W2 = np.empty((len(elec_sets),0), float)\n min_cand_neither_W2 = np.empty((len(elec_sets),0), float)\n \n for dist in districts:\n black_pref = list(black_pref_cands_df[dist])\n\n black_pref_race = [cand_race_dict[bp] for bp in black_pref]\n black_cand_weight = [min_cand_weights_dict[\"Relevant Minority\"] if \"Black\" in bpr else \\\n min_cand_weights_dict[\"Other\"] for bpr in black_pref_race]\n min_cand_black_W2 = np.append(min_cand_black_W2, np.array([black_cand_weight]).transpose(), axis = 1)\n \n hisp_pref = list(hisp_pref_cands_df[dist])\n hisp_pref_race = [cand_race_dict[hp] for hp in hisp_pref]\n hisp_cand_weight = [min_cand_weights_dict[\"Relevant Minority\"] if \"Hispanic\" in hpr else \\\n min_cand_weights_dict[\"Other\"] for hpr in hisp_pref_race]\n min_cand_hisp_W2 = np.append(min_cand_hisp_W2, np.array([hisp_cand_weight]).transpose(), axis = 1)\n \n neither_cand_weight = [min_cand_weights_dict['Relevant Minority'] if ('Hispanic' in hpr and 'Black' in bpr) else\\\n min_cand_weights_dict['Other'] if ('Hispanic' not in hpr and 'Black' not in bpr) else \\\n min_cand_weights_dict['Partial '] for bpr,hpr in zip(black_pref_race, hisp_pref_race)]\n min_cand_neither_W2 = np.append(min_cand_neither_W2, np.array([neither_cand_weight]).transpose(), axis = 1)\n \n return min_cand_black_W2, min_cand_hisp_W2, min_cand_neither_W2",
"def _compute_W():\n if penalty == \"consensus\":\n W = 1.0 * np.array(\n [[0, 1, 0, 1, 1],\n [0, 0, 1, 0, 1],\n [1, 1, 1, 1, 1],\n [1, 1, 0, 0, 0],\n [1, 1, 1, 1, 0],\n [0, 0, 1, 0, 0]]\n )\n elif penalty in ['var', 'std']:\n W = np.empty((6, 5))\n for i, _ in enumerate(df_main.iterrows()):\n for j in range(5):\n vals = [df.iloc[i, j] for df in dfs]\n W[i, j] = np.std(vals)\n\n if penalty == 'var':\n W = W ** 2\n W = 1 / W\n else:\n W = np.ones((6, 5))\n\n return W / W.sum(axis=1).reshape((-1, 1))",
"def weighted_sum(W, X):\n\n if len(W) != len(X):\n print(\"Dimension of weight vector should be same as input vector.\")\n return\n\n else:\n H = 0\n\n for i in range(len(W)):\n H += (W[i] * X[i])\n \n return H",
"def calcweight( self ):\n weight = 0\n zeroval = 0\n for sensor in ('right_top', 'right_bottom', 'left_top', 'left_bottom'):\n\t\treading = self.readings[sensor]\n\t\tcalibration = self.named_calibration[sensor]\n if sensor == 'right_top':\n zeroval = self.rtzv\n elif sensor == 'right_bottom':\n zeroval = self.rbzv\n elif sensor == 'left_top':\n zeroval = self.ltzv\n else:\n zeroval = self.lbzv\n\t\tif reading > calibration[2]:\n\t\t\tprint \"Warning, %s reading above upper calibration value\" % sensor\n\t\tif reading < calibration[1]:\n\t\t\tweight += 1700 * (reading + zeroval - calibration[0]) / (calibration[1] - calibration[0])\n\t\telse:\n\t\t\tweight += 1700 * (reading + zeroval - calibration[1]) / (calibration[2] - calibration[1]) + 1700\n\n if self.debug == 1:\n print \"weight calculated pre-conversion\", weight\n print \"return val\", self.converttolbs( weight / 100.0 )\n\n # return self.converttolbs( weight / 100.0 )\n return weight / 100.0",
"def normalize_weights(self, weights, estimated_value, kriging_type):\n\n if kriging_type == 'ord':\n weight_matrix = weights[:-1].copy()\n output_matrix = weights[:-1].copy()\n elif kriging_type == 'sim':\n weight_matrix = weights.copy()\n output_matrix = weights.copy()\n else:\n print('You did not choose any kriging type. Chosen type: <sim> - simple kriging.')\n weight_matrix = weights.copy()\n output_matrix = weights.copy()\n\n ###### Calculate average covariance between the location being ######\n ###### estimated and the locations with negative weights ######\n\n locs = np.argwhere(weight_matrix < 0) # Check where weights are below 0.0\n locs = locs[:, 0]\n\n # Calculate covariance between those points and unknown point\n if len(locs) >= 1:\n c = []\n mu = 0\n for i in locs:\n _c = estimated_value * self.prepared_data[i, 2]\n mu = mu + estimated_value + self.prepared_data[i, 2]\n c.append(_c)\n output_matrix[i, 0] = 0\n mu = mu / len(c)\n cov = np.sum(c) / len(c) - mu * mu\n\n ###### Calculate absolute magnitude of the negative weights #####\n\n w = weight_matrix[weight_matrix < 0]\n w = w.T\n magnitude = np.sum(np.abs(w)) / len(w)\n\n ###### Test values greater than 0 and check if they need to be\n ###### rescaled to 0 ######\n\n ###### if weight > 0 and Covariance between unknown point and known\n ###### point is less than the average covariance between the location\n ###### being estimated and the locations with negative weights and\n ###### and weight is less than absolute magnitude of the negative\n ###### weights then set weight to zero #####\n\n positive_locs = np.argwhere(weight_matrix > 0) # Check where weights are greater than 0.0\n positive_locs = positive_locs[:, 0]\n\n for j in positive_locs:\n cov_est = (estimated_value * self.prepared_data[j, 2]) / 2\n mu = (estimated_value + self.prepared_data[j, 2]) / 2\n cov_est = cov_est - mu * mu\n if cov_est < cov:\n if weight_matrix[j, 0] < magnitude:\n output_matrix[j, 0] = 0\n\n ###### Normalize weight matrix to get a sum of all elements equal to 1 ######\n\n output_matrix = output_matrix / np.sum(output_matrix)\n\n return output_matrix\n else:\n return weights",
"def _calculate_weights(curr_level, edge_sum):\n curr_level_weights = {n: 1 for n in curr_level}\n for curr_node in edge_sum:\n curr_level_weights[curr_node] += edge_sum[curr_node]\n return curr_level_weights",
"def calculate_weight(self, element, total_cores_used, total_disk_used,\n total_memory_used):\n cpu_capacity = self.model.get_resource_from_id(\n resource.ResourceType.cpu_cores).get_capacity(element)\n\n disk_capacity = self.model.get_resource_from_id(\n resource.ResourceType.disk).get_capacity(element)\n\n memory_capacity = self.model.get_resource_from_id(\n resource.ResourceType.memory).get_capacity(element)\n\n score_cores = (1 - (float(cpu_capacity) - float(total_cores_used)) /\n float(cpu_capacity))\n\n # It's possible that disk_capacity is 0, e.g., m1.nano.disk = 0\n if disk_capacity == 0:\n score_disk = 0\n else:\n score_disk = (1 - (float(disk_capacity) - float(total_disk_used)) /\n float(disk_capacity))\n\n score_memory = (\n 1 - (float(memory_capacity) - float(total_memory_used)) /\n float(memory_capacity))\n # TODO(jed): take in account weight\n return (score_cores + score_disk + score_memory) / 3"
] |
[
"0.637614",
"0.5914817",
"0.5691052",
"0.56579494",
"0.5562731",
"0.55098605",
"0.548975",
"0.54792434",
"0.5427453",
"0.54175353",
"0.54081774",
"0.54019856",
"0.5401473",
"0.539906",
"0.5365114",
"0.5355184",
"0.5343921",
"0.53425604",
"0.5322546",
"0.5273757",
"0.51998305",
"0.519553",
"0.518479",
"0.51761115",
"0.51726943",
"0.5169847",
"0.5166425",
"0.5163352",
"0.51576656",
"0.5146848"
] |
0.7656683
|
0
|
Gets all paths from input to symbol
|
def get_paths_from(self, symbol):
    # Breadth-first traversal starting at the head node.
    to_return = []
    visitation_queue = [self.head]
    while len(visitation_queue) != 0:
        visiting = visitation_queue.pop(0)
        for elem in visiting.children:
            visitation_queue.append(elem)
        if symbol in visiting.inputs:
            # Walk back up to the root, collecting the model at each step,
            # and record the result as a SymbolPath for this node.
            v = visiting
            model_trail = []
            while v.parent is not None:
                model_trail.append(v.m)
                v = v.parent
            to_return.append(SymbolPath(visiting.inputs, model_trail))
    return to_return
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def _get_paths(symbol: Union[str, int]) -> str:\n if isinstance(symbol, str):\n return {\n 'circle':\n '\"M\"+b1+\",0A\"+b1+\",\"+b1+\" 0 1,1 0,-\"+b1+\"A\"+b1+\",\"+b1+\" 0 0,1 \"+b1+\",0Z\"',\n 'square':\n '\"M\"+b1+\",\"+b1+\"H-\"+b1+\"V-\"+b1+\"H\"+b1+\"Z\"',\n 'diamond':\n '\"M\"+b1+\",0L0,\"+b1+\"L-\"+b1+\",0L0,-\"+b1+\"Z\"',\n 'hexagram':\n '\"M-\"+b3+\",0l-\"+b2+\",-\"+b1+\"h\"+b3+\"l\"+b2+\",-\"+b1+\"l\"+b2+\",\"+b1+\"h\"+b3+\"l-\"+b2+\",\"+b1+\"l\"+'\n 'b2+\",\"+b1+\"h-\"+b3+\"l-\"+b2+\",\"+b1+\"l-\"+b2+\",-\"+b1+\"h-\"+b3+\"Z\"'\n }[symbol]\n return {\n 37: '\"M-\"+d1+\",\"+d3+\"L0,0M\"+d1+\",\"+d3+\"L0,0M0,-\"+d2+\"L0,0\"',\n 38: '\"M-\"+d1+\",-\"+d3+\"L0,0M\"+d1+\",-\"+d3+\"L0,0M0,\"+d2+\"L0,0\"',\n 39: '\"M\"+d3+\",\"+d1+\"L0,0M\"+d3+\",-\"+d1+\"L0,0M-\"+d2+\",0L0,0\"',\n 40: '\"M-\"+d3+\",\"+d1+\"L0,0M-\"+d3+\",-\"+d1+\"L0,0M\"+d2+\",0L0,0\"',\n 34: '\"M\"+d1+\",\"+d1+\"L-\"+d1+\",-\"+d1+\"M\"+d1+\",-\"+d1+\"L-\"+d1+\",\"+d1',\n 33: '\"M0,\"+d1+\"V-\"+d1+\"M\"+d1+\",0H-\"+d1',\n 35: '\"M0,\"+d1+\"V-\"+d1+\"M\"+d1+\",0H-\"+d1+\"M\"+d2+\",\"+d2+\"L-\"+d2+\",-\"+d2+\"M\"+d2+\",-\"+d2+\"L-\"+d2+\",\"+d2',\n 36: '\"M\"+d1+\",\"+d2+\"V-\"+d2+\"m-\"+d2+\",0V\"+d2+\"M\"+d2+\",\"+d1+\"H-\"+d2+\"m0,-\"+d2+\"H\"+d2'\n }[symbol]",
"def get_path(input_dictionary, output_dictionary,\n input_species_list, output_species_list):\n\n input_operon_list = []\n path_queue = [(input_operon_list, input_species_list) ]\n\n final_operon_path_list = []\n final_species_path_list = []\n\n while path_queue != []:\n\n ###print \"\\nget_path: path queue:\",path_queue\n\n path_queue,\\\n final_operon_path_list,\\\n final_species_path_list = traverse(input_dictionary,\n output_dictionary,\n input_species_list,\n output_species_list,\n path_queue,\n final_operon_path_list,\n final_species_path_list)\n\n return final_operon_path_list, final_species_path_list",
"def path_rules(self, from_symbol, to_symbol):\n # type: (Type[Nonterminal], Type[Nonterminal]) -> List[Type[Rule]]\n if from_symbol not in self.t or to_symbol not in self.t:\n return []\n return self.f[self.t[from_symbol]][self.t[to_symbol]] or []",
"def listPaths():\n try:\n paths = [x[1] for x in parseFstab(FSTAB)]\n return paths\n except DMException:\n return []",
"def find_paths(t, entry):\n paths = []\n if t.label == entry:\n return [[entry]]\n for b in t.branches:\n for p in find_paths(b, entry):\n paths.append([t.label] + p)\n return paths",
"def getSourcePaths(self, makeGlyphs=True, makeKerning=True, makeInfo=True):\n paths = []\n for name in self.sources.keys():\n paths.append(self.sources[name][0].path)\n return paths",
"def _find_all_paths(sample, previous_path=None):\n paths = []\n for key in sample:\n current_path = []\n if previous_path:\n current_path.extend(previous_path)\n current_path.append(key)\n #If the current value ist a mapping, search in this mapping for more paths\n if isinstance(sample[key], abc.Mapping):\n paths.extend(MappingValidator._find_all_paths(sample[key],\n previous_path=current_path))\n paths.append(current_path)\n return sorted(paths, key=lambda k: len(k))",
"def find_path_to(output_var, input_var):\r\n\r\n #If output and input are the same we have a singleton path\r\n if output_var is input_var:\r\n return [output_var]\r\n\r\n #If output has no inputs then there is no path\r\n owner = output_var.owner\r\n\r\n if owner is None:\r\n return None\r\n\r\n #If input_var is an input to the output node, there is a\r\n #simple two element path\r\n inputs = owner.inputs\r\n\r\n if input_var in inputs:\r\n return [input_var, output_var]\r\n\r\n #Otherwise we must recurse by searching for a path to one\r\n #of our inputs, then appending the output to that path\r\n for ipt in inputs:\r\n path = find_path_to(ipt, input_var)\r\n\r\n if path is not None:\r\n path.append(output_var)\r\n\r\n return path\r\n\r\n #Since none of the above methods returned a path, there is none\r\n return None",
"def _find_all_paths(self, start_vertex: str, end_vertex: str, path=[]):\n path = path + [start_vertex]\n if start_vertex == end_vertex:\n return [path]\n\n paths = []\n for vertex in self.graph[start_vertex]:\n if vertex not in path:\n extended_paths = self._find_all_paths(vertex,\n end_vertex,\n path)\n for p in extended_paths:\n paths.append(p)\n return paths",
"def paths(self):\n return self._visit(self.start)",
"def solution_path(self) -> list[State]:",
"def paths(self, return_indices=False):\n paths = []\n for tree in self.components():\n paths += self._single_tree_paths(tree, return_indices=return_indices)\n return paths",
"def find_all_path(self, start_vertex, end_vertex, path=[]):\n\n graph = self.__graph_dict\n path = path + [start_vertex]\n if start_vertex == end_vertex:\n return [path]\n\n if start_vertex not in graph:\n return []\n\n paths = []\n for vertex in graph[start_vertex]:\n if vertex not in path:\n extended_paths = self.find_all_path(vertex, end_vertex,path)\n for p in extended_paths:\n paths.append(p)\n return paths",
"def get_symbol(symbols):\n # Figure out if list of symbols or single symbol.\n if not hasattr(symbols, '__getitem__'):\n symbols = [symbols]\n elif len(symbols) == 3 and symbols[0] in ('p', 'P'):\n # Most likely a polygon specification (at least not a valid other\n # symbol).\n symbols = [symbols]\n\n symbols = [symbol_dict[symbol] if symbol in symbol_dict else symbol for\n symbol in symbols]\n\n paths = []\n for symbol in symbols:\n if isinstance(symbol, matplotlib.path.Path):\n return symbol\n elif hasattr(symbol, '__getitem__') and len(symbol) == 3:\n kind, n, angle = symbol\n\n if kind in ['p', 'P']:\n if kind == 'p':\n radius = 1. / cos(pi / n)\n else:\n # make the polygon such that it has area equal\n # to a unit circle\n radius = sqrt(2 * pi / (n * sin(2 * pi / n)))\n\n angle = pi * angle / 180\n patch = matplotlib.patches.RegularPolygon((0, 0), n,\n radius=radius,\n orientation=angle)\n else:\n raise ValueError(\"Unknown symbol definition \" + str(symbol))\n elif symbol == 'o':\n patch = matplotlib.patches.Circle((0, 0), 1)\n\n paths.append(patch.get_path().transformed(patch.get_transform()))\n\n return paths",
"def incoming_paths(root_dir, parent_dir):\n return {\n 'F1' : os.path.join(root_dir, \"F1\"),\n 'F' : os.path.join(parent_dir, \"F\"),\n 'F2' : os.path.join(parent_dir, \"F2-in\"),\n 'D1' : os.path.join(root_dir, \"D1\"),\n 'D' : os.path.join(parent_dir, \"D\"),\n 'D2' : os.path.join(parent_dir, \"D2-in\"),\n }",
"def _get_paths():\n paths = [\n '/'\n ]\n return paths",
"def hypernym_paths(self):\n paths = []\n hypernyms = self._direct_hypernyms\n if self.is_root():\n paths = [[self]]\n for hypernym in hypernyms:\n for ancestor_list in hypernym.hypernym_paths():\n ancestor_list.append(self)\n paths.append(ancestor_list)\n return paths",
"def extract_path(self):\n if self.extracted_path is not None:\n return self.extracted_path\n current = self\n path = []\n while current:\n path.append([current.end, current.path_cost])\n current = current.parent\n return list(reversed(path))",
"def find_all_path(self, start, end, path=[]):\n path = path+[start]\n if start == end:\n return path\n paths = []\n for node in self.graph[start]:\n if node not in path:\n newpaths = self.find_path(node, end, path)\n paths.append(newpaths)\n return paths",
"def _extract_kiss_path(self, start):\n for i in range(2, start):\n path_call = aprs.Callsign(self.frame[i * 7:])\n\n if path_call:\n if ord(self.frame[i * 7 + 6]) & 0x80:\n path_call.digi = True\n\n self.path.append(path_call)",
"def a_path(t,x):\n if label(t) == x:\n return [x]\n\n for branch in branches(t):\n rest_of_path = a_path(b,x)\n if rest_of_path:\n return [label(t)] + rest_of_path",
"def reconstructPath(came_from, current):\n path = [current]\n while current in came_from:\n current = came_from[current]\n path.append(current)\n print(f\"path: {path}\")\n return path",
"def find_paths(self, source, destination, closed=None):\n if closed is None:\n closed = set()\n closed.add(source)\n links = {x.trusted for x in self._tau\n if x.truster == source and x.trusted not in closed}\n if len(links) == 0: # base\n return []\n if destination in links: # base\n return [[Trust(source, destination)]]\n # recurse\n retval = []\n for link in links:\n linkpaths = self.find_paths(link, destination, closed)\n for path in linkpaths:\n path.insert(0, Trust(source, link))\n retval += linkpaths\n\n for path in retval:\n if None in path:\n retval.remove(path)\n if len(retval) == 0:\n return []\n return retval",
"def find_path(tree, x):\n if label(tree) == x:\n return [label(tree)]\n for b in branches(tree):\n path = find_path(b, x)\n if path:\n return [label(tree)] + path",
"def get_literal_beard_paths(beard_paths):\n\n return [get_literal_path(x) for x in beard_paths]",
"def get_all_paths(self):\n seen = set()\n for v in self:\n # v in self returns all nodes in the pathgraph\n if v not in seen:\n # self [v] returns a path containing v. If the v does not belong to a path\n # a singleton path [v] is returned\n yield self[v]\n seen.update(self[v])",
"def getPaths(self):\n return self.pathTuple",
"def _build_path(self):\r\n\r\n path = []\r\n \r\n for i in range(len(self.path) - 1):\r\n current_node = self.path[i]\r\n next_node = self.path[i + 1]\r\n \r\n key_list = [i for i in range(len(current_node.leaving_roads)) if current_node.leaving_roads[i].end == next_node]\r\n \r\n if len(key_list) == 0:\r\n raise Exception('ERROR (in gps._build_path()) : there is no route.')\r\n \r\n path.append(key_list[0])\r\n \r\n return path",
"def get_node_paths_by_full_object(self, name):\n components = re.split('[\\.:]', name)\n cur_node = self.top\n paths = []\n\n # Handle a case where we may have split things up by wildcard\n if '_' in components[0]:\n (left, right) = components[0].rsplit('_', 1)\n test_name = '{}_*'.format(left.lower())\n if test_name in cur_node.children:\n cur_node = cur_node.children[test_name]\n paths.append(cur_node)\n if len(components) == 1 and components[0][-1] == '*':\n return paths\n\n # Now iterate\n for component in components:\n cur_node = cur_node.children[component.lower()]\n paths.append(cur_node)\n\n # Return the list\n return paths",
"def paths(self, name=None):\n eh = SimpleErrorHandler()\n \n out = self._client.execute('paths', name, eh=eh)\n\n if name is not None:\n if not bool(eh):\n return None\n return out.strip()\n \n result = {}\n for line in out.splitlines():\n name, path = out.split(' = ', 1)\n result[name] = path.strip()\n \n return result"
] |
[
"0.6845581",
"0.6428056",
"0.63166505",
"0.6227308",
"0.6226829",
"0.6221099",
"0.6219737",
"0.60980934",
"0.6054122",
"0.6047832",
"0.60343784",
"0.6013765",
"0.5980628",
"0.5938047",
"0.5872366",
"0.5864859",
"0.5860468",
"0.58514625",
"0.5807138",
"0.5789408",
"0.57541513",
"0.57218534",
"0.57155764",
"0.5712815",
"0.5711151",
"0.56890386",
"0.5687685",
"0.5685194",
"0.56844556",
"0.5680255"
] |
0.8160578
|
0
|
Test surf on regularly spaced coordinates like MayaVi.
|
def test_surf():
    def f(x, y):
        sin, cos = numpy.sin, numpy.cos
        return sin(x + y) + sin(2 * x - y) + cos(3 * x + 4 * y)
    x, y = numpy.mgrid[-7.:7.05:0.1, -5.:5.05:0.05]
    s = surf(x, y, f)
    mlab.show()
    #cs = contour_surf(x, y, f, contour_z=0)
    return
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def drawRegularSurface(matrix, nx, ny, xinterp, yinterp):\n dislin.surmat(matrix, nx, ny, xinterp, yinterp)",
"def surfcut_points(**kwargs):\n npoints = kwargs.get( 'npoints', 240 )\n origin = kwargs.get( 'origin', vec3(0.,0.,0.)) \n normal = kwargs.get( 'normal', (np.pi/2., 0.) ) \n lims0 = kwargs.get( 'lims0', (-50., 50.) ) \n lims1 = kwargs.get( 'lims1', (-50., 50.) ) \n extents = kwargs.get( 'extents', None) \n \n if extents is not None:\n lims0 = (-extents, extents)\n lims1 = (-extents, extents)\n \n # Make the unit vectors that define the plane\n unit = vec3()\n th = normal[0]\n ph = normal[1]\n unit.set_spherical( 1, th, ph) \n orth0 = vec3( -1.*np.sin(ph), np.cos(ph), 0. )\n orth1 = cross(unit,orth0)\n \n t0 = np.linspace( lims0[0], lims0[1], npoints )\n t1 = np.linspace( lims1[0], lims1[1], npoints ) \n \n # Obtain points on which function will be evaluated\n T0,T1 = np.meshgrid(t0,t1)\n X = origin[0] + T0*orth0[0] + T1*orth1[0] \n Y = origin[1] + T0*orth0[1] + T1*orth1[1]\n Z = origin[2] + T0*orth0[2] + T1*orth1[2] \n \n\n # If given an axes it will plot the reference surface to help visusalize\n # the surface cut\n \n # Note that the axes needs to be created with a 3d projection. \n # For example: \n # fig = plt.figure( figsize=(4.,4.) ) \n # gs = matplotlib.gridspec.GridSpec( 1,1 ) \n # ax0 = fig.add_subplot( gs[0,0], projection='3d' ) \n \n ax0 = kwargs.get( 'ax0', None ) \n if ax0 is not None: \n\n # Plot the reference surface\n ax0.plot_surface(X, Y, Z, rstride=8, cstride=8, alpha=0.3, linewidth=0.)\n ax0.set_xlabel('X')\n ax0.set_ylabel('Y')\n ax0.set_zlabel('Z')\n lmin = min([ ax0.get_xlim()[0], ax0.get_ylim()[0], ax0.get_zlim()[0] ] )\n lmax = max([ ax0.get_xlim()[1], ax0.get_ylim()[1], ax0.get_zlim()[1] ] )\n ax0.set_xlim( lmin, lmax )\n ax0.set_ylim( lmin, lmax )\n ax0.set_zlim( lmin, lmax )\n ax0.set_yticklabels([])\n ax0.set_xticklabels([])\n ax0.set_zticklabels([])\n \n # If given an axes and a potential it will plot the surface cut of the \n # potential \n\n ax1 = kwargs.get( 'ax1', None) \n pot = kwargs.get( 'potential', None) \n\n if (ax1 is not None) and (pot is not None):\n # Evaluate function at points and plot\n EVAL = pot.evalpotential(X,Y,Z)\n\n im =ax1.pcolormesh(T0, T1, EVAL, cmap = plt.get_cmap('jet')) \n # cmaps: rainbow, jet\n\n plt.axes( ax1)\n cbar = plt.colorbar(im)\n cbar.set_label(pot.unitlabel, rotation=0 )#self.unitlabel\n \n return T0, T1, X, Y, Z",
"def drawIrregularSurface(xlist, ylist, zmatrix):\n dislin.surfce(xlist, len(xlist), ylist, len(ylist), zmatrix)",
"def return_surf_xyz(self, layername: str = 'depth', pcolormesh: bool = True):\n\n if self.is_vr:\n raise NotImplementedError(\"VR surfacing doesn't currently return gridded data arrays yet, have to figure this out\")\n\n surf, new_mins, new_maxs = self.get_layer_trimmed(layername)\n valid_nodes = ~np.isnan(surf)\n if not pcolormesh: # get the node locations for each cell\n x = (np.arange(self.mins[0], self.maxs[0], self.min_grid_size) + self.min_grid_size / 2)[new_mins[0]:new_maxs[0]]\n y = (np.arange(self.mins[1], self.maxs[1], self.min_grid_size) + self.min_grid_size / 2)[new_mins[1]:new_maxs[1]]\n else: # get the cell boundaries for each cell, will be one longer than the node locations option (this is what matplotlib pcolormesh wants)\n x = np.arange(self.mins[0], self.maxs[0], self.min_grid_size)[new_mins[0]:new_maxs[0] + 1]\n y = np.arange(self.mins[1], self.maxs[1], self.min_grid_size)[new_mins[1]:new_maxs[1] + 1]\n\n return x, y, surf, valid_nodes, new_mins, new_maxs",
"def draw_f():\n fig = plt.figure()\n ax = fig.gca(projection='3d')\n x_matrix = np.arange(-10, 11, 0.1)\n y_matrix = np.arange(-10, 11, 0.1)\n x_matrix, y_matrix = np.meshgrid(x_matrix, y_matrix)\n # print(x_matrix)\n u_matrix = x_matrix.copy()\n for i in range(x_matrix.shape[0]):\n for j in range(x_matrix.shape[0]):\n u_matrix[i][j] = f(x_matrix[i][j], y_matrix[i][j])\n surf = ax.plot_surface(x_matrix, y_matrix, u_matrix)\n\n plt.show()\n return surf",
"def test_outside_grid(dataset):\n\n tf = Delft3D_Mudflats(dataset, dry_depth=-10000) # make sure nothing is dry\n\n points = ((3.5, 54.0), # off\n (7.5, 53.4), # off\n (6.0, 52.0), # off\n (5.3, 53.3), # on\n (5.2, 53.25), # on\n )\n\n time = datetime(2009, 1, 15, 0)\n\n result = tf.is_dry(points, time)\n\n print \"results\", result\n\n assert list(result) == [True, True, True, False, False]",
"def qsurface(zmatrix, nx, ny):\n dislin.qplsur(zmatrix, ny, ny)",
"def test_surface_normal(self):\n vertices = np.array([[0, 1, 0], [0, 0, 0], [1, 0, 0]])\n expected = np.array([0, 0, 1])\n np.testing.assert_almost_equal(surface_normal(vertices), expected)\n\n # Test against multiple triangles\n vertices = np.r_[vertices[np.newaxis, :, :], [[[0, 0, 0], [0, 2, 0], [2, 0, 0]]]]\n expected = np.array([[0, 0, 1], [0, 0, -1]])\n np.testing.assert_almost_equal(surface_normal(vertices), expected)\n\n # Some real data\n vertices = np.array([[2.435, -1.82, -0.53], [2.635, -2., -0.58], [2.535, -1.7, -0.58]])\n expected = np.array([0.33424239, 0.11141413, 0.93587869])\n np.testing.assert_almost_equal(surface_normal(vertices), expected)\n\n # Test input validation\n self.assertRaises(ValueError, surface_normal, np.array([[1, 2, 3, 4]]))",
"def py_SurfStatInflate(surf, w=0.5, spherefile=None):\n \n v = surf['coord'].shape[1]\n \n if v <= 81924:\n # MATLAB RAPPING FOR *obj FILE READ IN --> has to be changed...\n if spherefile is None:\n spherefile = 'sphere.obj'\n sphere_mat = eng.SurfStatReadSurf(spherefile)\n sphere = {}\n sphere['tri'] = np.array(sphere_mat['tri']) \n sphere['coord'] = np.array(sphere_mat['coord'])\n \n if v == 81924:\n sphere['tri'] = np.concatenate((sphere['tri'],\n sphere['tri']+v), axis=1)\n col1 = sphere['coord'][0,:] * (sphere['coord'][0,:] < 0)\n col2 = -1 *sphere['coord'][0,:] * (sphere['coord'][0,:] < 0)\n x = np.concatenate((col1,col2))\n x = x.reshape(1, len(x))\n row2 = row3 = sphere['coord'][1:3,:]\n y = np.concatenate((row2,row3), axis=1)\n sphere['coord'] = np.concatenate((x,y))\n else:\n if surf['coord'][0,:].mean()/abs(surf['coord'][0,:]).mean() <-0.5:\n row1 = sphere['coord'][0,:] * (sphere['coord'][0,:] < 0)\n row1 = row1.reshape(1, len(row1))\n sphere['coord'] = np.concatenate((row1,\n sphere['coord'][1:3,:]))\n else:\n row1 = -sphere['coord'][0,:] * (sphere['coord'][0,:] < 0) \n row1 = row1.reshape(1, len(row1))\n sphere['coord'] = np.concatenate((row1,\n sphere['coord'][1:3,:]))\n else:\n if spherefile is None:\n spherefile = 'lh.sphere'\n # MATLAB RAPPING FOR *sphere FILE READ IN --> has to be changed...\n sphere_mat = eng.SurfStatReadSurf(spherefile)\n sphere = {}\n sphere['tri'] = np.array(sphere_mat['tri'])\n sphere['coord'] = np.array(sphere_mat['coord'])\n \n if v == 327684:\n sphere['tri'] = np.concatenate((sphere['tri'],\n sphere['tri']+v), axis=1)\n col1 = sphere['coord'][0,:] * (sphere['coord'][0,:] < 0)\n col2 = sphere['coord'][0,:] * (sphere['coord'][0,:] > 0)\n x = np.concatenate((col1,col2))\n x = x.reshape(1, len(x))\n row2 = row3 = sphere['coord'][1:3,:]\n y = np.concatenate((row2,row3), axis=1)\n sphere['coord'] = np.concatenate((x,y))\n else:\n if surf['coord'][0,:].mean()/abs(surf['coord'][0,:]).mean() <-0.5:\n row1 = sphere['coord'][0,:] * (sphere['coord'][0,:] < 0)\n row1 = row1.reshape(1, len(row1))\n sphere['coord'] = np.concatenate((row1,\n sphere['coord'][1:3,:]))\n else:\n row1 = sphere['coord'][0,:] * (sphere['coord'][0,:] > 0)\n row1 = row1.reshape(1, len(row1))\n sphere['coord'] = np.concatenate((row1,\n sphere['coord'][1:3,:]))\n maxs = surf['coord'].max(1)\n mins = surf['coord'].min(1)\n maxsp = sphere['coord'].max(1)\n minsp = sphere['coord'].min(1)\n surfw = surf\n\n for i in range(0,3): \n surfw['coord'][i,:] = ((sphere['coord'][i,:] - minsp[i]) / \\\n (maxsp[i]-minsp[i]) * (maxs[i]-mins[i]) + mins[i]) * w + \\\n surf['coord'][i,:]*(1-w) \n\n return surfw",
"def compute_surfel(x, y, z, s):\n coverage = sum(s) / 8.0\n signed_dist_approx = (0.5 - (coverage / 255.0)) * 1.4\n grad_x = (s[1] + s[3] + s[5] + s[7]) - (s[0] + s[2] + s[4] + s[6])\n grad_y = (s[2] + s[3] + s[6] + s[7]) - (s[0] + s[1] + s[4] + s[5])\n grad_z = (s[4] + s[5] + s[6] + s[7]) - (s[0] + s[1] + s[2] + s[3])\n grad_mag = max(1e-5, (grad_x*grad_x + grad_y*grad_y + grad_z*grad_z)**0.5)\n pos_x = x + signed_dist_approx * (grad_x / grad_mag)\n pos_y = y + signed_dist_approx * (grad_y / grad_mag)\n pos_z = z + signed_dist_approx * (grad_z / grad_mag)\n return (pos_x, pos_y, pos_z), (grad_x, grad_y, grad_z)",
"def clashTest(self, px, py, pz, rad):\r\n radSq = rad**2\r\n # adjust for map not set at origin\r\n px -= self.unif[0]\r\n py -= self.unif[1]\r\n pz -= self.unif[2]\r\n ht = self.height/255\r\n halfw = self.width/2.0\r\n halfd = self.depth/2.0\r\n dx = self.width/self.ix\r\n dz = self.depth/self.iy\r\n\r\n # work out x and z ranges to check, x0 etc correspond with vertex indices in grid\r\n x0 = int(math.floor((halfw + px - rad)/dx + 0.5)) - 1\r\n if x0 < 0: x0 = 0\r\n x1 = int(math.floor((halfw + px + rad)/dx + 0.5)) + 1\r\n if x1 > self.ix-1: x1 = self.ix-1\r\n z0 = int(math.floor((halfd + pz - rad)/dz + 0.5)) - 1\r\n if z0 < 0: z0 = 0\r\n z1 = int(math.floor((halfd + pz + rad)/dz + 0.5)) + 1\r\n if z1 > self.iy-1: z1 = self.iy-1\r\n\r\n # go through grid around px, pz\r\n minDist, minLoc = 1000000, (0, 0)\r\n for i in xrange(x0+1, x1):\r\n for j in xrange(z0+1, z1):\r\n # use the locations stored in the one dimensional vertices matrix\r\n #generated in __init__. 3 values for each location\r\n p = j*self.ix + i # pointer to the start of xyz for i,j in the vertices array\r\n p1 = j*self.ix + i - 1 # pointer to the start of xyz for i-1,j\r\n p2 = (j-1)*self.ix + i # pointer to the start of xyz for i, j-1\r\n vertp = self.buf[0].vertices[p]\r\n normp = self.buf[0].normals[p]\r\n # work out distance squared from this vertex to the point\r\n distSq = (px - vertp[0])**2 + (py - vertp[1])**2 + (pz - vertp[2])**2\r\n if distSq < minDist: # this vertex is nearest so keep a record\r\n minDist = distSq\r\n minLoc = (i, j)\r\n #now find the distance between the point and the plane perpendicular\r\n #to the normal at this vertex\r\n pDist = dot([px - vertp[0], py - vertp[1], pz - vertp[2]],\r\n [-normp[0], -normp[1], -normp[2]])\r\n #and the position where the normal from point crosses the plane\r\n xIsect = px - normp[0]*pDist\r\n zIsect = pz - normp[2]*pDist\r\n\r\n #if the intersection point is in this rectangle then the x,z values\r\n #will lie between edges\r\n if xIsect > self.buf[0].vertices[p1][0] and \\\r\n xIsect < self.buf[0].vertices[p][0] and \\\r\n zIsect > self.buf[0].vertices[p2][2] and \\\r\n zIsect < self.buf[0].vertices[p][2]:\r\n pDistSq = pDist**2\r\n # finally if the perpendicular distance is less than the nearest so far\r\n #keep a record\r\n if pDistSq < minDist:\r\n minDist = pDistSq\r\n minLoc = (i,j)\r\n\r\n gLevel = self.calcHeight(px, pz) #check it hasn't tunnelled through by going fast\r\n if gLevel > (py-rad):\r\n minDist = py - gLevel\r\n minLoc = (int((x0+x1)/2), int((z0+z1)/2))\r\n\r\n if minDist <= radSq: #i.e. near enough to clash so return normal\r\n p = minLoc[1]*self.ix + minLoc[0]\r\n normp = self.buf[0].normals[p]\r\n if minDist < 0:\r\n jump = rad - minDist\r\n else:\r\n jump = 0\r\n return(True, normp[0], normp[1], normp[2], jump)\r\n else:\r\n return (False, 0, 0, 0, 0)",
"def test_surface_feature(self):\n\n # Fully valid image\n sf1 = SurfaceFeature(1, 1, 2, 2, 'dummy_wkt_string', 0.5, 'dummy_id')\n sf1.determine_quadkey()\n\n self.assertEqual(sf1.quadkey, '3000000')",
"def test_voxel(self):\n for m in [g.get_mesh('featuretype.STL'),\n g.trimesh.primitives.Box(),\n g.trimesh.primitives.Sphere()]:\n for pitch in [.1, .1 - g.tol.merge]:\n surface = m.voxelized(pitch=pitch)\n\n # make sure the voxelized pitch is similar to passed\n assert g.np.allclose(surface.pitch, pitch)\n\n for fill_method in ('base', 'orthographic'):\n solid = surface.copy().fill(method=fill_method)\n\n assert len(surface.encoding.dense.shape) == 3\n assert surface.shape == surface.encoding.dense.shape\n assert surface.volume > 0.0\n\n assert isinstance(surface.filled_count, int)\n assert surface.filled_count > 0\n\n box_surface = surface.as_boxes()\n box_solid = solid.as_boxes()\n\n assert isinstance(box_surface, g.trimesh.Trimesh)\n assert abs(box_solid.volume - solid.volume) < g.tol.merge\n\n assert g.trimesh.util.is_shape(\n surface.sparse_indices, (-1, 3))\n assert len(\n solid.sparse_indices) >= len(\n surface.sparse_indices)\n assert solid.sparse_indices.shape == solid.points.shape\n outside = m.bounds[1] + m.scale\n for vox in surface, solid:\n assert vox.sparse_indices.shape == vox.points.shape\n assert g.np.all(vox.is_filled(vox.points))\n assert not vox.is_filled(outside)\n\n try:\n cubes = surface.marching_cubes\n assert cubes.area > 0.0\n except ImportError:\n g.log.info('no skimage, skipping marching cubes test')\n\n g.log.info('Mesh volume was %f, voxelized volume was %f',\n m.volume,\n surface.volume)",
"def test_form_near_clipping_plane() -> None:\n width_px = 10\n height_px = 15\n near_clip_dist = 30.0\n\n pinhole_camera = _create_pinhole_camera(\n fx_px=1,\n fy_px=0,\n cx_px=0,\n cy_px=0,\n height_px=30,\n width_px=width_px,\n cam_name=\"ring_front_center\",\n )\n near_plane = pinhole_camera.near_clipping_plane(near_clip_dist)\n\n points_xyz: NDArrayFloat = np.array(\n [\n [width_px / 2, 0, near_clip_dist],\n [-width_px / 2, 0, near_clip_dist],\n [width_px / 2, -height_px / 2.0, near_clip_dist],\n [width_px / 2, height_px / 2.0, near_clip_dist],\n ]\n )\n\n a, b, c, d = _fit_plane_to_point_cloud(points_xyz)\n near_plane_expected: NDArrayFloat = np.array([a, b, c, d])\n\n assert np.allclose(near_plane, near_plane_expected)",
"def test_frustum_planes_ring_cam() -> None:\n near_clip_dist = 6.89 # arbitrary value\n\n # Set \"focal_length_x_px_\"\n fx_px = 1402.4993697398709\n\n # Set \"focal_length_y_px_\"\n fy_px = 1405.1207294310225\n\n # Set \"focal_center_x_px_\"\n cx_px = 957.8471720086527\n\n # Set \"focal_center_y_px_\"\n cy_px = 600.442948946496\n\n camera_name = \"ring_front_right\"\n height_px = 1550\n width_px = 2048\n\n pinhole_camera = _create_pinhole_camera(\n fx_px=fx_px,\n fy_px=fy_px,\n cx_px=cx_px,\n cy_px=cy_px,\n height_px=height_px,\n width_px=width_px,\n cam_name=camera_name,\n )\n (\n left_plane,\n right_plane,\n near_plane,\n bottom_plane,\n top_plane,\n ) = pinhole_camera.frustum_planes(near_clip_dist)\n\n left_plane_expected: NDArrayFloat = np.array([fx_px, 0.0, width_px / 2.0, 0.0])\n right_plane_expected: NDArrayFloat = np.array([-fx_px, 0.0, width_px / 2.0, 0.0])\n near_plane_expected: NDArrayFloat = np.array([0.0, 0.0, 1.0, -near_clip_dist])\n bottom_plane_expected: NDArrayFloat = np.array([0.0, -fx_px, height_px / 2.0, 0.0])\n top_plane_expected: NDArrayFloat = np.array([0.0, fx_px, height_px / 2.0, 0.0])\n\n assert np.allclose(\n left_plane, left_plane_expected / np.linalg.norm(left_plane_expected)\n )\n assert np.allclose(\n right_plane, right_plane_expected / np.linalg.norm(right_plane_expected)\n )\n assert np.allclose(\n bottom_plane, bottom_plane_expected / np.linalg.norm(bottom_plane_expected)\n )\n assert np.allclose(\n top_plane, top_plane_expected / np.linalg.norm(top_plane_expected)\n )\n assert np.allclose(near_plane, near_plane_expected)",
"def screenToCamera(self,x,y):\n #self.x = x\n #self.y = y\n new_x = x / (self.surf.get_width() - 1) - 0.5\n #-(new_x)\n new_y = y / (self.surf.get_height() - 1)\n new_y = (1.0 - cy) - 0.5\n new_z = -self.camNear\n formula = math3dsol.VectorN((new_x,new_y,new_z))\n return formula\n\n # FINISH ME!!!",
"def clashTest(self, px, py, pz, rad):\n radSq = rad**2\n # adjust for map not set at origin\n px -= self.unif[0]\n py -= self.unif[1]\n pz -= self.unif[2]\n ht = self.height/255\n halfw = self.width/2.0\n halfd = self.depth/2.0\n dx = self.width/self.ix\n dz = self.depth/self.iy\n\n # work out x and z ranges to check, x0 etc correspond with vertex indices in grid\n x0 = int(math.floor((halfw + px - rad)/dx + 0.5)) - 1\n if x0 < 0: x0 = 0\n x1 = int(math.floor((halfw + px + rad)/dx + 0.5)) + 1\n if x1 > self.ix-1: x1 = self.ix-1\n z0 = int(math.floor((halfd + pz - rad)/dz + 0.5)) - 1\n if z0 < 0: z0 = 0\n z1 = int(math.floor((halfd + pz + rad)/dz + 0.5)) + 1\n if z1 > self.iy-1: z1 = self.iy-1\n\n # go through grid around px, pz\n minDist, minLoc = 1000000, (0, 0)\n for i in xrange(x0+1, x1):\n for j in xrange(z0+1, z1):\n # use the locations stored in the one dimensional vertices matrix\n #generated in __init__. 3 values for each location\n p = j*self.ix + i # pointer to the start of xyz for i,j in the vertices array\n p1 = j*self.ix + i - 1 # pointer to the start of xyz for i-1,j\n p2 = (j-1)*self.ix + i # pointer to the start of xyz for i, j-1\n vertp = self.buf[0].vertices[p]\n normp = self.buf[0].normals[p]\n # work out distance squared from this vertex to the point\n distSq = (px - vertp[0])**2 + (py - vertp[1])**2 + (pz - vertp[2])**2\n if distSq < minDist: # this vertex is nearest so keep a record\n minDist = distSq\n minLoc = (i, j)\n #now find the distance between the point and the plane perpendicular\n #to the normal at this vertex\n pDist = dot([px - vertp[0], py - vertp[1], pz - vertp[2]],\n [-normp[0], -normp[1], -normp[2]])\n #and the position where the normal from point crosses the plane\n xIsect = px - normp[0]*pDist\n zIsect = pz - normp[2]*pDist\n\n #if the intersection point is in this rectangle then the x,z values\n #will lie between edges\n if xIsect > self.buf[0].vertices[p1][0] and \\\n xIsect < self.buf[0].vertices[p][0] and \\\n zIsect > self.buf[0].vertices[p2][2] and \\\n zIsect < self.buf[0].vertices[p][2]:\n pDistSq = pDist**2\n # finally if the perpendicular distance is less than the nearest so far\n #keep a record\n if pDistSq < minDist:\n minDist = pDistSq\n minLoc = (i,j)\n\n gLevel = self.calcHeight(px, pz) #check it hasn't tunnelled through by going fast\n if gLevel > (py-rad):\n minDist = py - gLevel\n minLoc = (int((x0+x1)/2), int((z0+z1)/2))\n\n if minDist <= radSq: #i.e. near enough to clash so return normal\n p = minLoc[1]*self.ix + minLoc[0]\n normp = self.buf[0].normals[p]\n if minDist < 0:\n jump = rad - minDist\n else:\n jump = 0\n return(True, normp[0], normp[1], normp[2], jump)\n else:\n return (False, 0, 0, 0, 0)",
"def semi_plan_check(coords_list, normal_plane, point_on_plane, tol=1e-8):\n center_to_coords = coords_list - \\\n np.repeat(point_on_plane.reshape((-1,3)), len(coords_list), axis=0)\n normal_plane = \\\n np.repeat(normal_plane.reshape((-1,3)), len(coords_list), axis=0)\n inner_product = np.sum(center_to_coords*normal_plane,axis=1)\n flag = np.zeros(inner_product.shape, dtype=bool)\n flag[inner_product >= 0] = True\n return flag",
"def exposed(self, position):\r\n x, y, z = position\r\n for dx, dy, dz in FACES:\r\n if (x + dx, y + dy, z + dz) not in self.world:\r\n return True\r\n return False",
"def IsSolid(self,coord):\r\n x,y=coord\r\n if x<0 or x>=self.size[0] or y<0 or y>=self.size[1]: return True \r\n return self.map[x][y].solid",
"def test_local(self):\n from trimesh.voxel import creation\n\n mesh = g.trimesh.creation.box()\n\n # it should have some stuff\n voxel = creation.local_voxelize(\n mesh=mesh,\n point=[.5, .5, .5],\n pitch=.1,\n radius=5,\n fill=True)\n\n assert len(voxel.shape) == 3\n\n # try it when it definitely doesn't hit anything\n empty = creation.local_voxelize(\n mesh=mesh,\n point=[10, 10, 10],\n pitch=.1,\n radius=5,\n fill=True)\n # shouldn't have hit anything\n assert empty is None\n\n # try it when it is in the center of a volume\n creation.local_voxelize(\n mesh=mesh,\n point=[0, 0, 0],\n pitch=.1,\n radius=2,\n fill=True)",
"def mV_surf(T):\n A = erf(T**.5)/2**.5\n return A",
"def surface(*args, degreeU: int=0, degreeV: int=0, formU: AnyStr=\"\", formV: AnyStr=\"\", knotU:\n Union[float, List[float]]=0.0, knotV: Union[float, List[float]]=0.0, name:\n AnyStr=\"\", objectSpace: bool=True, point: Union[List[float, float, float],\n List[List[float, float, float]]]=None, pointWeight: Union[List[float, float, float,\n float], List[List[float, float, float, float]]]=None, worldSpace: bool=True,\n **kwargs)->AnyStr:\n pass",
"def test_comp_surface(self, test_dict):\n test_obj = test_dict[\"test_obj\"]\n result = test_obj.slot.comp_surface()\n\n a = result\n b = test_dict[\"S_exp\"]\n msg = \"Return \" + str(a) + \" expected \" + str(b)\n self.assertAlmostEqual((a - b) / a, 0, delta=DELTA, msg=msg)\n\n b = comp_surface(test_obj.slot)\n msg = \"Return \" + str(a) + \" expected \" + str(b)\n self.assertAlmostEqual((a - b) / a, 0, delta=DELTA, msg=msg)",
"def spherical(self, x, y):\n\t\twhile x >= self.planet.width or x < 0 or y >= self.planet.height or y < 0:\n\t\t\t#change x if x is out of boundary\n\t\t\tif x >= self.planet.width:\n\t\t\t\tx -= (self.planet.width)\n\t\t\telif x < 0:\n\t\t\t\tx += (self.planet.width)\n\t\t\t#change y if y is out of boundary\n\t\t\tif y >= self.planet.height:\n\t\t\t\ty -= (self.planet.height)\n\t\t\telif y < 0:\n\t\t\t\ty += (self.planet.height)\n\t\treturn x, y",
"def test_comp_surface_wind(self, test_dict):\n test_obj = test_dict[\"test_obj\"]\n result = test_obj.slot.comp_surface_wind()\n\n a = result\n b = test_dict[\"SW_exp\"]\n msg = \"Return \" + str(a) + \" expected \" + str(b)\n self.assertAlmostEqual((a - b) / a, 0, delta=DELTA, msg=msg)",
"def test_RawRun_perspective_reference_old_style():\n ref = (0, 0)\n m = config.ideal_m\n grid = r.perspective_reference(ref, 'old', 'cam1')\n lower_right, upper_right, lower_left, upper_left = grid\n assert_equal((0, 0), lower_right)\n assert_equal((ref[0] - int(1.47 * m), ref[1] - int(0.25 * m)), upper_left)",
"def populate_frustum_voxels(planes: List[np.ndarray], fig: Figure, axis_pair: str) -> Figure:\n sparse_xz_voxel_grid = get_mesh_grid_as_point_cloud(-20, 20, 0, 40, downsample_factor=0.1)\n sparse_voxel_grid = np.zeros((sparse_xz_voxel_grid.shape[0], 3))\n\n if axis_pair == \"xz\":\n sparse_voxel_grid[:, 0] = sparse_xz_voxel_grid[:, 0]\n sparse_voxel_grid[:, 2] = sparse_xz_voxel_grid[:, 1]\n elif axis_pair == \"yz\":\n sparse_voxel_grid[:, 1] = sparse_xz_voxel_grid[:, 0]\n sparse_voxel_grid[:, 2] = sparse_xz_voxel_grid[:, 1]\n\n # keep only the points that have signed distance > 0 (inside the frustum, plane\n # normals also point into the frustum)\n for plane in planes:\n signed_d = np.matmul(sparse_voxel_grid, plane[:3]) + plane[3]\n sparse_voxel_grid = sparse_voxel_grid[np.where(signed_d > 0)]\n\n plot_points_3D_mayavi(sparse_voxel_grid, fig, fixed_color=(1, 0, 0))\n return fig",
"def test_surface_one_forms(self, faces, point):\n space = self.Space(faces=faces)\n\n result = space.surface_one_forms(point=point)\n assert result.shape == (space.n_faces, 2, 3), result.shape\n\n first_vec = result[:, 0, :]\n second_vec = result[:, 1, :]\n inner_prods = gs.einsum(\"ni,ni->n\", first_vec, second_vec)\n result = [prod in [0.0, 4.0] for prod in inner_prods]\n assert gs.all(result)\n\n singleton_point = gs.expand_dims(point, axis=0)\n result = space.surface_one_forms(point=singleton_point)\n assert result.shape == (1, space.n_faces, 2, 3)\n\n point = gs.array([point, point])\n result = space.surface_one_forms(point=point)\n assert result.shape == (2, space.n_faces, 2, 3)\n\n first_vec = result[:, :, 0, :]\n second_vec = result[:, :, 1, :]\n inner_prods = gs.einsum(\"mni,mni->mn\", first_vec, second_vec)\n result = []\n for inner_prod in inner_prods:\n result.append([prod in [0.0, 4.0] for prod in inner_prod])\n assert gs.all(result)",
"def locate_on_surface(sx, sy, sz, t, x, y, zz, v=1500):\n # Check sizes\n assert (len(sx) == len(sy)) and (len(sx) == len(sz))\\\n and (len(sx) == len(t)),\\\n 'Source position and time arrays must all be of the same size.'\n assert np.shape(zz) == (len(x), len(y)),\\\n 'shape(zz) must equal (len(x), len(y)).'\n # Calculate RMS for each grid node\n rms = [] \n pos = []\n for ix, _x in enumerate(x):\n for iy, _y in enumerate(y):\n _t = slant_time(sx, sy, sz, _x, _y, zz[ix, iy], v=v)\n rms.append(np.sqrt(np.sum(np.power(_t - t, 2))))\n pos.append([_x, _y, zz[ix, iy]])\n i = np.argmin(rms)\n return pos[i] + [rms[i]]"
] |
[
"0.63351434",
"0.5809967",
"0.5588674",
"0.5506265",
"0.54521286",
"0.5335231",
"0.52462965",
"0.5227828",
"0.522159",
"0.52122545",
"0.5200661",
"0.51747805",
"0.5165662",
"0.51652",
"0.51571435",
"0.51357627",
"0.5126349",
"0.5109323",
"0.5098706",
"0.5086226",
"0.50475496",
"0.50441563",
"0.5041154",
"0.5033483",
"0.5030029",
"0.5029244",
"0.50208515",
"0.49990535",
"0.49970374",
"0.49882862"
] |
0.69315696
|
0
|
Initializes the data taken from completeData.csv and formattedXValues.csv. Note that these must be the names of the CSV files in your folder.
|
import numpy as np
import pandas as pd


def initializeData():
# Read in the CSV
allX = pd.read_csv('completeData.csv', keep_default_na=False)
xValues = pd.read_csv('formattedXValues.csv')
filename = "completeData.csv and formattedXValues.csv"
# Separate the CSV columns into array variables and numpy vars to store new categorical variables
mixNum = allX['Mix Number']
mixP = allX['Mix Proportion']
mixPFinal = np.empty(len(mixP))
scm = allX['SCM']
scmFinal = np.empty(len(scm))
fineA = allX['Fine Aggregate']
fineAFinal = np.empty(len(fineA))
coarseA = allX['Coarse Aggregate']
coarseAFinal = np.empty(len(coarseA))
# Loop through every mix in the csv file
# Not sure how to do 3 different variables
for y in range(0, len(mixNum)):
# Sort Mix Proportions
if mixP[y] == "A-F":
mixPFinal[y] = 2
elif mixP[y] == "A-S":
mixPFinal[y] = 1
elif mixP[y] == "A":
mixPFinal[y] = 0
else:
print('Unidentified Variable in mixP: ')
print(mixP[y])
# Sort SCM into slag or fly ash
if scm[y] == 'N/A':
scmFinal[y] = 1000
elif scm[y] == 'Slag 1':
scmFinal[y] = 0
elif scm[y] == 'Slag 2':
scmFinal[y] = 0
elif scm[y] == 'Fly Ash 1':
scmFinal[y] = 1
elif scm[y] == 'Fly Ash 2':
scmFinal[y] = 1
elif scm[y] == 'Fly Ash 3':
scmFinal[y] = 1
else:
print('Unidentified Variable in scm: ')
print(scm[y])
# Sort the fine aggregate
if fineA[y] == 'Sand A':
fineAFinal[y] = 0
elif fineA[y] == 'Sand B':
fineAFinal[y] = 1
else:
print('Unidentified Variable in fineA: ')
print(fineA[y])
# Sort the coarse aggregate
if coarseA[y] == 'GG1':
coarseAFinal[y] = 0
elif coarseA[y] == 'GG2':
coarseAFinal[y] = 0
elif coarseA[y] == 'GG3':
coarseAFinal[y] = 0
elif coarseA[y] == 'GG4':
coarseAFinal[y] = 0
elif coarseA[y] == 'GG5':
coarseAFinal[y] = 0
elif coarseA[y] == 'GG6':
coarseAFinal[y] = 0
elif coarseA[y] == 'CS1':
coarseAFinal[y] = 1
elif coarseA[y] == 'CS2':
coarseAFinal[y] = 1
elif coarseA[y] == 'CS3':
coarseAFinal[y] = 1
elif coarseA[y] == 'CS4':
coarseAFinal[y] = 1
elif coarseA[y] == 'CS5':
coarseAFinal[y] = 1
elif coarseA[y] == 'CS6':
coarseAFinal[y] = 1
elif coarseA[y] == 'CS7':
coarseAFinal[y] = 1
elif coarseA[y] == 'CS8':
coarseAFinal[y] = 1
elif coarseA[y] == 'CS9':
coarseAFinal[y] = 1
else:
print('Unidentified Variable in coarseA: ')
print(coarseA[y])
# One Hot Encode the sorted variables
encodedMixP = pd.get_dummies(mixPFinal)
encodedSCM = pd.get_dummies(scmFinal)
encodedFineA = pd.get_dummies(fineAFinal)
encodedCoarseA = pd.get_dummies(coarseAFinal)
# Update the headers for onehotencoded variables
# Get the current variable names
encodedSCMlist = list(encodedSCM.columns.values)
encodedFineAlist = list(encodedFineA.columns.values)
encodedCoarseAlist = list(encodedCoarseA.columns.values)
encodedMixPlist = list(encodedMixP.columns.values)
# go through and replace the current names with the updated ones
encodedSCM.rename(columns={encodedSCMlist[0]: 'SCM_0', encodedSCMlist[1]: 'SCM_1', encodedSCMlist[2]: 'SCM_1000'},
inplace=True)
encodedFineA.rename(columns={encodedFineAlist[0]: 'FineA_0', encodedFineAlist[1]: 'FineA_1'}, inplace=True)
encodedCoarseA.rename(columns={encodedCoarseAlist[0]: 'CoarseA_0', encodedCoarseAlist[1]: 'CoarseA_1'},
inplace=True)
encodedMixP.rename(columns={encodedMixPlist[0]: 'MixP_0', encodedMixPlist[1]: 'MixP_1', encodedMixPlist[2]: 'MixP_2'},
inplace=True)
# Remake the dataframe to include the onehotencoded columns instead of the regular columns.
    # use .iloc for positional indexing (the .ix indexer has been removed from pandas)
    firstHalf = allX.iloc[:, :21]
    cte = allX.iloc[:, 25]
    oneHotEncodedframe = pd.concat([encodedMixP, encodedSCM, encodedFineA, encodedCoarseA], axis=1)
    secondHalf = xValues.iloc[:, 6:]
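    # Resulting column order: the first 21 columns of completeData, the CTE column (column 25),
    # the one-hot encoded categorical variables, then columns 6 onward of formattedXValues.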
completearray = pd.concat([firstHalf, cte, oneHotEncodedframe, secondHalf], axis=1)
variablenames = list(completearray.columns.values)
# convert to numpy array
    completenumpyarray = completearray.to_numpy()  # as_matrix() has been removed from pandas
# remove the first 15 rows in the array to clear the NaN entries
completenumpyarray = completenumpyarray[15:, :]
# Also, remove the columns that include mix A as well as SCM_1000
#####
# Now, Ask whether or not to run decision trees on batch A data or batch B
batch = input("which batch to run tests on (A or B)? ")
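    # Batch A and batch B select different response (y) and predictor (x) column subsets of the assembled array.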
if batch == "A":
# break up the data into the batch A values
batchAYcolumns = [0, 5, 6, 7, 8, 21]
yvariables = np.transpose(completenumpyarray[:, batchAYcolumns])
numyvariables = 6
yvariablenames = [variablenames[x] for x in batchAYcolumns]
batchAXcolumns = [23, 24, 25, 26, 28, 29, 30, 31, 32, 35, 38, 41]
# normalize the x variables. Will normalize y variables in the main body
# after a histogram of the data is created.
xvariables = completenumpyarray[:, batchAXcolumns]
# Normalize each of the x variables
# get number of columns of x variables
xVariablesShape = xvariables.shape
        # index through each of the columns and compute its mean and standard deviation
for p in range(0, xVariablesShape[1]):
x_mean = xvariables[:, p].mean()
x_std = xvariables[:, p].std()
            # standardize the column: subtract the mean and divide by the standard deviation (z-score)
xvariables[:, p] = (xvariables[:, p] - x_mean) / x_std
xvariablenames = [variablenames[x] for x in batchAXcolumns]
elif batch == "B":
# break up the data into the batch B values
batchBYcolumns = [0, 1, 2, 3, 4, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20]
yvariables = np.transpose(completenumpyarray[:, batchBYcolumns])
numyvariables = 17
yvariablenames = [variablenames[x] for x in batchBYcolumns]
batchBXcolumns = [23, 24, 25, 26, 28, 29, 30, 31, 33, 36, 39, 42]
# normalize the x variables. Will normalize y variables in the main body
# after a histogram of the data is created.
xvariables = completenumpyarray[:, batchBXcolumns]
# Normalize each of the x variables
# get number of columns of x variables
xVariablesShape = xvariables.shape
        # index through each of the columns and compute its mean and standard deviation
for p in range(0, xVariablesShape[1]):
x_mean = xvariables[:, p].mean()
x_std = xvariables[:, p].std()
            # standardize the column: subtract the mean and divide by the standard deviation (z-score)
xvariables[:, p] = (xvariables[:, p] - x_mean) / x_std
xvariablenames = [variablenames[x] for x in batchBXcolumns]
else:
print("Invalid Input.")
exit(0)
return completenumpyarray, xvariables, filename, xvariablenames, yvariablenames, numyvariables, yvariables, batch
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def data_input(self):\n path = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', 'data'))\n if not os.path.isfile('{0}/{1}.csv'.format(path, self.data_file)):\n print 'Error: Dataset file is not exist.'\n exit()\n # Uplead Dataset.csv file.\n f = open('{0}/{1}.csv'.format(path, self.data_file), 'r')\n print 'Now uploading dataset File.....'\n f = list(f)\n # The Dataset contains heading, number of lines - heading\n self.number_of_VOCs = sum(1 for row in f)-1\n # Count number of columns, last column's value is empty, that is why -1.\n self.number_of_columns = len(f[0].split(',')) -1\n self.first_m_z = int(f[0].split(',')[3]) # find the first m/z value.\n self.last_m_z = int(f[0].split(',')[-2]) # find the last m/z value.\n print 'dataset includes ', self.number_of_VOCs, 'VOCs in all samples '\n print ('dataset includes ', self.number_of_columns, ' Columns, ',\n 'm/z values start from ', self.first_m_z,\n 'and end ', self.last_m_z)\n # Create a matrix with a shape of (number_of_VOCs X number_of_columns) filled with zeros.\n self.dataset = np.zeros((self.number_of_VOCs,\n self.number_of_columns))\n for line in range(1, len(f)):\n if int(float(f[line].strip().split(',')[0])) not in self.loaded_samples:\n self.loaded_samples.append(int(float(f[line].strip().split(',')[0])))\n for column in range(self.number_of_columns):\n self.dataset[line-1][column] = int(float(f[line].strip().split(',')[column]))",
"def loadData (x_file=\"../ass1_data/logisticX.csv\", y_file=\"../logisticY.csv\"):\n\n X = np.genfromtxt(x_file, delimiter=',')\n Y = np.genfromtxt(y_file, dtype=int)\n\n return (X, Y)",
"def __init__(self, data):\n\n self.produce_csv = data['produce_csv']\n self.produce_graphics = data['produce_graphics']\n self.report_name = data['report_name']\n self.file_name = self.report_name + '.csv'\n self.annual_file_name = self.report_name + '_annual.csv'\n self.csv_dir = ''\n self.diagnostic_dir = ''\n\n self.daily_variables = {\n 'year': ['time.cal_year', '', []],\n 'j_day': ['time.day', '', []]\n }\n\n self.annual_variables = {\n 'year': ['time.cal_year', '', 0]\n }",
"def initialiseData(self):\n self.currentPosition = 0\n self.xs = scipy.linspace(0.0, self.numberOfPoints*self.resolution, self.numberOfPoints)\n self.cursorXS = self.getCurrentPositionArray()\n self.cursorVertical = scipy.array([self.verticalLimit,0.0])\n self.array0 = scipy.zeros(self.numberOfPoints)\n self.array1 = scipy.zeros(self.numberOfPoints)\n self.array2 = scipy.zeros(self.numberOfPoints)\n self.array3 = scipy.zeros(self.numberOfPoints)\n self.array4 = scipy.zeros(self.numberOfPoints)\n self.array5 = scipy.zeros(self.numberOfPoints)\n self.array6 = scipy.zeros(self.numberOfPoints)\n self.array7 = scipy.zeros(self.numberOfPoints)\n self.channels = [self.array0,self.array1,self.array2,self.array3,\n self.array4,self.array5,self.array6,self.array7]\n self.arrayPlotData = chaco.ArrayPlotData(xs=self.xs,channel0=self.array0,channel1=self.array1,\n channel2=self.array2,channel3=self.array3,\n channel4=self.array4,channel5=self.array5,\n channel6=self.array6,channel7=self.array7,\n cursorXS = self.cursorXS, cursorVertical=self.cursorVertical)#will be the ArrayPlotData We need",
"def load_train_data():\n\n # Load X_train\n with open('X_train.csv') as csvfile:\n reader = csv.DictReader(csvfile)\n feature_string_matrix = []\n for row in reader:\n feature_list = []\n for i in range(TRAIN_N):\n x_value = row['x' + str(i)]\n # Hit missing values\n if x_value == '':\n feature_list.append(np.nan)\n else:\n feature_list.append(float(row['x' + str(i)]))\n feature_string_matrix.append(feature_list)\n X_train = np.array(feature_string_matrix)\n # Load Y_train\n with open('y_train.csv') as csvfile:\n reader = csv.DictReader(csvfile)\n y_string = []\n for row in reader:\n y_value = [float(row['y'])]\n y_string.append(y_value)\n y_train = np.array(y_string)\n return X_train, y_train",
"def _init_from_DataArrays(self, data, validate=True):\n self._data_vars = self._DataArrays_as_mapping(data)\n\n if (len(self) > 1) and validate:\n first = self[0]\n for i in range(1, len(self)):\n da = self[i]\n first._is_compatible(da, raise_error=True)\n\n self._check_all_different_ids(self._data_vars.values())\n\n self.__itemattr = []\n for key, value in self._data_vars.items():\n self._set_name_attr(key, value)\n\n self.plot = _DatasetPlotter(self)\n\n if len(self) > 0:\n self._set_spectral_attributes(self.geometry)\n\n # since Dataset is MutableMapping it has values and keys by default\n # but we delete those to avoid confusion\n # self.values = None\n self.keys = None",
"def load_test_data():\n\n # Load X_test\n with open('X_test.csv') as csvfile:\n reader = csv.DictReader(csvfile)\n feature_string_matrix = []\n for row in reader:\n feature_list = []\n for i in range(TEST_N):\n x_value = row['x' + str(i)]\n # Hit missing values\n if x_value == '':\n feature_list.append(np.nan)\n else:\n feature_list.append(float(row['x' + str(i)]))\n feature_string_matrix.append(feature_list)\n X_test = np.array(feature_string_matrix)\n return X_test",
"def loadData():\n #dictionary for datasets\n data = {'twcr': [\"twcr19802010Validation.csv\", \"20CR\"],\n 'era20c': [\"era20c19802010Validation.csv\", \"ERA20C\"],\n 'eraint':[\"eraint19802010Validation.csv\", \"ERA-Interim\"],\n 'merra': [\"merra19802010Validation.csv\", \"MERAA\"],\n 'erafive': [\"erafive19802010Validation.csv\", \"ERA-FIVE\"]\n }\n os.chdir(\"G:\\\\data\\\\allReconstructions\\\\validation\\\\commonPeriodValidation\")\n\n twcrDat = pd.read_csv(data['twcr'][0])\n twcrDat.columns = ['deleteIt','tg', 'lon', 'lat', 'reanalysis', 'corrTwcr', 'rmseTwcr', 'nseTwcr']\n era20cDat = pd.read_csv(data['era20c'][0])\n era20cDat.columns = ['deleteIt','tg', 'long', 'latt', 'reanalysis', 'corrEra20c', 'rmseEra20c', 'nseEra20c']\n eraintDat = pd.read_csv(data['eraint'][0])\n eraintDat.columns = ['deleteIt','tg', 'long', 'latt', 'reanalysis', 'corrEraint', 'rmseEraint', 'nseEraint']\n merraDat = pd.read_csv(data['merra'][0])\n merraDat.columns = ['deleteIt','tg', 'long', 'latt', 'reanalysis', 'corrMerra', 'rmseMerra', 'nseMerra']\n erafiveDat = pd.read_csv(data['erafive'][0])\n erafiveDat.columns = ['deleteIt','tg', 'long', 'latt', 'reanalysis', 'corrErafive', 'rmseErafive', 'nseErafive']\n\n\n return twcrDat, era20cDat, eraintDat, merraDat, erafiveDat",
"def import_data(self):\n\n # Import ordered names of origins\n origins_file = os.path.join(self.data_directory,'origins.txt')\n self.origins = np.loadtxt(origins_file,dtype=str,ndmin=1)\n\n # Import ordered names of destinations\n destinations_file = os.path.join(self.data_directory,'destinations.txt')\n self.destinations = np.loadtxt(destinations_file,dtype=str,ndmin=1)\n\n # Import origin supply\n originsupply_file = os.path.join(self.data_directory,'origin_supply.txt')\n self.origin_supply = np.loadtxt(originsupply_file,ndmin=1).astype('float64')\n\n # In case origin supply is not a list\n if not isinstance(self.origin_supply,(np.ndarray, np.generic)):\n self.origin_supply = np.array([self.origin_supply])\n\n # Import destination demand\n destinationdemand_file = os.path.join(self.data_directory,'destination_demand.txt')\n self.destination_demand = np.loadtxt(destinationdemand_file,ndmin=1).astype('float64')\n\n # In case destination demand is not a list\n if not isinstance(self.destination_demand,(np.ndarray, np.generic)):\n self.destination_demand = np.array([self.destination_demand])\n\n # Import origin locations\n originlocations_file = os.path.join(self.data_directory,'origin_locations.txt')\n self.origin_locations = np.loadtxt(originlocations_file,ndmin=1)\n\n # Import destination locations\n destinationlocations_file = os.path.join(self.data_directory,'destination_locations.txt')\n self.destination_locations = np.loadtxt(destinationlocations_file,ndmin=1)\n\n # Import initial and final destination sizes\n initialdestinationsizes_file = os.path.join(self.data_directory,'initial_destination_sizes.txt')\n self.initial_destination_sizes = np.loadtxt(initialdestinationsizes_file,ndmin=1)\n\n # In case destination sizes are not a list\n if not isinstance(self.initial_destination_sizes,(np.ndarray, np.generic)):\n self.initial_destination_sizes = np.array([self.initial_destination_sizes])\n\n # Import N,M\n self.N = self.origin_supply.shape[0]\n self.M = self.initial_destination_sizes.shape[0]\n\n # Import cost matrix\n costmatrix_file = os.path.join(self.data_directory,'cost_matrix.txt')\n self.cost_matrix = np.loadtxt(costmatrix_file).astype('float64')\n\n # Reshape cost matrix if necessary\n if self.N == 1:\n self.cost_matrix = np.reshape(self.cost_matrix[:,np.newaxis],(self.N,self.M))\n if self.M == 1:\n self.cost_matrix = np.reshape(self.cost_matrix[np.newaxis,:],(self.N,self.M))\n\n # Compute total initial and final destination sizes\n self.total_initial_sizes = np.sum(self.initial_destination_sizes)\n\n # Compute naive total cost\n self.total_cost = 0\n for i in range(self.N):\n for j in range(self.M):\n self.total_cost += self.cost_matrix[i,j]*(self.origin_supply[i]/self.N)",
"def _load_data(self):\n data_x, data_y = make_classification(n_samples=5000, n_features=20,\n n_informative=10,\n n_redundant=0, n_repeated=0,\n n_classes=2,\n n_clusters_per_class=4,\n weights=None, flip_y=0.01,\n class_sep=1.0, hypercube=True,\n shift=0.0, scale=1.0,\n shuffle=True,\n random_state=self.args.rand_seed)\n\n self.orig_column_names = np.arange(data_x.shape[-1])\n self.data_x = data_x\n self.data_y = self.to_one_hot_encoding(data_y)\n self.numerical_idx = np.arange(data_x.shape[-1])\n self.non_num_idx = None\n self.all_non_numerical_idx = None\n\n # Calculate adjacency matrix\n self.meta_inf = data_x[:, :1].astype('float32')\n if self.args.graph_type:\n self.adj = self.get_adjacency()",
"def initialize(self):\n self.muondEdx = []\n self.muondNdx = []\n self.muonmomentum = []\n self.piondEdx = []\n self.piondNdx = []\n self.pionmomentum = []\n self.kaondEdx = []\n self.kaondNdx = []\n self.kaonmomentum = []\n self.protdEdx = []\n self.protdNdx = []\n self.protmomentum = []\n self.elecdEdx = []\n self.elecdNdx = []\n self.elecmomentum = []",
"def load_data_and_labels(data_file, labels_file):\r\n x_text = []\r\n y = []\r\n \r\n with open(data_file, encoding = \"utf-8\") as csvFile:\r\n readCSV = csv.reader(csvFile, delimiter = \",\")\r\n for row in readCSV:\r\n row = \"\".join(row)\r\n x_text.append(row) \r\n \r\n with open(labels_file, encoding = \"utf-8\") as csvFile2:\r\n readCSV = csv.reader(csvFile2, delimiter = \",\")\r\n for row in readCSV:\r\n d = defaultdict(list)\r\n for k,va in [(v,i) for i,v in enumerate(row)]:\r\n d[k].append(va)\r\n \r\n for k in range(len(d.get(\"1.0\"))):\r\n index = d.get(\"1.0\")[k]\r\n row[index] = 1\r\n for k in range(len(d.get(\"0.0\"))):\r\n index = d.get(\"0.0\")[k]\r\n row[index] = 0\r\n \r\n# print(len(row))\r\n y.append(row)\r\n \r\n\r\n\r\n\r\n \r\n print(\"x = {}\".format(len(x_text)))\r\n print(\"y = {}\".format(len(y)))\r\n \r\n return x_text, y",
"def read_data(self):\r\n IS_REMAPPED = 1\r\n if IS_REMAPPED:\r\n data_as_strings = list(csv.reader(open('ring-data.csv'), delimiter=','))\r\n self.TRAINING_DATA = [[remap(float(f1),float(f2))[0],remap(float(f1),float(f2))[1],\\\r\n int(c)] for [f1, f2, c] in data_as_strings]\r\n data_as_strings = list(csv.reader(open('ring-data.csv'), delimiter=','))\r\n self.TESTING_DATA = [[remap(float(f1),float(f2))[0],remap(float(f1),float(f2))[1],\\\r\n int(c)] for [f1, f2, c] in data_as_strings]\r\n else:\r\n data_as_strings = list(csv.reader(open('ring-data.csv'), delimiter=','))\r\n self.TRAINING_DATA = [[float(f1), float(f2), int(c)] for [f1, f2, c] in data_as_strings]\r\n data_as_strings = list(csv.reader(open('ring-data.csv'), delimiter=','))\r\n self.TESTING_DATA = [[float(f1), float(f2), int(c)] for [f1, f2, c] in data_as_strings]",
"def __init__(self):\n self.A = pd.read_csv(_TFP+'/a.txt',sep=' ',header=None)[0].tolist()\n self.B = pd.read_csv(_TFP+'/b.txt',sep=' ',header=None)[0].tolist()\n assert (len(self.A) == len(self.B))\n self.load()\n pass",
"def load():\n filepath = dirname(abspath(__file__))\n##### EDIT THE FOLLOWING TO POINT TO DatasetName.csv #####\n data = recfromtxt(open(filepath + '/spector.csv',\"rb\"), delimiter=\" \",\n names=True, dtype=float, usecols=(1,2,3,4))\n names = list(data.dtype.names)\n endog = array(data[names[3]], dtype=float)\n endog_name = names[3]\n exog = column_stack(data[i] for i in names[:3]).astype(float)\n exog_name = names[:3]\n dataset = Dataset(data=data, names=names, endog=endog, exog=exog,\n endog_name = endog_name, exog_name=exog_name)\n return dataset",
"def __init__(self, data_array, cui2name):\n\t\tassert isinstance(data_array, xr.DataArray), 'Constructor requires xr.DataArray.'\n\n\t\tself.data_array = data_array\n\t\tself.data = self.data_array.values\n\t\tself.sources = self.data_array.source.values\n\t\tself.targets = self.data_array.target.values\n\t\tself.metrics = self.data_array.metric.values\n\t\tself.metapaths = self.data_array.metapath.values\n\n\t\t# Fetch the verbose names\n\t\t#int_rf_df = pd.read_csv('data/impd_cogn_sources.tsv', delimiter='\\t')\n\t\t#cui2name = {cui:name for cui, name in zip(int_rf_df.identifier, int_rf_df.name)} \n\t\tself.source_names = np.array([cui2name[source] for source in self.sources])",
"def dataLoad():\n try:\n try: #Python3\n f = open(__file__ + \".csv\",\"rt\")\n except: #Python2\n f = open(__file__ + \".csv\",\"rb\")\n data = f.read().split(',')\n entryCol.entry0.delete(0,END)\n entryCol.entry0.insert(0,data[0])\n entryCol.entry1.delete(0,END)\n entryCol.entry1.insert(0,data[1])\n entryCol.entry2.delete(0,END)\n entryCol.entry2.insert(0,data[2])\n entryCol.entry3.delete(0,END)\n entryCol.entry3.insert(0,data[3])\n botWind.writeN(\"DataLoad: File\")\n except:\n botWind.writeN(\"DataLoad: Default\")",
"def __init__(self, path):\n self.csv_path = path\n # check if csv format is valid or not\n self.check_valid_csvformat(self.csv_path)\n \"\"\" empty dict to store all company names\n prepare initial company data in dictionary format \"\"\"\n self.company_data = dict()",
"def __loadPreProcessedData(self):\n le = joblib.load(self.le_filename)\n X = np.loadtxt(self.X_filename, delimiter=',').astype(int)\n raw_y = np.loadtxt(self.y_filename, delimiter=',').astype(int)\n y = le.inverse_transform(raw_y)\n ##Initialize atrtribute for this class\n self.le, self.X, self.y = le, X, y",
"def _load_data(self):\n\n path_data_x = '/workspace/base-ml/data/dizzyreg/t%s_df.csv' % \\\n self.task_num\n path_data_y = '/workspace/base-ml/data/dizzyreg/label_df_t%s.csv' % self.task_num\n path_meta = '/workspace/base-ml/data/dizzyreg/meta_df_t%s.csv' % self.task_num\n path_numerical_columns = '/workspace/base-ml/data/dizzyreg/num_columns_v2.csv'\n path_nonnumerical_columns = '/workspace/base-ml/data/dizzyreg/non_num_columns_v2.csv'\n\n read_data_x = pd.read_csv(path_data_x)\n read_data_y = pd.read_csv(path_data_y)\n read_data_meta = pd.read_csv(path_meta)\n\n # Drop columns if it only contains 1 unique element\n read_data_x = pd.DataFrame(self.drop_one_elem_columns(read_data_x))\n\n num_col = pd.read_csv(path_numerical_columns)\n num_col = read_data_x.columns.isin(num_col['0'].values).nonzero()[0]\n col_idx = np.arange(read_data_x.shape[-1])\n non_num_col = np.setdiff1d(col_idx, num_col)\n\n # new_data_x = np.array(read_data_x).astype(np.float32)\n new_data_x = np.array(read_data_x)\n new_data_y = np.array(read_data_y).astype(np.float32)\n new_data_meta = np.array(read_data_meta).astype(np.float32)\n\n print(new_data_x.shape, new_data_y.shape, new_data_meta.shape)\n\n\n # Winsorize dataset\n len_feat = new_data_x.shape[-1]\n idx_list = list(num_col)\n for i in range(len_feat):\n if i in idx_list:\n cur_data = new_data_x[:, i]\n cur_data = np.array(cur_data)\n lower_p = np.percentile(cur_data, 5)\n higher_p = np.percentile(cur_data, 95)\n cur_data[cur_data < lower_p] = lower_p\n cur_data[cur_data > higher_p] = higher_p\n new_data_x[:, i] = cur_data\n\n # Make sure target data is one-hot encoded\n if new_data_y.shape[-1] == 1:\n num_class = len(np.unique(new_data_y))\n new_data_y = np.eye(num_class)[new_data_y.astype(int).reshape(-1)]\n new_data_y = new_data_y.astype('float32')\n self.orig_column_names = read_data_x.columns\n self.data_x = new_data_x # N x F\n self.data_y = new_data_y # N x C\n self.numerical_idx = num_col # list of idx\n self.non_num_idx = non_num_col # None\n\n # Calculate adjacency matrix\n self.meta_inf = new_data_meta.astype('float32') # N x 3\n if self.args.graph_type:\n self.adj = self.get_adjacency()",
"def load_data(self):\n \n # only loader implemented so far !\n try:\n _ascii_array = Utilities.load_ascii(filename=self.filename, sep='')\n start_row = TOF._first_line_number_with_real_data(_ascii_array[0, 0])\n\n _tof_column = _ascii_array[start_row:, 0]\n\n if not TOF._is_this_numeric(_tof_column[0]):\n start_row += 1\n\n _tof_column = _ascii_array[start_row:, 0]\n _counts_column = _ascii_array[start_row:, 1]\n\n self.tof_array = _tof_column\n self.counts_array = _counts_column\n return\n\n except IndexError:\n pass # try another format\n\n try:\n _ascii_array = Utilities.load_ascii(filename=self.filename, sep=',')\n start_row = TOF._first_line_number_with_real_data(_ascii_array[0, 0])\n\n _tof_column = _ascii_array[start_row:, 0] # first row must be excluded in this format\n\n if not TOF._is_this_numeric(_tof_column[0]):\n start_row += 1\n\n _tof_column = _ascii_array[start_row:, 0]\n _counts_column = _ascii_array[start_row:, 1]\n\n self.tof_array = _tof_column\n self.counts_array = _counts_column\n return\n\n except IndexError:\n raise IndexError(\"Format not implemented!\")",
"def load_data():\n\t\t# load the data\n\t\tDATPATH = \"../data/\"\n\t\t#fnino = DATPATH + \"nino3.csv\" # 1871-2000\n\t\tfnino = DATPATH + \"tas_Amon_IPSL-CM5A-LR_past1000_r1i1p1_0850_1850_nino3_tseries.csv\" # 1871-2016\n\t\t#fnino = DATPATH + \"nino34.long.data\"\n\t\t#nc_data_nino3 = netCDF4.Dataset(fnino)\n\t\t#nino3_load = nc_data_nino3.variables['tas'][:]\n\t\t#dnino = nino3_load.flatten()\n\n\t\tdnino = np.genfromtxt(fnino, delimiter=\",\", dtype=float).flatten()\n\t\t#fismr = DATPATH + \"ismr.csv\" # 1871-2000\n\t\t#fismr = DATPATH + \"psl_Amon_IPSL-CM5A-LR_past1000_r1i1p1_0850_1850_1_india_goswami_2002_tseries.csv\" # 1871-2016\n\t\tfismr = DATPATH + \"pr_Amon_IPSL-CM5A-LR_past1000_r1i1p1_0850_1850_goswami_india_tseries.csv\" # 1871-2016\n\t\tdismr = np.genfromtxt(fismr, delimiter=\",\", dtype=float).flatten()\n\t\t#fvolc = DATPATH + \"robock.txt\" # 1871-2000\n\t\tfvolc = DATPATH + \"sigl.txt\" # 1871-2016\n\t\tdvolc = np.genfromtxt(fvolc, delimiter=\",\", dtype=float).flatten()\n\n\t\tfvolc_source = DATPATH + \"volc_source_850_1850.csv\" # 1871-2016\n\t\tdvolc_source = np.genfromtxt(fvolc_source, delimiter=\",\", dtype=float).flatten()\n\t\t# simple check for data consistency\n\t\tassert dnino.shape == dismr.shape, \"Data sets are unequal!\"\n\t\tassert int(dismr.shape[0]/12) == dvolc.shape[0], \"Data sets are unequal\"\n\t\treturn dnino, dismr, dvolc, dvolc_source",
"def __init__(self, dataset_dir, listfile=None):\n Reader.__init__(self, dataset_dir, listfile)\n self._data = [line.split(',') for line in self._data]\n self._data = [(x, float(t), float(y)) for (x, t, y) in self._data]",
"def __init__(self):\n self.project_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n self.excel_file = os.path.join(self.project_dir, \"data\", \"Literature_Data.xlsx\")\n self.spreadsheet_name = \"Individualized Data\"\n self.filled_output_file = os.path.join(self.project_dir, \"data\", \"filled_data.csv\")\n self.output_file = os.path.join(self.project_dir, \"data\", \"final.csv\")\n self.use_fake_data = False # For testing\n # This instance value \"self.df\" is the pandas DataFrame that contains all of the data\n # from the literature case studies. Manipulating this field is the purpose of this class.\n \n self.num_negative = 500\n self.df = None",
"def __init__(self, name, title=None, unit=None):\n super().__init__(name.lower(), title, unit)\n filename = os.path.join(DATA, name + '.csv')\n with open(filename) as csv_file:\n reader = csv.DictReader(csv_file)\n for row in reader:\n try:\n value = float(row[name])\n except ValueError:\n continue\n date = datetime.strptime(row['DATE'], \"%Y-%m-%d\")\n self.data[date] = value\n self.first_date = min(self.data)\n self.last_date = max(self.data)",
"def load_data(self):\n self.data = pd.read_csv(self.data_path, dtype=self.dtype)\n self.data.columns = self.data_cols\n self.data.topic = self.data.topic.str.lower()\n logging.debug(f'Data Load Complete: {self.data_path}')",
"def load_data():\n x = np.genfromtxt(X_FILE, usecols=(0, 1))\n y = np.genfromtxt(Y_FILE, usecols=(0))\n\n return x, y",
"def import_csv_dataset():\n import_fields = pd.read_csv('redacted-2020-june-30-wprdc-.csv', header=None).to_numpy()[0, :]\n import_values = pd.read_csv('redacted-2020-june-30-wprdc-.csv').to_numpy()\n import_values = clean_values(import_values)\n return import_fields, import_values",
"def init(fileName):\r\n global grand_prod_cost, grand_album_sales\r\n infile = ''\r\n try:\r\n with open(fileName, mode='r') as infile:\r\n reader = csv.reader(infile)\r\n sniffer = csv.Sniffer()\r\n has_header = sniffer.has_header(infile.read(2048))\r\n infile.seek(0)\r\n if (has_header):\r\n next(reader) # move curser to next row so the header is not included\r\n initBands(reader)\r\n # Reset the curser to start based on presence of header\r\n if(has_header):\r\n infile.seek(0)\r\n # avoid header\r\n next(reader)\r\n else:\r\n infile.seek(0)\r\n splitByBand(reader)\r\n except Exception as e:\r\n print('Exception in init')\r\n raise e",
"def __init__(self, dataset_dir, listfile=None):\n Reader.__init__(self, dataset_dir, listfile)\n self._data = [line.split(',') for line in self._data]\n self._data = [(x, float(t), int(y)) for (x, t, y) in self._data]"
] |
[
"0.63987756",
"0.60396355",
"0.6033745",
"0.6007925",
"0.5936185",
"0.5930858",
"0.59216917",
"0.5916312",
"0.59140754",
"0.5901727",
"0.5890288",
"0.58721507",
"0.5862617",
"0.5855333",
"0.5845277",
"0.5844009",
"0.58408755",
"0.5811851",
"0.5779655",
"0.57742786",
"0.5758923",
"0.5743206",
"0.5735882",
"0.5734092",
"0.5729459",
"0.5700267",
"0.5699999",
"0.56936765",
"0.56927407",
"0.5689793"
] |
0.71311456
|
0
|
For the given data size from the .csv file, generates a set of random indices to use for the training set. Uses a user-defined percentage of the data for training and testing.
|
import random

import numpy as np


def generaterandomindices(dataSize, percentTest):
TrainIndices = np.array([])
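    # Rejection sampling: keep drawing random row indices, discarding duplicates,
    # until the requested fraction of the data set has been collected.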
    while len(TrainIndices) < int(percentTest * dataSize):
# Randomly select an index value and store it. If it has already been chosen, pick again.
index = int(random.random() * dataSize)
        if index not in TrainIndices:
TrainIndices = np.append(TrainIndices, [index])
# For aesthetic purposes:
TrainIndices = np.sort(TrainIndices)
return TrainIndices
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def load_data(input_file, test_size, random_seed ):\n ratings = np.genfromtxt(input_file, delimiter=',')\n #ratings = ratings[:200,:]\n train_masks = np.zeros_like(ratings)\n test_masks = np.zeros_like(ratings)\n pairs = list()\n for i in range(ratings.shape[0]):\n for j in range(ratings.shape[1]):\n pairs.append((i,j))\n train, test = train_test_split(pairs ,test_size = test_size, random_state= random_seed)\n for user,item in train:\n train_masks[user, item]= 1\n for user,book in test:\n test_masks[user, item]= 1 \n return ratings, train_masks, test_masks",
"def load_occupancy_dataset(trainsize=500, testsize=1000):\n filename = 'datasets/numericsequence.csv'\n dataset = loadcsv(filename)\n trainset, testset = splitdataset(dataset, trainsize, testsize)\n return trainset, testset",
"def read_random_data_from_csv(\n file_name, training_set_size, unlabeled_set_size, holdout_set_size, validation_set_size):\n data = samp_file_to_arr(\n file_name, training_set_size + unlabeled_set_size + holdout_set_size + validation_set_size)\n y_raw = np.array([x[0] for x in data])\n x_all = np.array([x[1:] for x in data])\n # Now transform so that the lower label is -1, always. \n uq = np.unique(y_raw) # Assumed to be only two unique labels!\n y_all = np.zeros(len(y_raw))\n y_all[np.where(y_raw == uq[0])[0]] = -1\n y_all[np.where(y_raw == uq[1])[0]] = 1\n xtrhoval, x_unl, ytrhoval, y_unl = sklearn.model_selection.train_test_split(\n x_all, y_all, test_size=unlabeled_set_size)\n x_trho, x_validate, y_trte, y_validate = sklearn.model_selection.train_test_split(\n xtrhoval, ytrhoval, test_size=validation_set_size)\n x_train, x_out, y_train, y_out = sklearn.model_selection.train_test_split(\n x_trho, y_trte, test_size=holdout_set_size)\n return (x_train, y_train, x_unl, y_unl, x_out, y_out, x_validate, y_validate)",
"def train_test_split(filename: str, split=0.5) -> tuple:\n training_set = []\n test_set = []\n content = load_from_csv(filename)\n for _, value in enumerate(content):\n if random.random() < split:\n training_set.append(value)\n else:\n test_set.append(value)\n return training_set, test_set",
"def train_test_samples(df):\n\n from math import floor\n\n shuffled_df = df.reindex(np.random.permutation(df.index))\n\n seventy_five_percent = int(floor(len(shuffled_df) * 0.75))\n train_df = shuffled_df.iloc[:seventy_five_percent, ]\n test_df = shuffled_df.iloc[seventy_five_percent:, ]\n\n return train_df, test_df",
"def load_susy(trainsize=500, testsize=1000):\n filename = 'datasets/susysubset.csv'\n dataset = loadcsv(filename)\n trainset, testset = splitdataset(dataset, trainsize, testsize)\n return trainset, testset",
"def get_random_train_validation_set(images, percent=0.8):\n\n indexes = np.arange(len(images))\n train_indexes = np.random.choice(indexes, int(np.round(len(images) * percent)))\n validation_indexes = np.delete(indexes, train_indexes)\n\n return train_indexes, validation_indexes",
"def train_test(in_path, train_out_path, test_out_path):\n df = pd.read_csv(in_path, sep='|')\n rng = RandomState()\n\n train = df.sample(frac=0.8, random_state=rng)\n test = df.loc[~df.index.isin(train.index)]\n\n train.to_csv(train_out_path, sep='|', index = None, header=True)\n test.to_csv(test_out_path, sep='|', index = None, header=True)",
"def open_MRI_data(csv_path, train_set = 0.8, n_followups=5, normalize=True):\n\n data_df = pd.read_csv(csv_path)\n\n mri_col = data_df.columns.str.contains(\"SV_UCSFFSX_11_02_15_UCSFFSX51_08_01_16\")\n mri_col = data_df.columns[mri_col].values\n\n data_df = data_df.dropna(axis=0, subset=mri_col)\n\n # Select only the subjects with nfollowups\n # Code to only select 5 first appearances of each PTID\n ptid_list = np.unique(data_df[\"PTID\"])\n\n idx_to_drop = []\n for ptid in ptid_list:\n i_list = data_df.index[data_df['PTID'] == ptid].tolist()\n if len(i_list) < 5:\n idx_to_drop = idx_to_drop + i_list\n elif len(i_list) > 5:\n idx_to_drop = idx_to_drop + i_list[5:]\n\n data_final = data_df.drop(idx_to_drop)\n\n print(data_final.shape)\n\n # Normalize only features\n data_final.loc[:,mri_col] = data_final.loc[:,mri_col].apply(lambda x: (x-x.mean())/ x.std(), axis=0)\n\n # Divide between test and train\n from sklearn.model_selection import GroupShuffleSplit\n gss = GroupShuffleSplit(n_splits=1, test_size=1.0-train_set)\n train_dataset, test_dataset = next(gss.split(X=data_final, y=data_final.DX_bl.values, groups=data_final.PTID.values))\n\n df_train = data_final.iloc[train_dataset]\n df_test = data_final.iloc[test_dataset]\n\n df_train = df_train.reset_index(drop=True)\n df_test = df_test.reset_index(drop=True)\n\n # Return the features in the correct shape (Nsamples, timesteps, nfeatures)\n X_train = pandas_to_data_timeseries(df_train, mri_col)\n X_test = pandas_to_data_timeseries(df_test, mri_col)\n\n return X_train, X_test",
"def splitData(data, class_label, seed, ratio):\n\t\n\trandom.seed(seed)\n\tsubset = data.clone()\n\tsize_data = subset.data.shape[0]\n\tn = int(np.floor(size_data * ratio)) # number of datasets in train\n\tindex = random.sample(range(1, size_data), n)\n\tsplit_list = [item for item in [0] for i in range(size_data)]\n\t\n\tfor i in index:\n\t\tsplit_list[i]=1\n\t\n\treturn split_list #returns list of indeces where 0 is test and 1 is training data ",
"def split_train_test_by_percentage(dataset, train_percentage=0.8):\n train_length = int(len(dataset) * train_percentage)\n return torch.utils.data.random_split(dataset, (train_length, len(dataset) - train_length))",
"def load_test_data(batch_size=32):\n log = logging.getLogger(__name__)\n\n log.info('Reading TEST csv file...')\n # read csv data file\n\n samples = []\n with open('./test_driving_log.csv') as f:\n csv_reader = csv.reader(f)\n next(csv_reader) #just skip the header line for test data set provided by Udacity\n\n for row in csv_reader:\n samples.append(row)\n\n generator = input_generator(samples, batch_size, is_for_validation=True, drop_zero_samples=True)\n return generator, math.ceil(len(samples) / batch_size)",
"def read_data():\n csv_data = pd.read_csv('./dataset.csv')\n x = csv_data[['X1', 'X2']]\n x = x.values # numpy array for x: (180, 2)\n y = csv_data['Label']\n y = y.values # numpy array for y: (180, )\n\n\t# shuffle the data\n total = x.shape[0]\n mask = list(range(total))\n np.random.shuffle(mask)\n x = x[mask]\n y = y[mask]\n\t\n\t# 80 percent for train and 20 percent for test\n train_split = int(0.8 * total)\n x_train, y_train = x[:train_split], y[:train_split]\n x_test, y_test = x[train_split:], y[train_split:]\n return x_train, y_train, x_test, y_test",
"def train_test_set_split(dataset, dataset_name, test_size=0.1):\n train_indices_path = './' + dataset_name + '_train_indices(' + str(test_size) + ').txt'\n test_indices_path = './' + dataset_name + '_test_indices(' + str(test_size) + ').txt'\n try:\n train_indices = []\n test_indices = []\n file = open(train_indices_path, 'rt', encoding='utf-8')\n while True:\n line = file.readline()\n if not line:\n break\n train_indices.append(int(line[:-1]))\n file.close()\n file = open(test_indices_path, 'rt', encoding='utf-8')\n while True:\n line = file.readline()\n if not line:\n break\n test_indices.append(int(line[:-1]))\n file.close()\n train_labels = [dataset.targets[i] for i in train_indices]\n except FileNotFoundError:\n indices = np.arange(len(dataset))\n labels = np.array(dataset.targets)\n train_indices, test_indices, train_labels, _ = train_test_split(\n indices, labels, test_size=test_size, stratify=labels\n )\n file = open(train_indices_path, 'wt', encoding='utf-8')\n for i in train_indices:\n line = str(i) + '\\n'\n file.write(line)\n file.close()\n file = open(test_indices_path, 'wt', encoding='utf-8')\n for i in test_indices:\n line = str(i) + '\\n'\n file.write(line)\n file.close()\n\n train_set = torch.utils.data.Subset(dataset, indices=train_indices)\n test_set = torch.utils.data.Subset(dataset, indices=test_indices)\n return train_set, test_set, train_labels",
"def loadtrainData_undersampling():\n train = []\n fileIn = open(PATH + 'traindata_Subtask4.txt')\n for line in fileIn.readlines():\n lineArr = line.strip().split()\n train.append([float(lineArr[i]) for i in range(len(lineArr))])\n\n pos = []\n neg = []\n for i in train:\n if i[-1] == 1.0:\n pos.append(i)\n else:\n neg.append(i)\n slice1 = random.sample(neg, len(pos))\n data = pos + slice1\n train_x = []\n train_y = []\n y = []\n for line in data:\n train_x.append([float(line[i]) for i in range(len(line) - 1)])\n y.append([int(line[-1])])\n for i in range(len(y)):\n train_y.append(y[i][0])\n return np.mat(train_x), np.mat(train_y).transpose()",
"def readData(path_to_dataset, train_size=0.8, validation_size=0.2):\n data = pd.read_csv(os.path.join(path_to_dataset, 'training_set_rel3.tsv'), sep='\\t', encoding='ISO-8859-1')\n # Drop columns that has null value \n data = data.dropna(axis=1)\n # Only take 4 columns of data from the dataset: essay_id, essay_set, essay, domain1_score\n data = data[['essay_id', 'essay_set', 'essay', 'domain1_score']]\n # Perform 80:20 train-test split on the training data\n train_set, test_set = train_test_split(data, train_size=train_size, random_state=0)\n # Split the 80% training set further into 60:20\n training_set, validation_set = train_test_split(train_set, test_size=validation_size, random_state=0)\n return training_set, test_set, validation_set",
"def load_all(test_num=100):\n\ttrain_data = pd.read_csv(\n\t\tconfig.train_rating, \n\t\tsep='\\t', header=None, names=['user', 'item'], \n\t\tusecols=[0, 1], dtype={0: np.int32, 1: np.int32})\n\n\tuser_num = train_data['user'].max() + 1\n\titem_num = train_data['item'].max() + 1\n\n\ttrain_data = train_data.values.tolist()\n\n\t# load ratings as a dok matrix\n\ttrain_mat = sp.dok_matrix((user_num, item_num), dtype=np.float32)\n\tfor x in train_data:\n\t\ttrain_mat[x[0], x[1]] = 1.0\n\n\ttest_data = []\n\twith open(config.test_negative, 'r') as fd:\n\t\tline = fd.readline()\n\t\twhile line != None and line != '':\n\t\t\tarr = line.split('\\t')\n\t\t\tu = eval(arr[0])[0]\n\t\t\ttest_data.append([u, eval(arr[0])[1]])\n\t\t\tfor i in arr[1:]:\n\t\t\t\ttest_data.append([u, int(i)])\n\t\t\tline = fd.readline()\n\treturn train_data, test_data, user_num, item_num, train_mat",
"def splitData(filename, testing_set_percentage):\n matFile = sio.loadmat(filename)\n data = matFile['mydata']\n\n np.savetxt('test.out', data, fmt='%d', delimiter=',') # X is an array\n print('BEFORE SHUFFLE', data)\n\n np.random.shuffle(data)\n\n np.savetxt('test_after.out', data, fmt='%d', delimiter=',') # X is an array\n\n print('AFTER SHUFFLE', data)\n\n r, c = np.array(data).shape\n\n testing_set_size = int(r * testing_set_percentage)\n training_set_size = r - testing_set_size\n\n train_data = data[:training_set_size]\n test_data = data[training_set_size:]\n\n return train_data, test_data",
"def sampling(train_set, train_meta, klass, label, n_samples_pos, rate_neg, fold, path_idxs):\n\tprint('-- SAMPLING TRAINNING')\n\tdirectory_idxs = path_idxs+fold+'/'+str(int(klass))+'/'\n\tif(os.path.isdir(directory_idxs)):\n\t\tprint('loading indexes...')\n\t\tidxs_class_pos = np.loadtxt(directory_idxs+'idxs_pos_train.txt', dtype=int)\n\t\tidxs_class_neg = np.loadtxt(directory_idxs+'idxs_neg_train.txt', dtype=int)\n\telse:\n\t\tidxs_class_pos = (train_meta[ : , label] == klass).nonzero()[0]\n\t\tidxs_class_neg = (train_meta[ : , label] != klass).nonzero()[0]\n\t\tif(n_samples_pos < len(idxs_class_pos)):\n\t\t\tidxs_class_pos = np.random.choice(idxs_class_pos, n_samples_pos)\n\t\tidxs_class_neg = np.random.choice(idxs_class_neg, int(n_samples_pos*rate_neg))\n\t\tprint('saving indexes...')\n\t\tos.makedirs(directory_idxs)\n\t\tnp.savetxt(directory_idxs+'idxs_pos_train.txt', idxs_class_pos, fmt='%d')\n\t\tnp.savetxt(directory_idxs+'idxs_neg_train.txt', idxs_class_neg, fmt='%d')\n\n\ttrain_set = np.vstack((train_set[idxs_class_pos], train_set[idxs_class_neg]))\n\ttrain_meta = np.vstack((train_meta[idxs_class_pos], train_meta[idxs_class_neg]))\n\ttrain_meta[:, label] = 1\n\ttrain_meta[len(idxs_class_pos):, label] = -1\n\treturn [train_set, train_meta]",
"def split_dataset(dataset, test_size):\r\n random.shuffle(dataset)\r\n \r\n rating_negativ = []\r\n rating_positiv = []\r\n \r\n for row in dataset:\r\n if int(row[1]) == 0:\r\n rating_negativ.append(row)\r\n elif int(row[1]) == 1:\r\n rating_positiv.append(row)\r\n\r\n random.shuffle(rating_positiv)\r\n random.shuffle(rating_negativ) \r\n \r\n neg_train_data, neg_val_data = train_test_split(rating_negativ, test_size=test_size)\r\n pos_train_data, pos_val_data = train_test_split(rating_positiv, test_size=test_size)\r\n \r\n train_data = neg_train_data + pos_train_data\r\n val_data = neg_val_data + pos_val_data\r\n \r\n random.shuffle(train_data)\r\n random.shuffle(val_data)\r\n \r\n return train_data, val_data",
"def data_set_maker():\n\n # crate a folder in your code directory and name it: \"files\". put the .npy files iside that folder\n\n x_all = np.load(path + '/files/tinyX.npy', 'r') # reads the input file\n y_all = np.load(path + '/files/tinyY.npy', 'r') # reads the input file\n\n # split the data into 10% validation-set and 90% training set\n raw_train, raw_valid, y_train, y_valid = train_test_split(x_all, y_all, test_size=0.2, random_state=43)\n return raw_train, raw_valid, y_train, y_valid",
"def shuffle_train(self):\r\n if self.data_container.task == 'Classify':\r\n id_train_list=[]\r\n for i in self.idx_train_list:\r\n id_train_list.append(self._random_state.choice(i,self.train_parms[0]))\r\n for j in self._random_state.choice(self.unique_value, self.train_parms[1]):\r\n id_train_list.append(self._random_state.choice(self.idx_train_list[j],1))\r\n self.idx['train'] = np.concatenate(id_train_list, axis=0)\r\n \r\n self.idx['train'] = self._random_state.permutation(self.idx['train'])",
"def load_data_set(file):\n df = pd.read_csv(file)\n msk = np.random.rand(len(df)) < 0.8\n return df[msk], df[~msk]",
"def split_data(dataset, test_size=0.5):\n shuffled_data = np.random.RandomState(seed=721).permutation(dataset)\n train_set = shuffled_data[: int(len(dataset) * (1 - test_size)), :]\n test_set = shuffled_data[int(len(dataset) * (1 - test_size)):, :]\n return train_set, test_set",
"def get_random_cases(size = 20):\n temp_dfs = []\n for file in os.listdir(\"data\"):\n df = pd.read_csv(\"data/\" + file, header = 1, names = ['0', \"primary_site\", \"case_uuid\", \"rna_seq_uuid\"])\n df = df.drop(columns=['0'])\n rows = random.sample(range(0, len(df) -1), size)\n temp_dfs.append(df.iloc[rows])\n\n res = pd.concat(temp_dfs)\n filename = \"random_case_selection_size_\"+str(size)+\".csv\"\n res.to_csv(filename)\n return filename",
"def open_MRI_data_var(csv_path, train_set = 0.8, normalize=True):\n data_df = pd.read_csv(csv_path)\n\n mri_col = data_df.columns.str.contains(\"SV_UCSFFSX_11_02_15_UCSFFSX51_08_01_16\")\n mri_col = data_df.columns[mri_col].values\n\n data_df = data_df.dropna(axis=0, subset=mri_col)\n\n # Select only the subjects with nfollowups\n # Code to only select 5 first appearances of each PTID\n ptid_list = np.unique(data_df[\"PTID\"])\n\n idx_to_drop = []\n data_final = data_df.drop(idx_to_drop)\n\n # Divide between test and train\n from sklearn.model_selection import GroupShuffleSplit\n gss = GroupShuffleSplit(n_splits=1, test_size=1.0-train_set)\n train_dataset, test_dataset = next(gss.split(X=data_final, y=data_final.DX_bl.values, groups=data_final.PTID.values))\n\n df_train = data_final.iloc[train_dataset]\n df_test = data_final.iloc[test_dataset]\n\n df_train = df_train.reset_index(drop=True)\n df_test = df_test.reset_index(drop=True)\n\n # Return the features in the correct shape list of Tensors (timesteps, nfeatures)\n X_train = pandas_to_data_timeseries_var(df_train, mri_col)\n X_test = pandas_to_data_timeseries_var(df_test, mri_col)\n\n return X_train, X_test",
"def val_train_idxs(n, val_pct=0.2, seed=42):\n#def get_cv_idxs(n, cv_idx=0, val_pct=0.2, seed=42):\n np.random.seed(seed)\n n_val = int(val_pct*n)\n #idx_start = cv_idx*n_val\n idxs = np.random.permutation(n)\n # np.random.permutation has two differences from np.random.shuffle:\n # if passed an array, it will return a shuffled copy of the array; np.random.shuffle shuffles the array inplace\n # if passed an integer, it will return a shuffled range i.e. np.random.shuffle(np.arange(n))\n #return idxs[idx_start:idx_start+n_val], idxs[idx_start+n_val,:]\n val = idxs[:n_val]\n trn = idxs[n_val:]\n return val, trn",
"def read_data_sets(data_path, fake_data=False, one_hot=False,\n validation_size=5000, source_url={},\n augment=False,\n percentage_train=100.,\n unbalance=False, unbalance_dict={\"percentage\": 20, \"label1\": 0, \"label2\": 8},\n ):\n\n class DataSets(object):\n pass\n\n data_sets = DataSets()\n\n if fake_data:\n data_sets.train = DataSet([], [], fake_data=True, one_hot=True)\n data_sets.validation = DataSet([], [], fake_data=True, one_hot=True)\n data_sets.test = DataSet([], [], fake_data=True, one_hot=True)\n return data_sets\n\n if not source_url: # empty string check\n if 'fashion' in data_path:\n source_url = DEFAULT_SOURCE_URL_FASHION\n else:\n source_url = DEFAULT_SOURCE_URL_MNIST\n\n if 'fashion' in data_path or 'mnist' in data_path: # mnist or fashion\n train_images, train_labels, val_images, val_labels, test_images, test_labels = \\\n load_mnist(data_path, validation_size, source_url, one_hot)\n reshape = True\n else:\n train_images, train_labels, val_images, val_labels, test_images, test_labels = \\\n load_medical_data(data_path)\n reshape = False\n\n # add random permutation to train & validation\n np.random.seed(42)\n\n n_train = train_images.shape[0]\n perm = np.random.permutation(n_train)\n train_images = train_images[perm]\n train_labels = train_labels[perm]\n\n n_val = val_images.shape[0]\n perm = np.random.permutation(n_val)\n val_images = val_images[perm]\n val_labels = val_labels[perm]\n\n # For experiments with data-augmentation\n if augment:\n if 'fashion' in data_path: # rotations +-10 and horizontal flips\n augmented_images, augmented_labels = augment_data(train_images, train_labels, hflip=True)\n elif 'mnist' in data_path: # rotations +-10\n augmented_images, augmented_labels = augment_data(train_images, train_labels, hflip=False)\n train_images = np.concatenate([train_images, np.expand_dims(augmented_images, 3)])\n train_labels = np.concatenate([train_labels, augmented_labels])\n # for the medical datasets, you can use the \"augment\" argument while doing patch extraction\n\n # For experiments with limited amount of data\n if percentage_train != 100.:\n train_size = int(0.01*percentage_train*train_images.shape[0])\n Xtrain_images, Xval_images, ytrain, yval = train_test_split(train_images, train_labels, train_size=train_size)\n train_images = Xtrain_images\n train_labels = ytrain\n\n # For experiments with class-imbalance distribution\n if unbalance:\n n_classes = len(np.unique(np.argmax(train_labels, 1)))\n reduceto = 0.01*unbalance_dict['percentage']\n label1 = unbalance_dict['label1']\n label2 = unbalance_dict['label2']\n\n pick_ids = []\n newsize = 0\n all_classes = np.arange(0, n_classes)\n all_classes = np.delete(all_classes, np.where(all_classes == label1)[0])\n all_classes = np.delete(all_classes, np.where(all_classes == label2)[0])\n\n for lab in [label1, label2]:\n allids = np.where(np.argmax(train_labels, 1) == lab)[0]\n selectedids = np.random.choice(allids, int(reduceto * allids.shape[0]), replace=False)\n pick_ids.append(selectedids)\n newsize += len(selectedids)\n\n new_ids = convert_list_to_array(pick_ids, newsize)\n\n other_ids = []\n othersize = 0\n for lab in all_classes.tolist():\n selectedids = np.where(np.argmax(train_labels, 1) == lab)[0]\n other_ids.append(selectedids)\n othersize += len(selectedids)\n\n keep_ids = convert_list_to_array(other_ids, othersize)\n\n # new_ids: contains the indices of the reduced (imbalance) classes\n # keep_ids: contains the indices of the rest (keep the same class distribution)\n resulting_ids = 
np.concatenate((new_ids, keep_ids))\n np.random.shuffle(resulting_ids)\n\n train_images = train_images[resulting_ids, ...]\n train_labels = train_labels[resulting_ids, ...]\n\n data_sets.train = DataSet(train_images, train_labels, fake_data=True, one_hot=True, reshape=reshape)\n data_sets.validation = DataSet(val_images, val_labels, fake_data=True, one_hot=True, reshape=reshape)\n data_sets.test = DataSet(test_images, test_labels, fake_data=True, one_hot=True, reshape=reshape)\n\n return data_sets",
"def split_train_and_test(num_examples, test_percentage):\n all_samples_idx = np.arange(num_examples)\n np.random.shuffle(all_samples_idx)\n test_examples = int(np.ceil(num_examples * test_percentage))\n # Train and validation indexes\n train_val_idx = all_samples_idx[0:len(all_samples_idx) - test_examples]\n test_idx = all_samples_idx[len(all_samples_idx) - test_examples:len(all_samples_idx)]\n\n return [train_val_idx, test_idx]",
"def genTrainingSet(set_of_CSVs, file_to_classify, train_size = 5):\n set_of_csvs_minus_target = copy.copy(set_of_CSVs)\n # remove the file we want to classify\n set_of_csvs_minus_target.remove(file_to_classify)\n\n # extract out the random noise files\n # first, set the seed\n random.seed(time.time())\n # now sample\n return_list = random.sample(set_of_csvs_minus_target, train_size)\n return return_list"
] |
[
"0.6406667",
"0.6314212",
"0.62506485",
"0.6096988",
"0.6061452",
"0.5996755",
"0.5953733",
"0.5914187",
"0.5883699",
"0.5882433",
"0.5846584",
"0.5827348",
"0.58264536",
"0.57857716",
"0.5761794",
"0.573058",
"0.5699392",
"0.5697324",
"0.5695694",
"0.5686362",
"0.5678938",
"0.5675676",
"0.5674196",
"0.56713194",
"0.56691766",
"0.5647618",
"0.5643903",
"0.56047934",
"0.55986506",
"0.5591199"
] |
0.6647869
|
0
|
Generate an example dask HighLevelGraph.
|
import dask
import pandas as pd
from dask.highlevelgraph import HighLevelGraph


def dask_highlevelgraph() -> HighLevelGraph:
@dask.delayed(pure=True) # type: ignore
def create_dataframe(num_rows: int, num_cols: int) -> pd.DataFrame:
print('Creating DataFrame...')
return pd.DataFrame(data=[range(num_cols)] * num_rows)
@dask.delayed(pure=True) # type: ignore
def create_dataframe2(num_rows: int, num_cols: int) -> pd.DataFrame:
print('Creating DataFrame...')
return pd.DataFrame(data=[range(num_cols)] * num_rows)
@dask.delayed(pure=True) # type: ignore
def complicated_computation(df: pd.DataFrame, num_quantiles: int) \
-> pd.DataFrame:
print('Running complicated computation on DataFrame...')
return df.quantile(q=[i / num_quantiles for i in range(num_quantiles)])
@dask.delayed(pure=True) # type: ignore
def summarise_dataframes(*dfs: pd.DataFrame) -> float:
print('Summing DataFrames...')
return sum(df.sum().sum() for df in dfs)
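    # Wire up the graph: two independent source DataFrames, each feeding a quantile
    # computation, with both intermediate results reduced by a final summation.
    # The delayed `result` below carries the HighLevelGraph built from these calls.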
df_a = create_dataframe(1000, 1000)
df_b = create_dataframe2(1000, 1000)
df_c = complicated_computation(df_a, 2048)
df_d = complicated_computation(df_b, 2048)
result = summarise_dataframes(df_c, df_d)
return result
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def gen_graph(self):",
"def test_highleveldag(dask_highlevelgraph: HighLevelGraph) -> None:\n with dask.config.set(scheduler='sync'):\n result = dask_highlevelgraph.compute()\n assert result == 2045952000.0",
"def test_highlevelgraph(dask_highlevelgraph: HighLevelGraph) -> None:\n with dask.config.set(scheduler='sync', delayed_optimize=optimize):\n result = dask_highlevelgraph.compute()\n assert result == 2045952000.0",
"def populate_graph(self):",
"def _build_graph(self):\n pass",
"def build_graph(self):\n pass",
"def test_documentation_popxl_basic_subgraph(self):\n filename = \"basic_graph.py\"\n self.run_python(filename, file_dir=working_dir, working_dir=working_dir)",
"def _construct_graph(self):\n raise NotImplementedError",
"def gen_graph():\n if config_pagination:\n gdata = tgraph.call_graph(offset=offset, limit=limit)\n else:\n gdata = tgraph.call_graph(start=start, end=end, contineous=contineous)\n\n for data in gdata:\n yield data",
"def build_graph(self):\n raise NotImplementedError",
"def generate(self):\n self.graph_repl = self.master.graph_repl",
"def generateGraph(mids, chaptersField, labelsField):\n output = \"digraph G { \\n\"\n # On ne traite que les chapitres qui ont actives le graphe\n chapts = chapters.graphChapters()\n # le dico nodes contient une liste pour chaque chapitre. Chaque liste\n # contient tous les neuds (un par note) presents dans ce chapitre, et\n # representes par des tuples (noteId, label)\n nodes = {}\n for mid in mids:\n chapterField = chaptersField[mid]\n labelField = labelsField[mid]\n for id, flds in mw.col.db.execute(\"\"\"\n SELECT id, flds FROM notes WHERE mid=%d\n \"\"\" % mid):\n fields = splitFields(flds)\n chapter = fields[chapterField]\n if not chapter in chapts:\n continue\n label = fields[labelField]\n if(not chapter in nodes):\n nodes[chapter] = []\n nodes[chapter].append((id, label))\n # On genere les noeuds, dans des clusters (un par chapitre)\n notes = []\n for chap in nodes:\n output += \"\"\"subgraph cluster_%d {\n node [style=filled];\n label = \"%s\";\n color=blue;\n \"\"\" % (chapts[chap], chap)\n for n in nodes[chap]:\n output += \"\"\"n%d [label=\"%s\", URL=\"%d\"];\\n\"\"\" % (n[0], n[1], n[0])\n notes.append(n)\n output += \"\"\"\n }\\n\"\"\"\n # Puis on ajoute tous les liens ..\n for n in notes:\n for nid in mw.col.db.execute(\"\"\"SELECT N.noteId FROM `PATH.links` AS L\n JOIN `PATH.match` AS M ON M.id = L.matchId\n JOIN `PATH.nodes` AS N ON M.nodeId = N.id\n WHERE L.noteId = %d\"\"\" % (n[0])):\n output += \"\"\"n%d -> n%d;\\n\"\"\" % (nid[0], n[0])\n output += \"}\"\n generateGraphImage(output)",
"def test_tracker_graph():\n\n objects = _load_csv()\n ground_truth_graph = _load_ground_truth_graph()\n\n # run the tracking\n tracker = full_tracker_example(objects)\n _, _, graph = tracker.to_napari(ndim=2)\n\n assert ground_truth_graph == graph",
"def test_tree_graph_creation(self):\n # There is little to test here other than simple creation\n # Whether it comes out OK or not ... ¯\\_(ツ)_/¯\n model = FairModel(name='Test')\n model.input_data('Loss Magnitude', mean=50, stdev=5)\n model.input_data('Loss Event Frequency', low=10, mode=20, high=30)\n metamodel = FairMetaModel(name='Test Meta', models=[model, model])\n with warnings.catch_warnings(record=False):\n warnings.simplefilter(\"ignore\")\n fvp = FairViolinPlot(metamodel)\n _, _ = fvp.generate_image()",
"def create_graph_domain():\n \n \"\"\"\n Fetch data\n \"\"\"\n \n from input.read_input import read_item_data\n df = read_item_data()\n df['item_id'] = df.index\n dct_title = df['title'].to_dict()\n dct_domain = df['domain_id'].to_dict()\n dct_cat= df['category_id'].to_dict()\n \n dct_price = df['price'].to_dict()\n \n \"\"\" Ratio stuff \"\"\" \n from input.create_ratio import get_ratio\n dct_ratio_dom = get_ratio(which='domain_id')\n \n ratio_df = get_ratio(which='item_id',full=True)\n ratio_df['popularity'] = 100.0*ratio_df['bought'] + ratio_df['searched']\n dct_ratio_item_b = ratio_df['popularity'].to_dict()\n \n \n \n \"\"\"\n JSON\n \n \"\"\"\n check = lambda x: x <= np.round(413163*0.8).astype(np.int32)\n \n DATA_PATH = path.join(DATA_DIR,'train_dataset.jl')\n line_i = 0\n \n \n\n \"\"\"\n Create graph vertices\n \"\"\"\n g = ig.Graph() \n from input.read_input import get_mappings\n counter, f_map_func, r_map_func = get_mappings()\n \n num_items = df.shape[0]\n for k in dct_title.keys():\n g.add_vertex(value=k,deg=dct_ratio_item_b[k],domain_id=dct_domain[k],price=dct_price[k],cat='item_id')\n\n \"\"\" ['item_id','domain_id','category_id','product_id'] \"\"\"\n \n for k in pd.unique(df['domain_id']):\n g.add_vertex(value=k,cat='domain_id')\n\n\n for k in pd.unique(df['category_id']):\n g.add_vertex(value=k,cat='category_id')\n\n\n for k in pd.unique(df['product_id']):\n g.add_vertex(value=k,cat='product_id')\n\n \n \n \"\"\"\n Create edges\n \"\"\"\n E1 = []\n E2 = []\n \n with jsonlines.open(DATA_PATH) as reader:\n for line_i, obj in enumerate(reader):\n if check(line_i):\n print(line_i)\n L = []\n for h in obj['user_history']:\n if h['event_type'] == 'view':\n #print(\"Viewed {}\".format(dct[h['event_info']]))\n L.append(h['event_info'])\n elif h['event_type'] == 'search':\n #print(\"Searched {}\".format(h['event_info']))\n pass\n L_domain = [dct_domain[k] for k in L]\n L_domain = pd.unique(L_domain)\n L_cat = [dct_cat[k] for k in L]\n L_cat = pd.unique(L_cat)\n \n for i in range(len(L)):\n E1.append(dct_domain[L[i]])\n E2.append(dct_domain[obj['item_bought']] )\n\n \n \n E1 = f_map_func['domain_id'](E1)\n E2 = f_map_func['domain_id'](E2)\n \n \n E = pd.Series(list(zip(E1,E2))).value_counts()\n g.add_edges(E.index)\n g.es[\"weight\"] = E.values\n \n \n g.write_pickle(fname=path.join(DATA_DIR,'graph_domain_to_domain.pkl'))",
"def show_custom_graph(self):\n pass",
"def CreateGraph(graph_def):\n option = GetGlobalOptions()\n LogMetaGraph(graph_def)\n ExportMetaGraph(graph_def)\n return _C.CreateGraph(\n _stringify_proto(graph_def),\n option['log_optimized_graph'],\n )",
"def plot_graph(self) -> None:",
"def print_graph() -> None:\n raise NotImplementedError",
"def generate_graph(self, z, upsample=False):\n outputs = self._module(z, signature='generate', as_dict=True)\n return outputs['upsampled' if upsample else 'default']",
"def buildGraph(self):\n return None",
"def ggraph(variables, weight, tempname, title, missing, alldatacolor):\n\n varspecstr = \" \".join(variables)\n if weight:\n wt = \", weight(%(weight)s)\" % locals()\n else:\n wt = \"\"\n missingfunc = missing == \"variablewise\" and \"pairwise\" or \"listwise\"\n \n gg = r\"\"\"GGRAPH /GRAPHDATASET NAME=\"graphdataset\" MISSING=%(missing)s\nVARIABLES= %(varspecstr)s\n/GRAPHDATASET NAME=\"csvdataset\" CSVFILE=\"%(tempname)s\"\n/GRAPHSPEC SOURCE=INLINE EDITABLE=NO DEFAULTTEMPLATE=NO LABEL=\"%(title)s\"\ninlinetemplate='<addFrame count=\"1\" type=\"subtitle\">'+\n'<location left=\"0%%\" right=\"100%%\" top=\"0%%\" bottom=\"0.2in\"/>'+\n'<style color=\"%(alldatacolor)s\" color2=\"transparent\" opacity=\"0.20\"/>'+\n'<label><style number=\"0\" font-weight=\"bold\" color=\"black\" /></label>'+\n'</addFrame>'.\nBEGIN GPL\nSOURCE: s=userSource(id(\"graphdataset\"))\nSOURCE: t=csvSource(file(\"%(tempname)s\"), missing.%(missingfunc)s() %(wt)s)\"\"\" % locals()\n return gg",
"def _setup_graph(self):\n pass",
"def _setup_graph(self):\n pass",
"def generate(self, diagram):",
"def _get_full_graph(self):",
"def createGraph(self):\n self.measurements(45,50,10)\n avg = self.readFile(\"avg.pickle\")\n table = []\n for a in avg:\n table.append((a[0], a[1], a[2], a[3], a[4], \"Boolean\"))\n table.append((a[0], a[1], a[2], a[5], a[6], \"Fractional\"))\n table.append((a[0], a[1], a[2], a[7], a[8], \"Hierarchical\"))\n df = pd.DataFrame(table)\n df.columns = [\"nPages\", \"nCentroids\", \"Time\", \"Mean\", \"Std\", \"Type\"]\n print(df)\n sns.set(style = 'darkgrid')\n sns.lmplot(x = \"nCentroids\", y = \"Mean\", col = \"Type\", hue=\"Type\", data = df)\n #sns.lmplot(x = \"nPages\", y = \"Mean\", col = \"Type\", hue=\"Type\", data = df)\n #sns.scatterplot(x = \"nCentroids\", y = \"Mean\", size = \"Time\", hue = \"Type\", sizes = (20, 200), data = df)\n #sns.scatterplot(x = \"nPages\", y = \"Mean\", size = \"Time\", hue = \"Type\", sizes = (20, 200), data = df)\n plt.show()",
"def generate_graph(self):\n\t\tif self.joins == None:\n\t\t\tself.get_joins()\n\t\tprint('generating Networkx DiGraph object of {database} from query results'.format(**self.__dict__))\n\t\t# save distinct Child column values\n\t\tchilds = set([j.Child for j in self.joins])\n\t\t# save distinct Parent column values\n\t\tparents = set([j.Parent for j in self.joins])\n\t\t# save names of Leaf tables\n\t\tleafs = list(childs - parents)\n\t\tself._traverse_joins(leafs)",
"def CreateGraph(meta_graph):\n LogMetaGraph(meta_graph)\n ExportMetaGraph(meta_graph)\n CreateGraphCC(_stringify_proto(meta_graph))\n LogOptimizedGraph(meta_graph)",
"def create_test_graph():\n return {\n 'A': ['C', 'B'],\n 'B': ['D', 'E'],\n 'C': ['M', 'F'],\n 'D': ['G'],\n 'E': ['H', 'I'],\n 'F': ['E', 'J'],\n 'G': [],\n 'H': [],\n 'I': [],\n 'J': [],\n 'K': ['D'],\n 'L': ['F'],\n 'M': ['C']\n }"
] |
[
"0.6500455",
"0.6189144",
"0.61473995",
"0.6083819",
"0.6010887",
"0.595251",
"0.5880676",
"0.5839304",
"0.5770262",
"0.5770068",
"0.57431555",
"0.5741383",
"0.57074356",
"0.5654323",
"0.5578547",
"0.5577143",
"0.5572333",
"0.55684257",
"0.5562661",
"0.5557265",
"0.5539527",
"0.5536529",
"0.5535401",
"0.5535401",
"0.55333817",
"0.5531861",
"0.55284363",
"0.55175394",
"0.55160344",
"0.5501228"
] |
0.67145634
|
0
|
Since the bcbio object does not retain all of the information some templates need, this finds and adds the additional information, then fills the template file and writes the result as the output file.
|
def __fill_template__(self,template_file,output_fname):
dictionary = {}
for k,v in self.__dict__.iteritems():
if k == 'sample_key':
try:
int(v)
new_sample_key = "Sample_" + str(v)
dictionary.update({k:new_sample_key})
continue
except ValueError:
pass
dictionary.update({k:str(v)})
dictionary.update({'restats_tail': self.restats_file + '.tail'})
with open(output_fname,'w') as f:
string = fill_template(template_file,dictionary)
f.write(string)
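
A minimal sketch of the fill_template helper the method above delegates to (the real helper is not shown in this record, so the $placeholder substitution style below is an assumption; note the method itself uses Python 2 iteritems(), which would be items() under Python 3):

from string import Template

def fill_template(template_file, dictionary):
    # Hypothetical stand-in for the helper called by __fill_template__ above.
    # Reads the raw template text and substitutes each $key placeholder with
    # the corresponding string value from the dictionary.
    with open(template_file) as f:
        template = Template(f.read())
    # safe_substitute leaves unrecognised placeholders in place instead of raising.
    return template.safe_substitute(dictionary)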
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def Write(self):\n template_mappings = {}\n\n template_file = os.path.join(self._l2tdevtools_path, self._TEMPLATE_FILE)\n file_content = self._GenerateFromTemplate(template_file, template_mappings)\n\n file_content = file_content.encode('utf-8')\n\n with open(self.PATH, 'wb') as file_object:\n file_object.write(file_content)",
"def write_template_body1(template_filename):\n template_type = template_filename.split('/')[-1].split('_')[0]\n template_file = open(template_filename, 'a')\n template_file.write('<body>\\n') \n template_file.write('<div id=\"pageTitle\">\\n')\n template_file.write('<?php echo $stat_title; ?>\\n') \n template_file.write('</div>\\n')\n template_file.write('<div class=\"page-menu\"><div class=\"table\">\\n')\n template_file.write(' <div class=\"element\">\\n')\n template_file.write(' <span class=\"bold\">Basin:</span>\\n')\n template_file.write(\n ' <select id=\"maptype\" '\n +'onchange=\"changeMaptype(this.value)\"></select>\\n'\n )\n template_file.write(' </div>\\n')\n template_file.write(' <div class=\"element\">\\n')\n template_file.write(' <span class=\"bold\">Name:</span>\\n')\n template_file.write(\n ' <select id=\"domain\" '\n +'onchange=\"changeDomain(this.value);\"></select>\\n'\n )\n template_file.write(' </div>\\n')\n template_file.write(' <div class=\"element\">\\n')\n template_file.write(\n ' <span class=\"bold\">Forecast Lead:</span>\\n'\n )\n template_file.write(\n ' <select id=\"variable\" '\n +'onchange=\"changeVariable(this.value)\"></select>\\n'\n )\n template_file.write(' </div>\\n')\n template_file.write('</div></div>\\n')\n template_file.write('\\n')\n template_file.write('<!-- Middle menu -->\\n')\n template_file.write('<div class=\"page-middle\" id=\"page-middle\">\\n')\n template_file.write(\n 'Left/Right arrow keys = Change forecast lead | Up/Down arrow keys '\n +'= Change Storm\\n'\n )\n template_file.write(\n '<br>For information on tropical cyclone verification, '\n +'<button class=\"infobutton\" id=\"myBtn\">click here</button>\\n'\n )\n template_file.write('<div id=\"myModal\" class=\"modal\">\\n')\n template_file.write(' <div class=\"modal-content\">\\n')\n template_file.write(' <span class=\"close\">×</span>\\n')\n template_file.write(' Tropical Cyclone Verification Information\\n')\n template_file.write(\n ' <embed width=100% height=100% src=\"../main.php\">\\n'\n )\n template_file.write(' </div>\\n')\n template_file.write('</div>\\n')\n template_file.write('<!-- /Middle menu -->\\n')\n template_file.write('</div>\\n')\n template_file.write('\\n')\n template_file.write(\n '<div id=\"loading\"><img style=\"width:100%\" '\n +'src=\"../../images/loading.png\"></div>\\n'\n )\n template_file.write('\\n')\n template_file.write('<!-- Image -->\\n')\n template_file.write('<div id=\"page-map\">\\n')\n template_file.write(' <image name=\"map\" style=\"width:100%\">\\n')\n template_file.write('</div>\\n')\n template_file.write('\\n')\n template_file.write('<script type=\"text/javascript\">\\n')\n template_file.write('// Get the modal\\n')\n template_file.write('var modal = document.getElementById(\"myModal\");\\n')\n template_file.write('\\n')\n template_file.write('// Get the button that opens the modal\\n')\n template_file.write('var btn = document.getElementById(\"myBtn\");\\n')\n template_file.write('\\n')\n template_file.write('// Get the <span> element that closes the modal\\n')\n template_file.write(\n 'var span = document.getElementsByClassName(\"close\")[0];\\n'\n )\n template_file.write('\\n')\n template_file.write(\n '// When the user clicks the button, open the modal\\n'\n )\n template_file.write('btn.onclick = function() {\\n')\n template_file.write(' modal.style.display = \"block\";\\n')\n template_file.write('}\\n')\n template_file.write('\\n')\n template_file.write(\n '// When the user clicks on <span> (x), close the modal\\n'\n )\n 
template_file.write('span.onclick = function() {\\n')\n template_file.write(' modal.style.display = \"none\";\\n')\n template_file.write('}\\n')\n template_file.write('\\n')\n template_file.write(\n '// When the user clicks anywhere outside of the modal, close it\\n'\n )\n template_file.write('window.onclick = function(event) {\\n')\n template_file.write(' if (event.target == modal) {\\n')\n template_file.write(' modal.style.display = \"none\";\\n')\n template_file.write(' }\\n')\n template_file.write('}\\n')\n template_file.write(\n '//======================================================='\n +'=============================================\\n'\n )\n template_file.write('//User-defined variables\\n')\n template_file.write(\n '//======================================================='\n +'=============================================\\n'\n )\n template_file.write('\\n')\n template_file.write('//Global variables\\n')\n template_file.write(\n 'var minFrame = 0; //Minimum frame for every variable\\n'\n )\n template_file.write(\n 'var maxFrame = 26; //Maximum frame for every variable\\n'\n )\n template_file.write(\n 'var incrementFrame = 1; //Increment for every frame\\n'\n )\n template_file.write('\\n')\n template_file.write('var startFrame = 0; //Starting frame\\n')\n template_file.write('\\n')\n template_file.write('var cycle = 2018100600\\n')\n template_file.write('\\n')\n template_file.write('/*\\n')\n template_file.write(\n 'When constructing the URL below, DDD = domain, VVV = variable, '\n +'LLL = level, SSS = season, Y = frame number.\\n'\n )\n template_file.write(\n 'For X and Y, labeling one X or Y represents an integer '\n +'(e.g. 0, 10, 20). Multiple of these represent a string\\n'\n )\n template_file.write(\n 'format (e.g. XX = 00, 06, 12 --- XXX = 000, 006, 012).\\n'\n )\n template_file.write('*/\\n')\n template_file.write(\n 'var url = \"<?php echo $'+template_type+'_url; ?>\";\\n'\n )\n template_file.write('\\n')\n template_file.write(\n '//======================================================='\n +'=============================================\\n'\n )\n template_file.write('//Add variables & domains\\n')\n template_file.write(\n '//======================================================='\n +'=============================================\\n'\n )\n template_file.write('\\n')\n template_file.write('var variables = [];\\n')\n template_file.write('var domains = [];\\n')\n template_file.write('var levels = [];\\n')\n template_file.write('var seasons = [];\\n')\n template_file.write('var maptypes = [];\\n')\n template_file.write('var validtimes = [];\\n')\n template_file.write('\\n')\n template_file.write('\\n')\n template_file.close()",
"def create_page(self):\n with open(self.outfile, 'w') as outfile, open(self.template, 'r') as infile:\n for row in infile:\n if '<!--rows go here-->' in row:\n self.__write_row(outfile)\n else:\n outfile.write(row)",
"def __fill_qsub_file__(self,configs):\n template_file= os.path.join(configs['system'].get('Common_directories','template'),configs['pipeline'].get('Template_files','flowcell_report'))\n dictionary = {}\n for k,v in self.__dict__.iteritems():\n dictionary.update({k:str(v)})\n dictionary.update({'post_pipeline':configs['pipeline'].get('Db_reports','post_pipeline')})\n dictionary.update({'concord_script':configs['pipeline'].get('Flowcell_reports','concord_script')})\n dictionary.update({'dbsnp_script':configs['pipeline'].get('Flowcell_reports','dbsnp_script')})\n dictionary.update({'tenx_script':configs['pipeline'].get('Flowcell_reports','tenx_script')})\n dictionary.update({'zero_script':configs['pipeline'].get('Flowcell_reports','zero_script')})\n dictionary.update({'hethom_script':configs['pipeline'].get('Flowcell_reports','hethom_script')})\n dictionary.update({'reads_script':configs['pipeline'].get('Flowcell_reports','reads_script')})\n with open(self.qsub_file,'w') as f:\n f.write(fill_template(template_file,dictionary))",
"def __write_file_from_template(self, file, template, macros):\n create_dir(file)\n with open(file, 'a') as db, open(template, 'r') as template:\n for line in template:\n db.write(self.__expand_macros(line, macros))",
"def __fill_all_templates__(self,configs):\n template_dir = configs['system'].get('Common_directories','template')\n sample_template = os.path.join(template_dir,configs['pipeline'].get('Template_files','sample'))\n system_template = os.path.join(template_dir,configs['pipeline'].get('Template_files','system'))\n qsub_template = os.path.join(template_dir,configs['pipeline'].get('Template_files','bcbio'))\n self.__fill_template__(sample_template,self.sample_file)\n self.__fill_template__(system_template,self.systems_file)\n self.__fill_template__(qsub_template,self.qsub_file)",
"def writeDomainFile():\n writeTemplate(localTemplate)",
"def WriteAeroDynTemplate(TurbDict,TmplDir,ModlDir,AeroDir,WrDir,\n verbose=0):\n \n TurbName = TurbDict['TurbName']\n if verbose:\n sys.stdout.write('\\nWriting AeroDyn v13 template' + \\\n ' for turbine {:s}...'.format(TurbName))\n \n # define path to base template and output filename\n fpath_temp = os.path.join(TmplDir,'Template_AD.ipt')\n fpath_out = os.path.join(WrDir,TurbName + '_AD_template.ipt')\n \n # get list of keys to skip (they depend on wind file)\n version, FastFlag = 7, 0\n windfile_keys = GetWindfileKeys(version,FastFlag)\n \n # open template file and file to write to\n with open(fpath_temp,'r') as f_temp:\n with open(fpath_out,'w') as f_write:\n \n # read each line in template file\n for r_line in f_temp:\n \n # default to copying without modification\n w_line = r_line\n \n # if line has a write-able field\n if ('{:' in r_line):\n \n # get fieldname, format for value, and remaining string\n field = r_line.split()[1]\n value_format = r_line.split(field)[0]\n comment = r_line.split(field)[-1]\n \n # check if comment line\n if ('ADCmnt' in field):\n w_line = TurbDict[field] + '\\n'\n \n # if foilnames, print them all with path to AeroDir\n elif (field == 'FoilNm'):\n FoilNames = TurbDict[field]\n \n # print first foilname manually\n FoilPath = os.path.join(AeroDir,FoilNames[0])\n w_line = field.join([value_format.format(FoilPath),\n comment])\n f_write.write(w_line) \n \n # loop through remaining airfoils\n for i_line in range(1,len(TurbDict['FoilNm'])):\n FoilPath = os.path.join(AeroDir,FoilNames[i_line])\n f_write.write('\\\"{:s}\\\"\\n'.format(FoilPath))\n w_line = ''\n \n # if AeroDyn schedule, print it\n elif (field == 'ADSched'): \n \n # loop through remaining airfoils\n for i_line in range(0,len(TurbDict['ADSched'])):\n w_line = value_format.format( \\\n *TurbDict['ADSched'][i_line])\n f_write.write(w_line + '\\n')\n w_line = ''\n \n # if key is not to be skipped\n elif (field not in windfile_keys):\n value = TurbDict[field]\n w_line = field.join([value_format.format(value),\n comment])\n \n f_write.write(w_line)\n \n if verbose:\n print('done.')\n \n return",
"def update_template():\n\n # Open, and read, the template file\n with open(\"template.html\", \"r\") as f:\n soup = BeautifulSoup(f.read(), features=\"html5lib\")\n\n # Add the plots in the correct places\n for div in soup.find_all(\"div\", class_=\"plot\"):\n with open(div[\"src\"], \"r\") as f:\n plot = BeautifulSoup(f.read(), features=\"html5lib\")\n div.replace_with(plot.html.body.div)\n\n # Write the finished report to document.html\n with open(\"document.html\", \"w\") as f:\n f.write(soup.prettify())",
"def create_base_templates(outdir, templateEnv):\n for file in ME_TEMPLATES:\n filename = os.path.join(outdir, ME_FILENAME.format(file))\n template = templateEnv.get_template(file + '.go.jinja')\n\n with open(filename, 'w') as f:\n output = template.render(copyright=COPYRIGHT,\n generator_warning=GENERATOR_WARNING,\n package_name=PACKAGE_NAME)\n f.write(output)\n pass",
"def _ProcessTemplate(self,topdir):\n self.dicomdir = \"%s/anatomicals\" % self.topdir\n self.rawdir = \"%s/raw\" % topdir\n self.rawdirs = {}\n tmplt = self._GetTemplate()\n if self.opts.outdir is not None:\n# Override template output directory.\n tmplt['top_outdir'] = self.opts.outdir\n self.tmplt = tmplt\n if len(tmplt['top_outdir']) == 0:\n tmplt['top_outdir'] = os.path.realpath(self.topdir)\n raise RuntimeError('Template file must specify an output directory.')\n tmplt['top_outdir'] = os.path.realpath(tmplt['top_outdir'])\n if '/home' in tmplt['top_outdir'][:7]:\n raise RuntimeError('Image data cannot be stored in the /home partition. Change the \"top_outdir\" entry in the template file: %s.' % (' '.join(self.templates)))\n# tmplt['subject'] = 'orig'\n self.procdir = os.path.abspath(\"%s/%s\" % \\\n (tmplt['top_outdir'],tmplt['subject']))\n target = os.path.abspath('%s/../..' % tmplt['top_outdir'])\n if not ismounted(target):\n raise RuntimeError('Could not access partition at %s' % target)\n\n self.anatdir = \"%s/anat\" % self.procdir\n self.fmapdir = \"%s/%s\" % (self.procdir,tmplt['fmap']['outdir'])\n self.dtidir = \"%s/%s\" % (self.procdir,tmplt['dti']['outdir'])\n self.logdir = \"%s/%s\" % (self.procdir,tmplt['logdir'])\n self.skip = tmplt.get('skip', DEFAULT_SKIP)\n self.acq_tr = tmplt.get('acq_tr',None)\n self.episetup_dir = \"%s/%s\" % (self.procdir,tmplt['first_epi'])\n self.fsl_cmpblty = tmplt.get('fsl_compatibility',False)\n self.epi_file_format = self.tmplt['epi_file_format']\n self.censor_thresh = tmplt.get('censor_threshold', 2.)\n self.censor_interleave = tmplt.get('censor_interleave', True)\n# self.server_userid = self.tmplt.get('server_userid','default')\n\n# Overide flags for aligning EPIs and skull-stripping with command-\n# line options.\n if self.opts.align_fmaps:\n self.align_fmaps = True\n else:\n self.align_fmaps = self.tmplt.get('epi_align', False)\n\n if self.opts.no_align_fmaps:\n self.no_align_fmaps = True\n else:\n self.no_align_fmaps = self.tmplt.get('no_epi_align', False)\n\n if self.opts.skull_strip:\n self.skull_strip = True\n else:\n self.skull_strip = self.tmplt.get('skull_strip', False)\n\n# Create log file now so it can be used immediately.\n if not os.path.exists(self.logdir):\n if self.verbose:\n print 'mkdir %s' % self.logdir\n if not self.opts.fake_opts:\n self.MakeDir(self.logdir)\n\n self._ProcessTemplateEpiInfo()",
"def create_file_from_template(self, file_path, template_path, context_variables):\n if os.path.exists(file_path):\n print(\"\\033[91m\" + file_path + \" already exists. Skipping.\" + \"\\033[0m\")\n return\n with open(file_path, 'w') as new_file:\n new_file.write(get_template(template_path).render(Context(context_variables)))\n print(\"\\033[92m\" + \"successfully baked \" + file_path + \"\\033[0m\")",
"def write_template_body2(template_filename):\n template_type = template_filename.split('/')[-1].split('_')[0]\n basin = template_filename.split('/')[-1].split('_')[1].replace('.php', '')\n template_file = open(template_filename, 'a')\n template_file.write('domains.push({\\n')\n template_file.write(' displayName: \"All\",\\n')\n template_file.write(' name: \"'+basin+'\",\\n')\n template_file.write('});\\n')\n template_file.write('\\n')\n template_file.write('\\n')\n template_file.write('variables.push({\\n')\n template_file.write(' displayName: \"Mean\",\\n')\n template_file.write(' name: \"<?php echo $LeadMean_name; ?>\",\\n')\n template_file.write('});\\n')\n template_file.write('\\n')\n template_file.write('\\n')\n template_file.write('maptypes.push({\\n')\n template_file.write(' url: \"'+template_type+'_AL.php\",\\n')\n template_file.write(' displayName: \"Atlantic\",\\n')\n template_file.write(' name: \"'+template_type+'_AL\",\\n')\n template_file.write('});\\n')\n template_file.write('maptypes.push({\\n')\n template_file.write(' url: \"'+template_type+'_CP.php\",\\n')\n template_file.write(' displayName: \"Central Pacific\",\\n')\n template_file.write(' name: \"'+template_type+'_CP\",\\n')\n template_file.write('});\\n')\n template_file.write('maptypes.push({\\n')\n template_file.write(' url: \"'+template_type+'_EP.php\",\\n')\n template_file.write(' displayName: \"Eastern Pacific\",\\n')\n template_file.write(' name: \"'+template_type+'_EP\",\\n')\n template_file.write('});\\n')\n template_file.write('maptypes.push({\\n')\n template_file.write(' url: \"'+template_type+'_WP.php\",\\n')\n template_file.write(' displayName: \"Western Pacific\",\\n')\n template_file.write(' name: \"'+template_type+'_WP\",\\n')\n template_file.write('});\\n')\n template_file.write('\\n')\n template_file.write(\n '//======================================================='\n +'=============================================\\n'\n )\n template_file.write('//Initialize the page\\n')\n template_file.write(\n '//======================================================='\n +'=============================================\\n'\n )\n template_file.write('//function for keyboard controls\\n')\n template_file.write('document.onkeydown = keys;\\n')\n template_file.write('\\n')\n template_file.write(\n '//Decare object containing data about the currently displayed map\\n'\n )\n template_file.write('imageObj = {};\\n')\n template_file.write('\\n')\n template_file.write('//Initialize the page\\n')\n template_file.write('initialize();\\n')\n template_file.write('\\n')\n template_file.write(\n '//Format initialized run date & return in requested format\\n'\n )\n template_file.write('function formatDate(offset,format){\\n')\n template_file.write(' var newdate = String(cycle);\\n')\n template_file.write(' var yyyy = newdate.slice(0,4)\\n')\n template_file.write(' var mm = newdate.slice(4,6);\\n')\n template_file.write(' var dd = newdate.slice(6,8);\\n')\n template_file.write(' var hh = newdate.slice(8,10);\\n')\n template_file.write(\n ' var curdate = new Date(yyyy,parseInt(mm)-1,dd,hh)\\n'\n )\n template_file.write('\\n')\n template_file.write('\\n')\n template_file.write(' //Offset by run\\n')\n template_file.write(\n ' var newOffset = curdate.getHours() + offset;\\n'\n )\n template_file.write(' curdate.setHours(newOffset);\\n')\n template_file.write('\\n')\n template_file.write(\n ' var yy = String(curdate.getFullYear()).slice(2,4);\\n'\n )\n template_file.write(' yyyy = curdate.getFullYear();\\n')\n template_file.write(' mm 
= curdate.getMonth()+1;\\n')\n template_file.write(' dd = curdate.getDate();\\n')\n template_file.write(' if(dd < 10){dd = \"0\" + dd;}\\n')\n template_file.write(' hh = curdate.getHours();\\n')\n template_file.write(' if(hh < 10){hh = \"0\" + hh;}\\n')\n template_file.write('\\n')\n template_file.write(' var wkday = curdate.getDay();\\n')\n template_file.write(\n ' var day_str = [\"Sun\", \"Mon\", \"Tue\", \"Wed\", '\n +'\"Thu\", \"Fri\", \"Sat\"];\\n'\n )\n template_file.write('\\n')\n template_file.write(' //Return in requested format\\n')\n template_file.write(\" if(format == 'valid'){\\n\")\n template_file.write('//06Z Thu 03/22/18 (90 h)\\n')\n template_file.write(\n 'var txt = hh + \"Z \" + day_str[wkday] + \" \" + '\n +'mm + \"/\" + dd + \"/\" + yy;\\n'\n )\n template_file.write(' return txt;\\n')\n template_file.write(' }\\n')\n template_file.write('}\\n')\n template_file.write('\\n')\n template_file.write('//Initialize the page\\n')\n template_file.write('function initialize(){\\n')\n template_file.write('\\n')\n template_file.write(\n ' //Set image object based on default variables\\n'\n )\n template_file.write(' imageObj = {\\n')\n template_file.write(\n ' variable: \"<?php echo $LeadMean_name; ?>\",\\n'\n )\n template_file.write(' domain: \"'+basin+'\"\\n')\n template_file.write(' };\\n')\n template_file.write('\\n')\n template_file.write(\n ' //Change domain based on passed argument, if any\\n'\n )\n template_file.write(' var passed_domain = \"\";\\n')\n template_file.write(' if(passed_domain!=\"\"){\\n')\n template_file.write(\n ' if(searchByName(passed_domain,domains)>=0){\\n'\n )\n template_file.write(\n ' imageObj.domain = passed_domain;\\n'\n )\n template_file.write(' }\\n')\n template_file.write(' }\\n')\n template_file.write('\\n')\n template_file.write(\n ' //Change variable based on passed argument, if any\\n'\n )\n template_file.write(' var passed_variable = \"\";\\n')\n template_file.write(' if(passed_variable!=\"\"){\\n')\n template_file.write(\n ' if(searchByName(passed_variable,variables)>=0){\\n'\n )\n template_file.write(\n ' imageObj.variable = passed_variable;\\n'\n )\n template_file.write(' }\\n')\n template_file.write(' }\\n')\n template_file.write('\\n')\n template_file.write(\n ' //Populate forecast hour and dprog/dt arrays for this '\n +'run and frame\\n'\n )\n template_file.write(\" populateMenu('variable');\\n\")\n template_file.write(\" populateMenu('domain');\\n\")\n template_file.write(\" populateMenu('maptype')\\n\")\n template_file.write('\\n')\n template_file.write(' //Populate the frames arrays\\n')\n template_file.write(' frames = [];\\n')\n template_file.write(\n ' for(i=minFrame;i<=maxFrame;i=i+incrementFrame)'\n +'{frames.push(i);}\\n'\n )\n template_file.write('\\n')\n template_file.write(\n ' //Predefine empty array for preloading images\\n'\n )\n template_file.write(' for(i=0; i<variables.length; i++){\\n')\n template_file.write(' variables[i].images = [];\\n')\n template_file.write(' variables[i].loaded = [];\\n')\n template_file.write(' variables[i].dprog = [];\\n')\n template_file.write(' }\\n')\n template_file.write('\\n')\n template_file.write(' //Preload images and display map\\n')\n template_file.write(' preload(imageObj);\\n')\n template_file.write(' showImage();\\n')\n template_file.write('\\n')\n template_file.write(' //Update mobile display for swiping\\n')\n template_file.write(' updateMobile();\\n')\n template_file.write('\\n')\n template_file.write('}\\n')\n template_file.write('\\n')\n template_file.write('var xInit = 
null;\\n')\n template_file.write('var yInit = null;\\n')\n template_file.write('var xPos = null;\\n')\n template_file.write('var yPos = null;\\n')\n template_file.write('\\n')\n template_file.write('</script>\\n')\n template_file.write('\\n')\n template_file.write('</body>\\n')\n template_file.write('</html>\\n')\n template_file.close()",
"def WriteTowerFile(TurbDict,TmplDir,WrDir,\n verbose=0):\n \n TurbName = TurbDict['TurbName']\n if verbose:\n sys.stdout.write('\\nWriting FAST v7.02 tower' + \\\n ' file for turbine {:s}...'.format(TurbName))\n \n # define path to base template and output file\n fpath_temp = os.path.join(TmplDir,'Template_Tower.dat')\n fname_out = TurbDict['TwrFile']\n fpath_out = os.path.join(WrDir,fname_out)\n \n # open template file and file to write to\n with open(fpath_temp,'r') as f_temp:\n with open(fpath_out,'w') as f_write:\n \n # read each line in template file\n for r_line in f_temp:\n \n # default to copying without modification\n w_line = r_line\n \n # if line has a write-able field\n if ('{:' in r_line):\n \n # get fieldname, format for value, and remaining string\n field = r_line.split()[1]\n value_format = r_line.split(field)[0]\n comment = r_line.split(field)[-1]\n \n # check if comment line\n if ('TwrCmnt' in field):\n w_line = TurbDict[field] + '\\n'\n \n # if blade schedule\n elif (field == 'TwrSched'):\n \n TwrSched = TurbDict['TwrSched']\n \n # loop blade schedule\n for i_line in range(len(TwrSched)):\n w_line = value_format.format( \\\n *TwrSched[i_line])\n f_write.write(w_line + '\\n')\n w_line = ''\n \n # otherwise, print key normally\n else:\n# TODO: add try/except to load default value if field not in dictionary\n value = TurbDict[field]\n w_line = field.join([value_format.format(value),\n comment])\n \n f_write.write(w_line)\n \n if verbose:\n sys.stdout.write('done.\\n')\n\n return",
"def _write_context_to_file(self, context):\n om.out.debug('[xml_file.flush()] Starting _write_context_to_file()')\n\n template = self._jinja2_env.get_template('root.tpl')\n\n # We use streaming as explained here:\n #\n # http://flask.pocoo.org/docs/0.12/patterns/streaming/\n #\n # To prevent having the whole XML in memory\n # pylint: disable=E1101\n report_stream = template.stream(context)\n report_stream.enable_buffering(3)\n # pylint: enable=E1101\n\n # Write everything to a temp file, this is useful in two cases:\n #\n # * An external tool will always see a valid XML in the output,\n # and not just a partially written XML document.\n #\n # * If w3af is killed in the middle of writing the XML report,\n # the report file will still be valid -- if xml_file.flush() was\n # run successfully at least once\n tempfh = NamedTemporaryFile(delete=False,\n prefix='w3af-xml-output',\n suffix='.xml')\n\n om.out.debug('[xml_file.flush()] write_context_to_file() created'\n ' template.stream and NamedTemporaryFile')\n\n try:\n # Write each report section to the temp file\n for report_section in report_stream:\n tempfh.write(report_section.encode(DEFAULT_ENCODING))\n except Exception:\n # No exception handling is done here, we just raise the exception\n # so that the core can handle it properly\n raise\n else:\n # Close the temp file so all the content is flushed\n tempfh.close()\n\n om.out.debug('[xml_file.flush()] write_context_to_file() starting to'\n ' copy temp file to destination')\n\n # Copy to the real output file\n report_file_name = os.path.expanduser(self._file_name)\n\n cmd = 'cp %s %s' % (tempfh.name, report_file_name)\n subprocess.call(cmd, shell=True)\n\n om.out.debug('[xml_file.flush()] write_context_to_file() finished copy'\n ' operation.')\n\n stat_info = os.stat(report_file_name)\n om.out.debug('The XML output file size is %s bytes.' % stat_info.st_size)\n\n finally:\n os.remove(tempfh.name)\n\n om.out.debug('[xml_file.flush()] write_context_to_file() finished')",
"def process_tempita(fromfile):\n if not fromfile.endswith('.in'):\n raise ValueError(\"Unexpected extension: %s\" % fromfile)\n\n from_filename = tempita.Template.from_filename\n template = from_filename(fromfile,\n encoding=sys.getdefaultencoding()) \n\n content = template.substitute()\n\n outfile = os.path.splitext(fromfile)[0]\n with open(outfile, 'w') as f:\n f.write(content)",
"def update():\n if Project.use_templates:\n defaults = _project_defaults()\n\n template = Template()\n\n for template_dir in [os.path.abspath(os.path.join(herringlib, 'herringlib', 'templates'))\n for herringlib in HerringFile.herringlib_paths]:\n\n info(\"template directory: %s\" % template_dir)\n # noinspection PyArgumentEqualDefault\n template.generate(template_dir, defaults, overwrite=False)",
"def _create_from_template(self):\n template_file = self._helper._get_template_file_path()\n self._engine.open_file_by_path(template_file)\n self._save_current_as_new()",
"def deploy(self):\n if not self._ini:\n self._load_template()\n if not self._ini:\n raise RuntimeError('Could not load template. __init__.ini missing or damaged.')\n if 'dirs' in self._ini:\n for dirname in self._ini['dirs']:\n comp_makedirs(os.path.join(self._path, dirname), exist_ok=True)\n if 'files' in self._ini:\n conf = ApplicationConf.get_instance()\n for filename in self._ini['files']:\n with comp_open(\n os.path.join(get_conf('DEFAULT_TEMPLATE_PATH'), self._template, filename),\n mode='r'\n ) as fp:\n content = fp.read()\n content = content.format(**conf)\n with comp_open(os.path.join(self._path, filename), mode='w') as wp:\n wp.write(content)\n if 'binaries' in self._ini:\n for filename in self._ini['binaries']:\n shutil.copy2(\n os.path.join(get_conf('DEFAULT_TEMPLATE_PATH'), self._template, filename),\n os.path.join(self._path, filename)\n )",
"def _write_template_to(self, tmpl_file, dest_file, template_dict):\n template = self.env.get_template(tmpl_file)\n mkdir_p(os.path.dirname(dest_file))\n logging.info(\"Writing: %s -> %s with %s\", tmpl_file, dest_file, template_dict)\n with open(dest_file, \"w\") as dfile:\n dfile.write(template.render(template_dict))",
"def _generate_from_template(self, name, path, context):\n template = self._templates.get_template(name)\n with open(path, 'w') as f:\n f.write(template.render(context))",
"def WriteBladeFiles(TurbDict,TmplDir,WrDir,\n verbose=0):\n \n TurbName = TurbDict['TurbName']\n if verbose:\n print('\\nWriting FAST v7.02 blade files for turbine {:s}...'.format(TurbName))\n \n # define path to base template\n fpath_temp = os.path.join(TmplDir,'Template_Blade.dat')\n \n # loop through blades\n for i_bl in range(1,int(TurbDict['NumBl'])+1):\n \n if verbose:\n sys.stdout.write(' Blade {:d}...'.format(i_bl))\n \n # get output paths and string appender for blade\n fname_out = TurbDict['BldFile({:d})'.format(i_bl)]\n fpath_out = os.path.join(WrDir,fname_out)\n bld_str = '_{:d}'.format(i_bl)\n \n # open template file and file to write to\n with open(fpath_temp,'r') as f_temp:\n with open(fpath_out,'w') as f_write:\n \n # read each line in template file\n for r_line in f_temp:\n \n # default to copying without modification\n w_line = r_line\n \n # if line has a write-able field\n if ('{:' in r_line):\n \n # get fieldname, format for value, and remaining string\n field = r_line.split()[1]\n value_format = r_line.split(field)[0]\n comment = r_line.split(field)[-1]\n \n # check if comment line\n if ('BldCmnt' in field):\n w_line = TurbDict[field + bld_str] + '\\n'\n \n # if blade schedule\n elif (field == 'BldSched'):\n \n BldSched = TurbDict['BldSched' + bld_str]\n \n # loop blade schedule\n for i_line in range(len(BldSched)):\n w_line = value_format.format( \\\n *BldSched[i_line])\n f_write.write(w_line + '\\n')\n w_line = ''\n \n # otherwise, print key normally\n else:\n # TODO: add try/except to load default value if field not in dictionary\n value = TurbDict[field + bld_str]\n w_line = field.join([value_format.format(value),\n comment])\n \n f_write.write(w_line)\n \n if verbose:\n sys.stdout.write('done.\\n')\n \n return",
"def create_document(self, output):\n if not os.path.exists(self.template_path):\n raise IOError('Template file not found.')\n\n documents = []\n with open(self.template_path, 'rb') as f:\n data = f.read()\n template = Template(to_unicode(data))\n indent_targets = ['params', 'response_body']\n for v in self.vars:\n if self.template_path.endswith('.rst'):\n for k in indent_targets:\n lines = v[k].split('\\n')\n ret = []\n for i, l in enumerate(lines):\n if i > 0:\n ret.append(' {0}'.format(l).rstrip())\n else:\n ret.append(l)\n v[k] = '\\n'.join(ret)\n\n document = template.substitute(v)\n documents.append(document)\n\n with open(output, 'w') as f:\n f.write('\\n'.join(documents))",
"def WriteFAST7Template(TurbDict,TmplDir,ModlDir,WrDir,\n verbose=0):\n\n TurbName = TurbDict['TurbName']\n if verbose:\n sys.stdout.write('\\nWriting FAST 7.02 template for turbine {:s}...'.format(TurbName))\n \n # define path to base template and output filename\n fpath_temp = os.path.join(TmplDir,'Template.fst')\n fpath_out = os.path.join(WrDir,TurbName+'_template.fst')\n \n # get list of special .fst keys\n version, FastFlag = 7, 1\n windfile_keys = GetWindfileKeys(version,FastFlag) # skip - depend on wind file\n inputfile_keys = GetInputFileKeys(version) # add directory to fname\n \n # open base template file and file to write to (turbine-specific template)\n with open(fpath_temp,'r') as f_temp:\n with open(fpath_out,'w') as f_write:\n \n # read each line in template file\n for r_line in f_temp:\n \n # default to copying without modification\n w_line = r_line\n \n # if line has a write-able field\n if ('{:' in r_line):\n \n # get fieldname, format for value, and remaining string\n field = r_line.split()[1]\n value_format = r_line.split(field)[0]\n comment = r_line.split(field)[-1]\n \n # check if comment line\n if ('FASTCmnt' in field):\n w_line = TurbDict[field] + '\\n'\n \n # check if OutList\n elif (field == 'OutList'):\n for i_line in range(len(TurbDict['OutList'])-1):\n f_write.write(TurbDict['OutList'][i_line])\n w_line = TurbDict['OutList'][-1]\n \n # check if quadratic torque constant (may need to truncate)\n elif (field == 'VS_Rgn2K'):\n Rgn2K = int(1e6 * TurbDict[field])/float(1e6)\n value = Rgn2K\n w_line = field.join([value_format.format(value),\n comment])\n \n # otherwise, if key is not to be skipped\n elif (field not in windfile_keys):\n value = TurbDict[field]\n \n # if key is a used input file, add path to model directory\n if ((field in inputfile_keys) and ('unused' not in value)):\n value = os.path.join(ModlDir,value)\n \n w_line = field.join([value_format.format(value),\n comment])\n \n f_write.write(w_line)\n# TODO: add check for proper tower/bladgagnde handling (currently does not do it right) \n \n if verbose:\n print('done.')\n \n return",
"def process_file(src_file, dest_file):\n # read data\n with open(src_file) as fil:\n new_data = fil.read()\n # generate a chain of templates\n parent_template = None\n current_template = dest_file\n cursor = 1\n if EXTEND_FLAG in new_data:\n new_data = new_data.replace(EXTEND_FLAG, \"\")\n while exists(current_template):\n parent_template = current_template\n current_template = \"%s%s%d\" % (dest_file, CHILD_TPL_FLAG, cursor)\n cursor += 1\n # write data\n with open(current_template, \"w\") as fil:\n if parent_template:\n # in the chain of templates each has to extend one another\n new_data = \"\\n\".join([\n \"{%% extends \\\"%s\\\" %%}\" % parent_template,\n new_data\n ])\n fil.write(new_data)",
"def get_templates(self):\n\n\t\tif not os.path.isdir('./repo'): os.mkdir('./repo')\n\t\ttemps = self.settings['template']\n\t\t#---ensure that the template object is always in a list\n\t\tif len(temps) == 2 and type(temps[0])==str and type(temps[1])==str: temps = [temps]\n\t\tself.template = []\n\t\tfor t in temps:\n\t\t\tprint 'retrieving '+str(t[0])\n\t\t\t#---check if in repo and move\n\t\t\tif not os.path.isfile(self.rootdir+t[0]+'.pdb') and os.path.isfile('./repo/'+t[0]+'.pdb'):\n\t\t\t\tcopy('./repo/'+t[0]+'.pdb',self.rootdir+t[0]+'.pdb')\n\t\t\t\t#---fasta retrieval is deprecated\n\t\t\t\tif 0: copy('./repo/'+t[0]+'.fasta',self.rootdir+t[0]+'.fasta')\n\t\t\telif not os.path.isfile(self.rootdir+t[0]+'.pdb'):\n\t\t\t\tresponse = urllib2.urlopen('http://www.rcsb.org/pdb/files/'+t[0]+'.pdb')\n\t\t\t\tpdbfile = response.read()\n\t\t\t\twith open(self.rootdir+t[0]+'.pdb','w') as fp: fp.write(pdbfile)\n\t\t\t\tcopy(self.rootdir+t[0]+'.pdb','./repo/'+t[0]+'.pdb')\n\t\t\tself.template.append(t)",
"def generate_file(a_file, template_file, kwargs):\r\n new_ob = open(a_file, \"w\")\r\n for line in open(template_file):\r\n line = line % kwargs\r\n new_ob.write(line)\r\n new_ob.close()",
"def generate_file(a_file, template_file, kwargs):\r\n new_ob = open(a_file, \"w\")\r\n for line in open(template_file):\r\n line = line % kwargs\r\n new_ob.write(line)\r\n new_ob.close()",
"def __preprocess(self, infile, outfile):\r\n with open(outfile, \"w\") as _outfile:\r\n _outfile.write(textwrap.dedent(\"\"\"\\\r\n /*\r\n * This file is dynamically generated and ignored by Git.\r\n * DO NOT MAKE CHANGES HERE. Instead, go edit its template:\r\n * %s\r\n */\r\n \"\"\" % infile))\r\n _outfile.write(Template(filename=str(infile)).render(env=self.__context()))",
"def updateTemplateFile(self, source, placeHolder, value):\n source_file = open(source).read()\n source_file = source_file.replace(placeHolder, value)\n updated_file = open(source, 'w')\n updated_file.write(source_file)\n updated_file.close()"
] |
[
"0.6611633",
"0.62254953",
"0.6149684",
"0.61076134",
"0.6078601",
"0.60680085",
"0.60608476",
"0.6021166",
"0.6015032",
"0.6012825",
"0.59981674",
"0.59652746",
"0.59523976",
"0.59220034",
"0.5841677",
"0.58407724",
"0.58137214",
"0.58121264",
"0.5810166",
"0.5808299",
"0.58006996",
"0.5799066",
"0.578325",
"0.57638305",
"0.57461876",
"0.5742759",
"0.573894",
"0.573894",
"0.57319355",
"0.57312286"
] |
0.6777061
|
0
|
Multiple templates are used for the bcbio process; this wraps filling all of them.
|
def __fill_all_templates__(self,configs):
template_dir = configs['system'].get('Common_directories','template')
sample_template = os.path.join(template_dir,configs['pipeline'].get('Template_files','sample'))
system_template = os.path.join(template_dir,configs['pipeline'].get('Template_files','system'))
qsub_template = os.path.join(template_dir,configs['pipeline'].get('Template_files','bcbio'))
self.__fill_template__(sample_template,self.sample_file)
self.__fill_template__(system_template,self.systems_file)
self.__fill_template__(qsub_template,self.qsub_file)
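
__fill_all_templates__ assumes two ConfigParser-style objects: a system config with a [Common_directories] section holding the template directory, and a pipeline config with a [Template_files] section naming each template. A minimal sketch of that assumed layout (the section and option names come from the calls above; the paths and filenames are illustrative only):

from configparser import ConfigParser

# Hypothetical configs matching the lookups in __fill_all_templates__.
system_cfg = ConfigParser()
system_cfg.read_dict({'Common_directories': {'template': '/opt/pipeline/templates'}})

pipeline_cfg = ConfigParser()
pipeline_cfg.read_dict({'Template_files': {
    'sample': 'sample_template.yaml',
    'system': 'system_template.yaml',
    'bcbio': 'bcbio_qsub_template.sh',
}})

configs = {'system': system_cfg, 'pipeline': pipeline_cfg}
# With these in hand, the method resolves e.g.
# /opt/pipeline/templates/sample_template.yaml and fills it into self.sample_file.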
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def create_base_templates(outdir, templateEnv):\n for file in ME_TEMPLATES:\n filename = os.path.join(outdir, ME_FILENAME.format(file))\n template = templateEnv.get_template(file + '.go.jinja')\n\n with open(filename, 'w') as f:\n output = template.render(copyright=COPYRIGHT,\n generator_warning=GENERATOR_WARNING,\n package_name=PACKAGE_NAME)\n f.write(output)\n pass",
"def render_all(pages):\n for page in pages:\n render_template(page['template'], page['output'], page['values'])",
"def _fill_template(\n self,\n template: Dict[Text, Any],\n template_vars: Dict[Text, Any]\n ) -> Dict[Text, Any]:\n\n line_object_keys = [\"quickReply\", \"items\", \"action\", \"template\", \"actions\"]\n\n if type(template) == list:\n for item in template:\n self._fill_template(item, template_vars)\n else:\n self._fill_template_text(template, template_vars)\n for key in line_object_keys:\n if key in template:\n self._fill_template(template[key], template_vars)\n\n return template",
"def fillBackgroundTemplates(opt):\n\n totalBkg={}\n templates=[]\n\n #import signal events\n data=ROOT.TChain('data')\n for f in [os.path.join(opt.input,x) for x in os.listdir(opt.input) if 'Data13TeV' in x]:\n if 'MuonEG' in f : continue\n data.AddFile(f)\n\n #define final preselection cuts\n cuts='xangle==%d'%opt.xangle\n if len(opt.presel) : cuts += ' && ' + opt.presel \n if opt.csiacc:\n csiCuts ='csi1>%f && csi1<%f && '%opt.csiacc[opt.xangle][0]\n csiCuts+='csi2>%f && csi2<%f'%opt.csiacc[opt.xangle][1]\n cuts=csiCuts if len(cuts)==0 else '{0} && {1}'.format(cuts,csiCuts)\n\n #loop over categories build templates\n for icat in range(len(opt.categs)):\n\n #apply category cuts\n categCut=opt.categs[icat]\n categCut=cuts if len(categCut)==0 else '%s && %s'%(categCut,cuts)\n catName='%s_a%d_%d'%(opt.chTag,opt.xangle,icat)\n\n print '\\t',catName,categCut\n\n #background modelling histos\n histos=[]\n data_obs=None\n for name,pfix in [('bkg_'+catName,'mix'),('bkg_%s_bkgShape'%catName,'mixem')]:\n\n templCuts=categCut.replace('csi1',pfix+'csi1')\n templCuts=templCuts.replace('csi2',pfix+'csi2')\n data.Draw('{0}mmiss >> h({1},{2},{3})'.format(pfix,opt.nbins,opt.mMin,opt.mMax),templCuts,'goff')\n h=data.GetHistogram()\n histos.append(h.Clone(name))\n histos[-1].SetDirectory(0)\n\n if len(histos)==1:\n totalBkg[icat]=h.Integral()\n if not opt.unblind :\n data_obs=h.Clone('data_obs_'+catName)\n data_obs.SetDirectory(0)\n\n h.Reset('ICE')\n templates += defineProcessTemplates(histos)\n\n #observed data in this category if unblinding\n if opt.unblind:\n data.Draw('mmiss >> h({1},{2},{3})'.format(opt.nbins,opt.mMin,opt.mMax),categCut,'goff')\n data_obs=data.GetHistogram().Clone('data_obs_'+catName)\n data_obs.SetDirectory(0)\n\n templates.append(data_obs)\n\n print '\\t total background:',totalBkg\n return totalBkg,templates",
"def doMakeEyeballTemplate(self):\n \"\"\"\n returnList = []\n templObjNameList = []\n templHandleList = []\n \"\"\"\n try:\n log.debug(\">>> doMakeLimbTemplate\")\n assert self.cls == 'TemplateFactory.go',\"Not a TemlateFactory.go instance!\"\n\n #Gather limb specific data and check\n #==============\n mi_helper = self._mi_module.helper\n if not mi_helper:\n raise StandardError,\"No helper found!\"\n\n b_irisControl = mi_helper.irisHelper\n b_pupilControl = mi_helper.pupilHelper\n\n mi_helper.parent = self._mi_module.templateNull\n except Exception,error:raise Exception,\"doMakeEyeballTemplate | {0}\".format(error)\n\n\n return True",
"def prepare(self, **template_parameters: Any) -> None:\n\n self._body_plain = render_template(self._body_template_base_path + '.txt', **template_parameters)\n self._body_html = render_template(self._body_template_base_path + '.html', **template_parameters)",
"def _settemplates(self, onecol, twocol):\n\n self.template = \"\"\"\n <table class=\"f\"><tbody>\n %s\n %s\n </tbody></table>\n \"\"\" % (self.header, onecol)\n\n # This is suitable for two column width.\n self.wide_template = \"\"\"\n <table class=\"f\"><tbody>\n %s\n %s\n </tbody></table>\n \"\"\" % (self.header.replace(\"1\", \"2\"), twocol)",
"def render_templates(self):\n\n # dockerfile\n try:\n t = self.templates.get_template(\n 'docker/dockerfiles/{}.dockerfile.template'.format(self.repo)\n )\n except TemplateNotFound:\n t = self.templates.get_template(\n 'docker/dockerfiles/default.dockerfile.template'\n )\n\n self.files.append({\n 'name': 'Dockerfile',\n 'content': t.render(commit=self.commit),\n })\n\n # gunicorn\n t = self.templates.get_template(\n 'docker/gunicorn/gunicorn.conf.py'\n )\n self.files.append({\n 'name': 'gunicorn.conf.py',\n 'content': t.render(),\n })\n\n t = self.templates.get_template(\n 'docker/gunicorn/gunicorn.sh'\n )\n self.files.append({\n 'name': 'gunicorn.sh',\n 'content': t.render(),\n 'mode': 0555,\n })\n\n # nginx\n t = self.templates.get_template(\n 'docker/nginx/app.nginx.conf'\n )\n self.files.append({\n 'name': 'app.nginx.conf',\n 'content': t.render(),\n })\n\n t = self.templates.get_template(\n 'docker/nginx/nginx.sh'\n )\n self.files.append({\n 'name': 'nginx.sh',\n 'content': t.render(),\n 'mode': 0555,\n })\n\n # cron/, etc/ iif there exists a `self.repo` directory\n def _filter(p):\n return (\"cron/\" in p or \"etc/\" in p) and (self.repo in p) and \\\n (not os.path.basename(p).startswith('.'))\n\n for t in self.templates.list_templates(\n filter_func=_filter):\n\n self.files.append({\n 'name': os.path.basename(t),\n 'content': self.templates.get_template(t).render(),\n })",
"def _generate_and_load_initial_batch(self, working_directory: Path):\n\n template_dir = Path(working_directory) / \"template_1\"\n template_dir.mkdir()\n # changes here should often be reflected in\n # data_generator_opts and data_loader_opts\n\n channel_decl = self.channel_configs[0]\n\n plugin_options = {\n \"pid\": \"0\",\n \"big_ids\": \"True\",\n }\n # if it's efficient to do the whole load in one go, let's just do that.\n if self.run_until.gap < MIN_PORTION_SIZE:\n num_records = self.run_until.gap\n else:\n num_records = 1 # smallest possible batch to get to parallelizing fast\n results = self._generate_and_load_batch(\n template_dir,\n channel_decl.org_config,\n {\n \"generator_yaml\": self.options.get(\"recipe\"),\n \"num_records\": num_records,\n \"num_records_tablename\": self.run_until.sobject_name or COUNT_REPS,\n \"loading_rules\": self.loading_rules,\n \"vars\": channel_decl.merge_recipe_options(self.recipe_options),\n \"plugin_options\": plugin_options,\n \"bulk_mode\": self.bulk_mode,\n },\n )\n self.update_running_totals_from_load_step_results(results)\n\n # rename directory to reflect real number of sets created.\n wd = SnowfakeryWorkingDirectory(template_dir)\n if self.run_until.sobject_name:\n self.sets_finished_while_generating_template = wd.get_record_counts()[\n self.run_until.sobject_name\n ]\n else:\n self.sets_finished_while_generating_template = num_records\n\n new_template_dir = data_loader_new_directory_name(template_dir, self.run_until)\n shutil.move(template_dir, new_template_dir)\n template_dir = new_template_dir\n\n # don't send data tables to child processes. All they\n # care about are ID->OID mappings\n wd = SnowfakeryWorkingDirectory(template_dir)\n self._cleanup_object_tables(*wd.setup_engine())\n\n return template_dir, wd.relevant_sobjects()",
"def test_8_template(install_test_files, data_dir):\n fc_dir = os.path.join(data_dir, os.pardir, \"100326_FC6107FAAXX\")\n with make_workdir():\n cl = [\"bcbio_nextgen.py\", \"-w\", \"template\", \"--only-metadata\",\n \"freebayes-variant\",\n os.path.join(fc_dir, \"100326.csv\"),\n os.path.join(fc_dir, \"7_100326_FC6107FAAXX_1_fastq.txt\"),\n os.path.join(fc_dir, \"7_100326_FC6107FAAXX_2_fastq.txt\"),\n os.path.join(fc_dir, \"8_100326_FC6107FAAXX.bam\")]\n subprocess.check_call(cl)",
"def make_cake_templates():\n tmpl = dict()\n\n # Attributes\n tmpl['Cooking time'] = ConditionTemplate(\n name=\"Cooking time\",\n description=\"The time elapsed during a cooking process\",\n bounds=RealBounds(0, 7 * 24.0, \"hr\")\n )\n tmpl[\"Oven temperature setting\"] = ParameterTemplate(\n name=\"Oven temperature setting\",\n description=\"Where the knob points\",\n bounds=RealBounds(0, 2000.0, \"K\")\n )\n tmpl[\"Oven temperature\"] = ConditionTemplate(\n name=\"Oven temperature\",\n description=\"Actual temperature measured by the thermocouple\",\n bounds=RealBounds(0, 2000.0, \"K\")\n )\n\n tmpl[\"Tastiness\"] = PropertyTemplate(\n name=\"Tastiness\",\n description=\"Yumminess on a fairly arbitrary scale\",\n bounds=IntegerBounds(lower_bound=1, upper_bound=10)\n )\n\n # Objects\n tmpl[\"Baking in an oven\"] = ProcessTemplate(\n name=\"Baking in an oven\",\n description='Using heat to promote chemical reactions in a material',\n allowed_labels=['precursor'],\n conditions=[(tmpl[\"Oven temperature\"], RealBounds(0, 700, \"degF\"))],\n parameters=[(tmpl[\"Oven temperature setting\"], RealBounds(100, 550, \"degF\"))]\n )\n\n tmpl[\"Taste test\"] = MeasurementTemplate(\n name=\"Taste test\",\n properties=[tmpl[\"Tastiness\"]]\n )\n\n tmpl[\"Dessert\"] = MaterialTemplate(\n name=\"Dessert\",\n properties=[tmpl[\"Tastiness\"]]\n )\n\n tmpl[\"Generic Material\"] = MaterialTemplate(name=\"Generic\")\n tmpl[\"Icing\"] = ProcessTemplate(name=\"Icing\",\n description='Applying a coating to a substrate',\n allowed_labels=['coating', 'substrate'])\n tmpl[\"Mixing\"] = ProcessTemplate(name=\"Mixing\",\n description='Physically combining ingredients',\n allowed_labels=['wet', 'dry', 'leavening', 'seasoning',\n 'sweetener', 'shortening', 'flavoring'])\n tmpl[\"Procurement\"] = ProcessTemplate(name=\"Procurement\",\n description=\"Buyin' stuff\")\n\n return tmpl",
"def doMakeLimbTemplate2(self):\n \"\"\"\n returnList = []\n templObjNameList = []\n templHandleList = []\n \"\"\"\n log.debug(\">>> doMakeLimbTemplate\")\n assert self.cls == 'TemplateFactory.go',\"Not a TemlateFactory.go instance!\"\n\n try:#Gather limb specific data and check\n #==============\n self.curveDegree = self._mi_templateNull.curveDegree\n self.rollOverride = self._mi_templateNull.rollOverride\n\n doCurveDegree = getGoodCurveDegree(self)\n if not doCurveDegree:raise ValueError,\"Curve degree didn't query\"\n\n #>>>Scale stuff\n size = returnModuleBaseSize(self._mi_module)\n\n lastCountSizeMatch = len(self.corePosList) -1\n except Exception,error:raise Exception,\"Gather limb data | {0}\".format(error)\n\n #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n # Making the template objects\n #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n mc.progressBar(self.str_progressBar, edit=True, status = \"%s >>Template>> step:'%s' \"%(self._strShortName,self.l_strSteps[1]), progress=1) \t\t\t\t\t \n try:\n templHandleList = []\n self.ml_controlObjects = []\n self._mi_locs = []\n for i,pos in enumerate(self.corePosList):# Don't like this sizing method but it is what it is for now\n #>> Make each of our base handles\n #============================= \n if i == 0:\n sizeMultiplier = 1\n elif i == lastCountSizeMatch:\n sizeMultiplier = .8\n else:\n sizeMultiplier = .75\n\n #>>> Create and set attributes on the object\n i_obj = cgmMeta.validateObjArg( curves.createControlCurve('sphere',(size * sizeMultiplier)),'cgmObject',setClass = True )\n\n curves.setCurveColorByName(i_obj.mNode,self.moduleColors[0])\n\n i_obj.doStore('cgmName','%s.%s'%(self._mi_module.coreNames.mNode,self.d_coreNamesAttrs[i])) \n #i_obj.addAttr('cgmName',value = str(self.l_coreNames[i]), attrType = 'string', lock=True)#<<<<<<<<<<<FIX THIS str(call) when Mark fixes bug\n if self.direction != None:\n i_obj.addAttr('cgmDirection',value = self.direction,attrType = 'string',lock=True) \n i_obj.addAttr('cgmType',value = 'templateObject', attrType = 'string',lock=True) \n i_obj.doName()#Name it\n\n mc.move (pos[0], pos[1], pos[2], [i_obj.mNode], a=True)\n i_obj.parent = self._mi_templateNull\n\n #>>> Loc it and store the loc\n #i_loc = cgmMeta.cgmObject( i_obj.doLoc() )\n i_loc = i_obj.doLoc()\n i_loc.addAttr('cgmName',value = self._mi_module.getShortName(), attrType = 'string', lock=True) #Add name tag\n i_loc.addAttr('cgmType',value = 'templateCurveLoc', attrType = 'string', lock=True) #Add Type\n i_loc.v = False # Turn off visibility\n i_loc.doName()\n\n self._mi_locs.append(i_loc)\n i_obj.connectChildNode(i_loc.mNode,'curveLoc','owner')\n i_loc.parent = self._mi_templateNull#parent to the templateNull\n\n mc.pointConstraint(i_obj.mNode,i_loc.mNode,maintainOffset = False)#Point contraint loc to the object\n\n templHandleList.append (i_obj.mNode)\n self.ml_controlObjects.append(i_obj)\n except Exception,error:raise Exception,\"Template object creation | {0}\".format(error)\n\n try:#>> Make the curve\n #============================= \n mc.progressBar(self.str_progressBar, edit=True, status = \"%s >>Template>> step:'%s' \"%(self._strShortName,self.l_strSteps[2]), progress=2) \t\t\t\t\t \n i_crv = cgmMeta.validateObjArg( mc.curve (d=doCurveDegree, p = self.corePosList , os=True),'cgmObject',setClass = True )\n\n i_crv.addAttr('cgmName',value = str(self._mi_module.getShortName()), attrType = 'string', lock=True)#<<<<<<<<<<<FIX THIS str(call) when Mark fixes bug\n if self.direction != 
None:\n i_crv.addAttr('cgmDirection',value = self.direction, attrType = 'string', lock=True)#<<<<<<<<<<<FIX THIS str(call) when Mark fixes bug\n\n i_crv.addAttr('cgmType',value = 'templateCurve', attrType = 'string', lock=True)\n curves.setCurveColorByName(i_crv.mNode,self.moduleColors[0])\n i_crv.parent = self._mi_templateNull \n i_crv.doName()\n i_crv.setDrawingOverrideSettings({'overrideEnabled':1,'overrideDisplayType':2},True)\n\n for i,i_obj in enumerate(self.ml_controlObjects):#Connect each of our handles ot the cv's of the curve we just made\n mc.connectAttr ( (i_obj.curveLoc.mNode+'.translate') , ('%s%s%i%s' % (i_crv.mNode, '.controlPoints[', i, ']')), f=True )\n\n\n self.foundDirections = returnGeneralDirections(self,templHandleList)\n log.debug(\"directions: %s\"%self.foundDirections )\n except Exception,error:raise Exception,\"template curve | {0}\".format(error)\n\n try:#>> Create root control\n #============================= \n mc.progressBar(self.str_progressBar, edit=True, status = \"%s >>Template>> step:'%s' \"%(self._strShortName,self.l_strSteps[3]), progress=3) \t\t\t\t\t \n\n rootSize = (distance.returnBoundingBoxSizeToAverage(templHandleList[0],True)*1.25) \n i_rootControl = cgmMeta.validateObjArg( curves.createControlCurve('cube',rootSize),'cgmObject',setClass = True )\n\n curves.setCurveColorByName(i_rootControl.mNode,self.moduleColors[0])\n i_rootControl.addAttr('cgmName',value = str(self._mi_module.getShortName()), attrType = 'string', lock=True)#<<<<<<<<<<<FIX THIS str(call) when Mark fixes bug \n i_rootControl.addAttr('cgmType',value = 'templateRoot', attrType = 'string', lock=True)\n if self.direction != None:\n i_rootControl.addAttr('cgmDirection',value = self.direction, attrType = 'string', lock=True)#<<<<<<<<<<<FIX THIS str(call) when Mark fixes bug\n i_rootControl.doName()\n\n #>>> Position it\n if self._mi_module.moduleType in ['clavicle']:\n position.movePointSnap(i_rootControl.mNode,templHandleList[0])\n else:\n position.movePointSnap(i_rootControl.mNode,templHandleList[0])\n\n #See if there's a better way to do this\n log.debug(\"templHandleList: %s\"%templHandleList)\n if self._mi_module.moduleType not in ['foot']:\n if len(templHandleList)>1:\n log.debug(\"setting up constraints...\") \n constBuffer = mc.aimConstraint(templHandleList[-1],i_rootControl.mNode,maintainOffset = False, weight = 1, aimVector = [0,0,1], upVector = [0,1,0], worldUpVector = self.worldUpVector, worldUpType = 'vector' )\n mc.delete (constBuffer[0]) \n elif self._mi_module.getMessage('moduleParent'):\n #l_parentTemplateObjects = self._mi_module.moduleParent.templateNull.getMessage('controlObjects')\n helper = self._mi_module.moduleParent.templateNull.msgList_get('controlObjects',asMeta = True)[-1].helper.mNode\n if helper:\n log.info(\"helper: %s\"%helper)\n constBuffer = mc.orientConstraint( helper,i_rootControl.mNode,maintainOffset = False)\n mc.delete (constBuffer[0]) \n\n i_rootControl.parent = self._mi_templateNull\n i_rootControl.doGroup(maintain=True)\n except Exception,error:raise Exception,\"Root creation | {0}\".format(error)\n\n\n try:#>> Store objects\n #============================= \n self._mi_templateNull.curve = i_crv.mNode\n self._mi_templateNull.root = i_rootControl.mNode\n self._mi_templateNull.msgList_connect('controlObjects',templHandleList)\n\n self._mi_rootControl = i_rootControl#link to carry\n except Exception,error:raise Exception,\"store | {0}\".format(error)\n\n try:#>> Orientation helpers\n #============================= \n 
mc.progressBar(self.str_progressBar, edit=True, status = \"%s >>Template>> step:'%s' \"%(self._strShortName,self.l_strSteps[3]), progress=3) \t\t\t\t\t \n \"\"\" Make our Orientation Helpers \"\"\"\n doCreateOrientationHelpers(self)\n doParentControlObjects(self)\n\n #if self._mi_module.getMessage('moduleParent'):#If we have a moduleParent, constrain it\n #constrainToParentModule(self.m)\n\n #doOrientTemplateObjectsToMaster(self._mi_module)\n except Exception,error:raise Exception,\"Orientation helpers | {0}\".format(error)\n\n return True",
"def __fill_template__(self,template_file,output_fname):\n dictionary = {}\n for k,v in self.__dict__.iteritems():\n if k == 'sample_key':\n try:\n int(v)\n new_sample_key = \"Sample_\" + str(v)\n dictionary.update({k:new_sample_key})\n continue\n except ValueError:\n pass\n dictionary.update({k:str(v)})\n dictionary.update({'restats_tail': self.restats_file + '.tail'})\n with open(output_fname,'w') as f:\n string = fill_template(template_file,dictionary)\n f.write(string)",
"def pull_templates(self):\n try:\n backend_templates = self.client.list_all_templates()\n except VMwareError as e:\n raise VMwareBackendError(e)\n\n if is_basic_mode():\n # If basic mode is enabled, we should filter out templates which have more than 1 NIC\n backend_templates = [\n template\n for template in backend_templates\n if len(template['template']['nics']) == 1\n ]\n\n backend_templates_map = {\n item['library_item']['id']: item for item in backend_templates\n }\n\n frontend_templates_map = {\n p.backend_id: p\n for p in models.Template.objects.filter(settings=self.settings)\n }\n\n stale_ids = set(frontend_templates_map.keys()) - set(\n backend_templates_map.keys()\n )\n new_ids = set(backend_templates_map.keys()) - set(frontend_templates_map.keys())\n common_ids = set(backend_templates_map.keys()) & set(\n frontend_templates_map.keys()\n )\n\n for library_item_id in new_ids:\n template = self._backend_template_to_template(\n backend_templates_map[library_item_id]\n )\n template.save()\n\n for library_item_id in common_ids:\n backend_template = self._backend_template_to_template(\n backend_templates_map[library_item_id]\n )\n frontend_template = frontend_templates_map[library_item_id]\n fields = (\n 'cores',\n 'cores_per_socket',\n 'ram',\n 'disk',\n 'guest_os',\n 'modified',\n 'description',\n )\n update_pulled_fields(frontend_template, backend_template, fields)\n\n models.Template.objects.filter(\n settings=self.settings, backend_id__in=stale_ids\n ).delete()",
"def fillSignalTemplates(opt):\n\n totalSig={}\n templates=[]\n\n #import signal events\n data=ROOT.TChain('data')\n data.AddFile(os.path.join(opt.input,opt.sig))\n\n #define final preselection cuts\n cuts='xangle==%d'%opt.xangle\n if len(opt.presel) : cuts += ' && ' + opt.presel\n if opt.csiacc:\n csiCuts ='csi1>%f && csi1<%f && '%opt.csiacc[opt.xangle][0]\n csiCuts+='csi2>%f && csi2<%f'%opt.csiacc[opt.xangle][1]\n cuts=csiCuts if len(cuts)==0 else '{0} && {1}'.format(cuts,csiCuts)\n\n #loop over categories build templates\n for icat in range(len(opt.categs)):\n\n #apply category cuts\n categCut=opt.categs[icat]\n categCut=cuts if len(categCut)==0 else '%s && %s'%(categCut,cuts)\n\n catName='%s_a%d_%d'%(opt.chTag,opt.xangle,icat)\n print '\\t',catName,categCut\n\n #signal modelling histograms\n histos=[]\n for name,pfix in [('sig_'+catName,''),('sig_%s_sigShape'%catName,'mix')]:\n\n templCuts=categCut.replace('csi1',pfix+'csi1')\n templCuts=templCuts.replace('csi2',pfix+'csi2')\n wgtExpr='wgt*%f'%(SIGNALXSECS[opt.xangle]*opt.lumi)\n data.Draw('{0}mmiss >> h({1},{2},{3})'.format(pfix,opt.nbins,opt.mMin,opt.mMax),\n '{0}*({1})'.format(wgtExpr,templCuts),\n 'goff')\n h=data.GetHistogram()\n histos.append( h.Clone(name) ) \n histos[-1].SetDirectory(0)\n\n if len(histos)==1:\n totalSig[icat]=h.Integral()\n\n h.Reset('ICE')\n templates += defineProcessTemplates(histos)\n \n print '\\t total signal:',totalSig\n return totalSig,templates",
"def build(self):\n self.logger.debug(\"run\")\n\n self.onInit()\n self.work()\n \n self.afterWork()\n\n template = Templateengine(self.currenttemplate)\n template.readTemplateFile()\n contenttype = self.settings.contenttype \n self.defaultTemplateParameter()\n \n try:\n self.content = template.get(self.tplparam)\n except Exception as ex:\n Emergency.stop(ex)\n\n self.onDone()\n \n self.logger.debug(\"done\")",
"def copy_templates(root_directory, dist_directory, sdk_directory,\n cpus, families, boards):\n\n def _process(when, contexts):\n for context in contexts:\n for template in configuration.TEMPLATES:\n if template[\"when\"] == when:\n context.update({\n \"root\": root_directory,\n \"sdk\": sdk_directory,\n \"dist\": dist_directory\n })\n\n source = templates.from_string(template[\"source\"], context)\n target = templates.from_string(template[\"target\"], context)\n target = os.path.join(dist_directory, target)\n\n # Perform the action.\n sys.stdout.write(\"Processing '%s'\\n\" % source)\n\n if template[\"type\"] == \"file\":\n templates.from_file(source, target, context)\n elif template[\"type\"] == \"glob\":\n for source_file in glob.glob(source):\n if os.path.isfile(source_file):\n target_file = os.path.join(\n target, os.path.basename(source_file))\n\n templates.from_file(\n source_file, target_file, context)\n else:\n raise Exception(\"Not supported\")\n\n _process(\"per_family\", families)\n _process(\"per_cpu\", cpus)\n _process(\"per_board\", boards)\n _process(\"per_once\", [{\n \"families\": [family[\"family\"] for family in families],\n \"cpus\": [cpu[\"cpu\"] for cpu in cpus],\n \"boards\": [board[\"board\"] for board in boards]\n }])",
"def _load_templates(cls):\n if cls._raw_templates is None:\n cls._raw_templates = fetch_rrlyrae_templates()",
"def setup_templates(self):\n self.libs[\"template\"] = (\"#libs/templates/include\", None, \"\")\n self[\"CPPPATH\"].append(\"#libs/templates/include\")",
"def __init__(self, entityBlocks):\n Template.__init__(self, entityBlocks)",
"def main(temp_dir, extensions, template):\n env = load_env(template_dir=temp_dir)\n if not template:\n # Get all the templates and return a dict with enumerated \n # templates names\n ext = extensions if extensions else []\n template_dict = get_templates(env, extensions=ext)\n # Echo the content of the template directory by enumerating \n # the templates and a simple list join\n temp_list = list()\n for x in template_dict.items():\n num = str(x[0])\n # Remove whitespace, underscores and capitalize words\n temp_name = x[1].strip().replace(\"_\", \" \").title()\n temp_string = \"{}. {}\".format(num, temp_name)\n temp_list.append(temp_string)\n click.echo(\"\\n\".join(temp_list))\n # Prompt the user to give the number of the template\n temp_num = click.prompt(\n \"Choose a templeta by entering the number of the template.\",\n type=int\n )\n # Get the template from the template dictionary\n template = template_dict.get(temp_num)\n # Get the variables\n temp_vars = get_vars(template, env)\n # Crate a dict with variables and let the user input the variables\n vars_to_render = dict()\n for var in temp_vars:\n user_var = click.prompt(\"{}?\".format(var.capitalize()))\n vars_to_render[var] = user_var\n # Get the template\n temp = env.get_template(template)\n # Render the template\n click.echo(temp.render(vars_to_render))",
"def use_templates(self, templates):\n self.htmls = templates",
"def create_files_from_templates(self, model_attributes):\n for folder_name in [\"views\", \"urls\"]:\n file_path = \"%s/%s/%s_%s.py\" % (model_attributes['app_label'], folder_name,\n model_attributes['model_name_slug'], folder_name)\n template_path = \"django_baker/%s\" % (folder_name)\n self.create_file_from_template(file_path, template_path, model_attributes)\n for file_name in [\"base\", \"list\", \"detail\", \"create\", \"update\", \"delete\"]:\n file_path = \"%s/templates/%s/%s_%s.html\" % (model_attributes['app_label'], model_attributes['app_label'],\n model_attributes['model_name_slug'], file_name)\n template_path = \"django_baker/%s.html\" % (file_name)\n self.create_file_from_template(file_path, template_path, model_attributes)",
"def write_template_body1(template_filename):\n template_type = template_filename.split('/')[-1].split('_')[0]\n template_file = open(template_filename, 'a')\n template_file.write('<body>\\n') \n template_file.write('<div id=\"pageTitle\">\\n')\n template_file.write('<?php echo $stat_title; ?>\\n') \n template_file.write('</div>\\n')\n template_file.write('<div class=\"page-menu\"><div class=\"table\">\\n')\n template_file.write(' <div class=\"element\">\\n')\n template_file.write(' <span class=\"bold\">Basin:</span>\\n')\n template_file.write(\n ' <select id=\"maptype\" '\n +'onchange=\"changeMaptype(this.value)\"></select>\\n'\n )\n template_file.write(' </div>\\n')\n template_file.write(' <div class=\"element\">\\n')\n template_file.write(' <span class=\"bold\">Name:</span>\\n')\n template_file.write(\n ' <select id=\"domain\" '\n +'onchange=\"changeDomain(this.value);\"></select>\\n'\n )\n template_file.write(' </div>\\n')\n template_file.write(' <div class=\"element\">\\n')\n template_file.write(\n ' <span class=\"bold\">Forecast Lead:</span>\\n'\n )\n template_file.write(\n ' <select id=\"variable\" '\n +'onchange=\"changeVariable(this.value)\"></select>\\n'\n )\n template_file.write(' </div>\\n')\n template_file.write('</div></div>\\n')\n template_file.write('\\n')\n template_file.write('<!-- Middle menu -->\\n')\n template_file.write('<div class=\"page-middle\" id=\"page-middle\">\\n')\n template_file.write(\n 'Left/Right arrow keys = Change forecast lead | Up/Down arrow keys '\n +'= Change Storm\\n'\n )\n template_file.write(\n '<br>For information on tropical cyclone verification, '\n +'<button class=\"infobutton\" id=\"myBtn\">click here</button>\\n'\n )\n template_file.write('<div id=\"myModal\" class=\"modal\">\\n')\n template_file.write(' <div class=\"modal-content\">\\n')\n template_file.write(' <span class=\"close\">×</span>\\n')\n template_file.write(' Tropical Cyclone Verification Information\\n')\n template_file.write(\n ' <embed width=100% height=100% src=\"../main.php\">\\n'\n )\n template_file.write(' </div>\\n')\n template_file.write('</div>\\n')\n template_file.write('<!-- /Middle menu -->\\n')\n template_file.write('</div>\\n')\n template_file.write('\\n')\n template_file.write(\n '<div id=\"loading\"><img style=\"width:100%\" '\n +'src=\"../../images/loading.png\"></div>\\n'\n )\n template_file.write('\\n')\n template_file.write('<!-- Image -->\\n')\n template_file.write('<div id=\"page-map\">\\n')\n template_file.write(' <image name=\"map\" style=\"width:100%\">\\n')\n template_file.write('</div>\\n')\n template_file.write('\\n')\n template_file.write('<script type=\"text/javascript\">\\n')\n template_file.write('// Get the modal\\n')\n template_file.write('var modal = document.getElementById(\"myModal\");\\n')\n template_file.write('\\n')\n template_file.write('// Get the button that opens the modal\\n')\n template_file.write('var btn = document.getElementById(\"myBtn\");\\n')\n template_file.write('\\n')\n template_file.write('// Get the <span> element that closes the modal\\n')\n template_file.write(\n 'var span = document.getElementsByClassName(\"close\")[0];\\n'\n )\n template_file.write('\\n')\n template_file.write(\n '// When the user clicks the button, open the modal\\n'\n )\n template_file.write('btn.onclick = function() {\\n')\n template_file.write(' modal.style.display = \"block\";\\n')\n template_file.write('}\\n')\n template_file.write('\\n')\n template_file.write(\n '// When the user clicks on <span> (x), close the modal\\n'\n )\n 
template_file.write('span.onclick = function() {\\n')\n template_file.write(' modal.style.display = \"none\";\\n')\n template_file.write('}\\n')\n template_file.write('\\n')\n template_file.write(\n '// When the user clicks anywhere outside of the modal, close it\\n'\n )\n template_file.write('window.onclick = function(event) {\\n')\n template_file.write(' if (event.target == modal) {\\n')\n template_file.write(' modal.style.display = \"none\";\\n')\n template_file.write(' }\\n')\n template_file.write('}\\n')\n template_file.write(\n '//======================================================='\n +'=============================================\\n'\n )\n template_file.write('//User-defined variables\\n')\n template_file.write(\n '//======================================================='\n +'=============================================\\n'\n )\n template_file.write('\\n')\n template_file.write('//Global variables\\n')\n template_file.write(\n 'var minFrame = 0; //Minimum frame for every variable\\n'\n )\n template_file.write(\n 'var maxFrame = 26; //Maximum frame for every variable\\n'\n )\n template_file.write(\n 'var incrementFrame = 1; //Increment for every frame\\n'\n )\n template_file.write('\\n')\n template_file.write('var startFrame = 0; //Starting frame\\n')\n template_file.write('\\n')\n template_file.write('var cycle = 2018100600\\n')\n template_file.write('\\n')\n template_file.write('/*\\n')\n template_file.write(\n 'When constructing the URL below, DDD = domain, VVV = variable, '\n +'LLL = level, SSS = season, Y = frame number.\\n'\n )\n template_file.write(\n 'For X and Y, labeling one X or Y represents an integer '\n +'(e.g. 0, 10, 20). Multiple of these represent a string\\n'\n )\n template_file.write(\n 'format (e.g. XX = 00, 06, 12 --- XXX = 000, 006, 012).\\n'\n )\n template_file.write('*/\\n')\n template_file.write(\n 'var url = \"<?php echo $'+template_type+'_url; ?>\";\\n'\n )\n template_file.write('\\n')\n template_file.write(\n '//======================================================='\n +'=============================================\\n'\n )\n template_file.write('//Add variables & domains\\n')\n template_file.write(\n '//======================================================='\n +'=============================================\\n'\n )\n template_file.write('\\n')\n template_file.write('var variables = [];\\n')\n template_file.write('var domains = [];\\n')\n template_file.write('var levels = [];\\n')\n template_file.write('var seasons = [];\\n')\n template_file.write('var maptypes = [];\\n')\n template_file.write('var validtimes = [];\\n')\n template_file.write('\\n')\n template_file.write('\\n')\n template_file.close()",
"def mk_data(self):\n self.data = self.DEFAULTS.copy()\n\n for template in self.raw_data.get('extends', []):\n template_data = self.load_template(template)\n self.data.update(template_data)\n\n self.data.update(self.raw_data)\n\n str_replace(self.data)\n\n if self.data.get('redirect_stderr'):\n self.data.pop('stderr')",
"def load_templates(fwhm=400, line_complexes=True, stars=False,\n full_line_list=None, continuum_list=None,\n fsps_templates=False, alf_template=False):\n \n if stars:\n # templates = glob.glob('%s/templates/Pickles_stars/ext/*dat' %(os.getenv('GRIZLI')))\n # templates = []\n # for t in 'obafgkmrw':\n # templates.extend( glob.glob('%s/templates/Pickles_stars/ext/uk%s*dat' %(os.getenv('THREEDHST'), t)))\n # templates.extend(glob.glob('%s/templates/SPEX/spex-prism-M*txt' %(os.getenv('THREEDHST'))))\n # templates.extend(glob.glob('%s/templates/SPEX/spex-prism-[LT]*txt' %(os.getenv('THREEDHST'))))\n # \n # #templates = glob.glob('/Users/brammer/Downloads/templates/spex*txt')\n # templates = glob.glob('bpgs/*ascii')\n # info = catIO.Table('bpgs/bpgs.info')\n # type = np.array([t[:2] for t in info['type']])\n # templates = []\n # for t in 'OBAFGKM':\n # test = type == '-%s' %(t)\n # so = np.argsort(info['type'][test])\n # templates.extend(info['file'][test][so])\n # \n # temp_list = OrderedDict()\n # for temp in templates:\n # #data = np.loadtxt('bpgs/'+temp, unpack=True)\n # data = np.loadtxt(temp, unpack=True)\n # #data[0] *= 1.e4 # spex\n # scl = np.interp(5500., data[0], data[1])\n # name = os.path.basename(temp)\n # #ix = info['file'] == temp\n # #name='%5s %s' %(info['type'][ix][0][1:], temp.split('.as')[0])\n # print(name)\n # temp_list[name] = utils.SpectrumTemplate(wave=data[0],\n # flux=data[1]/scl)\n \n # np.save('stars_bpgs.npy', [temp_list])\n \n \n # tall = np.load(os.path.join(os.getenv('GRIZLI'), \n # 'templates/stars.npy'))[0]\n # \n # return tall\n # \n # temp_list = OrderedDict()\n # for k in tall:\n # if k.startswith('uk'):\n # temp_list[k] = tall[k]\n # \n # return temp_list\n # \n # for t in 'MLT':\n # for k in tall:\n # if k.startswith('spex-prism-'+t):\n # temp_list[k] = tall[k]\n # \n # return temp_list\n \n #return temp_list\n templates = ['M6.5.txt', 'M8.0.txt', 'L1.0.txt', 'L3.5.txt', 'L6.0.txt', 'T2.0.txt', 'T6.0.txt', 'T7.5.txt']\n templates = ['stars/'+t for t in templates]\n else:\n ## Intermediate and very old\n # templates = ['templates/EAZY_v1.0_lines/eazy_v1.0_sed3_nolines.dat', \n # 'templates/cvd12_t11_solar_Chabrier.extend.skip10.dat'] \n templates = ['eazy_intermediate.dat', \n 'cvd12_t11_solar_Chabrier.dat']\n \n ## Post starburst\n #templates.append('templates/UltraVISTA/eazy_v1.1_sed9.dat')\n templates.append('post_starburst.dat')\n \n ## Very blue continuum\n #templates.append('templates/YoungSB/erb2010_continuum.dat')\n templates.append('erb2010_continuum.dat')\n \n ### Test new templates\n # templates = ['templates/erb2010_continuum.dat',\n # 'templates/fsps/tweak_fsps_temp_kc13_12_006.dat',\n # 'templates/fsps/tweak_fsps_temp_kc13_12_008.dat']\n \n if fsps_templates:\n #templates = ['templates/fsps/tweak_fsps_temp_kc13_12_0{0:02d}.dat'.format(i+1) for i in range(12)]\n templates = ['fsps/fsps_QSF_12_v3_nolines_0{0:02d}.dat'.format(i+1) for i in range(12)]\n #templates = ['fsps/fsps_QSF_7_v3_nolines_0{0:02d}.dat'.format(i+1) for i in range(7)]\n \n \n if alf_template:\n templates.append('alf_SSP.dat')\n \n if continuum_list is not None:\n templates = continuum_list\n \n temp_list = OrderedDict()\n for temp in templates:\n data = np.loadtxt(os.path.join(os.getenv('GRIZLI'), 'templates', temp), unpack=True)\n #scl = np.interp(5500., data[0], data[1])\n scl = 1.\n name = temp #os.path.basename(temp)\n temp_list[name] = SpectrumTemplate(wave=data[0], flux=data[1]/scl,\n name=name)\n \n temp_list[name].name = name\n \n if stars:\n return temp_list\n \n 
### Emission lines:\n line_wavelengths, line_ratios = get_line_wavelengths()\n \n if line_complexes:\n #line_list = ['Ha+SII', 'OIII+Hb+Ha', 'OII']\n #line_list = ['Ha+SII', 'OIII+Hb', 'OII']\n line_list = ['Ha+NII+SII+SIII+He', 'OIII+Hb', 'OII+Ne', 'Lya+CIV']\n else:\n if full_line_list is None:\n line_list = DEFAULT_LINE_LIST\n else:\n line_list = full_line_list\n \n #line_list = ['Ha', 'SII']\n \n # Use FSPS grid for lines\n wave_grid = None\n # if fsps_templates:\n # wave_grid = data[0]\n # else:\n # wave_grid = None \n \n for li in line_list:\n scl = line_ratios[li]/np.sum(line_ratios[li])\n for i in range(len(scl)):\n line_i = SpectrumTemplate(wave=wave_grid, \n central_wave=line_wavelengths[li][i], \n flux=None, fwhm=fwhm, velocity=True)\n \n if i == 0:\n line_temp = line_i*scl[i]\n else:\n line_temp = line_temp + line_i*scl[i]\n \n name = 'line {0}'.format(li)\n line_temp.name = name\n temp_list[name] = line_temp\n \n return temp_list",
"def prepare_template(self) -> PDBBlocks:\n with pymol2.PyMOL() as pymol:\n if len(self.code) == 4:\n pymol.cmd.fetch(self.code)\n elif '.pdb' in self.code: # file.\n pymol.cmd.load(self.code)\n else:\n raise ValueError\n pymol.cmd.remove('solvent')\n pymol.cmd.h_add('*')\n pymol.cmd.alter('all','segi=\"\"')\n pymol.cmd.sort()\n return self.fill_PDBBlocks(pymol, self.ref_resn)",
"def __init__(self,template_file, **kwargs):\r\n \r\n env = Environment(\r\n loader=PackageLoader('email_generator', 'templates'),\r\n autoescape=select_autoescape(['html', 'xml'])\r\n )\r\n template = env.get_template(template_file)\r\n self.body = template.render(**kwargs)",
"def processTemplates(self, tk, templateFile = '', id = '', shotNum = '', inprogressBar = ''):\r\n ## Now fetch all the template paths from shotgun\r\n getTemplatePaths = tk.paths_from_template(templateFile, {'Step' : 'Light', 'id' : id, 'Shot' : shotNum})\r\n debug(app = self, method = 'processTemplates', message = 'getTemplatePaths: %s' % getTemplatePaths, verbose = False)\r\n \r\n ## Now look for each assets template path: \r\n xmlFile = max(getTemplatePaths) \r\n debug(app = self, method = 'processTemplates', message = 'Max Version xmlFile.... %s' % xmlFile, verbose = False)\r\n \r\n ## Now if versions has stuff in it..\r\n if not xmlFile:\r\n debug(app = self, method = 'processTemplates', message = 'Can not find any xml files for %s' % shotNum, verbose = False)\r\n pass\r\n else:\r\n \r\n debug(app = self, method = 'processTemplates', message = 'PathTo: %s' % os.path.isfile(xmlFile.replace(os.path.sep, \"/\")), verbose = False)\r\n if os.path.isfile(xmlFile.replace(os.path.sep, \"/\")):## is this a valid xml file!?\r\n inprogressBar.updateProgress(percent = 10, doingWhat = 'createAll shaders...')\r\n self._createAllShaders(XMLPath = xmlFile.replace(os.path.sep, \"/\"), Namespace = '', Root = 'MaterialNodes')\r\n \r\n inprogressBar.updateProgress(percent = 30, doingWhat = 'connectAll shaders...')\r\n self._connectAllShaders(XMLPath = xmlFile.replace(os.path.sep, \"/\"), Namespace = '', Root = 'MaterialNodes')\r\n else:\r\n debug(app = self, method = 'processTemplates', message = 'FAILED Can not find a valid published xml file for %s ...' % os.path.isfile(xmlFile.replace(os.path.sep, \"/\")), verbose = False)\r\n pass",
"def fill_template(template, replacements):\n content = template\n for src, target in replacements.iteritems():\n content = content.replace(src, target)\n return content"
] |
[
"0.6271193",
"0.60885066",
"0.5877966",
"0.58509237",
"0.579428",
"0.57877594",
"0.5783016",
"0.5646136",
"0.5645704",
"0.55866253",
"0.55848837",
"0.55395776",
"0.55178434",
"0.5510485",
"0.5486276",
"0.54843503",
"0.54778534",
"0.5457643",
"0.5452079",
"0.54400253",
"0.54366183",
"0.5430452",
"0.54151547",
"0.5379089",
"0.53774637",
"0.53707415",
"0.53706944",
"0.53598154",
"0.5354781",
"0.5316993"
] |
0.74359393
|
0
|
Normalizes the inputs for JSONRPC endpoints that take a 2-tuple of `(ContentKey, bytes)`
|
def content_key_and_content_munger(
module: Any, content_key: ContentKey, content: bytes,
) -> Tuple[HexStr, HexStr]:
return (
encode_hex(content_key),
encode_hex(content),
)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def normalize_transfer_result(cls, result: JSON) -> JSON:\n ...",
"def decompose_from_json(cls, key, val):\n key = (key or '').strip()\n val = (val or '').strip()\n\n if not key:\n raise EndpointError('The key must be specified')\n\n endpoint = '%s=' % key\n split = [item.strip() for item in val.split('#')]\n\n if len(split) > 1:\n # If # was found, the source was specified\n endpoint += (split[0] or key) + '#' + split[1]\n\n elif cls.RE_SOURCE.findall(val):\n # Check if value looks like a source\n endpoint += val + '#*'\n\n else:\n # Otherwise use the key as the source\n endpoint += key + '#' + split[0]\n\n return cls.decompose(endpoint)",
"def _update_from_rest_data(self) -> None:",
"def test_convert_request_arguments_with_encoded_items_to_dict():\n arguments = {\n \"key1\": [b\"value1\"],\n \"key2\": [b\"value2\"],\n \"key3\": [b\"value3\"],\n }\n expected = {\n \"key1\": \"value1\",\n \"key2\": \"value2\",\n \"key3\": \"value3\",\n }\n result = convert_request_to_dict(arguments)\n\n assert expected == result",
"def remote_pullSerialized(*keys):",
"def test_get_inputs_from_rpc_json_0a6a357e(self):\n inputs = bip69.get_inputs_from_rpc_json(self.tx_json_0a6a357e)\n\n self.assertEqual(len(inputs), 17)\n self.assertEqual(inputs[0], (('643e5f4e66373a57251fb173151e838ccd27d279'\n 'aca882997e005016bb53d5aa'), 0))\n self.assertEqual(inputs[15], (('6c1d56f31b2de4bfc6aaea28396b333102b1f60'\n '0da9c6d6149e96ca43f1102b1'), 1))",
"def _prepare_params(self, params):\n for key, value in params.items():\n if type(value) is list:\n params[key] = [(6, 0, value)]\n\n return params",
"def _decode(self, input_dict):\n pass",
"def transform_response_with_bytearray(response):\n from msrest import Serializer\n for item in response:\n if response[item] and isinstance(response[item], (bytes, bytearray)):\n response[item] = Serializer.serialize_bytearray(response[item])\n return response",
"def prepare_request_params(\n request_params: Dict, model_id: Text, model_data: Dict\n) -> Dict:\n request_params = correct_types(request_params, model_data[\"columns_data\"])\n if model_data[\"hashed_indexes\"]:\n request_params = reverse_hash_names(model_id, request_params)\n return request_params",
"def content_key_munger(module: Any, content_key: ContentKey,) -> Tuple[HexStr]:\n return (encode_hex(content_key),)",
"def _TransformInputs(self, _):\n raise NotImplementedError()",
"def convert_input_to_tuple(fn):\n @wraps(fn)\n def wrapper(*args, **kwargs):\n data = args[0].api.payload\n try:\n kwargs['tupled_output'] = json.loads(data,\n object_hook=_json_object_hook)\n return fn(*args, **kwargs)\n except Exception:\n data = json.dumps(data)\n kwargs['tupled_output'] = json.loads(data,\n object_hook=_json_object_hook)\n return fn(*args, **kwargs)\n\n return wrapper",
"def _transform(self, resource_from_api):\n for (project_id, backend_services) in resource_from_api.iteritems():\n for backend_service in backend_services:\n yield {'project_id': project_id,\n 'id': backend_service.get('id'),\n 'creation_timestamp': parser.format_timestamp(\n backend_service.get('creationTimestamp'),\n self.MYSQL_DATETIME_FORMAT),\n 'name': backend_service.get('name'),\n 'description': backend_service.get('description'),\n 'affinity_cookie_ttl_sec': self._to_int(\n backend_service.get('affinityCookieTtlSec')),\n 'backends': parser.json_stringify(\n backend_service.get('backends', [])),\n 'cdn_policy': parser.json_stringify(\n backend_service.get('cdnPolicy', {})),\n 'connection_draining': parser.json_stringify(\n backend_service.get('connectionDraining', {})),\n 'enable_cdn': self._to_bool(\n backend_service.get('enableCDN')),\n 'health_checks': parser.json_stringify(\n backend_service.get('healthChecks', [])),\n 'iap': parser.json_stringify(\n backend_service.get('iap', {})),\n 'load_balancing_scheme': backend_service.get(\n 'loadBalancingScheme'),\n 'port': self._to_int(backend_service.get('port')),\n 'port_name': backend_service.get('portName'),\n 'protocol': backend_service.get('protocol'),\n 'region': backend_service.get('region'),\n 'session_affinity': backend_service.get(\n 'sessionAffinity'),\n 'timeout_sec': backend_service.get('timeoutSec'),\n 'raw_backend_service':\n parser.json_stringify(backend_service)}",
"def handle_input(data: dict):",
"def seperate_endpoints(endpoints):\n seperated_endpoints = []\n\n # Seperate the list of endpoints to have unique methods and endpoints\n for endpoint in endpoints:\n for ep in endpoint['endpoints']:\n if not endpoint['methods']:\n # If there's no method set it to GET\n endpoint['methods'] = ['GET']\n for method in endpoint['methods']:\n tempDict = {\n 'endpoint': ep,\n 'method': method,\n 'plugin': endpoint['plugin'],\n 'params': endpoint['params'] or [],\n 'templates': list(set(endpoint['templates'])) or [],\n 'headers': endpoint['headers'] if 'headers' in endpoint else [],\n 'filepath': endpoint['filepath'] or None,\n 'line_number': endpoint['line_number'] if 'line_number' in endpoint else None\n }\n seperated_endpoints.append(tempDict)\n \n return seperated_endpoints",
"def parse_request_body(self):\n try:\n request_arguments = self.request.arguments\n if request_arguments:\n new_request_arguments = {\n k: common.my_str(v[0].decode('utf8'))\n for k, v in request_arguments.items()\n }\n return new_request_arguments\n else:\n request_body = self.request.body\n request_data = request_body.decode('utf-8')\n request_data_dict = json.loads(request_data)\n self.request.arguments = {\n k: [str(v)]\n for k, v in request_data_dict.items()\n }\n new_request_arguments = {\n k: common.my_str(v)\n for k, v in request_data_dict.items()\n }\n return new_request_arguments\n except Exception as e:\n raise tornado.web.HTTPError(\n status_code=400, log_message='bad_request: {}'.format(str(e)))",
"def __prepare_args(self, args):\n ret = []\n for a in args:\n if isinstance(a, bytes):\n if self.__size_expr.match(a):\n ret += [a]\n else:\n ret += [b'\"' + a + b'\"']\n continue\n ret += [bytes(str(a).encode(\"utf-8\"))]\n return ret",
"def postprocess(self, inputs: Dict[str, Any], **kwargs) -> Dict[str, Any]:\n return inputs",
"def from_bytes(self, *args, **kwargs): # real signature unknown\n pass",
"def from_bytes(self, *args, **kwargs): # real signature unknown\n pass",
"def from_bytes(self, *args, **kwargs): # real signature unknown\n pass",
"def from_bytes(self, *args, **kwargs): # real signature unknown\n pass",
"def from_bytes(self, *args, **kwargs): # real signature unknown\n pass",
"def from_bytes(self, *args, **kwargs): # real signature unknown\n pass",
"def from_bytes(self, *args, **kwargs): # real signature unknown\n pass",
"def from_bytes(self, *args, **kwargs): # real signature unknown\n pass",
"def from_bytes(self, *args, **kwargs): # real signature unknown\n pass",
"def from_bytes(self, *args, **kwargs): # real signature unknown\n pass",
"def from_bytes(self, *args, **kwargs): # real signature unknown\n pass"
] |
[
"0.5036987",
"0.49513078",
"0.48808864",
"0.48493755",
"0.48394534",
"0.48311353",
"0.48286295",
"0.47968972",
"0.47826153",
"0.47802904",
"0.47756737",
"0.4758542",
"0.4749534",
"0.47471386",
"0.47257885",
"0.47162017",
"0.47141236",
"0.4703763",
"0.47019362",
"0.46930027",
"0.46930027",
"0.46930027",
"0.46930027",
"0.46930027",
"0.46930027",
"0.46930027",
"0.46930027",
"0.46930027",
"0.46930027",
"0.46930027"
] |
0.50820345
|
0
|
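The munger in the row above normalizes a `(ContentKey, bytes)` pair by hex-encoding both values before they are handed to the JSON-RPC endpoint. A minimal runnable sketch of that pattern, assuming `eth-utils` supplies `encode_hex` and treating the content key as plain `bytes` (the original `ContentKey` type is defined elsewhere in that codebase):

from typing import Any, Tuple
from eth_utils import encode_hex  # assumed dependency

def hex_munger(module: Any, content_key: bytes, content: bytes) -> Tuple[str, str]:
    # Both positional byte arguments become 0x-prefixed hex strings.
    return (encode_hex(content_key), encode_hex(content))

# Example call; the module argument is unused by the munger itself.
print(hex_munger(None, b"\x01\x02", b"payload"))  # ('0x0102', '0x7061796c6f6164')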
Load weights from snapshot file
|
def load_weights(net, optimizer, scheduler, snapshot_file, restore_optimizer_bool=False):
logging.info("Loading weights from model %s", snapshot_file)
net, optimizer, scheduler, epoch, mean_iu = restore_snapshot(net, optimizer, scheduler, snapshot_file,
restore_optimizer_bool)
return epoch, mean_iu
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def load_weights(self, filepath):\n self.model.load_weights(filepath)",
"def _load_weights(self):\n self.npz_weights = np.load(self._weight_file)\n self._load_byte_embedding()\n self._load_cnn_weights()\n self._load_highway()\n self._load_projection()",
"def load_weights(self, weight_file):\r\n self.model.load_weights(weight_file)",
"def load_weights(self, path: str):\n self.load_state_dict(torch.load(path))",
"def load_weights(self, path: str):\n self.load_state_dict(torch.load(path))",
"def load_weights_from_checkpoint(self, path: str, key: str):\n ckpt = torch.load(path, map_location='cpu')\n self.load_state_dict(ckpt[key])\n # self.to(self.device)",
"def load_weights_from_checkpoint(self, path: str, key: str):\n ckpt = torch.load(path, map_location='cpu')\n self.load_state_dict(ckpt[key])\n # self.to(self.device)",
"def load_weights(self, path=None):\n\n if path is None:\n path = self.checkpoints_dir\n\n self.model.load_weights(tf.train.latest_checkpoint(path))\n logging.info(f'\\tWeights loaded from {path}')",
"def load_weights(self, file):\n self.model.load_weights(file)\n return",
"def load_weights(self, the_path):\n self.model.load_state_dict(torch.load(the_path))",
"def load_model_weights(self, filename):\n self.model.load_weights(filename)",
"def load(self, filename):\n self.model.load_weights(filename)",
"def load_weights(self, file_path):\n self.model.load_weights(file_path + '/policy_network.h5')\n print(\"\\nrestored weights of the policy network.\\n\")",
"def _load_local_weights(self, h5file):\n for name, layer in self._layers_to_save.items():\n self._load_layer_weights(layer, name, h5file)",
"def load_weights(model, fpath):\n state = torch.load(fpath)\n model.load_state_dict(state['state_dict'])",
"def load_weights(self, weights):\n weight = np.load(weights)\n return weight",
"def restore(self, weights_file):\r\n\r\n self.model.load_weights(weights_file, by_name=True)",
"def load_weights(self):\n try:\n print('loading weights from {}'.format(self.cfg.class_model_dir))\n self.load_state_dict(torch.load(self.cfg.class_model_dir + self.class_model_name + '.pth'))\n except Exception as e:\n print(\"load weights exception: {}\".format(e))",
"def load_weight(model):\n file = h5py.File(WEIGHT_SAVE, 'r')\n weight = []\n for i in range(len(file.keys())):\n weight.append(file['weight' + str(i)][:])\n model.set_weights(weight)",
"def load_model(self, file_name):\n\t\tself.model.load_weights(file_name)",
"def load_weights(self, model_name: str, checkpoint: int, path: str = './models/'):\n path_to_model = path + model_name + '/checkpoint_' + str(checkpoint) + '/model_weights'\n self.model.load_weights(path_to_model)",
"def load_weights_file(self, file_path):\n\n # Load the weights\n self._cnn_model.load_weights(file_path)",
"def load_weights(self, filename):\n checkpoint = torch.load(filename)\n if not checkpoint['input_size'] == self.state_size:\n print(f\"Error when loading weights from checkpoint {filename}: input size {checkpoint['input_size']} doesn't match state size of agent {self.state_size}\")\n return None\n if not checkpoint['output_size'] == self.action_size:\n print(f\"Error when loading weights from checkpoint {filename}: output size {checkpoint['output_size']} doesn't match action space size of agent {self.action_size}\")\n return None\n my_actor_hidden_layers = [each.out_features for each in self.actor_local.hidden_layers if each._get_name()!='BatchNorm1d']\n if not checkpoint['actor_hidden_layers'] == my_actor_hidden_layers:\n print(f\"Error when loading weights from checkpoint {filename}: actor hidden layers {checkpoint['actor_hidden_layers']} don't match agent's actor hidden layers {my_actor_hidden_layers}\")\n return None\n my_critic_hidden_layers = [each.out_features for each in self.critic_local.hidden_layers if each._get_name()!='BatchNorm1d']\n if not checkpoint['critic_hidden_layers'] == my_critic_hidden_layers:\n print(f\"Error when loading weights from checkpoint {filename}: critic hidden layers {checkpoint['critic_hidden_layers']} don't match agent's critic hidden layers {my_critic_hidden_layers}\")\n return None\n self.actor_local.load_state_dict(checkpoint['actor_state_dict'])\n self.critic_local.load_state_dict(checkpoint['critic_state_dict'])",
"def _load_weight_if_possible(self):\n try:\n self.keras_model.load_weights(self.model.WEIGHT_PATH)\n print('Weights loaded!')\n except OSError:\n print('No file with weights available! Starting from scratch...')",
"def load_model_weights(self):\n raise NotImplementedError",
"def load_weights(cls, model, path_to_weights_file, load_parts=True, verbose=False):\n path_to_weights_file = Path(path_to_weights_file)\n\n if not path_to_weights_file.is_absolute():\n path_to_weights_file = MODELS_DIR/path_to_weights_file\n\n if verbose:\n print('Load weights from {}.'.format(path_to_weights_file))\n\n device = torch.device(CUDA_DEVICE_NAME if torch.cuda.is_available() else 'cpu')\n model.to(device)\n\n if load_parts:\n model_dict = model.state_dict()\n\n # try to load those part of an existing model that match the architecture\n # Reference: https://discuss.pytorch.org/t/how-to-load-part-of-pre-trained-model/1113/2\n pretrained_dict = torch.load(path_to_weights_file, map_location=device)\n\n no_correspondence = [key for key, value in pretrained_dict.items()\n if key not in model_dict or model_dict[key].shape!=value.shape]\n\n if len(no_correspondence)>0:\n print('Cannot load layers:')\n for key in no_correspondence:\n print(' * '+key)\n\n pretrained_dict = {key: value for key, value in pretrained_dict.items()\n if key in model_dict and model_dict[key].shape==value.shape}\n model_dict.update(pretrained_dict)\n model.load_state_dict(model_dict)\n else:\n model_dict = torch.load(path_to_weights_file, map_location=device)\n model.load_state_dict(model_dict)\n\n return model",
"def load_weights(self, model_file):\n if not self.encoder_decoder:\n raise TypeError('You need to build a model using the method '\n '`build_model` before trying to load an already '\n 'trained one.')\n self.encoder_decoder.load_weights(_join_model_path(model_file))",
"def load(self, folder):\n # load the weights from input folder\n self.generator.load_weights('%s/generator.h5'%folder)\n self.critic.load_weights('%s/critic.h5'%folder)",
"def init_weights(self):\n if isinstance(self.pretrained, str):\n logger = get_root_logger()\n logger.info(f'load model from: {self.pretrained}')\n load_checkpoint(self, self.pretrained, strict=False, logger=logger)\n elif self.pretrained is None:\n pass\n else:\n raise TypeError('pretrained must be a str or None')",
"def load_snapshot(device, net, snapshot_name, optimizer=None):\n\ttry:\n\t\tcheckpoint = torch.load(snapshot_name+'.pth', map_location=device)\n\t\tnet.load_state_dict(checkpoint['model_state_dict'])\n\t\tif optimizer:\n\t\t\trestore_optimizer(optimizer, checkpoint)\n\texcept:\n\t\tcheckpoint = None\t\n\treturn checkpoint"
] |
[
"0.7100965",
"0.7099289",
"0.70770496",
"0.69538385",
"0.69538385",
"0.6940326",
"0.6940326",
"0.6904215",
"0.686968",
"0.68578506",
"0.68147826",
"0.6770877",
"0.6694163",
"0.6670967",
"0.66293234",
"0.6612041",
"0.6583617",
"0.6533964",
"0.65129817",
"0.6429854",
"0.64259714",
"0.6375651",
"0.633951",
"0.63163126",
"0.6274082",
"0.62365687",
"0.62273633",
"0.6213798",
"0.62037456",
"0.6201402"
] |
0.7182239
|
0
|
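The `load_weights` document in the row above delegates to a project-specific `restore_snapshot` helper. A hedged sketch of the underlying pattern with plain PyTorch; the checkpoint keys (`state_dict`, `epoch`, `mean_iu`) are assumptions for illustration, not the original project's format:

import logging
import torch

def load_weights_from_snapshot(net, snapshot_file):
    # Read the checkpoint and copy its parameters into the network in place.
    logging.info("Loading weights from %s", snapshot_file)
    checkpoint = torch.load(snapshot_file, map_location="cpu")
    net.load_state_dict(checkpoint["state_dict"])
    return checkpoint.get("epoch"), checkpoint.get("mean_iu")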
Checks if claims user belongs to Tenant or has override permissions to edit other Tenants
|
def check_tenant_authorization(tenant_id, override_permission=None):
claims = get_jwt_claims()
if "id" in list(claims.keys()):
tenant_user = identity.TenantUser.query.filter_by(id=claims["id"]).first()
if (
tenant_user.tenant_id == tenant_id
or override_permission in tenant_user.permissions
):
return
abort(403, "Unauthorized Tenant")
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def has_object_permission(self, request, view, user):\n return user == request.user or request.user.is_superuser",
"def has_object_permission(self, request, view, obj):\n\n #check if method is get i.e user only want to view\n if request.method in permissions.SAFE_METHODS:\n return True\n\n #if method is not get then will check if user wants to edit own profile\n return obj.id == request.user.ids",
"def can_be_edited(self, user):\n return (self.is_public or user == self.owner or\n user in list(self.auth_users.all()))",
"def has_object_permission(self, request, view, obj):\n if request.user.is_superuser:\n return True\n if request.user.profile.role == UserRole.CLIENT and obj.owner != request.user:\n return False\n if request.user.profile.role == UserRole.EXECUTOR and obj.executor != request.user:\n return False\n return True",
"def has_object_read_permission(self, request):\n user = request.user\n if user.is_superuser:\n return user.is_superuser\n\n return self.user == user",
"def user_can_edit(self, user):\n return user == self.owner",
"def test_permissions(self):\n taxonomy = self.get_taxonomy()\n return True if self.request.user == taxonomy.author else taxonomy.public",
"def has_object_permission(self, request, view, obj):\n if request.user and (request.user.is_staff or request.user.is_superuser):\n return True\n return super().has_object_permission(request, view, obj)",
"def has_object_permission(self, request, view, obj):\n if request.method in permissions.SAFE_METHODS:\n return True\n \n \"\"\"Check if the user has the permission to edit their profile. If True it will allow PUT, PATCH & DELETE operations\"\"\"\n return obj.id == request.user.id # returns True or False",
"def has_permission(self, request, view):\n if isinstance(request.user, TokenUser):\n return True\n\n return super().has_permission(request, view)",
"def has_change_permission(self, request, obj=None) -> bool:\n permission = super().has_change_permission(request, obj)\n\n if obj is not None:\n permission &= (obj.owner == request.user) or request.user.is_superuser\n\n return permission",
"def has_object_permission(self, request, view, obj):\n if request.method in permissions.SAFE_METHODS:\n return True\n \n \"\"\"Check if the user has the permission to edit their profile. If True it will allow PUT, PATCH & DELETE operations\"\"\"\n return obj.user_profile.id == request.user.id # returns True or False",
"def has_write_permission(request):\n user = request.user\n return user.is_superuser",
"def has_permission(self, request, view):\n user = request.user\n if (\n isinstance(user, TokenUser)\n and LTI_ROLES[self.__class__.role]\n & set(user.token.payload.get(\"roles\", []))\n and user.token.payload.get(\"permissions\", {}).get(\"can_update\", False)\n is True\n ):\n return True\n\n return False",
"def can_view(self, user):\n if self.applicant == user:\n return True\n elif user.has_perm('funding.view_all_applications'):\n # Fundihg commitee\n return True\n elif user.has_perm('funding.make_application_decisions'):\n # Fundihg manager - should have the view permissions, but just in case\n return True\n return False",
"def can_edit(self, user):\n if user.has_perm('funding.make_application_decisions'):\n # Funding manager can update things later, if required\n return True\n # Applicants can only edit the application before the final review step\n if self.status in ('S', 'U'):\n if self.applicant == user:\n return True\n return False",
"def can_edit(self, user):\n return self.author_id == user.id or user.is_staff",
"def has_super_access():\n current_user = frappe.get_doc('User', frappe.session.user)\n roles = set([role.role for role in current_user.roles])\n return bool(roles & {'Administrator', 'Instructor', 'Education Manager', 'System Manager', 'Academic User'})",
"def has_object_permission(self, request, view, obj):\n if request.method in permissions.SAFE_METHODS:\n return True\n if all([request.user, request.user.is_staff]):\n return True\n elif all([request.user, type(obj) == type(request.user), obj == request.user]):\n return True\n\n return True",
"def has_object_permission(self, request, view, obj):\n return request.user.is_manager or request.user.is_staff",
"def has_object_permission(self, request, view, obj):\n\n # Users can always see and edit their own comments\n if obj.create_user == request.user:\n return True\n\n # And see but not edit those from their others in their own\n # organization\n if obj.create_user.organization == request.user.organization and \\\n request.method in permissions.SAFE_METHODS:\n return True\n\n # Government roles can always view comments\n # and can view or edit privileged comments with correct permission\n if request.user.is_government_user:\n # read\n if request.method in permissions.SAFE_METHODS:\n if obj.privileged_access:\n return request.user.has_perm('DOCUMENTS_VIEW')\n return True\n\n # write\n if request.method not in permissions.SAFE_METHODS:\n if obj.privileged_access:\n return request.user.has_perm('DOCUMENTS_GOVERNMENT_REVIEW')\n return True\n\n # not authorized\n return False",
"def has_permission(self, request, view):\n return request.user.group == 'admin'",
"def has_object_update_permission(self, request):\n user = request.user\n if self == user:\n return True\n return user.is_superuser",
"def get_is_admin(self, obj):\n try:\n user = self.context.get('request').user\n except Exception:\n # raise serializers.ValidationError('Could not access request.user')\n return False\n if user == obj.admin:\n return True\n else:\n return False",
"def edit_allowed(self):\n account = Account.current_user_account\n if account is None:\n return False\n return self.user_can_edit(account.user)",
"def _check_admin_only(self, request):\r\n api_key = request.params.get(self.api_field, None)\r\n\r\n if request.user is None:\r\n user = self.user_fetcher(api_key=api_key)\r\n else:\r\n user = request.user\r\n\r\n if user is not None and user.is_admin:\r\n request.user = user\r\n return True",
"def has_permission(self, request, view):\n user = request.user\n try:\n user.user_client\n return True\n except Exception:\n return False",
"def has_view_permission(self, request, obj=None):\n user = request.user\n if obj and type(obj) is Client:\n return obj.is_user_in_sales_contacts_of_client(user) or obj.is_user_in_support_contacts_of_client(user)\n return True",
"def is_permitted(self):\n\t\tfrom frappe.utils import has_common\n\n\t\tallowed = [\n\t\t\td.role for d in frappe.get_all(\"Has Role\", fields=[\"role\"], filters={\"parent\": self.name})\n\t\t]\n\n\t\tcustom_roles = get_custom_allowed_roles(\"page\", self.name)\n\t\tallowed.extend(custom_roles)\n\n\t\tif not allowed:\n\t\t\treturn True\n\n\t\troles = frappe.get_roles()\n\n\t\tif has_common(roles, allowed):\n\t\t\treturn True",
"def test_func(self):\n return self.request.user.is_superuser"
] |
[
"0.6689976",
"0.66579235",
"0.6575025",
"0.6559304",
"0.65221727",
"0.64526767",
"0.64456385",
"0.64339375",
"0.64206105",
"0.64158726",
"0.63980037",
"0.6397329",
"0.63969445",
"0.6390685",
"0.6379953",
"0.6357644",
"0.6335498",
"0.63311714",
"0.6319886",
"0.6311173",
"0.6305596",
"0.62917256",
"0.6289603",
"0.62669075",
"0.623378",
"0.62189376",
"0.6210446",
"0.62070215",
"0.6204229",
"0.6199646"
] |
0.7168181
|
0
|
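The authorization check in the row above reads the JWT claims, looks up the tenant user, and allows the request when that user belongs to the tenant or carries an override permission. A framework-agnostic sketch of the same rule; the claim key, lookup callable, and permission container are assumptions:

def is_authorized_for_tenant(claims, lookup_user, tenant_id, override_permission=None):
    # True when the claims' user belongs to the tenant or holds the override permission.
    user_id = claims.get("id")
    user = lookup_user(user_id) if user_id is not None else None
    if user is None:
        return False
    return user.tenant_id == tenant_id or override_permission in user.permissions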
Given a tenant schema, create a tenant
|
def create_tenant(tenant):
exists = identity.Tenant.query.filter_by(name=tenant.name).first()
if exists:
abort(409, "Tenant Already Exists")
db.session.add(tenant)
db.session.commit()
return tenant.id
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def create_tenant(tenant_name, description, enabled, auth_admin_url, admin_token):\n keystone = get_client(auth_admin_url, admin_token)\n tenant = keystone.tenants.create(tenant_name=tenant_name, description=description, enabled=enabled)\n print tenant\n return tenant.to_dict()",
"def createOrcaTenant(self,payload):\n response = None\n # Check if tenant with that name already exists\n systemObj = self.getSystemByUid(payload[\"system\"])\n try:\n # Systemname and tenant description always determine a specific tenant\n response = self.getTenantByName(systemObj[\"name\"],payload[\"description\"].upper())\n except KeyError as e:\n if e.args[1] == \"CIC_TENANT_LOOKUP_ERROR\":\n response = None\n pass\n else:\n raise\n try:\n # TMS delivers always a non-empty body if something was found\n if response:\n if response[\"description\"] == payload[\"description\"].upper():\n raise RuntimeError(\"*** INFO *** Tenant already exists\",\"CIC_CREATE_TENANT_ERROR\")\n # TMS delivers an empty body if nothing is found\n elif response is None:\n print \"*** INFO *** Starting tenant creation\"\n response = self.httpHandler.sendHttpRequest(CIC_TENANT_ENDPOINT,payload,\"POST\")\n status = response.getcode()\n if status == 202:\n print \"*** INFO *** Tenant creation successfully triggered\"\n\n except RuntimeError as e:\n print e.args[0]\n except AttributeError as e:\n print \"*** INFO *** Discarding request.Please wait until tenant creation finishes before sending another request\"",
"def setup_test_tenant(self):\n self.test_tenant = rand_name('test_tenant_')\n self.test_description = rand_name('desc_')\n resp, self.tenant = self.client.create_tenant(\n name=self.test_tenant,\n description=self.test_description)\n self.tenants.append(self.tenant)",
"def create_tenant(tenant_name, password, environment):\n environment.add_cleanup(\n environment.cfy.tenants.delete,\n kwargs={\n 'tenant_name': tenant_name,\n },\n )\n environment.cfy.tenants.create(tenant_name=tenant_name)\n\n environment.add_cleanup(\n environment.cfy.users.delete,\n kwargs={\n 'username': tenant_name,\n },\n )\n environment.cfy.users.create(\n role='user',\n password=password,\n username=tenant_name,\n )\n\n environment.add_cleanup(\n environment.cfy.tenants.remove_user,\n kwargs={\n 'username': tenant_name,\n 'tenant_name': tenant_name,\n },\n )\n environment.cfy.tenants.add_user(tenant_name=tenant_name,\n username=tenant_name)",
"def setUp(self):\n super().setUp()\n Tenant.objects.get_or_create(schema_name=\"public\")",
"def create_tenant(name, domain):\n manager = get_manager()\n tenant = manager.resolve_tenant_id(name, domain=domain)\n if not tenant:\n manager.create_tenant(tenant_name=name,\n domain=domain,\n description='Created by Juju')\n log(\"Created new tenant '%s' in domain '%s'\" % (name, domain),\n level=DEBUG)\n return\n\n log(\"Tenant '%s' already exists.\" % name, level=DEBUG)",
"def create_tenant(self, tenant_info):\n LOG_OBJ.debug(\"Creating Tenant:%s\" % tenant_info['project_name'])\n _tenant_name = tenant_info['project_name']\n _user_name = tenant_info.get('user_name', _tenant_name + \"_user\")\n _password = tenant_info.get('password', _tenant_name + \"_pass\")\n\n _url = \"http://\" + self.host_ip + \":35357/v2.0/tenants\"\n _headers = {'Content-type': 'application/json',\n 'x-auth-token': self.cloud_admin_info['token_project']}\n _tenant_data = {\"tenant\": {\"enabled\": True, \"name\": _tenant_name,\n \"description\": \"Testing API 3\"}}\n\n _body = json.dumps(_tenant_data)\n\n response = self.request(\"POST\", _url, _headers, _body)\n if response is None:\n LOG_OBJ.error(\"No response from Server while creating tenant: %s\"\n % _tenant_name)\n return response\n if response.status not in [200, 201, 202, 203, 204]:\n LOG_OBJ.error(\"Create tenant Failed with status %s \" %\n response.status)\n return response.status\n\n output = json.loads(response.data)\n LOG_OBJ.info(\"Created tenant: %s successfully.\" % _tenant_name)\n\n _tenant_id = output['tenant']['id']\n # If user id is passed then, directly add that user to the tenant.\n # otherwise Create a new user.\n _user_id = tenant_info.get('user_id', None)\n if not _user_id:\n _user_data = {\"user\": {\"email\": None,\n \"password\": _password,\n \"enabled\": True,\n \"name\": _user_name,\n \"tenantId\": _tenant_id}}\n _user_id = self.create_user(_user_data)\n if not isinstance(_user_id, unicode):\n return\n tenant_info['userID'] = _user_id\n\n # Add the user roles.\n for role_name in tenant_info['roles']:\n role_id = self.get_role_id(role_name)\n if not isinstance(role_id, unicode):\n return\n # Add user role.\n if not self.add_user_role(_tenant_id, _user_id, role_id):\n return\n # Get the token.\n token_id = self.get_token(_tenant_name, _user_name, _password)\n if not isinstance(token_id, unicode):\n return\n # Set the new context. note: This is v2 token, so only project scope.\n self.set_tenant_info(_tenant_name, token_id, token_id, _tenant_id)\n\n # Adding Security Group Rules\n # Add the ICMP rule.\n # if not isinstance(self.add_security_group_rules(\"icmp\"), bool):\n # return\n # Add the rule for ssh\n # if not isinstance(self.add_security_group_rules(\n # \"tcp\", from_port='22', to_port='22'), bool):\n # return\n # Add the rule for all udp\n # if not isinstance(self.add_security_group_rules(\n # \"udp\", from_port='1', to_port='65535'), bool):\n # return\n\n # Modify the tenant quota.\n # if not isinstance(self.set_quota(_tenant_id), bool):\n # return\n # Update the quota\n # fields = {\"network\": 50, \"subnet\": 50, \"port\": 100, \"floatingip\": 50}\n # quotas = self.quota_update(_tenant_id, fields)\n # if not isinstance(quotas, dict):\n # return\n # LOG_OBJ.info(\"Quota for tenant[%s] is:%s\" % (_tenant_id,\n # str(quotas)))\n return _tenant_id",
"def createTenant(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')",
"def create_tenant_safe(self, tenant):\n new_name = self.get_available_tenant_name(tenant.name)\n if new_name != tenant.name:\n tenant.name = new_name\n tenant.save(update_fields=['name'])\n self.create_tenant(tenant)",
"def tenant_user(db) -> TenantUser:\n with schema_context('public'):\n return TenantUser.objects.create_user(email='[email protected]')",
"def create_admin_tenant(tenant, user_id, password, url):\n user = get_user_model().objects.get(pk=user_id)\n tenant = Tenant(schema_name=tenant)\n\n # Send email of welcome\n send_mailgun(\"Bienvenido a SCR\", user.email, url)\n\n with tenant_context(tenant):\n get_user_model().objects.create_superuser(email=user.email, password=password, first_name=user.first_name, last_name=user.last_name)",
"def createobj(self, tenantid='', tenantname='', notes='', tenantjson={}):\n tenantobj = {'tenantid': tenantid, 'tenantname': tenantname, 'notes': notes, 'tenantjson': tenantjson\n }\n return tenantobj",
"def add_tenants(key, tenant_names):\n for tenant_name in tenant_names:\n if not get_tenant(key, tenant_name):\n key.tenants.create(tenant_name=tenant_name, enabled=True)\n print(\"Created tenant/project '{}'\".format(tenant_name))\n\n return True",
"def set_tenant_in_app(tenant):\n utils.set_current_tenant(tenant)",
"def test_specify_non_default_tenant():\n pass",
"def tenant(self, tenant: \"str\"):\n self._attrs[\"tenant\"] = tenant",
"def test_tenant_user_change_tenant(sample_identity):\n access_token, tenant, tenant_user, tc = sample_identity\n # Create a new Tenant\n new_tenant = identity.Tenant()\n new_tenant.name = \"Aperture Science\"\n db.session.add(new_tenant)\n db.session.commit()\n # Create a Tenant Specific admin role\n new_special_role = authorization.Role()\n # Assign ability to create a user on specific tenant to the new role but\n # not the ability to create user on ANY tenant\n can_create_tenant_user = authorization.Permission.query.filter_by(\n name=authorization.PermissionType.CAN_CREATE_TENANT_USER.value\n ).first()\n new_special_role.permissions.append(can_create_tenant_user)\n db.session.add(new_special_role)\n db.session.commit()\n # Create a new TenantUser assigned to new Tenant\n new_tenant_user = identity.TenantUser()\n new_tenant_user.username = \"gordonfreeman\"\n new_tenant_user.tenant_id = new_tenant.id\n new_tenant_user.password = \"1234\"\n new_tenant_user.roles.append(new_special_role)\n db.session.add(new_tenant_user)\n db.session.commit()\n # Login new user\n new_access_token = tc.post(\n \"api/v1/authentication/login\",\n json={\"username\": new_tenant_user.username, \"password\": \"1234\"},\n ).json[\"data\"][\"access_token\"]\n\n # Try to re-assign original tenant_user to new tenant\n headers = {\"Authorization\": \"Bearer \" + new_access_token}\n tenant_user_json = id_schemas.TenantUserSchema().dump(tenant_user)\n tenant_user_json[\"tenant_id\"] = new_tenant_user.tenant_id\n response = tc.put(\n f\"api/v1/identity/tenant-user/{tenant_user.id}\",\n json=tenant_user_json,\n headers=headers,\n )\n # Assert that permission is blocked\n assert response.status_code == 403, \"Tenant Permission assignment not blocking\"\n\n # Login with admin user\n new_access_token = tc.post(\n \"api/v1/authentication/login\",\n json={\"username\": tenant_user.username, \"password\": \"1234\"},\n ).json[\"data\"][\"access_token\"]\n # Attempt to Change tenant of new_tenant_user\n headers = {\"Authorization\": \"Bearer \" + new_access_token}\n tenant_user_json = id_schemas.TenantUserSchema().dump(new_tenant_user)\n tenant_user_json[\"tenant_id\"] = tenant.id\n response = tc.put(\n f\"api/v1/identity/tenant-user/{new_tenant_user.id}\",\n json=tenant_user_json,\n headers=headers,\n )\n assert response.status_code == 200, \"Tenant change permission blocking\"",
"def public_tenant(db) -> TenantModel:\n return TenantModel.objects.get(schema_name='public')",
"def create_user(user_name, password, tenant_name, auth_admin_url, admin_token):\n keystone = get_client(auth_admin_url, admin_token)\n tenants = keystone.tenants.list()\n my_tenant = [x for x in tenants if x.name==tenant_name][0]\n my_user = keystone.users.create(name=user_name, password=password, tenant_id=my_tenant.id)\n print my_user\n return my_user.to_dict()",
"def tenant(self, name):\n # Returns a Tenant object for the given name.\n # Uses Keystone API to perform a direct name lookup,\n # as this is expected to work via name.\n\n data = self.auth.tenant_by_name(name)\n t = Tenant(data[\"tenant\"], self)\n return t",
"def create_schema(self, schema):\n sql = f'set role {self.write_role}; ' \\\n + f'CREATE SCHEMA IF NOT EXISTS {schema};'\n return sql",
"def tenant_present(\n name, description=None, enabled=True, profile=None, **connection_args\n):\n ret = {\n \"name\": name,\n \"changes\": {},\n \"result\": True,\n \"comment\": 'Tenant / project \"{}\" already exists'.format(name),\n }\n\n _api_version(profile=profile, **connection_args)\n\n # Check if tenant is already present\n tenant = __salt__[\"keystone.tenant_get\"](\n name=name, profile=profile, **connection_args\n )\n\n if \"Error\" not in tenant:\n if tenant[name].get(\"description\", None) != description:\n if __opts__.get(\"test\"):\n ret[\"result\"] = None\n ret[\"comment\"] = 'Tenant / project \"{}\" will be updated'.format(name)\n ret[\"changes\"][\"Description\"] = \"Will be updated\"\n return ret\n __salt__[\"keystone.tenant_update\"](\n name=name,\n description=description,\n enabled=enabled,\n profile=profile,\n **connection_args\n )\n ret[\"comment\"] = 'Tenant / project \"{}\" has been updated'.format(name)\n ret[\"changes\"][\"Description\"] = \"Updated\"\n if tenant[name].get(\"enabled\", None) != enabled:\n if __opts__.get(\"test\"):\n ret[\"result\"] = None\n ret[\"comment\"] = 'Tenant / project \"{}\" will be updated'.format(name)\n ret[\"changes\"][\"Enabled\"] = \"Will be {}\".format(enabled)\n return ret\n __salt__[\"keystone.tenant_update\"](\n name=name,\n description=description,\n enabled=enabled,\n profile=profile,\n **connection_args\n )\n ret[\"comment\"] = 'Tenant / project \"{}\" has been updated'.format(name)\n ret[\"changes\"][\"Enabled\"] = \"Now {}\".format(enabled)\n else:\n if __opts__.get(\"test\"):\n ret[\"result\"] = None\n ret[\"comment\"] = 'Tenant / project \"{}\" will be added'.format(name)\n ret[\"changes\"][\"Tenant\"] = \"Will be created\"\n return ret\n # Create tenant\n if _OS_IDENTITY_API_VERSION > 2:\n created = __salt__[\"keystone.project_create\"](\n name=name,\n domain=\"default\",\n description=description,\n enabled=enabled,\n profile=profile,\n **connection_args\n )\n else:\n created = __salt__[\"keystone.tenant_create\"](\n name=name,\n description=description,\n enabled=enabled,\n profile=profile,\n **connection_args\n )\n ret[\"changes\"][\"Tenant\"] = \"Created\" if created is True else \"Failed\"\n ret[\"result\"] = created\n ret[\"comment\"] = 'Tenant / project \"{}\" has been added'.format(name)\n return ret",
"def create_schema(self, schema: str):\n return",
"def tenant_user_admin(db) -> TenantUser:\n with schema_context('public'):\n return TenantUser.objects.create_superuser(\n _USER_PASS,\n email='[email protected]',\n )",
"def genUserTenant():\n global app_tenant\n user_tenant = {}\n\n with open(abs_path + '/../../data/scenario/user_details.yaml', 'r') as user_file:\n user_data = yaml.load(user_file, Loader=yaml.FullLoader)\n\n for user_record in user_data['users']:\n username = user_record['username']\n apps = user_record['applications'].split(',')\n tenant_list = set()\n\n for app in apps:\n tenant_list.add(app_tenant.get(app.strip()))\n \n user_tenant[username] = list(tenant_list)\n\n # write to tenant_details.yaml file\n with open(abs_path + '/../../data/scenario/tenant_details.yaml', 'a+') as f:\n yaml.dump({'user_tenants': user_tenant}, f, sort_keys=False)",
"def create_schema(schema): \n\n query = \"CREATE SCHEMA IF NOT EXISTS {}\".format(schema)\n qdb.execute(query)",
"def test_tenant_update(sample_identity):\n access_token, tenant, tenant_user, tc = sample_identity\n tenant.name = \"ilovebeansllc\"\n headers = {\"Authorization\": \"Bearer \" + access_token}\n updated_tenant_request = id_schemas.TenantSchema().dump(tenant)\n updated_tenant = tc.put(\n f\"api/v1/identity/tenant/{tenant.id}\",\n json=updated_tenant_request,\n headers=headers,\n )\n assert updated_tenant.status_code == 200, \"Tenant could not be updated\"",
"def _ensure_tenant_and_validate(tenant_, access_key):\n tenant_data = registry.TENANT_DATA_GATEWAY\n tenant = tenant_data.tenant_by_name(tenant_)\n if tenant is None:\n raise TenantNotFoundError(\n \"Tenant not found error. tenant='{}', access_key='{}'\".format(\n tenant_, access_key))\n\n if not tenant.has_access_key(access_key):\n raise AccessKeyNotValidError(\n \"The access key is not valid. tenant='{}', access_key='{}'\".format(\n tenant_, access_key))\n\n return tenant",
"def event_create(tenant_id, user_id=None):",
"async def _create_deployment_from_schema(self, schema: DeploymentCreate) -> UUID:\n # TODO: We are likely to remove this method once we have considered the\n # packaging interface for deployments further.\n response = await self._client.post(\n \"/deployments/\", json=schema.dict(json_compatible=True)\n )\n deployment_id = response.json().get(\"id\")\n if not deployment_id:\n raise httpx.RequestError(f\"Malformed response: {response}\")\n\n return UUID(deployment_id)"
] |
[
"0.7171252",
"0.7079633",
"0.6954982",
"0.69242764",
"0.6792502",
"0.679099",
"0.6785639",
"0.6658359",
"0.6618145",
"0.65157557",
"0.6450535",
"0.61557657",
"0.6074168",
"0.5968671",
"0.5956721",
"0.5930699",
"0.58401793",
"0.5772303",
"0.5763112",
"0.5693228",
"0.56806386",
"0.5665258",
"0.56210047",
"0.56083417",
"0.5556906",
"0.5468082",
"0.54497117",
"0.543727",
"0.5436106",
"0.5397495"
] |
0.77065414
|
0
|
Given tenant_id and tenant object, update a Tenant
|
def update_tenant(tenant_id, new_tenant):
check_tenant_authorization(tenant_id)
new_tenant.id = tenant_id
updated_tenant = db.session.merge(new_tenant)
db.session.commit()
return updated_tenant
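
db.session.merge() above copies the incoming object's state onto the persistent row that shares its primary key. A minimal, self-contained sketch of that pattern against an in-memory SQLite database follows; the Tenant model here is a simplified stand-in for the application's identity.Tenant, and all names and values are illustrative only.

from sqlalchemy import Column, Integer, String, create_engine
from sqlalchemy.orm import Session, declarative_base

Base = declarative_base()


class Tenant(Base):
    __tablename__ = "tenant"
    id = Column(Integer, primary_key=True)
    name = Column(String)


engine = create_engine("sqlite://")
Base.metadata.create_all(engine)

with Session(engine) as session:
    session.add(Tenant(id=1, name="Black Mesa"))
    session.commit()

    # Build a detached object carrying the new state, point it at the target
    # row via its id, and merge it into the session, mirroring update_tenant().
    incoming = Tenant(name="Aperture Science")
    incoming.id = 1
    merged = session.merge(incoming)
    session.commit()
    print(merged.name)  # -> Aperture Science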
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def test_tenant_update(sample_identity):\n access_token, tenant, tenant_user, tc = sample_identity\n tenant.name = \"ilovebeansllc\"\n headers = {\"Authorization\": \"Bearer \" + access_token}\n updated_tenant_request = id_schemas.TenantSchema().dump(tenant)\n updated_tenant = tc.put(\n f\"api/v1/identity/tenant/{tenant.id}\",\n json=updated_tenant_request,\n headers=headers,\n )\n assert updated_tenant.status_code == 200, \"Tenant could not be updated\"",
"def updateTenant(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')",
"def test_tenant_user_change_tenant(sample_identity):\n access_token, tenant, tenant_user, tc = sample_identity\n # Create a new Tenant\n new_tenant = identity.Tenant()\n new_tenant.name = \"Aperture Science\"\n db.session.add(new_tenant)\n db.session.commit()\n # Create a Tenant Specific admin role\n new_special_role = authorization.Role()\n # Assign ability to create a user on specific tenant to the new role but\n # not the ability to create user on ANY tenant\n can_create_tenant_user = authorization.Permission.query.filter_by(\n name=authorization.PermissionType.CAN_CREATE_TENANT_USER.value\n ).first()\n new_special_role.permissions.append(can_create_tenant_user)\n db.session.add(new_special_role)\n db.session.commit()\n # Create a new TenantUser assigned to new Tenant\n new_tenant_user = identity.TenantUser()\n new_tenant_user.username = \"gordonfreeman\"\n new_tenant_user.tenant_id = new_tenant.id\n new_tenant_user.password = \"1234\"\n new_tenant_user.roles.append(new_special_role)\n db.session.add(new_tenant_user)\n db.session.commit()\n # Login new user\n new_access_token = tc.post(\n \"api/v1/authentication/login\",\n json={\"username\": new_tenant_user.username, \"password\": \"1234\"},\n ).json[\"data\"][\"access_token\"]\n\n # Try to re-assign original tenant_user to new tenant\n headers = {\"Authorization\": \"Bearer \" + new_access_token}\n tenant_user_json = id_schemas.TenantUserSchema().dump(tenant_user)\n tenant_user_json[\"tenant_id\"] = new_tenant_user.tenant_id\n response = tc.put(\n f\"api/v1/identity/tenant-user/{tenant_user.id}\",\n json=tenant_user_json,\n headers=headers,\n )\n # Assert that permission is blocked\n assert response.status_code == 403, \"Tenant Permission assignment not blocking\"\n\n # Login with admin user\n new_access_token = tc.post(\n \"api/v1/authentication/login\",\n json={\"username\": tenant_user.username, \"password\": \"1234\"},\n ).json[\"data\"][\"access_token\"]\n # Attempt to Change tenant of new_tenant_user\n headers = {\"Authorization\": \"Bearer \" + new_access_token}\n tenant_user_json = id_schemas.TenantUserSchema().dump(new_tenant_user)\n tenant_user_json[\"tenant_id\"] = tenant.id\n response = tc.put(\n f\"api/v1/identity/tenant-user/{new_tenant_user.id}\",\n json=tenant_user_json,\n headers=headers,\n )\n assert response.status_code == 200, \"Tenant change permission blocking\"",
"def tenant_id(self, tenant_id):\n\n self._tenant_id = tenant_id",
"def tenant_id(self, tenant_id):\n\n self._tenant_id = tenant_id",
"def update_quota(self, tenant_id, body=None):\r\n return self.put(self.quota_path % (tenant_id), body=body)",
"def set_tenant_in_app(tenant):\n utils.set_current_tenant(tenant)",
"def test_tenant_user_aesthetic_update(sample_identity):\n access_token, tenant, tenant_user, tc = sample_identity\n headers = {\"Authorization\": \"Bearer \" + access_token}\n new_email = f\"{uuid.uuid4()}@c1.com\"\n new_first_name = str(uuid.uuid4())\n new_last_name = str(uuid.uuid4())\n updated_tenant_user = {\"first_name\": new_first_name, \"last_name\": new_last_name}\n update_request = tc.put(\n f\"api/v1/identity/tenant-user/{tenant_user.id}\",\n json=updated_tenant_user,\n headers=headers,\n )\n assert update_request.status_code == 200, \"Update Failed with non 200 error code\"\n assert update_request.json[\"data\"][\"first_name\"] == new_first_name\n assert update_request.json[\"data\"][\"last_name\"] == new_last_name",
"def tenant(self, tenant: \"str\"):\n self._attrs[\"tenant\"] = tenant",
"def create_tenant_safe(self, tenant):\n new_name = self.get_available_tenant_name(tenant.name)\n if new_name != tenant.name:\n tenant.name = new_name\n tenant.save(update_fields=['name'])\n self.create_tenant(tenant)",
"def updateTenantStatus(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')",
"def quota_update(self, tenant_id, fields):\n _url = \"http://\" + self.host_ip + \":9696/v2.0/quotas/\" + \\\n tenant_id + \".json\"\n _headers = {'Content-type': 'application/json',\n 'x-auth-token': self.cloud_admin_info['token_project']}\n _body = {\"quota\": fields}\n\n response = self.request(\"PUT\", _url, _headers, json.dumps(_body))\n if response is None:\n LOG_OBJ.error(\"No response from server while updating the quota\"\n \" for tenant: %s\" % tenant_id)\n return\n if response.status not in [200, 201, 202, 203, 204]:\n LOG_OBJ.error(\"Updating quota Failed with status %s \"\n % response.status)\n return response.status\n output = json.loads(response.data)\n\n LOG_OBJ.info(\"Tenant Quota Details : %s \" % output)\n return output",
"def _save_tenant_to_cache(tenant_id, tenant):\n tenant_cache = cache_handler.TenantCache()\n token_cache = cache_handler.TokenCache()\n\n #save token and tenant information to cache\n token_cache.set_token(tenant_id, tenant.token)\n tenant_cache.set_tenant(tenant)",
"def set_quota(self, tenant_id):\n # Get the admin tenant's id.\n\n _url = \"http://\" + self.host_ip + \":8774/v2/\" + \\\n self.cloud_admin_info['project_id'] + \"/os-quota-sets/\" + tenant_id\n _headers = {'Content-type': 'application/json',\n 'x-auth-token': self.cloud_admin_info['token_project']}\n _body = {\"quota_set\": {\n \"cores\": 80,\n \"floating_ips\": 40,\n \"instances\": 100,\n \"ram\": 512000}}\n response = self.request(\"PUT\", _url, _headers, json.dumps(_body))\n if response is None:\n LOG_OBJ.error(\"No response from server while setting the quota\"\n \" for tenant: %s\" % tenant_id)\n return\n if response.status not in [200, 201, 202, 203, 204]:\n LOG_OBJ.error(\"Modifying quota Failed with status %s \" %\n response.status)\n return response.status\n output = json.loads(response.data)\n LOG_OBJ.info(\"Tenant Quota Modified. Details : %s \" % output)\n\n return True",
"def create_tenant(self, tenant_info):\n LOG_OBJ.debug(\"Creating Tenant:%s\" % tenant_info['project_name'])\n _tenant_name = tenant_info['project_name']\n _user_name = tenant_info.get('user_name', _tenant_name + \"_user\")\n _password = tenant_info.get('password', _tenant_name + \"_pass\")\n\n _url = \"http://\" + self.host_ip + \":35357/v2.0/tenants\"\n _headers = {'Content-type': 'application/json',\n 'x-auth-token': self.cloud_admin_info['token_project']}\n _tenant_data = {\"tenant\": {\"enabled\": True, \"name\": _tenant_name,\n \"description\": \"Testing API 3\"}}\n\n _body = json.dumps(_tenant_data)\n\n response = self.request(\"POST\", _url, _headers, _body)\n if response is None:\n LOG_OBJ.error(\"No response from Server while creating tenant: %s\"\n % _tenant_name)\n return response\n if response.status not in [200, 201, 202, 203, 204]:\n LOG_OBJ.error(\"Create tenant Failed with status %s \" %\n response.status)\n return response.status\n\n output = json.loads(response.data)\n LOG_OBJ.info(\"Created tenant: %s successfully.\" % _tenant_name)\n\n _tenant_id = output['tenant']['id']\n # If user id is passed then, directly add that user to the tenant.\n # otherwise Create a new user.\n _user_id = tenant_info.get('user_id', None)\n if not _user_id:\n _user_data = {\"user\": {\"email\": None,\n \"password\": _password,\n \"enabled\": True,\n \"name\": _user_name,\n \"tenantId\": _tenant_id}}\n _user_id = self.create_user(_user_data)\n if not isinstance(_user_id, unicode):\n return\n tenant_info['userID'] = _user_id\n\n # Add the user roles.\n for role_name in tenant_info['roles']:\n role_id = self.get_role_id(role_name)\n if not isinstance(role_id, unicode):\n return\n # Add user role.\n if not self.add_user_role(_tenant_id, _user_id, role_id):\n return\n # Get the token.\n token_id = self.get_token(_tenant_name, _user_name, _password)\n if not isinstance(token_id, unicode):\n return\n # Set the new context. note: This is v2 token, so only project scope.\n self.set_tenant_info(_tenant_name, token_id, token_id, _tenant_id)\n\n # Adding Security Group Rules\n # Add the ICMP rule.\n # if not isinstance(self.add_security_group_rules(\"icmp\"), bool):\n # return\n # Add the rule for ssh\n # if not isinstance(self.add_security_group_rules(\n # \"tcp\", from_port='22', to_port='22'), bool):\n # return\n # Add the rule for all udp\n # if not isinstance(self.add_security_group_rules(\n # \"udp\", from_port='1', to_port='65535'), bool):\n # return\n\n # Modify the tenant quota.\n # if not isinstance(self.set_quota(_tenant_id), bool):\n # return\n # Update the quota\n # fields = {\"network\": 50, \"subnet\": 50, \"port\": 100, \"floatingip\": 50}\n # quotas = self.quota_update(_tenant_id, fields)\n # if not isinstance(quotas, dict):\n # return\n # LOG_OBJ.info(\"Quota for tenant[%s] is:%s\" % (_tenant_id,\n # str(quotas)))\n return _tenant_id",
"def delete_tenant(self, tenant_id):\n self.delete_tenant_bulk([tenant_id])",
"def set_current_tenant(tenant):\n setattr(_thread_locals, \"tenant\", tenant)",
"def create_tenant(tenant):\n exists = identity.Tenant.query.filter_by(name=tenant.name).first()\n if exists:\n abort(409, \"Tenant Already Exists\")\n db.session.add(tenant)\n db.session.commit()\n return tenant.id",
"def testUpdate(self):\n response = self.runPut(self.root, sequencer=self.hiseq2000.sodar_uuid, data=self.post_data)\n self.response_200(response)\n data = json.loads(response.content.decode(\"utf-8\"))\n self.assertEqual(data[\"vendor_id\"], self.post_data[\"vendor_id\"])",
"def add_admin_user_to_tenant(self, tenant):\n keystone = self.keystone_admin_client\n\n try:\n admin_user = keystone.users.find(name=self.settings.username)\n admin_role = keystone.roles.find(name='admin')\n try:\n keystone.roles.grant(\n user=admin_user.id, role=admin_role.id, project=tenant.backend_id\n )\n except keystone_exceptions.Conflict:\n pass\n except keystone_exceptions.ClientException as e:\n raise OpenStackBackendError(e)",
"def switch_tenant(tenant_name, password, environment, tester_conf):\n if hasattr(environment.cfy, '_current_user'):\n environment.add_cleanup(\n environment.cfy.profiles.set,\n kwargs=environment.cfy._current_user,\n )\n else:\n creds_conf = tester_conf['cloudify']\n environment.add_cleanup(\n environment.cfy.profiles.set,\n kwargs={\n 'tenant': 'default_tenant',\n 'username': creds_conf['existing_manager_username'],\n 'password': creds_conf['existing_manager_password'],\n },\n )\n\n environment.cfy._current_user = {\n 'tenant': tenant_name,\n 'username': tenant_name,\n 'password': password,\n }\n\n environment.cfy.profiles.set(\n tenant=tenant_name,\n username=tenant_name,\n password=password,\n )",
"def test_update_virtual_account_by_id(self):\n pass",
"def changeTenantMetadata(self,systemName,tenantDescription,parameter,value):\n\n tenantObj = self.getTenantByName(systemName,tenantDescription)\n\n # build payload\n payload = {\n \"versionUuid\": tenantObj[\"versionUuid\"],\n \"uuid\": tenantObj[\"uuid\"],\n parameter: value\n }\n logger.debug(\"Call to changeTenantMetadata - systemName: {} tenant description: {} parameter: {} value: {}\".format(systemName,tenantDescription,parameter,value))\n logger.debug(\" Next line contains json payload\")\n logger.debug(payload)\n\n try:\n response = self.httpHandler.sendHttpRequest(CIC_TENANT_ENDPOINT, payload, \"PATCH\", \"metadata\")\n\n except urllib2.HTTPError as e:\n\n logger.debug(traceback.format_exc())\n\n if e.code == 403:\n\n body = e.read()\n logger.debug(\"Response code: {}, response body: {}\".format(e.code, body))\n raise RuntimeError(\n \"User {} has no permission to update 'tenants' in {} {}\".format(self.cicUser, self.cicUrl, body),\n \"CIC_NO_ACCESS\"\n )\n\n else:\n\n body = e.read()\n logger.debug(\"Response code: {}, response body: {}\".format(e.code, body))\n raise RuntimeError(\n \"An http error occured during tenant metatdata update: \"\n \"{}, Response body: {}\".format(e, body),\n \"CIC_TENANT_METADATA_UPDATE_ERR\")\n\n else:\n\n responseString = response.read()\n returnDict = json.loads(responseString)\n logger.debug(\"Return dict is: {}\".format(returnDict))\n\n rc = self._validateResponse(returnDict, parameter, value)\n if rc == 0 or rc == 1:\n return returnDict\n elif rc == 2:\n raise RuntimeError(\n \"Tenant metadata update failed. \"\n \"Parameter '{}' not written. Maybe invalid parameter.\".format(parameter),\n \"CIC_TENANT_METADATA_UPDATE_NOTWRITE\")\n elif rc == 3:\n returnValue = returnDict[parameter]\n raise RuntimeError(\n \"Tenant metadata update failed. \"\n \"Parameter '{}' written but different return value: {} != {}.\".format(\n parameter, value, returnValue),\n \"CIC_TENANT_METADATA_UPDATE_MISMATCH\")",
"def setup_test_tenant(self):\n self.test_tenant = rand_name('test_tenant_')\n self.test_description = rand_name('desc_')\n resp, self.tenant = self.client.create_tenant(\n name=self.test_tenant,\n description=self.test_description)\n self.tenants.append(self.tenant)",
"def update_storage_plan(user, storage_plan):\n plans = StoragePlan.query\n storage_plan = plans.filter(StoragePlan.id == storage_plan).first()\n user.storage_plan_id = storage_plan.id\n user.storage_plan = storage_plan\n db.session.commit()",
"def update(self, customerguid, name=\"\", login=\"\", password=\"\", email=\"\", address=\"\", vat=\"\", jobguid=\"\", executionparams=None):",
"def get_by_id(tenant_id):\n tenant = Tenant.find_by_id(tenant_id)\n if tenant:\n tenant_schema = TenantSchema()\n return tenant_schema.dump(tenant)\n\n raise BusinessException(\"Invalid tenant\", HTTPStatus.BAD_REQUEST)",
"def test_update(self, requests_mock, accepts_marketing):\n matcher = requests_mock.post(\n f'{settings.CONSENT_SERVICE_BASE_URL}'\n f'{consent.CONSENT_SERVICE_PERSON_PATH}',\n json={\n 'consents': [\n CONSENT_SERVICE_EMAIL_CONSENT_TYPE,\n ],\n 'modified_at': '2020-03-12T15:33:50.907000Z',\n 'email': '[email protected]',\n 'phone': '',\n 'key_type': 'email',\n },\n status_code=status.HTTP_201_CREATED,\n )\n result = consent.update_consent('[email protected]', accepts_marketing)\n assert result is None\n assert matcher.called_once",
"def sites(self, site_id, data, tenant_id=None, api_version=\"v4.7\"):\n\n if tenant_id is None and self._parent_class.tenant_id:\n # Pull tenant_id from parent namespace cache.\n tenant_id = self._parent_class.tenant_id\n elif not tenant_id:\n # No value for tenant_id.\n raise TypeError(\"tenant_id is required but not set or cached.\")\n cur_ctlr = self._parent_class.controller\n\n url = str(cur_ctlr) + \"/{}/api/tenants/{}/sites/{}\".format(api_version,\n tenant_id,\n site_id)\n\n api_logger.debug(\"URL = %s\", url)\n return self._parent_class.rest_call(url, \"put\", data=data)",
"def _ensure_tenant_and_validate(tenant_, access_key):\n tenant_data = registry.TENANT_DATA_GATEWAY\n tenant = tenant_data.tenant_by_name(tenant_)\n if tenant is None:\n raise TenantNotFoundError(\n \"Tenant not found error. tenant='{}', access_key='{}'\".format(\n tenant_, access_key))\n\n if not tenant.has_access_key(access_key):\n raise AccessKeyNotValidError(\n \"The access key is not valid. tenant='{}', access_key='{}'\".format(\n tenant_, access_key))\n\n return tenant"
] |
[
"0.7870008",
"0.7024336",
"0.67325",
"0.67026174",
"0.67026174",
"0.66391766",
"0.6499703",
"0.6490579",
"0.64027005",
"0.61180586",
"0.5987017",
"0.59784436",
"0.5906629",
"0.5844687",
"0.5634975",
"0.5622623",
"0.5615344",
"0.548632",
"0.54852045",
"0.54754204",
"0.54738",
"0.53225356",
"0.522625",
"0.52195424",
"0.51974845",
"0.51941305",
"0.5139159",
"0.5104887",
"0.5085235",
"0.508365"
] |
0.7556466
|
1
|
Given a tenant id, fetch the tenant for that id
|
def get_tenant_by_id(tenant_id):
tenant = identity.Tenant.query.filter_by(id=tenant_id).first()
if tenant:
return tenant
abort(404, f"Unable to find tenant with id: {tenant_id}")
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def get_tenant_config(tenant_id):\n for tenant in tenants:\n if tenant['tenant_id'] == tenant_id:\n return tenant\n raise errors.BaseTapisError(\"invalid tenant id.\")",
"def get_tenant(key, tenant_name):\n for tenant in key.tenants.list():\n if tenant.name == tenant_name:\n return tenant\n\n return None",
"def get_tenant_id(self, tenant_name):\n _url = \"http://\" + self.host_ip + \":35357/v2.0/tenants\"\n _headers = {'x-auth-token': self.cloud_admin_info['token_project']}\n _body = None\n\n response = self.request(\"GET\", _url, _headers, _body)\n if response is None:\n LOG_OBJ.error(\"No response from Server while getting tenants\")\n return response\n if response.status not in [200, 201, 202, 203, 204]:\n LOG_OBJ.error(\"Tenant list Failed with status %s \" %\n response.status)\n return response.status\n output = json.loads(response.data)\n for tenant in output['tenants']:\n if tenant['name'] == tenant_name:\n LOG_OBJ.debug(\"Tenant Details : %s \" % tenant)\n return tenant['id']\n\n LOG_OBJ.error(\"There is NO tenant with name: %s\" % tenant_name)\n return None",
"def get_by_id(tenant_id):\n tenant = Tenant.find_by_id(tenant_id)\n if tenant:\n tenant_schema = TenantSchema()\n return tenant_schema.dump(tenant)\n\n raise BusinessException(\"Invalid tenant\", HTTPStatus.BAD_REQUEST)",
"def test_get_tenant_by_id(sample_identity):\n access_token, tenant, tenant_user, tc = sample_identity\n new_access_token = tc.post(\n \"api/v1/authentication/login\",\n json={\"username\": tenant_user.username, \"password\": \"1234\"},\n ).json[\"data\"][\"access_token\"]\n headers = {\"Authorization\": \"Bearer \" + new_access_token}\n response = tc.get(f\"api/v1/identity/tenant/{tenant.id}\", headers=headers)\n assert response.status_code == 200, \"Failed to fetch Tenant By ID\"\n assert response.json[\"data\"][\"name\"] == tenant.name, \"Tenant name doesn't match\"",
"def tenant_id(self) -> Optional[str]:\n return pulumi.get(self, \"tenant_id\")",
"def tenant_id(self) -> str:\n return pulumi.get(self, \"tenant_id\")",
"def tenant_id(self) -> str:\n return pulumi.get(self, \"tenant_id\")",
"def tenant_id(self) -> str:\n return pulumi.get(self, \"tenant_id\")",
"def tenant_id(self) -> str:\n return pulumi.get(self, \"tenant_id\")",
"def tenant_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"tenant_id\")",
"def tenant_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"tenant_id\")",
"def tenant_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"tenant_id\")",
"def tenant_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"tenant_id\")",
"def tenant_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"tenant_id\")",
"def tenant_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"tenant_id\")",
"def tenant_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"tenant_id\")",
"def tenant_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"tenant_id\")",
"def tenant_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"tenant_id\")",
"def tenant_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"tenant_id\")",
"def get_tenant_by_name(tenant_name):\n tenant = Tenant.query.filter_by(name=tenant_name).first()\n if not tenant:\n raise Exception(\n 'Could not restore into tenant \"{name}\" as this tenant does '\n 'not exist.'.format(name=tenant_name)\n )\n return tenant",
"def tenant(self, name):\n # Returns a Tenant object for the given name.\n # Uses Keystone API to perform a direct name lookup,\n # as this is expected to work via name.\n\n data = self.auth.tenant_by_name(name)\n t = Tenant(data[\"tenant\"], self)\n return t",
"def get_account_for_tenant(test_auth, tenant_id):\n return '%s%s' % (test_auth.reseller_prefixes[0], tenant_id)",
"def tenant_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"tenant_id\")",
"def tenant_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"tenant_id\")",
"def get_tenant_id(self, **kwargs):\n if self.authenticate() == 200:\n return self.tenant_id\n else:\n return None",
"def tenant_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"tenant_id\")",
"def tenant_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"tenant_id\")",
"def gen_tenant_id(self):\n\n print \"\\t* Obtaining Tenant ID\"\n\n # request tenant info for tenant_id\n headers = {'X-Auth-Token': 'ADMIN'}\n\n # use this to get the tenant_id\n try:\n r = requests.get(\"http://%s:35357/v2.0/tenants\" % self.ip, headers=headers)\n tenants = json.loads(r.text)[\"tenants\"]\n\n # filter out other tenant information\n tenant = filter(lambda tenant: tenant['name']== self.tenant_name, tenants)[-1]\n self.tenant_id = tenant[\"id\"]\n except KeyError:\n # hard coded test value\n r = requests.get(\"http://%s:35357/v2.0/tenants\" % self.ip, headers=headers)\n tenants = json.loads(r.text)[\"tenants\"]\n\n # list tenants and prompt user to select apropriate tenant_id\n tenant_list = []\n for idx, tenant in enumerate(tenants):\n print \"\\t - [%d] Tenant: %s \\n\" % (idx, tenant['name'])\n tenant_list.append((tenant['name'], tenant['id']))\n\n tenant_num = int(raw_input(\"\\t - \"))\n\n print \"\\t* You have selected: %s\" % tenant_list[tenant_num][0]\n self.tenant_id = tenant_list[tenant_num][1]\n except:\n self.reheat_error = True\n self.reheat_errmsg = \"\\t! Could not obtain tenant ID information. Exiting...\"\n print self.reheat_errmsg",
"def get_object_tenant(instance):\n field = get_tenant_field(instance)\n\n if field.primary_key:\n return instance\n\n return getattr(instance, field.name, None)"
] |
[
"0.8102193",
"0.78376037",
"0.76502275",
"0.7277747",
"0.7269845",
"0.71548605",
"0.7004045",
"0.7004045",
"0.7004045",
"0.7004045",
"0.6898316",
"0.6898316",
"0.6898316",
"0.6898316",
"0.6898316",
"0.6898316",
"0.6898316",
"0.6898316",
"0.6898316",
"0.6898316",
"0.6854349",
"0.68374515",
"0.6817707",
"0.67923987",
"0.67923987",
"0.6787134",
"0.67686373",
"0.67686373",
"0.6743109",
"0.6682181"
] |
0.86720824
|
0
|
Provides a list of all Tenants
|
def get_all_tenants():
tenants = identity.Tenant.query.all()
return tenants
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def get_tenants(self):",
"def list_tenants(self):\n _url = \"http://\" + self.host_ip + \":35357/v2.0/tenants\"\n _headers = {'x-auth-token': self.cloud_admin_info['token_project']}\n _body = None\n\n response = self.request(\"GET\", _url, _headers, _body)\n if response is None:\n LOG_OBJ.error(\" no response from Server\")\n return response\n if response.status not in [200, 201, 202, 203, 204]:\n LOG_OBJ.error(\n \" tenant list Failed with status %s \" %\n response.status)\n return response.status\n output = json.loads(response.data)\n LOG_OBJ.info(\"Tenant List : %s \" % output)\n return output[\"tenants\"]",
"def tenants(self):\n # print \"tenant list is %s\" % self.auth.tenants.list()\n if not self._tenancy:\n self._tenancy = {}\n for tenant in self.auth.tenants.list():\n t = Tenant(tenant, self)\n self._tenancy[t[\"name\"]] = t\n return self._tenancy",
"def get_all_teas(self):\n self.tView.all_teas_display(self.manyTea)\n self.tView.prompt_display(0)",
"def get_tenants():\n # these are the tenant_id strings configured for the service -\n tenants_strings = conf.tenants\n result = []\n # the tenants service is a special case, as it must be a) configured to serve all tenants and b) actually maintains\n # the list of tenants in its own DB. in this case, we return the empty list since the tenants service will use direct\n # db access to get necessary data.\n if conf.service_name == 'tenants' and tenants_strings[0] == '*':\n return result\n\n # in dev mode, services can be configured to not use the security kernel, in which case we must get\n # configuration for a \"dev\" tenant directly from the service configs:\n if not conf.use_sk:\n for tenant in tenants_strings:\n t = {'tenant_id': tenant,\n 'iss': conf.dev_iss,\n 'public_key': conf.dev_jwt_public_key,\n 'default_access_token_ttl': conf.dev_default_access_token_ttl,\n 'default_refresh_token_ttl': conf.dev_default_refresh_token_ttl,\n }\n result.append(t)\n\n else:\n # TODO -- look up tenants in the tenants API, get the associated parameters (including sk location)\n pass\n return result",
"def get_tenants(self, **kwargs):\n url = self.get_url('tenants', kwargs, ['begin', 'end'])\n return self.api_client.get(url).json()",
"def get_all_restaurants():\n return list(Restaurant.objects.all().values())",
"def get_all_teas_select(self):\n self.tView.all_teas_display(self.manyTea)\n self.tView.prompt_display(2)",
"def restaurants_all() -> str:\n restaurant_objects = restaurants.load_restaurants()\n return jsonify(restaurant_objects)",
"def getAllTenants(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')",
"def get_etfs_list(self):\n return list(self.etfs.keys())",
"def get_all_terms(self):\n return self.term.all()",
"def export_tenants(self):\n print('\\n=== Exporting all tenant data...')\n\n tenant = dict(self.client.tenant)\n\n print('- Exporting tenant:', tenant['name'])\n\n json = {\n 'id': self.get_id(tenant),\n 'href': tenant['href'],\n 'name': tenant['name'],\n 'key': tenant['key'],\n 'createdAt': tenant['created_at'].isoformat(),\n 'modifiedAt': tenant['modified_at'].isoformat(),\n 'customData': self.get_custom_data(tenant),\n }\n\n #for application in tenant.applications:\n\n self.write('%s/%s/meta' % (self.location, json['id']), json)\n\n print('=== Done!\\n')",
"def display_accounts(cls):\n return cls.account_list",
"def get_instance_list():\n return parse_list_output(Popen('nova list --all-tenants'.split(),\n stdout=STDOUT, stderr=STDERR).communicate()[0])",
"def get_all_tigers(self) -> List[Tiger]:\n tiger_positions = self.get_all_tiger_positions()\n return cast(List[Tiger], [pos.piece for pos in tiger_positions])",
"def get_teachers(self):\n query = Teacher.all().order('teacher')\n return query.fetch()",
"def tenants_for_token(self, context):\n token_ref = self.token_api.get_token(context=context,\n token_id=context['token_id'])\n assert token_ref is not None\n\n user_ref = token_ref['user']\n tenant_refs = []\n for tenant_id in user_ref['tenants']:\n tenant_refs.append(self.identity_api.get_tenant(\n context=context,\n tenant_id=tenant_id))\n return self._format_tenants_for_token(tenant_refs)",
"def list(self):\n return {'klanten': self.request.db.query(models.Klant).all()}",
"def list_accounts(self):\n pass",
"def territories(self) -> localedata.LocaleDataDict:\n return self._data['territories']",
"def get_all_thermals(self):\n return self._thermal_list",
"def get_turbine_info(self):\n turbines = self.__get_turbines().collect()\n\n return turbines",
"def turbines(self):\n return self.turbine_map.turbines",
"def all(self):\n return self.client.request_with_method(Methods.LIST % self.name)['items']",
"def get_all_templates(cls):\n raise NotImplementedError()",
"def getAll(self):\n return self.__lst",
"def get_all_environments():\n return ENVIRONMENTS",
"def health_titans():\r\n return [titan.health for titan in Titan.titans]",
"def get_occupants(self):\n return [x.get_occupant_type() for x in self.huts]"
] |
[
"0.7311913",
"0.7217978",
"0.7035775",
"0.68330145",
"0.6654493",
"0.6465175",
"0.62954146",
"0.6177893",
"0.61654216",
"0.6148272",
"0.604497",
"0.6009238",
"0.59118146",
"0.5889167",
"0.5887195",
"0.58863044",
"0.5854088",
"0.5853019",
"0.5846821",
"0.58389825",
"0.5838873",
"0.5822555",
"0.58036",
"0.57996285",
"0.5773398",
"0.57703537",
"0.5769933",
"0.5762229",
"0.57468504",
"0.5731152"
] |
0.7834845
|
0
|
Mark this track as missed (no association at the current time step).
|
def mark_missed(self):
if self.state == TrackState.Tentative:
self.state = TrackState.Deleted
elif self.time_since_update > self._max_age:
self.state = TrackState.Deleted
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def tests_missed(self, num):\n self.missing_tests = num",
"def tick_skipped(self):\n pass",
"def mark_as_not_done(self):\n grade_event = {'value': 0, 'max_value': self.points}\n self.runtime.publish(self, 'grade', grade_event)",
"def check_miss(self):\n if self.ball.center.x > SCREEN_WIDTH:\n # We missed!\n self.score -= SCORE_MISS\n self.ball.restart()",
"def missing(self, value):\n self.MISSING = value",
"def test_track_without_association(self):\n track = Track(artist='Artist', album='Album')\n pk = track.insert(self.app.db, self.app.curs,\n 'xmms',\n datetime.datetime.now())\n\n for line in self.app.associate_albums():\n pass\n\n row = self.get_track_by_id(pk)\n self.assertEqual(row['album_id'], 0)",
"def test_get_all_unassociated_single_track_already_associated(self):\n track = Track(artist='Artist', album='Album',\n title='Title', album_id=1)\n track.insert(self.app.db, self.app.curs,\n 'xmms', datetime.datetime.now())\n self.assertEqual(self.get_track_count(), 1)\n tracks = Track.get_all_unassociated(self.app.curs)\n self.assertEqual(len(tracks), 0)",
"def testNoForcedTrack(self):\n\n trackLine = _buildTrackLine(20, 'audio', {'hello': 'goodbye'})\n\n trackID, trackType, trackDict = tools._trackInfo(trackLine)\n\n self.assertTrue(\n 'forced_track' in trackDict.keys()\n )\n\n self.assertEqual(\n trackDict['forced_track'],\n '0'\n )",
"def testHealthAssessWorkMissed(self):\n attr = self.session.create_visit_attr()\n\n self.util.intTypeTest(self, attr, \"work_missed\")\n\n self.util.intPropertyTest(self, attr, \"work_missed\")",
"def test_missing_tracks(self):\n\n # Taylor Swift doesn't have her albums on Spotify\n expected_missing = [track_id for track_id, track in self.ilibrary.tracks.iteritems()\n if track.artists[0] == 'Taylor Swift']\n\n for missing_id in expected_missing:\n self.assertNotIn(missing_id, [track.i_id for track in self.tracks])",
"def test_08_no_break_record_before_set_record(self):\n record = SwimRecord(first_name='j',last_name='j',team_name='k',relay=True,stroke='butterfly',distance=100,record_date=timezone.now(),record_broken_date=(timezone.now() - timedelta(days=1)))\n record.save()\n try:\n record.full_clean()\n except ValidationError as e:\n self.assertTrue(\"Can't break record before record was set.\" in e.message_dict['record_broken_date'])",
"def test_get_all_unassociated_single_track_without_album(self):\n track = Track(artist='Artist', title='Title')\n track.insert(self.app.db, self.app.curs,\n 'xmms', datetime.datetime.now())\n self.assertEqual(self.get_track_count(), 1)\n tracks = Track.get_all_unassociated(self.app.curs)\n self.assertEqual(len(tracks), 0)",
"def set_skip_current_track(self):\n self.get(COMMAND_CPM, 'SetSkipCurrentTrack')",
"async def test_skipped_already_unsilenced(self):\n self.cog.scheduler.__contains__.return_value = False\n self.cog.previous_overwrites.get.return_value = None\n\n for channel in (MockVoiceChannel(), MockTextChannel()):\n with self.subTest(channel=channel):\n self.assertFalse(await self.cog._unsilence(channel))\n channel.set_permissions.assert_not_called()",
"def req_note_dismiss_reminder(self):\n if self.helper_action_get_request_is_wrong(\"req_dismiss_reminder_and_display_note\"):\n self.error_msg_queue_note.append(\"Reminder has not been dismisses - application error?\")\n return\n\n task_id = util.sanitize_singleline_string_for_tasksave(self.last_request_get_dict[\"taskid\"][0])\n task = self.task_store.store_dict_id[task_id]\n\n # TODO move to backend?\n task.due_date_reminder_dismissed = True\n self.task_store.touch(task.taskid)\n self.task_store.task_store_save()",
"def set_missings(self, var, missing_map='default', hide_on_y=True,\n ignore=None):\n var = self.unroll(var)\n ignore = self.unroll(ignore, both='all')\n if not missing_map:\n for v in var:\n if 'missings' in self._meta['columns'][v]:\n del self._meta['columns'][v]['missings']\n elif missing_map == 'default':\n self._set_default_missings(ignore)\n else:\n if isinstance(missing_map, list):\n m_map = {'exclude': missing_map}\n else:\n m_map = org_copy.deepcopy(missing_map)\n for v in var:\n if v in ignore: continue\n v_m_map = self._clean_missing_map(v, m_map)\n if self._has_missings(v):\n self._meta['columns'][v].update({'missings': v_m_map})\n else:\n self._meta['columns'][v]['missings'] = v_m_map\n if hide_on_y:\n self.hiding(var, missing_map, 'y', True)\n\n return None",
"def mark_seen(self):\r\n self.seen_at = now()\r\n return self",
"def test_single_track_no_album(self):\n self.add_mp3(set_album=True)\n (added, status) = self.app.add_album(self.filenames)\n self.assertEqual(added, False)\n self.assertIn('has no album tag', status)\n self.assertEqual(self.get_album_count(), 0)",
"def test_single_track_no_artist(self):\n self.add_mp3(set_artist=True)\n (added, status) = self.app.add_album(self.filenames)\n self.assertEqual(added, False)\n self.assertIn('has no artist tag', status)\n self.assertEqual(self.get_album_count(), 0)",
"def announce_missing(self, missing_guardian_key: ElectionPublicKey) -> None:\n missing_guardian_id = missing_guardian_key.owner_id\n\n # If guardian is available, can't be marked missing\n if missing_guardian_id in self._available_guardians:\n log_info(f\"guardian {missing_guardian_id} already announced\")\n return\n\n self._mark_missing(missing_guardian_key)",
"def _fire_missile(self):\n new_missile = Missile(self)\n self.missiles.add(new_missile)",
"def remove_track(self, track_uri):\n if track_uri in self.trackToPlay:\n del self.trackToPlay[track_uri]\n else:\n raise KeyError('Track not in tracklist')",
"def skip(self):\n self.skip_votes.clear()\n if self.is_playing():\n self.player.stop()",
"def test_remove_one(self):\n seq_run = SequencingRun(None, sequencing_run_type=\"test\",\n project_list=[\n Project(sample_list=[\n Sample(\"one\"),\n Sample(\"two\"),\n Sample(\"three\")\n ], id=1)\n ])\n sample_status_list = [DirectoryStatus.SampleStatus(sample_name=\"one\", project_id=\"1\", uploaded=False),\n DirectoryStatus.SampleStatus(sample_name=\"two\", project_id=\"1\", uploaded=True),\n DirectoryStatus.SampleStatus(sample_name=\"three\", project_id=\"1\", uploaded=False)]\n\n res = upload_helpers.set_uploaded_samples_to_skip(seq_run, sample_status_list)\n\n res_samples = res.project_list[0].sample_list\n self.assertEqual(res_samples[0].skip, False)\n self.assertEqual(res_samples[1].skip, True)\n self.assertEqual(res_samples[2].skip, False)",
"def reset(self):\n self.complete_misses = 0\n return",
"def _set_truly_broken(paired_notes, bucket):\n raise NotImplementedError(\"_set_truly_broken needs to be implemented!\")",
"def mark(self) -> None:\n self.zero = self._monotonic_ms()",
"def get_atom_miss(self):\r\n return Marker((255, 0, 0), self._screen)",
"def test_update_no_pk(self):\n track = Track(artist='Artist', album='Album', title='Title')\n with self.assertRaises(Exception):\n track.update(self.app.db, self.app.curs)\n self.assertEqual(self.get_track_count(), 0)",
"def on_reset(self):\n\n current = self.current_step\n if current:\n current.stop()\n\n logging.debug(u\"- seeking back before first step\")\n self.set('_index', None)"
] |
[
"0.62534416",
"0.60326207",
"0.5580928",
"0.55340266",
"0.5485797",
"0.5452688",
"0.5398362",
"0.5377762",
"0.53379023",
"0.52324027",
"0.52234924",
"0.52053666",
"0.5157145",
"0.5115357",
"0.510468",
"0.50325304",
"0.5029975",
"0.5010918",
"0.5010119",
"0.5003667",
"0.5001138",
"0.49980044",
"0.49745867",
"0.49653596",
"0.49627957",
"0.4961203",
"0.49549332",
"0.4954737",
"0.4953896",
"0.49143425"
] |
0.73827064
|
1
|
Returns True if this track is confirmed.
|
def is_confirmed(self):
return self.state == TrackState.Confirmed
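
This document and the mark_missed document above come from the same track-state machine. A compact, self-contained sketch of that lifecycle follows; TrackState and the trimmed-down Track class are illustrative stand-ins carrying only the fields these two methods touch, not the full tracker implementation.

class TrackState:
    Tentative = 1
    Confirmed = 2
    Deleted = 3


class Track:
    def __init__(self, n_init=3, max_age=30):
        self.state = TrackState.Tentative
        self.hits = 0
        self.time_since_update = 0
        self._n_init = n_init
        self._max_age = max_age

    def predict(self):
        # Each time step without an association ages the track.
        self.time_since_update += 1

    def update(self):
        # A successful association resets the miss counter and may confirm.
        self.hits += 1
        self.time_since_update = 0
        if self.state == TrackState.Tentative and self.hits >= self._n_init:
            self.state = TrackState.Confirmed

    def mark_missed(self):
        # Tentative tracks are dropped at the first miss; confirmed tracks
        # only once they have gone unmatched for more than _max_age steps.
        if self.state == TrackState.Tentative:
            self.state = TrackState.Deleted
        elif self.time_since_update > self._max_age:
            self.state = TrackState.Deleted

    def is_confirmed(self):
        return self.state == TrackState.Confirmed


track = Track(n_init=2)
track.update()
track.update()
print(track.is_confirmed())  # True once hits reaches n_init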
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def is_confirmed(self) -> bool:\n return self._is_confirmed",
"def is_confirmed(self, is_confirmed: bool):\n\n self._is_confirmed = is_confirmed",
"def is_confirmation_mode_enabled(self):\n\t\treturn bool(call_sdk_function('PrlSrv_IsConfirmationModeEnabled', self.handle))",
"def confirmed(self, cr, uid, ids, context=None): \n self.write(cr, uid, ids, {'state':'confirmed'})\n return True",
"def action_confirm(self):\n self.check_txt_ids()\n self.write({'state': 'confirmed'})\n return True",
"def auto_confirmation_enabled(self):\n return self._auto_confirmation_enabled",
"def confirmed(self):",
"def is_booked(self):\n return self.booking_set.filter(confirmed=True).count() > 0",
"def confirm(self, token):\n ser = Serializer(current_app.config['SECRET_KEY'])\n try:\n data = ser.loads(token.encode('utf-8'))\n except (BadSignature, SignatureExpired):\n return False\n if data.get('confirm') != self.id:\n return False\n self.confirmed = True\n db.session.add(self)\n return True",
"def has_talk(self):\n if self.applicant.talks.filter(Q(status=SUBMITTED) |\n Q(status=UNDER_CONSIDERATION) |\n Q(status=PROVISIONAL) |\n Q(status=ACCEPTED)):\n return True\n return False",
"def on_track(self):\n for account in self.accounts.all():\n if not account.on_track:\n return False\n return True",
"def verified(self) -> pulumi.Output[bool]:\n return pulumi.get(self, \"verified\")",
"def is_pending_approval(self):\n if self.registration_method == self.REQUESTED \\\n and self.is_pending_activation():\n return True\n else:\n return False",
"def on_track(self):\n for account in self.accounts:\n if not account.on_track:\n return False\n return True",
"def is_voicemail(self):\n return self._is_voicemail",
"def is_invited_pending_activation(self):\n if self.registration_method == self.INVITED \\\n and self.is_pending_activation():\n return True\n else:\n return False",
"def verified(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"verified\")",
"def has_succeeded(self):\n return self.transaction_result == TERMINAL_PAYMENT_SUCCESS",
"def action_confirm(self):\n options=self.env['plm.config.settings'].GetOptions()\n status = 'confirmed'\n action = 'confirm'\n default = {\n 'state': status,\n 'engineering_writable': False,\n }\n doc_default = {\n 'state': status,\n 'writable': False,\n }\n operationParams = {\n 'status': status,\n 'statusName': _('Confirmed'),\n 'action': action,\n 'docaction': 'confirm',\n 'excludeStatuses': ['confirmed', 'transmitted', 'released', 'undermodify', 'obsoleted'],\n 'includeStatuses': ['draft'],\n 'default': default,\n 'doc_default': doc_default,\n }\n if options.get('opt_showWFanalysis', False):\n return self.action_check_workflow(operationParams)\n else:\n ids=self._ids\n self.logging_workflow(ids, action, status)\n return self._action_to_perform(ids, operationParams, default)",
"def has_confidence(self):\n return self.confidence is not None",
"def is_on(self) -> bool:\n return self._state == \"yes\"",
"def is_proved(self):\n return len(self.proofs) > 0",
"def test_simple_confirmed(self):\n appt_date = datetime.date.today()\n reminders.Patient.objects.filter(\n pk__in=[self.test_patient.pk, self.other_patient.pk]\n ).update(next_visit=appt_date)\n confirmed = self.create_confirmed_notification(self.test_patient, appt_date)\n unconfirmed = self.create_unconfirmed_notification(self.other_patient, appt_date)\n qs = reminders.Patient.objects.confirmed_for_date(appt_date)\n self.assertTrue(self.test_patient in qs)\n self.assertFalse(self.other_patient in qs)\n self.assertFalse(self.unrelated_patient in qs)",
"def can_mark_as_done(self):\n if (not self.event_store.done) and \\\n ((not self.file_submission_required) or self.event_store.has_file_submission) and \\\n (not self.contains_questions):\n return True\n return False",
"def test_simple_confirmed(self):\n appt_date = datetime.date.today()\n self.create_confirmed_notification(self.test_patient, appt_date)\n self.create_unconfirmed_notification(self.other_patient, appt_date)\n qs = Patient.objects.confirmed_for_date(appt_date)\n self.assertTrue(self.test_patient in qs)\n self.assertFalse(self.other_patient in qs)\n self.assertFalse(self.unrelated_patient in qs)",
"def contact_verified(self):\n return self.contact.verified",
"def is_approved(self) -> bool:\n return self.state == Order.OrderState.APPROVED.choice_value",
"def is_canceled(self):\n\n if self.status == self.STATUS['CANCELED']:\n return True\n else:\n return False",
"def is_pending(self):\n if self.status == \"PENDING\":\n return True\n else:\n return False",
"def get_confirmed(self):\n return BBSitting.objects.filter(booked=self).filter(booking__confirmed=True)"
] |
[
"0.814605",
"0.64437556",
"0.6437853",
"0.6334783",
"0.62049437",
"0.61790836",
"0.61653703",
"0.61243564",
"0.61206985",
"0.6007849",
"0.59947276",
"0.59857404",
"0.5919182",
"0.5889163",
"0.58517134",
"0.57956177",
"0.5776833",
"0.57404286",
"0.57015604",
"0.5700138",
"0.56860816",
"0.5685147",
"0.56767064",
"0.5650684",
"0.5633734",
"0.5620882",
"0.5615275",
"0.56016195",
"0.5572766",
"0.5566902"
] |
0.86330813
|
1
|
Prints information about the car
|
def wypisz_info(self):
print(f"Samochód: {self.producent} {self.model}")
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def info(self):",
"def info(self):",
"def get_details(self):",
"def manage_info():",
"def mezclar_bolsa(self):",
"def dane_profilu(imie, nazwisko, **inne_informacje):\r\n inne_informacje['imie'] = imie\r\n inne_informacje['nazwisko'] = nazwisko\r\n return inne_informacje",
"def show_data():",
"def _get_information(self):\n pass",
"def pokazPrzedmiot(self,przedmiot:str)->None:\n try:\n print(self.przedmioty[przedmiot])\n except KeyError:\n print(\"Nie ma takiego przedmiotu\")",
"def info(self, id):",
"def details(self):\n pass",
"def __str__(self):\n return self.identificacao",
"def __str__(self):\n return self.identificacao",
"def __str__(self):\n return self.identificacao",
"def getInfo():",
"def kluisInfo():\r\n kluisDict = dictionary()\r\n beginSchermTerug.grid(pady=3, padx=(10, 10), sticky='w', row=1)\r\n\r\n for kluis in kluisDict:\r\n try:\r\n if kluisDict[kluis] is not None and int(beginSchermEntry.get()) in kluisDict[kluis]: # kluis zoeken in\r\n # dictionary\r\n beginSchermTopTitel['text'] = fietsStalTijd(kluisDict[kluis][1]) # functie fietsStalTijd op kluis\r\n # aanroepen\r\n beginSchermTitel['text'] = 'De huidige kosten zijn €' + str(prijs(kluisDict[kluis][0]))\r\n beginSchermEntry.delete(0, END)\r\n return\r\n except ValueError:\r\n beginSchermTitel['text'] = 'Geen geldige invoer'\r\n return\r\n beginSchermTitel['text'] = 'Dit OV nummer is onbekend'\r\n return",
"def prikazi_info_satni_rest(self, mapa):\r\n self.restAgregiraniModel.set_data(mapa)\r\n self.restAgregiraniView.update()\r\n self.labelVrijeme.setText(str(mapa['vrijeme']))\r\n chklist, smap = mapa['status']\r\n self.restSatniBitModel.set_data_and_smap(chklist, smap)\r\n self.restSatniBitView.update()",
"def __str__(self):\n return self.descricao",
"def Wraith_Form(self):\t\t\n\t\tprint(self.name.Title() + \"Wraith\")",
"def __str__(self):\n\t\treturn self.titre",
"def inscricao(self):\n\n return True",
"def zapisi_pot(pot):",
"def zapisi_pot(pot):",
"def zapisi_pot(pot):",
"def zapisi_pot(pot):",
"def zapisi_pot(pot):",
"def zapisi_pot(pot):",
"def show_data(self, ):\r\n return print('society_name : {}\\n'\r\n 'flat : {}\\n'\r\n 'house_no : {}\\n'\r\n 'no_of_members : {}\\n'\r\n 'income : {}\\n '\r\n .format(self.society_name, self.flat, self.house_no, self.no_of_members, self.income))",
"def getIntervenciones():",
"def cliquer_sur_unité(self):"
] |
[
"0.5897859",
"0.5897859",
"0.5885302",
"0.57649606",
"0.56881845",
"0.56734383",
"0.56343335",
"0.56061435",
"0.5544123",
"0.5528234",
"0.5506979",
"0.5506014",
"0.5506014",
"0.5506014",
"0.5501805",
"0.5500297",
"0.5481597",
"0.54327404",
"0.54267657",
"0.5403266",
"0.539135",
"0.53831595",
"0.53831595",
"0.53831595",
"0.53831595",
"0.53831595",
"0.53831595",
"0.53576946",
"0.5356457",
"0.53462476"
] |
0.6628599
|
0
|
Load all images matching the specified path pattern and return an array containing all of them.
|
import glob

import cv2


def loadImages(loadPath):
    # loadPath is a glob pattern, e.g. "frames/*.jpg".
    img_array = []
    for filename in glob.glob(loadPath):
        img = cv2.imread(filename)  # None if the file cannot be read as an image
        height, width, layers = img.shape
        size = (width, height)  # retained from the original; callers do not receive it
        img_array.append(img)
    return img_array
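
The unused size tuple hints at the usual follow-up step of assembling the loaded frames into a video. A hedged sketch of that step follows; the input pattern, output filename, codec, and frame rate are assumptions, not part of the original code.

import cv2

frames = loadImages("frames/*.jpg")  # hypothetical input pattern
if frames:
    height, width, _ = frames[0].shape
    writer = cv2.VideoWriter("out.avi", cv2.VideoWriter_fourcc(*"DIVX"), 15, (width, height))
    for frame in frames:
        writer.write(frame)
    writer.release()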
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def get_images(self, file_path: str) -> Iterable[Image]:\n return []",
"def load_images(filename):\n images = _load(filename)\n #_info_image(image, title=os.path.basename(filename))\n return images",
"def load_images(filelist):\n # pixel value range 0-255\n if not isinstance(filelist, list):\n im = Image.open(filelist).convert('L')\n return np.array(im).reshape(1, im.size[1], im.size[0], 1)\n data = []\n for file in filelist:\n im = Image.open(file).convert('L')\n data.append(np.array(im).reshape(1, im.size[1], im.size[0], 1))\n return data",
"def load_images(file):\n\timage_list = [] # List for storing all the images\n\ttargets = []\n\t\n\tfor filename in glob.glob(file + '/*.png'):\n\t\t# ==================\n\t\t# Reading the image\n\t\t# ==================\n\t\timage = scipy.misc.imread(filename).astype(np.float32)\n\t\t\n\t\t# ================================\n\t\t# Converting the image to a vector\n\t\t# ================================\n\t\timage = image.flatten() # (784, )\n\t\t\n\t\t# ==============================\n\t\t# Normalizing the image to numpy\n\t\t# ==============================\n\t\timage = image / 255.0\n\t\timage = image - 0.5\n\t\timage = image * 2.0\n\t\t\n\t\t# ===============================\n\t\t# Appending the image to the list\n\t\t# ===============================\n\t\timage_list.append(image)\n\t\t\n\t\t_, value = filename.split('\\\\')\n\t\t# print(value[0])\n\t\ttargets.append(int(value[0]))\n\t\n\timage_list = np.array(image_list)\n\ttargets = np.array(targets)\n\t\n\t# ================================================\n\t# \t\t\tShuffling the data\n\t# ================================================\n\timage_list, targets = shuffle(image_list, targets)\n\t\n\ttrain_images, test_images, train_targets, test_targets = split(image_list, targets)\n\treturn train_images, test_images, train_targets, test_targets",
"def load_images(files, open_fn=None):\n if open_fn is None:\n import cv2\n open_fn = cv2.imread\n images = list()\n for _file in files:\n images.append(np.asarray(open_fn(_file)))\n return images",
"def __readImages(self, filename):\n print 'Reading images from %s ...' % filename\n images = []\n with open(filename, 'rb') as infile:\n infile.read(4) # ignore magic number\n count = struct.unpack('>i', infile.read(4))[0]\n rows = struct.unpack('>i', infile.read(4))[0]\n columns = struct.unpack('>i', infile.read(4))[0]\n\n for i in xrange(count):\n data = infile.read(rows*columns)\n image = np.fromstring(data, dtype=np.uint8)\n image = image.reshape((rows, columns))\n image = 255 - image # now black digit on white background\n images.append(image)\n return images",
"def load_images(self):\n images_list = [os.path.join(self.root, image['file_name'])\n for image in self.data['images']]\n\n if self.shuffle:\n random.shuffle(images_list)\n images_list = images_list[:self.max_samples] if self.max_samples is not None and self.max_samples <= len(\n images_list) else images_list\n\n return images_list",
"def load_img(file_list, dir_path):\n data = []\n for file in file_list:\n img = plt.imread(dir_path + file)\n # Convert RGB image to grayscale\n if len(img.shape) == 3:\n img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)\n # Resize image to desired size\n img = cv2.resize(img, (64, 64))\n # Store processed image to list\n data.append(img)\n return np.array(data)",
"def load_images(path):\n images = []\n images_names = []\n \n for file_name in os.listdir(path):\n image_name = file_name\n images_names.append(image_name)\n images_names = sorted(images_names) #use sort to insure linux file sys behaves\n print(images_names) #check for proper order\n\n for file_name in images_names:\n image = pygame.image.load(path + os.sep + file_name).convert()\n images.append(image)\n return images",
"def loadImages(files, targets):\n images = []\n for file in files:\n targets.append(file)\n images.append(snd.imread(file))\n return images, targets",
"def load_images(self):\n for image in self.gltf.images:\n self.images.append(image.load(self.path.parent))",
"def load_pic_in_directory(directory):\n return [Image.open(os.path.join(directory, img)) for img in os.listdir(directory)]",
"def load_images(self, image_path):\n X_train = []\n\n # Load all files from the image path using Image.open.\n for i in recursive_list(image_path):\n # Open images as ???\n img = Image.open(i)\n # Convert to NP array.\n img = np.asarray(img)\n # Append them into higher order array.\n if img.shape == (128, 128, 3):\n X_train.append(img)\n\n # return all the images concatenated as a 4D array\n return np.asarray(X_train)",
"def load_set(directName, n = np.inf):\n # Loaded a set of images\n\n files = os.listdir(directName)\n n = min(n, len(files))\n #n = len(files)\n print(\"Loading \" + str(n) + \" images\")\n imgs = [mpimg.imread(directName + files[i]) for i in range(n)]\n\n return imgs",
"def load_images(folder_path, num_images):\n imgs = np.zeros(shape=[num_images, 400, 400, 3])\n for i in range(1, num_images + 1):\n image_name = \"satImage_%.3d\" % i\n image_path = folder_path + image_name + \".png\"\n if os.path.isfile(image_path):\n print('Loading ' + image_path)\n img = mpimg.imread(image_path)\n\n #imgs[i - 1] = np.asarray(img).reshape(400, 400, 3)\n imgs[i - 1] = img.reshape(400, 400, 3)\n else:\n print('File ' + image_path + ' does not exist')\n return imgs",
"def load_images(self, folder):\n cwd = os.getcwd()\n dir = cwd + '/' + folder\n files = os.listdir(dir)\n for file in files:\n img = pygame.image.load(dir + '/' + file)\n self.images.append(img)",
"def readImages(imgFolder='img/'):\n #Each image in images is a numpy array of shape 192x168(x1) (heightxwidth)\n #images datatype is a regular numpy list\n filenames = os.listdir(imgFolder)\n if imgFolder == 'img/':\n images = [imageio.imread('img/'+fn+'/image0.jpg')[::,::].astype(np.float32)/255. for fn in filenames]#glob.glob(imgFolder+'*.jpg')]\n else:\n images = [imageio.imread(imgFolder+fn)[::,::].astype(np.float32)/255. for fn in filenames]\n return images",
"def load_images(filenames):\n h,w,c = SIZE\n images = np.empty((len(filenames),h,w,c))\n for i,img_path in enumerate(filenames):\n # Base64\n with open(img_path,'rb') as image_file:\n img_base64_encode = base64.b64encode(image_file.read())\n img_base64 = base64.b64decode(img_base64_encode)\n images[i] = np.array(Image.open(io.BytesIO(img_base64)))/255.0 # Reducción en distancia\n return images",
"def load_images_from_folder(folder):\n images = []\n for filename in os.listdir(folder):\n img = Image.open(os.path.join(folder,filename))\n images.append(img)\n return images",
"def load_images(pool, entries):\n start = time.perf_counter()\n images = pool.map(ski.io.imread, [x.path for x in entries])\n logger.info(\"Loaded %i images:\", len(images))\n util.pprint_log([x.name for x in entries], logger.info)\n logger.info(util.elapsed(start))\n logger.info(\"\\n\")\n return images",
"def read_local(path):\n files = os.listdir(path)\n imgs = []\n for f in files:\n if f.endswith(\".tiff\") or f.endswith(\".tif\"):\n img = Image.open(os.path.join(path, f))\n imgs.append(np.array(img))\n return imgs",
"def load_image(path):\n imagelist = []\n\n for image_file in os.listdir(path):\n image_path = os.path.join(path, image_file)\n image = Image.open(image_path).resize([224, 224])\n image = np.array(image).astype(np.float) / 128 - 1\n imagelist.append(image)\n\n return np.array(imagelist)",
"def load_from_folder(path):\n images = []\n files = os.listdir(path)\n files.sort()\n for file in tqdm(files):\n images.append(io.imread(path + file))\n return images",
"def load_images(self, image_paths):\n \n fill_list = []\n \n for idx in tqdm(range(len(image_paths))):\n path = image_paths[idx]\n yield cv2.imread(path)",
"def getImages(self,Project=\"\"):\n #images = [\"image1.jpg\",\"image2.jpg\",\"image3.jpg\"]\n \n os.chdir(self.dataDir)\n images = glob.glob(\"*.png\")\n \n return images",
"def read_image(images_root):\n im_array = np.load(images_root)\n return im_array",
"def load_images_rgb(filelist):\n # pixel value range 0-255\n if not isinstance(filelist, list):\n im = Image.open(filelist)\n if len(im.size) < 3:\n im = np.expand_dims(im, axis=2)\n if im.shape[2] == 1:\n im = np.concatenate((im, im, im), axis=2)\n\n return np.array(im).reshape(1, im.size[1], im.size[0], 3)\n data = []\n for file in filelist:\n im = Image.open(file)\n if len(im.size) < 3:\n im = np.expand_dims(im, axis=2)\n im = np.concatenate((im, im, im), axis=2)\n data.append(np.array(im).reshape(1, im.shape[1], im.shape[0], 3))\n else:\n data.append(np.array(im).reshape(1, im.size[1], im.size[0], 3))\n return data",
"def load_images(images_path, as_array=True):\n list_names = []\n list_img = []\n\n path_list = glob.glob(images_path + '/*', recursive=False)\n path_list.sort()\n\n for image_name in path_list:\n # ************** LINUX ****************\n name = image_name.split('/')[-1]\n # ************* WINDOWS **************\n name = name.split('\\\\')[-1]\n list_names.append(name)\n\n for image_name in path_list:\n # ******************* WINDOWS & LINUX ***************************\n image = cv2.imdecode(np.fromfile(image_name, np.uint8),\n cv2.IMREAD_UNCHANGED)\n # ******************* LINUX ******************************\n # imagen = cv2.imread(image_name)\n image_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n list_img.append(image_rgb)\n\n if as_array is True:\n list_img = np.array(list_img)\n\n return(list_img, list_names)",
"def read_image():\n images = []\n for hand in os.listdir('images'):\n img = cv2.imread(os.path.join('images', hand))\n if img is not None:\n images.append(img)\n return images",
"def load_images(self, files, sub_dir):\n\n for f in files:\n self.images.append(Image(f, sub_dir))"
] |
[
"0.77775574",
"0.76499426",
"0.7614241",
"0.7502157",
"0.7476571",
"0.7434772",
"0.7387218",
"0.723891",
"0.7111967",
"0.7106821",
"0.7099478",
"0.70800775",
"0.7056213",
"0.7026734",
"0.7012313",
"0.7003299",
"0.7000063",
"0.6955172",
"0.69508356",
"0.6920103",
"0.6912523",
"0.68969226",
"0.6875327",
"0.6862652",
"0.6838168",
"0.68367136",
"0.68219525",
"0.6820854",
"0.68166655",
"0.6809868"
] |
0.7798019
|
0
|
Take some English text and return a Pirateish version thereof.
|
def translate(english):
# Normalise a list of words (remove whitespace and make lowercase)
words = [w.lower() for w in english.split()]
# Substitute some English words with Pirate equivalents.
result = [_PIRATE_WORDS.get(word, word) for word in words]
# Capitalize words that begin a sentence and potentially insert a pirate
    # phrase with a chance of 1 in 6 (random.randint(0, 5) == 0).
capitalize = True
for i, word in enumerate(result):
if capitalize:
result[i] = word.capitalize()
capitalize = False
if word.endswith((".", "!", "?", ":",)):
# It's a word that ends with a sentence ending character.
capitalize = True
if random.randint(0, 5) == 0:
result.insert(i + 1, random.choice(_PIRATE_PHRASES))
return " ".join(result)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def PigToEnglish(str):\r\n\r\n # TODO: Your code here\r\n\r\n\r\n # Change the return to return the converted string\r\n return(\"\")",
"def process_text(self, text, language):",
"def translate_leet(phrase):",
"def translateText(text):\r\n\treturn translator.translate(text, src='en', dest='ro')",
"def translate_to_pirate_talk(phrase):\n\n # I get to make a Pirate dictionary!!\n english_to_pirate = {\n \"sir\": \"matey\",\n \"hotel\": \"fleabag inn\",\n \"student\": \"swabbie\",\n \"boy\": \"matey\",\n \"madam\": \"proud beauty\",\n \"professor\": \"foul blaggart\",\n \"restaurant\": \"galley\",\n \"your\": \"yer\",\n \"excuse\": \"arr\",\n \"students\": \"swabbies\",\n \"are\": \"be\",\n \"lawyer\": \"foul blaggart\",\n \"the\": \"th'\",\n \"restroom\": \"head\",\n \"my\": \"me\",\n \"hello\": \"avast\",\n \"is\": \"be\",\n \"man\": \"matey\",\n # appended my own translation, just in case\n \"there\": \"thar\"\n }\n\n # separate phrase into constituent words and put in list\n phrase = phrase.split(\" \")\n new_phrase = \"\"\n\n # loop through list and add each word (or translation, if applicable)\n # to the new phrase\n for word in phrase:\n if word in english_to_pirate:\n new_phrase += english_to_pirate[word] + \" \"\n else:\n new_phrase += word + \" \"\n\n # remove final space, which is extra\n new_phrase = new_phrase[:-1]\n return new_phrase",
"def translate(str):\r\n if isPig(str):\r\n return(PigToEnglish(str))\r\n return(EnglishToPig(str))",
"def _pinyin(self, rest):\n # Fix if sentence contains some english '.tr yacin太牛了'\n rest = filter(lambda x: not self.isascii(x), rest.decode('utf8'))\n def reduce_reading((char, readings)):\n \"\"\"If a character has multiple cjklib readings, use the fine-tuning\n dict from pinyin toolkit and CEDICT as a backup.\"\"\"\n if len(readings) == 1:\n return readings[0]\n else:\n try:\n return self.pinyin_toolkit_lookup[char]\n except KeyError:\n return self._dict_reading_lookup(char)\n\n readings = [self.char_lookup.getReadingForCharacter(x, 'Pinyin') for x in rest]\n res = u' '.join(map(reduce_reading, zip(rest, readings)))\n return res.encode('utf8')",
"def EnglishToPig(str):\r\n\r\n # TODO: Your code here\r\n\r\n\r\n # Change the return to return the converted string\r\n return(\"\")",
"def findEnglish(data: str) -> str:\n # remove all string leading up to the word Translate\n data = data[data.find('Translate'):len(data)]\n # initalize list\n english_list = []\n\n # find all english in the string\n number_list = [int(num) for num in data if num.isnumeric()]\n\n # remove 4\n number_list.remove(4)\n\n # find smallest and largest numbers\n small = min(number_list)\n large = max(number_list)\n\n # first find the string with number\n for i in range(small,large+1):\n # find the line after i\n sym = f\"{i}\"\n symbol_lines_index = symbol_line_location(data, sym, move=0, addLast=False)\n\n # find index for that specific number\n eng = find(data, f\"{i}\")\n\n # for each location, determine if the 2 higher index is an alphabet or not\n for j in range(len(eng)):\n # if it is, then take that line\n if data[eng[j]+3].isalpha():\n indStart = eng[j]+3\n indEnd = symbol_lines_index[j][1]\n\n english = data[indStart:indEnd+1]\n english_list.append(english)\n\n # lastly combine the words, separating each translation with /\n english = \" / \".join(english_list)\n\n return english",
"def pig_latin(word):\n \n first_letter = word[0]\n rest_of_word = word[1 : ]\n \n # Student should complete function on the next lines.\n \n if first_letter == 'a' or first_letter == 'e' or first_letter == 'i' or first_letter == 'o' or first_letter == 'u':\n return word + \"way\"\n else:\n return rest_of_word + first_letter + \"ay\"",
"def get_pirate_talk(phrase):\n\n pirate_dict = {\" sir\": \"matey\",\n \"hotel\" : \"fleabag inn\",\n \"student\" : \"swabbie\",\n \"boy\" : \"matey\",\n \"madam\" : \"proud beauty\",\n \"professor\" : \"foul blaggart\",\n \"restaurant\" : \"galley\",\n \"your\" : \"yer\",\n \"excuse\" : \"arr\",\n \"students\" : \"swabbies\",\n \"are\": \"be\",\n \"lawyer\" : \"foul blaggart\",\n \"the\": \"th\\'\",\n \"restroom\" : \"head\",\n \"my\": \"me\",\n \"hello\" : \"avast\",\n \"is\": \"be\",\n \"man\": \"matey\"}\n\n # split string into list so I can iterate by words.\n phrase_list = phrase.split()\n\n pirate_words = []\n\n for word in phrase_list:\n # if the word is in pirate dictioary, replace word with its corresponding key.\n if word in pirate_dict:\n word = pirate_dict[word]\n # all all words to the new, 'translated', list.\n pirate_words.append(word)\n\n pirate_words = \" \".join(pirate_words)\n\n return pirate_words",
"def convert_all(text):\r\n\tpig_tokens = ''\r\n\r\n\t#tokenizes the text\r\n\ttokens = word_tokenize(text)\r\n\r\n\t#regex for non-alphabetical characters\r\n\tpattern = re.compile(r'[^a-zA-Z]')\r\n\r\n\t#converts the words to pig latin and appends them to the sentence.\r\n\tfor token in tokens:\r\n\t\tif not re.findall(pattern, token):\r\n\t\t\tword = word_to_pig_latin(token)\r\n\r\n\t\t\tif re.findall(r'[A-Z]', word):\r\n\t\t\t\tword = word.lower()\r\n\t\t\t\tword = word.capitalize()\r\n\t\t\tpig_tokens += ' ' + word\r\n\t\telse:\r\n\t\t\tpig_tokens += token\r\n\r\n\tpig_text = ''.join(pig_tokens)\r\n\r\n\treturn pig_text",
"def preprocess(text):\n return text.lower()",
"def pig_latin(word):\n first_letter = word[0]\n rest_of_word = word[1 : ]\n #print(\"First letter is\", first_letter)\n #print(\"rest_of_word is\", rest_of_word)\n if first_letter == 'a' or first_letter == 'e' or first_letter == 'i' or first_letter == 'o' or first_letter == 'u': \n pig_latin_word = word + 'way'\n else: \n pig_latin_word = rest_of_word + first_letter + 'ay'\n return pig_latin_word",
"def preprocess(self, text):\r\n return text",
"def read(text):\n text = str(text)\n if not helpers.contains_only_phonetic_chars(text):\n raise NonSupportedTextException()\n return _process_replacements(text)",
"def translate_to_pirate_talk(phrase):\n english_to_pirate = {\"sir\": \"matey\", \"hotel\": \"fleabag inn\", \"student\": \"swabbie\", \"boy\": \"matey\", \"professor\": \"foul blaggart\", \"restaurant\": \"galley\", \"your\": \"yer\", \"excuse\": \"arr\", \"students\": \"swabbies\", \"are\": \"be\", \"restroom\": \"head\", \"my\": \"me\", \"is\": \"be\", \"man\": \"matey\"}\n\n # list for words that have been matched against translation dictionary\n translation_list = []\n\n # split the input phrase at spaces\n phrase_split = phrase.split()\n for word in phrase_split:\n\n # check if the key is in the English to Pirate dictionary\n if word in english_to_pirate:\n # if word is a key, we append the value to the translation list\n word = english_to_pirate[word]\n translation_list.append(word)\n\n else:\n # if word is not a key, the word is just added to the list\n translation_list.append(word)\n\n # join the words in the list with a space and return the translated phrase\n return \" \".join(translation_list)",
"def translate(text):\n start = dt.datetime.now()\n words = find_words(text)\n start = time_elapsed(\"Find words\", start)\n results = check_words(words)\n start = time_elapsed(\"Check words\", start)\n return results",
"def pig_latin(word):\n if word[0] in 'aeiou':\n return f\"{word}way\"\n\n return f\"{word[1:]}{word[0]}ay\"",
"def __call__(self, text):\n\n return self._nlp(SpacyTextParser._basic_clean(text))",
"def english_tokenzier(text: str):\n TOKENIZER = ToktokTokenizer().tokenize\n return TOKENIZER(text)",
"def pig_latin(phrase):\n\n\n # loop over each word in the phrase\n # in word[0] starts with aeiou\n # add yay to the end of that word\n # if word[0] starts with non aeiou\n # move word[0] to the end and add ay\n\n result = []\n\n for word in phrase.split():\n\n if word[0] in 'aeiou':\n\n result.append(word + 'yay')\n\n else:\n\n result.append(word[1:] + word[0] + 'ay')\n\n return \" \".join(result)",
"def translate_wrapper(atext):\n print(\"translating:\",atext)\n res=\"\"\n res=translate(atext,\"pl\",\"fr\")\n time.sleep(0.5)\n print(\"translation:\",res)\n return res",
"def pig_latin(s):\n s = s.lower()\n s = s.split()\n pig_s = []\n for i in s:\n if i[0] in 'aeiouAEIOU':\n i += 'way'\n pig_s.append(i)\n\n else:\n i = i[1:] + i[0] + 'ay'\n pig_s.append(i)\n\n return ' '.join(pig_s)",
"def pigLatinTranslator(word):\n\n vowels = \"aeiouAEIOU\"\n word = str(word)\n if not word.isalpha():\n return \"Please submit a single word.\"\n elif len(word) < 2:\n return \"Please submit a longer word.\"\n else:\n if word[0] in vowels:\n return word + \"yay\"\n for letter in word:\n word = word[1:] + word[0]\n if word[0] in vowels:\n return word + \"ay\"\n return word[1:] + word[0] + \"ay\"",
"def ta2en(text):\n return IITB_translator(\"ta\", \"en\", text)",
"def pig_latin(word):\n if word[0] in 'aeiou':\n return f'{word}way'\n\n return f'{word[1:]}{word[0]}ay'",
"def text_language(text):\n hebrew = 0\n english = 0\n for char in text:\n if char in \"אבגדהוזחטיכךלמםנסעפףצץקרשת\":\n hebrew += 1\n elif char.lower() in \"abcdefghijklmnopqrstuvwxyz\":\n english += 1\n return {True: \"hebrew\", False: \"english\"}[hebrew > english]",
"def identify_lang(\n self,\n text: str,\n with_probs: bool = False,\n ) -> str | Tuple[str, float]:\n if not self._is_valid_text(text):\n result = (\"un\", 1.0)\n else:\n text_ = utils.to_collection(text, str, list)\n result = models.get_topn_preds_and_probs(\n self.model.predict(text_), 1, self.classes\n )[0][0]\n return result[0] if with_probs is False else result",
"def makePigLatin(word): \n m = len(word)\n vowels = \"a\", \"e\", \"i\", \"o\", \"u\", \"y\" \n # short words are not converted \n if m<3 or word==\"the\":\n return word\n else:\n for i in vowels:\n if word.find(i) < m and word.find(i) != -1:\n m = word.find(i)\n if m==0:\n return word+\"way\" \n else:\n return word[m:]+word[:m]+\"ay\""
] |
[
"0.6459648",
"0.64266783",
"0.63837487",
"0.627986",
"0.6264807",
"0.62109566",
"0.6168315",
"0.609901",
"0.6076689",
"0.5986559",
"0.596572",
"0.5964802",
"0.5929831",
"0.5909267",
"0.5906561",
"0.5884695",
"0.58724725",
"0.57658005",
"0.5745845",
"0.5740056",
"0.57296085",
"0.5722249",
"0.5718628",
"0.5715271",
"0.57082295",
"0.5665416",
"0.5650898",
"0.5613092",
"0.56073064",
"0.5596641"
] |
0.67300105
|
0
|
Create Advice lines in a Payment Advice and compute the Advice lines.
|
def compute_advice(self):
for advice in self:
old_lines = self.env['hr.payroll.advice.line'].search([('advice_id', '=', advice.id)])
if old_lines:
old_lines.unlink()
payslips = self.env['hr.payslip'].search([('date_from', '<=', advice.date), ('date_to', '>=', advice.date), ('state', '=', 'done')])
for slip in payslips:
            if not slip.sudo().employee_id.bank_account_id or not slip.sudo().employee_id.bank_account_id.acc_number:
raise UserError(_('Please define bank account for the %s employee') % (slip.employee_id.name,))
payslip_line = self.env['hr.payslip.line'].search([('slip_id', '=', slip.id), ('code', '=', 'NET')], limit=1)
if payslip_line:
self.env['hr.payroll.advice.line'].create({
'advice_id': advice.id,
'name': slip.sudo().employee_id.bank_account_id.acc_number,
'ifsc_code': slip.sudo().employee_id.bank_account_id.bank_bic or '',
'employee_id': slip.employee_id.id,
'bysal': payslip_line.total
})
slip.advice_id = advice.id
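# Usage sketch (hedged; assumes a standard Odoo environment where the
# hr.payroll.* models referenced above exist, which is outside this snippet):
#     advice = env['hr.payroll.advice'].browse(advice_id)
#     advice.compute_advice()  # drops old advice lines and rebuilds them
#                              # from the 'done' payslips covering advice.date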
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def get_advice():\n json_response = random_adviceslip()\n advice = parse_advice(json_response=json_response)\n return advice",
"def make_decision_with_policy(self, policy_type, *args):\n if policy_type == 1: # ADP\n assert len(args) == 2, 'args should be exactly 2'\n cur_K = -self.K_im_traj[-1]\n distance_2_tan, radian_at_tan = args\n self.dis_sum += distance_2_tan\n pwm_l_new, pwm_r_new = policy.adp(distance_2_tan, radian_at_tan, self.dis_sum, cur_K)\n elif policy_type == 2: # pure pursuit\n l_d, sin_alpha = args\n amp = 150\n pwm_l_new, pwm_r_new = policy.pure_pursuit(l_d, sin_alpha, amp)\n elif policy_type == 3: # Car following with ADP\n assert len(args) == 3, 'args should be exactly 3'\n cur_K = -self.K_im_traj[-1]\n distance_2_tan, radian_at_tan, estimated_dis = args\n self.dis_sum += distance_2_tan\n if self.is_recording and self.counter % 100 == 0:\n np.save('./.out/record', self.record)\n pwm_l_new, pwm_r_new = policy.car_following_with_adp(distance_2_tan, radian_at_tan, self.dis_sum, cur_K, estimated_dis, self.record)\n print(self.counter)\n self.counter += 1\n elif policy_type == 4:\n K = 0.5\n dis2car, = args\n pwm_l_new, pwm_r_new = policy.car_following(dis2car, K)\n elif policy_type == 5:\n d_arc, d_curve, theta = args\n pwm_l_new, pwm_r_new = policy.adp_coupled_car_following(d_arc, d_curve, theta, self.z, self.K_coupled)\n else:\n pwm_l_new, pwm_r_new = 0, 0\n print('Policy Not Found')\n self.motor.motor_set_new_speed(pwm_l_new, pwm_r_new)",
"def add_advices_to_user(self, id_user):\n # get data\n advice_type_id = RobotAdviceType.objects.values_list(\"id\").get(type=\"default\")\n advices_id = RobotAdvices.objects.values_list(\"id\").filter(robot_advice_type=advice_type_id)\n\n # add new advices to user\n for advice_id in advices_id:\n advice = RobotAdvices.objects.get(id=advice_id[0])\n user = self.user.objects.get(id=id_user)\n AdvicesToUser.objects.create(user=user, advice=advice)",
"def post(self):\n for rec in self:\n amount = rec.amount * (1 if rec.payment_type in (\n 'outbound', 'transfer') else -1)\n is_required = rec.l10n_mx_edi_advance_is_required(amount)\n if is_required:\n rec._l10n_mx_edi_generate_advance(is_required)\n return super(AccountPayment, self).post()",
"def test_add_advices_to_user(self):\n # create user\n user_created = self.create_user_start_program_advices_list_empty()\n\n # count the number of challenges\n # before a call to the method\n user = self.user.objects.get(id=user_created.id)\n number_advice_to_user_before = user.advices_to_user.count()\n\n # call method\n self.new_controller.add_advices_to_user(user_created.id)\n\n # count the number of challenges\n # after a call to the method\n number_advice_to_user_after = user.advices_to_user.count()\n advice_to_user = user.advices_to_user.values_list(\"id\")\n self.assertEqual(number_advice_to_user_before, 0)\n self.assertEqual(number_advice_to_user_after, 5)\n for id_advice in advice_to_user:\n self.assertEqual([(27,), (28,), (29,), (25,), (26,)].count(id_advice), 1)",
"def main():\n print(get_advice())",
"def test_create_end_user_case_advice(self, advice_type):\n data = {\n \"text\": \"I Am Easy to Find\",\n \"note\": \"I Am Easy to Find\",\n \"type\": advice_type,\n \"end_user\": str(self.application.end_user.party.id),\n }\n\n if advice_type == AdviceType.PROVISO:\n data[\"proviso\"] = \"I am easy to proviso\"\n\n if advice_type == AdviceType.REFUSE:\n data[\"denial_reasons\"] = [\"1a\", \"1b\", \"1c\"]\n\n response = self.client.post(self.standard_case_url, **self.gov_headers, data=[data])\n\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n self.assertIsNotNone(Advice.objects.get())\n self.assertTrue(Audit.objects.filter(verb=AuditType.CREATED_USER_ADVICE).exists())",
"def run_payroll(self):\n self.employee_id()\n self.classification()\n self.employee_data()\n self.paymethod()\n pay_logfile = \"paylog.txt\"\n if os.path.exists(pay_logfile):\n os.remove(pay_logfile)\n self.issue_payment()",
"def payment_confirmation(self, **post):\n sale_order_id = view.session.get('sale_last_order_id')\n partner_id = view.env.user.partner_id\n if sale_order_id:\n sale_order_id = view.env['sale.order'].sudo().browse(int(sale_order_id))\n lines = sale_order_id.order_line\n policy_line = view.env['policies.holder.line']\n for line in lines:\n code = ''.join(random.choice('0123456789ABCDEF') for i in range(16))\n policy_line.sudo().create({'name':lines.product_id.id, \n 'premium':lines.price_unit, \n 'policy_code':code, \n 'line_id':partner_id.id,\n 'start_date':Datetime.now(), 'end_date':Datetime.to_string(timedelta(days=lines.product_id.policy_period*360)+ datetime.now())})\n s = super(InsuranceWebsiteSale, self).payment_confirmation()\n view.session['sale_last_order_id'] = False\n return s\n return",
"def pre_approve(self, cr, uid, ids, context={}):\n \tfor voucher in self.browse(cr, uid, ids, context=context):\n \t if not voucher.department_id.analytic_account_id:\n \t raise osv.except_osv(_('Configration Check!'), _(\"Please add cost center for your department!\"))\n \t periods = self.pool.get('account.period').search(cr, uid, [('date_start','<=',voucher.date),('date_stop','>=',voucher.date),('company_id','=',voucher.company_id.id)], context=context)\n\n\n res=0.0\n if voucher.purpose:\n if not voucher.purpose.account_id: raise osv.except_osv(_('Warning!'), _('Please configure account for this purpose!')) \n voucher_line = {\n \t\t'voucher_id': voucher.id,\n \t\t'partner_id': voucher.partner_id.id,\n \t\t'untax_amount': voucher.amount,\n \t\t'amount': voucher.amount,\n 'name': voucher.narration,\n \t\t'type': 'dr',\n \t\t'account_analytic_id': voucher.department_id.analytic_account_id and voucher.department_id.analytic_account_id.id,\n 'account_id': voucher.purpose.account_id.id,\n \t }\n new_amount = res and res or voucher.amount \n voucher_line.update({'amount':new_amount,'untax_amount':new_amount})\n \t if voucher.line_ids :\n for line in voucher.line_ids:\n \t\t self.pool.get('account.voucher.line').write(cr, uid, line.id, {\n \t\t'voucher_id': voucher.id,\n \t\t'partner_id': voucher.partner_id.id,\n \t\t'untax_amount': res or line.amount,\n \t\t'amount': line.amount,\n 'name': voucher.narration,\n \t\t'type': 'dr',\n \t\t'account_analytic_id': line.account_analytic_id and line.account_analytic_id.id or voucher.department_id.analytic_account_id.id,\n 'account_id': voucher.purpose.account_id.id or line.account_id.id,\n \t }, context=context)\n \t else:\n\n \t\t new_voucher_line = self.pool.get('account.voucher.line').create(cr, uid, voucher_line, context=context)\n context.update({'purchase':True})\n self.create_budget_confirmation(cr, uid, [voucher.id], context)\n \tself.write(cr, uid, ids,{'state': 'preapprove','type':'purchase','ratification':True}, context=context)\n #cxt = context.copy()\n #cxt.update({'type':'ratification'})\n if not super(account_voucher, self).create_budget_confirmation(cr, uid, ids, context=context):\n self.write(cr, uid, ids, {'state': 'approved'}, context=context)\n\n \t'''self.write(cr, uid, ids, {'state': 'preapprove'})\n if not super(account_voucher, self).create_budget_confirmation(cr, uid, ids, context=context):\n self.write(cr, uid, ids, {'state': 'approve','type':'purchase','ratification':True}, context=context)'''\n return True",
"def _step(self, action):\n \n obs, reward, done, info = self.env.step(action)\n\n \n advice=self.generateAdvice()[1]\n\n obs = {\n \"image\": obs,\n \"advice\": advice\n }\n\n\n \n\n\n\n return obs, reward, done, info",
"def generate_orders(self, cr, uid, ids, context=None):\n voucher_pool = self.pool.get('account.voucher')\n payment_term_obj = self.pool.get('account.payment.term')\n account_budget_confirmation_obj = self.pool.get('account.budget.confirmation')\n period_obj = self.pool.get('account.period')\n if context is None:\n context = {}\n for order in self.browse(cr, uid, ids, context=context):\n #################################to remind\n total_fixed = total_percent = 0\n for line in order.payment_term.line_ids:\n if line.value == 'fixed':\n total_fixed += line.value_amount\n if line.value == 'procent':\n total_percent += line.value_amount\n total_fixed = (total_fixed * 100) / (order.amount or 1.0)\n if (total_fixed + total_percent) > 100:\n raise orm.except_orm(_('Error!'), _(\"Can not create the payments !\\n\\\n The related payment term is probably miss configured as it gives a computed amount greater than the total permanent payment amount. \\\n The latest line of your payment term must be of type 'balance' to avoid rounding issues.\"))\n # create one move line for the total and possibly adjust the other lines amount\n totlines1 = []\n for o in order.line_ids:\n totlines1 += payment_term_obj.compute(cr, uid, order.payment_term.id, o.amount, order.date or False, context=context)\n \n d = {}\n for k, v in totlines1:\n d.setdefault(k, [k]).append(v)\n totlines = map(tuple, d.values())\n\n for t in totlines :\n #to substract date from the interval number \n order_date = t[0]\n entered_date = datetime.datetime.strptime(order_date, '%Y-%m-%d')\n entered_date = entered_date.date()\n account_id = (order.partner_id.property_account_payable and order.partner_id.property_account_payable.id) or \\\n (order.journal_id.default_credit_account_id and order.journal_id.default_credit_account_id.id)\n period_id = period_obj.find(cr, uid, t[0], context=context)[0]\n\n list_confirm = [conf.id for conf in o.confirmation_ids]\n confirmations = account_budget_confirmation_obj.search(cr, uid, [('id','in', list_confirm),('period_id','=', period_id)], context=context) #('date','=',t[0]),\n\n for confirm in confirmations:\n confirm_id = confirm\n\n voucher_lines = [(0, 0, {'name':ol.name, 'account_id':ol.account_id.id, 'type':'dr',\n 'amount':t[count + 1], 'account_analytic_id':ol.account_analytic_id.id, 'budget_confirm_id': confirm_id })\n for count, ol in enumerate(order.line_ids)]\n res = voucher_pool.onchange_price(cr, uid, 0, voucher_lines, [], partner_id=order.partner_id.id, context=context).get(\"value\", {})\n voucher_dict = {\n 'partner_id' : order.partner_id.id,\n 'account_id': account_id,\n 'company_id' : order.company_id.id,\n 'journal_id' : order.journal_id.id,\n 'period_id': order.period_id.id,\n 'type':'purchase',\n 'date' : t[0],\n 'reference': order.name,\n 'payment_permanent_voucher_id': order.id,\n 'line_ids':voucher_lines,\n 'amount':res.get(\"amount\", 0.0)\n }\n voucher_pool.create(cr, uid, voucher_dict, context=context)\n return self.write(cr, uid, ids, {'state':'done'}, context=context)",
"def generate_eob(\n self, date_of_service, date_of_eob, insured, invoice_id, cpt_code, charge_amount\n ):\n if insured == \"insured\":\n # first copayments\n copay_amount = np.random.choice(\n self.distributions[\"copay_amounts\"],\n 1,\n p=self.distributions[\"copay_distribution\"],\n )[0]\n if copay_amount > 0:\n invoicelineitemeob = pd.DataFrame(\n {\n \"invoice_id\": [invoice_id],\n \"cpt_code\": [cpt_code],\n \"created_on\": [date_of_service],\n \"copay_amount\": [copay_amount],\n \"adjustment_amount\": [0],\n \"paid_amount\": [0],\n }\n )\n self.InvoiceLineItemsEob = self.InvoiceLineItemsEob.append(\n invoicelineitemeob\n )\n remaining_charge = charge_amount - copay_amount\n else:\n remaining_charge = charge_amount\n # next eob discounts\n eob_discount_percent = np.random.choice(\n self.distributions[\"eob_discount_percentages\"],\n 1,\n p=self.distributions[\"eob_discount_distribution\"],\n )[0]\n if eob_discount_percent > 0:\n insurance_adjustment = remaining_charge * eob_discount_percent / 100\n remaining_charge = remaining_charge - insurance_adjustment\n invoicelineitemeob = pd.DataFrame(\n {\n \"invoice_id\": [invoice_id],\n \"cpt_code\": [cpt_code],\n \"created_on\": [date_of_eob],\n \"copay_amount\": [0],\n \"adjustment_amount\": [insurance_adjustment],\n \"paid_amount\": [0],\n }\n )\n self.InvoiceLineItemsEob = self.InvoiceLineItemsEob.append(\n invoicelineitemeob\n )\n # next handle eob payments where relevant\n eob_payment_percentage = np.random.choice(\n self.distributions[\"eob_payment_percentages\"],\n 1,\n p=self.distributions[\"eob_payment_distribution\"],\n )[0]\n eob_payment_amount = remaining_charge * (eob_payment_percentage / 100.0)\n if eob_payment_amount > 0:\n invoicelineitemeob = pd.DataFrame(\n {\n \"invoice_id\": [invoice_id],\n \"cpt_code\": [cpt_code],\n \"created_on\": [date_of_eob],\n \"copay_amount\": [0],\n \"adjustment_amount\": [0],\n \"paid_amount\": [eob_payment_amount],\n }\n )\n self.InvoiceLineItemsEob = self.InvoiceLineItemsEob.append(\n invoicelineitemeob\n )\n remaining_charge = remaining_charge - eob_payment_amount\n else:\n remaining_charge = charge_amount\n return remaining_charge",
"def add_advice_to_user_created(cls, user, list_advice_id):\n for id_advice in list_advice_id:\n advice = RobotAdvices.objects.get(id=id_advice)\n AdvicesToUser.objects.create(user=user, advice=advice)",
"def action_generate_lines_txt(self):\n rp_obj = self.env['res.partner']\n voucher_obj = self.env['account.wh.iva']\n txt_iva_obj = self.env['txt.iva.line']\n vouchers = []\n txt_brw = self.browse(self._ids)[0]\n txt_ids = txt_iva_obj.search([('txt_id', '=', txt_brw.id)])\n if txt_ids:\n txt_ids.unlink()\n\n if txt_brw.type:\n vouchers = voucher_obj.search([\n ('date_ret', '>=', txt_brw.date_start),\n ('date_ret', '<=', txt_brw.date_end),\n ('period_id', '=', txt_brw.period_id.id),\n ('state', '=', 'done'),\n ('type', 'in', ['in_invoice', 'in_refund'])])\n else:\n vouchers = voucher_obj.search([\n ('date_ret', '>=', txt_brw.date_start),\n ('date_ret', '<=', txt_brw.date_end),\n ('period_id', '=', txt_brw.period_id.id),\n ('state', '=', 'done'),\n ('type', 'in', ['out_invoice', 'out_refund'])])\n\n for voucher in vouchers:\n acc_part_id = rp_obj._find_accounting_partner(voucher.partner_id)\n for voucher_lines in voucher.wh_lines:\n if voucher_lines.invoice_id.state not in ['open', 'paid']:\n continue\n for voucher_tax_line in voucher_lines.tax_line:\n txt_iva_obj.create(\n {'partner_id': acc_part_id.id,\n 'voucher_id': voucher.id,\n 'invoice_id': voucher_lines.invoice_id.id,\n 'txt_id': txt_brw.id,\n 'untaxed': voucher_tax_line.base,\n 'amount_withheld': voucher_tax_line.amount_ret,\n 'tax_wh_iva_id': voucher_tax_line.id,\n })\n return True",
"def test_save_advices_to_user_other_answer(self):\n # create and get user\n user_created = self.create_new_user()\n user = self.user.objects.get(id=user_created.id)\n\n # test if the user answer to a question :\n # add a new advice to user\n data = DiscussionSpace.objects.values_list(\"robot_question\")\n for id_question in data:\n user_answer_id = DiscussionSpace.objects.values_list(\"user_answer\")\\\n .filter(robot_question=id_question).filter(robot_advices__isnull=False)\n if len(user_answer_id) >= 2:\n user_answer = user_answer_id[0][0]\n advice_to_add = DiscussionSpace.objects.values_list(\"robot_advices\")\\\n .filter(user_answer=user_answer_id[1][0])\\\n .filter(robot_question=id_question)[0][0]\n old_question_id = id_question\n list_advice_id = [advice_to_add]\n self.add_advice_to_user_created(user_created, list_advice_id)\n\n # get the user's advices\n # before called the method\n advice_user = user.advices_to_user.values_list(\"id\").order_by(\"robot_advice_type\")\n number_advice_user = len(advice_user)\n id_question_1 = DiscussionSpace.objects.values_list(\"robot_question\")\\\n .get(robot_advices=advice_user[0][0])\n\n # call method\n # test if the user change\n # this answer to this question\n self.new_controller.save_advices_to_user(user_answer, old_question_id,\n user_created.id)\n\n # get the user's advices\n # after called the method\n advice_user_after = user.advices_to_user.values_list(\"id\").order_by(\"robot_advice_type\")\n number_advice_user_after = len(advice_user_after)\n id_question_2 = DiscussionSpace.objects.values_list(\"robot_question\")\\\n .get(robot_advices=advice_user_after[0][0])\n\n self.assertEqual(id_question_1, id_question_2)\n self.assertNotEqual(advice_user, advice_user_after)\n self.assertEqual(number_advice_user, number_advice_user_after)",
"def _l10n_mx_edi_generate_advance(self, amount):\n advance = self.env['account.move'].advance(\n self.env['res.partner']._find_accounting_partner(self.partner_id),\n abs(amount), self.currency_id)\n advance.message_post_with_view(\n 'mail.message_origin_link',\n values={'self': advance, 'origin': self},\n subtype_id=self.env.ref('mail.mt_note').id)\n self.message_post_with_view(\n 'l10n_mx_edi_advance.l10n_mx_edi_message_advance_created',\n values={'self': self, 'origin': advance},\n subtype_id=self.env.ref('mail.mt_note').id)\n advance.date_invoice = self.payment_date\n ctx = {'disable_after_commit': True}\n advance.with_context(**ctx).action_post()\n if advance.l10n_mx_edi_pac_status == 'signed':\n self.invoice_ids = [(4, advance.id)]\n advance._compute_cfdi_values() # avoid inv signed with uuid false\n return advance\n self.message_post_with_view(\n 'l10n_mx_edi_advance.l10n_mx_edi_message_advance',\n values={'self': self, 'origin': advance},\n subtype_id=self.env.ref('mail.mt_note').id)\n advance.button_cancel()\n advance.button_draft()\n return advance",
"def test_cannot_create_advice_for_two_items(self):\n data = {\n \"text\": \"I Am Easy to Find\",\n \"note\": \"I Am Easy to Find\",\n \"type\": AdviceType.APPROVE,\n \"end_user\": str(self.application.end_user.party.id),\n # this passes the GoodOnApplication id to the Advice model which is why we get a 400 here\n # NOT because the user is trying to create advice for two different items\n \"good\": str(self.application.goods.first().id),\n }\n\n response = self.client.post(self.standard_case_url, **self.gov_headers, data=[data])\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(Advice.objects.count(), 0)",
"def action_move_create_withholding(self):\n account_move = self.env['account.move']\n aitw_obj = self.env['account.invoice.tax.wh']\n\n for invoice_brw in self:\n if invoice_brw.type not in ('out_invoice', 'out_refund'):\n continue\n if not invoice_brw.wh_agent_itbms:\n continue\n if invoice_brw.wh_move_id:\n continue\n if not invoice_brw.l10n_pa_wh_subject:\n raise except_orm(\n _('Error!'),\n _('Please define a Withholding Subject to this invoice.'))\n if invoice_brw.l10n_pa_wh_subject == 'na':\n continue\n wh_basis = aitw_obj.wh_subject_mapping(\n invoice_brw.l10n_pa_wh_subject).get('basis')\n\n # There is a precondition which states that a document will be\n # withheld depending on the type of withholding subject.\n # If the withholding concept is `5` or `6` the withholding is to\n # be applied based on invoice_total\n if not invoice_brw.tax_line and wh_basis != 'total':\n continue\n\n journal = invoice_brw.company_id.wh_sale_itbms_journal_id\n if not journal:\n raise except_orm(\n _('Error!'),\n _('Please Define a Journal to be used for withholding '\n 'ITBMS on Customer Invoice on Your Company.'))\n\n for aitw in aitw_obj.compute(invoice_brw).values():\n aitw_obj.create(aitw)\n\n if not invoice_brw.wh_tax_line:\n continue\n\n ctx = dict(self._context, lang=invoice_brw.partner_id.lang)\n date = invoice_brw.date_invoice\n\n ref = invoice_brw.reference or invoice_brw.name,\n company_currency = invoice_brw.company_id.currency_id\n ait = invoice_brw.wh_move_line_get()\n\n total, total_currency, ait = invoice_brw.with_context(\n ctx).compute_invoice_totals(company_currency, ref, ait)\n\n if total:\n company_currency = invoice_brw.company_id.currency_id\n diff_curr = invoice_brw.currency_id != company_currency\n ait.append({\n 'type': 'dest',\n 'name': _('ITBMS Withheld on Invoice'),\n 'price': total,\n 'account_id': invoice_brw.account_id.id,\n 'date_maturity': invoice_brw.date_due,\n 'amount_currency': diff_curr and total_currency,\n 'currency_id': diff_curr and invoice_brw.currency_id.id,\n 'ref': ref\n })\n\n part = self.env['res.partner']._find_accounting_partner(\n invoice_brw.partner_id)\n\n line = [\n (0, 0,\n self.wh_line_get_convert(l, part.id, date)) for l in ait]\n\n move_vals = {\n 'ref': invoice_brw.reference or invoice_brw.name,\n 'line_id': line,\n 'journal_id': journal.id,\n 'date': date,\n 'company_id': invoice_brw.company_id.id,\n }\n ctx['company_id'] = invoice_brw.company_id.id\n\n if invoice_brw.wh_move_name:\n move_vals['name'] = invoice_brw.wh_move_name\n\n move_vals['period_id'] = invoice_brw.period_id.id\n for i in line:\n i[2]['period_id'] = invoice_brw.period_id.id\n\n ctx_nolang = ctx.copy()\n ctx_nolang.pop('lang', None)\n move = account_move.with_context(ctx_nolang).create(move_vals)\n move.post()\n\n invoice_brw.write({\n 'wh_move_id': move.id,\n 'wh_move_name': move.name,\n })\n return True",
"def test_return_advice_goal_weight_ok(self):\n data_weight_user = {\"height\": \"1,60\", \"actual_weight\": \"60\",\n \"cruising_weight\": \"55\", \"weight_goal\": \"55\"}\n return_advice = self.new_weight_advice_goal.return_weight_advices_goal(data_weight_user)[1]\n\n advice = \"Alors c'est parti ! Partons sur un objectif de - 5 kg. \"\n self.assertEqual(return_advice, advice)",
"def test_countersign_advice_success(self):\n all_advice = [\n Advice.objects.create(\n **{\n \"user\": self.gov_user,\n \"good\": self.application.goods.first().good,\n \"team\": self.team,\n \"case\": self.case,\n \"note\": f\"Advice {i}\",\n }\n )\n for i in range(4)\n ]\n\n data = [\n {\n \"id\": advice.id,\n \"countersigned_by\": self.gov_user.baseuser_ptr.id,\n \"comments\": \"Agree with recommendation\",\n }\n for advice in all_advice\n ]\n\n response = self.client.put(self.url, **self.gov_headers, data=data)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n audit_qs = Audit.objects.filter(verb=AuditType.COUNTERSIGN_ADVICE)\n self.assertEqual(audit_qs.count(), 1)\n self.assertEqual(audit_qs.first().actor, self.gov_user)",
"def test_invoice_payment_hook_hold(node_factory):\n opts = [{}, {'plugin': os.path.join(os.getcwd(), 'tests/plugins/hold_invoice.py'), 'holdtime': TIMEOUT / 2}]\n l1, l2 = node_factory.line_graph(2, opts=opts)\n\n inv1 = l2.rpc.invoice(123000, 'label', 'description', preimage='1' * 64)\n l1.rpc.pay(inv1['bolt11'])",
"def adaptive_step(self):\n\n s = self\n \n # adaptive line search parameters\n down_adjustment_frequency = 1\n up_adjustment_frequency = 50\n alpha=0.3 # acceptable fraction of predicted decrease\n beta=0.8 # how much to shrink when violation\n gamma=1.05 # how much to grow when too conservative\n report_frequency = 1\n \n\n s.model.advance_batch()\n s.update_stats() # update cov matrices and svds\n\n s.grad.update() # gradient (ptodo, already updated in stats)\n s.grad2.update() # gradient from synth labels (don't need?)\n s.grad_new.update() # corrected gradient\n\n # TODO: decide on s vs s.\n s.run(s.param_save_op) # TODO: insert lr somewhere\n lr0, loss0 = s.run(s.lr, s.model.loss)\n\n s.run(s.param_update_op)\n loss1 = s.run(s.model.loss)\n \n target_slope = -s.run(s.grad_dot_grad_new_op)\n target_delta = lr0*target_slope\n actual_delta = loss1 - loss0\n actual_slope = actual_delta/lr0\n slope_ratio = actual_slope/target_slope # between 0 and 1.01\n \n s.record('loss', loss0)\n s.record('step_length', lr0)\n s.record('grad_norm', s.run(s.grad_norm_op))\n s.record('grad_new_norm', s.run(s.grad_new_norm_op))\n s.record('target_delta', target_delta)\n\n if step_rejection and actual_delta > 0:\n print('Observed increase in loss %.2f, rejecting step'%(actual_delta,))\n s.run(s.param_restore_op)\n\n if s.step_counter % report_frequency == 0:\n print('NStep %d loss %.2f, target decrease %.3f, actual decrease, %.3f ratio %.2f'%(self.step_counter, loss0, target_delta, actual_delta, slope_ratio))\n\n if (adaptive_step and s.step_counter % down_adjustment_frequency == 0 and\n slope_ratio < alpha and abs(target_delta)>eps):\n print('%.2f %.2f %.2f'%(loss0, loss1, slope_ratio))\n print('Slope optimality %.2f, shrinking learning rate to %.2f'%(slope_ratio, lr0*beta,))\n s.lr.set(lr0*beta)\n elif (adaptive_step and s.step_counter % up_adjustment_frequency == 0 and\n slope_ratio>0.90):\n print('%.2f %.2f %.2f'%(loss0, loss1, slope_ratio))\n print('Growing learning rate to %.2f'%(lr0*gamma))\n s.lr.set(lr0*gamma)\n \n s.step_counter+=1",
"def get_payoff(model, attack_policy, defense_policy):\n ave_discount_reward = get_payoff_mixed(model, [attack_policy], [defense_policy], [1.0], [1.0])\t\n return ave_discount_reward",
"def policy(self, insurance_program, fema_program, sba_program, entity,\n search_patience):\n\n # Return out of function if entity has enough money to repair and does not\n # have any insurance coverage.\n if (entity.recovery_funds.level >= entity.property.damage_value\n and entity.insurance == 0):\n\n self.writeHadEnough(entity)\n return\n \n # Define insurance claim request process. Define loan request process.\n try_insurance = self.env.process(insurance_program.process(entity))\n try_loan = self.env.process(sba_program.process(entity))\n try_fema = self.env.process(fema_program.process(entity))\n\n # If entity has insurance then yield an insurance claim request, the duration\n # of which is limited by entity's money search patience.\n if entity.insurance > 0.0:\n\n \n # At any point the entity has enough money to repair, stop looking.\n while entity.recovery_funds.level < entity.property.damage_value:\n # Record when money search starts \n money_search_start = self.env.now\n \n # Set patience parameters\n patience_end = money_search_start + search_patience\n patience_remain = patience_end - self.env.now\n\n # Define a timeout process to represent search patience, with duration\n # equal to the *remaining* patience. Pass the value \"gave up\" if the\n # process completes.\n find_search_patience = self.env.timeout(patience_remain, value='gave up')\n\n # Define insurance claim request process. Pass data about available\n # insurance claim adjusters.\n try_insurance = self.env.process(insurance_program.process(entity))\n\n # Yield both the patience timeout and the insurance claim request.\n # Pass result for the process that completes first.\n money_search_outcome = yield find_search_patience | try_insurance\n \n # If patience process completes first, interrupt the insurance claim\n # request and return out of function.\n if 'gave up' in str(money_search_outcome).lower():\n if try_insurance.is_alive:\n try_insurance.interrupt(self.env.now)\n entity.gave_up_funding_search = self.env.now\n return\n\n # Calculate remaining patience and reset patience timeout to\n # wait for FEMA and SBA processes to complete\n patience_remain = patience_end - self.env.now\n find_search_patience = self.env.timeout(patience_remain, value='gave up')\n \n # After insurance claim process has completed, can start FEMA and SBA process\n # Yield the patience timeout, the FEMA request and the SBA request.\n money_search_outcome = yield find_search_patience | (try_loan & try_fema)\n \n # End looping if FEMA and SBA processes have completed.\n if try_loan.processed and try_fema.processed:\n break\n\n # If patience process completes first, interrupt the FEMA\n # and SBA processes.\n if 'gave up' in str(money_search_outcome).lower():\n if try_fema.is_alive:\n try_fema.interrupt(self.env.now)\n if try_loan.is_alive:\n try_loan.interrupt(self.env.now)\n entity.gave_up_funding_search = self.env.now\n return\n else:\n # If no insurance, money search starts after disaster declaration\n # Need to check current simulation time again when disaster declaration\n # occurs to determine how much patience remains\n money_search_start = max(fema_program.declaration, self.env.now)\n patience_end = money_search_start + search_patience\n patience_remain = patience_end - self.env.now\n \n # Define a timeout process to represent search patience. 
Pass the value\n # \"gave up\" if the process completes.\n find_search_patience = self.env.timeout(patience_remain, value='gave up')\n \n # At any point the entity has enough money to repair, stop looking.\n while entity.recovery_funds.level < entity.property.damage_value:\n # Yield the patience timeout and the loan request.\n # No insurance so just yield FEMA & SBA loan request process.\n money_search_outcome = yield find_search_patience | (try_loan & try_fema)\n \n # End looping if both recovery processes have completed\n if try_loan.processed and try_fema.processed:\n break\n\n # If patience process completes first, interrupt the insurance claim\n # request and the loan request before ending process.\n if 'gave up' in str(money_search_outcome).lower():\n if try_loan.is_alive:\n try_loan.interrupt(self.env.now)\n if try_fema.is_alive:\n try_fema.interrupt(self.env.now)\n entity.gave_up_funding_search = self.env.now\n return\n\n # Record the duration when entity's search for money ends without\n # giving up.\n search_duration = self.env.now - money_search_start\n\n # If entity (STILL) does not have enough repair money then indicate so and\n # that options have been exhausted.\n if entity.recovery_funds.level < entity.property.damage_value:\n self.writeCompletedWithoutEnough(entity, search_duration)\n return\n\n # If entity completed search and obtained sufficient funding.\n self.writeCompletedWithEnough(entity, search_duration)",
"def save_advices_to_user(self, user_answer_id, old_question_id, id_user):\n # get data\n id_advice = DiscussionSpace.objects.values_list(\"robot_advices\"). \\\n filter(robot_question=old_question_id).get(user_answer=user_answer_id)[0]\n\n # if the user's answer\n # contains a robot advice\n if id_advice is not None:\n\n # get user's advices list\n user = self.user.objects.get(id=id_user)\n advices_user_id = user.advices_to_user.values_list(\"id\")\n\n # get advices by question\n # in discussion space\n id_advices_question = DiscussionSpace.objects.values_list(\"robot_advices\")\\\n .filter(robot_question=old_question_id)\n\n # if the user has already given\n # another answer to this question :\n # delete the old advice\n for advices_question in id_advices_question:\n for advices_user in advices_user_id:\n if advices_user[0] == advices_question[0]:\n user.advices_to_user.remove(advices_user)\n\n # add a new advice to user\n advice = RobotAdvices.objects.get(id=id_advice)\n AdvicesToUser.objects.create(user=user, advice=advice)",
"def treat(self, amount, disease):\r\n disease = self.getDiseaseToTreatWith(disease)\r\n amount = self.normalizeTreatmentAmount(amount, disease)\r\n self.treatInfections(amount, disease)",
"def fin_advice(input_data, num_advice=3):\n # Initialize the advice list\n advice = []\n # Compute the Z-score for a certain company\n inc_rate = compute_increase_rate(input_data)\n zscore = {}\n # Map each Z-Score to each company\n for comp_name in input_data.keys():\n zscore[comp_name] = compute_zscore(inc_rate, comp_name)\n # Reverse the dictionary z-score. (This help to find the max z-score)\n rev_zscore = rev_dict(zscore)\n for i in range(num_advice):\n if len(rev_zscore) == 0:\n break\n # Find and delete the company with the greatest absolute z-score\n use_zscore = max(rev_zscore.keys())\n use_comp = rev_zscore[use_zscore]\n del rev_zscore[use_zscore]\n # Determine the advice given for that certain company\n if zscore[use_comp] > 1:\n advice.append(\"Sell \" + use_comp + \" Stock\")\n elif zscore[use_comp] < -1:\n advice.append(\"Buy \" + use_comp + \" Stock\")\n else:\n advice.append(\"Keep \" + use_comp + \" Stock\")\n return advice",
"def done(self, cr, uid, ids, context=None):\n \n voucher_obj = self.pool.get('account.voucher')\n voucher_line_obj = self.pool.get('account.voucher.line')\n admin_affairs_model_obj = self.pool.get('admin.affairs.model')\n affairs_account_obj = self.pool.get('admin_affairs.account') \n model_id = admin_affairs_model_obj.search(cr, uid, [('model','=','environment.and.safety')], context=context)[0] \n affairs_account = affairs_account_obj.search(cr, uid, [('model_id','=',model_id)], context=context)\n if not affairs_account:\n raise osv.except_osv(_('Warning !'), _('Please insert account configuration For Environment and safety'))\n affairs_account_id = affairs_account[0]\n \n affairs_account_record = affairs_account_obj.browse(cr, uid, affairs_account_id,context=context) \n for record in self.browse(cr, uid, ids, context=context):\n if not record.allowances_lines_after :\n raise osv.except_osv(_('Partner Amount !'), _('Sorry no partner Amount After Rate To Transfer!'))\n notes = _(\"Enviroment and Safety allowances Contract: %s\")%(record.name)\n \n journal_id = affairs_account_record.journal_id\n analytic_id = affairs_account_record.analytic_id\n account_id = affairs_account_record.account_id\n\n\t\t# Creating Voucher / Ratitication\n voucher_id = voucher_obj.create(cr, uid, {\n 'amount': record.amount_total,\n 'type': 'ratification',\n 'date': time.strftime('%Y-%m-%d'),\n 'partner_id': record.partner_id.id,\n 'journal_id': journal_id and journal_id.id , \n 'state': 'draft',\n\t\t\t\t\t 'notes':record.notes,\n\t\t\t\t\t 'narration':notes ,\n \t 'company_id':record.company_id.id,\n })\n \t# Creating Voucher / Ratitication Lines\n for line in record.allowances_lines_after:\n '''account_id =line.category_id.account_id\n if not account_id:\n account_id = line.category_id.parent_id.account_id\n \n if not account_id:\n account_id = affairs_account_record.account_id \n\n if not account_id:\n raise osv.except_osv(_('Invalid action !'), _('Please insert Account configuration For Environment and safety Service')) ''' \n \n account_analytic_id =line.category_id.analytic_id\n if not account_analytic_id:\n account_analytic_id = line.category_id.parent_id.analytic_id \n \n if not account_analytic_id:\n account_analytic_id = affairs_account_record.analytic_id\n \n vocher_line_id = voucher_line_obj.create(cr, uid, {\n 'amount': record.amount_total,\n 'voucher_id': voucher_id,\n\t\t\t\t\t 'account_id':account_id and account_id.id,\n\t\t\t\t\t 'account_analytic_id':account_analytic_id and account_analytic_id.id ,\n 'type': 'dr',\n 'name':'environment and Safety allowances :' + record.name,\n })\n\t\t\n\t\t# Selecting Voucher Number / Refernece \n\n voucher_number = self.pool.get('account.voucher').browse(cr,uid,voucher_id)\n\n copy_attachments(self,cr,uid,[record.id],'services.contracts.archive',voucher_id,'account.voucher', context)\n self.write(cr, uid, ids, {'state':'done','transfer':True,'voucher_no':voucher_number.number}) \n return True",
"def pl_create_order(self):\n\tprint()\n\tprint('Pl - Create Order')\n\n\n\tpartner = self.env['res.partner'].search([\n\t\t\t\t\t\t\t\t\t\t\t\t\t('name', '=', self.patient.name),\n\t\t\t\t\t\t\t\t\t\t\t\t],\n\t\t\t\t\t\t\t\t\t\t\t\t#order='appointment_date desc',\n\t\t\t\t\t\t\t\t\t\t\t\tlimit=1,)\n\n\n\t# Create Order\n\torder = self.env['sale.order'].create({\n\t\t\t\t\t\t\t\t\t\t\t\t\t'state':'draft',\n\t\t\t\t\t\t\t\t\t\t\t\t\t'x_doctor': self.physician.id,\n\n\t\t\t\t\t\t\t\t\t\t\t\t\t#'partner_id': self.partner_id.id,\n\t\t\t\t\t\t\t\t\t\t\t\t\t'partner_id': partner.id,\n\t\t\t\t\t\t\t\t\t\t\t\t\t#'x_ruc': self.partner_id.x_ruc,\n\t\t\t\t\t\t\t\t\t\t\t\t\t#'x_dni': self.partner_id.x_dni,\n\n\t\t\t\t\t\t\t\t\t\t\t\t\t'patient': self.patient.id,\n\t\t\t\t\t\t\t\t\t\t\t\t\t'x_id_doc': self.patient.x_id_doc,\n\t\t\t\t\t\t\t\t\t\t\t\t\t'x_id_doc_type': self.patient.x_id_doc_type,\n\t\t\t\t\t\t\t\t\t\t\t\t\t'x_family': 'procedure',\n\n\t\t\t\t\t\t\t\t\t\t\t\t\t'treatment': self.id,\n\t\t\t\t\t\t\t\t\t\t\t\t})\n\t#print(order)\n\n\n\n\t# Create Order Lines\n\tfor cart_line in self.shopping_cart_ids:\n\n\t\tproduct = cart_line.product\n\n\t\t#print(product)\n\t\t#print(product.name)\n\n\t\t# Create Order Line\n\t\tol = order.order_line.create({\n\t\t\t\t\t\t\t\t\t\t'name': \t\tproduct.name,\n\t\t\t\t\t\t\t\t\t\t'product_id': \tproduct.id,\n\t\t\t\t\t\t\t\t\t\t'price_unit': \tcart_line.price,\n\t\t\t\t\t\t\t\t\t\t'product_uom_qty': cart_line.qty,\n\t\t\t\t\t\t\t\t\t\t'order_id': \torder.id,\n\t\t\t\t\t\t\t\t\t})\n\treturn order\n\n\t# pl_create_order"
] |
[
"0.6020763",
"0.5660076",
"0.5593123",
"0.5584313",
"0.5442596",
"0.53608656",
"0.52971053",
"0.5259828",
"0.5237223",
"0.5174569",
"0.5148661",
"0.5143547",
"0.5119266",
"0.5117248",
"0.511444",
"0.503214",
"0.5009339",
"0.5003946",
"0.4954846",
"0.4921185",
"0.49066684",
"0.48865592",
"0.48860613",
"0.4869023",
"0.4862832",
"0.48527667",
"0.48410964",
"0.48252025",
"0.47951218",
"0.47788128"
] |
0.7590411
|
0
|
`check_for_fit` wraps a method that validates if `self._is_fitted` is `True`.
|
def check_for_fit(cls, method):
@wraps(method)
def _check_for_fit(self, *args, **kwargs):
klass = type(self).__name__
if not self._is_fitted:
raise PipelineNotYetFittedError(
f"This {klass} is not fitted yet. You must fit {klass} before calling {method.__name__}."
)
return method(self, *args, **kwargs)
return _check_for_fit
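# Usage sketch (hypothetical PipelineMeta/Pipeline names; assumes the decorator
# is exposed on the pipeline's metaclass and that PipelineNotYetFittedError is
# defined elsewhere):
#     class Pipeline(metaclass=PipelineMeta):
#         def fit(self, X, y):
#             self._is_fitted = True
#             return self
#
#         @PipelineMeta.check_for_fit
#         def predict(self, X):
#             ...
# Calling predict() before fit() would raise PipelineNotYetFittedError.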
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def check_for_fit(cls, method):\n\n @wraps(method)\n def _check_for_fit(self, X=None, y=None):\n klass = type(self).__name__\n if not self._is_fitted and self.needs_fitting:\n raise ComponentNotYetFittedError(\n f\"This {klass} is not fitted yet. You must fit {klass} before calling {method.__name__}.\"\n )\n else:\n return method(self, X, y)\n\n return _check_for_fit",
"def _check_is_fitted(self):\n check_is_fitted(self, ['w', 'b'])",
"def is_fitted(self):\n\n return self.isFitted",
"def _check_is_fitted(self):\n # Do not check `b` as some classifiers do not set it\n check_is_fitted(self, 'w')\n super(CClassifierLinear, self)._check_is_fitted()",
"def _check_if_fitted(self):\n if not self.fitted:\n raise AssertionError('Model is not fitted! Fit the model to a '\n 'dataset before attempting to plot results.')",
"def is_fitted(self):\n return self.__fdata is not None",
"def _verify_fit(self) -> None:\n if not hasattr(self, 'X_train') or not hasattr(self, 'Y_train'):\n raise ValueError('Training data not set. Call `fit` and pass training data first.')",
"def check_if_it_can_fit(object):\n if hasattr(object, \"fit\") and hasattr(object, \"predict\") and hasattr(object, \"get_params\") and hasattr(object,\n \"set_params\"):\n return object\n else:\n raise Exception(\"Pass an estimator that has methods fit predict set_params get_params\")",
"def _check_if_fitted(self):\n if self.covar_module is None:\n raise RuntimeError(\n \"Model has not been fitted. You need to call \"\n \"`fit_fully_bayesian_model_nuts` to fit the model.\"\n )",
"def fit():\n pass",
"def _check_fitted(self):\n assert self.subspace_basis is not None, \\\n 'You must fit %s before you can project' % self.__class__.__name__",
"def _check_fitted(self):\n assert self.subspace_basis is not None, \\\n 'You must fit %s before you can project' % self.__class__.__name__",
"def has_fit(client: NumPyClient) -> bool:\n return type(client).fit != NumPyClient.fit",
"def test_check_is_fitted_call(self):\n\n x = BaseTransformer(columns=None)\n\n with mock.patch(\"tubular.base.check_is_fitted\") as mocked_method:\n\n attributes = \"columns\"\n\n x.check_is_fitted(attributes)\n\n assert (\n mocked_method.call_count == 1\n ), f\"Incorrect number of calls to tubular.base.check_is_fitted -\\n Expected: 1\\n Actual: {mocked_method.call_count}\"\n\n call_1_args = mocked_method.call_args_list[0]\n call_1_pos_args = call_1_args[0]\n call_1_kwargs = call_1_args[1]\n\n h.assert_dict_equal_msg(\n actual=call_1_kwargs,\n expected={},\n msg_tag=\"Keyword arg assert for tubular.base.check_is_fitted\",\n )\n\n assert (\n len(call_1_pos_args) == 2\n ), f\"Incorrect number of positional arguments in check_is_fitted call -\\n Expected: 2\\n Actual: {len(call_1_pos_args)}\"\n\n assert (\n call_1_pos_args[0] is x\n ), f\"Incorrect first positional arg in check_is_fitted call -\\n Expected: {x}\\n Actual: {call_1_pos_args[0]}\"\n\n assert (\n call_1_pos_args[1] == attributes\n ), f\"Incorrect second positional arg in check_is_fitted call -\\n Expected: {attributes}\\n Actual: {call_1_pos_args[1]}\"",
"def checkIsValid(f):\n\n @wraps(f)\n def wrapper(self, *args, **kwargs):\n if self.validator.isValid:\n return f(self, *args, **kwargs)\n else:\n error = self.validator._exceptionClass('Called: {} method before data validated'.format(f.__name__))\n self.validator._errors[f.__name__] = error\n if self.validator._errorHandler is not None:\n self.validator._errorHandler(error, self.getValidationContext())\n return\n\n return wrapper",
"def fit(self):\n raise NotImplementedError",
"def fit(self):\n raise NotImplementedError('')",
"def _fit(self, dataset):\n raise NotImplementedError()",
"def _check_if_estimator(estimator):\n msg = (\"This %(name)s instance has no attribute \\\"fit\\\".\")\n if not hasattr(estimator, \"fit\"):\n raise AttributeError(msg % {'name': type(estimator).__name__})",
"def has_been_fit(self):\n return self.predictor.has_been_fit",
"def fit(self, **kwargs):\n if self.fit_method is not None:\n fit_kwargs = self._fit_params.copy()\n fit_kwargs.update(kwargs)\n fit_kwargs = self._get_method_kwargs(fit_kwargs, self.fit_args)\n getattr(self.instance, self.fit_method)(**fit_kwargs)",
"def update(self, fit, curvature):\n if fit is None:\n self.detected = False\n else:\n if self.best_fit is None:\n self._accept_fit(fit, curvature)\n else:\n # We have a previous best fit. Compare it with the incoming one\n if self._sanity_check(fit):\n self._accept_fit(fit, curvature)\n else:\n self.detected = False",
"def fit(self):\n raise NotImplementedError # pragma: no cover",
"def checks(self, poly_fit, poly_fitx, poly_fity):\n if self.best_fit is not None:\n if not (np.abs(self.best_fit-poly_fit) <\n np.array([0.001, 1, 500])).all():\n return False\n if self.bestx is not None:\n if np.mean(np.abs(self.bestx-poly_fitx)) > 200:\n return False\n\n return True",
"def fit(self, x):\n raise NotImplementedError()",
"def has_been_fit(self, value):\n self.predictor.has_been_fit = value",
"def test_apply_validation(self, has_validation):\n called_with = None\n def validator(*args, **kwargs):\n nonlocal called_with\n called_with = CallArguments(*args, **kwargs)\n\n ctx, name, value = object(), 'myparam', object()\n\n fparam = FParameter(\n POSITIONAL_ONLY,\n name=name,\n validator=validator if has_validation else None,\n )\n fparam.apply_validation(ctx, value)\n if has_validation:\n assert called_with.args == (ctx, name, value)\n else:\n assert called_with is None",
"def test_fit_returns_self(self):\n\n df = d.create_df_1()\n\n x = BaseTransformer(columns=\"a\")\n\n x_fitted = x.fit(df)\n\n assert x_fitted is x, \"Returned value from BaseTransformer.fit not as expected.\"",
"def is_fit(self):\n if not hasattr(self, '_icc_imgs'):\n return False\n else:\n return self._icc_imgs is not None",
"def test_with_fitted(self):\n X, y = make_blobs(\n n_samples=100, n_features=5, centers=3, shuffle=False, random_state=112\n )\n model = MiniBatchKMeans().fit(X)\n labels = model.predict(X)\n\n with mock.patch.object(model, \"fit\") as mockfit:\n oz = SilhouetteVisualizer(model)\n oz.fit(X)\n mockfit.assert_not_called()\n\n with mock.patch.object(model, \"fit\") as mockfit:\n oz = SilhouetteVisualizer(model, is_fitted=True)\n oz.fit(X)\n mockfit.assert_not_called()\n\n with mock.patch.object(model, \"fit_predict\", return_value=labels) as mockfit:\n oz = SilhouetteVisualizer(model, is_fitted=False)\n oz.fit(X)\n mockfit.assert_called_once_with(X, None)"
] |
[
"0.78038114",
"0.72576225",
"0.6834544",
"0.67376643",
"0.6695705",
"0.6651157",
"0.63908255",
"0.6336527",
"0.61963433",
"0.61222386",
"0.6119558",
"0.6119558",
"0.60666823",
"0.6043454",
"0.6026785",
"0.6026702",
"0.6005541",
"0.5910402",
"0.5892558",
"0.5877655",
"0.58553916",
"0.5851288",
"0.5837853",
"0.5835048",
"0.5795019",
"0.579409",
"0.57549596",
"0.571826",
"0.57002515",
"0.56637245"
] |
0.7731085
|
1
|
Set the title and title size of the figure.
|
def set_title(self):
plt.title(label=self.title, fontsize=self.titlesize)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def setFigureTitle(self, title: str):\n self.fig.text(0.5, 0.99, title,\n horizontalalignment='center',\n verticalalignment='top',\n fontsize='small')\n self.draw()",
"def __draw_title(self):\n if self.title is not None:\n self.fig.suptitle(\n self.title, y=self.settings.otherParams[\"figure.title.yposition\"])",
"def set_title(self, title):\n self.axplot.set_title(title)",
"def set_font_size(fig):\n fig.title.text_font_size = FONT_SIZE\n fig.xaxis.axis_label_text_font_size = FONT_SIZE\n fig.yaxis.axis_label_text_font_size = FONT_SIZE\n fig.xaxis.major_label_text_font_size = FONT_SIZE\n fig.yaxis.major_label_text_font_size = FONT_SIZE\n fig.legend.label_text_font_size = FONT_SIZE",
"def set_font_size():\n SMALLER_SIZE = 10\n MED_SIZE = 12\n BIG_SIZE = 18\n\n # plt.rc('font', size=SMALL_SIZE) # controls default text sizes\n plt.rc('axes', titlesize=MED_SIZE) # fontsize of the axes title\n plt.rc('axes', labelsize=MED_SIZE) # fontsize of the x and y labels\n plt.rc('xtick', labelsize=SMALLER_SIZE) # fontsize of the tick labels\n plt.rc('ytick', labelsize=SMALLER_SIZE) # fontsize of the tick labels\n plt.rc('legend', fontsize=MED_SIZE) # legend fontsize\n plt.rc('figure', titlesize=BIG_SIZE) # fontsize of the figure title",
"def add_title(self, text, fontsize=None):\n self.title = text\n if fontsize is not None:\n self.title_fontsize = fontsize",
"def title(self, title: str):\n\n #self.master.title(title)\n self.ax.set_title(title)\n self.canvas.draw()",
"def setTitle(self, title):\n self.__title = title\n self.drawBorder()",
"def set_plot_title_labels(title, x_label, y_label):\n plt.title(title)\n plt.xlabel(x_label)\n plt.ylabel(y_label)\n plt.legend(loc='best')",
"def add_title(self, title, x_title = \"\", y_title = \"\"):\n self._fig.update_layout(title_text = title,\n xaxis_title = x_title,\n yaxis_title = y_title)",
"def set_title(self, title, color):\n # check input\n assert isinstance(title, str), 'Title must be a string but not a {0}.'.format(type(title))\n assert isinstance(color, str), 'Color must be a string but not a {0}.'.format(type(color))\n \n print '[DB...BAT] Set {0} in color {1} as the figure\\'s title.'.format(title, color)\n self.axes.set_title(title)\n \n self.draw()\n \n return",
"def set_font(s: Optional[int] = 14, reset: Optional[bool] = False) -> None:\n if reset:\n plt.rcParams.update(plt.rcParamsDefault)\n plt.rcParams[\"figure.figsize\"] = [20, 10]\n # plt.rcParams['font.family'] = 'serif'\n # plt.rcParams['font.serif'] = ['Times New Roman'] + plt.rcParams['font.serif']\n plt.rc('font', size=s) # controls default text sizes\n plt.rc('axes', titlesize=s) # fontsize of the axes title\n plt.rc('axes', labelsize=s) # fontsize of the x and y labels\n plt.rc('xtick', labelsize=s - 2) # fontsize of the tick labels\n plt.rc('ytick', labelsize=s - 2) # fontsize of the tick labels\n plt.rc('legend', fontsize=s) # legend fontsize\n plt.rc('figure', titlesize=s + 2) # fontsize of the figure title",
"def __init__(self):\n import matplotlib.pyplot as plt\n\n\n SMALL_SIZE = 12\n MEDIUM_SIZE = 14\n BIGGER_SIZE = 16\n\n plt.rc('font', size=SMALL_SIZE) # controls default text sizes\n plt.rc('axes', titlesize=SMALL_SIZE) # fontsize of the axes title\n plt.rc('axes', labelsize=BIGGER_SIZE) # fontsize of the x and y labels\n plt.rc('xtick', labelsize=SMALL_SIZE) # fontsize of the tick labels\n plt.rc('ytick', labelsize=SMALL_SIZE) # fontsize of the tick labels\n plt.rc('legend', fontsize=SMALL_SIZE) # legend fontsize\n plt.rc('figure', titlesize=BIGGER_SIZE) # fontsize of the figure title",
"def set_matplotlib_font_size(SMALL_SIZE = 8, MEDIUM_SIZE = 10, BIGGER_SIZE = 12):\n\n plt.rc('font', size=SMALL_SIZE) # controls default text sizes\n plt.rc('axes', titlesize=SMALL_SIZE) # fontsize of the axes title\n plt.rc('axes', labelsize=MEDIUM_SIZE) # fontsize of the x and y labels\n plt.rc('xtick', labelsize=SMALL_SIZE) # fontsize of the tick labels\n plt.rc('ytick', labelsize=SMALL_SIZE) # fontsize of the tick labels\n plt.rc('legend', fontsize=SMALL_SIZE) # legend fontsize\n plt.rc('figure', titlesize=BIGGER_SIZE) # fontsize of the figure title",
"def set_title(self, title, color='black'):\n self._myCanvas.set_title(title, color)\n\n return",
"def adjustTitleFont( self ):\n if ( self._titleFont ):\n return\n \n m = self.contentsMargins()\n w = self.rect().width() - (m[0] + m[2] + self.roundingRadius() * 2)\n \n if ( not w ):\n return\n \n font = QApplication.font()\n text = self.displayName()\n if not self.wordWrap() or projex.text.wordcount(text) == 1:\n metrics = QFontMetrics(font)\n \n while ( w < metrics.width(text) ):\n new_size = font.pointSize() - 1\n if ( new_size <= 5 ):\n break\n \n font.setPointSize(new_size)\n metrics = QFontMetrics(font)\n \n self._titleFont = font",
"def title(self, title: str, ax: figure | int = 0, color: str | None = None) -> None:\n fig = self._get_figure(ax)\n fig.title = title # type: ignore[assignment]\n fig.title.align = \"center\" # type: ignore[attr-defined]\n if color is not None:\n fig.title.text_color = self._convert_color(color) # type: ignore[attr-defined]",
"def figsetup(title, xlab, ylab, fname, show=False):\n plt.xlabel(xlab)\n plt.ylabel(ylab)\n plt.title(fname)\n plt.tight_layout()\n plt.title(title)\n plt.legend()\n plt.savefig(\"../figs/\" + fname + \".png\", dpi=250)\n if show is False:\n plt.close()\n else:\n plt.show()\n return",
"def figsetup(title, xlab, ylab, fname, show=False):\n plt.xlabel(xlab)\n plt.ylabel(ylab)\n plt.title(fname)\n plt.tight_layout()\n plt.title(title)\n plt.legend()\n plt.savefig(\"../figs/\" + fname + \".png\", dpi=250)\n if show is False:\n plt.close()\n else:\n plt.show()\n return",
"def set_title(self, title):\n self.title = title\n self.opf.title = title\n self.ncx.title = title",
"def set_title(self, title = \"FORM\"):\n\n c = self.canvas.setTitle(title)",
"def set_axis_title_labels(ax, title, x_label, y_label):\n ax.set_title(title)\n ax.set_xlabel(x_label)\n ax.set_ylabel(y_label)",
"def set_plot_title(self):\n plot_title = self.input_plot_title.text()\n if plot_title:\n self.plot_title = self.input_plot_title.text()\n # Redraw the plot with given title\n if not self.plot_inverted:\n self.draw_plot(self.data_x_axis, self.data_y_axis, self.label_x_axis, self.label_y_axis)\n else:\n self.draw_plot(self.data_y_axis, self.data_x_axis, self.label_y_axis, self.label_x_axis)\n else:\n QMessageBox.about(self, \"Error!\", \"Please enter a title to set in the plot\")",
"def SetTitle(self, title):\n if self._title != title:\n self._title = title\n def closure(pane):\n pane.Caption(title)\n self._PaneInfoOperation(closure)",
"def figsetup(title, xlab, ylab, fname, show=False):\n plt.xlabel(xlab)\n plt.ylabel(ylab)\n plt.title(fname)\n plt.tight_layout()\n plt.title(title)\n plt.legend()\n plt.savefig(\"../figs/\" + fname + \".pdf\")\n if show is False:\n plt.close()\n else:\n plt.show()\n return",
"def setPlotTitle(self):\n plot_title = self.input_plot_title.text()\n if plot_title:\n self.plot_title = self.input_plot_title.text()\n # Redraw the plot with given title\n if not self.plot_inverted:\n self.drawPlot(self.data_x_axis, self.data_y_axis, self.label_x_axis, self.label_y_axis)\n else:\n self.drawPlot(self.data_y_axis, self.data_x_axis, self.label_y_axis, self.label_x_axis)\n else:\n QMessageBox.about(self, \"Error!\", \"Please enter a title to set in the plot\")",
"def set_fontsizes(sizes=None):\n if sizes is None:\n SMALL_SIZE = 8\n MEDIUM_SIZE = 10\n BIGGER_SIZE = 12\n elif isinstance(sizes, int):\n SMALL_SIZE = sizes\n MEDIUM_SIZE = sizes\n BIGGER_SIZE = sizes\n else:\n SMALL_SIZE = sizes[0]\n MEDIUM_SIZE = sizes[1]\n BIGGER_SIZE = sizes[2]\n\n plt.rc('font', size=SMALL_SIZE) # controls default text sizes\n plt.rc('axes', titlesize=SMALL_SIZE) # fontsize of the axes title\n plt.rc('axes', labelsize=MEDIUM_SIZE) # fontsize of the x and y labels\n plt.rc('xtick', labelsize=SMALL_SIZE) # fontsize of the tick labels\n plt.rc('ytick', labelsize=SMALL_SIZE) # fontsize of the tick labels\n plt.rc('legend', fontsize=SMALL_SIZE) # legend fontsize\n plt.rc('figure', titlesize=BIGGER_SIZE) # fontsize of the figure title",
"def title(text, fontsize=FONT_SIZE_L, color='dark ink', ax=None):\n\n if ax is None:\n ax = plt.gca()\n color = decode_color(color)\n return ax.set_title(text, fontsize=fontsize, color=color)",
"def set_title(self, title):\r\n self.title = title",
"def plot_settings(clear = True, grid = True):\n if clear:\n plt.clf() # Clears any previous figures\n\n # Setting figure size\n figure = plt.gcf()\n figure.set_size_inches(18, 10)\n\n # Setting size of plot elements\n plt.rc('axes', labelsize = 22, titlesize = 24) \n plt.rc('xtick', labelsize = 18) \n plt.rc('ytick', labelsize = 18) \n plt.rc('legend', fontsize = 20)\n plt.rc('axes', axisbelow = True) # Ensures that the grid is behind any graph elements\n if grid:\n plt.grid() # Adds a grid to the plot"
] |
[
"0.76607907",
"0.73051995",
"0.7003987",
"0.69982874",
"0.69768006",
"0.69515336",
"0.68642354",
"0.68269694",
"0.67809445",
"0.6734112",
"0.6646728",
"0.659959",
"0.6583342",
"0.65374035",
"0.6534296",
"0.64808226",
"0.6479378",
"0.644033",
"0.644033",
"0.6433921",
"0.6430536",
"0.64226705",
"0.6402491",
"0.6397793",
"0.63326925",
"0.6317504",
"0.62705106",
"0.6264278",
"0.6239436",
"0.6224725"
] |
0.7859415
|
0
|
Output the figure, either as an image on the screen or to the hard disk as a .png or .fits file.
|
def to_figure(self, structure):
    if not self.bypass:
        # Compare strings with ==, not identity (`is`), which only works by accident.
        if self.format == "show":
            plt.show()
        elif self.format == "png":
            plt.savefig(self.path + self.filename + ".png", bbox_inches="tight")
        elif self.format == "fits":
            if structure is not None:
                structure.output_to_fits(
                    file_path=self.path + self.filename + ".fits", overwrite=True
                )
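For orientation, a standalone sketch of the same format dispatch outside the class; the `structure` argument and its `output_to_fits` method are assumptions carried over from the snippet above, and the function name is illustrative.

import matplotlib.pyplot as plt

def output_figure(fmt, path, filename, structure=None, bypass=False):
    # Mirrors the branch logic above: show on screen, save a .png, or
    # delegate .fits output to the structure object when one is given.
    if bypass:
        return
    if fmt == "show":
        plt.show()
    elif fmt == "png":
        plt.savefig(path + filename + ".png", bbox_inches="tight")
    elif fmt == "fits" and structure is not None:
        structure.output_to_fits(file_path=path + filename + ".fits", overwrite=True)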
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def save_figure(self, data):\n\n\t\tsizes = np.shape(data)\n\t\tfig = plt.figure()\n\t\tfig.set_size_inches(1, 1. * sizes[0]/sizes[1], forward = False)\n\t\tax = plt.Axes(fig, [0., 0., 1., 1.])\n\t\tax.set_axis_off()\n\t\tfig.add_axes(ax)\n\t\tax.imshow(data, \"gray\")\n\n\t\t#plt.show()\n\t\tself.plotfile = os.path.join('static', 'Figure' + '.png')\n\t\tplt.savefig(self.plotfile, dpi = sizes[1])",
"def save_fig(fig, filename):\n fig_filepath = figures_path / filename\n fig.write_image(str(fig_filepath))\n logging.info(f\"Written figure to {fig_filepath.resolve()}\")",
"def subplot_to_figure(self):\n if self.format is \"show\":\n plt.show()\n elif self.format is \"png\":\n plt.savefig(self.path + self.filename + \".png\", bbox_inches=\"tight\")",
"def show_input_to_output(img_ns):\n figure()\n \n sp = subplot(1, 2, 1).imshow(img_ns.img)\n sp.axes.grid(False)\n sp.axes.tick_params(bottom=False, left=False, which='both',labelleft=False,labelbottom=False,length=0)\n title(\"Input Image\", fontsize=10);\n outimg = tiles_to_images(img_ns, img_ns.tile_grid, img_ns.tile_catalog, img_ns.tile_size)\n sp = subplot(1, 2, 2).imshow(outimg.astype(np.uint8));\n sp.axes.tick_params(bottom=False, left=False, which='both',labelleft=False,labelbottom=False,length=0)\n title(\"Output Image From Tiles\", fontsize=10);\n sp.axes.grid(False)\n #print(outimg.astype(np.uint8))\n #print(img_ns)\n plt.savefig(img_ns.output_filename + \"_input_to_output.pdf\", bbox_inches=\"tight\")\n plt.close()",
"def print_image(indiv,name):\n routine = gp.compile(indiv,pset)\n output = gen_beat_output(routine)\n bits = np.array(map(bitlist,output)[0:24000]).transpose()\n plt.style.use('classic')\n plt.imshow(bits,interpolation='nearest',aspect='auto',cmap=plt.get_cmap('Greys'))\n plt.savefig(name+\".png\",dpi=150)",
"def save_plot_as_image(self):\r\n plt.savefig(ROOT_DIR + '/presentation/images/' + self.folder + '/' + self.generated_image_name + '.png',\r\n bbox_inches='tight')",
"def outputFigure(figure, outputFolder, outputFileNameSuffix, plotType):\n logger = logging.getLogger(\"DMlog\")\n logger.info(\"Figure output generation...\")\n if outputFolder != '.' and not os.path.exists(outputFolder):\n os.makedirs(outputFolder)\n\n # Figure Output\n fig_filename_tmplt = \"{file_suffix}_{plot_type}_{plot_id}.png\".format(\n file_suffix=outputFileNameSuffix, plot_type=plotType,\n plot_id=\"{plot_id}\")\n\n fig_path = os.path.normpath(os.path.join(outputFolder, fig_filename_tmplt))\n\n # This will save multiple figures if multi_fig == True\n if isinstance(figure, list):\n for i, fig in enumerate(figure):\n fig.savefig(fig_path.format(plot_id=str(i)), bbox_inches='tight')\n else:\n figure.savefig(fig_path.format(plot_id='all'), bbox_inches='tight')\n\n logger.info(\"Figure output generation... Done.\")",
"def save_plot(self):\r\n\t\t# Generate the plot\r\n\t\tself.generate_plot()\r\n\t\t# Create save directory\r\n\t\tdirectory = self.dir + '/%s/' % str(int(self.universe.init_time))\r\n\t\tif not path_exists(directory):\r\n\t\t\tmakedirs(directory)\r\n\t\t# Save image file\r\n\t\tself.fig.savefig(directory+str(self.universe.time))",
"def fig_response(fig):\n img_bytes = io.BytesIO()\n fig.savefig(img_bytes)\n img_bytes.seek(0)\n return send_file(img_bytes, mimetype='image/png')",
"def render_pylab(figure, fmt=\"jpg\"):\n if not callable(getattr(figure, \"savefig\", None)):\n return None\n output = StringIO()\n figure.savefig(output, format=fmt)\n contents = output.getvalue()\n output.close()\n return contents, \"image/%s\" % fmt",
"def write_png(self, output_name):\n self.fig.savefig(output_name)\n return",
"def cb_save(event):\n fig.savefig('sample.univariate_discrete.py.png', dpi=300, format='png', transparent=True)",
"def save(file_name):\n setup()\n plt.savefig(file_name)",
"def _save_and_show(fig, ax, save, show, close, filename, file_format, dpi, axis_off):\n # save the figure if specified\n if save:\n # create the save folder if it doesn't already exist\n if not os.path.exists(settings.imgs_folder):\n os.makedirs(settings.imgs_folder)\n path_filename = os.path.join(settings.imgs_folder, os.extsep.join([filename, file_format]))\n\n if file_format == \"svg\":\n # if the file_format is svg, prep the fig/ax a bit for saving\n ax.axis(\"off\")\n ax.set_position([0, 0, 1, 1])\n ax.patch.set_alpha(0.0)\n fig.patch.set_alpha(0.0)\n fig.savefig(\n path_filename,\n bbox_inches=0,\n format=file_format,\n facecolor=fig.get_facecolor(),\n transparent=True,\n )\n else:\n if axis_off:\n # if axis is turned off, constrain the saved figure's extent to\n # the interior of the axis\n extent = ax.get_window_extent().transformed(fig.dpi_scale_trans.inverted())\n else:\n extent = \"tight\"\n fig.savefig(\n path_filename,\n dpi=dpi,\n bbox_inches=extent,\n format=file_format,\n facecolor=fig.get_facecolor(),\n transparent=True,\n )\n utils.log(\"Saved the figure to disk\")\n\n # show the figure if specified\n if show:\n plt.show()\n utils.log(\"Showed the plot\")\n # if show=False, close the figure if close=True to prevent display\n elif close:\n plt.close()\n\n return fig, ax",
"def show_figure(self):\n pylab.show()",
"def quick_plot_save(self, mode='png'):\n batch = self.meta['batch']\n cellid = self.meta['cellid']\n modelname = self.meta['modelname']\n\n fig = plt.figure(figsize=(8, 9))\n plot_set = []\n for idx, m in enumerate(self.modules):\n if m.auto_plot:\n plot_set.append(idx)\n \n for sp, idx in enumerate(plot_set):\n m=self.modules[idx]\n log.info(self.mod_names[idx])\n plt.subplot(len(plot_set), 1, sp+1)\n m.do_plot(m)\n \n if len(plot_set)<6:\n plt.tight_layout()\n\n filename = (\n sc.DIRECTORY_ROOT + \"nems_saved_images/batch{0}/{1}/{2}.{3}\"\n .format(batch, cellid, modelname, mode)\n )\n\n if AWS:\n s3 = boto3.resource('s3')\n key = filename[len(sc.DIRECTORY_ROOT):]\n fileobj = io.BytesIO()\n fig.savefig(fileobj, format=mode)\n fileobj.seek(0)\n s3.Object(sc.PRIMARY_BUCKET, key).put(Body=fileobj)\n # return (\"s3://\" + sc.PRIMARY_BUCKET + \"/\" + key)\n else:\n dr = (\n sc.DIRECTORY_ROOT\n + \"nems_saved_images/batch{0}/{1}/\".format(batch, cellid)\n )\n try:\n os.stat(dr)\n if os.path.isfile(filename):\n os.remove(filename)\n except BaseException:\n os.mkdir(dr)\n\n try:\n fig.savefig(filename)\n except Exception as e:\n log.warn(\"Bad file extension for figure or couldn't save\")\n raise e\n\n try:\n os.chmod(filename, 0o666)\n except Exception as e:\n log.warn(\"Couldn't modify file permissions for figure\")\n raise e\n\n return filename",
"def make_image(self, frame, filename, **kwds):\n p = plot.plot(frame, **kwds)\n p.save_image(filename)",
"def save_figure(self, filename, format='png', size=None,\n magnification='auto', overwrite=False):\n from menpo.io.output.base import _export\n savefig_args = {'size': size, 'figure': self.figure,\n 'magnification': magnification}\n # Use the export code so that we have a consistent interface\n _export(savefig_args, filename, self._extensions_map, format,\n overwrite=overwrite)",
"def savefig(fp):\n try:\n plt.savefig(fp, dpi=400)\n _logger.info(\"...saved figure to %s\", fp)\n except Exception as err:\n _logger.warning(err)\n _logger.warning(\"could not save the figure to %s. i'll just show it to you:\", fp)\n plt.show()",
"def test_plot_save_figure(self):\n pname = os.path.join(\n self.datadir,\n 'monol_testA_E3-50_pds_rebin1.03' + HEN_FILE_EXTENSION)\n hen.plot.main([pname, '--noplot', '--figname',\n os.path.join(self.datadir,\n 'monol_testA_E3-50_pds_rebin1.03.png'),\n '-o', 'dummy.qdp'])",
"def printout(outFile: str, xlabel: str='', ylabel: str='',\r\n title: str='', outDir: str='.') -> None:\r\n \r\n plt.xlabel(xlabel)\r\n plt.ylabel(ylabel)\r\n plt.title(title)\r\n \r\n plt.tight_layout\r\n \r\n xlim = plt.gca().get_xlim()\r\n plt.hlines(0, xlim[0], xlim[1], linestyles='--', colors='#999999')\r\n plt.gca().set_xlim(xlim)\r\n \r\n saveTo = os.path.join(outDir, outFile)\r\n plt.savefig(saveTo, dpi=200)\r\n \r\n print('OutDir: {0}'.format(outDir))\r\n print('Figure saved to {0}'.format(outFile))\r\n \r\n plt.show()\r\n plt.close()",
"def _maybe_output_fig(fig, file=None, show=True):\n if fig is None:\n return\n\n if file is not None:\n fig.canvas.print_figure(file, dpi=fig.dpi)\n elif show:\n # If there was no file provided, pyplot should already be available and\n # we can import it safely without additional warnings.\n from matplotlib import pyplot\n pyplot.show()",
"def save():\n pl.savefig('/home/filippini/Documents/plot/RUN55/compa'+INFO_RUN+'.png')",
"def show_image_in_new_figure(img, to_save=False, fname=\"extractor_test_results/result.png\"):\n \n plt.figure()\n skimage.io.imshow(img, cmap = 'gray')\n if show_plots: plt.show()\n \n if to_save:\n plt.savefig(fname)",
"def save(self,fig,save=None):\n\n import matplotlib.pylab as plt\n #fig.set_tight_layout(True)\n plt.tight_layout()\n fig = plt.gcf()\n plt.show()\n if save is None:\n decision = input(\"Do you want to save the file? (Enter 'y' to save, enter others to skip)\")\n if decision is 'y':\n filename = input('Please specify .eps (1200 dpi) filename: ').strip()\n\n fig.savefig(filename, format='eps', dpi=1200)\n print('Plot saved to {}.'.format(save))\n else:\n fig.savefig(save, format='eps', dpi=1200)\n print('Plot saved to {}.'.format(save))\n\n\n return None",
"def on_export(self):\n path = os.path.dirname(os.path.realpath(__file__))\n\n filters = ('Portable Network Graphics (*.png);;'\n 'Portable Document Format (*.pdf);;'\n 'Postscript (*.ps);;'\n 'Encapsulated Postscript (*.eps);;'\n 'Scalable Vector Graphics (*.svg)')\n\n filename = QtGui.QFileDialog.getSaveFileName(self,\n caption='Export figure',\n directory=path,\n filter=filters)\n filename = str(filename)\n\n if filename != '':\n previous_size = self.fig.get_size_inches()\n self.fig.set_size_inches(float(self.le_width.text()),\n float(self.le_height.text()))\n\n dpi = int(self.le_dpi.text())\n\n self.fig.savefig(filename, dpi=dpi, bbox_inches='tight')\n self.fig.set_size_inches(previous_size)\n\n self.canvas.draw()",
"def save_figure(self, data, name, rects=None):\n\t\tsizes = np.shape(data)\n\t\tfig = plt.figure()\n\t\tfig.set_size_inches(1, 1. * sizes[0]/sizes[1], forward = False)\n\t\tax = plt.Axes(fig, [0., 0., 1., 1.])\n\t\tax.set_axis_off()\n\t\tfig.add_axes(ax)\n\t\tax.imshow(data, self.HEATMAP)\n\n\t\tif rects:\n\t\t\tfor r in rects:\n\t\t\t\tax.add_patch(r)\n\n\t\tplotfile = os.path.join('static', name + '_' + str(time.time()) + '.png')\n\t\t#plt.show()\n\t\tplt.savefig(plotfile, dpi = sizes[1])\n\n\t\treturn plotfile",
"def visualization_one(target_var = None, input_vars= None, output_image_name=None):\n ###\n # Main chunk of code here\n ###\n\n # Starter code for labeling the image\n plt.xlabel(None, figure = fig)\n plt.ylabel(None, figure = fig)\n plt.title(None, figure= fig)\n plt.legend()\n\n # exporting the image to the img folder\n plt.savefig(f'img/{output_image_name}.png', transparent = True, figure = fig)\n return fig",
"def show_save_plot(self, name=''):\n if self.save_plots:\n plt.savefig(os.path.join(self.out_folder, f'{name}.png'), dpi=300)\n plt.show()",
"def save_image(self, image_file):\r\n self.ensure_pyplot()\r\n command = 'plt.gcf().savefig(\"%s\")'%image_file\r\n #print 'SAVEFIG', command # dbg\r\n self.process_input_line('bookmark ipy_thisdir', store_history=False)\r\n self.process_input_line('cd -b ipy_savedir', store_history=False)\r\n self.process_input_line(command, store_history=False)\r\n self.process_input_line('cd -b ipy_thisdir', store_history=False)\r\n self.process_input_line('bookmark -d ipy_thisdir', store_history=False)\r\n self.clear_cout()"
] |
[
"0.7005608",
"0.68900794",
"0.67705536",
"0.66974014",
"0.6684906",
"0.6641125",
"0.65727204",
"0.65119344",
"0.6449455",
"0.6446146",
"0.644137",
"0.6423629",
"0.6375041",
"0.63690364",
"0.6357464",
"0.6352964",
"0.6331186",
"0.6325431",
"0.63066924",
"0.630663",
"0.62953204",
"0.6287095",
"0.62844014",
"0.6277421",
"0.6270837",
"0.62663156",
"0.6265784",
"0.62617034",
"0.6228359",
"0.6212999"
] |
0.7381266
|
0
|
Test that all test data can be converted back to a FITS header.
|
def test_fitsheader():
extensions = ('fts', 'fits')
for ext in extensions:
for ffile in Path(testpath).glob(f"*.{ext}*"):
fits_file = fits.open(ffile)
fits_file.verify("fix")
data, header = fits_file[0].data, fits_file[0].header
meta_header = MetaDict(OrderedDict(header))
sunpy.io.fits.header_to_fits(meta_header)
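A self-contained sketch of the same header round-trip using only astropy, in case the sunpy helper above is unavailable; dropping COMMENT/HISTORY/blank keywords is an assumption made to keep the flattened mapping single-valued.

from collections import OrderedDict
from astropy.io import fits

def roundtrip_header(path):
    # Read and repair the primary header, flatten it to a plain mapping,
    # then rebuild a fits.Header from that mapping.
    with fits.open(path) as hdul:
        hdul.verify("fix")
        header = hdul[0].header
    plain = OrderedDict(
        (key, value) for key, value in header.items()
        if key not in ("", "COMMENT", "HISTORY")
    )
    return fits.Header(plain.items())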
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def verify_header (filename, htypes=None):\n\n # dictionary\n dict_head = {\n # raw header\n # commenting out SIMPLE, BSCALE and BZERO - basic keywords\n # that will be present in images but not in binary fits tables\n #'SIMPLE': {'htype':'raw', 'dtype':bool, 'DB':False, 'None_OK':True},\n #'BSCALE': {'htype':'raw', 'dtype':float, 'DB':False, 'None_OK':True},\n #'BZERO': {'htype':'raw', 'dtype':float, 'DB':False, 'None_OK':True},\n 'BITPIX': {'htype':'raw', 'dtype':int, 'DB':False, 'None_OK':True},\n 'NAXIS': {'htype':'raw', 'dtype':int, 'DB':False, 'None_OK':True},\n 'NAXIS1': {'htype':'raw', 'dtype':int, 'DB':False, 'None_OK':True},\n 'NAXIS2': {'htype':'raw', 'dtype':int, 'DB':False, 'None_OK':True},\n 'BUNIT': {'htype':'raw', 'dtype':str, 'DB':False, 'None_OK':True},\n #'CCD-AMP': {'htype':'raw', 'dtype':str, 'DB':False, 'None_OK':True},\n 'SET-TEMP': {'htype':'raw', 'dtype':float, 'DB':False, 'None_OK':True},\n 'CCD-TEMP': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'XBINNING': {'htype':'raw', 'dtype':int, 'DB':False, 'None_OK':True},\n 'YBINNING': {'htype':'raw', 'dtype':int, 'DB':False, 'None_OK':True},\n #'CCD-SET': {'htype':'raw', 'dtype':str, 'DB':False, 'None_OK':True},\n 'ALTITUDE': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'AZIMUTH': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'DOMEAZ': {'htype':'raw', 'dtype':float, 'DB':False, 'None_OK':True},\n 'RADESYS': {'htype':'raw', 'dtype':str, 'DB':False, 'None_OK':True},\n 'EPOCH': {'htype':'raw', 'dtype':float, 'DB':False, 'None_OK':True},\n 'RA': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':False},\n 'RA-REF': {'htype':'raw', 'dtype':str, 'DB':True, 'None_OK':True},\n #'RA-TEL': {'htype':'raw', 'dtype':float, 'DB':False, 'None_OK':True},\n 'DEC': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':False},\n 'DEC-REF': {'htype':'raw', 'dtype':str, 'DB':True, 'None_OK':True},\n #'DEC-TEL': {'htype':'raw', 'dtype':float, 'DB':False, 'None_OK':True},\n 'HA': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':False},\n 'FLIPSTAT': {'htype':'raw', 'dtype':str, 'DB':True, 'None_OK':True},\n 'EXPTIME': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':False},\n 'ISTRACKI': {'htype':'raw', 'dtype':bool, 'DB':False, 'None_OK':True},\n 'ACQSTART': {'htype':'raw', 'dtype':str, 'DB':True, 'None_OK':False},\n 'ACQEND': {'htype':'raw', 'dtype':str, 'DB':True, 'None_OK':True},\n 'GPSSTART': {'htype':'raw', 'dtype':str, 'DB':True, 'None_OK':True},\n 'GPSEND': {'htype':'raw', 'dtype':str, 'DB':True, 'None_OK':True},\n 'GPS-SHUT': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'DATE-OBS': {'htype':'raw', 'dtype':str, 'DB':True, 'None_OK':False},\n 'MJD-OBS': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':False},\n 'LST': {'htype':'raw', 'dtype':str, 'DB':True, 'None_OK':False},\n 'UTC': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':False},\n 'TIMESYS': {'htype':'raw', 'dtype':str, 'DB':False, 'None_OK':True},\n 'ORIGIN': {'htype':'raw', 'dtype':str, 'DB':False, 'None_OK':True},\n 'MPC-CODE': {'htype':'raw', 'dtype':str, 'DB':True, 'None_OK':False},\n 'TELESCOP': {'htype':'raw', 'dtype':str, 'DB':True, 'None_OK':False},\n 'CL-BASE': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'RH-MAST': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'RH-DOME': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'RH-AIRCO': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'RH-PIER': {'htype':'raw', 'dtype':float, 'DB':True, 
'None_OK':True},\n 'PRESSURE': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'T-PIER': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'T-DOME': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'T-ROOF': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'T-AIRCO': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'T-MAST': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'T-STRUT': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'T-CRING': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'T-SPIDER': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'T-FWN': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'T-FWS': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'T-M2HOLD': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'T-GUICAM': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'T-M1': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'T-CRYWIN': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'T-CRYGET': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'T-CRYCP': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'PRES-CRY': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'WINDAVE': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'WINDGUST': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'WINDDIR': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'SITELAT': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'SITELONG': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'ELEVATIO': {'htype':'raw', 'dtype':int, 'DB':True, 'None_OK':True},\n #'WEATIME': {'htype':'raw', 'dtype':str, 'DB':False, 'None_OK':True},\n 'FILTER': {'htype':'raw', 'dtype':str, 'DB':True, 'None_OK':False},\n #'FILTERID': {'htype':'raw', 'dtype':str, 'DB':False, 'None_OK':True},\n 'CCD-ID': {'htype':'raw', 'dtype':str, 'DB':True, 'None_OK':True},\n 'CONTROLL': {'htype':'raw', 'dtype':str, 'DB':True, 'None_OK':True},\n 'DETSPEED': {'htype':'raw', 'dtype':int, 'DB':True, 'None_OK':True},\n 'CCD-NW': {'htype':'raw', 'dtype':int, 'DB':False, 'None_OK':True},\n 'CCD-NH': {'htype':'raw', 'dtype':int, 'DB':False, 'None_OK':True},\n 'INSTRUME': {'htype':'raw', 'dtype':str, 'DB':True, 'None_OK':True},\n 'FOCUSPOS': {'htype':'raw', 'dtype':int, 'DB':True, 'None_OK':True},\n 'IMAGETYP': {'htype':'raw', 'dtype':str, 'DB':False, 'None_OK':True},\n 'OBJECT': {'htype':'raw', 'dtype':str, 'DB':True, 'None_OK':False},\n 'AIRMASS': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':False},\n 'ORIGFILE': {'htype':'raw', 'dtype':str, 'DB':True, 'None_OK':False},\n 'OBSERVER': {'htype':'raw', 'dtype':str, 'DB':True, 'None_OK':True},\n 'ABOTVER': {'htype':'raw', 'dtype':str, 'DB':True, 'None_OK':True},\n 'PROGNAME': {'htype':'raw', 'dtype':str, 'DB':True, 'None_OK':True},\n 'PROGID': {'htype':'raw', 'dtype':str, 'DB':True, 'None_OK':True},\n 'GUIDERST': {'htype':'raw', 'dtype':str, 'DB':True, 'None_OK':True},\n 'GUIDERFQ': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'TRAKTIME': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'ADCX': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'ADCY': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n #\n # full header\n 'BB-V': {'htype':'full', 'dtype':str, 'DB':True, 'None_OK':False},\n 'BB-START': {'htype':'full', 'dtype':str, 'DB':True, 'None_OK':False},\n 'KW-V': 
{'htype':'full', 'dtype':str, 'DB':True, 'None_OK':False},\n 'LOG': {'htype':'full', 'dtype':str, 'DB':False, 'None_OK':True},\n 'LOG-IMA': {'htype':'full', 'dtype':str, 'DB':False, 'None_OK':True},\n 'N-INFNAN': {'htype':'full', 'dtype':int, 'DB':True, 'None_OK':True},\n 'XTALK-P': {'htype':'full', 'dtype':bool, 'DB':True, 'None_OK':False},\n 'XTALK-F': {'htype':'full', 'dtype':str, 'DB':False, 'None_OK':True},\n 'NONLIN-P': {'htype':'full', 'dtype':bool, 'DB':True, 'None_OK':False},\n 'NONLIN-F': {'htype':'full', 'dtype':str, 'DB':False, 'None_OK':True},\n 'GAIN-P': {'htype':'full', 'dtype':bool, 'DB':True, 'None_OK':False},\n 'GAIN': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'GAIN1': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'GAIN16': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'OS-P': {'htype':'full', 'dtype':bool, 'DB':True, 'None_OK':False},\n 'BIASMEAN': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'BIASM1': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'BIASM16': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'RDNOISE': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'RDN1': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'RDN16': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'BIAS1A0': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'BIAS1A1': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'VFITOK1': {'htype':'full', 'dtype':bool, 'DB':False, 'None_OK':True},\n 'BIAS16A0': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'BIAS16A1': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'VFITOK16': {'htype':'full', 'dtype':bool, 'DB':False, 'None_OK':True},\n 'MBIAS-P': {'htype':'full', 'dtype':bool, 'DB':True, 'None_OK':False},\n 'MBIAS-F': {'htype':'full', 'dtype':str, 'DB':True, 'None_OK':True},\n 'MB-NDAYS': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'SATURATE': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'NOBJ-SAT': {'htype':'full', 'dtype':int, 'DB':False, 'None_OK':True},\n 'MFLAT-P': {'htype':'full', 'dtype':bool, 'DB':True, 'None_OK':False},\n 'MFLAT-F': {'htype':'full', 'dtype':str, 'DB':True, 'None_OK':True},\n 'MF-NDAYS': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'MFRING-P': {'htype':'full', 'dtype':bool, 'DB':True, 'None_OK':False},\n 'MFRING-F': {'htype':'full', 'dtype':str, 'DB':True, 'None_OK':True},\n 'FRRATIO': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'COSMIC-P': {'htype':'full', 'dtype':bool, 'DB':True, 'None_OK':False},\n 'NCOSMICS': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'SAT-P': {'htype':'full', 'dtype':bool, 'DB':True, 'None_OK':False},\n 'NSATS': {'htype':'full', 'dtype':int, 'DB':True, 'None_OK':True},\n 'REDFILE': {'htype':'full', 'dtype':str, 'DB':True, 'None_OK':True},\n 'MASKFILE': {'htype':'full', 'dtype':str, 'DB':True, 'None_OK':True},\n 'S-P': {'htype':'full', 'dtype':bool, 'DB':True, 'None_OK':False},\n 'S-V': {'htype':'full', 'dtype':str, 'DB':False, 'None_OK':True},\n 'S-NOBJ': {'htype':'full', 'dtype':int, 'DB':True, 'None_OK':True},\n 'S-FWHM': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'S-FWSTD': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'S-SEEING': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'S-SEESTD': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'S-ELONG': 
{'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'S-ELOSTD': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'S-BKG': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'S-BKGSTD': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'S-VIGNET': {'htype':'full', 'dtype':int, 'DB':False, 'None_OK':True},\n 'BKG-CORR': {'htype':'full', 'dtype':bool, 'DB':False, 'None_OK':True},\n 'BKG-CHI2': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'BKG-CF1': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'BKG-CF16': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'BKG-FDEG': {'htype':'full', 'dtype':int, 'DB':False, 'None_OK':True},\n 'BKG-FC0': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'A-P': {'htype':'full', 'dtype':bool, 'DB':True, 'None_OK':False},\n 'A-V': {'htype':'full', 'dtype':str, 'DB':False, 'None_OK':True},\n 'A-INDEX': {'htype':'full', 'dtype':str, 'DB':False, 'None_OK':True},\n 'A-PSCALE': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'A-PSCALX': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'A-PSCALY': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'A-ROT': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'A-ROTX': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'A-ROTY': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'A-CAT-F': {'htype':'full', 'dtype':str, 'DB':True, 'None_OK':True},\n 'A-NAST': {'htype':'full', 'dtype':int, 'DB':True, 'None_OK':True},\n 'A-TNAST': {'htype':'full', 'dtype':int, 'DB':False, 'None_OK':True},\n 'A-NAMAX': {'htype':'full', 'dtype':int, 'DB':False, 'None_OK':True},\n 'A-DRA': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'A-DRASTD': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'A-DDEC': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'A-DDESTD': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'PSF-P': {'htype':'full', 'dtype':bool, 'DB':True, 'None_OK':False},\n 'PSF-V': {'htype':'full', 'dtype':str, 'DB':False, 'None_OK':True},\n 'PSF-RAD': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-RADP': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-SIZE': {'htype':'full', 'dtype':int, 'DB':True, 'None_OK':True},\n 'PSF-FRAC': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-SAMP': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-CFGS': {'htype':'full', 'dtype':int, 'DB':True, 'None_OK':True},\n 'PSF-NOBJ': {'htype':'full', 'dtype':int, 'DB':True, 'None_OK':True},\n 'PSF-FIX': {'htype':'full', 'dtype':bool, 'DB':False, 'None_OK':True},\n 'PSF-PLDG': {'htype':'full', 'dtype':int, 'DB':False, 'None_OK':True},\n 'PSF-CHI2': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'PSF-FWHM': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-SEE': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'PSF-PMIN': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-PMAX': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-PMED': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-PSTD': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-BMIN': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-BMAX': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-BMED': {'htype':'full', 'dtype':float, 'DB':False, 
'None_OK':True},\n 'PSF-BSTD': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-EMNM': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-EMXM': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-EMDM': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-ESTM': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-FMNM': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-FMXM': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-FMDM': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-FSTM': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-EMNG': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-EMXG': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-EMDG': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-ESTG': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-FMNG': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-FMXG': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-FMDG': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-FSTG': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PC-P': {'htype':'full', 'dtype':bool, 'DB':True, 'None_OK':False},\n 'PC-CAT-F': {'htype':'full', 'dtype':str, 'DB':True, 'None_OK':True},\n 'PC-NCAL': {'htype':'full', 'dtype':int, 'DB':True, 'None_OK':True},\n 'PC-TNCAL': {'htype':'full', 'dtype':int, 'DB':False, 'None_OK':True},\n 'PC-FNCAL': {'htype':'full', 'dtype':int, 'DB':False, 'None_OK':True},\n 'PC-NCMAX': {'htype':'full', 'dtype':int, 'DB':False, 'None_OK':True},\n 'PC-NCMIN': {'htype':'full', 'dtype':int, 'DB':False, 'None_OK':True},\n 'PC-ZPFDG': {'htype':'full', 'dtype':int, 'DB':False, 'None_OK':True},\n 'PC-ZPF0': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PC-TNSUB': {'htype':'full', 'dtype':int, 'DB':False, 'None_OK':True},\n 'PC-NSUB': {'htype':'full', 'dtype':int, 'DB':False, 'None_OK':True},\n 'PC-MZPD': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'PC-MZPS': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PC-ZPDEF': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PC-ZP': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'PC-ZPSTD': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'PC-EXTCO': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'AIRMASSC': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'RA-CNTR': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'DEC-CNTR': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'PC-AIRM': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'NSIGMA': {'htype':'full', 'dtype':int, 'DB':True, 'None_OK':True},\n 'LIMEFLUX': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'LIMMAG': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'NOBJECTS': {'htype':'full', 'dtype':int, 'DB':True, 'None_OK':True},\n 'RADECOFF': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'FORMAT-P': {'htype':'full', 'dtype':bool, 'DB':False, 'None_OK':True},\n 'DUMCAT': {'htype':'full', 'dtype':bool, 'DB':True, 'None_OK':False},\n 'QC-FLAG': {'htype':'full', 'dtype':str, 'DB':True, 'None_OK':False},\n 'DATEFILE': {'htype':'full', 'dtype':str, 'DB':False, 'None_OK':True},\n #\n # transient header\n 'SWARP-P': {'htype':'trans', 'dtype':bool, 
'DB':True, 'None_OK':False},\n 'SWARP-V': {'htype':'trans', 'dtype':str, 'DB':False, 'None_OK':True},\n 'Z-REF': {'htype':'trans', 'dtype':str, 'DB':False, 'None_OK':True},\n 'Z-DXYLOC': {'htype':'trans', 'dtype':bool, 'DB':False, 'None_OK':True},\n 'Z-DX': {'htype':'trans', 'dtype':float, 'DB':True, 'None_OK':True},\n 'Z-DY': {'htype':'trans', 'dtype':float, 'DB':True, 'None_OK':True},\n 'Z-DXSTD': {'htype':'trans', 'dtype':float, 'DB':True, 'None_OK':True},\n 'Z-DYSTD': {'htype':'trans', 'dtype':float, 'DB':True, 'None_OK':True},\n 'Z-FNRLOC': {'htype':'trans', 'dtype':bool, 'DB':False, 'None_OK':True},\n 'Z-FNR': {'htype':'trans', 'dtype':float, 'DB':True, 'None_OK':True},\n 'Z-FNRSTD': {'htype':'trans', 'dtype':float, 'DB':True, 'None_OK':True},\n 'Z-P': {'htype':'trans', 'dtype':bool, 'DB':True, 'None_OK':False},\n 'Z-V': {'htype':'trans', 'dtype':str, 'DB':False, 'None_OK':True},\n 'Z-SIZE': {'htype':'trans', 'dtype':int, 'DB':False, 'None_OK':True},\n 'Z-BSIZE': {'htype':'trans', 'dtype':int, 'DB':False, 'None_OK':True},\n 'Z-SCMED': {'htype':'trans', 'dtype':float, 'DB':True, 'None_OK':True},\n 'Z-SCSTD': {'htype':'trans', 'dtype':float, 'DB':True, 'None_OK':True},\n 'Z-FPEMED': {'htype':'trans', 'dtype':float, 'DB':False, 'None_OK':True},\n 'Z-FPESTD': {'htype':'trans', 'dtype':float, 'DB':False, 'None_OK':True},\n 'T-NSIGMA': {'htype':'trans', 'dtype':int, 'DB':True, 'None_OK':True},\n 'T-LFLUX': {'htype':'trans', 'dtype':float, 'DB':False, 'None_OK':True},\n 'T-NTRANS': {'htype':'trans', 'dtype':int, 'DB':True, 'None_OK':True},\n 'T-FTRANS': {'htype':'trans', 'dtype':float, 'DB':True, 'None_OK':True},\n 'T-LMAG': {'htype':'trans', 'dtype':float, 'DB':True, 'None_OK':True},\n 'T-NFAKE': {'htype':'trans', 'dtype':int, 'DB':False, 'None_OK':True},\n 'T-FAKESN': {'htype':'trans', 'dtype':float, 'DB':False, 'None_OK':True},\n 'MC-P': {'htype':'trans', 'dtype':bool, 'DB':True, 'None_OK':False},\n 'MC-V': {'htype':'trans', 'dtype':str, 'DB':False, 'None_OK':True},\n 'MC-MODEL': {'htype':'trans', 'dtype':str, 'DB':False, 'None_OK':True},\n 'TDUMCAT': {'htype':'trans', 'dtype':bool, 'DB':True, 'None_OK':False},\n 'TQC-FLAG': {'htype':'trans', 'dtype':str, 'DB':True, 'None_OK':False},\n }\n\n # read header of filename\n if isfile (filename):\n header = read_hdulist (filename, get_data=False, get_header=True)\n else:\n # return success=False if it does not exist\n log.warning ('file {} does not exist; not able to verify its header'\n .format(filename))\n return False\n\n\n # force [htypes] to be a list\n htypes_list = list(htypes)\n\n # loop keys in dict_head\n for key in dict_head.keys():\n\n # only check keywords with htype matching the input [htypes]\n if dict_head[key]['htype'] not in htypes_list:\n continue\n\n # check that key is present in header\n if key in header:\n\n # provide warning if dtype not as expected and header\n # keyword value is not 'None'\n if (dict_head[key]['dtype'] != type(header[key]) and\n header[key] != 'None'):\n log.warning ('dtype of keyword {}: {} does not match the '\n 'expected dtype: {} in header of {}'\n .format(key, type(header[key]),\n dict_head[key]['dtype'], filename))\n\n # if key goes to DataBase and value is 'None' or None\n # while 'None_OK' is False, raise an exception\n if (dict_head[key]['DB'] and not dict_head[key]['None_OK'] and\n (header[key] is None or header[key] == 'None')):\n msg = ('DataBase keyword {} not allowed to have \\'None\\' or '\n 'None value in header of {}'.format(key, filename))\n log.error (msg)\n raise ValueError 
(msg)\n\n\n else:\n msg = 'keyword {} not present in header of {}'.format(key, filename)\n # if keyword will be ingested into the database, raise an exception\n if dict_head[key]['DB']:\n log.error (msg)\n raise KeyError (msg)\n\n else:\n log.warning (msg)\n\n\n return",
"def test_header_update8(self, capsys):\n\n # Prepare input files.\n self.get_data(\"input\", \"ocu252cmq_raw.fits\")\n self.get_data(\"input\", \"ocu252cmq_spt.fits\")\n\n capsys.readouterr()\n\n tastis('ocu252cmq_raw.fits', update=True)\n\n captured = capsys.readouterr()\n assert captured.out == \"===============================================================================\\n\" \\\n \"ocu252cmq HST/STIS MIRVIS F28X50OII ACQ/POINT\\n\" \\\n \"prop: 14143 visit: 52 line: 1 target: BD+41-3306\\n\" \\\n \"obs date, time: 2016-06-06 08:30:05 exposure time: 2.10\\n\" \\\n \"dom GS/FGS: N2JU001340F2 sub-dom GS/FGS: N2K1001229F1\\n\" \\\n \"ACQ params: bias sub: 1510 checkbox: 3 method: FLUX CENTROID\\n\" \\\n \"subarray (axis1,axis2): size=(100,100) corner=(487,466)\\n\" \\\n \"-------------------------------------------------------------------------------\\n\" \\\n \"Coarse locate phase: Target flux in max checkbox (DN): 1442\\n\" \\\n \"\\n\" \\\n \" global local\\n\" \\\n \" axis1 axis2 axis1 axis2\\n\" \\\n \"Target location: 527.8 513.1 41.8 48.1\\n\" \\\n \"\\n\" \\\n \" axis1 axis2 axis1 axis2 V2 V3\\n\" \\\n \" (pixels) (arcsec) (arcsec)\\n\" \\\n \"Estimated slew: -7.9 -2.9 -0.400 -0.147 -0.387 -0.179\\n\" \\\n \"-------------------------------------------------------------------------------\\n\" \\\n \"Fine locate phase: Target flux in max checkbox (DN): 611\\n\" \\\n \"\\n\" \\\n \" global local\\n\" \\\n \" axis1 axis2 axis1 axis2\\n\" \\\n \"Target location: 534.1 516.1 48.1 51.1\\n\" \\\n \"Ref ap location: 537.5 516.5 19.5 16.5\\n\" \\\n \"\\n\" \\\n \" axis1 axis2 axis1 axis2 V2 V3\\n\" \\\n \" (pixels) (arcsec) (arcsec)\\n\" \\\n \"Estimated slew: -2.1 -0.4 -0.106 -0.020 -0.089 -0.061\\n\" \\\n \"-------------------------------------------------------------------------------\\n\" \\\n \"Total est. slew: -10.0 -3.3 -0.506 -0.168 -0.477 -0.239\\n\" \\\n \"-------------------------------------------------------------------------------\\n\" \\\n \"The fluxes in the maximum checkbox in the fine and coarse stages differ\\n\" \\\n \"by more than 25%. This may indicate a problem with your acquisition.\\n\" \\\n \"\\n\" \\\n \"===============================================================================\\n\"\n\n # Compare results\n outputs = [(\"ocu252cmq_raw.fits\", \"ocu252cmq_raw_ref.fits\")]\n self.compare_outputs(outputs)",
"def test_header_update2(self, capsys):\n\n # Prepare input files.\n self.get_data(\"input\", \"octka7jeq_raw.fits\")\n self.get_data(\"input\", \"octka7jeq_spt.fits\")\n\n capsys.readouterr()\n\n tastis('octka7jeq_raw.fits', update=True)\n\n captured = capsys.readouterr()\n assert captured.out == \"===============================================================================\\n\" \\\n \"octka7jeq HST/STIS G430L 0.2X0.09 ACQ/PEAK-UP\\n\" \\\n \"prop: 14161 visit: A7 line: 2 target: HD-84937\\n\" \\\n \"obs date, time: 2016-05-09 23:15:29 exposure time: 0.20\\n\" \\\n \"dom GS/FGS: N6U6000023F2 sub-dom GS/FGS: N6U7000178F1\\n\" \\\n \"ACQ params: bias sub: 1510 method: MAX-FLUX-CENTROID\\n\" \\\n \"subarray (axis1,axis2): size=(1022,32) corner=(26,500)\\n\" \\\n \"-------------------------------------------------------------------------------\\n\" \\\n \"Scan type: LINEARAXIS1 Step size (mas): 69\\n\" \\\n \"\\n\" \\\n \" [ 0 16309 83580 21884 8029]\\n\" \\\n \"\\n\" \\\n \" axis1 axis2 axis1 axis2 V2 V3\\n\" \\\n \" (pixels) (arcsec) (arcsec)\\n\" \\\n \"Estimated slew: 0.2 0.0 0.010 0.000 0.007 0.007\\n\" \\\n \"Flux in post-slew confirmation image (852814) - Pedestal (791686) = 61128 DN\\n\" \\\n \"-------------------------------------------------------------------------------\\n\" \\\n \"The flux in the confirmation image is only 73% of the maximum flux\\n\" \\\n \"in the ACQ/PEAK scan. Percentages below 80% often indicate problems\\n\" \\\n \"in the ACQ/PEAK.\\n\" \\\n \"\\n\" \\\n \"===============================================================================\\n\"\n\n # Compare results\n outputs = [(\"octka7jeq_raw.fits\", \"octka7jeq_raw_ref.fits\")]\n self.compare_outputs(outputs)",
"def test_table_to_hdu_filter_incompatible(self):\n table = Table(\n [[1, 2, 3], [\"a\", \"b\", \"c\"], [2.3, 4.5, 6.7]],\n names=[\"a\", \"b\", \"c\"],\n dtype=[\"i4\", \"U1\", \"f8\"],\n )\n table.meta.update(\n {\n \"OBSDATE\": \"2001-05-26\",\n \"RAMP\": np.arange(5),\n \"TARGETS\": {\"PRIMARY\": 1, \"SECONDAR\": 3},\n }\n )\n with pytest.warns(\n AstropyUserWarning,\n match=r\"Attribute \\S+ of type \"\n r\".+ cannot be added to FITS Header - skipping\",\n ):\n hdu = fits.table_to_hdu(table)\n\n assert hdu.header.get(\"OBSDATE\") == \"2001-05-26\"\n assert \"RAMP\" not in hdu.header\n assert \"TARGETS\" not in hdu.header",
"def test_header_update7(self, capsys):\n\n # Prepare input files.\n self.get_data(\"input\", \"octr11h4q_raw.fits\")\n self.get_data(\"input\", \"octr11h4q_spt.fits\")\n\n capsys.readouterr()\n\n tastis('octr11h4q_raw.fits', update=True)\n\n captured = capsys.readouterr()\n assert captured.out == \"===============================================================================\\n\" \\\n \"octr11h4q HST/STIS MIRVIS F25ND5 ACQ/POINT\\n\" \\\n \"prop: 14341 visit: 11 line: 1 target: HD128620\\n\" \\\n \"obs date, time: 2016-08-28 19:57:49 exposure time: 0.30\\n\" \\\n \"dom GS/FGS: S7QX000303F1 sub-dom GS/FGS: S7QX000751F2\\n\" \\\n \"ACQ params: bias sub: 1510 checkbox: 3 method: FLUX CENTROID\\n\" \\\n \"subarray (axis1,axis2): size=(100,100) corner=(487,466)\\n\" \\\n \"-------------------------------------------------------------------------------\\n\" \\\n \"Coarse locate phase: Target flux in max checkbox (DN): 278\\n\" \\\n \"\\n\" \\\n \" global local\\n\" \\\n \" axis1 axis2 axis1 axis2\\n\" \\\n \"Target location: 557.0 473.0 71.0 8.0\\n\" \\\n \"\\n\" \\\n \" axis1 axis2 axis1 axis2 V2 V3\\n\" \\\n \" (pixels) (arcsec) (arcsec)\\n\" \\\n \"Estimated slew: 21.3 -43.0 1.080 -2.184 -0.781 2.308\\n\" \\\n \"-------------------------------------------------------------------------------\\n\" \\\n \"Fine locate phase: Target flux in max checkbox (DN): 280\\n\" \\\n \"\\n\" \\\n \" global local\\n\" \\\n \" axis1 axis2 axis1 axis2\\n\" \\\n \"Target location: 547.0 564.0 61.0 99.0\\n\" \\\n \"Ref ap location: 537.6 517.3 19.6 17.3\\n\" \\\n \"\\n\" \\\n \" axis1 axis2 axis1 axis2 V2 V3\\n\" \\\n \" (pixels) (arcsec) (arcsec)\\n\" \\\n \"Estimated slew: 10.6 46.7 0.541 2.372 2.060 -1.295\\n\" \\\n \"-------------------------------------------------------------------------------\\n\" \\\n \"Total est. slew: 31.9 3.7 1.621 0.188 1.279 1.013\\n\" \\\n \"-------------------------------------------------------------------------------\\n\" \\\n \"The fine slew (to center the target in the reference aperture) is larger\\n\" \\\n \"than 4 pixels. This may indicate a problem with your acquisition.\\n\" \\\n \"\\n\" \\\n \"===============================================================================\\n\"\n\n # Compare results\n outputs = [(\"octr11h4q_raw.fits\", \"octr11h4q_raw_ref.fits\")]\n self.compare_outputs(outputs)",
"def test_written_header(demo_data, written_data):\n\n channels=[0, 3]\n # open to get the unfiltered header\n with openEDF(demo_data) as reader:\n header = reader.header\n \n # open written to get the filtered header\n with openEDF(written_data) as reader:\n filtered_header = reader.header\n \n assert filtered_header == header.filter(channels)",
"def test_header(self):\n\n self.assertIsInstance(self.image.header, Header)",
"def test_header_update4(self, capsys):\n\n # Prepare input files.\n self.get_data(\"input\", \"ocui04xeq_raw.fits\")\n self.get_data(\"input\", \"ocui04xeq_spt.fits\")\n\n capsys.readouterr()\n\n tastis('ocui04xeq_raw.fits', update=True)\n\n captured = capsys.readouterr()\n assert captured.out == \"===============================================================================\\n\" \\\n \"ocui04xeq HST/STIS MIRVIS 52X0.1E1 ACQ/PEAK-UP\\n\" \\\n \"prop: 14086 visit: 04 line: 2 target: M62-VLA1\\n\" \\\n \"obs date, time: 2016-07-22 06:10:30 exposure time: 20.00\\n\" \\\n \"dom GS/FGS: S8ES000684F2 sub-dom GS/FGS: S8ES000207F1\\n\" \\\n \"ACQ params: bias sub: 1510 method: MAX-FLUX-CENTROID\\n\" \\\n \"subarray (axis1,axis2): size=(32,32) corner=(524,883)\\n\" \\\n \"-------------------------------------------------------------------------------\\n\" \\\n \"Scan type: LINEARAXIS1 Step size (mas): 75\\n\" \\\n \"\\n\" \\\n \" [17007 5446 1717 993 0]\\n\" \\\n \"\\n\" \\\n \" axis1 axis2 axis1 axis2 V2 V3\\n\" \\\n \" (pixels) (arcsec) (arcsec)\\n\" \\\n \"Estimated slew: -2.6 0.0 -0.132 0.000 -0.093 -0.093\\n\" \\\n \"Flux in post-slew confirmation image (56705) - Pedestal (43530) = 13175 DN\\n\" \\\n \"-------------------------------------------------------------------------------\\n\" \\\n \"The flux in the confirmation image is only 77% of the maximum flux\\n\" \\\n \"in the ACQ/PEAK scan. Percentages below 80% often indicate problems\\n\" \\\n \"in the ACQ/PEAK.\\n\" \\\n \"\\n\" \\\n \"The maximum flux in the sequence occurred at one end.\\n\" \\\n \"This may indicate that the target was beyond that end\\n\" \\\n \"or that a neighboring object affected the acquisition.\\n\" \\\n \"===============================================================================\\n\"\n\n # Compare results\n outputs = [(\"ocui04xeq_raw.fits\", \"ocui04xeq_raw_ref.fits\")]\n self.compare_outputs(outputs)",
"def test_header(demo_data):\n\n pyeeg = pyEDF(demo_data)\n openheader = openHeader(demo_data)\n assert(openheader.version == pyeeg.meas_info['file_ver'])\n assert(openheader.patient == pyeeg.meas_info['subject_id'])\n assert(openheader.recording == pyeeg.meas_info['recording_id'])\n #dates & times in pyedf are not compliant with EDF specs\n pydate = [str(pyeeg.meas_info[x]) for x in ['day', 'month', 'year']]\n pydate = ['0' + x if len(x) < 2 else x for x in pydate]\n assert(openheader.start_date == '.'.join(pydate))\n pytime = [str(pyeeg.meas_info[x]) for x in 'hour minute second'.split()]\n pytime = ['0' + x if len(x) < 2 else x for x in pytime]\n assert openheader.start_time == '.'.join(pytime)\n assert openheader.header_bytes == pyeeg.meas_info['data_offset']\n # pyedf does not handle reserve section correctly. The 44 bytes of this\n # section hold the type of edf file. pyedf uses the file extension if\n # this is empty in the header but this fails to distinguish edf from\n # edf+. We therefore do not compare this field.\n assert openheader.num_records == pyeeg.meas_info['n_records']\n assert openheader.record_duration == pyeeg.meas_info['record_length']\n assert openheader.num_signals == pyeeg.meas_info['nchan']\n assert openheader.names == pyeeg.chan_info['ch_names']\n assert openheader.transducers == pyeeg.chan_info['transducers']\n assert openheader.physical_dim == pyeeg.chan_info['units']\n assert np.allclose(openheader.physical_min, \n pyeeg.chan_info['physical_min'])\n assert np.allclose(openheader.physical_max, \n pyeeg.chan_info['physical_max'])\n assert np.allclose(openheader.digital_min,\n pyeeg.chan_info['digital_min'])\n assert np.allclose(openheader.digital_max, \n pyeeg.chan_info['digital_max'])",
"def test_header_update3(self, capsys):\n\n # Prepare input files.\n self.get_data(\"input\", \"octr11hrq_raw.fits\")\n self.get_data(\"input\", \"octr11hrq_spt.fits\")\n\n capsys.readouterr()\n\n tastis('octr11hrq_raw.fits', update=True)\n\n captured = capsys.readouterr()\n assert captured.out == \"===============================================================================\\n\" \\\n \"octr11hrq HST/STIS G430M 31X0.05NDA ACQ/PEAK-UP\\n\" \\\n \"prop: 14341 visit: 11 line: 9 target: HD128621-2\\n\" \\\n \"obs date, time: 2016-08-28 22:33:14 exposure time: 0.10\\n\" \\\n \"dom GS/FGS: S7QX000303F1 sub-dom GS/FGS: S7QX000751F2\\n\" \\\n \"ACQ params: bias sub: 1510 method: MAX-FLUX-CENTROID\\n\" \\\n \"subarray (axis1,axis2): size=(1022,32) corner=(25,500)\\n\" \\\n \"-------------------------------------------------------------------------------\\n\" \\\n \"Scan type: LINEARAXIS1 Step size (mas): 39\\n\" \\\n \"\\n\" \\\n \" [5478 0 798 3264 4796 1923 4876]\\n\" \\\n \"\\n\" \\\n \" axis1 axis2 axis1 axis2 V2 V3\\n\" \\\n \" (pixels) (arcsec) (arcsec)\\n\" \\\n \"Estimated slew: 0.2 0.0 0.010 0.000 0.007 0.007\\n\" \\\n \"Flux in post-slew confirmation image (882661) - Pedestal (871184) = 11477 DN\\n\" \\\n \"-------------------------------------------------------------------------------\\n\" \\\n \"The flux in the confirmation image is 110% greater than the maximum flux\\n\" \\\n \"in the ACQ/PEAK scan. An excess greater than 100% indicates\\n\" \\\n \"problems in the ACQ/PEAK.\\n\" \\\n \"\\n\" \\\n \"The flux in the confirmation image is 57% of the recommended minimum\\n\" \\\n \"of 20000 DN for a dispersed-light ACQ/PEAK. The signal-to-noise in\\n\" \\\n \"the ACQ/PEAK may be inadequate for an accurate centering.\\n\" \\\n \"\\n\" \\\n \"The maximum flux in the sequence occurred at one end.\\n\" \\\n \"This may indicate that the target was beyond that end\\n\" \\\n \"or that a neighboring object affected the acquisition.\\n\" \\\n \"===============================================================================\\n\"\n\n # Compare results\n outputs = [(\"octr11hrq_raw.fits\", \"octr11hrq_raw_ref.fits\")]\n self.compare_outputs(outputs)",
"def test_fileobj_not_closed(self):\n\n f = open(self.data('test0.fits'), 'rb')\n data = fits.getdata(f)\n assert not f.closed\n\n f.seek(0)\n header = fits.getheader(f)\n assert not f.closed",
"def test_fileobj_not_closed(self):\n\n f = open(self.data(\"test0.fits\"), \"rb\")\n _ = fits.getdata(f)\n assert not f.closed\n\n f.seek(0)\n _ = fits.getheader(f)\n assert not f.closed\n\n f.close() # Close it now",
"def test_header_update6(self, capsys):\n\n # Prepare input files.\n self.get_data(\"input\", \"ocmv0lw6q_raw.fits\")\n self.get_data(\"input\", \"ocmv0lw6q_spt.fits\")\n\n capsys.readouterr()\n\n tastis('ocmv0lw6q_raw.fits', update=True)\n\n captured = capsys.readouterr()\n assert captured.out == \"===============================================================================\\n\" \\\n \"ocmv0lw6q HST/STIS MIRVIS F25ND3 ACQ/POINT\\n\" \\\n \"prop: 13760 visit: 0L line: 1 target: CD-59D3300\\n\" \\\n \"obs date, time: 2016-09-29 23:43:50 exposure time: 1.10\\n\" \\\n \"dom GS/FGS: S4B0000993F2 sub-dom GS/FGS: S4B0000953F1\\n\" \\\n \"ACQ params: bias sub: 1510 checkbox: 3 method: FLUX CENTROID\\n\" \\\n \"subarray (axis1,axis2): size=(100,100) corner=(487,466)\\n\" \\\n \"-------------------------------------------------------------------------------\\n\" \\\n \"Coarse locate phase: Target flux in max checkbox (DN): 1560\\n\" \\\n \"\\n\" \\\n \" global local\\n\" \\\n \" axis1 axis2 axis1 axis2\\n\" \\\n \"Target location: 534.2 507.0 48.2 42.0\\n\" \\\n \"\\n\" \\\n \" axis1 axis2 axis1 axis2 V2 V3\\n\" \\\n \" (pixels) (arcsec) (arcsec)\\n\" \\\n \"Estimated slew: -1.5 -9.0 -0.079 -0.457 -0.379 0.268\\n\" \\\n \"-------------------------------------------------------------------------------\\n\" \\\n \"Fine locate phase: Target flux in max checkbox (DN): 1559\\n\" \\\n \"\\n\" \\\n \" global local\\n\" \\\n \" axis1 axis2 axis1 axis2\\n\" \\\n \"Target location: 534.2 516.8 48.2 51.8\\n\" \\\n \"Ref ap location: 537.5 517.0 19.5 17.0\\n\" \\\n \"\\n\" \\\n \" axis1 axis2 axis1 axis2 V2 V3\\n\" \\\n \" (pixels) (arcsec) (arcsec)\\n\" \\\n \"Estimated slew: -2.1 -0.2 -0.104 -0.010 -0.081 -0.067\\n\" \\\n \"-------------------------------------------------------------------------------\\n\" \\\n \"Total est. slew: -3.6 -9.2 -0.183 -0.467 -0.460 0.201\\n\" \\\n \"-------------------------------------------------------------------------------\\n\" \\\n \"Your ACQ appears to have succeeded, as the fluxes in the coarse\\n\" \\\n \"and fine stages agree within 25% and the fine slews were less than\\n\" \\\n \"4 pixels as expected\\n\" \\\n \"\\n\" \\\n \"===============================================================================\\n\"\n\n # Compare results\n outputs = [(\"ocmv0lw6q_raw.fits\", \"ocmv0lw6q_raw_ref.fits\")]\n self.compare_outputs(outputs)",
"def test_consitency_manual(self):\n name = os.path.basename(self.cbf_filename)\n obj = fabio.open(self.cbf_filename)\n new = fabio.cbfimage.cbfimage(data=obj.data, header=obj.header)\n new.write(os.path.join(self.tempdir, name))\n other = fabio.open(os.path.join(self.tempdir, name))\n self.assertEqual(abs(obj.data - other.data).max(), 0, \"data are the same\")\n for key in obj.header:\n if key in[ \"filename\", \"X-Binary-Size-Padding\"]:\n continue\n self.assertTrue(key in other.header, \"Key %s is in header\" % key)\n self.assertEqual(obj.header[key], other.header[key], \"value are the same for key %s [%s|%s]\" % (key, obj.header[key], other.header[key]))",
"def test_write_bintable(self):\n from ..io.util import write_bintable, fitsheader\n #\n # Input: Table\n #\n hdr = fitsheader(dict(A=1, B=2))\n hdr['C'] = ('BLAT', 'FOO')\n data = Table()\n data['X'] = [1, 2, 3]\n data['Y'] = [3, 4, 5]\n write_bintable(self.testfile, data, header=hdr)\n #\n # Standard suite of table tests.\n #\n result, newhdr = fits.getdata(self.testfile, header=True)\n self.assertEqual(sorted(result.dtype.names), sorted(data.dtype.names))\n for colname in data.dtype.names:\n self.assertTrue(np.all(result[colname] == data[colname]), '{} data mismatch'.format(colname))\n self.assertEqual(newhdr.comments['C'], 'FOO')\n for key in hdr.keys():\n self.assertIn(key, newhdr)\n self.assertIn('DATASUM', newhdr)\n self.assertIn('CHECKSUM', newhdr)\n os.remove(self.testfile)\n #\n # Input: ndarray\n #\n hdr = dict(A=1, B=2)\n data = data.as_array()\n write_bintable(self.testfile, data, header=hdr)\n #\n # Standard suite of table tests.\n #\n result, newhdr = fits.getdata(self.testfile, header=True)\n self.assertEqual(sorted(result.dtype.names), sorted(data.dtype.names))\n for colname in data.dtype.names:\n self.assertTrue(np.all(result[colname] == data[colname]), '{} data mismatch'.format(colname))\n # self.assertEqual(newhdr.comments['C'], 'FOO')\n for key in hdr.keys():\n self.assertIn(key, newhdr)\n self.assertIn('DATASUM', newhdr)\n self.assertIn('CHECKSUM', newhdr)\n os.remove(self.testfile)\n #\n # Input: dictionary\n #\n hdr = dict(A=1, B=2)\n d = dict(X=np.array([1, 2, 3]), Y=np.array([3, 4, 5]))\n write_bintable(self.testfile, d, header=hdr)\n #\n # Standard suite of table tests.\n #\n result, newhdr = fits.getdata(self.testfile, header=True)\n\n self.assertEqual(sorted(result.dtype.names), sorted(data.dtype.names))\n\n for colname in data.dtype.names:\n self.assertTrue(np.all(result[colname] == data[colname]), '{} data mismatch'.format(colname))\n # self.assertEqual(newhdr.comments['C'], 'FOO')\n for key in hdr.keys():\n self.assertIn(key, newhdr)\n self.assertIn('DATASUM', newhdr)\n self.assertIn('CHECKSUM', newhdr)\n os.remove(self.testfile)\n #\n # Input: Table with column comments.\n #\n hdr = fitsheader(dict(A=1, B=2))\n hdr['C'] = ('BLAT', 'FOO')\n data = Table()\n data['X'] = [1, 2, 3]\n data['Y'] = [3, 4, 5]\n write_bintable(self.testfile, data, header=hdr,\n comments={'X': 'This is X', 'Y': 'This is Y'},\n units={'X': 'mm', 'Y': 'mm'})\n #\n # Standard suite of table tests.\n #\n result, newhdr = fits.getdata(self.testfile, header=True)\n self.assertEqual(sorted(result.dtype.names), sorted(data.dtype.names))\n for colname in data.dtype.names:\n self.assertTrue(np.all(result[colname] == data[colname]), '{} data mismatch'.format(colname))\n # self.assertEqual(newhdr.comments['C'], 'FOO')\n for key in hdr.keys():\n self.assertIn(key, newhdr)\n self.assertIn('DATASUM', newhdr)\n self.assertIn('CHECKSUM', newhdr)\n self.assertEqual(newhdr['TTYPE1'], 'X')\n self.assertEqual(newhdr.comments['TTYPE1'], 'This is X')\n self.assertEqual(newhdr['TTYPE2'], 'Y')\n self.assertEqual(newhdr.comments['TTYPE2'], 'This is Y')\n self.assertEqual(newhdr['TUNIT1'], 'mm')\n self.assertEqual(newhdr.comments['TUNIT1'], 'X units')\n self.assertEqual(newhdr['TUNIT2'], 'mm')\n self.assertEqual(newhdr.comments['TUNIT2'], 'Y units')\n #\n # Input: Table with no EXTNAME, existing file\n #\n write_bintable(self.testfile, data, header=hdr)\n #\n # Input: Table with EXTNAME, existing file\n #\n write_bintable(self.testfile, data, header=hdr, extname='FOOBAR')\n #\n # Standard suite of table 
tests.\n #\n result, newhdr = fits.getdata(self.testfile, header=True, extname='FOOBAR')\n self.assertEqual(sorted(result.dtype.names), sorted(data.dtype.names))\n for colname in data.dtype.names:\n self.assertTrue(np.all(result[colname] == data[colname]), '{} data mismatch'.format(colname))\n # self.assertEqual(newhdr.comments['C'], 'FOO')\n for key in hdr.keys():\n self.assertIn(key, newhdr)\n self.assertIn('DATASUM', newhdr)\n self.assertIn('CHECKSUM', newhdr)\n #\n # Input: Table with existing EXTNAME, existing file\n #\n write_bintable(self.testfile, data, header=hdr, extname='FOOBAR')\n #\n # Input: Table with EXTNAME, existing file, overwrite\n #\n write_bintable(self.testfile, data, header=hdr, extname='FOOBAR', clobber=True)",
"def test_process_optional_header_data(self):\n self.assertDictEqual({'external_timestamp': td.external_timestamp()},\n decoder.process_optional_header_data(\n BytesIO(td.external_timestamp(True)),\n decoder.HeaderCodes.HEADER_W_ETS, self.mask))\n self.assertDictEqual({'esums': td.esums(False, True)},\n decoder.process_optional_header_data(BytesIO(td.esums(True)),\n decoder.HeaderCodes.HEADER_W_ESUM,\n self.mask))\n self.assertDictEqual(\n {'external_timestamp': td.external_timestamp(), 'esums': td.esums(False, True)},\n decoder.process_optional_header_data(\n BytesIO(td.esums(True) + td.external_timestamp(True)),\n decoder.HeaderCodes.HEADER_W_ESUM_ETS, self.mask))\n self.assertDictEqual({'qdc': td.qdc()},\n decoder.process_optional_header_data(BytesIO(td.qdc(True)),\n decoder.HeaderCodes.HEADER_W_QDC,\n self.mask))\n self.assertDictEqual({'external_timestamp': td.external_timestamp(), 'qdc': td.qdc()},\n decoder.process_optional_header_data(\n BytesIO(td.qdc(True) + td.external_timestamp(True)),\n decoder.HeaderCodes.HEADER_W_QDC_ETS, self.mask))\n self.assertDictEqual({'esums': td.esums(False, True), 'qdc': td.qdc()},\n decoder.process_optional_header_data(\n BytesIO(td.esums(True) + td.qdc(True)),\n decoder.HeaderCodes.HEADER_W_ESUM_QDC, self.mask))\n self.assertDictEqual({'external_timestamp': td.external_timestamp(), 'qdc': td.qdc(),\n 'esums': td.esums(False, True)}, decoder.process_optional_header_data(\n BytesIO(td.esums(True) + td.qdc(True) + td.external_timestamp(True)),\n decoder.HeaderCodes.HEADER_W_ESUM_QDC_ETS, self.mask))",
"def test_toTable(self):\r\n # Empty results.\r\n out_f = StringIO()\r\n self.res1.toTable(out_f)\r\n self.assertEqual(out_f.getvalue(),\r\n \"SampleID\\tSize\\tEstimate\\tStd Err\\tCI (lower)\\tCI (upper)\\n\")\r\n out_f.close()\r\n\r\n # Results with multiple samples.\r\n exp = \"\"\"SampleID\\tSize\\tEstimate\\tStd Err\\tCI (lower)\\tCI (upper)\r\nS1\\t5\\t21\\t1.5\\t2.5\\t3.5\r\nS1\\t10\\t20\\t2.5\\t2.5\\t3.5\r\nS1\\t20\\t30\\t3.5\\t2.5\\t3.5\r\nS2\\t1\\t3\\t0.4\\t2.5\\t3.5\r\n\"\"\"\r\n out_f = StringIO()\r\n self.res2.toTable(out_f)\r\n self.assertEqual(out_f.getvalue(), exp)\r\n out_f.close()\r\n\r\n # Custom header.\r\n exp = \"\"\"foo\\tbar\\tbaz\\tbazaar\\tbazaaar\\tbazaaaar\r\nS1\\t5\\t21\\t1.5\\t2.5\\t3.5\r\n\"\"\"\r\n out_f = StringIO()\r\n self.res1.addSample('S1', 42)\r\n self.res1.addSampleEstimate('S1', 5, 21, 1.5, 2.5, 3.5)\r\n self.res1.toTable(out_f,\r\n header=['foo', 'bar', 'baz', 'bazaar', 'bazaaar', 'bazaaaar'])\r\n self.assertEqual(out_f.getvalue(), exp)\r\n out_f.close()\r\n\r\n # Invalid header.\r\n with self.assertRaises(ValueError):\r\n out_f = StringIO()\r\n self.res1.toTable(out_f, header=['foo'])\r\n\r\n # Cells with None as their value.\r\n exp = \"\"\"SampleID\\tSize\\tEstimate\\tStd Err\\tCI (lower)\\tCI (upper)\r\nS1\\t43\\tN/A\\tN/A\\tN/A\\tN/A\r\n\"\"\"\r\n out_f = StringIO()\r\n res = RichnessEstimatesResults()\r\n res.addSample('S1', 42)\r\n res.addSampleEstimate('S1', 43, None, None, None, None)\r\n res.toTable(out_f)\r\n self.assertEqual(out_f.getvalue(), exp)\r\n out_f.close()",
"def test_short_header():\n with open(TEST_FILE_DXT5, \"rb\") as f:\n img_file = f.read()\n\n def short_header():\n with Image.open(BytesIO(img_file[:119])):\n pass # pragma: no cover\n\n with pytest.raises(OSError):\n short_header()",
"def test_check_metadata():\n comp = fits.PrimaryHDU()\n comp.header['FILENAME'] = 'something.fits'\n comp.header['EFFEXPTM'] = 14.12\n comp.header['TFRAME'] = 2.35\n comp.header['TGROUP'] = 7.06\n comp.header['NFRAMES'] = 2\n comp.header['NGROUPS'] = 2\n comp.header['SUBARRAY'] = 'FULL'\n\n hdu = fits.PrimaryHDU()\n hdu.header['FILENAME'] = 'something.fits'\n hdu.header['EFFEXPTM'] = 14.12\n hdu.header['TFRAME'] = 2.35\n hdu.header['TGROUP'] = 7.06\n hdu.header['NFRAMES'] = 2\n hdu.header['NGROUPS'] = 2\n hdu.header['SUBARRAY'] = 'SUB640'\n\n # This should raise an exception in check_metadata\n with pytest.raises(Exception) as e_info:\n bpd.check_metadata(hdu.header, comp.header)\n\n # This should not raise an exception\n hdu.header['SUBARRAY'] = 'FULL'\n bpd.check_metadata(hdu.header, comp.header)\n\n # This should also raise an exception\n hdu.header['NFRAMES'] = 4\n with pytest.raises(Exception) as e_info:\n bpd.check_metadata(hdu.header, comp.header)\n\n # This should also raise an exception\n hdu.header['NFRAMES'] = 2\n hdu.header['TGROUP'] = 7.5\n with pytest.raises(Exception) as e_info:\n bpd.check_metadata(hdu.header, comp.header)",
"def test_hsmcatalog():\n if __name__ == '__main__':\n logger = piff.config.setup_logger(verbose=2)\n else:\n logger = piff.config.setup_logger(log_file='output/test_hsmcatalog.log')\n\n image_file = os.path.join('output','test_stats_image.fits')\n cat_file = os.path.join('output','test_stats_cat.fits')\n psf_file = os.path.join('output','test_starstats.fits')\n hsm_file = os.path.join('output', 'test_hsmcatalog.fits')\n config = {\n 'input' : {\n 'image_file_name' : image_file,\n 'cat_file_name' : cat_file,\n 'stamp_size' : 48,\n },\n 'select' : {\n 'reserve_frac' : 0.2,\n 'seed' : 123\n },\n 'psf' : {\n 'model' : { 'type' : 'Gaussian',\n 'fastfit': True,\n 'include_pixel': False },\n 'interp' : { 'type' : 'Mean' },\n },\n 'output' : {\n 'file_name' : psf_file,\n 'stats' : [\n {\n 'type': 'HSMCatalog',\n 'file_name': hsm_file,\n }\n ]\n }\n }\n piff.piffify(config, logger)\n assert os.path.isfile(hsm_file)\n\n data, header = fitsio.read(hsm_file, header=True)\n for col in ['ra', 'dec', 'x', 'y', 'u', 'v',\n 'T_data', 'g1_data', 'g2_data',\n 'T_model', 'g1_model', 'g2_model',\n 'flux', 'reserve', 'flag_data', 'flag_model']:\n assert len(data[col]) == 10\n true_data = fitsio.read(cat_file)\n\n assert header['PIFF_VERSION'] == piff.__version__\n\n np.testing.assert_allclose(data['x'], true_data['x'])\n np.testing.assert_allclose(data['y'], true_data['y'])\n np.testing.assert_allclose(data['flux'], 123.45, atol=0.001)\n print('reserve = ',data['reserve'])\n print('nreserve = ',np.sum(data['reserve']))\n print('ntot = ',len(data['reserve']))\n assert np.sum(data['reserve']) == int(0.2 * len(data['reserve']))\n np.testing.assert_allclose(data['T_model'], data['T_data'], rtol=1.e-4)\n np.testing.assert_allclose(data['g1_model'], data['g1_data'], rtol=1.e-4)\n np.testing.assert_allclose(data['g2_model'], data['g2_data'], rtol=1.e-4)\n\n # On this file, no hsm errors\n np.testing.assert_array_equal(data['flag_data'], 0)\n np.testing.assert_array_equal(data['flag_model'], 0)\n\n image = galsim.fits.read(image_file)\n world = [image.wcs.toWorld(galsim.PositionD(x,y)) for x,y in zip(data['x'],data['y'])]\n np.testing.assert_allclose(data['ra'], [w.ra.deg for w in world], rtol=1.e-4)\n np.testing.assert_allclose(data['dec'], [w.dec.deg for w in world], rtol=1.e-4)\n\n # Repeat with non-Celestial WCS\n wcs = galsim.AffineTransform(0.26, 0.05, -0.08, -0.24, galsim.PositionD(1024,1024))\n config['input']['wcs'] = wcs\n piff.piffify(config, logger)\n data = fitsio.read(hsm_file)\n np.testing.assert_array_equal(data['ra'], 0.)\n np.testing.assert_array_equal(data['dec'], 0.)\n world = [wcs.toWorld(galsim.PositionD(x,y)) for x,y in zip(data['x'],data['y'])]\n np.testing.assert_allclose(data['u'], [w.x for w in world], rtol=1.e-4)\n np.testing.assert_allclose(data['v'], [w.y for w in world], rtol=1.e-4)\n\n # Use class directly, rather than through config.\n psf = piff.PSF.read(psf_file)\n stars, _, _ = piff.Input.process(config['input'])\n stars = piff.Select.process(config['select'], stars)\n hsmcat = piff.stats.HSMCatalogStats()\n with np.testing.assert_raises(RuntimeError):\n hsmcat.write('dummy') # Cannot write before compute\n hsmcat.compute(psf, stars)\n hsm_file2 = os.path.join('output', 'test_hsmcatalog2.fits')\n with np.testing.assert_raises(ValueError):\n hsmcat.write() # Must supply file_name if not given in constructor\n hsmcat.write(hsm_file2)\n data2 = fitsio.read(hsm_file2)\n for key in data.dtype.names:\n np.testing.assert_allclose(data2[key], data[key], rtol=1.e-5)",
"def test_header_update5(self, capsys):\n\n # Prepare input files.\n self.get_data(\"input\", \"ocyw05afq_raw.fits\")\n self.get_data(\"input\", \"ocyw05afq_spt.fits\")\n\n capsys.readouterr()\n\n tastis('ocyw05afq_raw.fits', update=True)\n\n captured = capsys.readouterr()\n assert captured.out == \"===============================================================================\\n\" \\\n \"ocyw05afq HST/STIS G430L 0.2X0.09 ACQ/PEAK-UP\\n\" \\\n \"prop: 14084 visit: 05 line: 2 target: BD-11D916\\n\" \\\n \"obs date, time: 2016-09-22 08:33:17 exposure time: 1.80\\n\" \\\n \"dom GS/FGS: S2AE000156F1 sub-dom GS/FGS: S2AE000086F2\\n\" \\\n \"ACQ params: bias sub: 1510 method: MAX-FLUX-CENTROID\\n\" \\\n \"subarray (axis1,axis2): size=(1022,32) corner=(26,500)\\n\" \\\n \"-------------------------------------------------------------------------------\\n\" \\\n \"Scan type: LINEARAXIS2 Step size (mas): 150\\n\" \\\n \"\\n\" \\\n \" [ 5139 67252 0]\\n\" \\\n \"\\n\" \\\n \" axis1 axis2 axis1 axis2 V2 V3\\n\" \\\n \" (pixels) (arcsec) (arcsec)\\n\" \\\n \"Estimated slew: 0.0 0.0 0.000 0.000 0.000 0.000\\n\" \\\n \"Flux in post-slew confirmation image (907707) - Pedestal (838752) = 68955 DN\\n\" \\\n \"-------------------------------------------------------------------------------\\n\" \\\n \"The confirmation image has a flux between 0.8 and 2.0 times the\\n\" \\\n \"maximum flux in the peakup, which is typical of a successful ACQ/PEAK.\\n\" \\\n \"===============================================================================\\n\"\n\n # Compare results\n outputs = [(\"ocyw05afq_raw.fits\", \"ocyw05afq_raw_ref.fits\")]\n self.compare_outputs(outputs)",
"def test_fc(self):\n self.assertEqual(self.nhf.metadata[\"ndim\"], 3)\n self.assertEqual(self.nhf.metadata[\"ngroup\"], 4)\n self.assertEqual(self.nhf.metadata[\"ninti\"], 5)\n self.assertEqual(self.nhf.metadata[\"nintj\"], 5)\n self.assertEqual(self.nhf.metadata[\"nintk\"], 6)\n self.assertEqual(self.nhf.metadata[\"nSurf\"], 6)\n self.assertEqual(self.nhf.metadata[\"nMom\"], 5)\n self.assertEqual(self.nhf.metadata[\"nintxy\"], 19)\n self.assertEqual(self.nhf.metadata[\"npcxy\"], 144)\n self.assertEqual(self.nhf.metadata[\"iaprx\"], 4)\n self.assertEqual(self.nhf.metadata[\"iaprxz\"], 3)\n\n variantControlInfo = nhflux.FILE_SPEC_1D_KEYS_VARIANT11\n for info in variantControlInfo:\n self.assertTrue(info not in self.nhf.metadata)",
"def test_check_header(self):\r\n\r\n # Default header, should not generate any errors/warnings\r\n header = ['SampleID', 'BarcodeSequence', 'LinkerPrimerSequence',\r\n 'Description']\r\n errors = []\r\n warnings = []\r\n\r\n errors, warnings = check_header(header,\r\n errors,\r\n warnings,\r\n sample_id_ix=0,\r\n desc_ix=3,\r\n bc_ix=1,\r\n linker_primer_ix=2,\r\n added_demultiplex_field=None)\r\n\r\n expected_errors = []\r\n expected_warnings = []\r\n\r\n self.assertEqual(errors, expected_errors)\r\n self.assertEqual(warnings, expected_warnings)",
"def test_header_split(irregular_written_data, split_data):\n \n unsplit_fp, _ = irregular_written_data\n\n with openEDF(unsplit_fp) as reader:\n unsplit_header = reader.header\n\n for fp, indices in split_data.items():\n with openEDF(fp) as reader:\n header = reader.header\n assert header == unsplit_header.filter(indices)",
"def test_consitency_convert(self):\n name = os.path.basename(self.cbf_filename)\n obj = fabio.open(self.cbf_filename)\n new = obj.convert(\"cbf\")\n new.write(os.path.join(self.tempdir, name))\n other = fabio.open(os.path.join(self.tempdir, name))\n self.assertEqual(abs(obj.data - other.data).max(), 0, \"data are the same\")\n for key in obj.header:\n if key in[ \"filename\", \"X-Binary-Size-Padding\"]:\n continue\n self.assertTrue(key in other.header, \"Key %s is in header\" % key)\n self.assertEqual(obj.header[key], other.header[key], \"value are the same for key %s [%s|%s]\" % (key, obj.header[key], other.header[key]))",
"def test_export_raw_edf(tmp_path, dataset, format):\n if dataset == \"test\":\n raw = read_raw_fif(fname_raw)\n elif dataset == \"misc\":\n fname = misc_path / \"ecog\" / \"sample_ecog_ieeg.fif\"\n raw = read_raw_fif(fname)\n\n # only test with EEG channels\n raw.pick_types(eeg=True, ecog=True, seeg=True)\n raw.load_data()\n orig_ch_names = raw.ch_names\n temp_fname = tmp_path / f\"test.{format}\"\n\n # test runtime errors\n with pytest.warns() as record:\n raw.export(temp_fname, physical_range=(-1e6, 0))\n if dataset == \"test\":\n assert any(\"Data has a non-integer\" in str(rec.message) for rec in record)\n assert any(\"The maximum\" in str(rec.message) for rec in record)\n remove(temp_fname)\n\n with pytest.warns() as record:\n raw.export(temp_fname, physical_range=(0, 1e6))\n if dataset == \"test\":\n assert any(\"Data has a non-integer\" in str(rec.message) for rec in record)\n assert any(\"The minimum\" in str(rec.message) for rec in record)\n remove(temp_fname)\n\n if dataset == \"test\":\n with pytest.warns(RuntimeWarning, match=\"Data has a non-integer\"):\n raw.export(temp_fname)\n elif dataset == \"misc\":\n with pytest.warns(RuntimeWarning, match=\"EDF format requires\"):\n raw.export(temp_fname)\n\n if \"epoc\" in raw.ch_names:\n raw.drop_channels([\"epoc\"])\n\n raw_read = read_raw_edf(temp_fname, preload=True)\n assert orig_ch_names == raw_read.ch_names\n # only compare the original length, since extra zeros are appended\n orig_raw_len = len(raw)\n\n # assert data and times are not different\n # Due to the physical range of the data, reading and writing is\n # not lossless. For example, a physical min/max of -/+ 3200 uV\n # will result in a resolution of 0.09 uV. This resolution\n # though is acceptable for most EEG manufacturers.\n assert_array_almost_equal(\n raw.get_data(), raw_read.get_data()[:, :orig_raw_len], decimal=4\n )\n\n # Due to the data record duration limitations of EDF files, one\n # cannot store arbitrary float sampling rate exactly. Usually this\n # results in two sampling rates that are off by very low number of\n # decimal points. This for practical purposes does not matter\n # but will result in an error when say the number of time points\n # is very very large.\n assert_allclose(raw.times, raw_read.times[:orig_raw_len], rtol=0, atol=1e-5)",
"def test_feature_format(X):\r\n print(\"test_feature_format()...\", end = \"\")\r\n for row in range(len(X)):\r\n for col in range(len(X[0])):\r\n assert (isinstance(X[row][col], float) == True)\r\n print(\"Passed!\")",
"def test_raw(pdf, config, hs, exported, tmp_path):\n # rx = 2 if 'linux' in pdf else 0\n pytest.raises(ValueError, read_raw_bti, pdf, \"eggs\", preload=False)\n pytest.raises(ValueError, read_raw_bti, pdf, config, \"spam\", preload=False)\n tmp_raw_fname = tmp_path / \"tmp_raw.fif\"\n ex = read_raw_fif(exported, preload=True)\n ra = read_raw_bti(pdf, config, hs, preload=False)\n assert \"RawBTi\" in repr(ra)\n assert_equal(ex.ch_names[:NCH], ra.ch_names[:NCH])\n assert_array_almost_equal(\n ex.info[\"dev_head_t\"][\"trans\"], ra.info[\"dev_head_t\"][\"trans\"], 7\n )\n assert len(ex.info[\"dig\"]) in (3563, 5154)\n assert_dig_allclose(ex.info, ra.info, limit=100)\n coil1, coil2 = [\n np.concatenate([d[\"loc\"].flatten() for d in r_.info[\"chs\"][:NCH]])\n for r_ in (ra, ex)\n ]\n assert_array_almost_equal(coil1, coil2, 7)\n\n loc1, loc2 = [\n np.concatenate([d[\"loc\"].flatten() for d in r_.info[\"chs\"][:NCH]])\n for r_ in (ra, ex)\n ]\n assert_allclose(loc1, loc2)\n\n assert_allclose(ra[:NCH][0], ex[:NCH][0])\n assert_array_equal(\n [c[\"range\"] for c in ra.info[\"chs\"][:NCH]],\n [c[\"range\"] for c in ex.info[\"chs\"][:NCH]],\n )\n assert_array_equal(\n [c[\"cal\"] for c in ra.info[\"chs\"][:NCH]],\n [c[\"cal\"] for c in ex.info[\"chs\"][:NCH]],\n )\n assert_array_equal(ra._cals[:NCH], ex._cals[:NCH])\n\n # check our transforms\n for key in (\"dev_head_t\", \"dev_ctf_t\", \"ctf_head_t\"):\n if ex.info[key] is None:\n pass\n else:\n assert ra.info[key] is not None\n for ent in (\"to\", \"from\", \"trans\"):\n assert_allclose(ex.info[key][ent], ra.info[key][ent])\n\n # MNE-BIDS needs these\n for key in (\"pdf_fname\", \"config_fname\", \"head_shape_fname\"):\n assert os.path.isfile(ra._raw_extras[0][key])\n\n ra.save(tmp_raw_fname)\n re = read_raw_fif(tmp_raw_fname)\n print(re)\n for key in (\"dev_head_t\", \"dev_ctf_t\", \"ctf_head_t\"):\n assert isinstance(re.info[key], dict)\n this_t = re.info[key][\"trans\"]\n assert_equal(this_t.shape, (4, 4))\n # check that matrix by is not identity\n assert not np.allclose(this_t, np.eye(4))",
"def test_time_to_fits_header(self, table_types):\n t = table_types()\n t['a'] = Time(self.time, format='isot', scale='utc',\n location=EarthLocation(-2446354,\n 4237210, 4077985, unit='m'))\n t['b'] = Time([1,2], format='cxcsec', scale='tt')\n\n ideal_col_hdr = {'OBSGEO-X' : t['a'].location.x.value,\n 'OBSGEO-Y' : t['a'].location.y.value,\n 'OBSGEO-Z' : t['a'].location.z.value}\n\n table, hdr = time_to_fits(t)\n\n # Check the global time keywords in hdr\n for key, value in GLOBAL_TIME_INFO.items():\n assert hdr[key] == value[0]\n assert hdr.comments[key] == value[1]\n hdr.remove(key)\n\n for key, value in ideal_col_hdr.items():\n assert hdr[key] == value\n hdr.remove(key)\n\n # Check the column-specific time metadata\n coord_info = table.meta['__coordinate_columns__']\n for colname in coord_info:\n assert coord_info[colname]['coord_type'] == t[colname].scale.upper()\n assert coord_info[colname]['coord_unit'] == 'd'\n\n assert coord_info['a']['time_ref_pos'] == 'TOPOCENTER'\n\n assert len(hdr) == 0",
"def define_testdata():\n wata_dict = {\n # info taken from main_hdr dict\n 'filename': ['jw09999001001_02101_00001_nrs1_uncal.fits'],\n 'date_obs': ['2022-06-22'],\n 'visit_id': ['V09999001001P0000000002101'],\n 'tafilter': ['F110W'],\n 'detector': ['NRS1'],\n 'readout': ['NRSRAPID'],\n 'subarray': ['FULL'],\n # info taken from ta_hdr dict\n 'ta_status': ['SUCCESSFUL'],\n 'status_reason': ['-999'],\n 'star_name': ['-999'],\n 'star_ra': [-999.0],\n 'star_dec': [-999.0],\n 'star_mag': [-999.0],\n 'star_catalog': [-999],\n 'planned_v2': [-999.0],\n 'planned_v3': [-999.0],\n 'stamp_start_col': [-999],\n 'stamp_start_row': [-999],\n 'star_detector': ['-999'],\n 'max_val_box': [-999.0],\n 'max_val_box_col': [-999.0],\n 'max_val_box_row': [-999.0],\n 'iterations': [-999],\n 'corr_col': [-999.0],\n 'corr_row': [-999.0],\n 'stamp_final_col': [-999.0],\n 'stamp_final_row': [-999.0],\n 'detector_final_col': [-999.0],\n 'detector_final_row': [-999.0],\n 'final_sci_x': [-999.0],\n 'final_sci_y': [-999.0],\n 'measured_v2': [-999.0],\n 'measured_v3': [-999.0],\n 'ref_v2': [-999.0],\n 'ref_v3': [-999.0],\n 'v2_offset': [-999.0],\n 'v3_offset': [-999.0],\n 'sam_x': [-999.0],\n 'sam_y': [-999.0],\n }\n # create the additional arrays\n bool_status, status_colors = [], []\n for tas, do_str in zip(wata_dict['ta_status'], wata_dict['date_obs']):\n if 'unsuccessful' not in tas.lower():\n bool_status.append(1)\n status_colors.append('blue')\n else:\n bool_status.append(0)\n status_colors.append('red')\n\n # add these to the bokeh data structure\n wata_dict['ta_status_bool'] = bool_status\n wata_dict['status_colors'] = status_colors\n\n # create the dataframe\n wata_data = pd.DataFrame(wata_dict)\n return wata_data"
] |
[
"0.6470424",
"0.6407262",
"0.6279246",
"0.6262113",
"0.62359947",
"0.62172025",
"0.61920476",
"0.6178004",
"0.6162437",
"0.61546427",
"0.613669",
"0.6111256",
"0.6099731",
"0.60726297",
"0.6065509",
"0.60646486",
"0.6034603",
"0.60217834",
"0.6021296",
"0.58954805",
"0.58918744",
"0.5883912",
"0.5837612",
"0.5829198",
"0.58290964",
"0.5825792",
"0.58081543",
"0.5793762",
"0.57877684",
"0.5746622"
] |
0.71362966
|
0
|
Given a pandas Series, generate a descriptive visualisation consisting of a boxplot and a histogram with a KDE. By default, this function drops `nan` values; if you want to handle them differently, do so beforehand and/or pass dropna=False.
|
from typing import NoReturn

import matplotlib.pyplot as plt
import pandas as pd
import scipy.stats.mstats
import seaborn as sns


def dist_plot(series: pd.Series, dropna: bool = True) -> NoReturn:
    # Drop missing values unless the caller wants to handle them explicitly.
    if dropna:
        series = series.dropna()
    # Quantiles used as x-axis tick positions on the histogram.
    quarts = scipy.stats.mstats.mquantiles(series, [0.001, 0.25, 0.5, 0.75, 0.975])
    # Boxplot on top (25% of the height), histogram + KDE below, sharing the x-axis.
    f, (ax_box, ax_hist) = plt.subplots(
        2, sharex=True, gridspec_kw={"height_ratios": (0.25, 0.75)}
    )
    sns.boxplot(series, ax=ax_box)
    sns.stripplot(series, color="orange", jitter=0.2, size=2.5, ax=ax_box)
    # distplot is deprecated in recent seaborn; histplot(series, kde=True) is the modern equivalent.
    sns.distplot(series, ax=ax_hist, kde=True)
    ax_hist.axvline(series.mean())  # mark the mean on the histogram
    ax_hist.set_xticks(quarts)
    # ax_box.set(xlabel=f'Mean value : {int(series.mean())}')
    plt.title(
        f"Glycaemic Distribution μ = {int(series.mean())}, σ = {int(series.std())}"
    )
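
A minimal usage sketch (not part of the original record; the synthetic glucose-like data, the seed, and the name `glycaemia` are illustrative assumptions):

import numpy as np

# Hypothetical example data: a synthetic glycaemia series with a few missing values.
rng = np.random.default_rng(0)
glycaemia = pd.Series(rng.normal(loc=110, scale=25, size=500))
glycaemia.iloc[::50] = np.nan  # injected NaNs are dropped by dist_plot by default

dist_plot(glycaemia)  # boxplot + strip plot on top, histogram with KDE below
plt.show()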
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def visualization(data):\n\t# preview top 5 row of data\n\tprint(\"\\n--------Data preview--------\\n{0}\"\n\t\t .format(data.head()))\n\tprint(\"\\nNull value status as follow:\\n{0}\".format(data.isnull().sum()))\n\tcols = [col for col in data.columns]\n\tprint(\"\\nNumber of original features: {0}\".format(len(cols)))\n\tprint(\"\\nFeatures types:\\n{0}\".format(data[cols].dtypes.value_counts()))\n\n\tcounts = [[], [], []]\n\tfor col in cols:\n\t\t# the data type of each feature\n\t\ttyp = data[col].dtype\n\t\t# the number of differents value in each feature\n\t\tuniq = len(np.unique(data[col]))\n\t\t# constant value feature\n\t\tif uniq == 1:\n\t\t\tcounts[0].append(col)\n\t\t# binary value feature\n\t\telif uniq == 2 and typ == np.int64:\n\t\t\tcounts[1].append(col)\n\t\t# multiple value feature\n\t\telse:\n\t\t\tcounts[2].append(col)\n\n\tprint('\\nConstant features: {}\\nBinary features: {} \\nCategorical features: {}\\n'.format(*[len(c) for c in counts]))\n\tprint('Constant features:', counts[0])\n\tprint('Binary features:', counts[1])\n\tprint('Categorical features:', counts[2])\n\n\tfig, axes = plt.subplots(2,2)\n\tfig.set_size_inches(12, 10)\n\tsn.boxplot(data=data,y=\"count\",orient=\"v\",ax=axes[0][0])\n\tsn.boxplot(data=data,y=\"count\",x=\"season\",orient=\"v\",ax=axes[0][1])\n\tsn.boxplot(data=data,y=\"count\",x=\"hour\",orient=\"v\",ax=axes[1][0])\n\tsn.boxplot(data=data,y=\"count\",x=\"workingday\",orient=\"v\",ax=axes[1][1])\n\n\taxes[0][0].set(ylabel='Count',title=\"Box Plot On Count\")\n\taxes[0][1].set(xlabel='Season', ylabel='Count',title=\"Box Plot On Count Across Season\")\n\taxes[1][0].set(xlabel='Hour Of The Day', ylabel='Count',title=\"Box Plot On Count Across Hour Of The Day\")\n\taxes[1][1].set(xlabel='Working Day', ylabel='Count',title=\"Box Plot On Count Across Working Day\")\n\tplt.show()\n\n\tfig,(ax1,ax2,ax3,ax4)= plt.subplots(nrows=4)\n\tfig.set_size_inches(12,20)\n\tsortOrder = [1,2,3,4,5,6,7,8,9,10,11,12]\n\thueOrder = [\"Sunday\",\"Monday\",\"Tuesday\",\"Wednesday\",\"Thursday\",\"Friday\",\"Saturday\"]\n\n\tmonthAggregated = pd.DataFrame(data.groupby(\"month\")[\"count\"].mean()).reset_index()\n\tmonthSorted = monthAggregated.sort_values(by=\"count\",ascending=False)\n\tsn.barplot(data=monthSorted,x=\"month\",y=\"count\",ax=ax1,order=sortOrder)\n\tax1.set(xlabel='Month', ylabel='Avearage Count',title=\"Average Count By Month\")\n\n\thourAggregated = pd.DataFrame(data.groupby([\"hour\",\"season\"],sort=True)[\"count\"].mean()).reset_index()\n\tsn.pointplot(x=hourAggregated[\"hour\"], y=hourAggregated[\"count\"],hue=hourAggregated[\"season\"],\n\t data=hourAggregated, join=True,ax=ax2)\n\tax2.set(xlabel='Hour Of The Day', ylabel='Users Count',\n\t title=\"Average Users Count By Hour Of The Day Across Season\",label='big')\n\n\thourAggregated = pd.DataFrame(data.groupby([\"hour\",\"weekday\"],sort=True)[\"count\"].mean()).reset_index()\n\tsn.pointplot(x=hourAggregated[\"hour\"], y=hourAggregated[\"count\"],hue=hourAggregated[\"weekday\"],hue_order=hueOrder,\n\t data=hourAggregated, join=True,ax=ax3)\n\tax3.set(xlabel='Hour Of The Day', ylabel='Users Count',\n\t title=\"Average Users Count By Hour Of The Day Across Weekdays\",label='big')\n\n\thourTransformed = pd.melt(data[[\"hour\",\"casual\",\"registered\"]], id_vars=['hour'], value_vars=['casual', 'registered'])\n\thourAggregated = pd.DataFrame(hourTransformed.groupby([\"hour\",\"variable\"],sort=True)[\"value\"].mean()).reset_index()\n\tsn.pointplot(x=hourAggregated[\"hour\"], 
y=hourAggregated[\"value\"],hue=hourAggregated[\"variable\"],\n\t hue_order=[\"casual\",\"registered\"], data=hourAggregated, join=True,ax=ax4)\n\tax4.set(xlabel='Hour Of The Day', ylabel='Users Count',\n\t title=\"Average Users Count By Hour Of The Day Across User Type\",label='big')\n\tplt.show()",
"def show_cleaned_vis(data, x, y = 'price', categorical = False, kde = True):\n\n ### Filter outliers first\n \n idx_out = find_outliers_IQR(data[x])\n \n df_cleaned = data[~idx_out].copy()\n\n ### Plot Data\n \n df_cleaned.value_counts().sort_index()\n \n fig, axs = plt.subplots(ncols=2, figsize= (12,6))\n \n sns.regplot(data=df_cleaned, x=x, y=y, ax=axs[0],line_kws={\"color\": \"red\"})\n sns.histplot(data=df_cleaned, x=x, discrete=categorical, kde=kde, ax=axs[1])\n \n fig.suptitle(f'{x.title()} vs. {y.title()}', fontsize=16)\n plt.tight_layout();\n \n return #df_cleaned",
"def build_box_plots(dep_vars, categorical, combined_data, categ_corr_data, drop_outliers=True):\n\n for column in dep_vars.columns:\n num_rows = len(params.t_categorical) + len(params.anova_categorical)\n fig = make_subplots(rows=num_rows, cols=1, subplot_titles=[i for i in range(num_rows)])\n row_num = 1\n for value in categorical.columns:\n box_data = pd.DataFrame({\"Cat\": combined_data[value],\n \"Var\": combined_data[column]})\n if drop_outliers:\n var_mean = box_data[\"Var\"].mean()\n var_std = box_data[\"Var\"].std()\n var_min = var_mean - 3 * var_std\n var_max = var_mean + 3 * var_std\n box_data = box_data.loc[(var_min < box_data[\"Var\"]) &\n (box_data[\"Var\"] < var_max)]\n # Calculating medians and sorting the list\n box_data = box_data.set_index(pd.Series([i for i in range(len(box_data))]))\n box_data.dropna(inplace=True)\n sorted_med = box_data.loc[:, [\"Cat\", \"Var\"]].groupby([\"Cat\"]).median().sort_values(by=\"Var\")\n fig.add_trace(go.Box(y=box_data[\"Var\"],\n x=box_data[\"Cat\"]),\n row=row_num,\n col=1)\n fig.update_xaxes(title_text=column, row=row_num, col=1, type=\"category\",\n categoryorder=\"array\", categoryarray=sorted_med.index)\n fig.update_yaxes(title_text=value, row=row_num, col=1)\n fig.update_layout(showlegend=False)\n p_value = categ_corr_data.loc[value, column]\n power = categ_corr_data.loc[value, f'{column}_power']\n effect_size = categ_corr_data.loc[value, f'{column}_effect_size']\n text = f\"{p_value:.4f}, power: {power:.4f}, effect_size: {effect_size:.4f}, {len(box_data)}\"\n if row_num == 1:\n fig.update_layout(title={\"text\": text})\n else:\n fig.layout.annotations[row_num - 2].update(text=text)\n row_num = row_num + 1\n fig.update_layout(height=3600, width=600, plot_bgcolor='rgba(0,0,0,0)')\n fig.write_html(f'figures/box_{column}_{time.time()}.html', auto_open=True)\n # fig.show()",
"def box_plot():\n # download the data from Kaggle: https://www.kaggle.com/camnugent/california-housing-prices/download\n\n df = pd.read_csv('./housing.csv')\n df = df.dropna()\n\n plt.figure(figsize=(10, 6))\n sns.boxplot(data=df, x='ocean_proximity', y='median_house_value', palette='viridis')\n plt.title('Box plot demo w/ Kaggle house pricing data')\n plt.show()\n\n return None",
"def plot_bv_strip(df, xcolname, ycolname, icol=1):\n # set plot size\n fig, ax = plt.subplots(figsize=(8,6))\n \n # plotting... box+kde\n sns.stripplot(ax=ax, data = df\n , x = str(xcolname)\n , y = str(ycolname)\n , color = sns.color_palette()[icol]);\n \n \n # title and labels\n plt.title(xcolname+' Vs '+ycolname, fontsize=20)\n plt.xlabel(xcolname+ ' (units)', fontsize=16)\n plt.ylabel(ycolname+ ' (units)', fontsize=16)\n \n return plt.show()",
"def feature_vis(data, x, y = 'price', categorical = False, kde = True):\n\n print(data[x].value_counts().sort_index())\n \n fig, axs = plt.subplots(ncols=2, figsize= (12,6))\n \n sns.regplot(data=data, x=x, y=y, ax=axs[0])\n sns.histplot(data=data, x=x, discrete=categorical, kde=kde, ax=axs[1])\n \n fig.suptitle(f'{x.title()} vs. {y.title()}', fontsize=16)\n plt.tight_layout();\n \n return",
"def display_dist(df, label):\n # print(sns.__version__)\n\n #TODO: make a categorical version of this?\n\n sns.set(style=\"white\", palette=\"pastel\")\n fig, axes = plt.subplots(2, 1, figsize=(6, 6)) # 6, 2 if OSA CSA\n\n axes[1].get_shared_x_axes().join(axes[1], axes[0])\n axes[1].set_aspect(aspect=25)\n\n sns.distplot(df[label], ax=axes[0], kde=False, norm_hist=False, color='teal')\n sns.boxplot(data=df, x=label, ax=axes[1], color='skyblue')\n\n sns.despine(ax=axes[0], top=True, bottom=True, right=True)\n sns.despine(ax=axes[1], top=True, left=True, right=True)\n\n axes[0].set_xlabel(\"\")\n axes[0].set_ylabel(\"Count per bin\", fontsize='large')\n\n row_label = \"{lab}\\nMean: {mean:.1f}, Std Dev: {std:.1f}\\nMedian: {med:.1f}, IQR: [{lower:.1f}, {upper:.1f}]\\nCount: {count:.0f}\"\\\n .format(lab=label, mean=df[label].describe()['mean'], std=df[label].describe()['std'],\n med=df[label].describe()['50%'], lower=df[label].describe()['25%'], upper=df[label].describe()['75%'],\n count=df[label].describe()['count'])\n\n axes[1].set_xlabel(row_label, fontsize='large')\n axes[1].set(xlim=(0, None))\n\n fig.suptitle(\"Distribution of \" + str(label), fontsize='xx-large')\n fig.tight_layout(rect=[0, 0, 1, .9]) # .95 to leave space for title\n fig.savefig('Display Dist ' + str(label) + '.png', dpi=100)",
"def check_outliers(df, col, cat):\n \n if len(cat) == 0:\n boxplot = df.boxplot(column=[col], rot = 90)\n else:\n for c in cat:\n boxplot = df.boxplot(column=[col], by=[c], rot = 90)",
"def simplePlots() -> None:\r\n \r\n # Univariate data -------------------------\r\n \r\n # Make sure that always the same random numbers are generated\r\n np.random.seed(1234)\r\n \r\n # Generate data that are normally distributed\r\n x = np.random.randn(500)\r\n \r\n # Other graphics settings\r\n # Set \" context='poster' \" for printouts, and \"set_fonts(32)\"\r\n sns.set(context='notebook', style='ticks', palette='muted')\r\n \r\n # Set the fonts the way I like them\r\n set_fonts(16)\r\n \r\n # Scatter plot\r\n plt.plot(x, '.', markersize=7)\r\n plt.xlim([0, len(x)])\r\n \r\n # Save and show the data, in a systematic format\r\n printout('scatterPlot.jpg', xlabel='Datapoints', ylabel='Values', title='Scatter')\r\n \r\n # Histogram\r\n plt.hist(x)\r\n printout('histogram_plain.jpg', xlabel='Data Values',\r\n ylabel='Frequency', title='Histogram, default settings')\r\n \r\n plt.hist(x, 25, density=True)\r\n printout('density_histogram.jpg', xlabel='Data Values', ylabel='Probability',\r\n title='Density Histogram, 25 bins')\r\n \r\n # Boxplot\r\n # The ox consists of the first, second (middle) and third quartile\r\n set_fonts(18)\r\n plt.boxplot(x, sym='*')\r\n printout('boxplot.jpg', xlabel='Values', title='Boxplot')\r\n \r\n plt.boxplot(x, sym='*', vert=False)\r\n plt.title('Boxplot, horizontal')\r\n plt.xlabel('Values')\r\n plt.show()\r\n \r\n # Errorbars\r\n x = np.arange(5)\r\n y = x**2\r\n errorBar = x/2\r\n plt.errorbar(x,y, yerr=errorBar, fmt='o', capsize=5, capthick=3)\r\n plt.xlim([-0.2, 4.2])\r\n plt.ylim([-0.2, 19])\r\n printout('Errorbars.jpg', xlabel='Data Values', ylabel='Measurements', title='Errorbars')\r\n\r\n # SD for two groups\r\n weight = {'USA':89, 'Austria':74}\r\n weight_SD_male = 12\r\n plt.errorbar([1,2], weight.values(), yerr=weight_SD_male * np.r_[1,1],\r\n capsize=5, LineStyle='', marker='o')\r\n plt.xlim([0.5, 2.5])\r\n plt.xticks([1,2], weight.keys())\r\n plt.ylabel('Weight [kg]')\r\n plt.title('Adult male, mean +/- SD')\r\n\r\n show_data('SD_groups.jpg', out_dir='.')\r\n \r\n # Barplot\r\n # The font-size is set such that the legend does not overlap with the data\r\n np.random.seed(1234)\r\n set_fonts(16)\r\n \r\n df = pd.DataFrame(np.random.rand(7, 3), columns=['one', 'two', 'three'])\r\n df.plot(kind='bar', grid=False, color=sns.color_palette('muted'))\r\n \r\n show_data('barplot.jpg')\r\n\r\n # Bivariate Plots\r\n df2 = pd.DataFrame(np.random.rand(50, 3), columns=['a', 'b', 'c'])\r\n df2.plot(kind='scatter', x='a', y='b', s=df2['c']*500);\r\n plt.axhline(0, ls='--', color='#999999')\r\n plt.axvline(0, ls='--', color='#999999')\r\n printout('bivariate.jpg')\r\n \r\n sns.set_style('ticks')\r\n\r\n # Pieplot\r\n txtLabels = 'Cats', 'Dogs', 'Frogs', 'Others'\r\n fractions = [45, 30, 15, 10]\r\n offsets =(0, 0.05, 0, 0)\r\n \r\n plt.pie(fractions, explode=offsets, labels=txtLabels,\r\n autopct='%1.1f%%', shadow=True, startangle=90,\r\n colors=sns.color_palette('muted') )\r\n plt.axis('equal')\r\n printout('piePlot.jpg', title=' ')",
"def plot_distribution(kind_, df, *col):\n \n if kind_ == 'box':\n \n if len(col) == 1:\n boxplot = df.boxplot(column = col[0], rot = 90)\n plt.show()\n \n elif len(col) > 1:\n for c in col[1:]:\n boxplot = df.boxplot(column = col[0], by = c, rot = 90)\n plt.show()\n \n else:\n if len(col) == 0:\n df.plot(kind = kind_)\n plt.show()\n \n elif len(col) == 1:\n df[col[0]].plot(kind = kind_)\n plt.xlabel(col[0])\n plt.show()\n \n elif len(col) == 2:\n df.plot(x = col[0], y = col[1], kind = kind_)\n plt.xlabel(col[0])\n plt.ylabel(col[1])\n plt.show()\n \n else:\n print(\"Unable to plot a chart with given parameters.\")",
"def density(categorical_var, numerical_var):\n #print(categorical_var)\n cat_list = categorical_var.astype('category')\n for cat in cat_list:\n sns.kdeplot(numerical_var[categorical_var == cat], label=cat)#, categorical_var)\n\n plt.show()",
"def plot_variation_distn(gene_vars: pd.DataFrame):\n plt.hist(gene_vars.median(axis=1), bins=100, alpha=0.4, label='median')\n plt.hist(gene_vars.mean(axis=1), bins=100, alpha=0.4, label='mean')\n plt.legend()",
"def boxplot_2_features(df, x, y, ylim_i = 0, set_y_limit = False, order_boxplot = False, print_value = False, num_label = 1, save_plot = False, path_dir = None):\n \n value_counts_temp = df[x].value_counts()\n sns.set(font_scale=2)\n f, ax = plt.subplots(figsize=(18, 7));\n if order_boxplot :\n plot =sns.boxplot(x=x, y=y, data=df, order = value_counts_temp.index)\n else:\n plot =sns.boxplot(x=x, y=y, data=df) \n ax.set_title('Boxplot of {} group by {}'.format(y, x));\n plt.xticks(rotation=90);\n if set_y_limit:\n ax.set_ylim(0, ylim_i);\n for ind, label in enumerate(plot.get_xticklabels()):\n if ind % num_label == 0: # every 15th label is kept\n label.set_visible(True)\n else:\n label.set_visible(False)\n if print_value :\n print(value_counts_temp)\n if save_plot == True:\n plt.savefig((plot_dir + \"boxplot\"+str(y)+\"per _\"+str(x)+\".png\"))\n plt.clf()",
"def plot_boxplot(df):\n\n\t# if graph is being plotted use this style\n\tplt.style.use('seaborn-darkgrid')\n\n\t# create a horizontal boxplot with title and axes of infant mortality\n\tboxplot = df.boxplot(column='Infant mortality (per 1000 births)', vert = False, rot=90)\n\tboxplot.set_title('Infant Mortality')\n\tplt.axis([0, 200, None, None])\n\tplt.show()",
"def plot_category_boxplots(\n df: pd.DataFrame,\n figsize: tuple,\n length: int = None,\n method: str = None,\n) -> sns.boxplot:\n\n # check if the method input is valid\n if method not in [\"median\", \"mean\", \"quantile75\", None]:\n print(\"Not a valid method.\")\n print(\"Valid methods are 'median', 'mean' and 'quantile75'.\\n\")\n return\n\n # create column list that contains only quantitative features\n quanti_cols_list, quali_col = create_quanti_cols(df)\n\n # create figure and axes for the subplots\n fig, axes = plt.subplots(len(quanti_cols_list), 1, figsize=figsize)\n\n # iterate over quantiative features\n # and plot boxplots against category\n for index, value in enumerate(quanti_cols_list):\n # Determine the order of boxes by median\n if method == \"median\":\n order = (\n df.groupby(by=[quali_col])[value]\n .median()\n .sort_values(ascending=False)\n .index\n )\n # Determine the order of boxes by mean\n elif method == \"mean\":\n order = (\n df.groupby(by=[quali_col])[value]\n .mean()\n .sort_values(ascending=False)\n .index\n )\n # Determine the order of boxes by quantile 75%\n elif method == \"quantile75\":\n order = (\n df.groupby(by=[quali_col])[value]\n .quantile(0.75)\n .sort_values(ascending=False)\n .index\n )\n else:\n order = None\n\n # plot boxplot\n sns.boxplot(\n y=df[quali_col],\n x=df[value],\n order=order[:length],\n showfliers=False,\n ax=axes[index],\n )\n\n plt.tight_layout()\n return",
"def descriptive_plot(data_onlyDV):\n outcome = data_onlyDV.columns.values[0] # get the outcome column name\n\n fig = plt.figure()\n # TODO: subplots appear in same frame instead of 3 separate ones (!!!)\n ax1 = fig.add_subplot(121)\n ax1 = data_onlyDV.plot(kind='hist', title=\"Histogram: \"+outcome, by=outcome)\n ax1.locator_params(axis='x', nbins=4)\n ax1.set_xlabel(outcome+\" bins\")\n ax1.set_ylabel(\"Num Instances\")\n\n ax2 = fig.add_subplot(122)\n ax2 = data_onlyDV.plot(kind='kde', title=\"KDE Density Plot: \"+outcome)\n\n fig.tight_layout()\n plt.show()",
"def visualize_outliers(df, var):\n import pandas as pd\n import numpy as np\n import matplotlib.pyplot as plt\n \n num_var = df.groupby(var)[var].count() \n total = np.float(len(df))\n \n var_perc = num_var / total \n \n var_perc.plot.bar()\n plt.ylabel('Percentage of observations per label')\n plt.title(var)\n \n return plt.show()",
"def boxplot1(S, X_Labels=[], Y_Label='Sensitivity',\r\n S_lb=np.array([]), S_ub=np.array([])):\r\n\r\n # Options for the graphic:\r\n pltfont = {'fontname': 'Bitstream Vera Sans', 'fontsize': 15} # font\r\n dh = 0.40 # semi-width of the box\r\n dv = 0.01 # semi-height of the box for deterministic value (no bootstrap)\r\n dv = 0.005 # semi-height of the box for bootstrap mean\r\n\r\n # Options for the colours:\r\n ec = 'k' # color of edges\r\n # You can produce a coloured plot or a black and white one\r\n # (printer-friendly). Furthermore, you can use matplotlib colourmaps or\r\n # repeat 5 'easy-to-distinguish' colours (see http://colorbrewer2.org/).\r\n # The variable 'col' must be a np.ndarray\r\n # Option 1a - coloured using colorbrewer: uncomment the following line:\r\n col = np.array([[228, 26, 28], [55, 126, 184], [77, 175, 74],\r\n [152, 78, 163], [255, 127, 0]])/256\r\n # Option 1b - coloured using matplotlib colormap: uncomment the following lines:\r\n # colorscale = plt.cm.jet\r\n # col = colorscale(np.linspace(0, 1, 5))\r\n # Option 1a - B&W using matlab colorbrewer: uncomment the following line:\r\n # col = np.array([[37, 37, 37], [90, 90, 90], [150, 150, 150],\r\n # [189, 189, 189], [217, 217, 217]])/256\r\n # Option 1b - B&W using matlab colormap: uncomment the following lines:\r\n # colorscale = plt.cm.gray\r\n # col = colorscale(np.linspace(0, 1, 5))\r\n\r\n ###########################################################################\r\n # Check inputs\r\n ###########################################################################\r\n if not isinstance(S, np.ndarray):\r\n raise ValueError('\"S\" must be a numpy.array.')\r\n if S.dtype.kind != 'f' and S.dtype.kind != 'i' and S.dtype.kind != 'u':\r\n raise ValueError('\"S\" must contain floats or integers.')\r\n\r\n Ns = S.shape\r\n if len(Ns) > 1:\r\n raise ValueError('\"S\" must be of size (M, ).')\r\n M = Ns[0]\r\n\r\n ###########################################################################\r\n # Check optional inputs\r\n ###########################################################################\r\n\r\n if not X_Labels:\r\n X_Labels = [np.nan]*M\r\n for i in range(M):\r\n X_Labels[i] = 'X' + str(i+1)\r\n else:\r\n if not isinstance(X_Labels, list):\r\n raise ValueError('\"X_Labels\" must be a list with M elements.')\r\n if not all(isinstance(i, str) for i in X_Labels):\r\n raise ValueError('Elements in \"X_Labels\" must be strings.')\r\n if len(X_Labels) != M:\r\n raise ValueError('\"X_Labels\" must have M elements.')\r\n\r\n if not isinstance(Y_Label, str):\r\n raise ValueError('\"str_legend\" must be a string.')\r\n\r\n if len(S_lb) != 0:\r\n if np.isnan(S_lb).any():\r\n S_lb = np.array([])\r\n else:\r\n if not isinstance(S_lb, np.ndarray):\r\n raise ValueError('\"S_lb\" must be a numpy.array.')\r\n if S_lb.dtype.kind != 'f' and S_lb.dtype.kind != 'i' and S_lb.dtype.kind != 'u':\r\n raise ValueError('\"S_lb\" must contain floats or integers.')\r\n S_lb = S_lb.flatten()\r\n Ns_lb = S_lb.shape\r\n if Ns_lb[0] != M:\r\n raise ValueError('\"S\" and \"S_lb\" must have the same number of elements')\r\n if (S_lb-S > 0).any():\r\n raise ValueError('\"S_lb\" must be lower or equal to S.')\r\n\r\n if len(S_ub) != 0:\r\n if np.isnan(S_ub).any():\r\n S_ub = np.array([])\r\n else:\r\n if not isinstance(S_ub, np.ndarray):\r\n raise ValueError('\"S_ub\" must be a numpy.array.')\r\n if S_ub.dtype.kind != 'f' and S_ub.dtype.kind != 'i' and S_ub.dtype.kind != 'u':\r\n raise ValueError('\"S_ub\" must contain floats or 
integers.')\r\n\r\n if (S_ub-S < 0).any():\r\n raise ValueError('\"S_ub\" must be higher or equal to S.')\r\n S_ub = S_ub.flatten()\r\n Ns_ub = S_ub.shape\r\n if Ns_ub[0] != M:\r\n raise ValueError('\"S\" and \"S_ub\" must have the same number of elements')\r\n\r\n ###########################################################################\r\n # Produce plots\r\n ###########################################################################\r\n A = len(col)\r\n L = int(np.ceil(M/A))\r\n clrs = repmat(col, L, 1)\r\n\r\n # Plot on curent figure\r\n if plt.get_fignums(): # if there is a figure recover axes of current figure\r\n ax = plt.gca()\r\n else: # else create a new figure\r\n plt.figure()\r\n ax = plt.gca()\r\n\r\n for j in range(M):\r\n\r\n if len(S_lb) == 0: # no confidence intervals\r\n # Plot the value as a tick line:\r\n if ec == 'none':\r\n ax.add_patch(Rectangle((j+1-dh, S[j]-dv), 2*dh, 2*dv, color=clrs[j]))\r\n else:\r\n ax.add_patch(Rectangle((j+1-dh, S[j]-dv), 2*dh, 2*dv,\r\n facecolor=clrs[j], edgecolor=ec))\r\n else:\r\n # Plot the confidence interval as a rectangle:\r\n if ec == 'none':\r\n ax.add_patch(Rectangle((j+1-dh, S_lb[j]), 2*dh, S_ub[j]-S_lb[j],\r\n color=clrs[j]))\r\n else:\r\n ax.add_patch(Rectangle((j+1-dh, S_lb[j]), 2*dh, S_ub[j]-S_lb[j],\r\n facecolor=clrs[j], edgecolor=ec))\r\n # Plot the mean as a tick line:\r\n ax.add_patch(Rectangle((j+1-dh, S[j]-dv), 2*dh, 2*dv, color='black'))\r\n\r\n x1 = 0\r\n x2 = M+1\r\n\r\n if len(S_lb) != 0:\r\n y1 = min(-0.1, np.min(S_lb))\r\n else:\r\n y1 = min(-0.1, np.min(S))\r\n if len(S_ub) != 0:\r\n y2 = max(1.1, np.max(S_ub))\r\n else:\r\n y2 = max(1.1, np.max(S))\r\n\r\n plt.plot([x1, x2], [0, 0], ':k') # Plot zero line\r\n plt.xlim((x1, x2)) # set axes limits\r\n plt.ylim((y1, y2)) # set axes limits\r\n plt.xticks(np.arange(1, M+1, 1), X_Labels, **pltfont)\r\n plt.yticks(**pltfont)\r\n plt.ylabel(Y_Label, **pltfont)\r\n plt.grid(axis='x')",
"def plot_columns(dataframe, title):\n sns.boxplot(x=dataframe['category_id'], y=dataframe['price'])\n plt.title(title)\n plt.xlabel('Category ID')\n plt.ylabel('Price')\n plt.show()",
"def show_stats(x, **kws):\n mean = np.mean(x)\n median = np.median(x)\n std = np.std(x,ddof=1)\n ax = plt.gca()\n ax.annotate(\"Mean: {:.2f}\\nMedian: {:.2f}\\n$\\sigma$: {:.3e}\".format(mean,median,std), xy=(.6,.3),xycoords=ax.transAxes, fontsize=9)",
"def spacegroup_hist(\n data: Sequence[int | str | Structure] | pd.Series,\n show_counts: bool = True,\n xticks: Literal[\"all\", \"crys_sys_edges\"] | int = 20,\n include_missing: bool = False,\n ax: plt.Axes | None = None,\n **kwargs: Any,\n) -> plt.Axes:\n ax = ax or plt.gca()\n\n if isinstance(next(iter(data)), Structure):\n # if 1st sequence item is structure, assume all are\n data = cast(Sequence[Structure], data)\n series = pd.Series(struct.get_space_group_info()[1] for struct in data)\n else:\n series = pd.Series(data)\n\n df = series.value_counts(sort=False).to_frame(name=\"counts\")\n\n crystal_sys_colors = {\n \"triclinic\": \"red\",\n \"monoclinic\": \"teal\",\n \"orthorhombic\": \"blue\",\n \"tetragonal\": \"green\",\n \"trigonal\": \"orange\",\n \"hexagonal\": \"purple\",\n \"cubic\": \"yellow\",\n }\n\n if df.index.is_numeric(): # assume index is space group numbers\n if include_missing:\n df = df.reindex(range(1, 231), fill_value=0)\n else:\n df = df.sort_index()\n df[\"crystal_sys\"] = [get_crystal_sys(x) for x in df.index]\n ax.set(xlim=(0, 230))\n xlabel = \"International Spacegroup Number\"\n\n else: # assume index is space group symbols\n # TODO: figure how to implement include_missing for space group symbols\n # if include_missing:\n # idx = [SpaceGroup.from_int_number(x).symbol for x in range(1, 231)]\n # df = df.reindex(idx, fill_value=0)\n df[\"crystal_sys\"] = [SpaceGroup(x).crystal_system for x in df.index]\n\n # sort df by crystal system going from smallest to largest spacegroup numbers\n # e.g. triclinic (1-2) comes first, cubic (195-230) last\n sys_order = dict(zip(crystal_sys_colors, range(len(crystal_sys_colors))))\n df = df.loc[df.crystal_sys.map(sys_order).sort_values().index]\n\n xlabel = \"International Spacegroup Symbol\"\n\n ax.set(xlabel=xlabel, ylabel=\"Count\")\n\n kwargs[\"width\"] = kwargs.get(\"width\", 0.9) # set default bar width\n # make plot\n df.counts.plot.bar(figsize=[16, 4], ax=ax, **kwargs)\n\n # https://matplotlib.org/3.1.1/gallery/lines_bars_and_markers/fill_between_demo\n trans = transforms.blended_transform_factory(ax.transData, ax.transAxes)\n\n # count rows per crystal system\n crys_sys_counts = df.groupby(\"crystal_sys\").sum(\"counts\")\n\n # sort by key order in dict crys_colors\n crys_sys_counts = crys_sys_counts.loc[\n [x for x in crystal_sys_colors if x in crys_sys_counts.index]\n ]\n\n crys_sys_counts[\"width\"] = df.value_counts(\"crystal_sys\")\n ax.set_title(\"Totals per crystal system\", fontdict={\"fontsize\": 18}, pad=30)\n crys_sys_counts[\"color\"] = pd.Series(crystal_sys_colors)\n\n x0 = 0\n for cryst_sys, count, width, color in crys_sys_counts.itertuples():\n x1 = x0 + width\n\n for patch in ax.patches[0 if x0 == 1 else x0 : x1 + 1]:\n patch.set_facecolor(color)\n\n text_kwds = dict(transform=trans, horizontalalignment=\"center\")\n ax.text(\n *[(x0 + x1) / 2, 0.95],\n cryst_sys,\n rotation=90,\n verticalalignment=\"top\",\n fontdict={\"fontsize\": 14},\n **text_kwds,\n )\n if show_counts:\n ax.text(\n *[(x0 + x1) / 2, 1.02],\n f\"{count:,} ({count/len(data):.0%})\",\n fontdict={\"fontsize\": 12},\n **text_kwds,\n )\n\n ax.fill_between(\n [x0 - 0.5, x1 - 0.5],\n *[0, 1],\n facecolor=color,\n alpha=0.1,\n transform=trans,\n edgecolor=\"black\",\n )\n x0 += width\n\n ax.yaxis.grid(True)\n ax.xaxis.grid(False)\n\n if xticks == \"crys_sys_edges\" or isinstance(xticks, int):\n if isinstance(xticks, int):\n # get x_locs of n=xticks tallest bars\n x_indices = df.reset_index().sort_values(\"counts\").tail(xticks).index\n 
else:\n # add x_locs of n=xticks tallest bars\n x_indices = crys_sys_counts.width.cumsum()\n\n majorLocator = FixedLocator(x_indices)\n\n ax.xaxis.set_major_locator(majorLocator)\n plt.xticks(rotation=90)\n\n return ax",
"def explore_col(s, e):\n \n fig = plt.figure(figsize=(10, 8))\n\n\n sub1 = fig.add_subplot(221) \n sub1.set_title(s +' histogram') \n sub1.hist(df_tr_lbl[s])\n\n sub2 = fig.add_subplot(222)\n sub2.set_title(s +' boxplot')\n sub2.boxplot(df_tr_lbl[s])\n \n #np.random.seed(12345)\n \n if e > 100 or e <= 0:\n select_engines = list(pd.unique(df_tr_lbl.id))\n else:\n select_engines = np.random.choice(range(1,101), e, replace=False)\n \n sub3 = fig.add_subplot(223)\n sub3.set_title('time series: ' + s +' / cycle')\n sub3.set_xlabel('cycle')\n for i in select_engines:\n df = df_tr_lbl[['cycle', s]][df_tr_lbl.id == i]\n sub3.plot(df['cycle'],df[s])\n \n sub4 = fig.add_subplot(224)\n sub4.set_title(\"scatter: \"+ s + \" / ttf (regr label)\")\n sub4.set_xlabel('ttf')\n sub4.scatter(df_tr_lbl['ttf'],df_tr_lbl[s])\n\n\n plt.tight_layout()\n plt.show()",
"def makeComparsionChart(columns, data):\n fig = plt.figure(figsize=(16, 10))\n gs = gridspec.GridSpec(2, 3, wspace = 0.2, hspace=0.2, right=0.96, left=0.04)\n ax1 = plt.subplot(gs[0, 0:1], label=\"\")\n ax2 = plt.subplot(gs[0, 1:2], label=\"\" )\n ax3 = plt.subplot(gs[0, 2:3], label=\"\" )\n ax4 = plt.subplot(gs[1, 0:1], label=\"\" )\n ax5 = plt.subplot(gs[1, 1:2], label=\"\" )\n ax1.set_title('Before Scaling')\n ax2.set_title('After Standard Scaler')\n ax3.set_title('After Min-Max Scaler')\n ax4.set_title('After Roboust Scaler')\n ax5.set_title('After Normalization')\n\n for column in columns:\n sns.kdeplot(data[0][column], ax=ax1, legend=False)\n sns.kdeplot(data[1][column], ax=ax2, legend=False)\n sns.kdeplot(data[2][column], ax=ax3, legend=False)\n sns.kdeplot(data[3][column], ax=ax4, legend=False)\n sns.kdeplot(data[4][column], ax=ax5, legend=False)\n\n plt.show()",
"def figure_size_resp_bms(df):\n sns.set_style('ticks')\n gs = GridSpec(2, 3)\n fig = plt.figure(figsize=(7, 8))\n axs = [fig.add_subplot(gs[0, 0]), fig.add_subplot(gs[0, 1]), fig.add_subplot(gs[0, 2]),\n fig.add_subplot(gs[1, :])]\n # fig, axs = plt.subplots(2, 2, figsize=(8, 6))\n # axs = axs.reshape(-1)\n\n sns.boxplot('genotype', 'area', hue='treatment', data=df, ax=axs[0], order=('wt', 'ko'), hue_order=('veh', 'bms'))\n axs[0].set_ylim((0, 2000000))\n axs[0].set_ylabel('Responsive area in µm²')\n sns.boxplot('genotype', 'max_df', hue='treatment', data=df, ax=axs[1], order=('wt', 'ko'), hue_order=('veh', 'bms'))\n axs[1].set_ylabel('Average peak response amplitude (%)')\n axs[1].set_ylim((0, 3.5))\n sns.boxplot('genotype', 'fwhm', hue='treatment', data=df, ax=axs[2], order=('wt', 'ko'), hue_order=('veh', 'bms'))\n gp = df.groupby(('genotype', 'treatment'))\n t = np.arange(-3, 5, .1)\n for g in product(('wt', 'ko'), ('veh', 'bms')):\n try:\n avg_df = np.vstack(gp.get_group(g).avg_df.as_matrix())\n mean_df = avg_df.mean(0)\n # mean_df[mean_df > 0.7] = 0\n axs[3].plot(t, mean_df, label=g, linewidth=2)\n except KeyError:\n pass\n axs[3].legend()\n axs[3].set_xlabel(TIME_LABEL)\n axs[3].set_ylabel('Average $\\Delta$ F / F (%)')\n fig.tight_layout()\n fig.savefig('Intrinsic/figure/responses.png')\n fig.savefig('Intrinsic/figure/responses.svg')\n with open('Intrinsic/figure/stats.txt', 'w') as f:\n f.write('Mann-Whitney U-test\\n\\n')\n for g1, g2 in combinations(product(('wt', 'ko'), ('veh', 'bms')), 2):\n f.write(f'+ {g1} vs {g2}:\\n')\n pval = mannwhitneyu(df.area[df.genotype == g1], df.area[df.genotype == g2]).pvalue\n f.write(f'\\tArea comparison {g1} vs {g2}: {pval:.3f}\\n')\n pval = mannwhitneyu(df.max_df[df.genotype == g1], df.max_df[df.genotype == g2]).pvalue\n f.write(f'\\tAmplitude comparison {g1} vs {g2}: {pval:.3f}\\n')\n pval = mannwhitneyu(df.fwhm[df.genotype == g1], df.fwhm[df.genotype == g2]).pvalue\n f.write(f'\\tFull width at half maximum comparison {g1} vs {g2}: {pval:.3f}\\n')",
"def plot_bv_swarm(df, xcolname, ycolname, icol=1):\n # set plot size\n fig, ax = plt.subplots(figsize=(8,6))\n \n # plotting... box+kde\n sns.swarmplot(ax=ax, data = df\n , x = str(xcolname)\n , y = str(ycolname)\n , color = sns.color_palette()[icol]);\n \n \n # title and labels\n plt.title(xcolname+' Vs '+ycolname, fontsize=20)\n plt.xlabel(xcolname+ ' (units)', fontsize=16)\n plt.ylabel(ycolname+ ' (units)', fontsize=16)\n \n return plt.show()",
"def get_boxplot(ensemble, gene, grouping, outliers, modality='methylation', \n\tmethylation_type='mcg', clustering='lv', level='normalized', \n\tsmoothing=False,\n\tmax_points='10000'):\n\tmodalityu = modality.replace('snATAC','ATAC').replace('snRNA','RNA')\n\t\n\twith open(log_file,'a') as f:\n\t\tprint(' Checkpoint 1: modality=%s grouping=%s' % (modality, grouping), file=f) \n\n\n\ttsne_type='mCH_ndim2_perp20'; # Note this doesn't matter, since we won't use tSNE for the box plot\n\tif ('methylation' in modality):\n\t\tpoints = get_gene_methylation(ensemble, methylation_type, gene, grouping, clustering, level, outliers, tsne_type, max_points)\n\t\tcontext = methylation_type[1:]\n\telif ('ATAC' in modality):\n\t\tpoints = get_gene_snATAC(ensemble, gene, grouping, outliers, \n\t\t\tsmoothing=smoothing, max_points=max_points, modality=modalityu)\n\telif ('RNA' in modality):\n\t\tpoints = get_gene_RNA(ensemble, gene, grouping, outliers, \n\t\t\tsmoothing=smoothing, max_points=max_points, modality=modalityu)\n\telse:\n\t\terror('Invalid modality in get_boxplot')\n\n\tif points is None:\n\t\traise FailToGraphException\n\tif grouping == 'annotation' and points['annotation_'+clustering].nunique() <= 1:\n\t\tgrouping = \"cluster\"\n\twith open(log_file,'a') as f:\n\t\tprint(' Checkpoint: modality=%s grouping=%s' % (modality, grouping), file=f) \n\t\tprint(points.columns)\n\n\ttraces = OrderedDict()\n\tif ('methylation' in modality):\t\n\t\tif grouping == \"dataset\":\n\t\t\tgroups = points[grouping]\n\t\t\tunique_groups = points[\"dataset\"].unique()\n\t\telif grouping == 'target_region':\n\t\t\tpoints['target_region'].fillna('N/A', inplace=True)\n\t\t\tgroups = points[grouping]\n\t\t\tunique_groups = points['target_region'].unique()\n\t\telif grouping == 'slice':\n\t\t\tdatasets_all_cells = points['dataset'].tolist()\n\t\t\tslices_list = [d.split('_')[1] if 'RS2' not in d else d.split('_')[2][2:4] for d in datasets_all_cells]\n\t\t\tpoints['slice'] = slices_list\n\t\t\tgroups = points[grouping]\n\t\t\tslices_set = set(slices_list)\n\t\t\tunique_groups = np.array(list(slices_set))\n\t\telif grouping == 'sex':\n\t\t\tunique_groups = points['sex'].unique()\n\t\t\tgroups = points[grouping]\n\t\telif grouping == 'cluster' or grouping == 'annotation':\n\t\t\tunique_groups = points[grouping+'_'+clustering].unique()\n\t\t\tgroups = points[grouping+'_'+clustering]\n\t\telse:\n\t\t\tgrouping = 'cluster'\n\t\t\tunique_groups = points[grouping+'_'+clustering].unique()\n\t\t\tgroups = points[grouping+'_'+clustering]\n\telif ('ATAC' in modality):\n\t\tgrouping='cluster_ATAC'\n\t\tgroups = points[grouping]\n\t\tunique_groups = groups.unique()\n\telif ('RNA' in modality):\t\n\t\n\t\tif grouping == \"dataset\":\n\t\t\tunique_groups = points[\"dataset\"].unique()\n\t\telif grouping == \"target_region\":\n\t\t\tpoints['target_region'].fillna('N/A', inplace=True)\n\t\t\tunique_groups = points[\"target_region\"].unique()\n\t\telif grouping == 'annotation' or grouping == 'cluster':\n\t\t\tif grouping == 'annotation' and grouping+'_RNA' not in points.columns: # If no cluster annotations available, group by cluster number instead\n\t\t\t\tgrouping = \"annotation\"\n\t\t\t\tpoints = get_gene_RNA(ensemble, gene, grouping, outliers)\n\t\t\t\tprint(\"**** Grouping by cluster\")\n\t\t\tunique_groups = points[grouping+'_RNA'].unique()\n\t\telse:\n\t\t\traise FailToGraphException\n\t\tif grouping=='cluster':\n\t\t\tgrouping='cluster_RNA'\n\t\tgroups = points[grouping]\n\t\n\twith open(log_file,'a') as f:\n\t\tprint(' 
Checkpoint: modality=%s grouping=%s' % (modality, grouping), file=f) \n\t\tprint(points.columns)\n\n\tnum_clusters = len(unique_groups)\n\n\t# ## ############\n\t# gene_info_df = pd.DataFrame()\n\t# gene_info_df = median_cluster_mch(points, grouping, clustering)\n\t# gene_info_dict = gene_info_df.to_dict(into=OrderedDict)\n\t# mch = list()\n\t# for key in list(gene_info_dict.keys()):\n\t# \tmch.append(list(gene_info_dict[key].values()))\n\t# mch = np.array(mch)\n\t# figure = ff.create_dendrogram(mch.transpose(), orientation=\"bottom\", labels=tuple([i for i in range(mch.shape[1])]), \n\t# \tcolorscale='beige')\n\t# for i in range(len(dendro_top['data'])):\n\t# \tdendro_top['data'][i]['yaxis'] = 'y2'\n\t# dendro_top_leaves = dendro_top['layout']['xaxis']['ticktext']\n\t# dendro_top_leaves = list(map(int, dendro_top_leaves))\n\t# mch = mch[:,dendro_top_leaves] # Reorder the columns according to the clustering\n\t# unique_groups = [unique_groups[i] for i in dendro_top_leaves]\n\t# mch = list(mch)\n\t# figure['data'].extend(dendro_top['data'])\n\t# ## ###########\n\n\tcolors = generate_cluster_colors(num_clusters, grouping)\n\tif grouping == \"cluster\":\n\t\tname_prepend=\"cluster_\"\n\telse:\n\t\tname_prepend=\"\"\n\tdata = []\n\tfor i, group in enumerate(unique_groups):\n\t\tcolor = colors[int(np.where(unique_groups==group)[0]) % len(colors)]\n\t\tif outliers:\n\t\t\tboxpoints='suspectedoutliers';\n\t\telse:\n\t\t\tboxpoints=False\n\t\ttrace = {\n\t\t\t\"type\": 'violin',\n\t\t\t# \"y\": points[methylation_type + '/' + context + '_' + level][groups==group],\n\t\t\t\"name\": name_prepend + str(group),\n\t\t\t\"points\": boxpoints,\n\t\t\t\"box\": {\n\t\t\t\t\"visible\": True,\n\t\t\t\t\"width\": .8,\n\t\t\t\t'fillcolor': color,\n\t\t\t},\n\t\t\t\"line\": {\n\t\t\t\t\"color\" : 'rgba(10,10,10,.5)'\n\t\t\t}\n\t\t}\n\t\tif (modality=='methylation'):\n\t\t\ttrace[\"y\"]=points[methylation_type + '/' + context + '_' + level][groups==group]\n\t\telif (modality=='snATAC'):\n\t\t\ttrace[\"y\"]=points[groups==group]['normalized_counts']\n\t\telif (modality=='RNA'):\n\t\t\ttrace[\"y\"]=points[groups==group]['normalized_counts']\n\t\tdata.append(trace)\n\n\tgene_name = get_gene_by_id([ gene ])[0]['gene_name']\n\n\t# with open(log_file,'a') as f:\n\t# \tprint(' Checkpoing 5', file=f) \n\t# \tprint(gene_name +' '+ modality + (('(%s)' % methylation_type) if modality=='methylation' else '') + ' in each cluster: ', file=f)\n\t# \tprint(methylation_type, file=f)\n\t\t\n\tlayout = Layout(\n\t\tautosize=True,\n\t\theight=450,\n\t\twidth=1000,\n\t\ttitle=gene_name +' '+ modality + (('(%s)' % methylation_type) if modality=='methylation' else '') + ' in each cluster: ',\n\t\ttitlefont={'color': 'rgba(1,2,2,1)',\n\t\t\t\t 'size': 20},\n\t\txaxis={\n\t\t\t'title': grouping.title(),\n\t\t\t'titlefont': {\n\t\t\t\t'size': 17\n\t\t\t},\n\t\t\t'type': 'category',\n\t\t\t'anchor': 'y',\n\t\t\t'ticks': 'outside',\n\t\t\t'ticklen': 4,\n\t\t\t'tickangle': -45,\n\t\t\t'tickwidth': 0.5,\n\t\t\t'showticklabels': True,\n\t\t\t'tickfont': {\n\t\t\t\t'size': 12\n\t\t\t},\n\t\t\t'showline': True,\n\t\t\t'zeroline': False,\n\t\t\t'showgrid': True,\n\t\t\t'linewidth': 1,\n\t\t\t'mirror': True,\n\t\t},\n\t\tyaxis={\n\t\t\t'title': gene_name + ' ' + level.capitalize() + ' ' + methylation_type,\n\t\t\t'titlefont': {\n\t\t\t\t'size': 15\n\t\t\t},\n\t\t\t'type': 'linear',\n\t\t\t'anchor': 'x',\n\t\t\t'ticks': 'outside',\n\t\t\t# 'tickcolor': 'white',\n\t\t\t'ticklen': 4,\n\t\t\t'tickwidth': 0.5,\n\t\t\t'showticklabels': 
True,\n\t\t\t'tickfont': {\n\t\t\t\t'size': 12\n\t\t\t},\n\t\t\t'showline': True,\n\t\t\t'zeroline': False,\n\t\t\t'showgrid': True,\n\t\t\t'linewidth': 1,\n\t\t\t'mirror': True,\n\t\t},\n\t\tshowlegend=False,\n\t)\n\n\treturn plotly.offline.plot(\n\t\t{\n\t\t\t'data': data,\n\t\t\t'layout': layout\n\t\t},\n\t\toutput_type='div',\n\t\tshow_link=False,\n\t\tinclude_plotlyjs=False,)",
"def test_boxplot(self):\n values = [37, 48, 30, 53, 3, 83, 19, 71, 90, 16, 19, 7, 11, 43, 43]\n result = boxplot(values)\n self.assertEqual(3, result['min_val'])\n self.assertEqual(17.5, result['q1_val'])\n self.assertEqual(37, result['mean_val'])\n self.assertEqual(50.5, result['q3_val'])\n self.assertEqual(90, result['max_val'])",
"def boxplot(data):\n sns.boxplot(data, width=0.5, palette=\"colorblind\")\n # add points on the plot\n sns.swarmplot(data, color='red', alpha=0.75)",
"def plot_boxplots(\n df: pd.DataFrame, drop_cols: list = None, sub_col=3, figsize: tuple = (18, 26)\n):\n\n # drop unnecessary columns\n if drop_cols:\n df = df.drop(drop_cols, axis=1)\n # keep only quantitative features\n df = create_quanti_df(df)\n print(f\"Number of quantitaive columns: {df.shape[1]}\")\n # create figure and axes based on the number of columns of the dataframe\n fig, axes = plt.subplots(ceil(len(df.columns) / sub_col), sub_col, figsize=figsize)\n y = 0 # set counter\n\n # plot boxplot for each column of data\n for col in df.columns:\n i, j = divmod(y, sub_col)\n sns.boxplot(x=df[col], ax=axes[i, j]).set_title(col, fontsize=20)\n y += 1\n\n plt.tight_layout()\n plt.show()\n return",
"def vscat(a,fig=None,ls=None,marker='o',nmin=2,mhmin=-3,density=False,out=None) :\n if fig == None : fig,ax=plots.multi(4,6,hspace=0.001,wspace=0.4,figsize=(12,8))\n else : fig,ax=fig\n tbins=[3000,3500,4000,4500,5500,8000,30000] \n hbins=[8,11,12,13,15]\n try: snr = a['SNREV']\n except: snr=a['SNR']\n j=np.where(snr > 300) [0]\n snr[j] = 300\n for i in range(len(tbins)-1) :\n ax[i,0].text(0.9,0.9,'{:d}<=RV_TEFF<{:d}'.format(tbins[i],tbins[i+1]),ha='right',transform=ax[i,0].transAxes,fontsize=8)\n for j in range(len(hbins)-1) :\n ax[0,j].set_title('{:d}<=H<{:d}'.format(hbins[j],hbins[j+1]))\n gd = np.where((a['RV_TEFF']>=tbins[i]) & (a['RV_TEFF']<tbins[i+1]) &\n (a['H']>=hbins[j]) & (a['H']<hbins[j+1]) &\n (a['NVISITS']>nmin) & (a['RV_FEH']>mhmin) & (a['VSCATTER'] > 0)) [0]\n print(tbins[i],tbins[i+1],hbins[j],hbins[j+1],nmin,len(gd))\n try :\n #plots.plotc(ax[i,2],snr[gd],a['VSCATTER'][gd],a['RV_FEH'][gd],marker=marker,xr=[0,310],yr=[0,1],xt='S/N',yt='VSCATTER')\n ax[i,j].hist(a['VSCATTER'][gd],bins=np.arange(0,1,0.01),ls=ls,histtype='step',color=colors[j],normed=density)\n ax[i,j].set_xlabel('VSCATTER (km/s)')\n ax[i,j].plot([0.1,0.1],ax[i,j].get_ylim())\n #ax[i,1].hist(a['VSCATTER'][gd],bins=np.arange(0,1,0.01),histtype='step',cumulative=True,normed=True,ls=ls,color=colors[j])\n #ax[i,1].set_xlabel('VSCATTER')\n except : pass\n\n if out is not None : \n fig.savefig(out+'.png')\n plt.close()\n\n fig.suptitle('NVISITS>{:d} [M/H]>{:6.2f}'.format(nmin,mhmin))\n return fig,ax"
] |
[
"0.60901725",
"0.57481736",
"0.57227015",
"0.56605124",
"0.56474423",
"0.5628243",
"0.5616306",
"0.5595369",
"0.5566381",
"0.5519587",
"0.543396",
"0.54185474",
"0.5369447",
"0.5367123",
"0.53473914",
"0.5331595",
"0.53172815",
"0.53153265",
"0.52583396",
"0.52550906",
"0.5237588",
"0.52320576",
"0.5227174",
"0.52224094",
"0.5213919",
"0.5191482",
"0.5177407",
"0.5169259",
"0.5167667",
"0.5160894"
] |
0.7083116
|
0
|
Store wavelengths for a spectrum
|
def storeWavelengths(self, nm):
pre = "w,0"
d = {"wavelength_nm": list(nm)}
self._writeline(pre, str(d))
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def wavelength_ex(hdulist):\n wave = hdulist[1].data['loglam']\n wave = 10**wave\n\n return wave",
"def GetWavelengths (self) :\n\t\treturn self.run(\"GetWavelengths\")",
"def wavelength(self):\n return self.getparam(\"WAVELENGTH\")",
"def wavelength(self):\n return self.getparam(\"WAVELENGTH\")",
"def wavelength(self):\n return wavelength(energy)",
"def wavelength(self):\n return self.get(self._names[\"wavelength\"])",
"def add_wavelength(filename, model, std_tol, overwrite=False, plot_path=None):\n hdulist = fits.open(filename)\n\n # read both hdu's\n logger.debug(\"\\tObject: {}\".format(hdulist[0].header['OBJECT']))\n\n # extract just the middle part of the CCD (we only really care about Halpha)\n tbl = Table(hdulist[1].data)\n\n if 'wavelength' in tbl.colnames and not overwrite:\n logger.debug(\"\\tTable already contains wavelength values!\")\n return\n\n # compute wavelength array for the pixels\n wavelength, var = model.gp.predict(model.y, tbl['pix']-model.x_shift,\n return_var=True)\n bad_idx = np.sqrt(var) > std_tol.to(u.angstrom).value\n wavelength[bad_idx] = np.nan\n\n tbl['wavelength'] = wavelength\n tbl['wavelength_err'] = np.sqrt(var)\n\n new_hdu1 = fits.table_to_hdu(tbl)\n new_hdulist = fits.HDUList([hdulist[0], new_hdu1])\n\n logger.debug(\"\\tWriting out file with wavelength array.\")\n new_hdulist.writeto(filename, overwrite=True)\n\n if plot_path is not None:\n # plot the spectrum vs. wavelength\n fig,axes = plt.subplots(2, 1, figsize=(12,8), sharex=True)\n\n axes[0].plot(tbl['wavelength'], tbl['source_flux'],\n marker='', drawstyle='steps-mid', linewidth=1.)\n axes[0].errorbar(tbl['wavelength'], tbl['source_flux'], 1/np.sqrt(tbl['source_ivar']),\n linestyle='none', marker='', ecolor='#666666', alpha=1., zorder=-10)\n axes[0].set_ylim(tbl['source_flux'][200]/4, np.nanmax(tbl['source_flux']))\n axes[0].set_yscale('log')\n\n axes[1].plot(tbl['wavelength'], tbl['background_flux'],\n marker='', drawstyle='steps-mid', linewidth=1.)\n axes[1].errorbar(tbl['wavelength'], tbl['background_flux'], 1/np.sqrt(tbl['background_ivar']),\n linestyle='none', marker='', ecolor='#666666', alpha=1., zorder=-10)\n axes[1].set_ylim(1e-1, np.nanmax(tbl['background_flux']))\n axes[1].set_yscale('log')\n\n fig.tight_layout()\n _filename_base = path.splitext(path.basename(filename))[0]\n fig.savefig(path.join(plot_path, '{0}_1d_wvln.png'\n .format(_filename_base)))\n\n plt.close(fig)",
"def to_wavelength(self):\n\n if self.unit == 'f_lam':\n raise ValueError('Dispersion is arealdy in wavelength')\n elif self.unit == 'f_nu':\n self.flux = self.flux * self.dispersion**2 / (c.value * 1e+10)\n self.dispersion = (c.value * 1e+10) / self.dispersion\n\n self.flux = np.flip(self.flux, axis=0)\n self.dispersion = np.flip(self.dispersion, axis=0)\n\n elif self.unit == 'f_loglam':\n self.dispersion = np.exp(self.dispersion)\n self.flux = self.flux / self.dispersion\n else:\n raise ValueError('Spectrum unit not recognized: ', self.unit)\n\n self.unit = 'f_lam'",
"def set_wavelength(self, wavelength: float) -> None:\n\n assert isinstance(wavelength, float), \"Incompatible type\"\n\n #:SENSe[n][:CHANnel[m]]:POWer:WAVelength /?\n self._inst.write(\"SENS:POW:WAV {}\".format(wavelength))",
"def wavelength(self,freq):\n return self.phase_velocity()/freq",
"def spectrum(self, wl: Union[float, ndarray]) -> Union[float, ndarray]:\n wlm = wl * 1e-9 # Wavelength to meters\n return 3.74183e-16 * wlm ** -5. / (np.exp(0.014388 / (wlm * self.temp)) - 1.)",
"def wavelength(energy):\r\n return 2 * np.pi * PLANCK_CONSTANT * SPEED_OF_LIGHT / energy",
"def waveband(self):\n return self.get(\"waveband\", default=\"\", decode=True).split(\"#\")",
"def spectral_data(spectra):\n weights = np.concatenate([ s.ivar for s in spectra ])\n flux = np.concatenate([ s.flux for s in spectra ])\n wflux = weights * flux\n return (weights, flux, wflux)",
"def get_wavelengths(system, info=False):\n\n system_data = system.SystemData\n wavelengths = system_data.Wavelengths\n N_wavelengths = wavelengths.NumberOfWavelengths\n\n if info is True:\n print(\"\\nReading Wavelengths\")\n print(\"Total Number of Wavelengths: %d\" % N_wavelengths)\n\n wavelength_array = np.zeros(N_wavelengths)\n\n for k in np.arange(1, N_wavelengths + 1):\n _wave = wavelengths.GetWavelength(k).Wavelength\n wavelength_array[k - 1] = _wave\n\n if info is True:\n print(\"%.5f microns\" % _wave)\n\n return wavelength_array",
"def get_wavelength_array(self):\n return self.Me.get_wavelength_array()",
"def wavelength(energy):\n return 2 * PI * PLANCK_CONSTANT * SPEED_OF_LIGHT / energy",
"def PM_setWavelength(self,channel,wavelength):\n if channel not in ApexAP1000.PM_CHANNELS:\n raise ValueError('Unknow channel during power measurement')\n sentStr = self.headStr('PM')+'SETWAVELENGTH[%d] %g'%(channel,wavelength)\n return self.write(sentStr)",
"async def set_wavelength(self, wavelength: int):\n return await self.hw_device.set_wavelength(self.channel, wavelength)",
"def wavelength_axis(self):\n if 'WAVE' not in self.axes_wcs.wcs.ctype:\n raise cu.CubeError(2,\n 'No energy (wavelength, frequency) axis found')\n axis = 0 if self.axes_wcs.wcs.ctype[0] == 'WAVE' else 1\n delta = self.axes_wcs.wcs.cdelt[axis]\n crpix = self.axes_wcs.wcs.crpix[axis]\n crval = self.axes_wcs.wcs.crval[axis]\n start = crval - crpix * delta\n stop = start + self.data.shape[-1 - axis] * delta\n cunit = u.Unit(self.axes_wcs.wcs.cunit[axis])\n return np.linspace(start, stop, num=self.data.shape[-1 - axis]) * cunit",
"def get_spectrum_data():\n from resistics.spectra.data import SpectrumData\n import numpy as np\n\n # add some data\n startTime = \"2020-01-01 00:00:00.000000\"\n stopTime = \"2020-01-01 00:00:00.062500\"\n data = {}\n data[\"Ex\"] = np.array([1 + 3j, -2 + 5j, 7 - 6j, 3 + 2j, 4 + 8j])\n data[\"Ey\"] = np.array([12 - 4j, -6 + 2j, 2 + 6j, -4 - 2j, -6 - 6j])\n data[\"Hx\"] = np.array([-3 + 3j, -11 + 7j, 4 - 1j, 1 + 9j, 2 + 2j])\n data[\"Hy\"] = np.array([2 + 9j, 9 + 1j, 8 + 8j, 6 + 2j, 5 + 2j])\n specData = SpectrumData(8, 5, 128, startTime, stopTime, data)\n evalfreq = np.array([24, 40])\n return specData, evalfreq",
"def getAllSpectrumMeasurements(self): \n return self.spectrum",
"def wavelength_bins(width=.1, start=700, stop=1500, energy=True):\n if not energy:\n return np.linspace(start, stop, int((stop - start) / width) + 1)\n\n h = astropy.constants.h.to('eV s').value\n c = astropy.constants.c.to('m/s').value\n const = h * c * 1e9\n # Calculate upper and lower energy limits from wavelengths, note that start and stop switch when going to energy\n e_stop = const / start\n e_start = const / stop\n n = int((e_stop - e_start) / width)\n # Construct energy bin edges (reversed) and convert back to wavelength\n return const / np.linspace(e_stop, e_start, n + 1)",
"def to_log_wavelength(self):\n\n if self.unit == 'f_loglam':\n raise ValueError('Spectrum is already in logarithmic wavelength')\n\n elif self.unit == 'f_lam':\n self.flux = self.flux * self.dispersion\n self.dispersion = np.log(self.dispersion)\n\n elif self.unit == 'f_nu':\n self.to_wavelength()\n self.to_log_wavelength()\n\n self.unit='f_loglam'",
"def set_wavelenth(self, wavelength):\n if wavelength < 0:\n raise ValueError(\"The wavelength cannot be negative\")\n\n self.wavelength = wavelength\n RT_model_1D.set_scattering_cross_sec(self)\n RT_model_1D.get_atmoshperic_profiles(self)\n self.sun_intensity = f.sun_init_intensity(self.wavelength, self.stokes_dim)",
"def Create_Constant_WavelengthArray(spec_cube,final_wave_start,final_wave_end):\n\tdwave = np.zeros(len(spec_cube))\n\tfor n in xrange(len(spec_cube)):\n\t\ttemp_final_wave = spec_cube[n][0] # Take one of the spectrum use its resolution\n\t\tdwave[n] = np.median(temp_final_wave[1:] - temp_final_wave[:-1])\n\tdwave = np.max(dwave)\n\tfinal_wave = np.arange(final_wave_start,final_wave_end,dwave)\n\tprint 'Since input dv = 0 -> median resolution (constant) dwave = %f angstrom is used.' % dwave\n\treturn final_wave",
"def __init__(self, wavelengths=[], intensities=[]):\n wavelengths = [float(i) for i in wavelengths]\n intensities = [float(i) for i in intensities]\n if len(wavelengths) and not len(intensities):\n self.wavelengths = pylab.array(wavelengths)\n self.intensities = pylab.array([0.0] * len(wavelengths))\n elif not len(wavelengths) and len(intensities):\n raise ValueError('The wavelengths for the spectrum must be '\n 'specified.')\n elif len(wavelengths) != len(intensities):\n raise ValueError('The wavelengths and intensities must have the '\n 'same number of items')\n else:\n self.wavelengths = pylab.array(wavelengths)\n self.intensities = pylab.array(intensities)",
"def set_wavelength(self, wavelength):\n print('Setting Santec wavelength to %.4f nm' % wavelength)\n\n # We need to select which of the 4 lasers to select depending on\n # the desired wavelength\n\n if 1530.0 < wavelength < 1630.000001:\n self.santec1.write(\"SW 4\")\n self.santec4.write(\"WA %.4f\" % wavelength)\n if self.active_module != 4:\n self.active_module = 4\n time.sleep(5.00)\n else:\n time.sleep(0.01)\n\n elif 1440.0 < wavelength < 1530.1:\n self.santec1.write(\"SW 3\")\n self.santec3.write(\"WA %.4f\" % wavelength)\n if self.active_module != 3:\n self.active_module = 3\n time.sleep(5.00)\n else:\n time.sleep(0.01)\n\n elif 1355 < wavelength < 1440.1:\n self.santec1.write(\"SW 2\")\n self.santec2.write(\"WA %.4f\" % wavelength)\n if self.active_module != 2:\n self.active_module = 2\n time.sleep(5.00)\n else:\n time.sleep(0.01)\n\n elif 1259.999999 < wavelength < 1355.1:\n self.santec1.write(\"SW 1\")\n self.santec1.write(\"WA %.4f\" % wavelength)\n if self.active_module != 1:\n self.active_module = 1\n time.sleep(5.00)\n else:\n time.sleep(0.01)\n\n else:\n print(\"Wavelength out of range. No change will be made\")",
"def waveforms(self):\n return list(self._waveforms)",
"def __setSpectrum__(self):\n \n self.Ck = []\n TempCk = []\n TempOneCk = OneCk()\n \n # Process 1st frequency\n Tb = self.freqs[1].totalJ\n \n for b in range(-Tb, Tb, 1):\n TempOneCk.freq = b*self.freqs[1].Vph\n TempOneCk.Amp = self.freqs[1].Cjk(b)\n self.Ck.append(TempOneCk)\n \n # Process additional frequencies\n CkSize = len(self.Ck)\n Added = FALSE\n \n for f in range(2, len(self.freqs), 1):\n # Reset temporary variables\n Tb = self.freqs[f].totalJ\n TempCk = []\n \n # Calculate each Ck coefficient\n for b in range(-Tb, Tb, 1):\n for k in range(CkSize):\n TempOneCk.Amp = Ck[k].Amp * self.freq[f].Cjk(b)\n \n # Check to see if Amp is big enough to keep\n if( abs(TempOneCk.Amp) > self.min_Ck ):\n Added = FALSE\n TempOneCk.freq = self.Ck[k].freq + b*self.freqs.Vph\n \n # If freq is already in Ck, add new value to old,\n # if not, add new value and freq to spectrum\n for c in TempCk:\n if abs(c.freq-TempOneCk.freq < DOUBLE_PRECISION):\n c.Amp += TempOneCk.Amp\n Added = TRUE\n break\n \n if (not Added):\n TempCk.append(TempOneCk)\n \n self.Ck = TempCk\n CkSize = len(self.Ck)"
] |
[
"0.6924619",
"0.6861172",
"0.6594982",
"0.6594982",
"0.6565899",
"0.65652096",
"0.64793146",
"0.6433606",
"0.6409465",
"0.6385471",
"0.63177145",
"0.6312307",
"0.62819123",
"0.62664",
"0.624952",
"0.6237457",
"0.613862",
"0.61133295",
"0.6061152",
"0.60237616",
"0.6014794",
"0.5964035",
"0.5921026",
"0.5909329",
"0.59023523",
"0.58608323",
"0.58129716",
"0.5804002",
"0.57877785",
"0.5768052"
] |
0.7788513
|
0
|
Pop n oldest experiences from buffer
|
def _popN(self, n):
for _ in range(n):
self._buffer.popleft()
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def _cull_oldest(self, n=1):\n for msg_id in self.get_history()[:n]:\n self.log.debug(\"Culling record: %r\", msg_id)\n self._culled_ids.add(msg_id)\n self.drop_record(msg_id)",
"def pop(self, n):\n try:\n self._load(False)\n except KeyError:\n return\n\n # Delete the items we no longer need,\n # and most importantly decrease self.count\n key = (self.head - self.count) % self.size\n while n > 0 and self.count > 0:\n del self.db[key]\n key += 1\n if key == self.size:\n key = 0\n n -= 1\n self.count -= 1\n self.db['count'] = self.count",
"def pop(self):\n while self.number > self.maxlength:\n self.buffer.popleft()\n self.number -= 1",
"def pop():",
"def _prune(self):\n while len(self.data) > self.limit:\n self.data.popleft()",
"def top(self, N: int) -> list:\n if N == 0:\n return []\n items = self.stack[-N:]\n self.stack = self.stack[:-N]\n return items",
"def get_top_spammers(self, n):\n sql_command = \"SELECT * FROM points ORDER BY amount DESC;\"\n cursor, connection = self.execute_command_get_connection(sql_command, [])\n all = cursor.fetchall()\n\n return all[:n]",
"def tail(filep, n=10):\n with open(filep) as f:\n return list(deque(f, maxlen=n))",
"def pop(self):\r\n return self.buff.pop(-1)",
"def pop(self,n):\r\n\t\treturn self.queue.pop(0)[1]",
"def pop(self) -> int:\n old_top = self.topEle\n self.topEle = self.q1[self.n - 2]\n for i in range(self.n - 1):\n self.q2.append(self.q1[i])\n self.n -= 1\n self.q1 = self.q2\n return old_top",
"def previous():\n try:\n previousPastes = Paste.view('paste/all', limit=10).all()\n except:\n previousPastes = []\n return previousPastes",
"def pop(self):\n value = self.buffer[self.end - 1]\n self.buffer[self.end - 1] = None\n self.end = (self.end - 1) % len(self.buffer)\n return value",
"def _remove_old_items(self):\n if self.size_limit is not None:\n while len(self) > self.size_limit:\n self.popitem(last=False)",
"def pop(self):\n data = self.buffer.getvalue()\n self.buffer.seek(0)\n self.buffer.truncate()\n return data",
"def peek_list(self, n):\n return self._buffer[self.pos:self.pos+n]",
"def pop_last(self):\n self.pop_item(-1)",
"def pop(self):",
"def pop(self):",
"def pop(self, count=1):\n if count == 1:\n return self.items.pop(len(self.items) - 1)\n else:\n items = []\n for _ in range(count):\n items.append(self.items.pop(len(self.items) - 1))\n return items",
"def rePop(self):\n nScan = 0\n pScan = self.num-1\n while not self.isFull():\n while self.genepool[0][pScan] == None:\n pScan = (pScan-1)%self.num\n while self.genepool[0][nScan] != None:\n nScan = (nScan+1)%self.num\n self.genepool[0][nScan] = self.genepool[0][pScan].mutate()\n self.genepool[1][nScan] = self.genepool[1][pScan].mutate()\n nScan = (nScan+1)%self.num\n pScan = (pScan-1)%self.num",
"def rePop(self):\n nScan = 0\n pScan = self.num-1\n while not self.isFull():\n while self.genepool[0][pScan] == None:\n pScan = (pScan-1)%self.num\n while self.genepool[0][nScan] != None:\n nScan = (nScan+1)%self.num\n self.genepool[0][nScan] = self.genepool[0][pScan].mutate()\n self.genepool[1][nScan] = self.genepool[1][pScan].mutate()\n nScan = (nScan+1)%self.num\n pScan = (pScan-1)%self.num",
"def pop(self):\r\n tep = []\r\n res = -1\r\n cur = self.num\r\n cache = 0\r\n while self.queue and cur>1:\r\n cache = self.queue.pop(0)\r\n tep.append(cache)\r\n cur-=1\r\n res = self.queue.pop(0)\r\n self.topele = cache\r\n #print tep,res\r\n self.num-=1\r\n while tep:\r\n self.queue.append(tep.pop(0))\r\n return res",
"def pop_ans(self, n_cards):\n\n cards = self.answer_cards[self.used_answers : self.used_answers + n_cards]\n self.used_answers += n_cards\n\n return cards",
"def tail(iterable, n):\n if n <= 0:\n return []\n return list(deque(iterable, maxlen=n))",
"def pop_messages(self):\n msge = self.received_messages\n self.received_messages = []\n return msge",
"def tail(filepath, n):\n with open(filepath) as file_fd:\n lines = ''.join(file_fd.readlines())\n lines = lines.splitlines()[-n:]\n return lines",
"def keep_last_lines(self, num_lines):\n self.data = self.data[-num_lines:]",
"def fetch(self, n: int) -> t.List[Record]:\n self._buffer(n)\n return [\n self._record_buffer.popleft()\n for _ in range(min(n, len(self._record_buffer)))\n ]",
"def discard(self):\r\n self.pushes.pop()"
] |
[
"0.7055855",
"0.64302206",
"0.6242361",
"0.57871646",
"0.5730102",
"0.5669438",
"0.56686395",
"0.56471443",
"0.5638041",
"0.5636604",
"0.55627507",
"0.55513936",
"0.55501497",
"0.54806334",
"0.54579717",
"0.54518086",
"0.542837",
"0.5424907",
"0.5424907",
"0.5424588",
"0.54021734",
"0.54021734",
"0.53753704",
"0.53603315",
"0.53563094",
"0.53555185",
"0.53426236",
"0.5341797",
"0.5334531",
"0.5331899"
] |
0.7242431
|
0
|
output_isochrone: writes the isochrone to a file; used to save the best-fit isochrone.
|
def output_isochrone(self, file_to_name):
N = len(self.color) # length of data points
color = self.color
abs_mag = self.abs_mag
metallicity = self.metallicity
best_fit = int(self.best_fit)*np.ones(N)
age = self.age*np.ones(N)
df_out = pd.DataFrame({'color' : color, 'abs_mag' : abs_mag, 'metallicity': metallicity, 'best_fit' : best_fit, 'age' : age})
# TODO: Allow user to set their own directory
df_out.to_csv('/Users/cam/Desktop/astro_research/orion/orion_populations/best_fit_isochrones/' + file_to_name)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def write_output(self, output_path, output_filename):\n self.output_file = output_path + '/' + output_filename\n if os.path.isfile(self.output_file + '.txt'): # Creación del archivo txt de salida.\n os.remove(self.output_file + '.txt')\n file = open(self.output_file + '.txt', \"x\")\n\n self.parse_html() # Obtiene los html de entrada.\n file.write(\"############################\\n\")\n file.write(\"# ISAMI VERSION: v11.1.0 #\\n\")\n file.write(\"# INITIATION LUG #\\n\")\n file.write(\"# ISAMI_LUG VERSION: v1.0 #\\n\")\n file.write(\"############################\\n\")\n for id in self.parsed_html_dic: # Escribe la salida en el txt con el nombre del caso y kt correspondiente.\n file.writelines('-----------------------------------\\n')\n header = id + \"\\n\"\n file.writelines(header)\n file.writelines('-----------------------------------\\n')\n tables = self.read_tables(self.parsed_html_dic[id])\n info = tables[0]\n for i in info:\n file.writelines(i + \" = \" + str(info[i]) + \"\\n\")\n kt = self.find_kt(self.parsed_html_dic[id])\n file.writelines(\" Kt = \" + str(kt) + \"\\n\")\n file.close()",
"def outputtofile(filename, output):\n\n # Open outputfile in write mode, create if it doesn't exist. The with\n # statement of 'open' will take care of closing the file afterwards.\n with open(filename, 'w') as f:\n # Decode output string to UTF-8\n output = output.decode('utf-8')\n # Write all output to the file\n f.write(output)\n return",
"def write_to_file(self, filename):\n self.octree.write(str.encode(filename))\n print(\"Save octomap to \"+filename)",
"def write_fits(self, name=None, output_path=None):\n pass",
"def save_output(self, output_file_path):\r\n self.output_file.save(output_file_path)",
"def write_file_simple(self,filename):\n\n output = open(filename,\"w\")\n # write header\n output.write(\"# %1s %3s %22s %6s %22s\\n\"%(\"l\",\"n\",\"nu_theo (muHz)\",\"unused\",\"Inertia\"))\n for i in range(self.modes.shape[0]):\n output.write(\" %1d %3d %22.15e 0.0 %22.15e\\n\"%( \\\n self.modes[\"l\"][i], \\\n self.modes[\"n\"][i], \\\n self.modes[\"freq\"][i]*self.glb[ifreq_ref], \\\n self.modes[\"inertia\"][i]))\n output.close()",
"def make_file(self):\n\n f = open(get_output_path(), \"w\")\n \n f.write(self.export())\n \n f.close()\n\n return self",
"def write_file(self, i, path, fout):\n\n test_file = path + '/' + self.output[i]\n # Write file name\n print(test_file, file=fout, end='\\n\\n')\n\n extension = os.path.splitext(test_file)[1]\n if extension == '.fits' or extension == 'FITS':\n import subprocess\n prog = self.bindir + '/fits2ascii.py -i ' + test_file\n output = subprocess.check_output(prog.split(), shell=False)\n data = output.decode()\n else:\n fin = open(test_file, 'r')\n data = fin.read()\n fin.close()\n #fout.write(data)\n print(data, file=fout)\n print(file=fout, end='\\n')",
"def generate_ROI_file(FreeSurfer_ROI_file):\n\tfrom nipype.interfaces.freesurfer import MRIConvert\n\tmc = MRIConvert()\n\tmc.inputs.in_file = FreeSurfer_ROI_file\n\tmc.inputs.out_type = 'niigz'\n\tmc.run()\n\n\timport nipype.interfaces.cmtk as cmtk\n\trg = cmtk.ROIGen()\n\trg.inputs.aparc_aseg_file = FreeSurfer_ROI_file.split('.')[0] + '_out.nii.gz'\n\trg.inputs.use_freesurfer_LUT = True\n\tout_file = rg.run()\n\n\treturn out_file",
"def __export_file(self, filename, output):\n outfile = open(filename, \"w\")\n outfile.write(output)\n outfile.close\n print(\"Output written to file: \" + filename + \"\\n\")",
"def write_actual_output(self, output):\n actual_output_file = path.splitext(self.source_name)[0] + \".actual\"\n with open(actual_output_file, \"w\") as f:\n f.write(output)",
"def writeto(self, output):\n\n hdu = pyfits.PrimaryHDU(data=self.integrated_psf)\n (year, month, day, hour, minute, second, weekday, DOY, DST) = \\\n time.gmtime()\n hdu.header.update(\"DATE\", \"%4d-%02d-%02dT%02d:%02d:%02d\" %\n (year, month, day, hour, minute, second))\n hdu.header.update(\"FILENAME\", os.path.basename(output),\n comment=\"Name of this file\")\n hdu.header.update(\"INSTRUME\", self.instrument, \"Instrument name\")\n\n # Copy some specific keywords from the input header.\n ihdr = self.header\n if \"BUNIT\" in ihdr:\n hdu.header.update(\"BUNIT\", ihdr.get(\"BUNIT\"))\n if \"ERR_BUDG\" in ihdr:\n hdu.header.update(\"ERR_BUDG\", ihdr.get(\"ERR_BUDG\"),\n comment=\"Optical error budget version number\")\n if \"SI_FP\" in ihdr:\n hdu.header.update(\"SI_FP\", ihdr.get(\"SI_FP\"),\n comment=\"Focal plane for OPD calculation\")\n if \"OPD_WFE\" in ihdr:\n hdu.header.update(\"OPD_WFE\", ihdr.get(\"OPD_WFE\"),\n comment=\"OPD wavefront error (nm)\")\n if \"W\" in ihdr:\n hdu.header.update(\"W\", ihdr.get(\"W\"),\n comment=\"Flat width of hex segment (m)\")\n if \"GAP\" in ihdr:\n hdu.header.update(\"GAP\", ihdr.get(\"GAP\"),\n comment=\"Gap width between hex segments (m)\")\n if \"EDGE\" in ihdr:\n hdu.header.update(\"EDGE\", ihdr.get(\"EDGE\"),\n comment=\"Edge roll off (m)\")\n if \"SW\" in ihdr:\n hdu.header.update(\"SW\", ihdr.get(\"SW\"),\n comment=\"Obscuring strut width (m)\")\n if \"HTS\" in ihdr:\n hdu.header.update(\"HTS\", ihdr.get(\"HTS\"),\n comment=\"Height of segment isogrid\")\n if \"HT2\" in ihdr:\n hdu.header.update(\"HT2\", ihdr.get(\"HT2\"),\n comment=\"Height of secondary isogrid\")\n if \"HT3\" in ihdr:\n hdu.header.update(\"HT3\", ihdr.get(\"HT3\"),\n comment=\"Height of tertiary isogrid\")\n if \"FL\" in ihdr:\n hdu.header.update(\"FL\", ihdr.get(\"FL\"),\n comment=\"Focal length (m)\")\n\n # Add some keywords.\n if self.phase_file is not None:\n hdu.header.update(\"PHASE\", os.path.basename(self.phase_file),\n \"Name of phase image file\")\n if self.pupil_file is not None:\n hdu.header.update(\"PUPIL\", os.path.basename(self.pupil_file),\n \"Name of pupil image file\")\n hdu.header.update(\"OVERSAMP\", self.oversample, \"Oversampling factor\")\n hdu.header.update(\"CALCTYPE\", self.type,\n \"32 = single precision, 64 = double precision\")\n hdu.header.update(\"DIAMETER\", self.D, \"pupil diameter (meters)\")\n hdu.header.update(\"ORIG_NX\", self.header[\"naxis1\"],\n \"NAXIS1 in input image\")\n hdu.header.update(\"ORIG_NY\", self.header[\"naxis2\"],\n \"NAXIS2 in input image\")\n\n self.putCoordInfo(hdu)\n\n (wavelengths, weights) = self.filter\n if len(wavelengths) >= 99:\n root_wln = \"WAV\"\n root_wgt = \"WGT\"\n else:\n root_wln = \"WAVELN\"\n root_wgt = \"WEIGHT\"\n for i in range(len(wavelengths)):\n keyword = \"%s%d\" % (root_wln, i + 1)\n hdu.header.update(keyword, wavelengths[i],\n \"wavelength in microns\")\n keyword = \"%s%d\" % (root_wgt, i + 1)\n hdu.header.update(keyword, weights[i], \"weight\")\n\n ofd = pyfits.HDUList(hdu)\n try:\n ofd.writeto(output)\n except IOError as message:\n print(\"ERROR: Output file has NOT been written; \" \\\n \"use <psf>.writeto(output)\")\n print(message)\n return\n self.output_written = True",
"def write_input_file(y,z,fname):\n file = open('c:/4nec2/out/' + fname + '.nec', 'w')\n file.write('CM Seeddesign \\n')\n file.write('CM Zigzag Antenna \\n')\n file.write('CE File generated by python \\n')\n seg = 1\n\n #write the antenna\n for i in range(0,len(y)-1):\n file.write('GW %3i %3i %8.4f %8.4f %8.4f %8.4f %8.4f %8.4f %8.4f\\n' % (1,seg,0,y[i],z[i],0,y[i+1],z[i+1],1))\n\n file.write('GE 0 \\n')\n file.write('EK \\n')\n file.write('EX %3i %3i %3i %3i %3i %3i %3i\\n' % (0,1,1,1,1,0,0))\n file.write('GN -1 \\n')\n \n file.write('FR %3i %3i %3i %3i %8.4f %8.4f\\n' % (0,1,0,0,900,0))\n file.write('FR %3i %3i %3i %3i %8.4f %8.4f\\n' % (0,11,0,0,850,10))\n\n file.write('LD %3i %3i %3i %3i %8.4f %8.4f\\n' % (5,1,0,0,58000000,2))\n file.write('RP %3i %3i %3i %3i %8.4f %8.4f %8.4f %8.4f\\n' % (0,1,1,1000,90,0,0,0))\n\n file.write('EN \\n')\n file.close()",
"def write_input_file(self, outfile='input.dat', **options):\n out = ''\n if 'header' in options:\n out += options['header']\n\n if options['program'] == 'orca':\n ecp = f' NewECP \"{options[\"ecp\"]}\" end'\n\n form = ' {:<4}' + ' '*8 + ' {:> 13.8f}' * 3 + '\\n'\n for atom, xyz, charge in self.qc_mol:\n out += form.format(atom, *xyz)\n\n form = ' {:<4}' + ' {:>7.4f}' + ' {:> 13.8f}' * 3\n for atom, xyz, charge in self.br_mol:\n out += form.format(atom + '>', charge, *xyz) + ecp + '\\n'\n\n if 'separate_pc' in options:\n form = '{:>7.4f}' + ' {:> 13.8f}' * 3\n pc_out = f'{len(self.pc_mol)}\\n'\n for atom, xyz, charge in self.pc_mol:\n pc_out += form.format(charge, *xyz) + '\\n'\n with open(options['separate_pc'], 'w') as f:\n f.write(pc_out)\n else:\n for atom, xyz, charge in self.pc_mol:\n out += form.format('Q', charge, *xyz) + '\\n'\n\n out += '*'\n\n else:\n raise Exception(f'{options[\"program\"]} is not yet supported')\n\n if 'footer' in options:\n out += options['footer']\n\n with open(outfile, 'w') as f:\n f.write(out)",
"def write_po(self, outputfile):\n raise NotImplementedError(\n \"Writing to this file format is not yet implemented\")",
"def start_ocropy(file, path_out, ocropy_profile):\n print(\"Starting ocropy for:\" + file.split('/')[-1])\n\n fname = file.split('/')[-1].split('.')[0]\n create_dir(path_out)\n subprocess.Popen(args=[\"ocropus-nlbin\", file, \"-o\"+path_out+fname+\"/\"]).wait()\n subprocess.Popen(args=[\"ocropus-gpageseg\", path_out+fname+\"/????.bin.png\"]).wait()\n subprocess.Popen(args=[\"ocropus-rpred\", \"-Q 4\", path_out+fname+\"/????/??????.bin.png\"]).wait()\n test = [\"ocropus-hocr\", path_out+fname+\"/????.bin.png\", \"-o\"+path_out+\"/\"+fname.split('/')[-1]+\".html\"]\n subprocess.Popen(args=[\"ocropus-hocr\", path_out+fname+\"/????.bin.png\", \"-o\"+path_out+\"/\"+fname.split('/')[-1]+\".html\"]).wait()\n print(\"Finished ocropy for:\" + file.split('/')[-1])\n return 0",
"def create_output_file(self):\r\n self.output_file = openpyxl.Workbook()",
"def write_output(self):",
"def save_elem_file(self, output):\n with open(output, 'wb') as fid:\n self._write_elem_header(fid)\n self._write_nodes(fid)\n self._write_elements(fid)\n self._write_neighbors(fid)",
"def write_file(self):\n if self._write_file == None:\n return\n\n try:\n out = file(self._write_file, \"w\")\n except IOError, e:\n print e\n sys.exit(1)\n out.writelines(\"A cases\") \n out.close()",
"def set_output(output, binary=False):\n print(\"* Output *\")\n choice = _get_selection(\"(F)ile or (S)tring? \", \"FS\")\n if choice == 'S':\n print(output)\n else:\n filename = get_filename()\n flags = 'w'\n if binary:\n flags += 'b'\n with open(filename, flags) as outfile:\n print(\"Writing data to {}...\".format(filename))\n outfile.write(output)",
"def writeOutput(self, output):",
"def write_opal(self, file_name):\n \n return 0",
"def write(self, chars, output, format='png'):\n im = self.generate_image(chars)\n return im.save(output, format=format)",
"def to_file(self, outfile):\n\n with open(outfile, \"w\") as outf:\n outf.write(self.to_string())",
"def make_outputfile(self, solved_status, filename):\n filename = filename.split(\".\")\n filename[0] = filename[0].replace(\"Input\",\"Output\")\n str_filename = \".\"\n str_filename = str_filename.join(filename)\n # print(str_filename)\n\n f = open(str_filename,\"w+\")\n\n if(solved_status):\n string_rep = self.values_to_grid()\n ptr = 0\n for row in range(0,9):\n for col in range(0,9):\n f.write(string_rep[ptr]+ \" \")\n ptr += 1\n f.write(\"\\r\\n\") #windows compatiable formatting...\n else:\n f.write(\"Unable to solve this puzzle.\")\n\n f.close()",
"def writeCountryCodeFile(self):\n try:\n geojson = requests.get(self.GEOJSON_URL).json()\n except:\n sys.exit('GeoJSON data unavailable at source.')\n \n country_mapping = {}\n for country in geojson['features']:\n iso_2 = country['properties']['ISO_A2']\n country_name = country['properties']['ADMIN']\n country_mapping.update({country_name: iso_2})\n \n with open('countryNameISO2.json', 'w') as file:\n json.dump(country_mapping, file)",
"def write_nifti(self, output_path):\n nib.save(self.niftiImage, output_path)\n print('Image saved at: {}'.format(output_path))",
"def _write_output_file(output: str, file_name: str):\n\tfile1 = open(file_name, 'w')\n\tfile1.write(output)\n\tfile1.close()",
"def make_nifti(self, output_path=None):\n\n # save nifti\n if output_path is None:\n output = self.nifti_file\n else:\n output = output_path\n ecat2nii.ecat2nii(ecat_main_header=self.ecat_header, ecat_subheaders=self.subheaders, ecat_pixel_data=self.data,\n nifti_file=output, affine=self.affine)\n\n if 'nii.gz' not in output:\n output = helper_functions.compress(output)\n\n return output"
] |
[
"0.5922966",
"0.5694775",
"0.5694682",
"0.5676266",
"0.56761366",
"0.5656583",
"0.56536025",
"0.5648889",
"0.564605",
"0.56394976",
"0.5599014",
"0.55966353",
"0.5595966",
"0.559232",
"0.55898625",
"0.5571064",
"0.55685383",
"0.5543788",
"0.5543578",
"0.5533917",
"0.55298513",
"0.5517749",
"0.5502524",
"0.54932135",
"0.54837584",
"0.54791534",
"0.54777217",
"0.5474042",
"0.5462672",
"0.5448273"
] |
0.6751746
|
0
|
Usually, removing constant columns gives an improvement in model quality.
|
def drop_const_columns(df, drop_columns=True, print_columns=True):
    # 1. report: collect columns with fewer than 2 unique values
    SingleValueCols = []
    for col in df.columns:
        unique_count = df[col].nunique()
        if unique_count < 2:
            SingleValueCols.append(col)
            if print_columns:
                print(col, unique_count)
    print()
    print('Constant columns count: %s' % len(SingleValueCols))
    # 2. dropping
    if drop_columns:
        print('%s columns total' % df.shape[1])
        df = df.drop(columns=SingleValueCols)
        print('%s columns left' % df.shape[1])
    return df
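
# Illustrative usage sketch (not part of the original snippet): assumes pandas is
# imported as pd and uses a hypothetical toy DataFrame to show the effect.
import pandas as pd

df_demo = pd.DataFrame({'const': [1, 1, 1], 'x': [3, 5, 7]})
df_demo = drop_const_columns(df_demo)  # reports and drops 'const'; 'x' is kept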
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def strip_static_cols(df):\n for col in df.columns:\n if len((df[col]).unique()) == 1:\n df.drop(columns=[col], inplace=True)\n return df",
"def _remove_redundant_columns(self):\n self.dataframe.drop(['letter', 'sentiment'], axis=1, inplace=True)",
"def eliminateRedundantInfo(self):\n\n allEliminated = False\n edep = self.energyDependentWidths\n for colId in range(edep.nColumns)[::-1]:\n column = edep.columns[colId]\n columnData = edep.getColumn( column.name, units='eV' )\n if len(set( columnData ) ) == 1:\n setattr( self.constantWidths, column.name, PQU.PQU( PQU.pqu_float.surmiseSignificantDigits( columnData[0] ), column.units ) )\n [d.pop(colId) for d in edep.data]\n edep.columns.pop(colId)\n for idx, col in enumerate( edep.columns ): col.index = idx #re-number\n #if edep.nColumns == 1 and edep.columns[0].name == 'energy':\n # edep.columns, edep.data = [],[] # all widths are constant\n # allEliminated = True\n return allEliminated",
"def get_cols_drop():",
"def test_remove_columns(self):\n table = Table('table1', key=['col1', 'col2'])[\n Column('col1'),\n Column('col2'),\n Column('col3'),\n Column('col4'),\n ]\n\n table.remove_columns(('col2', 'col3'))\n\n self.assertEqual(2, len(table.columns))\n self.assertEqual('col1', table.columns[0].name)\n self.assertEqual('col4', table.columns[1].name)\n self.assertEqual([], table.key)",
"def _drop_cols(self, duplicate_cols):\n self._hybrid_meta.drop(\n duplicate_cols + DROPPED_COLUMNS,\n axis=1, inplace=True, errors='ignore'\n )",
"def remove_insertion_columns(self):\n cols = self.get_insertion_columns()\n s = []\n a = 0\n for b in cols:\n if b > a:\n s.append((a, b))\n a = b + 1\n s.append((a, len(self.col_labels)))\n for name, seq in list(self.items()):\n news = []\n for c in s:\n news.append(seq[c[0]:c[1]])\n self[name] = \"\".join(news)",
"def remove_intermediate_columns(dataframe):\n\n combined_dataframe_dropped_cols = dataframe.drop(columns = ['measureland_qualifier_flag_speed',\n 'measureland_qualifier_flag_distance',\n 'measureland_qualifier_flag_acceleration',\n 'measureland_qualifier_flag_visual'])\n\n print(\"Dimensions of combined dataframe after dropping columns:\", combined_dataframe_dropped_cols.shape)\n print(\"Combined dataframe after dropping columns: \", combined_dataframe_dropped_cols.sample(10))\n\n return combined_dataframe_dropped_cols",
"def remove_columns(df):\n avg = np.mean(df[df['sentiment'] != 'None']['sentiment'].astype('float'))\n df['sentiment'] = df['sentiment'].replace('None', avg).astype('float')\n\n to_remove = []\n print('column(s) removed: ')\n for column in df.columns:\n print(column)\n if(np.unique(df[column][df[column].notnull()]).shape[0] < 2):\n print(column)\n to_remove.append(column)\n \n return df.drop(columns = to_remove)",
"def drop_columns(self, col):\n try:\n self.cleaned_data.drop(col, axis=1, inplace=True)\n except Exception as e:\n raise e",
"def remove_urequired_columns(self, unrequired_columns):\n self.df = self.df.drop(columns=unrequired_columns)",
"def EliminateCols(self, cols):\n return _hypre.HypreParMatrix_EliminateCols(self, cols)",
"def test_structural_remove_columns_all_1_0(self):\n cp = Plotter.from_smiles(['CCCC', 'CCCC'], sim_type=\"structural\")\n self.assertTrue(cp._Plotter__df_descriptors.empty)",
"def required_fields(model, values):\n if values:\n for k in list(values):\n if k not in model.__table__.columns.keys():\n values.pop(k)\n return values",
"def drop_extra_columns(self):\n table = self.data.loc[:, self._required_columns]\n return self.as_dataframe(table)",
"def removeCols(self) -> List['StateNode']:\n cols = self.state[1]\n states: List[StateNode] = []\n for i in range(len(cols)):\n for j in range(i + 1, len(cols) + 1):\n # for j in range(i + 1, i + 2):\n new_cols = cols[:i] + cols[j:]\n if len(new_cols) == 0:\n continue\n states.append(StateNode(self.table, \n (self.state[0], new_cols),\n ([], cols[i:j]),\n self.cost + j - i + self.count_pairs(self.state[0], cols[i:j]),\n self))\n return states",
"def trim_features():\n pass",
"def select_columns(df):\n df = df.dropna(axis='columns', how='all') # drop columns containing only NaN\n keep_cols = [col for col in df.columns if 'normalized' not in col]\n df = df[keep_cols]\n return df",
"def get_cols_dummy():",
"def clear_columns(self):\n self._columns = []\n return self",
"def trimDf(df):\n cols = set(df.columns)\n\n cols.remove('exclamationCount') # bug in our feature extraction code\n cols.remove('price') # considered only free apps\n cols.remove('appName') # removing appNames\n\n # return df[list(cols)]\n\n\n\n return df[list(('revSent', 'appLabel'))]",
"def drop_unnecessary_columns(df):\n df = df.drop([\n 'id',\n 'imdb_id',\n 'poster_path',\n 'video',\n 'status',\n 'weighted_rating', # Only average_rating was used for this project\n 'original_title',\n 'crew', # Used in production_score\n 'producers', # Used in production_score\n 'executive_producers', # Used in production_score\n 'cast', # Used in production_score\n 'director', # Used in production_score\n 'production_companies', # Used in production_score\n 'production_countries', # Binarized\n 'genres', # Binarized\n 'original_language', # Binarized\n 'adult', # No adult movies in the dataset, so no variance between movies\n 'release_date', # Not being considered for this project\n 'overview',\n 'title',\n 'tagline',\n 'vote_average', # Ratings have been binned\n 'popularity', # Only considering average_rating\n 'vote_count', # We are making a predictor, so it makes no sense to use vote counts as input\n 'revenue', # We are making a predictor, so it makes no sense to use revenue as input\n 'keywords', # Not considering keywords for this project\n 'revenue_divide_budget', # We are making a predictor, so it makes no sense to use revenue/budget as input\n ], 1)\n return df",
"def _delete_null_columns(self):\r\n ds = DeleteStatement(self.column_family_name)\r\n deleted_fields = False\r\n for _, v in self.instance._values.items():\r\n col = v.column\r\n if v.deleted:\r\n ds.add_field(col.db_field_name)\r\n deleted_fields = True\r\n elif isinstance(col, Map):\r\n uc = MapDeleteClause(col.db_field_name, v.value, v.previous_value)\r\n if uc.get_context_size() > 0:\r\n ds.add_field(uc)\r\n deleted_fields = True\r\n\r\n if deleted_fields:\r\n for name, col in self.model._primary_keys.items():\r\n ds.add_where_clause(WhereClause(\r\n col.db_field_name,\r\n EqualsOperator(),\r\n col.to_database(getattr(self.instance, name))\r\n ))\r\n self._execute(ds)",
"def _clean_up_columns(\n self):\n self.log.debug('starting the ``_clean_up_columns`` method')\n\n tableName = self.dbTableName\n\n print \"cleaning up %(tableName)s columns\" % locals()\n\n sqlQuery = u\"\"\"\n set sql_mode=\"STRICT_TRANS_TABLES,NO_ZERO_IN_DATE,NO_ZERO_DATE,ERROR_FOR_DIVISION_BY_ZERO,NO_AUTO_CREATE_USER,NO_ENGINE_SUBSTITUTION\";\n \"\"\" % locals()\n writequery(\n log=self.log,\n sqlQuery=sqlQuery,\n dbConn=self.cataloguesDbConn,\n )\n\n sqlQuery = u\"\"\"\n update %(tableName)s set dist_mod_err = null where dist_mod_err = 0;\n update %(tableName)s set dist_in_ned_flag = null where dist_in_ned_flag = \"\";\n update %(tableName)s set notes = null where notes = \"\";\n update %(tableName)s set redshift = null where redshift = 0;\n update %(tableName)s set dist_derived_from_sn = null where dist_derived_from_sn = \"\";\n update %(tableName)s set hubble_const = null where hubble_const = 0;\n update %(tableName)s set lmc_mod = null where lmc_mod = 0;\n update %(tableName)s set master_row = 0;\n update %(tableName)s set master_row = 1 where primaryId in (select * from (select distinct primaryId from %(tableName)s group by galaxy_index_id) as alias);\n \"\"\" % locals()\n writequery(\n log=self.log,\n sqlQuery=sqlQuery,\n dbConn=self.cataloguesDbConn,\n )\n\n self.log.debug('completed the ``_clean_up_columns`` method')\n return None",
"def drop_one_elem_columns(self, df):\n df_ = df.copy()\n\n # Incldue columns in dataframe\n include_idx = []\n for i in df_.columns:\n len_unique = df_[i].dropna().unique().size\n if len_unique > 1:\n include_idx.append(i)\n\n df_ = df_[include_idx]\n return df_",
"def test_clean_columns():\n assert clean_columns('Id, AdCampaignId, CampaignId') == ['id', 'adCampaignId', 'campaignId']",
"def exclude_cols(self, *_, **__) -> Tuple[str, ...]:",
"def drop_attributes(df, cutoff=25, extra_add=[]):\n\n df_copy = df.copy()\n\n attributs_drop = []\n for var in sorted(df.columns):\n series = df[var]\n perc_missing = 100 - series.count() / len(series) * 100\n\n if perc_missing > cutoff:\n attributs_drop.append(var)\n else:\n continue\n\n if len(extra_add) == 0:\n df_copy.drop(attributs_drop, axis=1, inplace=True)\n\n else:\n attributs_drop = attributs_drop + extra_add\n df_copy.drop(attributs_drop, axis=1, inplace=True)\n\n return df_copy",
"def filter_cols(df):\n comm_keys = list( set(df.keys()) & set(KEYS_FOR_ML) )\n filt_col_df = df.copy()[comm_keys]\n\n return filt_col_df",
"def drop_columns(self, columns):\n dframe = self.dframe(keep_parent_ids=True)\n self.replace_observations(dframe.drop(columns, axis=1))"
] |
[
"0.6315389",
"0.62467325",
"0.62432563",
"0.6171818",
"0.61261225",
"0.601501",
"0.600602",
"0.5981777",
"0.59378827",
"0.58547854",
"0.57989967",
"0.5787804",
"0.5746577",
"0.5736866",
"0.5727179",
"0.5697473",
"0.5691183",
"0.56775194",
"0.56136245",
"0.55742145",
"0.5566863",
"0.55577695",
"0.5557523",
"0.5550479",
"0.55493337",
"0.5544904",
"0.5512738",
"0.55080926",
"0.55059385",
"0.5501895"
] |
0.6415829
|
0
|
1. Find date columns automatically. 2. Convert them to datetime format.
|
def find_date_columns(df):
def look_to_date(s):
dates = {date: pd.to_datetime(date) for date in s.unique()}
return s.apply(lambda v: dates[v])
date_cols = []
for col in df.select_dtypes(include=['object']).columns:
try:
df[col] = look_to_date(df[col])
print(col)
date_cols.append(col)
except ValueError:
pass
return df
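
# Illustrative usage sketch (not part of the original snippet): assumes pandas is
# imported as pd; the column names and values are hypothetical.
import pandas as pd

df_demo = pd.DataFrame({'when': ['2020-01-01', '2020-02-01'], 'city': ['Oslo', 'Bergen']})
df_demo = find_date_columns(df_demo)  # 'when' is parsed to datetime; 'city' stays as object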
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def recognize_dates(dframe):\n for i, dtype in enumerate(dframe.dtypes):\n if dtype.type == np.object_:\n column = dframe.columns[i]\n new_column = _convert_column_to_date(dframe, column)\n\n if not new_column is None:\n dframe[column] = new_column\n\n return dframe",
"def _convert(frame):\n frame = frame.convert_objects(convert_numeric=True)\n for column in frame:\n if column in c.dates:\n frame[column] = frame[column].astype('datetime64')\n return frame",
"def csv_handle_changedate(self,col_name,col_type):\n table = self.csv_dataframe\n if col_type == 'date':\n table[col_name] = pd.to_datetime(table[col_name]).dt.date\n elif col_type == 'datetime':\n table[col_name] = pd.to_datetime(table[col_name]).dt.to_pydatetime()\n elif col_type == 'year':\n table[col_name] = pd.to_datetime(table[col_name].apply(lambda x: str(x)+'/1/1')).dt.date",
"def convert_date_string(df,col_name):\n df[col_name] = pd.to_datetime(df[col_name], infer_datetime_format=True)\n return df",
"def fix_dates(self, row):\r\n for field in self.date_fields:\r\n if field in row:\r\n if not type(row[field]) is datetime:\r\n try:\r\n row[field] = datetime.fromtimestamp(float(row[field]))\r\n except Exception as e:\r\n row[field] = None",
"def find_date_columns(self) -> list:\n\n logger.info(\"Looking for date columns\")\n\n def look_for_date(column_i: pd.Series):\n dates = {date: pd.to_datetime(date) for date in column_i.unique()}\n return column_i.apply(lambda x: dates[x])\n\n date_columns = []\n possible_date = list(self.dataframe.select_dtypes(include=[\"datetime\"]).columns)\n if possible_date:\n logger.info(\"Date columns with native date format was found\")\n logger.debug(\n f\"there are {len(possible_date)} date column with native format (datetime)\"\n )\n\n date_columns = [x for x in possible_date]\n\n logger.debug(\n f\"the columns that contain native date format are {date_columns}\"\n )\n\n for col in self.dataframe.select_dtypes(include=[\"object\"]).columns:\n try:\n self.dataframe[col] = look_for_date(self.dataframe[col])\n date_columns.append(col)\n logger.info(f\"column {col} has date data type\")\n except ValueError:\n logger.debug(f\"{col} has no date data type\")\n pass\n return date_columns",
"def updatetotimeformat(tweetdf, colname):\r\n for i in range(len(tweetdf)):\r\n tweetdf.loc[i,colname] = parser.parse(tweetdf.loc[i,colname])\r\n \r\n return tweetdf",
"def date_formatting(df):\n from datetime import datetime\n sub_df = df.iloc[:, 1:]\n for i in range(0, len(sub_df.columns)):\n if i <= 2:\n pass\n else:\n date_string = sub_df.columns[i]\n d1 = datetime.date(datetime.strptime(date_string, '%m/%d/%y'))\n d2 = str(d1)\n sub_df.rename(columns={date_string: d2}, inplace=True)\n return sub_df",
"def datetime_column(filepath, skiprows, skipcolumns):\n df = pd.read_csv(filepath, skiprows=skiprows)\n df = df.drop(columns = skipcolumns)\n# df = df.head(10)\n \n# return df\n\n def try_parse(df):\n# print(df.iloc[1, :])\n # try parsing some rows from each column as date\n head = df.head()\n tail = df.tail()\n for column in df.columns:\n try:\n# print(dateutil.parser.parse(df[column].iloc[-1]))\n dt_head = dateutil.parser.parse(head[column].iloc[-1])\n dt_tail = dateutil.parser.parse(tail[column].iloc[-1])\n# print('possible datetime')\n# if not date.time() == datetime.time():\n if not dt_head.time() == dt_tail.time():\n if not dt_head.date() == dt_tail.date():\n # time seems to be present (not default parser value)\n return column\n except:\n continue\n return None\n \n # try without modifying values\n rv = try_parse(df=df)\n if rv:\n return rv\n \n # try modifying values\n chars = ['-', '_', '/', '#']\n for char in chars:\n dfc = df.copy()\n for col in dfc.columns:\n try:\n dfc[col] = dfc[col].str.split(char).str.join(' ')\n except:\n pass # will only work for str type\n# print(char, dfc.iloc[1, :])\n rv = try_parse(df=dfc)\n if rv:\n return rv",
"def fix_dates(df, column=None):\n if isinstance(column, list):\n for x in column:\n df[x] = pd.to_datetime(df[x], errors='coerce')\n df[x] = df[x].dt.strftime('%m-%d-%Y')\n df[x].replace('NaT', np.nan, inplace=True)\n return df\n else:\n df[column] = pd.to_datetime(df[column], errors='coerce')\n df[column] = df[column].dt.strftime('%m-%d-%Y')\n df[column].replace('NaT', np.nan, inplace=True)\n return df",
"def datetime_columns(df, feature):\r\n df['day'] = pd.to_datetime(df[feature]).dt.day\r\n df['month'] = pd.to_datetime(df[feature]).dt.month\r\n df['year'] = pd.to_datetime(df[feature]).dt.year\r\n return df",
"def date_cols_gen(df: pd.DataFrame) -> pd.DataFrame:\n # Get state action columns\n date_cols = [\n col\n for col in df.columns\n if bool(re.search(r\"(?<![^\\s_-])date(?![^\\s_-])\", col, flags=re.IGNORECASE))\n ]\n for d_col in date_cols:\n # Infer datetime in case format conventions change on ihme side\n df[d_col] = pd.to_datetime(df[d_col], infer_datetime_format=True)\n\n return df",
"def ensure_datetime_and_sort(df):\n datetime_cols = [\n \"Date_Reported_As_Of\",\n \"Design_Start\",\n \"Original_Schedule\",\n \"Forecast_Completion\",\n ]\n\n for col in datetime_cols:\n df[col] = pd.to_datetime(df[col])\n\n # make sure data is sorted properly\n df = df.sort_values(by=[\"PID\", \"PID_Index\"])\n\n return df",
"def _preprocess_temporal_columns(df: DataFrame) -> DataFrame:\n for col in df.select_dtypes(include=[\"datetime64[ns, UTC]\"]):\n df = df.astype({col: \"O\"})\n for col in df.select_dtypes(include=\"timedelta64[ns]\"):\n df = df.astype({col: \"O\"})\n return df",
"def df_multicolumn_date_to_datetime(row):\n year = row['arrival_date_year']\n month = row['arrival_date_month']\n day = row['arrival_date_day_of_month']\n # create datetime object from string of form \"YearMonthDay\" using full month name\n return datetime.datetime.strptime(f\"{year}{month}{day}\", '%Y%B%d').date()",
"def create_datetime_column(df):\n df[\"datetime\"] = pd.to_datetime(df[[\"year\", \"month\", \"day\"]])\n return df.drop([\"year\", \"month\", \"day\"], axis=1)",
"def recognize_dates_from_schema(dframe, schema):\n dframe_columns = dframe.columns.tolist()\n\n for column, column_schema in schema.items():\n if column in dframe_columns and\\\n schema.is_date_simpletype(column):\n new_column = _convert_column_to_date(dframe, column)\n\n if not new_column is None:\n dframe[column] = new_column\n\n return dframe",
"def __parse_dates(df):\n\t\tdf['release_date'] = pd.to_datetime(df['release_date'])\n\t\tdf['release_date'] = df['release_date'].fillna(df['release_date'].median())\n\t\tdf['year'] = df['release_date'].dt.year\n\t\tdf['month'] = df['release_date'].dt.month\n\t\tdf['day'] = df['release_date'].dt.weekday\n\t\tdf = pd.get_dummies(df, columns=['month', 'day'])\n\t\treturn df",
"def _get_datetime_cols(df):\n dtypes = df.dtypes\n return dtypes.loc[dtypes.apply(lambda x: str(x).startswith('datetime'))].index.tolist()",
"def get_datetime(df, col_name, dayfirst=False, yearfirst=False, use_format=False):\n if use_format:\n format = \"%d/%m/%Y\"\n return pd.to_datetime(df[col_name], dayfirst=dayfirst, yearfirst=yearfirst, format=format)",
"def get_cols_for_datetime(train: NumpyOrPandas) -> Tuple[List[str], List[str]]:\n base_dates = get_columns_by_role(train, \"Datetime\", base_date=True)\n datetimes = get_columns_by_role(train, \"Datetime\", base_date=False) + get_columns_by_role(\n train, \"Datetime\", base_date=True, base_feats=True\n )\n\n return base_dates, datetimes",
"def createDateColumn(dataframe):\n\n newDatecol = []\n format_str = r\"%Y-%m-%dT%H:%M:%SZ\"\n for i in dataframe['node.mergedAt']:\n if (i is not None):\n # making the string to a datetime format\n newdate = datetime.strptime(i, format_str)\n # appending to the list as a date\n newDatecol.append(newdate.date())\n if (i is None):\n newDatecol.append(\"None\")\n dataframe['Date Merged'] = newDatecol\n\n return dataframe",
"def datetime_preprocessing_pipeline(ddf, datetime_columns_to_transform = []):\n from project.utils.preprocessing.datetime_to_cat import add_datetime_cat\n \n ddf, new_categorical_columns = add_datetime_cat(ddf, datetime_columns_to_transform)\n ddf = ddf.drop(datetime_columns_to_transform, axis=1)\n \n return ddf",
"def _convert_column_to_date(dframe, column):\n try:\n return dframe[column].apply(parse_date)\n except AttributeError:\n # it is already a datetime\n pass\n except ValueError:\n # it is not a correctly formatted date\n pass\n except OverflowError:\n # it is a number that is too large to be a date\n pass",
"def _detect_columns_to_fold_dates(self):\n result = list()\n for index in range(len(self._column_names)):\n column_name = self._column_names[index]\n # do not want 12 to be parsed as date, minimum length should be 4 (year in YYYY format)\n if len(column_name) >= 4:\n try:\n # for now strict parsing is true, otherwise it'll parse 'year' as valid date.\n # in future, we'll have to specify date formats\n parsed_column_as_date = dateparser.parse(column_name, settings={'STRICT_PARSING': True})\n if parsed_column_as_date:\n # column_name has been parsed as a valid date, it is a candidate for fold\n result.append(index)\n except:\n # something went wrong, doesn't matter what\n pass\n return result",
"def date_cleaner(dataset):\n dataset['document_last_edition'] = dataset['meta_lastEdition']\n dataset = dataset.drop(['meta_lastEdition'], axis=1)\n \n \n \"\"\"\n Get column to correct date format\n \"\"\"\n dataset['document_last_edition'] = dataset['document_last_edition'].apply(lambda x: str(unify_date_format(x))[:10]) \n \n \n \"\"\"\n meta_lastPublication renaming\n \"\"\"\n dataset['document_last_publication'] = dataset['meta_lastPublication']\n dataset = dataset.drop(['meta_lastPublication'], axis=1)\n\n # DROP HOURS/M/S\n dataset['document_last_publication'] = dataset['document_last_publication'].apply(lambda x: str(unify_date_format(x))[:10]) \n \n \n # META CREATED DATE\n dataset['meta_created_date'] = dataset['meta_created_date'].str.replace('_', '-')\n dataset['meta_created_date'] = dataset['meta_created_date'].apply(lambda x: str(unify_date_format(x))[:10])\n dataset['document_created_at'] = dataset['meta_created_date']\n dataset = dataset.drop(['meta_created_date'], axis=1)\n\n # META_REVISED_MODIFIED\n dataset['document_revised_modified'] = dataset['meta_revised_modified']\n dataset = dataset.drop(['meta_revised_modified'], axis=1) \n \n \n date_column_list = ['document_created_at','document_last_edition', 'document_last_publication', 'document_revised_modified']\n \n \"\"\"\n \n THE PLAN IS TO FIRST REPLACE EMPTY SPOTS IN META_CREATED_DATE WITH CREATED_AT\n THEN WE DROP CREATED_AT\n THEN WE REPLACE EMPTY SPOTS IN OTHER COLUMNS WITH document_created_at\n \"\"\" \n \n dataset[date_column_list] = dataset[date_column_list].replace('Not Specified', np.nan)\n dataset[date_column_list] = dataset[date_column_list].replace('Not Specif', np.nan)\n dataset[date_column_list] = dataset[date_column_list].replace('nan', np.nan) \n dataset['document_created_at'].fillna(dataset['created_at'], inplace=True) \n dataset = dataset.drop(['created_at'], axis=1)\n \n dataset['document_last_edition'].fillna(dataset['document_created_at'], inplace=True)\n dataset['document_last_publication'].fillna(dataset['document_created_at'], inplace=True)\n dataset['document_revised_modified'].fillna(dataset['document_created_at'], inplace=True)\n \n \n\n \n \"\"\"\n FIXING NON-EXISTING DATES IN DATASET\n \"\"\"\n \n dataset = dataset.replace(['2020-1-29'], ['2020-01-29'])\n \n \n \n created_at_unique = list(dataset['document_created_at'].unique())\n last_edition_unique = list(dataset['document_last_edition'].unique())\n last_publication_unique = list(dataset['document_last_publication'].unique())\n revised_modified_unique = list(dataset['document_revised_modified'].unique())\n \n \n # IF LIST NEED TO GET UPDATED\n invalid_created_at = is_valid_date(created_at_unique)\n invalid_last_edition_unique = is_valid_date(last_edition_unique)\n invalid_last_publication_unique = is_valid_date(last_publication_unique)\n invalid_revised_modified_unique = is_valid_date(revised_modified_unique) \n invalid_dates = list(set(itertools.chain(invalid_created_at, invalid_last_edition_unique, invalid_last_publication_unique, invalid_revised_modified_unique)))\n \n \n \n \n # Non-existing dates from the list\n dataset = dataset.replace(['2019-04-31', '2016-11-31', '2019-09-31', '2015-02-31', '2017-04-31', '2015-11-31', '2015-09-31', '2017-02-29', '2018-09-31', '2017-06-31', '2018-04-31', '2015-04-31', '2018-11-31', '2017-09-31', '2015-02-29', '2019-02-29', '2019-06-31', '2018-02-29', '2016-02-30', '2016-06-31', '2016-09-31', '2018-06-31', '2019-18-03', '2020-02-31', '9999-12-31'], \n ['2019-04-30', '2016-11-30', '2019-09-30', 
'2015-02-28', '2017-04-30', '2015-11-30', '2015-09-30', '2017-02-28', '2018-09-30', '2017-06-30', '2018-04-30', '2015-04-30', '2018-11-30', '2017-09-30', '2015-02-28', '2019-02-28', '2019-06-30', '2018-02-28', '2016-02-28', '2016-06-30', '2016-09-30', '2018-06-30', '2019-03-18', '2020-02-28', '1999-12-31'])\n\n\n \n \n\n\n return dataset",
"def date_wrangler(self, date_col):\n data = self.copy() # Create a copy of the DataFrame\n\n # Convert target column to datetime\n data[date_col] = pd.to_datetime(\n data[date_col], infer_datetime_format=True\n )\n\n # Split column into datetime components\n data[f\"{date_col}_year\"] = data[date_col].dt.year\n data[f\"{date_col}_year\"] = data[date_col].dt.month\n data[f\"{date_col}_year\"] = data[date_col].dt.day\n\n # Drop original column\n data = data.drop(columns=date_col)\n\n return data",
"def _parse_date_columns(data_frame, parse_dates):\n # handle non-list entries for parse_dates gracefully\n if parse_dates is True or parse_dates is None or parse_dates is False:\n parse_dates = []\n\n if not hasattr(parse_dates, '__iter__'):\n parse_dates = [parse_dates]\n\n for col_name in parse_dates:\n df_col = data_frame[col_name]\n try:\n fmt = parse_dates[col_name]\n except TypeError:\n fmt = None\n data_frame[col_name] = _handle_date_column(df_col, format=fmt)\n\n return data_frame",
"def convert_to_datetime(df, column):\n datetime_format = '%Y-%m-%d'\n return pd.to_datetime(df[column], format=datetime_format)",
"def _process_date(self, data):\n def helper(val):\n # Sometime the date has a (1) or (2) following it. Strip that off\n # so that we can successful convert to date.\n s = val.find(\" (\")\n if s >= 0:\n val = val[0:s]\n dv = dt.datetime.strptime(val, '%A, %b %d')\n dv = dv.replace(year=self.start_date.year)\n return dv\n data['Date'] = data['Date'].apply(helper)\n return data"
] |
[
"0.692748",
"0.69037545",
"0.67863965",
"0.67673683",
"0.67539966",
"0.6692597",
"0.6667349",
"0.66223633",
"0.65745556",
"0.6555922",
"0.6553615",
"0.65185064",
"0.64774036",
"0.6430655",
"0.6345287",
"0.63288176",
"0.63201207",
"0.62785125",
"0.6272471",
"0.6249522",
"0.62456274",
"0.6230649",
"0.61785805",
"0.61347836",
"0.608775",
"0.60749525",
"0.6039514",
"0.6028903",
"0.60178953",
"0.6012389"
] |
0.7120016
|
0
|
Displays received messages. This ensures that received messages don't get lost during the time the display takes.
|
def display_messages(self):
while self.joined:
if len(self.messages) != 0:
            #: Iterate over a snapshot of the list so that removing processed
            #: messages below does not skip the entry that follows each removal.
            for msg in list(self.messages):
                #: If the message is empty, ignore it.
                if msg == "":
                    continue
                #: If the message is "close", then the server has told the client
                #: to shut down, so it will. This is not an issue, as users'
                #: messages will always have an identifier and : before their
                #: message, thus, the only messages that don't include an
                #: identifier will be from the server itself.
elif msg[:5] == "close":
reason = msg[6:]
print("This client was closed due to {}.".format(reason))
self.quit(True)
#: Otherwise, print the message to the commandline.
elif not self.silent:
print('\r' + msg, end='')
print("\nYou: ", end='')
self.displayed_you = True
#: Remove the processed message
self.messages.remove(msg)
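
A minimal companion sketch (an assumption, not part of the original client): the loop above only works if some other thread keeps appending incoming lines to self.messages, for example a socket receive loop like the one below. The attribute names (self.sock, self.messages, self.joined) simply mirror the ones display_messages already uses.

def receive_messages(self):
    #: Hypothetical receiver; intended to run in its own thread so messages
    #: keep arriving while display_messages() is busy printing.
    while self.joined:
        try:
            data = self.sock.recv(4096).decode()
        except OSError:
            break
        #: One list entry per line keeps each message whole, so the display
        #: loop above never prints a partially received message.
        for line in data.split("\n"):
            if line:
                self.messages.append(line)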
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def show_messages(self):\n for msg in self.messages:\n print msg['text']",
"def show_messages(self):\n if not self.messages:\n u_print(\" Queue.show_messages() ERR - There is no messages or malformed messages on queue. \")\n u_print(json.dumps(self.messages, indent=4))\n sys.exit(1)\n\n try:\n for m in self.messages:\n self.show_message(m.body)\n except:\n raise",
"def display_messages(self, layout):",
"def displayMessages(window,messages=['']):\n \n # update messages text\n message_in_line = ''\n for msg in messages:\n message_in_line += '\\n'+msg\n\n window['messages'].update(f'{message_in_line}')",
"def display_message():",
"def process_chatter(self, msg):\n # note, nothing in here is ROS specific, it's just python code that\n # runs when new info appears\n\n print msg.data # print the recieved message\n\n self.msgs_recieved += 1 # increase msg count\n self.msgs_recieved %= 500 # mod 500 so we don't get enormous numbers\n self.msg = \"%d messages recieved\" % self.msgs_recieved # set message",
"def show_messages(self):\n self.masterlog.revealme()",
"def display_messenger_status(self):\n caller = self.caller\n unread = caller.messages.pending_messengers\n read = caller.messages.messenger_history\n if not (read or unread):\n caller.msg(\n \"You have no messengers waiting for you, and have never received any messengers.\"\n + \" {wEver{n. At all. Not {rone{n.\"\n )\n if read:\n caller.msg(\"You have {w%s{n old messages you can re-read.\" % len(read))\n if unread:\n caller.msg(\n \"{mYou have {w%s{m new messengers waiting to be received.\" % len(unread)\n )",
"def display_received_table(self, num_disp, old):\n caller = self.caller\n msgtable = PrettyTable(\n [\"{wMsg #\", \"{wSender\", \"{wIC Date\", \"{wOOC Date\", \"{wSave\"]\n )\n mess_num = 1\n old = old[:num_disp]\n for mess in old:\n try:\n name = caller.messages.get_sender_name(mess)\n except AttributeError:\n mess = reload_model_as_proxy(mess)\n print(\n \"Error: Had to reload Msg ID %s as Messenger when displaying received table.\"\n % mess.id\n )\n name = caller.messages.get_sender_name(mess)\n date = caller.messages.get_date_from_header(mess) or \"Unknown\"\n ooc_date = mess.db_date_created.strftime(\"%x\")\n saved = \"{w*{n\" if mess.preserved else \"\"\n msgtable.add_row([mess_num, name, date, ooc_date, saved])\n mess_num += 1\n self.msg(msgtable)",
"def list_messages(self):",
"def display_message(self, message):\n\t\tself.render('message.html', {'message': message})",
"def showMessage(self):",
"async def messages(self, ctx):\n\n\t\tawait self.message_leaderboard(ctx, \"messages\")",
"def display(self,message):\r\n \r\n print(message)",
"def display_message(self, message):\n params = {\n 'message': message\n }\n self.render_template('message.html', params)",
"def display_message(self, message):\n with self.lock:\n self.messages_list.configure(state='normal')\n self.messages_list.insert(tk.END, message)\n self.messages_list.configure(state='disabled')\n self.messages_list.see(tk.END)",
"def display_message(self, message):\n with self.lock:\n self.messages_list.configure(state='normal')\n self.messages_list.insert(tk.END, message)\n self.messages_list.configure(state='disabled')\n self.messages_list.see(tk.END)",
"def read_messages(self, msg_num):\r\n self.clear_screen()\r\n user_label = Label(self.root, text=\"Hello \" + self.username, font=self.title_font,\r\n bg=self.bg_color, height=2)\r\n user_label.pack(pady=5, padx=50)\r\n lbl_msg = Label(self.root, text=\"Message \" + str(msg_num), font=self.title_font,\r\n bg=self.bg_color)\r\n lbl_msg.pack(pady=5, padx=10)\r\n self.refresh_button = Button(self.root, text=\"Refresh page\", font=self.text_font,\r\n bg=self.bg_color, command=lambda: self.refresh(msg_num))\r\n self.refresh_button.pack(padx=10, pady=10)\r\n messages_frame = Frame(self.root)\r\n messages_frame.pack(padx=30, pady=15)\r\n scrollbar_msg = Scrollbar(messages_frame)\r\n scrollbar_msg.pack(side=RIGHT, fill=Y)\r\n text_widget = Text(messages_frame, width=50, height=15, font=self.text_font,\r\n yscrollcommand=scrollbar_msg.set)\r\n text_widget.pack()\r\n scrollbar_msg.config(command=text_widget.yview)\r\n button_send = Button(self.root, text=\"go back\", font=self.text_font,\r\n height=2, width=20, command=self.go_back_read)\r\n button_send.pack(pady=5, side=BOTTOM)\r\n button_send = Button(self.root, text=\"see/close message\\ncontrol panel\",\r\n font=self.text_font,\r\n height=2, width=20,\r\n command=lambda: self.new_window_messages(button_send))\r\n button_send.pack(pady=5, side=BOTTOM)\r\n if self.msg_list:\r\n if msg_num < len(self.msg_list):\r\n next_msg = Button(self.root, text=\"next message\", font=self.text_font,\r\n height=2, width=20,\r\n command=lambda: self.read_messages(msg_num + 1))\r\n next_msg.pack(pady=5, padx=5, side=RIGHT)\r\n if msg_num > 1:\r\n previous_msg = Button(self.root, text=\"previous message\", font=self.text_font,\r\n height=2, width=20,\r\n command=lambda: self.read_messages(msg_num - 1))\r\n previous_msg.pack(pady=5, padx=5, side=LEFT)\r\n text_widget.insert(END, \"from: \" + self.msg_list[msg_num - 1][2] + \"\\n\")\r\n text_widget.tag_add('sender', '1.0', '1.end')\r\n text_widget.tag_config('sender', font='none 14')\r\n\r\n text_widget.insert(END, self.msg_list[msg_num - 1][0])\r\n text_widget.tag_add('msg', '2.0', END)\r\n text_widget.tag_config('msg', font='none 12')\r\n\r\n text_widget.config(state=DISABLED)",
"def show_messages():\n\n messages = Message.query.all()\n # translation_list = [\"\"]\n\n for message in messages:\n # message.translation gives list of objects. All the translation for the \n # language. Here assgin it to one trans_text based on user's language\n # selection. \n message.translation = Translation.query.filter_by(language=g.user.language, \n message_id=message.message_id).first()\n\n return render_template(\"messages.html\", messages=messages, user=g.user)",
"def _keep_getting_new_messages(self):\n while True:\n new_messages = self.get_new_messages()\n for message in new_messages:\n self.handle(message)\n time.sleep(self.refresh_delay)",
"def showRecvMsg(self, recvmsg):\r\n s = self.bytesToStr(recvmsg, self.chkHexShow.isChecked())\r\n self.txtRecvMsg.append(s)\r\n # self.txtRecvMsg.setPlainText(self.txtRecvMsg.toPlainText() + s)\r\n\r\n self.m_count[0] += len(s)\r\n self.showCount(self.m_count)\r\n\r\n if self.m_callback != 0:\r\n self.parseRecvMsg(msg)",
"def recv_messages(self):\n while True:\n b = unwrap_read(self.sock.recv(4096))\n msgs = self.parser.feed(b)\n if msgs:\n for msg in msgs:\n self.router.incoming(msg)\n return",
"def show_message(request):\n return render_to_response('woodstock/messages/view.html', {},\n context_instance = RequestContext(request))",
"def index(request):\n messages = SESSION.get_messages_sent_list(request.session)\n feedbacks = SESSION.get_messages_received_list(request.session)\n \n # initially display the first 20 messages/feedback chronologically\n messages.sort(key=lambda r: r.createdAt, reverse=True)\n feedbacks.sort(key=lambda r: r.createdAt, reverse=True)\n \n # prepare our template context variables use in our messages template\n data = {\n 'messages_nav': True,\n 'messages': messages[:PAGINATION_THRESHOLD],\n 'feedback': feedbacks[:PAGINATION_THRESHOLD],\n 'pag_threshold': PAGINATION_THRESHOLD,\n 'pag_page': 1,\n 'sent_count': len(messages),\n 'feedback_count': len(feedbacks),\n 'tab_feedback': request.GET.get('tab_feedback'),\n }\n \n if SESSION.get_patronStore_count(request.session):\n data['has_patrons'] = True\n \n # inserting this success and error message into the template\n # should be done in a cleaner way - this was done by the first guy\n # I just didn't bother changing it.\n if request.GET.get(\"success\"):\n data['success'] = request.GET.get(\"success\")\n if request.GET.get(\"error\"):\n data['error'] = request.GET.get(\"error\")\n \n return render(request, 'manage/messages.djhtml', data)",
"def list(request):\r\n usermessages = request.user.profile.recent_messages()\r\n d = {\r\n 'form': NewMessageForm(),\r\n 'usermessages': usermessages,\r\n 'title': 'Messages',\r\n }\r\n return render_to_response('usermessages/list.html', d, \r\n context_instance=RequestContext(request))",
"def show_message(messages):\n for message in messages:\n printed_message = f\"{message}\"\n print(printed_message)",
"def _hear_message_from_server(self):\n while self.is_alive:\n data = self._socket.recv(1024)\n content = loads(data)\n self._current_list = content\n print(\"Servidor: {}\".format(content))",
"def bus_messages(self):\n\n output = []\n for message in self.__bus_messages:\n if time.time() - message['time'] > BusController.MESSAGES_TTL:\n self.__bus_messages.remove(message)\n output.append(f\"l{message['sender'].line_num}-s{message['sender'].station_num} sent: {message['text']}\")\n while len(output)<BusController.MAX_MESSAGES_TO_DISPLAY:\n output.append(\"\")\n return output",
"def show_msgdialog(self):\n log_msg = log.getBufferAsString()\n if not log_msg:\n return\n\n # initialise message dialog\n msg_dialog = msgdialog.MessageDialog(None, -1, \"\")\n msg_dialog.msg_list.InsertColumn(0, \"\")\n\n # clear dialog and show new messages\n msg_dialog.msg_list.Freeze()\n msg_dialog.msg_list.DeleteAllItems()\n for line in log_msg.split('\\n'):\n msg_dialog.msg_list.Append([line, ])\n msg_dialog.msg_list.SetColumnWidth(0, -1)\n msg_dialog.msg_list.Thaw()\n msg_dialog.ShowModal()\n msg_dialog.Destroy()",
"def display_sent_table(self, num_disp, old):\n msgtable = PrettyTable([\"{wMsg #\", \"{wReceiver\", \"{wDate\"])\n mess_num = 1\n old = old[:num_disp]\n for mess in old:\n receiver = mess.receivers\n if receiver:\n receiver = receiver[0]\n name = receiver.key\n else:\n name = \"Unknown\"\n try:\n date = self.caller.messages.get_date_from_header(mess) or \"Unknown\"\n except AttributeError:\n mess = reload_model_as_proxy(mess)\n print(\n \"Error: Had to reload Msg ID %s as Messenger when displaying sent table.\"\n % mess.id\n )\n date = self.caller.messages.get_date_from_header(mess) or \"Unknown\"\n msgtable.add_row([mess_num, name, date])\n mess_num += 1\n self.msg(msgtable)\n return"
] |
[
"0.7500149",
"0.70102125",
"0.68906677",
"0.6661795",
"0.6626118",
"0.66245234",
"0.65538955",
"0.6548553",
"0.6502435",
"0.6483911",
"0.6469836",
"0.64564407",
"0.64451647",
"0.63219106",
"0.6284495",
"0.62636817",
"0.62636817",
"0.6240366",
"0.62241864",
"0.6174454",
"0.61685723",
"0.6166167",
"0.61647725",
"0.61579895",
"0.6149006",
"0.6126044",
"0.61192816",
"0.6117996",
"0.6116483",
"0.6109617"
] |
0.7889956
|
0
|
Generates a list of accessible reciprocal lattice vectors. To be accessible, the magnitude of an rlv's wavevector must be less than twice that of the input radiation's wavenumber.
|
def find_accessible_rlvs(crystal, wavelength):
# The wavenumber of the input wavelength
nu = 2*n.pi/wavelength
# Now we find the shortest distance to a wall of a
# parallelogram "shell" in the reciprocal lattice
min_step = min(abs(n.dot(
(crystal.rlattice[0]+crystal.rlattice[1]
+crystal.rlattice[2]),
n.cross(crystal.rlattice[i],crystal.rlattice[j])
/n.linalg.norm(n.cross(crystal.rlattice[i],crystal.rlattice[j]))))
for i,j in [(0,1),(1,2),(2,0)])
# If we look at all the points in this many parallelogram
# "shells", we can't miss all the accessible wavevectors
num_shells = int(2*nu / min_step)
# Now we generate these possibilities
possibilities = [(crystal.rlattice[0]*h + crystal.rlattice[1]*j
+ crystal.rlattice[2]*k)
for h,j,k in it.product(
range(-num_shells,num_shells+1),
repeat=3)]
# And we filter the possibilities, getting rid of all the
# rlvs that are too long and the 0 vector
rlvs = [rlv for rlv in possibilities
if n.linalg.norm(rlv) < 2*nu
and not n.allclose(rlv,0)]
return n.array(rlvs)
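
A small usage sketch, with illustrative assumptions: the real crystal class is stood in for by a minimal object carrying only the rlattice attribute the function reads (here a simple-cubic reciprocal lattice), so the accessibility cut |G| < 2*nu can be checked directly.

import numpy as n

class _CubicCrystal:
    # Minimal stand-in for the real crystal object; only rlattice is needed here.
    def __init__(self, a):
        # Reciprocal lattice vectors of a simple cubic cell with lattice constant a
        self.rlattice = 2 * n.pi / a * n.eye(3)

crystal = _CubicCrystal(a=4.0)                          # angstroms (assumed value)
rlvs = find_accessible_rlvs(crystal, wavelength=1.54)   # Cu K-alpha, angstroms
# Every returned vector lies strictly inside the sphere of radius 2*nu
assert all(n.linalg.norm(rlv) < 2 * (2 * n.pi / 1.54) for rlv in rlvs)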
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def R(self,v):\n R=sparse.lil_matrix((len(v),self.N_win),dtype='complex128')\n b=v*self.N_win\n b_rounded=np.round(b).astype('int')\n b_frac=b_rounded-b\n # Figure out what bins are worth looking up\n lobe_radii=self.lobe_radius_interp(v)\n lu_bins=[np.arange(-lr,lr+1) for lr in lobe_radii]\n b_ranges=[lub - bf for bf,lub in zip(b_frac,lu_bins)]\n b_indices=[(lub - br) % self.N_max for br,lub in zip(b_rounded,lu_bins)]\n r_indices=[idx * np.ones(len(lub)) for idx,lub in enumerate(lu_bins)]\n vs=[v_ * np.ones(len(lub)) for v_,lub in zip(v,lu_bins)]\n R[np.concatenate(r_indices),np.concatenate(b_indices)]=self.win_dft_interp(\n np.concatenate(b_ranges),np.concatenate(vs))\n return R.tocsr()",
"def _calculate_reciprocal_lattice(self, lattice):\n self.lattice = np.array(lattice)\n self.a, self.b, self.c, self.alpha, self.beta, self.gamma = self.convert_unitcell_to_abc()\n self.volume = self.calculate_volume()\n self.inverse_lattice = np.linalg.inv(self.lattice)\n self.reciprocal_lattice = self.inverse_lattice",
"def reciprocal_lattice_vectors(a):\n b = np.zeros(shape=(3,3))\n b[:,0] = 2 * np.pi * np.cross(a[:,1], a[:,2]) / triple_product(a[:,0], a[:,1], a[:,2])\n b[:,1] = 2 * np.pi * np.cross(a[:,2], a[:,0]) / triple_product(a[:,1], a[:,2], a[:,0])\n b[:,2] = 2 * np.pi * np.cross(a[:,0], a[:,1]) / triple_product(a[:,2], a[:,0], a[:,1])\n return b",
"def rvs(self):\n raise NotImplementedError",
"def to_reciprocal_lattice(self, ks: Array) -> Array:\n # Ensure that ks has at least 2 dimensions\n ks = _np.asarray(ks)\n if ks.ndim == 1:\n ks = ks[_np.newaxis, :]\n\n result = ks @ self._lattice_dims.T / (2 * pi)\n # Check that these are integers\n is_valid = is_approx_int(result)\n if not _np.all(is_valid):\n raise InvalidWaveVectorError(\n \"Some wave vectors are not reciprocal lattice vectors of the simulation\"\n \"box spanned by\\n\"\n + \"\\n\".join(\n [\n str(self._lattice_dims[i])\n + (\" (PBC)\" if self.pbc[i] else \" (OBC)\")\n for i in range(self.ndim)\n ]\n )\n )\n\n result = _np.asarray(_np.rint(result), dtype=int)\n # For axes with non-periodic BCs, the k-component must be 0\n is_valid = _np.logical_or(self.pbc, result == 0)\n if not _np.all(is_valid):\n raise InvalidWaveVectorError(\n \"Some wave vectors are inconsistent with open boundary conditions\"\n )\n\n return result",
"def _VRF(self) -> array:\n pass",
"def RV(dist, lbound=None, ubound=None):\n if lbound or ubound:\n while True:\n yield int(np.clip(dist.rvs(size=1), lbound, ubound))\n else:\n while True:\n yield int(dist.rvs(size=1))",
"def vector(self):\n \n v_list = Householder.triangle_operation(self)[1]\n \n return(v_list)",
"def get_verts(v_l, v_r):\n\n\t\tv_l = v_l%chain.length\n\t\tv_r = v_r%chain.length\n\n\t\tpoints = []\n\t\tcoords = list(chain.coords)\n\t\tif v_r > v_l:\n\n\t\t\tfor i in range(1, len(coords)):\n\t\t\t\n\t\t\t\tpd = LineString(coords[:i+1]).length\n\n\t\t\t\tif pd > v_l and pd < v_r:\n\t\t\t\t\tpoints.append(coords[i])\n\t\telse:\n\n\t\t\tfor i in range(1, len(coords)):\n\t\t\t\n\t\t\t\tpd = LineString(coords[:i+1]).length\n\n\t\t\t\tif pd > v_l:\n\t\t\t\t\tpoints.append(coords[i])\n\n\t\t\tfor i in range(1, len(coords)):\n\t\t\t\n\t\t\t\tpd = LineString(coords[:i+1]).length\n\n\t\t\t\tif pd < v_r:\n\t\t\t\t\tpoints.append(coords[i])\n\n\n\t\treturn points",
"def generate_V(self):\n\n n_samples, n_dimensions, L = self.n_samples, self.n_dimensions, self.L\n\n V = np.zeros([L + 1, n_dimensions], dtype=int)\n V[1:, 0] = [1 << (self.scale - j) for j in range(1, L + 1)]\n\n for i in range(n_dimensions - 1):\n\n m = np.array(directions[i], dtype=int)\n s = len(m) - 1\n\n # The following code discards the first row of the ``m`` array\n # Because it has floating point errors, e.g. values of 2.24e-314\n if L <= s:\n V[1:, i + 1] = [m[j] << (self.scale - j) for j in range(1, L + 1)]\n else:\n V[1 : s + 1, i + 1] = [\n m[j] << (self.scale - j) for j in range(1, s + 1)\n ]\n for j in range(s + 1, L + 1):\n V[j, i + 1] = V[j - s, i + 1] ^ (V[j - s, i + 1] >> s)\n for k in range(1, s):\n V[j, i + 1] ^= ((m[0] >> (s - 1 - k)) & 1) * V[j - k][i + 1]\n\n return V",
"def derive_cardelli(wavelength, Rv):\n x = 1.0 / np.array(wavelength)\n\n # check for applicability\n if (np.min(x) < 0.3):\n print( 'wavelength is longer than applicable range for Cardelli law')\n return None\n\n if (np.max(x) > 8.0):\n print( 'wavelength is shorter than applicable range for Cardelli law')\n return None\n \n # Set up some arrays for coefficients that we will need\n a = np.zeros(len(x), dtype=float)\n b = np.zeros(len(x), dtype=float)\n\n y = x - 1.82\n\n # Calculate coefficients for long wavelengths (low wavenumber)\n # Wavenumger <= 1.1 (Eq. 2a, 2b)\n idx = np.where(x <= 1.1)[0]\n a[idx] = 0.574 * x[idx] ** 1.61\n b[idx] = -0.527 * x[idx] ** 1.61\n\n # Calculate coefficients for intermediate wavelengths\n # 1.1 < wavenumber <= 3.3 (Eq. 3a, 3b)\n idx = np.where((x > 1.1) & (x <= 3.3))[0]\n yy = y[idx]\n a[idx] = 1 + (0.17699 * yy) - (0.50447 * yy ** 2) - \\\n (0.02427 * yy ** 3) + (0.72085 * yy ** 4) + \\\n (0.01979 * yy ** 5) - (0.77530 * yy ** 6) + \\\n (0.32999 * yy ** 7)\n b[idx] = (1.41338 * yy) + (2.28305 * yy ** 2) + \\\n (1.07233 * yy ** 3) - (5.38434 * yy ** 4) - \\\n (0.62251 * yy ** 5) + (5.30260 * yy ** 6) - \\\n (2.09002 * yy ** 7)\n\n # Calculate the long wavelength\n # 3.3 < wavenumber < 5.9 (Eq. 4a, 4b)\n idx = np.where((x > 3.3) & (x < 5.9))[0]\n xx = x[idx]\n a[idx] = 1.752 - (0.316 * xx) - (0.104/((xx - 4.67) ** 2 + 0.341))\n b[idx] = -3.090 + (1.825 * xx) + (1.206/((xx - 4.62) ** 2 + 0.263))\n\n # Calculate the longest wavelength\n # 5.9 <= wavenumber (Eq. 4a, 4b)\n idx = np.where(x >= 5.9)[0]\n xx = x[idx]\n a[idx] = 1.752 - (0.316 * xx) - (0.104/((xx - 4.67) ** 2 + 0.341)) + \\\n (-0.04473 * (xx - 5.9) ** 2) - (0.009779 * (xx - 5.9) ** 3)\n b[idx] = -3.090 + (1.825 * xx) + (1.206/((xx - 4.62) ** 2 + 0.263)) + \\\n (0.2130 * (xx - 5.9) ** 2) + (0.1207 * (xx - 5.9) ** 3)\n\n # A(lam) / A(V), from Eq. 1\n extinction = a + b/Rv\n\n # Now, want to produce A_lambda / AKs, to match other laws\n k_ind = np.where(abs(x-0.46) == min(abs(x-0.46)))\n Aks_Av = a[k_ind] + b[k_ind]/Rv # Aks / Av\n Av_Aks = 1.0 / Aks_Av # Av / Aks\n \n output = extinction * Av_Aks # (A(lamb) / Av) * (Av / Aks) = (A(lamb) / Aks)\n\n return output",
"def __init__( self, u = [ 1., 0., 0. ], v = [ 0., 1., 0. ], w = [ 0., 0., 1. ], coeff = 1. ): \n\tdirect = [ u, v, w ]\n self.coeff = coeff\n\tself.direct = [ [ i*coeff for i in j ] for j in direct ]\n self.reciprocal_updated = False\n self.lattice_parameters_updated = False\n self.volume_updated = False\n self.get_lattice_parameters( u, v, w )\n self.get_volume( u, v, w )\n self.get_reciprocal_basis( u, v, w )",
"def rvs(self, size=[]):\n pass",
"def generador_v(vector_n, constante):\n\n v = []\n\n for x in range(len(vector_n)):\n nv = vector_n[x] // constante # // = Division entera\n v.append(nv)\n\n # print(\"valores n: \", vector_n)\n # print(\"valores v: \", v)\n\n return v",
"def get_vectors(dim, R2):\n\n #collecting base vectors\n base_vecs = []\n numbers = get_set(dim, R2)\n while len(numbers) >= dim:\n vec = get_base_vector(dim, R2, deepcopy(numbers))\n if vec is not False:\n base_vecs += [np.sqrt(vec)]\n numbers.remove(max(numbers))\n #permuting base vectors\n uvecs = []\n for vec in base_vecs:\n for per_vec in permutations(vec):\n uvecs += [per_vec]\n uvecs = list(set(uvecs))\n\n #adding all possible sign options\n vecs = []\n for vec in uvecs:\n for sign in sign_possibilities(dim):\n vecs += [tuple([int(a*b) for a, b in zip(sign, vec)])]\n vecs = list(set(vecs))\n return vecs",
"def create_vectors(self):\n self.localStatistics = []\n self.lastStatistics = []\n self.globalV = []\n self.estimate = []\n self.delta = []\n self.drift = []\n self.slack = [] # only for coordBased model",
"def get_rewired_lattice(Nv, Kv, pR):\n if type(pR) is not float:\n logging.error(\"get_rewired_lattice: Error: pR should be a float data type.\")\n return None\n\n if pR < 0 or pR > 1:\n logging.error(\"get_rewired_lattice: Error: pR should be in [0,1].\")\n return None\n\n N = get_ring_lattice(Nv, Kv)\n logging.debug(\"get_rewired_lattice: Nv is \" + str(Nv))\n logging.debug(\"get_rewired_lattice: Kv is \" + str(Kv))\n logging.debug(\"get_rewired_lattice: pR is \" + str(pR))\n Kv_half = int(round(Kv / 2))\n for i in range(0, Nv):\n for c in range(0, Kv_half):\n j = i + c + 1\n if j >= Nv:\n j = j - Nv\n r1 = rg.random()\n if r1 <= pR:\n N[i][j] = 0\n N[j][i] = 0\n k = None\n while k is None:\n r2 = rg.random()\n k = int(round(r2 * (Nv - 1)))\n if k != i and k != j and N[i][k] == 0:\n N[i][k] = 1\n N[k][i] = 1\n else:\n k = None\n return N",
"def lvec(self):\n lv = ROOT.TLorentzVector()\n# if self.pt < 0 or abs(self.eta) > 6:\n# raise Exception(\"Invalid values for TLorentzVector\")\n lv.SetPtEtaPhiM(self.pt, self.eta, self.phi, self.mass)\n# if abs(lv.Pt()) > 100000 or abs(lv.Eta()) > 100000:\n# raise Exception(\"Invalid values for TLorentzVector\")\n return lv",
"def v_from_omega_r(w, r):\n return w.cross(r)",
"def get_fermi_velocities():\n\n vr = Vasprun('vasprun.xml')\n # eigenvalues = vr.eigenvalues\n bs = vr.get_band_structure()\n bands = bs.bands\n kpoints = bs.kpoints\n efermi = bs.efermi\n h_bar = 6.582e-16 # eV*s\n\n fermi_bands = []\n for spin in bands:\n for i in range(len(bands[spin])):\n if max(bands[spin][i]) > efermi > min(bands[spin][i]):\n fermi_bands.append(bands[spin][i])\n\n fermi_velocities = []\n for band in fermi_bands:\n for i in range(len(band)-1):\n if (band[i] < efermi < band[i+1]) or (band[i] > efermi > band[i+1]):\n dk = np.sqrt((kpoints[i+1].cart_coords[0]\n - kpoints[i].cart_coords[0])**2\n + (kpoints[i+1].cart_coords[1]\n - kpoints[i].cart_coords[1])**2)\n v_f = abs((band[i+1] - band[i]) / (h_bar * dk))\n fermi_velocities.append(v_f)\n\n return fermi_velocities # Values are in Angst./s",
"def square_bravais_lattice(self,R,lattice_multiplier=1):\n a = lattice_multiplier*self.a\n b = lattice_multiplier*self.b\n c = lattice_multiplier*self.c\n\n #Calculate the number of lattice points needed in each direction to cover a length of R\n #I use the ceiling function so that when I shift the origin by a one unit cell vector,\n #I still cover all lattive points within a distance of R\n Na = int(np.ceil(R/np.linalg.norm(a)))\n Nb = int(np.ceil(R/np.linalg.norm(b)))\n Nc = int(np.ceil(R/np.linalg.norm(c)))\n\n #calculate the number of vertices in a grid that covers the sphere\n #A sphere of radius R fits within a grid of size 2R x 2R x 2R\n #Adding one to account for origin\n number_vertices = (2*Na+1)*(2*Nb+1)*(2*Nc+1)\n vertices = np.empty((number_vertices,3))\n vertex_labels = np.empty(number_vertices ,dtype=int)\n \n # populate the vertices list with the positions of a lattice with single spacing\n n = 0\n for i in np.arange(-Na,Na+1):\n for j in np.arange(-Nb,Nb+1):\n for k in np.arange(-Nc,Nc+1):\n vertices[n]=np.dot([[i,j,k]],[[a[0],a[1],a[2]],[b[0],b[1],b[2]],[c[0],c[1],c[2]]])\n vertex_labels[n] = self.position_map_inverse[(i*lattice_multiplier)%2,(j*lattice_multiplier)%2,(k*lattice_multiplier)%2]\n n += 1\n return vertices, vertex_labels",
"def rvs(self, size=[]):\n raise NotImplementedError()",
"def get_radii(self) -> np.ndarray:\n return np.array([self._radii[p] for p in self.particles])",
"def rvs(self):\n return self._root.rvs()",
"def LV_Matrices(self):\n self.LV_inhM = self.LotkaVolterra_InhibitMatrix() # (nF, nF)\n self.LV_c, self.LV_s = self.LotkaVolterra_Dynamics() # (nS, 1)\n # TODO: The LV weights are incredibly slow...\n # self.LV_W = self.LotkaVolterra_Weights2()",
"def powder_XRD(crystal,wavelength, get_mults=False):\n \n # The wavenumber of the input wavelength\n nu = 2*n.pi/wavelength\n\n # Make a list of the accessible rlvs\n rlvs = find_accessible_rlvs(crystal,wavelength)\n \n # Now we calculate the scattering intensity from each rlv\n intensities = {\n tuple(rlv): n.abs(crystal.structure_factor(rlv))**2\n for rlv in rlvs}\n \n # Now sum up all rlvs with the same magnitude. We also\n # get rid of all the scattering vectors with 0 intensity\n magnitudes = {}\n multiplicities = {}\n for rlv, intensity in intensities.items():\n repeat = False\n mag = n.linalg.norm(rlv)\n for oldmag in magnitudes:\n if n.isclose(mag,oldmag):\n magnitudes[oldmag] += intensity\n multiplicities[oldmag] += 1\n repeat = True\n break\n if not repeat and not n.isclose(mag,0):\n multiplicities[mag] = 1\n magnitudes[mag] = intensity\n \n # Now we reformat the multiplicity data in a nice way\n multiplicities = {2 * n.arcsin(mag / (2 * nu)) * 180 / n.pi:\n multiplicity\n for mag, multiplicity in multiplicities.items()\n if not n.allclose(magnitudes[mag],0)}\n\n # And now we calculate the scattering intensities\n # (a.u. per steradian) as a function of scattering angle\n intensities = {2 * n.arcsin(mag / (2 * nu)) * 180 / n.pi:\n intensity * \n # This factor corrects for the fact that the same total\n # power in the debye scherrer rings is more\n # concentrated when 2\\theta is near 0 or 2pi\n 1 / n.sin(2*n.arcsin(mag/(2*nu))) *\n # This factor corrects for the probability that any\n # given crystal domain will scatter into the rlv\n 1 / mag *\n # This factor corrects for polarization effects,\n # Assuming an unpolarized input beam and no polarization\n # analysis\n (1 + n.cos(2*n.arcsin(mag/(2*nu)))**2)/2\n for mag, intensity in magnitudes.items()\n if not n.allclose(intensity,0)}\n if get_mults:\n return intensities, multiplicities\n else:\n return intensities",
"def _vlerchphi(self, z: np.ndarray, a: int) -> np.ndarray:\n return np.array([self._lerchphi(z_, a) for z_ in z])",
"def _get_res_vol_sides(radar):\n deltar = radar.range['data'][1]-radar.range['data'][0]\n if 'radar_beam_width_h' in radar.instrument_parameters:\n beamwidth = (\n radar.instrument_parameters['radar_beam_width_h']['data'][0])\n else:\n warn('Unknown radar antenna beamwidth. Assumed 1 degree')\n beamwidth = 1.\n\n _, _, hlowerleft = (\n antenna_vectors_to_cartesian(\n radar.range['data'] - deltar/2, radar.azimuth['data'],\n radar.elevation['data'] - beamwidth/2) +\n radar.altitude['data'][0])\n\n _, _, hupperright = (\n antenna_vectors_to_cartesian(\n radar.range['data'] + deltar/2, radar.azimuth['data'],\n radar.elevation['data'] + beamwidth/2) +\n radar.altitude['data'][0])\n\n return hlowerleft, hupperright",
"def place_rebar_trans_v(asv_req, d_v, n_legs):\r\n \"\"\"asv_req - required area of steel in mm2/m\"\"\"\r\n \"\"\"d_v - diameter of shear link\"\"\"\r\n \"\"\"n_legs - number of legs\"\"\"\r\n spacings = [250, 225, 200, 175, 150, 125, 100, 75] # available spacings\r\n a_leg = np.pi * d_v ** 2 / 4 # area of each link leg\r\n a_links = a_leg * n_legs # total area per links set\r\n\r\n for i in range(len(spacings)):\r\n\r\n Asv = a_links / (spacings[i] * 0.001)\r\n if Asv > asv_req:\r\n break\r\n a = [n_legs, d_v, spacings[i]]\r\n\r\n if i == len(spacings) - 1 and Asv < Asv_req: # in case number of legs and diameters is not enough\r\n a = [0, 0, 0]\r\n\r\n return a",
"def getVRF(self) -> array:\n '''@ V : array'''\n V = self._VRF();\n return V;"
] |
[
"0.60902166",
"0.5821111",
"0.5767367",
"0.5698596",
"0.5697524",
"0.5608284",
"0.56012243",
"0.5584463",
"0.5564152",
"0.55254877",
"0.5486465",
"0.54747415",
"0.54615265",
"0.54448056",
"0.54020214",
"0.5399121",
"0.5389129",
"0.53405744",
"0.53344333",
"0.5328175",
"0.53094816",
"0.5260232",
"0.52292275",
"0.5221706",
"0.52007705",
"0.5196239",
"0.51814365",
"0.5177518",
"0.5173207",
"0.5167887"
] |
0.6662302
|
0
|
Generates a powder XRD spectrum for radiation with the given wavelength (in angstroms)
|
def powder_XRD(crystal,wavelength, get_mults=False):
# The wavenumber of the input wavelength
nu = 2*n.pi/wavelength
# Make a list of the accessible rlvs
rlvs = find_accessible_rlvs(crystal,wavelength)
# Now we calculate the scattering intensity from each rlv
intensities = {
tuple(rlv): n.abs(crystal.structure_factor(rlv))**2
for rlv in rlvs}
# Now sum up all rlvs with the same magnitude. We also
# get rid of all the scattering vectors with 0 intensity
magnitudes = {}
multiplicities = {}
for rlv, intensity in intensities.items():
repeat = False
mag = n.linalg.norm(rlv)
for oldmag in magnitudes:
if n.isclose(mag,oldmag):
magnitudes[oldmag] += intensity
multiplicities[oldmag] += 1
repeat = True
break
if not repeat and not n.isclose(mag,0):
multiplicities[mag] = 1
magnitudes[mag] = intensity
# Now we reformat the multiplicity data in a nice way
multiplicities = {2 * n.arcsin(mag / (2 * nu)) * 180 / n.pi:
multiplicity
for mag, multiplicity in multiplicities.items()
if not n.allclose(magnitudes[mag],0)}
# And now we calculate the scattering intensities
# (a.u. per steradian) as a function of scattering angle
intensities = {2 * n.arcsin(mag / (2 * nu)) * 180 / n.pi:
intensity *
                   # This factor corrects for the fact that the same total
                   # power in the Debye-Scherrer rings is more
                   # concentrated when 2\theta is near 0 or 180 degrees
1 / n.sin(2*n.arcsin(mag/(2*nu))) *
# This factor corrects for the probability that any
# given crystal domain will scatter into the rlv
1 / mag *
# This factor corrects for polarization effects,
# Assuming an unpolarized input beam and no polarization
# analysis
(1 + n.cos(2*n.arcsin(mag/(2*nu)))**2)/2
for mag, intensity in magnitudes.items()
if not n.allclose(intensity,0)}
if get_mults:
return intensities, multiplicities
else:
return intensities
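
For reference, the keys of the returned dictionaries are scattering angles in degrees; the conversion applied to each rlv magnitude is the Ewald/Bragg relation |G| = 2*nu*sin(theta) with nu = 2*pi/wavelength. Written out on its own (a restatement of what the code above already computes, not an addition to it; the helper name is ours):

def scattering_angle_degrees(rlv_magnitude, wavelength):
    # 2*theta, in degrees, at which a reciprocal lattice vector of the given
    # magnitude satisfies the diffraction condition |G| = 2*nu*sin(theta).
    nu = 2 * n.pi / wavelength
    return 2 * n.arcsin(rlv_magnitude / (2 * nu)) * 180 / n.pi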
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def wavelength_ex(hdulist):\n wave = hdulist[1].data['loglam']\n wave = 10**wave\n\n return wave",
"def wavelength(energy):\r\n return 2 * np.pi * PLANCK_CONSTANT * SPEED_OF_LIGHT / energy",
"def GetWavelengths (self) :\n\t\treturn self.run(\"GetWavelengths\")",
"def spectrum(self, wl: Union[float, ndarray]) -> Union[float, ndarray]:\n wlm = wl * 1e-9 # Wavelength to meters\n return 3.74183e-16 * wlm ** -5. / (np.exp(0.014388 / (wlm * self.temp)) - 1.)",
"def wavelength(energy):\n return 2 * PI * PLANCK_CONSTANT * SPEED_OF_LIGHT / energy",
"def evaluate(self, wavelength):\n micron = wavelength.to(u.micron).value\n x = 1 / micron\n optical_indx = np.where(np.logical_and(0.63 <= micron, micron <= 2.20))\n ir_indx = np.where(np.logical_and(0.12 <= micron, micron <= 0.63))\n x = np.asarray(x)\n if x.ndim == 0:\n x = x[None]\n k = np.empty(len(x))\n k[optical_indx] = 2.659 * (-1.857 + 1.040 * x) + self.Rv\n k[ir_indx] = 2.659 * (-2.156 + 1.509 * x - 0.198 * x**2 + 0.011 * x**3) + self.Rv\n return k",
"def wavelength(refractive_index, omega):\n return 2 * np.pi * cgs.c / (refractive_index * omega)",
"def wavelength(self):\n return wavelength(energy)",
"def create_spectral_bandpass_only_gaussian(dframe, radiance, file_path):\n print(radiance)\n save_dir = os.path.join(file_path, 'spectral_bandpass_1400')\n print(save_dir)\n if not os.path.exists(save_dir):\n os.makedirs(save_dir)\n\n dframe1 = pd.DataFrame()\n for i in range(0, len(dframe['W1'])):\n #plt.plot(radiance['Wavelength'], radiance['Response']/np.max(radiance['Response']),\n # 'k--', markersize=2, label='SAO 2100 Solar Irradiance Spectrum')\n\n # for i in range(0, 5):\n a1_val = dframe['A1'][i]\n sigma1 = dframe['Sigma1'][i]\n w1_val = dframe['W1'][i]\n\n lower_range = w1_val - 1.92\n upper_range = w1_val + 1.92\n\n wavelens = np.arange(lower_range, upper_range, 0.01)\n #wavelens = ran\n bandpass = [gauss_function_only(a1_val, sigma1, w1_val, wavelens)\n for wavelens in np.arange(lower_range, upper_range, 0.01)]\n\n dframe1['Wavelength'] = wavelens\n dframe1['Response'] = bandpass/np.max(bandpass)\n #dframe1 = dframe1.round(3)\n dframe1.round(4).to_csv(save_dir + '/' + 'bandpass_' + str(round(w1_val, 2))+'_nm.csv')\n plt.plot(wavelens, bandpass/np.max(bandpass), 'r.--')\n plt.grid(True, linestyle=':')\n plt.xlabel('Wavelength (nm)')\n plt.ylabel('Normalized Spectral Response')\n plt.title('TEMPO Spectral Bandpass (WL = ' + str(round(w1_val, 2)) + ' nm)')\n plt.ylim(0, 1.1)\n plt.xlim(lower_range, upper_range)\n #plt.show()\n # plt.show()\n # Now let us save the spectral bandpass data and spectral bandpass plot\n plt.savefig(save_dir + '/' + 'bandpass_' + str(round(w1_val, 2))+'_nm.png', dpi=100)\n plt.close('all')",
"def get_experimental_spectra(mol):\n\n data = pd.read_csv(mol, sep=',')\n wavelength = data.values[:, 0]\n\n absorption = data.values[:, 1]\n\n func = interp1d(wavelength, absorption, kind='quadratic')\n wavelength_new = 1. / np.linspace(1. / wavelength.max(), 1. / wavelength.min(), 100)\n absorption_new = func(wavelength_new)\n absorption_new *= 100. / absorption_new.max()\n\n return wavelength_new, absorption_new",
"def deredden_spectrum(wl, spec, E_bv):\n # dust model\n wls = numpy.array([ 2600, 2700, 4110, 4670, 5470, 6000, 12200, 26500])\n a_l = numpy.array([ 6.591, 6.265, 4.315, 3.806, 3.055, 2.688, 0.829, 0.265])\n f_interp = interp1d(wls, a_l, kind=\"cubic\")\n\n a_l_all = f_interp(wl)\n #E_bv = extinction_g / 3.793\n A_lambda = E_bv * a_l_all\n spec_real = spec * 10 ** (A_lambda / 2.5)\n\n return spec_real",
"def derive_RiekeLebofsky(wavelength):\n filters = ['U', 'B', 'V', 'R', 'I', 'J', 'H', 'K', 'L', 'M', \n '[8.0]', '[8.5]', '[9.0]', '[9.5]', '[10.0]', '[10.5]', \n '[11.0]', '[11.5]', '[12.0]', '[12.5]', '[13.0]']\n #wave = np.array([0.365, 0.445, 0.551, 0.658, 0.806, 1.25, 1.635, 2.2, \n # 3.77, 4.68, 4.75, 8.0, 8.5, 9.0, 9.5, 10.0, 10.5, 11.0,\n # 11.5, 12.0, 12.5, 13.0])\n \n # Wavelengths from Nishiyama+09 plot of RL+85 law...slightly different than standard, \n # drop N filter\n wave = np.array([0.365, 0.445, 0.551, 0.658, 0.806, 1.17, 1.57, 2.12, \n 3.40, 4.75, 8.0, 8.5, 9.0, 9.5, 10.0, 10.5, 11.0,\n 11.5, 12.0, 12.5, 13.0])\n A_Av = np.array([1.531, 1.324, 1.00, 0.748, 0.482, 0.282, 0.175, 0.112,\n 0.058, 0.023, 0.02, 0.043, 0.074, 0.087, 0.083,\n 0.074, 0.060, 0.047, 0.037, 0.030, 0.027])\n # Want to change this from A/Av to A/AK\n k_ind = np.where(np.array(filters) == 'K')\n Ak_Av = A_Av[k_ind]\n Av_Ak = 1.0 / Ak_Av\n\n A_Ak = A_Av * Av_Ak\n \n # Interpolate over the curve\n spline_interp = interpolate.splrep(wave, A_Ak, k=3, s=0)\n A_Ak_at_wave = interpolate.splev(wavelength, spline_interp)\n\n return A_Ak_at_wave",
"def comp_output_spectra(self):\n assert(hasattr(self,'r'))\n \n self.nx=int(self.nx)\n \n r_mat=self.r.T.reshape(self.nx,self.nx,self.N)\n\n in_allfreqs = np.fft.fftshift(np.fft.fftfreq(self.nx,d=self.L/self.nx))\n \n self.freqs=in_allfreqs[self.nx/2:]\n \n r_dft_flat=np.fft.fftshift(np.fft.fft2(r_mat,axes=[0,1]),axes=[0,1])*(self.L/self.nx)**2\n\n r_pw=abs(r_dft_flat)**2 \n r_pw_profiles=gl.dft2d_profiles(r_pw)\n \n self.re_pw_profile=np.mean(r_pw_profiles,axis=0)\n self.he_pw_profile=self.inputs.in_mean_pw_profile",
"def spectral_model(self):\n pars, errs = {}, {}\n pars['amplitude'] = self.data['Flux']\n pars['emin'], pars['emax'] = self.energy_range\n pars['index'] = self.data['Spectral_Index'] * u.dimensionless_unscaled\n errs['amplitude'] = self.data['Unc_Flux']\n errs['index'] = self.data['Unc_Spectral_Index'] * u.dimensionless_unscaled\n model = PowerLaw2(**pars)\n model.parameters.set_parameter_errors(errs)\n return model",
"def generate_spectrum(self):\n matlab_method = self.matlab_mapper[self.matlab_script]\n n, dm, peak_locations, omega_res, n_shell, gamma_amp = matlab_method(float(self.n_max), float(self.n_max_s),\n float(self.num_channels), float(self.scale),\n float(self.omega_shift), float(self.dg),\n float(self.dgs), float(self.gamma_amp_factor),\n float(self.amp_factor), float(self.epsilon2),\n nargout=6)\n dm = [list(d) for d in dm]\n self.num_timesteps = len(dm[0])\n if type(peak_locations) == float:\n peak_locations = list([peak_locations])\n else:\n peak_locations = [list(p) for p in peak_locations]\n spectrum = Spectrum(n=n, dm=dm, peak_locations=peak_locations, n_shell=n_shell, gamma_amp=gamma_amp, **self.__dict__)\n return spectrum",
"def wavelength(self,freq):\n return self.phase_velocity()/freq",
"def create_spectral_bandpass(dframe, radiance, file_path):\n\n save_dir = os.path.join(file_path, 'spectral_bandpass_1400')\n print(save_dir)\n print(radiance)\n if not os.path.exists(save_dir):\n os.makedirs(save_dir)\n\n dframe1 = pd.DataFrame()\n for i in range(0, len(dframe['W1'])):\n #plt.plot(radiance['Wavelength'], radiance['Response']/np.max(radiance['Response']),\n #'k--', markersize=2, label='SAO 2100 Solar Irradiance Spectrum')\n\n # for i in range(0, 5):\n a1_val = dframe['A1'][i]\n a2_val = dframe['A2'][i]\n sigma1 = dframe['Sigma1'][i]\n sigma2 = dframe['Sigma2'][i]\n w1_val = dframe['W1'][i]\n w2_val = dframe['W2'][i]\n\n\n lower_range = w1_val - 1.92\n upper_range = w1_val + 1.92\n\n wavelens = np.arange(lower_range, upper_range, 0.01)\n #wavelens = ran\n bandpass = [flat_top_gaussian(a1_val, a2_val, sigma1, sigma2, w1_val,\n w2_val, wavelens)\n for wavelens in np.arange(lower_range, upper_range, 0.01)]\n\n dframe1['Wavelength'] = wavelens\n dframe1['Response'] = bandpass/np.max(bandpass)\n #dframe1 = dframe1.round(3)\n dframe1.round(4).to_csv(save_dir + '/' + 'bandpass_' + str(round(w1_val, 2))+'_nm.csv')\n plt.plot(wavelens, bandpass/np.max(bandpass), 'r.--')\n plt.grid(True, linestyle=':')\n plt.xlabel('Wavelength (nm)')\n plt.ylabel('Normalized Spectral Response')\n plt.title('TEMPO Spectral Bandpass (WL = ' + str(round(w1_val, 2)) + ' nm)')\n plt.ylim(0, 1.1)\n plt.xlim(lower_range, upper_range)\n #plt.show()\n # plt.show()\n # Now let us save the spectral bandpass data and spectral bandpass plot\n plt.savefig(save_dir + '/' + 'bandpass_' + str(round(w1_val, 2))+'_nm.png', dpi=100)\n plt.close('all')",
"def calculate_wavelength(period, depth, gravity):\r\n return geometry.gmCalculateWavelength(period, depth, gravity)",
"def spectral_model(self):\n pars, errs = {}, {}\n pars['amplitude'] = self.data['Flux50']\n pars['emin'], pars['emax'] = self.energy_range\n pars['index'] = self.data['Spectral_Index'] * u.dimensionless_unscaled\n\n errs['amplitude'] = self.data['Unc_Flux50']\n errs['index'] = self.data['Unc_Spectral_Index'] * u.dimensionless_unscaled\n\n model = PowerLaw2(**pars)\n model.parameters.set_parameter_errors(errs)\n return model",
"def create_spectral_bandpass_interpol(interpol_wavelen, interpol_rad, center_wvl,\n save_dir):\n\n save_dir = os.path.join(save_dir, r'look_up_table')\n if not os.path.exists(save_dir):\n os.makedirs(save_dir)\n\n\n center_wvl1 = np.arange(min(center_wvl), max(center_wvl), 2)\n\n\n\n\n for j in np.arange(0, interpol_wavelen.shape[1]):\n #print(j)\n dframe = pd.DataFrame()\n wavelen = interpol_wavelen[:, j]\n\n radiance = interpol_rad[:, j]\n sampled_wvl = np.arange(min(wavelen), max(wavelen), 0.01)\n fit_params = interp1d(wavelen, radiance, kind='slinear')\n fitted_val = fit_params(sampled_wvl)\n #peak_val = np.where(fitted_val==max(fitted_val))[0]\n #print(peak_val)\n #peak_shift = sampled_wvl[peak_val] - CW1[j]\n\n\n# if peak_shift >0:\n# sampled_wvl = sampled_wvl - peak_shift\n# elif peak_shift <0:\n# sampled_wvl = sampled_wvl + peak_shift\n# else:\n# sampled_wvl = sampled_wvl\n#\n# print(sampled_wvl[peak_val] - CW1[j])\n\n dframe['Wavelength'] = sampled_wvl\n dframe['Radiance'] = fitted_val\n dframe.round(4).to_csv(save_dir + '/' + 'bandpass_' + \\\n str(round(center_wvl1[j], 2))+'_nm.csv')\n plt.plot(sampled_wvl, fitted_val/np.max(fitted_val), 'g.--')\n plt.grid(True, linestyle=':')\n plt.xlabel('Wavelength (nm)')\n plt.ylabel('Normalized Spectral Response')\n plt.title('TEMPO Spectral Bandpass (WL = ' + str(round(center_wvl1[j], 2)) + ' nm)')\n plt.ylim(0, 1.1)\n plt.xlim(min(wavelen), max(wavelen))\n #plt.show()\n\n # Now let us save the spectral bandpass data and spectral bandpass plot\n plt.savefig(save_dir + '/' + 'bandpass_' + str(round(center_wvl1[j], 2))+'_nm.png',\n dpi=100)\n plt.close('all')",
"def spectral(w, s=1.0):\n n_in, n_out = w.size()\n n = max(n_out, n_in)\n gain = s / math.sqrt(n)\n return w.normal_(0, 1).mul_(gain)",
"def simulate_spectrum(file_name = \"bands_full.dat\", k0=0,kf= 0.576,Nk=200,E_max=0.5,E_min=-0.5,Ne=200,gamma_k=0.002,gamma=0.004,lambda_0=20,orbital=\"pz\",suffix=\"\"):\n\n #define energy and momentum domains\n dk = (kf-k0)/Nk\n momenta = np.linspace(k0, kf+dk, Nk)\n energies = np.linspace(E_min, E_max, Ne)\n\n #initialize spectral function A_final to zero\n A_final = []\n for i_k in range(len(momenta)):\n I_e = []\n for j_e in range(len(energies)):\n I_e.append(0)\n A_final.append(I_e)\n\n #compute all lorenztian functions\n all_lor = compute_functions(file_name,E_max,E_min,gamma,gamma_k)\n\n #evaluate all functions\n print \"Evaluating functions\"\n for func in all_lor:\n s = np.vectorize(func)\n A = s(momenta[:,None],energies[None,:])\n A_final += A\n\n #print output\n file_output = \"\"\"A_gammak_%(gamma_k)s_gammae_%(gamma)s_Nk_%(Nk)s_Ne_%(Ne)s_lambda_%(lambda_0)s_%(orbital)s%(suffix)s\"\"\"%locals()\n file = open(file_output,'w')\n for i in range(len(momenta)):\n for j in range(len(energies)):\n print >> file,momenta[i],energies[j],A_final[i][j]\n print >> file,\"\"\n file.close()\n\n return file_output",
"def govardovskii2000_template(\n wavelengths: np.ndarray,\n alpha_max: Union[float, np.ndarray],\n A_alpha: Union[float, np.ndarray] = 69.7,\n a_alpha1: Union[float, np.ndarray] = 0.8795,\n a_alpha2: Union[float, np.ndarray] = 0.0459,\n a_alpha3: Union[float, np.ndarray] = 300.0,\n a_alpha4: Union[float, np.ndarray] = 11940.0,\n B_alpha: Union[float, np.ndarray] = 28.0,\n b_alpha: Union[float, np.ndarray] = 0.922,\n C_alpha: Union[float, np.ndarray] = -14.9,\n c_alpha: Union[float, np.ndarray] = 1.104,\n D_alpha: Union[float, np.ndarray] = 0.674,\n A_beta: Union[float, np.ndarray] = 0.26,\n beta_max1: Union[float, np.ndarray] = 189.0,\n beta_max2: Union[float, np.ndarray] = 0.315,\n d_beta1: Union[float, np.ndarray] = -40.5,\n d_beta2: Union[float, np.ndarray] = 0.195,\n) -> np.ndarray:\n x_alpha = (wavelengths / alpha_max) ** -1\n a_alpha = a_alpha1 + a_alpha2 * np.exp(-((alpha_max - a_alpha3) ** 2) / a_alpha4)\n\n alpha_band = (\n np.exp(A_alpha * (a_alpha - x_alpha))\n + np.exp(B_alpha * (b_alpha - x_alpha))\n + np.exp(C_alpha * (c_alpha - x_alpha))\n + D_alpha\n ) ** -1\n\n beta_max = beta_max1 + beta_max2 * alpha_max\n d_beta = d_beta1 + d_beta2 * alpha_max\n beta_band = np.exp(-(((wavelengths - beta_max) / d_beta) ** 2))\n\n return alpha_band + A_beta * beta_band",
"def wavelength_axis(self):\n if 'WAVE' not in self.axes_wcs.wcs.ctype:\n raise cu.CubeError(2,\n 'No energy (wavelength, frequency) axis found')\n axis = 0 if self.axes_wcs.wcs.ctype[0] == 'WAVE' else 1\n delta = self.axes_wcs.wcs.cdelt[axis]\n crpix = self.axes_wcs.wcs.crpix[axis]\n crval = self.axes_wcs.wcs.crval[axis]\n start = crval - crpix * delta\n stop = start + self.data.shape[-1 - axis] * delta\n cunit = u.Unit(self.axes_wcs.wcs.cunit[axis])\n return np.linspace(start, stop, num=self.data.shape[-1 - axis]) * cunit",
"def runWavelengthDependency():\n RunData([getFiles(mintime=(15, 39, 58), maxtime=(15, 47, 58), folder='data/30Jul/')[0],], out='I600nmwave',\n wavelength='l600')\n RunData([getFiles(mintime=(17, 48, 35), maxtime=(17, 56, 03), folder='data/30Jul/')[0],], out='I700nmwave',\n wavelength='l700')\n RunData([getFiles(mintime=(15, 40, 07), maxtime=(15, 45, 14), folder='data/29Jul/')[0],], out='I800nmwave',\n wavelength='l800')\n RunData([getFiles(mintime=(14, 17, 57), maxtime=(14, 25, 49), folder='data/01Aug/')[4],], out='I890nmwave',\n wavelength='l890')",
"def Create_Constant_WavelengthArray(spec_cube,final_wave_start,final_wave_end):\n\tdwave = np.zeros(len(spec_cube))\n\tfor n in xrange(len(spec_cube)):\n\t\ttemp_final_wave = spec_cube[n][0] # Take one of the spectrum use its resolution\n\t\tdwave[n] = np.median(temp_final_wave[1:] - temp_final_wave[:-1])\n\tdwave = np.max(dwave)\n\tfinal_wave = np.arange(final_wave_start,final_wave_end,dwave)\n\tprint 'Since input dv = 0 -> median resolution (constant) dwave = %f angstrom is used.' % dwave\n\treturn final_wave",
"def set_wavelength(self, wavelength: float) -> None:\n\n assert isinstance(wavelength, float), \"Incompatible type\"\n\n #:SENSe[n][:CHANnel[m]]:POWer:WAVelength /?\n self._inst.write(\"SENS:POW:WAV {}\".format(wavelength))",
"def spectral_power(img, avg_window_size=None, log=True): #COMPLETE spectrum generator\r\n image = img.copy()\r\n # to avoid large spectral power at the 0 frequency :\r\n image -= np.mean(image)\r\n # wiener filter to reduce non physical variability in the spectral power\r\n if avg_window_size:\r\n N = avg_window_size\r\n image = wiener(image, (N, N))\r\n # compute the spectral power function. Place the 0 frequency-component in the center\r\n fshift = np.fft.fftshift(np.fft.fft2(image))\r\n spectrum = np.abs(fshift)**2\r\n if log:\r\n spectrum = 10*np.log(spectrum)\r\n return spectrum",
"def use_w(args):\n try:\n bounddata = Table.read(\n f'./Input/UseWv/WaveRegions_{args.WRegion}_{args.band}.csv',\n format='csv')\n except IOError:\n sys.exit(\n f'WaveRegions FILE \"./Input/UseWv/WaveRegions'\n '_{args.WRegion}_{args.band}.csv\" NOT FOUND!')\n\n wavesols = pd.read_csv(f'./Input/UseWv/WaveSolns_{args.band}.csv')\n#-------------------------------------------------------------------------------\n XRegion_dir = f'./Input/UseWv/XRegions_{args.WRegion}_{args.band}.csv'\n with open(XRegion_dir,'w') as filew:\n filew.write('order, start, end, masks\\n')\n\n m_order = np.array(bounddata['order'])\n starts = np.array(bounddata['start'])\n ends = np.array(bounddata['end'])\n ords = list( sorted(OrderDictCla().orderdict[args.band].keys()) )\n\n Ostarts = [OrderDictCla().orderdict[args.band][k][0] for k in ords]\n Oends = [OrderDictCla().orderdict[args.band][k][1] for k in ords]\n labels = []\n\n m_orders_unique = np.unique(m_order)\n\n # For each order specified, find what pixel numbers correspond to the\n # wavelength bounds presented.\n # If multiple wavelength bounds given for a single order, output a\n # pixel mask between the two, as well.\n for o in range(len(m_orders_unique)):\n\n # if len(m_orders_unique) == 9:\n # filew.write('9, 150, 1950, []\\n')\n # continue\n\n pixs = []\n mini = np.where(m_order == m_orders_unique[o])[0]\n for j in range(len(mini)):\n i = mini[j]\n\n wavebounds = [starts[i],ends[i]]\n wO = wavesols['w'+str(m_orders_unique[o])]\n pixO = wavesols['x'+str(m_orders_unique[o])]\n pix = [pixO[(np.argmin(abs(wO-wavebounds[k])))] for k in [0,1]]\n pixs = pixs + pix\n\n pixsS = list(sorted(pixs))\n q = pixsS[1:-1]\n if len(pixsS) == 2:\n filew.write('{}, {}, {},[]\\n'.format(\n m_orders_unique[o], pixsS[0], pixsS[-1])\n )\n else:\n filew.write('{}, {}, {},\"{}\"\\n'.format(\n m_orders_unique[o], pixsS[0], pixsS[-1],\n [[first,second] for first, second in zip(q[0::2], q[1::2])]\n ))",
"def getwientemp(_inputdata, _distance, _derr, _id):\n # Maxwell-Boltzmann distribution formula probability density function\n def curve(_x, _a, _scale):\n _a1 = np.sqrt(2 / np.pi)\n _a2 = _x**2 / (2 * _a**2)\n return _scale * _a1 * (_x**2 * np.exp(-_a2)) / _a**3\n\n # Set pyplot style to be consistent through the program\n plt.style.use('seaborn-whitegrid')\n\n # Convert the distance in parsecs to metres\n _distance = 3.0857 * 10**16 * _distance\n _derr = 3.0857 * 10**16 * _derr\n # Create array for x and y axis data\n _xdata = _inputdata[:, 0]\n _ydata = _inputdata[:, 1]\n _ydatalum = _ydata\n\n # Iterate through each band and convert from Janskys to W/m^2/um\n i = 0\n while i < 5:\n _ydata[i] = 3*10**14 * (_ydata[i] * 10**-26) / (Wavelength[i]**2)\n i += 1\n # Calculate optimal values and covariance using scipy curve_fit function\n _popt, _pcov = curve_fit(curve, _xdata, _ydata)\n # Create x axis to plot curve against\n _x = np.linspace(0, 5, 100)\n # Determine y value for each point on the x axis\n _yplot = curve(_x, *_popt)\n # Plot the curve to the screen\n plt.plot(_x, _yplot)\n # Determine the area under the graph, integral gives total energy recieved per m^2\n _area = np.trapz(_yplot, dx=5/100)\n # Total luminosity found by multiplying by the surface area of a sphere with diameter of the distance\n _lum = 4 * np.pi * _distance**2 * _area\n _lumerr = 4 * np.pi * _distance * _derr * _area\n # Peak value of Maxwell-Boltzmann distribution\n _mu = 2 * _popt[0] * np.sqrt(2 / np.pi)\n\n # Plot data on the graph\n plt.plot(_xdata, _ydata, '.')\n # Set axis labels\n plt.xlabel('Wavelength (um)')\n plt.ylabel('Spectral Irradiance (W m^-2 um^-1)')\n if _id == 1:\n _str = 'Large Star'\n else:\n _str = 'Small Star'\n\n # Calculate effective surface temperature using Wien's law\n _wien = round_sig(2898 / _mu)\n # Round luminosity to 2 significant figures\n _lum = round_sig(_lum)\n # Set graph title\n plt.suptitle('Black Body Plot for the ' + _str)\n # Save to current folder\n _filename = _str + '.png'\n plt.savefig(_filename)\n # Display to the screen\n plt.show()\n\n # Returns calculated values\n return _lum, _lumerr, _wien"
] |
[
"0.64374226",
"0.6167109",
"0.6059381",
"0.6053003",
"0.59999365",
"0.5970338",
"0.59206903",
"0.58600605",
"0.5800283",
"0.57909775",
"0.57907706",
"0.57897896",
"0.5705752",
"0.5701563",
"0.5603155",
"0.56020117",
"0.5589162",
"0.5587772",
"0.557587",
"0.5571971",
"0.5562617",
"0.55519",
"0.55059993",
"0.55023414",
"0.5498124",
"0.5485682",
"0.54855037",
"0.5466052",
"0.5436075",
"0.5432539"
] |
0.6727283
|
0
|
This is just a nice function to turn the raw scattering data into a human-readable scattering spectrum
|
import numpy as n

def spectrumify(scattering_data, instr_broadening=0.1):
    # Fine angular grid over [0, 180] degrees on which the spectrum is evaluated.
    graph_angles = n.linspace(0, 180, 10000)
    graph_intensities = n.zeros(graph_angles.shape)
    # Each (angle, intensity) pair contributes a Gaussian peak of width instr_broadening.
    for angle, intensity in sorted(scattering_data.items()):
        graph_intensities += intensity * \
            n.exp(-(graph_angles - angle)**2 / \
                (2*(instr_broadening)**2))
    return graph_angles, graph_intensities
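A minimal usage sketch (an illustration, not part of the record); the toy scattering_data dict below is hypothetical.

toy_scattering = {30.0: 1.0, 45.0: 0.5}   # hypothetical peaks at 30 and 45 degrees
angles, intensities = spectrumify(toy_scattering, instr_broadening=0.1)
# angles is the 10000-point grid on [0, 180]; intensities is the sum of the
# Gaussian-broadened peaks, one per entry of the dict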
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def convertToSpectroGram(self):",
"def create_data(self) -> str:\r\n s = self.scale\r\n mini, maxi = self.get_min_max()\r\n diff = maxi - mini\r\n\r\n output = \"const data = {\\n\"\r\n\r\n # Create the data for the scatters\r\n # TODO: If it's not interactive, labels shouldn't be exported.\r\n for name, data in self.scatters_data.items():\r\n mapping = self.scatters[name][\"mapping\"]\r\n colormaps = self.scatters[name][\"colormap\"]\r\n cmaps = [None] * len(colormaps)\r\n\r\n for i, colormap in enumerate(colormaps):\r\n if isinstance(colormap, str):\r\n cmaps[i] = plt.cm.get_cmap(colormap)\r\n else:\r\n cmaps[i] = colormap\r\n\r\n output += name + \": {\\n\"\r\n x_norm = [round(s * (x - mini) / diff, 3) for x in data[mapping[\"x\"]]]\r\n output += \"x: [\" + \",\".join(map(str, x_norm)) + \"],\\n\"\r\n\r\n y_norm = [round(s * (y - mini) / diff, 3) for y in data[mapping[\"y\"]]]\r\n output += \"y: [\" + \",\".join(map(str, y_norm)) + \"],\\n\"\r\n\r\n z_norm = [round(s * (z - mini) / diff, 3) for z in data[mapping[\"z\"]]]\r\n output += \"z: [\" + \",\".join(map(str, z_norm)) + \"],\\n\"\r\n\r\n if mapping[\"labels\"] in data:\r\n fmt_labels = [\"'{0}'\".format(s) for s in data[mapping[\"labels\"]]]\r\n output += \"labels: [\" + \",\".join(fmt_labels) + \"],\\n\"\r\n\r\n if mapping[\"s\"] in data:\r\n output += \"s: [\"\r\n\r\n for series in range(len(data[mapping[\"s\"]])):\r\n output += (\r\n \"[\"\r\n + \",\".join(map(str, np.round(data[mapping[\"s\"]][series], 3)))\r\n + \"],\\n\"\r\n )\r\n\r\n output += \"],\\n\"\r\n\r\n output += \"colors: [\\n\"\r\n for series in range(len(data[mapping[\"c\"]])):\r\n output += \"{\\n\"\r\n if mapping[\"cs\"] in data:\r\n colors = np.array(\r\n [cmaps[series](x) for x in data[mapping[\"c\"]][series]]\r\n )\r\n\r\n for i, c in enumerate(colors):\r\n hsl = np.array(colour.rgb2hsl(c[:3]))\r\n hsl[1] = hsl[1] - hsl[1] * data[mapping[\"cs\"]][series][i]\r\n colors[i] = np.append(np.array(colour.hsl2rgb(hsl)), 1.0)\r\n\r\n colors = np.round(colors * 255.0)\r\n\r\n output += (\r\n \"r: [\" + \",\".join(map(str, map(int, colors[:, 0]))) + \"],\\n\"\r\n )\r\n output += (\r\n \"g: [\" + \",\".join(map(str, map(int, colors[:, 1]))) + \"],\\n\"\r\n )\r\n output += (\r\n \"b: [\" + \",\".join(map(str, map(int, colors[:, 2]))) + \"],\\n\"\r\n )\r\n elif mapping[\"c\"] in data:\r\n colors = np.array(\r\n [cmaps[series](x) for x in data[mapping[\"c\"]][series]]\r\n )\r\n colors = np.round(colors * 255.0)\r\n output += (\r\n \"r: [\" + \",\".join(map(str, map(int, colors[:, 0]))) + \"],\\n\"\r\n )\r\n output += (\r\n \"g: [\" + \",\".join(map(str, map(int, colors[:, 1]))) + \"],\\n\"\r\n )\r\n output += (\r\n \"b: [\" + \",\".join(map(str, map(int, colors[:, 2]))) + \"],\\n\"\r\n )\r\n output += \"},\\n\"\r\n\r\n output += \"]\"\r\n output += \"},\\n\"\r\n\r\n for name, data in self.trees_data.items():\r\n mapping = self.trees[name][\"mapping\"]\r\n point_helper = self.trees[name][\"point_helper\"]\r\n\r\n output += name + \": {\\n\"\r\n\r\n if point_helper is not None and point_helper in self.scatters_data:\r\n scatter = self.scatters_data[point_helper]\r\n scatter_mapping = self.scatters[point_helper][\"mapping\"]\r\n\r\n x_t = []\r\n y_t = []\r\n z_t = []\r\n\r\n for i in range(len(data[mapping[\"from\"]])):\r\n x_t.append(scatter[scatter_mapping[\"x\"]][data[mapping[\"from\"]][i]])\r\n x_t.append(scatter[scatter_mapping[\"x\"]][data[mapping[\"to\"]][i]])\r\n y_t.append(scatter[scatter_mapping[\"y\"]][data[mapping[\"from\"]][i]])\r\n 
y_t.append(scatter[scatter_mapping[\"y\"]][data[mapping[\"to\"]][i]])\r\n z_t.append(scatter[scatter_mapping[\"z\"]][data[mapping[\"from\"]][i]])\r\n z_t.append(scatter[scatter_mapping[\"z\"]][data[mapping[\"to\"]][i]])\r\n\r\n x_norm = [round(s * (x - mini) / diff, 3) for x in x_t]\r\n output += f\"x: [\" + \",\".join(map(str, x_norm)) + \"],\\n\"\r\n\r\n y_norm = [round(s * (y - mini) / diff, 3) for y in y_t]\r\n output += \"y: [\" + \",\".join(map(str, y_norm)) + \"],\\n\"\r\n\r\n z_norm = [round(s * (z - mini) / diff, 3) for z in z_t]\r\n output += \"z: [\" + \",\".join(map(str, z_norm)) + \"],\\n\"\r\n else:\r\n x_norm = [round(s * (x - mini) / diff, 3) for x in data[mapping[\"x\"]]]\r\n output += \"x: [\" + \",\".join(map(str, x_norm)) + \"],\\n\"\r\n\r\n y_norm = [round(s * (y - mini) / diff, 3) for y in data[mapping[\"y\"]]]\r\n output += \"y: [\" + \",\".join(map(str, y_norm)) + \"],\\n\"\r\n\r\n z_norm = [round(s * (z - mini) / diff, 3) for z in data[mapping[\"z\"]]]\r\n\r\n output += \"z: [\" + \",\".join(map(str, z_norm)) + \"],\\n\"\r\n\r\n if mapping[\"c\"] in data:\r\n colormap = self.trees[name][\"colormap\"]\r\n cmap = None\r\n if isinstance(colormap, str):\r\n cmap = plt.cm.get_cmap(colormap)\r\n else:\r\n cmap = colormap\r\n\r\n colors = np.array([cmap(x) for x in data[mapping[\"c\"]]])\r\n colors = np.round(colors * 255.0)\r\n output += \"r: [\" + \",\".join(map(str, colors[:, 0])) + \"],\\n\"\r\n output += \"g: [\" + \",\".join(map(str, colors[:, 1])) + \"],\\n\"\r\n output += \"b: [\" + \",\".join(map(str, colors[:, 2])) + \"],\\n\"\r\n\r\n output += \"},\\n\"\r\n\r\n output += \"};\\n\"\r\n\r\n return output",
"def spectrum_to_xyz(spectrum: Callable) -> ndarray:\n xyz = spectrum(WAVELENGTHS_380_780) @ CIE_XYZ_380_780\n xyz /= sum(xyz)\n return xyz",
"def reformat(dataset):\n x = dataset[:, 1] \n x = np.stack(x) # reshape to (n, mel bands, timesteps)\n x = np.expand_dims(np.moveaxis(x, 1, -1), axis=3) # reformat x to (n, timesteps, mel bands, 1) \n y = dataset[:, 2] \n y = np.moveaxis(np.stack(y), 1, -1) # reformat y to (n, timesteps, 8)\n return x, y",
"def data_normalize (self, data):\r\n data = data + (2**15)\r\n data = data / ((2**16) - 1)\r\n data = 2 * data\r\n data = data - 1\r\n\r\n return data",
"def loadtext2(infile):\n warrsn, farrsn =np.loadtxt(infile, usecols=(0, 1), unpack=True)\n return create_spectrum(warrsn, farrsn)",
"def process(self, data):\n spectr = stft(data, n_fft=512, hop_length=160)\n return np.concatenate((spectr.real[:, :, np.newaxis], spectr.imag[:, :, np.newaxis]), axis=2)",
"def spectrl2_data():\n # reference spectra generated with solar_utils==0.3\n kwargs = {\n 'surface_tilt': 0,\n 'relative_airmass': 1.4899535986910446,\n 'apparent_zenith': 47.912086486816406,\n 'aoi': 47.91208648681641,\n 'ground_albedo': 0.2,\n 'surface_pressure': 101300,\n 'ozone': 0.344,\n 'precipitable_water': 1.42,\n 'aerosol_turbidity_500nm': 0.1,\n 'dayofyear': 75\n }\n df = pd.read_csv(SPECTRL2_TEST_DATA)\n # convert um to nm\n df['wavelength'] *= 1000\n df[['specdif', 'specdir', 'specetr', 'specglo']] /= 1000\n return kwargs, df",
"def _read_backscatter(self, lines: list) -> np.ndarray:\n n_chars = self._hex_conversion_params[0]\n n_gates = int(len(lines[0]) / n_chars)\n profiles = np.zeros((len(lines), n_gates), dtype=int)\n ran = range(0, n_gates * n_chars, n_chars)\n for ind, line in enumerate(lines):\n try:\n profiles[ind, :] = [int(line[i : i + n_chars], 16) for i in ran]\n except ValueError:\n logging.warning(\"Bad value in raw ceilometer data\")\n ind = profiles & self._hex_conversion_params[1] != 0\n profiles[ind] -= self._hex_conversion_params[2]\n return profiles.astype(float) / self._backscatter_scale_factor",
"def example_spectral_to_xyz():\r\n\r\n print(\"=== Example: Spectral->XYZ ===\")\r\n spc = SpectralColor(\r\n observer='2', illuminant='d50',\r\n spec_380nm=0.0600, spec_390nm=0.0600, spec_400nm=0.0641,\r\n spec_410nm=0.0654, spec_420nm=0.0645, spec_430nm=0.0605,\r\n spec_440nm=0.0562, spec_450nm=0.0543, spec_460nm=0.0537,\r\n spec_470nm=0.0541, spec_480nm=0.0559, spec_490nm=0.0603,\r\n spec_500nm=0.0651, spec_510nm=0.0680, spec_520nm=0.0705,\r\n spec_530nm=0.0736, spec_540nm=0.0772, spec_550nm=0.0809,\r\n spec_560nm=0.0870, spec_570nm=0.0990, spec_580nm=0.1128,\r\n spec_590nm=0.1251, spec_600nm=0.1360, spec_610nm=0.1439,\r\n spec_620nm=0.1511, spec_630nm=0.1590, spec_640nm=0.1688,\r\n spec_650nm=0.1828, spec_660nm=0.1996, spec_670nm=0.2187,\r\n spec_680nm=0.2397, spec_690nm=0.2618, spec_700nm=0.2852,\r\n spec_710nm=0.2500, spec_720nm=0.2400, spec_730nm=0.2300)\r\n xyz = convert_color(spc, XYZColor)\r\n print(xyz)\r\n print(\"=== End Example ===\\n\")",
"def standardize_data(data):\n return (data - np.mean(data, axis=0)) / (np.std(data, axis=0) + 10 ** -16)",
"def get_d65_spectrum():\n\n filename = os.path.dirname(os.path.abspath(__file__))\\\n + os.path.normpath(\"/data/d65_spectrum.csv\")\n data = np.loadtxt(filename, delimiter=',', skiprows=1).T\n\n return np.uint16(data[0]), data[1]",
"def get_spectrum_data():\n from resistics.spectra.data import SpectrumData\n import numpy as np\n\n # add some data\n startTime = \"2020-01-01 00:00:00.000000\"\n stopTime = \"2020-01-01 00:00:00.062500\"\n data = {}\n data[\"Ex\"] = np.array([1 + 3j, -2 + 5j, 7 - 6j, 3 + 2j, 4 + 8j])\n data[\"Ey\"] = np.array([12 - 4j, -6 + 2j, 2 + 6j, -4 - 2j, -6 - 6j])\n data[\"Hx\"] = np.array([-3 + 3j, -11 + 7j, 4 - 1j, 1 + 9j, 2 + 2j])\n data[\"Hy\"] = np.array([2 + 9j, 9 + 1j, 8 + 8j, 6 + 2j, 5 + 2j])\n specData = SpectrumData(8, 5, 128, startTime, stopTime, data)\n evalfreq = np.array([24, 40])\n return specData, evalfreq",
"def processing_data(raw_data):\n data = np.frombuffer(raw_data, np.uint8)\n data = np.reshape(data, [data.shape[0]//1029, -1])\n data = data[:, 5:]\n data = np.reshape(data, [1, -1])\n data = 256 * data[0, 0::2] + data[0, 1::2]\n data = 10 * (data / 65535)\n data = np.reshape(data, [-1, 8]).T\n return data",
"def spectrum_creator(file_name):\n file_data = read_file(file_name)\n image_data = file_data[1]\n\n segmentation_data = file_data[2]\n\n collapsed_data = image_collapser(file_name)\n\n # spectrum for central pixel\n cp_bright = []\n for key, data in collapsed_data.items():\n lgst_val = data.argmax()\n lgst_loc = unravel_index(data.argmax(), data.shape)\n cp_bright.append(lgst_loc)\n\n cp_loc = 0\n if ( cp_bright[0] == cp_bright[1] ):\n cp_loc = cp_bright[0]\n else: \n cp_loc = cp_bright[1]\n\n cp_spec_data = image_data[:][:,cp_loc[0]][:,cp_loc[1]]\n\n # spectrum as defined by the segmentation area\n curr_file_name = file_name.split('.')\n curr_file_name = curr_file_name[0].split('/')\n stk_f_n = curr_file_name[-1]\n cube_id = [int(x) for x in re.findall('\\d+', stk_f_n)][0]\n\n # locating where the galaxy pixels are from the cube_id\n seg_curr_cube = np.where(segmentation_data == cube_id)\n scc_rows, scc_cols = seg_curr_cube\n\n #np.set_printoptions(threshold=np.nan)\n #print(segmentation_data)\n\n collapsed_spectrum = np.zeros([np.shape(image_data)[0], len(scc_rows)])\n for i_r in range(len(scc_rows)):\n # I want to pull out each pixel and store it into the collapsed spectrum array\n collapsed_spectrum[:,i_r] = image_data[:,scc_rows[i_r],scc_cols[i_r]]\n \n galaxy_spectrum = np.zeros(np.shape(image_data)[0])\n for i_ax in range(len(galaxy_spectrum)):\n galaxy_spectrum[i_ax] = np.nansum(collapsed_spectrum[i_ax])\n \n return {'central': cp_spec_data, 'galaxy': galaxy_spectrum, \n 'segmentation': segmentation_data}",
"def spectrum_parser():\n from tools import file_importer, file_outporter\n from random import random\n # from math import log10\n \n print(\"this is spectrum parser\")\n \n relPath = \"bob/processed/OST-24-05-2017_combined.csv\"\n outPath = \"bob/processed/OST-24-05-2017_combined_no0_spectrum.csv\"\n inpF = file_importer(relPath)\n outF = file_outporter(outPath) \n headerFlag = True\n rowCount = 0\n for inpLine in inpF:\n if headerFlag: \n headerFlag = False\n spColCount = 0\n inpList = inpLine.split(\"\\t\")\n for headerI in inpList:\n if \"Peptides ST-1|Peptides ST-2|Peptides ST-3\" == headerI:\n break\n else: spColCount += 1\n outF.write(\"ID,Protein ID, Gene name,\") # write header into output file\n for headI in inpList[spColCount].split(\"|\"):\n outF.write(headI + \",\")\n for headI in inpList[spColCount + 1].split(\"|\")[:-1]:\n outF.write(headI + \",\")\n outF.write(inpList[spColCount + 1].split(\"|\")[-1] + \"\\n\")\n rowCount += 1\n continue\n \n outF.write(str(rowCount) + \",\")\n \n inpLine = inpLine.strip()\n inpItems = inpLine.split(\"\\t\")\n inpName = max(inpItems[0].split(\"|\"), key=len) # get unique protein ID\n inpGeneName = max(inpItems[6].split(\"|\"), key=len) # and gene name\n outF.write(inpName + \",\" + inpGeneName + \",\")\n \n inpSP = inpItems[spColCount].split(\"|\") + inpItems[spColCount + 1].split(\"|\") # get lfq intensity scores\n # print inpSP\n for lfqI in inpSP[:-1]: # write lfq values\n if lfqI == \"_\" or lfqI == \"0\":\n outF.write(str(random()) + \",\") ################## try with log10 values this time\n else:\n try:\n outF.write(str(lfqI) + \",\")\n except ValueError:\n print(inpItems)\n raise\n \n if inpSP[-1] == \"_\" or inpSP[-1] == \"0\": outF.write(str(random()) + \"\\n\")\n else: outF.write(inpSP[-1] + \"\\n\")\n \n \n \n rowCount += 1",
"def combine_data(self):\n\t\tself.Full_E = None\n\t\tself.Imaginary_Spectrum = None\n\t\tif self.raw_file is not None:\n\t\t\tlogger.info(\"Convert to scattering factors\")\n\t\t\tself.NearEdgeData = data.convert_data(self.raw_file,self.DataTypeCombo.GetValue(),'ASF')\n#\t\t\tif self.InvertDataCheckBox.GetValue():\n#\t\t\t\tself.NearEdgeData[:,1] = numpy.abs(self.NearEdgeData[:,1] - 2*numpy.mean(self.NearEdgeData[:,1]))\n\t\tlogger.info(\"Combine Data\")\n\t\t# Get splice points\n\t\tsplice_eV = numpy.array([10.0, 30000.0]) # Henke limits\n\t\tif self.SpliceText1.GetValue() == \"Start\":\n\t\t\tif self.raw_file is not None:\n\t\t\t\tsplice_eV[0] = self.NearEdgeData[0, 0]\n\t\telse:\n\t\t\tsplice_eV[0] = float(self.SpliceText1.GetValue())\n\t\tif self.SpliceText2.GetValue() == \"End\":\n\t\t\tif self.raw_file is not None:\n\t\t\t\tsplice_eV[1] = self.NearEdgeData[-1, 0]\n\t\telse:\n\t\t\tsplice_eV[1] = float(self.SpliceText2.GetValue())\n\t\tif self.raw_file is not None and self.ASF_Data is None:\n\t\t\tself.Full_E, self.Imaginary_Spectrum, self.NearEdgeData, self.splice_ind = data.merge_spectra(self.NearEdgeData, self.ASF_E, self.ASF_Data, merge_points=splice_eV, add_background=self.AddBackgroundCheckBox.GetValue(), plotting_extras=True)\n\n\t\telif self.raw_file is None and self.ASF_Data is not None:\n\t\t\tself.Full_E = self.ASF_E\n\t\t\tself.Imaginary_Spectrum = self.ASF_Data\n\n\t\telif self.raw_file is not None and self.ASF_Data is not None:\n\t\t\t\n\t\t\tself.Full_E, self.Imaginary_Spectrum, self.NearEdgeData, self.splice_ind = data.merge_spectra(self.NearEdgeData, self.ASF_E, self.ASF_Data, merge_points=splice_eV, add_background=self.AddBackgroundCheckBox.GetValue(), fix_distortions=self.FixDistortionsCheckBox.GetValue(), plotting_extras=True)\n\n\t\t\t### get start and end Y values from nexafs and asf data\n\t\t\t##splice_nexafs_Im = numpy.interp(splice_eV, raw_Im[:, 0], raw_Im[:, 1])\n\t\t\t###splice_asf_Im = numpy.interp(splice_eV, self.total_asf[:, 0], self.total_asf[:, 2])\n\t\t\t##splice_asf_Im = (data.coeffs_to_ASF(splice_eV[0],self.total_Im_coeffs[numpy.where(self.total_E<splice_eV[0])[0][-1]]),data.coeffs_to_ASF(splice_eV[1],self.total_Im_coeffs[numpy.where(self.total_E<splice_eV[1])[0][-1]]))\n\t\t\t##cut_boolean = (splice_eV[0]<raw_Im[:, 0]) == (raw_Im[:, 0]<splice_eV[1])\n\t\t\t### Merge Y values\n\t\t\t##if not self.AddBackgroundCheckBox.GetValue():\n\t\t\t\t##logger.info(\"Merge data sets\")\n\t\t\t\t##scale = (splice_asf_Im[1]-splice_asf_Im[0])/(splice_nexafs_Im[1]-splice_nexafs_Im[0])\n\t\t\t\t##scaled_nexafs_Im = ((raw_Im[:, 1]-splice_nexafs_Im[0])*scale)+splice_asf_Im[0]\n\t\t\t\t##self.asf_bg = None # We won't be using this variable this time\n\t\t\t##else:\n\t\t\t\t##logger.info(\"Add data sets (this will currently only work at energies below 30 keV)\")\n\t\t\t\t### Set up background function\n\t\t\t\t### We trust this point to be just before the absorption edge\n\t\t\t\t##trusted_ind = max(0, numpy.where(self.total_asf[:, 0]>splice_eV[0])[0][0]-1)\n\t\t\t\t##Log_total_asf = numpy.log(self.total_asf[:, 2])\n\t\t\t\t### Lets trust the 5 points before our trusted point and make an initial guess at the background function\n\t\t\t\t##p = numpy.polyfit(self.total_asf[(trusted_ind-5):trusted_ind, 0], Log_total_asf[(trusted_ind-5):trusted_ind], 1)\n\t\t\t\t### Now lets look for the points up util the absorption edge\n\t\t\t\t##p_vals = numpy.exp(numpy.polyval(p, self.total_asf[(trusted_ind-5):-1, 0]))\n\t\t\t\t##p_err = 
max(p_vals[0:5]-self.total_asf[(trusted_ind-5):trusted_ind, 2])\n\t\t\t\t##edge_ind = numpy.where(self.total_asf[trusted_ind:-1, 2]-p_vals[4:-1]>p_err*10)\n\t\t\t\t##if len(edge_ind[0])!=0:\n\t\t\t\t\t##edge_ind = edge_ind[0][0]\n\t\t\t\t##else:\n\t\t\t\t\t##edge_ind = trusted_ind\n\t\t\t\t### Redo background using the 5 points before the background point\n\t\t\t\t##p = numpy.polyfit(self.total_asf[(trusted_ind+edge_ind-5):trusted_ind+edge_ind, 0], Log_total_asf[(trusted_ind+edge_ind-5):trusted_ind+edge_ind], 1)\n\t\t\t\t##asf_bg = numpy.exp(numpy.polyval(p, raw_Im[:, 0]))\n\t\t\t\t##logger.info(\"Background defined as: y=exp(%(p1)ex %(p0)+e)\" % {\"p1\":p[1], \"p0\":p[0]})\n\t\t\t\t### Apply background function\n\t\t\t\t##scale = (splice_asf_Im[1]-numpy.exp(numpy.polyval(p, splice_eV[1])))/splice_nexafs_Im[1]\n\t\t\t\t##scaled_nexafs_Im = raw_Im[:, 1]*scale+asf_bg\n\t\t\t\t### store background data for plotting\n\t\t\t\t##cut_boolean_wide = numpy.roll(cut_boolean, -1) + numpy.roll(cut_boolean, 1)\n\t\t\t\t##self.asf_bg = [[trusted_ind+edge_ind-5, trusted_ind+edge_ind], numpy.vstack((raw_Im[cut_boolean_wide, 0], asf_bg[cut_boolean_wide])).T]\n\t\t\t\n\t\t\t##nexafs_cut = numpy.vstack((raw_Im[cut_boolean, 0], scaled_nexafs_Im[cut_boolean])).T\n\t\t\t####Merge point-wise data sets together\n\t\t\t##asf_cut_high = self.total_asf[self.total_asf[:, 0]>splice_eV[1], :]\n\t\t\t##asf_cut_low = self.total_asf[self.total_asf[:, 0]<splice_eV[0], :]\n\t\t\t##self.merged_Im = numpy.vstack((asf_cut_low[:, [0, 2]], (splice_eV[0], splice_asf_Im[0]), nexafs_cut, (splice_eV[1], splice_asf_Im[1]), asf_cut_high[:, [0, 2]]))\n\t\t\t\n\t\t\t####Merge coeff data together\n\t\t\t##coeffs_cut_high = self.total_Im_coeffs[self.total_E[:-1]>splice_eV[1],:]\n\t\t\t##coeffs_cut_low = self.total_Im_coeffs[self.total_E[:-1]<splice_eV[0],:]\n\t\t\t###convert points to coeffs\n\t\t\t##nexafs_coeffs_cut = numpy.zeros((len(nexafs_cut)+1,5))\n\t\t\t##Y = numpy.append(numpy.insert(nexafs_cut[:,1],0,splice_asf_Im[0]),splice_asf_Im[1])\n\t\t\t##nexafs_E = numpy.append(numpy.insert(nexafs_cut[:,0],0,splice_eV[0]),splice_eV[1])\n\t\t\t##M = (Y[1:]-Y[:-1])/(nexafs_E[1:]-nexafs_E[:-1])\n\t\t\t##nexafs_coeffs_cut[:,0] = M\n\t\t\t##nexafs_coeffs_cut[:,1] = Y[:-1]-M*nexafs_E[:-1]\n\t\t\t###assemble merged coeffs and energy values\n\t\t\t##self.merged_Im_coeffs = numpy.vstack((coeffs_cut_low, nexafs_coeffs_cut, self.total_Im_coeffs[-coeffs_cut_high.shape[0]-2,:], coeffs_cut_high))\n\t\t\t##self.merged_E = numpy.concatenate((self.total_E[self.total_E<splice_eV[0]], nexafs_E, self.total_E[self.total_E>splice_eV[1]]))\n\t\t\t### Extras for plotting\n\t\t\t##self.splice_ind = (len(asf_cut_low[:, 0]), -len(asf_cut_high[:, 0]))\n\t\t\t##cut_boolean = (splice_eV[0]<=raw_Im[:, 0]) != (raw_Im[:, 0]<=splice_eV[1])\n\t\t\t##self.nexafs_CutOut = numpy.vstack((raw_Im[cut_boolean, 0], scaled_nexafs_Im[cut_boolean])).T\n\t\t### Previous calculation of f_1 is no longer matching displayed f_2 data\n\t\t##self.KK_Real_Spectrum = None",
"def _formatData(self):\r\n assert self._runData is not None\r\n\r\n # Getting Axes data into separate lists\r\n x=[]; y=[]; z=[]\r\n for i in range(len(self._runData)):\r\n ySet = []; xSet = []; zSet = []\r\n for _ in range(len(self._runData[i][1][0])):\r\n ySet.append(self._runData[i][0])\r\n y.append(ySet)\r\n xSet.append(self._runData[i][1][0])\r\n x.append(xSet)\r\n zSet.append(self._runData[i][1][1])\r\n z.append(zSet)\r\n\r\n # Reduce extra brackets\r\n xnew = []; znew = []\r\n for i in range(len(x)):\r\n xnew.append(x[i][0])\r\n znew.append(z[i][0])\r\n x = xnew; z = znew\r\n\r\n self._frequency = x\r\n self._voltages = y\r\n self._intensity = z",
"def transform(data, dmin, dmax, dformat):\n\n if dformat == 'UV8':\n dform = 255\n else:\n dform = 65535\n # or even better: use numpy arrays, which removes need of for loops\n t = dmin + data * (dmax - dmin) / dform\n return t",
"def to_spectral_img(data):\n assert data.size(-1) == 2\n\n spectral_vol = torch.zeros([data.size(-2), data.size(-2), data.size(-2)])\n\n for i in range(data.size(-2)):\n kspc1 = torch.zeros(data.size())\n kspc1[:, i, :] = data[:, i, :]\n img1 = ifft2(kspc1)\n img1_abs = complex_abs(img1)\n\n spectral_vol[i, :, :] = img1_abs\n\n return spectral_vol",
"def synthetic_seismogram(green, wavelet):\n return np.real(ifft(fft(wavelet) * fft(green)))",
"def loadtext(infile):\n warrsn, farrsn =np.loadtxt(infile, usecols=(0, 1), unpack=True)\n return create_spectrum(warrsn, farrsn)",
"def export_spectrum(filename, data, data_view):\r\n location = 'x' + str(data_view.x) + 'y' + str(data_view.y)\r\n no_ext_filename, ext = os.path.splitext(filename)\r\n out_filename = no_ext_filename + location + '.csv'\r\n xdata = analysis.xdata_calc(data,data_view)\r\n ydata = analysis.ydata_calc(data,data_view)\r\n out = np.c_[xdata,ydata]\r\n np.savetxt(str(out_filename), out, delimiter=\",\", fmt=\"%10.5f\")",
"def apply():\r\n result = dataSampling(str, \"hhhhhhahhhhhahhahahahahhahahha\", 5)\r\n final_res = dataScreening(result, \"ha\")\r\n print(final_res)",
"def structured_spectra(pivData, d=25*0.33, **kwargs):\n \n es = corrLib.energy_spectrum(pivData, d=d)\n es = es.loc[es.k>0] # make sure the 1/es.k step won't encounter error\n \n x, y = xy_bin(es.k, es.E, **kwargs)\n y *= 2 * np.pi * x\n x = (2 * np.pi / x) ** 2 / 9\n spectra = pd.DataFrame({'l_r': x, 'E': y}).set_index('l_r').sort_index()\n \n return spectra",
"def to_real_series(self, data: pd.Series) -> pd.Series:\n ...",
"def _generateLSSData():\n d65data=matFromVec([d65Illum(x) for x in brange(10,380,730)])\n a=matNew([cie1931cmf(x) for x in brange(10,380,730)])\n aprime=matT(a)\n width=matShape(a)[0]\n wdiag=matDiag(d65data[0])\n mat=matNew([ \\\n [3.240970, -1.537383, -0.4986108],\\\n [-0.9692436, 1.875968, 0.04155506],\\\n [0.05563008, -0.2039770, 1.056972]])\n wnorm=vecDot(d65data[0],aprime[1])\n t=matScale(matMul(matMul(mat,aprime),wdiag),1.0/wnorm)\n # Compute Least Slope Squared matrix\n d=matScale(matEye(width),4)\n dSize=matShape(d)[0]\n matSet(d,0,0,2)\n matSet(d,dSize-1,dSize-1,2)\n for i in range(1,dSize):\n matSet(d,i,i-1,-2)\n matSet(d,i-1,i,-2)\n dt=matT(d)\n vt=matT(t)\n tshape=matShape(t)\n bm=matBlock(d,vt,t,matZeros((tshape[0],tshape[0])))\n bm=matI(bm)\n b11=matPart(bm,0,width,0,width)\n b12=matPart(bm,0,matShape(vt)[0],width,matShape(bm)[1])\n return [b11, b12]",
"def create_python_data(self) -> dict:\r\n s = self.scale\r\n minimum, maximum = self.get_min_max()\r\n diff = maximum - minimum\r\n\r\n output = {}\r\n\r\n # Create the data for the scatters\r\n for name, data in self.scatters_data.items():\r\n mapping = self.scatters[name][\"mapping\"]\r\n colormaps = self.scatters[name][\"colormap\"]\r\n cmaps = [None] * len(colormaps)\r\n\r\n for i, colormap in enumerate(colormaps):\r\n if isinstance(colormap, str):\r\n cmaps[i] = plt.cm.get_cmap(colormap)\r\n else:\r\n cmaps[i] = colormap\r\n\r\n output[name] = {}\r\n output[name][\"meta\"] = self.scatters[name]\r\n output[name][\"type\"] = \"scatter\"\r\n\r\n output[name][\"x\"] = np.array(\r\n [s * (x - minimum) / diff for x in data[mapping[\"x\"]]], dtype=np.float32\r\n )\r\n output[name][\"y\"] = np.array(\r\n [s * (y - minimum) / diff for y in data[mapping[\"y\"]]], dtype=np.float32\r\n )\r\n output[name][\"z\"] = np.array(\r\n [s * (z - minimum) / diff for z in data[mapping[\"z\"]]], dtype=np.float32\r\n )\r\n\r\n if mapping[\"labels\"] in data:\r\n # Make sure that the labels are always strings\r\n output[name][\"labels\"] = list(map(str, data[mapping[\"labels\"]]))\r\n\r\n if mapping[\"s\"] in data:\r\n output[name][\"s\"] = np.array(data[mapping[\"s\"]], dtype=np.float32)\r\n\r\n output[name][\"colors\"] = [{}] * len(data[mapping[\"c\"]])\r\n for s in range(len(data[mapping[\"c\"]])):\r\n if mapping[\"cs\"] in data:\r\n colors = np.array([cmaps[s](x) for x in data[mapping[\"c\"]][s]])\r\n\r\n for i, c in enumerate(colors):\r\n hsl = np.array(colour.rgb2hsl(c[:3]))\r\n hsl[1] = hsl[1] - hsl[1] * data[mapping[\"cs\"]][s][i]\r\n colors[i] = np.append(np.array(colour.hsl2rgb(hsl)), 1.0)\r\n\r\n colors = np.round(colors * 255.0)\r\n\r\n output[name][\"colors\"][s][\"r\"] = np.array(\r\n colors[:, 0], dtype=np.float32\r\n )\r\n output[name][\"colors\"][s][\"g\"] = np.array(\r\n colors[:, 1], dtype=np.float32\r\n )\r\n output[name][\"colors\"][s][\"b\"] = np.array(\r\n colors[:, 2], dtype=np.float32\r\n )\r\n else:\r\n colors = np.array([cmaps[s](x) for x in data[mapping[\"c\"]][s]])\r\n colors = np.round(colors * 255.0)\r\n output[name][\"colors\"][s][\"r\"] = np.array(\r\n colors[:, 0], dtype=np.float32\r\n )\r\n output[name][\"colors\"][s][\"g\"] = np.array(\r\n colors[:, 1], dtype=np.float32\r\n )\r\n output[name][\"colors\"][s][\"b\"] = np.array(\r\n colors[:, 2], dtype=np.float32\r\n )\r\n\r\n for name, data in self.trees_data.items():\r\n mapping = self.trees[name][\"mapping\"]\r\n point_helper = self.trees[name][\"point_helper\"]\r\n\r\n output[name] = {}\r\n output[name][\"meta\"] = self.trees[name]\r\n output[name][\"type\"] = \"tree\"\r\n\r\n if point_helper is not None and point_helper in self.scatters_data:\r\n scatter = self.scatters_data[point_helper]\r\n scatter_mapping = self.scatters[point_helper][\"mapping\"]\r\n\r\n x_t = []\r\n y_t = []\r\n z_t = []\r\n\r\n for i in range(len(data[mapping[\"from\"]])):\r\n x_t.append(scatter[scatter_mapping[\"x\"]][data[mapping[\"from\"]][i]])\r\n x_t.append(scatter[scatter_mapping[\"x\"]][data[mapping[\"to\"]][i]])\r\n y_t.append(scatter[scatter_mapping[\"y\"]][data[mapping[\"from\"]][i]])\r\n y_t.append(scatter[scatter_mapping[\"y\"]][data[mapping[\"to\"]][i]])\r\n z_t.append(scatter[scatter_mapping[\"z\"]][data[mapping[\"from\"]][i]])\r\n z_t.append(scatter[scatter_mapping[\"z\"]][data[mapping[\"to\"]][i]])\r\n\r\n output[name][\"x\"] = np.array(\r\n [s * (x - minimum) / diff for x in x_t], dtype=np.float32\r\n )\r\n output[name][\"y\"] = 
np.array(\r\n [s * (y - minimum) / diff for y in y_t], dtype=np.float32\r\n )\r\n output[name][\"z\"] = np.array(\r\n [s * (z - minimum) / diff for z in z_t], dtype=np.float32\r\n )\r\n else:\r\n output[name][\"x\"] = np.array(\r\n [s * (x - minimum) / diff for x in data[mapping[\"x\"]]],\r\n dtype=np.float32,\r\n )\r\n output[name][\"y\"] = np.array(\r\n [s * (y - minimum) / diff for y in data[mapping[\"y\"]]],\r\n dtype=np.float32,\r\n )\r\n output[name][\"z\"] = np.array(\r\n [s * (z - minimum) / diff for z in data[mapping[\"z\"]]],\r\n dtype=np.float32,\r\n )\r\n\r\n if mapping[\"c\"] in data:\r\n colormap = self.trees[name][\"colormap\"]\r\n cmap = None\r\n if isinstance(colormap, str):\r\n cmap = plt.cm.get_cmap(colormap)\r\n else:\r\n cmap = colormap\r\n\r\n colors = np.array([cmap(x) for x in data[mapping[\"c\"]]])\r\n colors = np.round(colors * 255.0)\r\n output[name][\"r\"] = np.array(colors[:, 0], dtype=np.float32)\r\n output[name][\"g\"] = np.array(colors[:, 1], dtype=np.float32)\r\n output[name][\"b\"] = np.array(colors[:, 2], dtype=np.float32)\r\n\r\n return output",
"def make_sunpy(evtdata, hdr):\n\n\t# Parse Header keywords\n\tfor field in hdr.keys():\n\t\tif field.find('TYPE') != -1:\n\t\t\tif hdr[field] == 'X':\n\t\t\t\tprint(hdr[field][5:8])\n\t\t\t\txval = field[5:8]\n\t\t\tif hdr[field] == 'Y':\n\t\t\t\tprint(hdr[field][5:8])\n\t\t\t\tyval = field[5:8]\n\t\t\n\tmin_x= hdr['TLMIN'+xval]\n\tmin_y= hdr['TLMIN'+yval]\n\tmax_x= hdr['TLMAX'+xval]\n\tmax_y= hdr['TLMAX'+yval]\n\n\tdelx = hdr['TCDLT'+xval]\n\tdely = hdr['TCDLT'+yval]\n\n\tx = evtdata['X'][:]\n\ty = evtdata['Y'][:]\n\tmet = evtdata['TIME'][:]*u.s\n\tmjdref=hdr['MJDREFI']\n\tmid_obs_time = astropy.time.Time(mjdref*u.d+met.mean(), format = 'mjd')\n\n\t# Use the native binning for now\n\n\t# Assume X and Y are the same size\n\tresample = 1.0\n\tscale = delx * resample\n\tbins = (max_x - min_x) / (resample)\n\n\tH, yedges, xedges = np.histogram2d(y, x, bins=bins, range = [[min_y,max_y], [min_x, max_x]])\n\n\n\tdict_header = {\n\t\"DATE-OBS\": mid_obs_time.iso,\n\t\"CDELT1\": scale,\n\t\"NAXIS1\": bins,\n\t\"CRVAL1\": 0.,\n\t\"CRPIX1\": bins*0.5,\n\t\"CUNIT1\": \"arcsec\",\n\t\"CTYPE1\": \"HPLN-TAN\",\n\t\"CDELT2\": scale,\n\t\"NAXIS2\": bins,\n\t\"CRVAL2\": 0.,\n\t\"CRPIX2\": bins*0.5 + 0.5,\n\t\"CUNIT2\": \"arcsec\",\n\t\"CTYPE2\": \"HPLT-TAN\",\n\t\"HGLT_OBS\": 0,\n\t\"HGLN_OBS\": 0,\n\t\"RSUN_OBS\": sun.solar_semidiameter_angular_size(mid_obs_time).value,\n\t\"RSUN_REF\": sun.constants.radius.value,\n\t\"DSUN_OBS\": sun.sunearth_distance(mid_obs_time).value\n\t}\n\t# For some reason the DSUN_OBS crashed the save...\n\n\theader = sunpy.map.MapMeta(dict_header)\n\n\tnustar_map = sunpy.map.Map(H, header)\n\t\n\treturn nustar_map",
"def stats_from_data(data,reverse_data):\n start = -1\n end = -1\n for num, pix in enumerate(data):\n if (pix[0]+pix[1]+pix[2]) < (192*3):\n start = num\n break\n for num, pix in enumerate(reverse_data):\n if (pix[0]+pix[1]+pix[2]) < (192*3):\n end = len(reverse_data) - (num + 1)\n break\n totalr, totalg, totalb = 0,0,0\n minr, ming, minb = 255,255,255\n maxr, maxg, maxb = 0,0,0\n count = 0\n dark = True\n tcount = 0\n if start == -1 or end == -1 or start==(end-1):\n return (start,end,(end-start)+1, 0,0,0, 0,0,0, 0,0,0, 0)\n for pix in data[start:end+1]:\n totalr += pix[0]\n totalg += pix[1]\n totalb += pix[2]\n if pix[0]<minr: minr=pix[0]\n if pix[1]<ming: ming=pix[1]\n if pix[2]<minb: minb=pix[2]\n if pix[0]>maxr: maxr=pix[0]\n if pix[1]>maxg: maxg=pix[1]\n if pix[2]>maxb: maxb=pix[2]\n#Light to dark transitions will be those in which, following a value above\n#208, the value drops to 184 or below on any color channel.\n if (pix[0]+pix[1]+pix[2])<=(184*3) and not dark:\n dark = True\n tcount += 1\n elif (pix[0]+pix[1]+pix[2])>(208*3) and dark:\n dark = False\n count += 1\n meanr = int(round(float(totalr*10)/count))\n meang = int(round(float(totalg*10)/count))\n meanb = int(round(float(totalb*10)/count))\n return (start, end, (end-start)+1, \n tcount, \n minr,maxr,meanr, \n ming,maxg,meang, \n minb,maxb,meanb)"
] |
[
"0.58403826",
"0.5734011",
"0.5645727",
"0.5622272",
"0.5570202",
"0.556081",
"0.5543349",
"0.5482549",
"0.5461118",
"0.5426467",
"0.5399718",
"0.53791714",
"0.53609496",
"0.5282781",
"0.52763706",
"0.5257553",
"0.51904374",
"0.51811427",
"0.5170123",
"0.5169117",
"0.5167688",
"0.5145525",
"0.51377386",
"0.5126391",
"0.51145154",
"0.5104944",
"0.51038766",
"0.5097814",
"0.50951654",
"0.509242"
] |
0.6065294
|
0
|
Yield each group of choices, sampled with replacement.
|
def bootstrap(items, choices, repeats):
    # 'sample' is assumed to be an external choice-with-replacement helper
    # (numpy.random.choice-style); the stdlib random.sample has no replace flag.
    for _ in range(repeats):
        yield sample(items, choices, replace=True)
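A minimal sketch of how this generator might be exercised; the numpy-based sample stand-in is an assumption, filling in for whatever choice-with-replacement helper the original module provides.

import numpy as np

def sample(items, k, replace=True):
    # hypothetical stand-in: draws k items with replacement (the only mode bootstrap uses)
    idx = np.random.randint(0, len(items), size=k)
    return [items[i] for i in idx]

words = ['acgt', 'cgta', 'gtac', 'tacg']
for group in bootstrap(words, choices=3, repeats=2):
    print(group)   # each group is a list of 3 words drawn with replacement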
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def shuffle_choices(self, choices, rng):\r\n # Separate out a list of the stuff to be shuffled\r\n # vs. the head/tail of fixed==true choices to be held back from the shuffle.\r\n # Rare corner case: A fixed==true choice \"island\" in the middle is lumped in\r\n # with the tail group of fixed choices.\r\n # Slightly tricky one-pass implementation using a state machine\r\n head = []\r\n middle = [] # only this one gets shuffled\r\n tail = []\r\n at_head = True\r\n for choice in choices:\r\n if at_head and choice.get('fixed') == 'true':\r\n head.append(choice)\r\n continue\r\n at_head = False\r\n if choice.get('fixed') == 'true':\r\n tail.append(choice)\r\n else:\r\n middle.append(choice)\r\n rng.shuffle(middle)\r\n return head + middle + tail",
"def test_replace_groups(self):\n pass",
"def iter_choices(self):\n\n for pk, obj in self._get_object_list():\n if hasattr(obj, self.id):\n selected = getattr(obj, self.id)\n else:\n selected = obj in self.data\n\n yield (pk, self.get_label(obj), selected)",
"def feedReplaceSetups(self, recipies, *args, **keys):\n log(\"REPLACE SELECTION\")\n self.addFeedback(\"replaceSelection\", recipies)\n return \"\"",
"def mapGroups(groupList, letters):\r\n changeList = findIndices(groupList)\r\n i = 0\r\n for index in changeList:\r\n toReplace = groupList[index]\r\n groupList = qMS.listReplace(groupList, toReplace, letters[i])\r\n i = i+1\r\n return list(groupList)",
"def randomize_all_groups(self):\n \n for day in self.days:\n \n # resetting the professor's\n for prof in day.keys(): \n day[prof] = []\n \n # rescrambling\n scramble_list(self.groups)\n \n # re adding the groups\n done = False\n counter = 0 \n for unused_var in range(self.max_groups_per_room):\n \n if done:\n break\n \n for room in day.values(): \n \n room.append( self.groups[counter] )\n counter += 1\n \n if counter >= len(self.groups):\n done = True\n break",
"def section_4_9():\n from itertools import permutations\n from itertools import combinations\n from itertools import combinations_with_replacement\n\n items = ['a', 'b', 'c']\n\n def test1():\n for p in permutations(items):\n print(p)\n\n def test2():\n for p in combinations(items, 3):\n print(p)\n print()\n for p in combinations(items, 2):\n print(p)\n print()\n for p in combinations(items, 1):\n print(p)\n print()\n for p in combinations_with_replacement(items, 3):\n print(p)",
"def __iter__(self):\n for key in itertools.chain(list(self._opts.keys()),\n list(self._groups.keys())):\n yield key",
"def mc_setup_response(self):\r\n i = 0\r\n for response in self.xml.xpath(\"choicegroup\"):\r\n # Is Masking enabled? -- check for shuffle or answer-pool features\r\n ans_str = response.get(\"answer-pool\")\r\n if response.get(\"shuffle\") == \"true\" or (ans_str is not None and ans_str != \"0\"):\r\n self._has_mask = True # pylint: disable=W0201\r\n self._mask_dict = {} # pylint: disable=W0201\r\n # We do not want the random mask names to be the same\r\n # for all responses in a problem (sharing the one seed),\r\n # like mask_2 in view-source turns out to always be the correct choice.\r\n # But it must be repeatable and a function of the seed.\r\n # Therefore we add the _1 number from the .id to the seed.\r\n seed_delta = int(self.id[self.id.rindex(\"_\") + 1:])\r\n rng = random.Random(self.context[\"seed\"] + seed_delta)\r\n # e.g. mask_ids = [3, 1, 0, 2]\r\n mask_ids = range(len(response))\r\n rng.shuffle(mask_ids)\r\n rtype = response.get('type')\r\n if rtype not in [\"MultipleChoice\"]:\r\n # force choicegroup to be MultipleChoice if not valid\r\n response.set(\"type\", \"MultipleChoice\")\r\n for choice in list(response):\r\n # The regular, non-masked name:\r\n if choice.get(\"name\") is not None:\r\n name = \"choice_\" + choice.get(\"name\")\r\n else:\r\n name = \"choice_\" + str(i)\r\n i += 1\r\n # If using the masked name, e.g. mask_0, save the regular name\r\n # to support unmasking later (for the logs).\r\n if self.has_mask():\r\n mask_name = \"mask_\" + str(mask_ids.pop())\r\n self._mask_dict[mask_name] = name\r\n choice.set(\"name\", mask_name)\r\n else:\r\n choice.set(\"name\", name)",
"def _apply_character_maskings(self):\n for permutation in self.permutations:\n for char_symbol in self.characters.keys():\n for i in permutation.find_all(\"character-link\", ref=char_symbol): \n i.string.replace_with(self.characters[char_symbol])\n\n self.plain_text = \" \".join([permuation.description.text for permuation in self.permutations])\n self.reapply_plain_text_editing()",
"def __iter__(self):\n for key in self._group._opts.keys():\n yield key",
"def do_shuffle(self, tree, problem):\r\n # The tree is already pared down to this <multichoiceresponse> so this query just\r\n # gets the child choicegroup (i.e. no leading //)\r\n choicegroups = tree.xpath('choicegroup[@shuffle=\"true\"]')\r\n if choicegroups:\r\n choicegroup = choicegroups[0]\r\n if choicegroup.get('answer-pool') is not None:\r\n _ = self.capa_system.i18n.ugettext\r\n # Translators: 'shuffle' and 'answer-pool' are attribute names and should not be translated.\r\n msg = _(\"Do not use shuffle and answer-pool at the same time\")\r\n raise LoncapaProblemError(msg)\r\n # Note in the response that shuffling is done.\r\n # Both to avoid double-processing, and to feed the logs.\r\n if self.has_shuffle():\r\n return\r\n self._has_shuffle = True # pylint: disable=W0201\r\n # Move elements from tree to list for shuffling, then put them back.\r\n ordering = list(choicegroup.getchildren())\r\n for choice in ordering:\r\n choicegroup.remove(choice)\r\n ordering = self.shuffle_choices(ordering, self.get_rng(problem))\r\n for choice in ordering:\r\n choicegroup.append(choice)",
"def sequence_replace(sequences, char_to_replace, char_replacements):\n return [sequence_replace_single(sequence, char_to_replace, char_replacements) for sequence in sequences]",
"def extend_words_iter(words: _Iterable[str]):\n output = set(words)\n for word in output:\n yield word\n for word in words:\n tmp = set()\n change_case(tmp, word)\n for w in tmp:\n if w not in output:\n output.add(w)\n yield w\n for word in output.copy():\n new_words = set()\n replace(new_words, word)\n for w in new_words:\n if w not in output:\n output.add(w)\n yield w",
"def replace_groups(self):\n newstr = []\n for state in self._parsed:\n newstr.append(self._handle_state(state))\n return ''.join(newstr)",
"def choices(self, choices: Iterable[Tuple[str, str]]):\n try:\n iter(choices)\n except TypeError:\n raise TypeError(\"'choices' isn't a valid iterable\")\n\n apply_choices = []\n for i, (choice_id, choice_label) in enumerate(choices):\n apply_choices.append((str(choice_id), str(choice_label)))\n\n if len(apply_choices) < 2:\n raise ValueError(\"you need to specify at least two choices\")\n\n self._choices = apply_choices\n self.specific.refresh()\n self._selected = 0",
"def test_replace_group(self):\n pass",
"def __iter__(self):\n seen = set()\n for elem, group in self._mapping.items():\n if elem not in seen:\n yield group\n seen.update(group)",
"def replace_text_in_selections(view, edit, text):\n for region in view.sel():\n view.replace(edit, region, text)",
"def items(self):\n\t\tfor k, vs in self.multiple.items():\n\t\t\tfor v in vs: yield k, v",
"def __iter__(self):\n try:\n groupNames = sorted(self.groups)\n except Exception: # pragma: no cover\n groupNames = self.groups.keys()\n\n for groupName in groupNames:\n yield groupName, self[groupName]",
"def choices(self, choices):\n\n self._choices = choices",
"def __iter__(self):\n yield self.match",
"def test_shuffle_island(self):\r\n xml_str = textwrap.dedent(\"\"\"\r\n <problem>\r\n <multiplechoiceresponse>\r\n <choicegroup type=\"MultipleChoice\" shuffle=\"true\">\r\n <choice correct=\"false\" fixed=\"true\">A</choice>\r\n <choice correct=\"false\">Mid</choice>\r\n <choice correct=\"true\" fixed=\"true\">C</choice>\r\n <choice correct=\"False\">Mid</choice>\r\n <choice correct=\"false\" fixed=\"true\">D</choice>\r\n </choicegroup>\r\n </multiplechoiceresponse>\r\n </problem>\r\n \"\"\")\r\n problem = new_loncapa_problem(xml_str, seed=0)\r\n the_html = problem.get_html()\r\n self.assertRegexpMatches(the_html, r\"<div>.*\\[.*'A'.*'Mid'.*'Mid'.*'C'.*'D'.*\\].*</div>\")",
"def iter_recipes(self, pattern):\n raise NotImplementedError()",
"def mix_iterator(self):\n self.job = OrderedDict()\n for list_i in self.grid_iterator():\n # Pick the values to be used in this run\n for (k, i) in zip(self.table.keys(), list_i):\n self.job[k] = self.table[k][i]\n # Do the string replace operations on the values themselves\n self.expand_values()\n yield self.job",
"def __iter__(self):\n return iproduct(*self.sets)",
"def sampleWithReplacement(population, choiceSize):\n\n n = len(population)\n _random, _int = random.random, int # speed hack\n return [_int(_random()*n) for _ in itertools.repeat(None, choiceSize)]",
"def option_registrations_iter(self):\n\n def normalize_kwargs(orig_args, orig_kwargs):\n nkwargs = copy.copy(orig_kwargs)\n dest = self.parse_dest(*orig_args, **nkwargs)\n nkwargs[\"dest\"] = dest\n if not (\"default\" in nkwargs and isinstance(nkwargs[\"default\"], RankedValue)):\n type_arg = nkwargs.get(\"type\", str)\n member_type = nkwargs.get(\"member_type\", str)\n default_val = self.to_value_type(nkwargs.get(\"default\"), type_arg, member_type)\n if isinstance(default_val, (ListValueComponent, DictValueComponent)):\n default_val = default_val.val\n nkwargs[\"default\"] = RankedValue(Rank.HARDCODED, default_val)\n return nkwargs\n\n # Yield our directly-registered options.\n for args, kwargs in self._option_registrations:\n normalized_kwargs = normalize_kwargs(args, kwargs)\n yield args, normalized_kwargs",
"def process(e):\n result = []\n current = first_unprocessed_expansion(e)\n\n # Handle cases where no processing is required\n if not current:\n return [e]\n\n copies = []\n if isinstance(current, AlternativeSet):\n dictation_children = [] # again, not necessarily only dictation.\n jsgf_only_children = []\n for child in current.children:\n if dictation_in_expansion(child):\n dictation_children.append(child)\n else:\n jsgf_only_children.append(child)\n\n # Create a replacements list, create copies of the expansion tree and\n # replace the copy of the AlternativeSet currently being processed\n if len(jsgf_only_children) == 1:\n replacements = jsgf_only_children\n else:\n replacements = [AlternativeSet(*jsgf_only_children)]\n replacements.extend(dictation_children)\n\n elif isinstance(current, (OptionalGrouping, KleeneStar)):\n # Handle not required - remove from a copy\n copy = deepcopy(current.root_expansion)\n copy_x = find_expansion(copy, current)\n copy_parent = copy_x.parent\n ancestor = copy_parent\n\n # Traverse up the parent tree and remove copy_x or one of its ancestors\n # where there is another child\n while ancestor:\n if ancestor.children > 1:\n ancestor.children.remove(copy_x)\n break\n\n copy_x = ancestor\n ancestor = ancestor.parent\n\n # copy_x or one of its ancestors was removed from the tree correctly\n # If this isn't true, the expansion is an empty tree and shouldn't be\n # added.\n if ancestor:\n copies.append(copy)\n\n # Let replacement loop handle required\n if isinstance(current, OptionalGrouping):\n replacements = [current.child]\n else:\n replacements = [Repeat(current.child)]\n else:\n replacements = []\n\n for replacement in replacements:\n # Find the copy of the current AlternativeSet being processed\n copy = deepcopy(current.root_expansion)\n copy_x = find_expansion(copy, current)\n copy_parent = copy_x.parent\n if copy_parent:\n index = copy_parent.children.index(copy_x)\n copy_parent.children.remove(copy_x)\n copy_parent.children.insert(index, replacement)\n else:\n # copy is the root expansion.\n copy = replacement\n copies.append(copy)\n\n for copy in copies:\n next_unprocessed = first_unprocessed_expansion(copy)\n if not next_unprocessed and copy not in result:\n result.append(copy)\n else:\n # Process the next unprocessed expansion and add the result\n result.extend(process(next_unprocessed))\n\n return result"
] |
[
"0.5570695",
"0.5404692",
"0.5384516",
"0.5351762",
"0.53155327",
"0.5287687",
"0.5285",
"0.52001417",
"0.5197755",
"0.51410365",
"0.51231086",
"0.5089644",
"0.5087817",
"0.50691915",
"0.5068467",
"0.5063193",
"0.5062266",
"0.5049467",
"0.5021306",
"0.50199336",
"0.50183135",
"0.5017606",
"0.5017259",
"0.5013588",
"0.4991808",
"0.4991764",
"0.49913952",
"0.49497485",
"0.4928187",
"0.49238193"
] |
0.564822
|
0
|
Return a matrix of words ~ genera giving p(word | genus) (word_prob_provided_genus); total_seqs is an int, genus_seqs an int 1-D array.
|
def _get_word_posteriors(self, seq_counts, genus_seqs, total_seqs):
    # c_ is numpy.c_ (assumed imported at module level): stacks the row sums into a column vector
    # n(wi) as a col vector
    word_seqs = c_[seq_counts.sum(1)]
    # p(wi) as a col vector
    word_priors = (word_seqs + 0.5) / (total_seqs + 1)
    # p(wi|G)
    word_posteriors = (seq_counts + word_priors) / (genus_seqs + 1)
    return word_posteriors
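A small numeric sketch of the same arithmetic with plain numpy (toy counts, purely illustrative); the class context is dropped and numpy's c_ is used explicitly.

import numpy as np

seq_counts = np.array([[3, 0],   # word 0: seen in 3 seqs of genus A, 0 of genus B
                       [1, 2]])  # word 1: seen in 1 seq of genus A, 2 of genus B
genus_seqs = np.array([4, 2])    # 4 sequences in genus A, 2 in genus B
total_seqs = 6

word_seqs = np.c_[seq_counts.sum(1)]                # n(wi) as a column: [[3], [3]]
word_priors = (word_seqs + 0.5) / (total_seqs + 1)  # p(wi): [[0.5], [0.5]]
word_posteriors = (seq_counts + word_priors) / (genus_seqs + 1)
# p(wi|G) ~= [[0.7, 0.167], [0.3, 0.833]]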
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def get_matrix_of_vectors(wv_from_bin, required_words=['softball', 'technology','street','project','fellow','maps','view','fuel','summer','clubhouse','ball','steal','soccer','driving','motor','comedy']):\n import random\n words = list(wv_from_bin.vocab.keys())\n print(\"Shuffling words ...\")\n random.shuffle(words)\n wrds = words[:10000]\n print(\"Putting %i words into word2Ind and matrix M...\" % len(words))\n word2Ind = {}\n M = []\n curInd = 0\n for w in words:\n try:\n M.append(wv_from_bin.word_vec(w))\n word2Ind[w] = curInd\n curInd += 1\n except KeyError:\n continue\n for w in required_words:\n try:\n M.append(wv_from_bin.word_vec(w))\n word2Ind[w] = curInd\n curInd += 1\n except KeyError:\n continue\n M = np.stack(M)\n print(\"Done.\")\n return M, word2Ind",
"def fake_data(n_docs, n_words, n_sent_length, n_topics):\n # These are log ratios for the doc & word topics\n doc_topics = orthogonal_matrix([n_docs, n_topics])\n wrd_topics = orthogonal_matrix([n_topics, n_words])\n # Multiply log ratios and softmax to get prob of word in doc\n doc_to_wrds = softmax(np.dot(doc_topics, wrd_topics))\n # Now sample from doc_to_wrd to get realizations\n indices = np.arange(n_words).astype('int32')\n sentences = []\n for doc_to_wrd in doc_to_wrds:\n words = sample(indices, doc_to_wrd, n_sent_length)\n sentences.append(words)\n\n return np.array(sentences, dtype=np.int32)",
"def train(self, seq_genera, drop_single_seq_genera=False):\n seq_genera, seq_genera_again = tee(seq_genera) #two iters\n #[#seqs in genus], {genus: idx}, #total_seqs.\n genus_seqs, genus_idxs, total_seqs = self._get_genus_seqs(\n seq_genera, drop_single_seq_genera)\n #a matrix of words ~ genera seqcounts and {word: idx}.\n seq_counts, word_idxs = self._get_seq_counts(\n seq_genera_again, genus_idxs)\n self._word_posteriors = self._get_word_posteriors(\n seq_counts, genus_seqs, total_seqs)\n self._genus_idxs, self._word_idxs = genus_idxs, word_idxs",
"def get_ngramlogprobs_fromcorpus(tokenizedseqs, n):\n return",
"def get_bigramlogprobs_fromcorpus_addalpha(tokenizedseqs, alpha):\n return",
"def generate_words(phi, theta, M, N_min, N_max):\n \n doc_lens = np.random.randint(N_min, N_max, M)\n z = {}\n w = {}\n for m in range(M):\n z[m] = []\n w[m] = []\n for n in range(doc_lens[m]):\n z[m].extend(np.nonzero(np.random.multinomial(1, theta[m,:]))[0])\n w[m].extend(np.nonzero(np.random.multinomial(1, phi[z[m][n], :]))[0])\n \n return w",
"def gen_bag_of_words_df(self):\n\t\tdef word_vector(doc_text):\n\t\t\tfreqs = pd.Series(collections.Counter(doc_text.split()))\n\t\t\treturn freqs.loc[set(freqs.index.values)|set(self.stems)]\n\t\tself.bagofwords = self.dataframe.text.apply(word_vector).replace({np.nan:0})",
"def get_topics_strings(\n topics_words, mu, sigma, vocabulary, topics_to_print=10, words_per_topic=30\n):\n mu = np.squeeze(mu, axis=0)\n sigma = np.squeeze(sigma, axis=0)\n # Use a stable sorting algorithm so that when alpha is fixed\n # we always get the same topics.\n highest_weight_topics = np.argsort(-mu, kind=\"mergesort\")\n top_words = np.argsort(-topics_words, axis=1)\n\n res = []\n # try:\n for topic_idx in highest_weight_topics[:topics_to_print]:\n lst = [\n \"index={} mu={:.2f} sigma={:.2f}\".format(\n topic_idx, mu[topic_idx], sigma[topic_idx]\n )\n ]\n lst += [vocabulary[word] for word in top_words[topic_idx, :words_per_topic]]\n res.append(\" \".join(lst))\n # except:\n # res.append('')\n\n return np.array(res)",
"def bigram_model(list_of_words, unigram_count, bigram_count):\n if start_phrase not in list_of_words:\n list_of_words.insert(0, start_phrase)\n if end_phrase not in list_of_words:\n list_of_words.append(end_phrase)\n uni_count = pd.read_csv(unigram_count)\n bigram_count = pd.read_csv(bigram_count)\n # proba_matrix = defaultdict(float)\n proba_dict = {list_of_words[i] + \" \" + list_of_words[i+1]: (bigram_count[list_of_words[i] + \" \" + list_of_words[i+1]].values[0] / float(uni_count[list_of_words[i]].values[0]))\n if list_of_words[i] + \" \" + list_of_words[i+1] in bigram_count.columns.values else 0.0 for i in xrange(len(list_of_words) - 1)}\n return proba_dict\n # for i in xrange(len(list_of_words) - 1):\n # bi_words = list_of_words[i] + \" \" + list_of_words[i+1]\n # if bi_words in bigram_count.columns.values:\n # proba_matrix = {bi_words: (bigram_count[bi_words] / float(list_of_words[i]))}\n # else:\n # proba_matrix = {bi_words: 0.0}",
"def seq2Vec(sequences):\r\n global dict_words_n_vectors\r\n for sent in sequences:\r\n for i in range(len(sent)):\r\n if sent[i] in dict_words_n_vectors:\r\n sent[i] = dict_words_n_vectors[sent[i]]\r\n else:\r\n sent[i] = np.zeros(300)\r\n return np.array(sequences, dtype=\"float32\")",
"def fit_words(self, frequencies):\n return self.generate_from_frequencies(frequencies)",
"def get_topic_matrix(self):\n print('get topic matrix')\n\n topic_words_dict = self.config['topic_words']\n\n topic_matrix = np.empty((0, self.wordvec.embedding_dim))\n\n topic_id = 0\n for topic in topic_words_dict.keys():\n topic_words = topic_words_dict[topic]\n topic_vector = self.wordvec.avg_words_vector(topic_words)\n\n topic_matrix = np.append(topic_matrix, topic_vector, axis=0)\n\n self.id2topic[str(topic_id)] = topic\n topic_id += 1\n\n return topic_matrix",
"def title_words_matrix( corpus, cooccurrence_only = True, ignore_words = Ignore_words ) :\n return words_matrix( corpus, 'TI', cooccurrence_only, ignore_words )",
"def abstract_words_matrix( corpus, cooccurrence_only = True, ignore_words = Ignore_words ) :\n return words_matrix( corpus, 'AB', cooccurrence_only, ignore_words )",
"def generate(markov_process):\n sequence = []\n for index, matrix in enumerate(markov_process):\n prefix = tuple(sequence[-min(index, matrix.order):])\n probabilities = matrix[prefix]\n value = np.random.choice(probabilities.keys(), p=probabilities.values())\n sequence.append(value)\n return sequence",
"def card_generator(mw, prob_dist_dict, gensim_model):\n\n # First things first: make sure that the word is actually in the word2vec vocab.\n # word_vectors = gensim_model.wv\n if mw not in gensim_model.wv.vocab:\n return False\n\n # Generate five categories with the weighted probabilities based on their frequency in the gold standard data.\n five_semrels_list = select_five_categories(prob_dist_dict)\n five_semrels = pd.Series(five_semrels_list)\n\n # Count the number of instances of each semrel category in that list.\n semrels_counts = dict( five_semrels.value_counts() )\n\n # Generate the semantic relations dictionary.\n srdict = sr.make_semrel_dict(mw)\n\n # Rejig five_semrels_list, if need be, to one whose labels are compatible with the cardinality of the sets available\n # in srdict.\n good_five_labels = get_good_label_distrib(srdict, semrels_counts)\n\n # Now we just populate a list with the required number of each kind of word!\n # First, initialise list to contain the five final Taboo words (yay!)\n tws = []\n\n # Go through good_five_labels and, for the labels that aren't 'collocation', access their list in the dictionary and\n # randomly select however many out of it.\n for label, count in good_five_labels.items():\n if label != 'collocation':\n tws.extend( rd.sample( tuple( srdict[label] ), count ) )\n\n # Now, take the number of collocations needed and return the most similar words according to gensim, removing the\n # words that are forbidden (i.e. the main word and also the other words that are already in tws)\n forbidden_words = set(tws + [mw])\n num_coll = good_five_labels['collocation']\n collocates = sr.get_collocations(mw, forbidden_words, gensim_model, num_collocates = num_coll)\n\n # If there are more collocates than needed, randomly select num_coll of them and add to tws. Else just add list to tws.\n if len(collocates) > num_coll:\n tws.extend( rd.sample( tuple(collocates), num_coll ) )\n else:\n tws.extend(collocates)\n\n return {mw: tws}",
"def bigram(self, docs):\n new_docs = list(embedding_bigram[docs])\n \n return pd.Series(new_docs)",
"def speaker_vocab(filenames, target_speaker):\n return unique_ngrams(filenames, target_speaker, gram_size=1)",
"def get_mixed_matrix():\n words = [\"Returns\", \"a\", \"new\", \"list\", \"containing\", \"elements\", \"from\", \"the\", \"population\", \"while\", \"leaving\", \"the\", \"original\", \"population\", \"unchanged.\", \"The\", \"resulting\", \"list\", \"is\", \"in\", \"selection\", \"order\", \"so\", \"that\", \"all\", \"sub-slices\", \"will\", \"also\", \"be\", \"valid\", \"random\", \"samples.\", \"This\", \"allows\", \"raffle\", \"winners\", \"(the\", \"sample)\", \"to\", \"be\", \"partitioned\", \"into\", \"grand\", \"prize\", \"and\", \"second\", \"place\", \"winners\", \"(the\", \"subslices).\"]\n matrix = []\n for x in range(10):\n my_list = []\n if x % 2 == 0:\n for y in range(10):\n my_list.append(random.randint(0, 100))\n else:\n for y in range(10):\n my_list.append(random.choice(words))\n matrix.append(my_list)\n return matrix",
"def words_matrix( corpus, key, cooccurrence_only = True, ignore_words = Ignore_words ) :\n atw = all_words(corpus, key, ignore_words)\n atw.sort()\n row_dois = [x['DI'] for x in corpus]\n result = zeros( (len(corpus),len(atw)), dtype = int32 )\n for paper in corpus :\n for word, occurrences in wordbag( paper[key], ignore_words ).iteritems() :\n result[ row_dois.index( paper['DI'] ) ][ atw.index( word ) ] = occurrences\n\n if cooccurrence_only :\n result[ result > 1 ] = 1\n return result, row_dois, atw",
"def _get_max_bootstrap_genus(self, seq, repeats):\n word_posteriors = self._word_posteriors\n word_idxs = self._word_idxs\n word_size = self._word_size\n\n all_words = list(unique_words(seq, word_size))\n print sorted(map(word_idxs.get, all_words))\n decisions = [] #genera idxs\n for words in bootstrap(all_words, len(seq)//word_size, repeats):\n decisions.append(self._get_max_likelihood_genus(words,\n word_posteriors, word_idxs))\n freqs = calc_freqs(concatenate(decisions))\n sorted_freqs = sorted(freqs.items(), key=itemgetter(1))\n return sorted_freqs[-1] #what if a tie here?",
"def generateMatrix(self):\n if self.tokenWeights and self.extraFeatures:\n nFeatures = self.wordId + self.wordId2 + len(self.EXTRA_WEIGHTS_LABELS)\n logging.info('Exporting TOKEN WEIGHTS AND EXTRA FEATURES %dx%d'%(self.docId, nFeatures))\n mtrx = np.zeros((self.docId, nFeatures))\n \n for docId, doc in self.documents.iteritems():\n # iterate through 1st sentence\n for wId, val in doc['s1'].iteritems():\n mtrx[docId, wId] = val\n # then iterate thru 2nd sentence, store on 2ND PARTITION\n for wId, val in doc['s2'].iteritems():\n mtrx[docId, self.wordId + wId] = val\n # finally extra features values stored at the end of the vector\n for label, val in doc['extraFeatures'].iteritems():\n mtrx[docId, self.wordId + self.wordId2 + self.EXTRA_WEIGHTS_LABELS.index(label)] = val\n\n elif self.tokenWeights and not self.extraFeatures:\n nFeatures = self.wordId + self.wordId2\n logging.info('Exporting TOKEN WEIGHTS %dx%d'%(self.docId, nFeatures))\n mtrx = np.zeros((self.docId, nFeatures))\n \n for docId, doc in self.documents.iteritems():\n # iterate through 1st sentence\n for wId, val in doc['s1'].iteritems():\n mtrx[docId, wId] = val\n # then iterate thru 2nd sentence, store on 2ND PARTITION\n for wId, val in doc['s2'].iteritems():\n mtrx[docId, self.wordId + wId] = val\n else:\n nFeatures = len(self.EXTRA_WEIGHTS_LABELS)\n logging.info('Exporting EXTRA FEATURES %dx%d'%(self.docId, nFeatures))\n mtrx = np.zeros((self.docId, nFeatures))\n \n for docId, doc in self.documents.iteritems():\n for label, val in doc['extraFeatures'].iteritems():\n mtrx[docId, self.EXTRA_WEIGHTS_LABELS.index(label)] = val\n logging.info('Matrix generated')\n logging.info(mtrx.shape)\n return mtrx",
"def computeWordMatrix( Docs, Keywords ) :\n\n w2vec_count = CountVectorizer( ngram_range=(1, 4), vocabulary=Keywords )\n X_Count = w2vec_count.fit_transform( Docs )\n\n return X_Count",
"def vocabulary(self) -> np.ndarray:\n return np.array(\n list(set(word for text in self.preprocess_corpus for word in text))\n )",
"def estimate_probabilities(previous_n_gram, n_gram_counts, n_plus1_gram_counts, vocabulary, k=1.0):\r\n \r\n previous_n_gram = tuple(previous_n_gram)\r\n \r\n # add <e> <unk> to the vocabulary\r\n # <s> is not needed since it should not appear as the next word\r\n vocabulary = vocabulary + [\"<e>\", \"<unk>\"]\r\n vocabulary_size = len(vocabulary)\r\n \r\n probabilities = {}\r\n for word in vocabulary:\r\n probability = estimate_probability(word, previous_n_gram, \r\n n_gram_counts, n_plus1_gram_counts, \r\n vocabulary_size, k=k)\r\n probabilities[word] = probability\r\n\r\n return probabilities",
"def probability(self, words):\n prob = 1\n for w in words: # Loop through words\n if w not in self.mdl.index: # Not in tokens\n return 0\n else: # Get prob\n prob *= self.mdl.loc[w] \n return prob",
"def probability(self, words):\n prob = 1\n for w in words: # Loop through words\n if w not in self.mdl.index: # Not in tokens\n return 0\n else: # Get prob\n prob *= self.mdl.loc[w] \n return prob",
"def generate_solutions(possible_words, labels):\r\n return []",
"def char_matrix(self, unique_tokens):\n\n embed_vocab = list()\n base_vector = numpy.zeros(len(self.unique_symbols) * self.max_token_length)\n embed_vocab.append(base_vector)\n for tokens in unique_tokens:\n features_per_token = numpy.array([], dtype='int8')\n for index_chars in range(0, self.max_token_length):\n array_char = numpy.zeros((len(self.unique_symbols),))\n try:\n array_char[self.unique_symbols.index(tokens[index_chars])] = 1\n # print(word[index_chars], array_char)\n except IndexError:\n pass\n features_per_token = numpy.append(features_per_token, array_char)\n embed_vocab.append(features_per_token)\n return numpy.array(embed_vocab).astype('int8')",
"def generate_products():\n # initialize list of noun and adj\n num_products = 30\n products = [0] * num_products\n prices = [0] * num_products\n weights = [0] * num_products\n flammabilities = [0] * num_products\n\n # initlize random word object\n random = RandomWords()\n\n adj = [random.get_random_word(includePartOfSpeech=\"adjective\")\n for product in products]\n noun = [random.get_random_word(includePartOfSpeech=\"noun\")\n for product in products]\n products = [noun + \" \" + adj for noun, adj in zip(adj, noun)]\n\n prices = [random.randint(5, 100) for price in prices]\n weights = [random.randint(5, 100) for weight in weights]\n flammabilities = [random.randint(0.0, 2.5)\n for flammability in flammabilities]\n\n return products, prices, weights, flammabilities"
] |
[
"0.5885148",
"0.5827227",
"0.58017194",
"0.57225364",
"0.5393069",
"0.5331855",
"0.53210473",
"0.53188634",
"0.5291317",
"0.52824926",
"0.5277673",
"0.5267698",
"0.52600175",
"0.5247408",
"0.5245951",
"0.52345306",
"0.52255493",
"0.52097064",
"0.52083904",
"0.5207958",
"0.51978755",
"0.5095638",
"0.5080405",
"0.5080161",
"0.5064913",
"0.5050326",
"0.5050326",
"0.5035398",
"0.50198936",
"0.5015869"
] |
0.657779
|
0
|
Raise a ValidationError if data does not match the author format.
|
def __call__(self, data):
if self.required and len(data) <= 0:
raise ValidationError('An author is required')
if not isinstance(data, list):
# Convert single instance to a list
data = [data]
AUTHOR_TYPES = {'author', 'photographer', 'illustrator', 'videographer'}
for author in data:
if 'person' not in author:
raise ValidationError('An author must contain a person.')
if 'type' in author:
if isinstance(author, dict):
if not isinstance(author['type'], str) or author['type'].lower() not in AUTHOR_TYPES:
raise ValidationError('The author type must be a string, matching a predefined type.')
elif isinstance(author, str):
tokens = author.split('"')
for i in range(0, len(tokens)):
if 5 + 6 * i < len(tokens):
author_type = tokens[5 + 6 * i]
if author_type.lower() not in AUTHOR_TYPES:
raise ValidationError('The author type must be a string, matching a predefined type.')
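For reference, a small usage sketch of inputs this check accepts and rejects; the wrapper class name `AuthorValidator`, its `required` flag, and the source of `ValidationError` are assumptions about the surrounding serializer framework, not part of the snippet above.

validator = AuthorValidator(required=True)            # hypothetical class holding the __call__ above
validator([{'person': 7, 'type': 'Photographer'}])    # passes: type matching is case-insensitive
validator({'person': 7})                               # passes: a single dict is wrapped into a list
validator([{'type': 'author'}])                        # raises: an author must contain a person
validator([{'person': 7, 'type': 'editor'}])           # raises: type is not one of the predefined types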
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def _validate_author_id(cls, item):\n if (\n item.author_id and\n not user_services.is_user_or_pseudonymous_id(item.author_id)\n ):\n cls._add_error(\n 'final %s' % (\n base_model_validators.ERROR_CATEGORY_AUTHOR_CHECK),\n 'Entity id %s: Author ID %s is in a wrong format. '\n 'It should be either pid_<32 chars> or uid_<32 chars>.'\n % (item.id, item.author_id))",
"def _validate_original_author_id(cls, item):\n if (\n item.original_author_id and\n not user_services.is_user_or_pseudonymous_id(\n item.original_author_id)\n ):\n cls._add_error(\n 'final %s' % (\n base_model_validators.ERROR_CATEGORY_AUTHOR_CHECK),\n 'Entity id %s: Original author ID %s is in a wrong format. '\n 'It should be either pid_<32 chars> or uid_<32 chars>.'\n % (item.id, item.original_author_id))",
"def validate(self, data):\n if data[\"title\"] == data[\"description\"]:\n raise srs.ValidationError(\n \"Title and Description must be different \")\n return data",
"def _validate_last_nonempty_message_author_id(cls, item):\n if (\n item.last_nonempty_message_author_id and\n not user_services.is_user_or_pseudonymous_id(\n item.last_nonempty_message_author_id)\n ):\n cls._add_error(\n 'final %s' % (\n base_model_validators.ERROR_CATEGORY_AUTHOR_CHECK),\n 'Entity id %s: Last non-empty message author ID %s is in a '\n 'wrong format. It should be either pid_<32 chars> or '\n 'uid_<32 chars>.' % (\n item.id, item.last_nonempty_message_author_id))",
"def create(self, validated_data):\n return Author.objects.get_or_create_author(validated_data['author'])",
"def validate_data(self, data):\n # TODO use schema\n assert \"file_contents\" in data, data\n assert \"type\" in data, data",
"def validate(self, data):\n email = data['email']\n if re.search(r'[\\w.-]+@[\\w.-]+.\\w+', email): # check name has more than 1 word\n return data\n raise serializers.ValidationError(\"Please enter valide email\")",
"def validate(self, item):\n attempt, pkg_analyzer, journal_and_issue_data = item[:3]\n j_publisher_name = journal_and_issue_data.get('journal', {}).get('publisher_name', None)\n if j_publisher_name:\n data = pkg_analyzer.xml\n xml_publisher_name = data.findtext('.//journal-meta/publisher/publisher-name')\n\n if xml_publisher_name:\n if self._normalize_data(xml_publisher_name) == self._normalize_data(j_publisher_name):\n r = [models.Status.ok, 'Valid publisher name: ' + xml_publisher_name]\n else:\n r = [models.Status.error, 'Mismatched data: %s. Expected: %s' % (xml_publisher_name, j_publisher_name)]\n else:\n r = [models.Status.error, 'Missing data: publisher name']\n else:\n r = [models.Status.error, 'Missing data: publisher name, in scieloapi']\n return r",
"def test_parse_quotes_no_author(self):\n with self.assertRaisesRegexp(Exception, re.escape('an author was not included with the quote. Expecting '\n 'quote in the format \\\"<quote> - <author>\\\".')):\n api.parse_quote(\"This is a quote. | | Publication | tag1, tag2 , tag3 \", simple_format=False)",
"def test_author_validation(self):\n self.go200('minus_upload')\n self.formfile('minus_upload', 'file', AUDIO_FILE)\n self.fv('minus_upload', 'author', 'Дует Співаки')\n self.submit200()\n #self.assert_equal(smart_str(MinusAuthor.objects.all()[0].name), smart_str('Співаки'))\n MinusAuthor.objects.get(name = smart_str('Співаки'))",
"def validate(self, document) -> None:\n if not self._re.match(document.text):\n raise ValidationError(\n message=self._message, cursor_position=document.cursor_position\n )",
"def test_parse_simple_quote_with_no_author(self):\n with self.assertRaisesRegexp(Exception, \"unable to parse the author and publication. Try \\\\'Quote \\\\- Author \\\\(Publication\\\\)\\\\', or \\\\'Quote \\\\- Author\\\\, Publication\\\\'\"):\n api.parse_quote(\" Quote - \", simple_format=True)",
"def validate_format(self):\n raise NotImplementedError()",
"def post_author_data():\n data = None\n if request.get_json() is None:\n data = request.form.to_dict()\n print(data)\n else:\n data = request.get_json()\n\n if data is None or data == {} or all(value == '' for value in data.values()):\n return render_template('error.html', message='Input format is not correct'), 400\n\n data.get('name', None)\n data.get('author_url', None)\n data.get('author_id', None)\n data.get('rating', None)\n data.get('rating_count', None)\n data.get('review_count', None)\n data.get('image_url', None)\n data.get('related_authors', None)\n data.get('author_books', None)\n\n if isinstance(data, list):\n mongo.db.Authors.insert_many(data)\n else:\n mongo.db.Authors.insert_one(data)\n return render_template(\"post_author.html\", output=data), 200",
"def validate(self, data):\n age = data.get(\"age\", None)\n age = age.split(\",\")\n size = data.get(\"size\", None)\n size = size.split(\",\")\n gender = data.get(\"gender\", None)\n gender = gender.split(\",\")\n for i in age:\n if i not in ['b', 'y', 'a', 's']:\n raise serializers.ValidationError(\n \"Age must be either 'b' for baby, 'y' for young,\"\n \" 'a' for adult, or 's' for senior. Can do multiple with\"\n \" commas, ex: a,y,e\")\n for i in size:\n if i not in ['s', 'm', 'l', 'xl']:\n raise serializers.ValidationError(\n \"Size must be either 's' for small, 'm' for medium, 'l' \"\n \"for large, or 'xl' for extra large. Can do multiple with\"\n \" commas, ex: s,l,xl\")\n for i in gender:\n if i not in ['m', 'f']:\n raise serializers.ValidationError(\n \"Gender must be either 'm' for male, or 'f' for female. Can\"\n \" have both using commas, ex: m,f\")\n return data",
"def validate(self, data):\n raise NotImplementedError(\"Inherit this class and override this method.\")",
"def test_author_initials(self):\n inv_search = 'author:\"polyakov, a* m*\"'\n spi_search = 'find a a m polyakov'\n self._compare_searches(inv_search, spi_search)",
"def validate(self, document) -> None:\n if not len(document.text) > 0:\n raise ValidationError(\n message=self._message,\n cursor_position=document.cursor_position,\n )",
"def _validate_against_schema(self, strand, data):\n schema = self._get_schema(strand)\n\n try:\n jsonschema_validate(instance=data, schema=schema)\n logger.debug(\"Validated %s against schema\", strand)\n\n except ValidationError as e:\n raise exceptions.invalid_contents_map[strand](str(e))",
"def validate_against_schema(request, schema, data):\n try:\n data_pure = schema.deserialize(data)\n data_clean = post_serialize(data_pure)\n # Attach data_clean to request: see usage in views.\n request.data_clean = data_clean\n except Invalid as e:\n # here we transform the errors we got from colander into cornice\n # errors\n for field, error in e.asdict().items():\n request.errors.add('body', field, error)",
"def validate(self, content_retriever):\n pass",
"def validate(self, content_retriever):\n pass",
"def sanitize_author(name, email):\n # deal with inconsistent email addresses/names in commits.\n # feel free to fill this method out.\n return name",
"def _validate(self):\n REQUIRED_KEYS = [ 'name', 'year', 'artist_id', 'genre_ids', 'sources' ]\n\n missing_keys = get_missing_keys(self.request.data, REQUIRED_KEYS)\n if len(missing_keys) > 0:\n return f\"Request body is missing the following required properties: {', '.join(missing_keys)}.\"\n\n artist_id = self.request.data['artist_id']\n\n try:\n Artist.objects.get(pk=artist_id)\n except Artist.DoesNotExist:\n return \"`artistId` supplied does not match an existing artist.\" \n\n genre_ids = self.request.data['genre_ids']\n if len(genre_ids) == 0:\n return \"You must specify at least one genre id in `genreIds` array.\"\n\n for genre_id in genre_ids:\n try:\n Genre.objects.get(pk=genre_id)\n except Genre.DoesNotExist:\n return f\"The genre id {genre_id} does not match an existing genre.\"\n\n sources = self.request.data['sources']\n if len(sources) == 0:\n return \"You must specify at least one source in `sources` array.\"\n\n for source in sources:\n if 'service' not in source or 'url' not in source or 'is_primary' not in source:\n return \"All sources must contain `service`, `url`, and `is_primary` properties.\"\n\n primary_sources = [ source for source in sources if source['is_primary'] == True ]\n if len(primary_sources) != 1:\n return \"There must be one and only one primary source.\"\n\n return False",
"def clean(self):\n super().clean()\n cd = self.cleaned_data\n ack = cd.get('acknowledgement_file')\n filename = ack.name\n if not (len(filename) in [18, 19] and filename[-4:].upper() == '.V21'):\n raise ValidationError('Wrong file name format.')\n self.cleaned_data['filename'] = filename\n content = ack.file.read().decode('latin1')\n match = re.match(self.RE_HDR, content)\n if not match:\n raise ValidationError('Incorrect CWR header')\n code, name, date1, date2 = match.groups()\n self.cleaned_data['society_code'] = code.strip().lstrip('0')\n self.cleaned_data['society_name'] = name.strip()\n self.cleaned_data['date'] = datetime.strptime(\n max([date1, date2]), '%Y%m%d').date()\n self.cleaned_data['acknowledgement_file'] = content",
"def _validate(self):\n if not self._contents.has_key('type'):\n raise ValidationFailed(\"Metadata file %s contains no type field\" % (self._filename))\n \n if not self._contents.has_key('version'):\n raise ValidationFailed(\"Metadata file %s contains no version field\" %\n (self._filename))",
"def validate(cls, data, errors):",
"def validate(self, data):\n request = self.context.get('request')\n data['poster'] = request.user\n\n return validate_complete_address(data)",
"def test_author_many_initials(self):\n inv_search = 'author:\"bach, p* d* q*\"'\n spi_search = 'find a p d q bach'\n self._compare_searches(inv_search, spi_search)",
"def validate(self, data):\n\n if data['sinceWhen'] > data['tilWhen']:\n raise serializers.ValidationError(\"sinceWhen must precede after tilWhen\")\n \n return data"
] |
[
"0.6855748",
"0.66630197",
"0.6189032",
"0.60399836",
"0.6025009",
"0.59523976",
"0.5834141",
"0.580403",
"0.5802128",
"0.57902837",
"0.5752016",
"0.5670109",
"0.5639969",
"0.56111646",
"0.55745125",
"0.5572385",
"0.55602807",
"0.55434805",
"0.5539788",
"0.5521505",
"0.5487859",
"0.5487859",
"0.5469855",
"0.54249346",
"0.54237264",
"0.5423448",
"0.54129183",
"0.53732187",
"0.53667396",
"0.5366426"
] |
0.7580519
|
0
|
Makes a cue ball with a starting velocity of (0,0,0) at the right end of the map
|
def make_cueball(starting_position = vec(15,0,0), starting_vel = vec(0,0,0)):
    cueball = sphere(size = 1.0*vec(1,1,1), pos = starting_position)  # cueball is an object of class sphere
    cueball.vel = starting_vel  # This is the initial velocity
    isMoving = False  # local flag only; as written it is not attached to the cueball object
    return cueball
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def bomb_vector(self):\n\n\t\tif self.b_offset == 0:\n\t\t\top = sin\n\t\telse:\n\t\t\top = cos\n\n\t\tself.y -= self.speed\n\t\tself.rect.y = self.y\n\t\t# MMMMMMMMMMMMMMMMMMAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAATHS\n\t\tself.x = int((self.g_settings.screen_height/2) + self.amplitude*op(self.frequency*((float(self.y)/self.g_settings.screen_width)*(2*pi) + (self.speed*time()))))\n\t\tif self.b_offset == 0:\n\t\t\tself.rect.x = self.x + self.position_x - 16\n\t\telif self.b_offset == 1:\n\t\t\tself.rect.x = self.x + self.position_x + 16\n\t\tself.screen.blit(self.image, self.rect)",
"def __init__(self, posn_x, posn_y, velocity_x, velocity_y, kula): \n self.posn_x = posn_x # x position of box containing the ball (bottom). \n self.posn_y = posn_y # x position of box containing the ball (left edge). \n self.velocity_x = velocity_x # amount of x-movement each cycle of the 'for' loop. \n self.velocity_y = 100.0 # amount of y-movement each cycle of the 'for' loop. \n self.color = kula # color of the ball \n\n self.ball_width = 20.0 # size of ball - width (x-dimension). \n self.ball_height = 20.0 # size of ball - height (y-dimension). \n self.coef_restitution = 0.90",
"def run(self):\n MAX_ANGULAR_VELOCITY = 3.14/2 * 0.5\n\n # After 1.5 meters, we don't care about how far the ball is. It doesn't make us\n # approach it any faster.\n DISTANCE_THRESHOLD = 1.5\n \n # Factor to multiply thresholded distance by to get a maximum value equal to one\n DISTANCE_CONSTANT = 2/3.\n \n # Ball pursing thresholds\n MAX_FORWARD_VELOCITY = .75\n MIN_FORWARD_VELOCITY = 0.50\n \n if self.getTime() > 2.0:\n self.postSignal(\"restart\")\n \n ball = memory.world_objects.getObjPtr(core.WO_BALL)\n if not ball.seen:\n return\n \n # Reset the timer to act as a failsafe against losing the ball\n self.reset()\n \n # Ball in the bottom frame?\n if not ball.fromTopCamera:\n self.finish()\n \n # Ball coordinates\n ball_x, ball_y = ball.imageCenterX, ball.imageCenterY\n \n # Calculate forward velocity\n ball_distance = ball.visionDistance / 1000\n# print('Ball distance: {}'.format(ball_distance))\n ball_distance = min(ball_distance, DISTANCE_THRESHOLD)\n \n # Cache the ball distances\n PursueBall.ball_distances = (PursueBall.ball_distances + [ball_distance])[-30:]\n# print('Ball distances: {}'.format(PursueBall.ball_distances))\n slope = sum(PursueBall.ball_distances[-10:])/10 - sum(PursueBall.ball_distances[:10])/10\n# print('Slope: {} - {} = {}'.format(sum(PursueBall.ball_distances[-10:]) / 10,\n# sum(PursueBall.ball_distances[:10]) / 10,\n# slope))\n# print('Input: {}'.format(1 / slope if slope else 1))\n \n \n # Get the maximum velocity to be 1\n forward_vel = ball_distance * DISTANCE_CONSTANT\n forward_vel *= MAX_FORWARD_VELOCITY\n forward_vel = max(MIN_FORWARD_VELOCITY, forward_vel)\n# print('forward velocity: {}'.format(forward_vel))\n \n # Calculate sideways velocity\n angular_vel = -(ball_x-160.0) / 160.0 * MAX_ANGULAR_VELOCITY\n# print('Sideways Amount: {}'.format(angular_vel))\n \n commands.setWalkVelocity(forward_vel, 0, angular_vel)",
"def step(self):\n\n self.ball_x = self.ball_x + self.vel_x\n self.ball_y = self.ball_y + self.vel_y\n if self.ball_y >= 480:\n self.vel_y *= -1\n elif self.ball_y <= 0:\n self.vel_y *= -1\n if self.ball_x >= 640:\n self.vel_x *= -1\n elif self.ball_x <= 0:\n self.vel_x *= -1",
"def jump(self):\r\n if self.grounded == True:\r\n self.vel.y = -13",
"def before_bounce(vx_0,vy_0,vz_0,wx,wy,wz):\r\n # We define all the constants that we need to use\r\n g = 32.2 # gravitational constant in ft/ sec^2 \r\n radius = 0.116 # of the cricket ball in ft\r\n\r\n # We use the following two constants to calculate the value of constant_1\r\n #weight = 0.344 lb\r\n #ro = 0.075 lb/ft**2\r\n constant = 0.00461 # ro*area/(2*m)\r\n\r\n # C_d and C_l are constants for calculating the values of k_D and k_L\r\n c_d = 0.4\r\n c_l = 0.116\r\n k_d = c_d * constant # (c_d*ro*area)/(2*m)\r\n k_l = c_l * constant # (c_l*ro*area)/(2*m)\r\n \r\n # the initial and final time\r\n t_0 = 0.0 #s\r\n t_f = 3.0\r\n\r\n # number of steps and value of h \r\n N = 1000\r\n h = (t_f-t_0)/N\r\n\r\n e = 0.32 # coefficient of restitution\r\n c = 0.1 # constant for moisture level in the ground ranging from 0 to 1\r\n eps = 10E-2 # error constant\r\n \r\n # the values of the initial position of the ball and its \r\n # x, y and z components\r\n x_0 = 1 #ft\r\n y_0 = 2 #ft\r\n z_0 = 7 #ft\r\n\r\n def f(r,t):\r\n \"\"\"\r\n Helper function for using the fourth-order Runge Kutta (RK-4) method on the \r\n second order differential equations which help plot the ball's trajectory in its\r\n x, y and z axes.\r\n \"\"\"\r\n x = r[0]\r\n y = r[2]\r\n z = r[4]\r\n vx = r[1]\r\n vy = r[3]\r\n vz = r[5]\r\n velocity = np.sqrt(vx**2+vy**2+vz**2)\r\n #if np.abs(z)>eps:\r\n velocity = np.sqrt((vx+c*radius*wy)**2+(vy-c*radius*wx)**2+(-e*vz)**2)\r\n \r\n # equations for a cricket ball in motion\r\n return np.array([vx, (-k_d*velocity*vx+k_l*(wy*vz-wz*vy)),\r\n vy, (-k_d*velocity*vy+k_l*(wz*vx-wx*vz)),\r\n vz,(-k_d*velocity*vz+k_l*(wz*vy-wy*vx)-g)], float)\r\n \r\n t_before = np.arange(t_0, t_f, h) #array of time \r\n x_before = [] \r\n y_before = []\r\n z_before = []\r\n r_before = np.array([x_0, vx_0, y_0, vy_0, z_0, vz_0], float)\r\n \r\n # Applies RK-4 for each value of the position and velocity components\r\n for t in t_before:\r\n if np.abs(r_before[4])>=eps and r_before[0] <= (60+eps): \r\n x_before.append(r_before[0])\r\n y_before.append(r_before[2])\r\n z_before.append(r_before[4])\r\n k1 = h * f(r_before, t)\r\n k2 = h * f(r_before + 0.5 * k1, t + 0.5 * h)\r\n k3 = h * f(r_before + 0.5 * k2, t + 0.5 * h)\r\n k4 = h * f(r_before + k3, t + h)\r\n r_before += (k1 + 2 * k2 + 2 * k3 + k4) / 6\r\n # sets the initial component values for after the bounce when z is 0\r\n x_f = r_before[0]\r\n y_f = r_before[2]\r\n z_f = r_before[4]\r\n vx_f = r_before[1]\r\n vy_f = r_before[3]\r\n vz_f = r_before[5]\r\n \r\n # Makes a 3-D plot of the x, y and z axes representing the ball before hitting\r\n # the ground\r\n plt.figure(1)\r\n plot1 = plt.axes(projection=\"3d\")\r\n plot1.plot3D(x_before,y_before,z_before,'blue')\r\n plot1.set_xlabel('x')\r\n plot1.set_ylabel('y')\r\n plot1.set_zlabel('z')\r\n plot1.set_title('Before Bounce')\r\n \r\n return x_f,y_f,z_f,vx_f,vy_f,vz_f,x_before,y_before,z_before",
"def diffEquation(self): \n self.posn_x += self.velocity_x * time_scaling \n self.velocity_y = self.velocity_y + GRAVITY # a crude equation incorporating gravity. \n self.posn_y += self.velocity_y * time_scaling \n canvas_1.create_oval( self.posn_x, self.posn_y, self.posn_x + self.ball_width, \n self.posn_y + self.ball_height, fill= self.color) \n self.detectWallCollision() # Has the ball collided with any container wall? ",
"def advance(self): \n self.center.x = self.center.x + self.velocity.dx\n self.center.y = self.center.y + self.velocity.dy",
"def __init__(self):\n self.center = Point()\n self.velocity = Velocity()",
"def bounce(self):\n \n if self.x > width - self.size:\n self.x = 2*(width - self.size) - self.x\n self.angle = self.angle * -1\n self.speed *= elasticity # Added to all to account for elasticity\n elif self.x < self.size:\n self.x = 2*self.size - self.x\n self.angle = self.angle * -1\n self.speed *= elasticity\n\n if self.y > height - self.size:\n self.y = 2*(height - self.size) - self.y\n self.angle = math.pi - self.angle\n self.speed *= elasticity\n elif self.y < self.size:\n self.y = 2*self.size - self.y\n self.angle = math.pi - self.angle\n self.speed *= elasticity",
"def bounce(self):\n self.y_dir *= -1 # Reverse vertical direction of travel",
"def __init__(self, x = 140, y = 140):\r\n super(Ball, self).__init__(image = Ball.image,\r\n x = 600, y = 240,\r\n dx = -3, dy = 1)",
"def _bounce(self):\n right = self.surface.get_width() - self.size\n left = self.size\n top = self.size\n bottom = self.surface.get_height() - self.size\n if self.pos.x > right: # right border\n self.pos.x = right\n self.direction = self.direction.elementwise() * pygame.Vector2(-1.0, 1.0)\n elif self.pos.x < left: # left border\n self.pos.x = left\n self.direction = self.direction.elementwise() * pygame.Vector2(-1.0, 1.0)\n if self.pos.y > bottom: # bottom border\n self.pos.y = bottom\n self.direction = self.direction.elementwise() * pygame.Vector2(1.0, -1.0)\n elif self.pos.y < top: # top border\n self.pos.y = top\n self.direction = self.direction.elementwise() * pygame.Vector2(1.0, -1.0)",
"def jump(self):\n self.vy = -9",
"def update_ball(self):\n\t\tself.ball_x += self.velocity_x\n\t\tself.ball_y += self.velocity_y\n\t\tif self.ball_y < 0:\n\t\t\tself.ball_y = -self.ball_y\n\t\t\tself.velocity_y = -self.velocity_y\n\t\tif self.ball_y > 1:\n\t\t\tself.ball_y = 2 - self.ball_y\n\t\t\tself.velocity_y = -self.velocity_y\n\t\tif self.ball_x < 0:\n\t\t\tself.ball_x = -self.ball_x\n\t\t\tself.velocity_x = -self.velocity_x\n\t\tif self.ball_x < 1:\n\t\t\treturn 0\n\t\tif self.ball_y > self.paddle_y + State.paddle_height or self.ball_y < self.paddle_y:\n\t\t\treturn -1\n\t\tself.ball_x = 2 - self.ball_x\n\t\tself.velocity_x = random.uniform(-0.015, 0.015) - self.velocity_x\n\t\tif abs(self.velocity_x) < 0.03:\n\t\t\tself.velocity_x = 0.03 if self.velocity_x > 0 else -0.03\n\t\tself.velocity_y = random.uniform(-0.03, 0.03) - self.velocity_y\n\t\tself.velocity_x = max(min(self.velocity_x, 1.0), -1.0)\n\t\tself.velocity_y = max(min(self.velocity_y, 1.0), -1.0)\n\t\treturn 1",
"def __init__(self):\r\n self.radius = BALL_RADIUS\r\n self.center_x = BALL_START_X\r\n self.center_y = BALL_START_Y\r\n self.velocity = BALL_SPEED\r\n self.angle = - math.pi / 2\r\n self.rectangle = pygame.Rect(self.center_x - self.radius, self.center_y - self.radius, 2 * self.radius, 2 * self.radius)\r\n self.color = \"white\"\r\n self.save_pos = (self.center_x, self.center_y)",
"def __init__(self):\n self.center = Point()\n #x coordinate is set in these amount of pixels to leave a slight gap between the screen and paddle just like in real pong video games\n self.center.x = SCREEN_WIDTH - 10\n #when game starts, paddle is placed on the middle of screen's right edge\n self.center.y = SCREEN_HEIGHT / 2",
"def move_car(self):\n a = self.h / 50\n self.x += self.speed_x / FPS\n if self.x + 170 * a >= 1100:\n self.dir = -1\n self.speed_x = -self.speed_x\n if self.x - 170 * a <= 50:\n self.dir = 1\n self.speed_x = -self.speed_x",
"def bouncing(self):\n x = random.randint(-250, 250) # where the ball will bounce on the X axis\n left_x = -850\n right_x = 850\n rand_y = random.randint(-350, 350) # random height where the ball goes\n floor = -350 # bouncing floor\n\n if self.xcor() > 300:\n self.goto(x, floor)\n self.goto(left_x, rand_y)\n elif self.xcor() < -300:\n self.goto(x, floor)\n self.goto(right_x, rand_y)",
"def _wall_bounce(self):\n\n if (self.pos[0] < self.rad):\n self.vel[0] = abs(self.vel[0])\n elif (self.pos[0] > self.disp_size[0]-self.rad):\n self.vel[0] = -abs(self.vel[0])\n if (self.pos[1] < self.rad):\n self.vel[1] = abs(self.vel[1])\n elif (self.pos[1] > self.disp_size[1]-self.rad):\n self.vel[1] = -abs(self.vel[1])",
"def update(self):\r\n # Desplaza el bloque un píxel hacia abajo. s\r\n if self.rect.left < 50 or self.rect.right > 600:\r\n self.speed[0] = -self.speed[0]\r\n if self.rect.top < 0 or self.rect.bottom > 200:\r\n self.speed[1] = -self.speed[1]\r\n self.rect.move_ip((self.speed[0], self.speed[1])) \r\n if self.rect.y > 500:\r\n self.rect.x = random.randrange(10,600) \r\n self.rect.y = random.randrange(-400,-200)\r\n self.rect.y += 5",
"def __init__(self):\n super().__init__()\n self.waypoint_vector = [-1, 10]",
"def Bas():\r\n X1, Y1, X2, Y2 = canvas.coords(boule)\r\n canvas.coords(boule,X1,Y1+20,X2,Y2+20)",
"def move(self) -> None:\n self.delta_time += 1 / 30 # FPS is 30 frames per second\n\n if self.is_active:\n self.y -= self.velocity * self.delta_time + 0.5 * self.gravity * (self.delta_time ** 2) # s = ut + 0.5at^2\n self.velocity = self.velocity + self.gravity * self.delta_time # v = u + at\n\n # Limit the velocity to the terminal velocity\n self.velocity = max(self.terminal_velocity, self.velocity)\n\n # Limit the y-pos to within the top of the screen and the base\n self.y = min(max(0, self.y), BACKGROUND_SPRITE.get_height() - Base.Height - Bird.Height)\n\n # Animation\n # -e^-x graph is found suitable for the slow descent\n # The value of the function converges to -90 as x peaks out at 4.5\n # The value of the function converges to 0 as x becomes negative\n self.angle = -np.exp(self.velocity / self.terminal_velocity * 4.5) + (self.velocity > 0) * self.up_angle\n else:\n self.y = self.init_y + np.sin(self.delta_time * np.pi) * self.glide_height",
"def jump(self):\n\t\tself.vel = -10\n\t\tself.tick_count = 0\n\t\tself.height = self.y",
"def after_bounce(x_f,y_f,z_f,vx_f,vy_f,vz_f,wx,wy,wz):\r\n # We define all the constants that we need to use\r\n g = 32.2 # gravitational constant in ft/ sec^2 \r\n radius = 0.116 # of the cricket ball in ft\r\n \r\n # We use the following two constants to calculate the value of constant_1\r\n #weight = 0.344 lb\r\n #ro = 0.075 lb/ft**2\r\n constant = 0.00461 # ro*area/(2*m)\r\n\r\n # C_d and C_l are constants for calculating the values of k_D and k_L\r\n c_d = 0.4\r\n c_l = 0.116\r\n k_d = c_d * constant # (c_d*ro*area)/(2*m)\r\n k_l = c_l * constant # (c_l*ro*area)/(2*m)\r\n \r\n # the initial and final time\r\n t_0 = 0.0 #s\r\n t_f = 3.0\r\n\r\n # number of steps and value of h \r\n N = 1000\r\n h = (t_f-t_0)/N\r\n\r\n e = 0.32 # coefficient of restitution\r\n c = 0.1 # constant for moisture level in the ground ranging from 0 to 1\r\n eps = 10E-2 # error constant\r\n \r\n def f_2(r,t):\r\n \"\"\"\r\n Helper function for using the fourth-order Runge Kutta (RK-4) method on the \r\n second order differential equations which help plot the ball's trajectory in its\r\n x, y and z axes after a bounce.\r\n \"\"\"\r\n x = r[0]\r\n y = r[2]\r\n z = r[4]\r\n vx = r[1]\r\n vy = r[3]\r\n vz = r[5]\r\n # velocity equation for the ball after the bounce\r\n velocity = np.sqrt((vx+c*radius*wy)**2+(vy-c*radius*wx)**2+(-e*vz)**2)\r\n \r\n return np.array([vx, (-k_d*velocity*vx+k_l*(wy*vz-wz*vy)),\r\n vy, (-k_d*velocity*vy+k_l*(wz*vx-wx*vz)),\r\n vz,(-k_d*velocity*vz+k_l*(wz*vy-wy*vx)-g)], float)\r\n \r\n # Applies RK-4 for each value of the position and velocity components\r\n t_after = np.arange(t_0, t_f, h)\r\n x_after = []\r\n y_after = []\r\n z_after = []\r\n r_after = np.array([x_f, vx_f, y_f, vy_f, z_f, vz_f], float)\r\n for t in t_after:\r\n # continues the function until it reaches the end of the pitch\r\n if r_after[0] <= (60+eps):\r\n x_after.append(abs(r_after[0]))\r\n y_after.append(abs(r_after[2]))\r\n z_after.append(abs(r_after[4]))\r\n k1 = h * f_2(r_after, t)\r\n k2 = h * f_2(r_after + 0.5 * k1, t + 0.5 * h)\r\n k3 = h * f_2(r_after + 0.5 * k2, t + 0.5 * h)\r\n k4 = h * f_2(r_after + k3, t + h)\r\n r_after += (k1 + 2 * k2 + 2 * k3 + k4) / 6\r\n \r\n # Makes a 3-D plot of the x, y and z axes representing the ball after hitting\r\n # the ground\r\n plt.figure(2)\r\n plot2 = plt.axes(projection=\"3d\")\r\n plot2.plot3D(x_after,y_after,z_after,'blue')\r\n plot2.set_xlabel('x')\r\n plot2.set_ylabel('y')\r\n plot2.set_zlabel('z')\r\n plot2.set_title('After Bounce')\r\n \r\n return x_after,y_after,z_after",
"def ball_increase_velocity():\n global ball_vel\n ball_vel[0] = ball_vel[0] * 1.10\n ball_vel[1] = ball_vel[1] * 1.10",
"def bounce(self, diff):\n \n self.direction = (180 - self.direction) % 360\n self.direction -= diff",
"def physics(self):\n\n self.v_y += self.a_y * self.dt # v =at\n dy = self.v_y * self.dt # x = vt\n self.rect.move_ip(0, -dy)",
"def velocity_map(self, output='test'):\n self.figure = figure(figsize=(10,3))\n self.axes = self.figure.gca() \n xWindowLim = (self.analyst.windowSize[0], self.analyst.windowSize[1])\n yWindowLim = (self.analyst.windowSize[2], self.analyst.windowSize[3])\n \n # Generate contours for velocity magnitude \n xGrid = linspace(\\\n xWindowLim[0]*self.millimetersPerPixel, \n xWindowLim[1]*self.millimetersPerPixel, self.nbins)\n yGrid = linspace(\\\n yWindowLim[0]*self.millimetersPerPixel, \n yWindowLim[1]*self.millimetersPerPixel, self.nbins)\n magVelGrid = griddata(self.xs, self.ys, self.magVel, xGrid, yGrid) \n # csf = self.axes.contourf(xGrid, yGrid, magVelGrid, range(2,26,2), cmap=myColorMap)\n csf = self.axes.contourf(xGrid, yGrid, magVelGrid, cmap=myColorMap)\n cbar = self.figure.colorbar(csf) \n cbar.set_label(\"Velocity magnitude, px/s\")\n \n # Generate arrow plot\n # q = self.axes.quiver(self.xs, self.ys, self.us, self.vs,\n # angles = 'xy', scale_units='xy', scale=2, pivot = 'mid')\n # self.axes.quiverkey(q, 0.9, 1.0, 10, \"10 px/frame\", coordinates='axes') \n \n # Save figure \n self.axes.set_aspect('equal')\n self.axes.set_xlim(*xWindowLim)\n self.axes.set_ylim(*yWindowLim)\n self.figure.savefig(output + '_velocity_map.pdf')"
] |
[
"0.6513015",
"0.63712394",
"0.6334853",
"0.6243784",
"0.62388325",
"0.62215716",
"0.6183578",
"0.6173977",
"0.6141188",
"0.60439825",
"0.60349804",
"0.598072",
"0.5956398",
"0.5952032",
"0.5942249",
"0.5923052",
"0.59218514",
"0.58929884",
"0.58900994",
"0.5872543",
"0.5848486",
"0.5823713",
"0.58125937",
"0.58100814",
"0.5796067",
"0.5791848",
"0.5770971",
"0.5767976",
"0.5755482",
"0.5743167"
] |
0.6539402
|
0
|
Makes a new moving blackHole with a starting position and a default velocity of vec(0,0,0)
|
def make_blackHole(starting_position, starting_vel = vec(0,0,0)):
blackHole = helix(size = 1.0*vec(1, 1, 1), pos = starting_position, color = color.black) # ball is an object of class helix
blackHole.vel = starting_vel # This is the initial velocity
return blackHole
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def createBouncingPixel():\n height = width = 16\n empty = numpy.zeros((height, width), dtype=numpy.int32)\n x = 0\n y = 0\n x_diff = 1\n y_diff = 1\n frames = []\n for i in xrange(61): # Comes back to starting position in 60 steps\n copy = numpy.array(empty)\n if x + x_diff not in range(0, width):\n x_diff *= -1\n if y + y_diff not in range(0, height):\n y_diff *= -1\n copy[y, x] = 1\n print copy\n x += x_diff\n if i % 2:\n y += y_diff\n frames.append(copy.tolist())\n writeFrames(frames, 'actual', 'bouncing_pixel')",
"def move(self):\n \n self.rect.move_ip(0,self.speed) # Funcion para mover el enemigo especificando la velocidad xy\n \n if (self.rect.top > SCREEN_HEIGHT): # Condicion cuando llega a la parte inferior y no colisiono con el jugador\n del self.surf #Libera memoria\n del self.rect\n self.randomNumber = random.choice([70,64,32]) # Su tamaño se asigna nuevamente\n self.size = (self.randomNumber,self.randomNumber) #Se genera su tamaño como un cuadrado de lado aleatorio\n self.surf = pygame.Surface(self.size) #Se genera la superficie que aparecera la pantalla\n self.surf.fill(RED)\n self.rect = self.surf.get_rect(center = (random.randint(40,SCREEN_WIDTH-40),0))# me da info de las coordenadas de surf\n if(self.randomNumber == 32):\n self.surf = self.imagen\n elif(self.randomNumber ==64):\n self.surf = self.imagen2\n elif self.randomNumber ==70 :\n self.surf = self.imagen3",
"def make_cueball(starting_position = vec(15,0,0), starting_vel = vec(0,0,0)):\r\n cueball = sphere(size = 1.0*vec(1,1,1), pos = starting_position)\r\n cueball.vel = starting_vel\r\n isMoving = False\r\n return cueball",
"def __init__(self):\n # start x position\n self.x = random.randrange(size_x)\n # start y position\n self.y = - random.randrange(100)\n # drift x (amount of change each loop along the x axis)\n self.dx = random.randrange(3) - random.randrange(6)\n # drift y (amount of change each loop along the y axis)\n self.dy = random.randrange(1, 20) + random.randrange(4)\n # the size of the circular snowflake\n self.size = random.randrange(1, 4)\n # the colour of the snowflake (from sludgy grey to snowy white)\n c = random.randrange(200, 256)\n self.color = [c, c, c]",
"def __init__(self, \n nd = 2, \n goal = np.array([1.0,1.0]),\n state_bound = [[0,1],[0,1]],\n nA = 4,\n action_list = [[0,1],[0,-1],[1,0],[-1,0]],\n<<<<<<< HEAD:archive-code/puddleworld.py\n ngrid = [10.0,10.0],\n maxStep = 40):\n ngrid = [40, 40]\n x_vec = np.linspace(0,1,ngrid[0])\n y_vec = np.linspace(0,1,ngrid[1])\n for x in x_vec:\n for y in y_vec:\n if ~self.inPuddle([x,y]):\n puddle.append([x,y])\n # puddle is a closed loop \n outpuddlepts = np.asarray(puddle)\n \"\"\"\n\n\n # Horizontal wing of puddle consists of \n # 1) rectangle area xch1<= x <=xc2 && ych1-radius <= y <=ych2+radius\n # (xchi,ychi) is the center points (h ==> horizantal)\n # x, y = state[0], state[1]\n xch1, ych1 = 0.3, 0.7\n xch2, ych2 = 0.65, ych1\n radius = 0.1\n\n\n #Vertical wing of puddle consists of \n # 1) rectangle area xcv1-radius<= x <=xcv2+radius && ycv1 <= y <= ycv2\n # where (xcvi,ycvi) is the center points (v ==> vertical)\n xcv1 = 0.45; ycv1=0.4;\n xcv2 = xcv1; ycv2 = 0.8;\n\n # % 2) two half-circle at end edges of rectangle\n \n # POINTS ON HORIZANTAL LINES OF PUDDLE BOUNDARY\n for x in np.arange(xch1,xcv1-radius,self.meshsize[0]/2):\n puddle.append([x,ych1-radius])\n puddle.append([xcv1-radius,ych1-radius])\n \n for x in np.arange(xcv1+radius,xch2,self.meshsize[0]/2):\n puddle.append([x,ych1-radius])\n \n for x in np.arange(xch1,xcv1-radius,self.meshsize[0]/2):\n puddle.append([x,ych1+radius])\n \n puddle.append([xcv1-radius,ych1+radius])\n\n\n for x in np.arange(xcv1+radius,xch2,self.meshsize[0]/2):\n puddle.append([x,ych1+radius])\n\n # POINTS ON VERTICAL LINES OF PUDDLE BOUNDARY\n for y in np.arange(ycv1,ych1-radius,self.meshsize[1]/2):\n puddle.append([xcv1-radius,y])\n \n for y in np.arange(ycv1,ych1-radius,self.meshsize[1]/2):\n puddle.append([xcv1+radius,y])\n \"\"\"\n for y in np.arrange():\n puddle.append([])\n \n for y in np.arrange():\n puddle.append([])\n \"\"\"\n\n # HALF CIRCLES\n ngridTheta = 10\n thetaVec = np.linspace(0,pi,ngridTheta)\n\n for t in thetaVec:\n puddle.append([xch1+radius*np.cos(pi/2+t),ych1+radius*np.sin(pi/2+t)])\n\n for t in thetaVec:\n puddle.append([xch2+radius*np.cos(-pi/2+t),ych2+radius*np.sin(-pi/2+t)])\n\n for t in thetaVec:\n puddle.append([xcv1+radius*np.cos(pi+t),ycv1+radius*np.sin(pi+t)])\n\n for t in thetaVec:\n puddle.append([xcv2+radius*np.cos(t),ycv2+radius*np.sin(t)])\n\n \n outpuddlepts = np.asarray(puddle)\n return outpuddlepts",
"def bomb_vector(self):\n\n\t\tif self.b_offset == 0:\n\t\t\top = sin\n\t\telse:\n\t\t\top = cos\n\n\t\tself.y -= self.speed\n\t\tself.rect.y = self.y\n\t\t# MMMMMMMMMMMMMMMMMMAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAATHS\n\t\tself.x = int((self.g_settings.screen_height/2) + self.amplitude*op(self.frequency*((float(self.y)/self.g_settings.screen_width)*(2*pi) + (self.speed*time()))))\n\t\tif self.b_offset == 0:\n\t\t\tself.rect.x = self.x + self.position_x - 16\n\t\telif self.b_offset == 1:\n\t\t\tself.rect.x = self.x + self.position_x + 16\n\t\tself.screen.blit(self.image, self.rect)",
"def home(self, max_dist=150, reset_pos=True): \n while not self.lim_cw:\n self.move_cm(True, max_dist, velocity=1)\n if reset_pos:\n self.step_position = 0\n self.homed = True",
"def update(self):\r\n # Desplaza el bloque un píxel hacia abajo. s\r\n if self.rect.left < 50 or self.rect.right > 600:\r\n self.speed[0] = -self.speed[0]\r\n if self.rect.top < 0 or self.rect.bottom > 200:\r\n self.speed[1] = -self.speed[1]\r\n self.rect.move_ip((self.speed[0], self.speed[1])) \r\n if self.rect.y > 500:\r\n self.rect.x = random.randrange(10,600) \r\n self.rect.y = random.randrange(-400,-200)\r\n self.rect.y += 5",
"def interaction_hole(self) -> None:\n x_dead_char = self.moving_character.x_obj\n y_dead_char = self.moving_character.y_obj\n void = ob.Void(x_dead_char, y_dead_char)\n # Replacing character by a Void\n self.grid.obj_list[self.moving_character] = void\n del self.grid.character_list[self.index_character]\n self.grid.character_just_died = True",
"def step(self):\n prey_neighbors = [x for x in self.model.space.get_neighbors(self.pos, self.vision+ 20, False) if isinstance(x,boid.Boid)]\n nearby_obstacles = [x for x in self.model.space.get_neighbors(self.pos, self.vision + 15, False) if isinstance(x, Obstacle)]\n self.velocity += (self.avoid_collision(nearby_obstacles) * self.collision_separation +\n self.attack(prey_neighbors)) / 2\n self.velocity /= np.linalg.norm(self.velocity)\n new_pos = self.pos + self.velocity * self.speed\n self.model.space.move_agent(self, new_pos)\n self.eat(prey_neighbors)\n\n\n # update for drawing\n self.update()",
"def __init__(self,x,y,width,height):\n self.x = x\n self.y = y\n self.width = width\n self.height = height\n self.velocity_x = 0.0",
"def __init__(self):\r\n self.radius = BALL_RADIUS\r\n self.center_x = BALL_START_X\r\n self.center_y = BALL_START_Y\r\n self.velocity = BALL_SPEED\r\n self.angle = - math.pi / 2\r\n self.rectangle = pygame.Rect(self.center_x - self.radius, self.center_y - self.radius, 2 * self.radius, 2 * self.radius)\r\n self.color = \"white\"\r\n self.save_pos = (self.center_x, self.center_y)",
"def move(self) -> None:\n self.delta_time += 1 / 30 # FPS is 30 frames per second\n\n if self.is_active:\n self.y -= self.velocity * self.delta_time + 0.5 * self.gravity * (self.delta_time ** 2) # s = ut + 0.5at^2\n self.velocity = self.velocity + self.gravity * self.delta_time # v = u + at\n\n # Limit the velocity to the terminal velocity\n self.velocity = max(self.terminal_velocity, self.velocity)\n\n # Limit the y-pos to within the top of the screen and the base\n self.y = min(max(0, self.y), BACKGROUND_SPRITE.get_height() - Base.Height - Bird.Height)\n\n # Animation\n # -e^-x graph is found suitable for the slow descent\n # The value of the function converges to -90 as x peaks out at 4.5\n # The value of the function converges to 0 as x becomes negative\n self.angle = -np.exp(self.velocity / self.terminal_velocity * 4.5) + (self.velocity > 0) * self.up_angle\n else:\n self.y = self.init_y + np.sin(self.delta_time * np.pi) * self.glide_height",
"def guider(x=0,y=0):\n if x==0 and y==0 and (gzero.gxoff<>0 or gzero.gyoff<>0):\n opticalcoupler.HomeXYStage()\n opticalcoupler.MoveXYStage( x=(x+gzero.gxoff), y=(y+gzero.gyoff) )\n camera.status.guider = (x,y)",
"def __init__(self, posn_x, posn_y, velocity_x, velocity_y, kula): \n self.posn_x = posn_x # x position of box containing the ball (bottom). \n self.posn_y = posn_y # x position of box containing the ball (left edge). \n self.velocity_x = velocity_x # amount of x-movement each cycle of the 'for' loop. \n self.velocity_y = 100.0 # amount of y-movement each cycle of the 'for' loop. \n self.color = kula # color of the ball \n\n self.ball_width = 20.0 # size of ball - width (x-dimension). \n self.ball_height = 20.0 # size of ball - height (y-dimension). \n self.coef_restitution = 0.90",
"def set_cell_to_hole(self):\n self.tick = \"H\"\n self.is_hole = True\n self.is_active = False",
"def create_wall():\n if config.W_LIST == []:\n pos = randint(config.M.x_pos+4, common.R2)\n if common.value_arr(pos, common.MIDS_R) == \" \" and \\\n common.value_arr(pos, common.MIDS_R+1) == \"0\":\n try:\n witem = obstacle.Wall(pos)\n config.W_LIST.append(witem)\n except config.GapHere:\n pass\n\n elif len(config.W_LIST) < int((3*common.COLS)/80):\n if randint(0, 10) == 5:\n # create a obstacle\n pos = config.W_LIST[-1].x_pos + randint(10, 20)\n if pos < common.COLS - 3:\n try:\n witem = obstacle.Wall(pos)\n config.W_LIST.append(witem)\n except config.GapHere:\n pass\n\n else:\n pass",
"def move_to_start(self):\n self.pos = (SCREEN_WIDTH / 2, SCREEN_HEIGHT - 64)",
"def __init__(self):\n super(SteklovBoundary, self).__init__()\n self.value = SteklovBoundary.value\n SteklovBoundary.value -= 1\n self.update(param=\"1\")",
"def __init__(self):\n self.size = 16\n self.color = COLOR\n self.pos = self.spawn()",
"def breaking_of_the_box(size = (10, 10), verbose = False):\n import numpy as np\n r, l, u, d = \"R\", \"L\", \"U\", \"D\" # initiating walkind directions\n np.random.seed(int(time.time()))\n \n # initiating field with walking directions\n field = np.random.randint(1, 5, size = (10, 10))\n field = np.where(field ==1, r, field)\n field = np.where(field =='2', l, field)\n field = np.where(field =='3', u, field)\n field = np.where(field =='4', d, field)\n\n i, j = 0, 0\n coordinates = []\n \n # iterating in a field\n while (i<field.shape[0] and i>-1) and (j<field.shape[1] and j>-1):\n prev_i,prev_j = i, j\n coordinates.append((i, j)) \n \n copy_field = field.copy()\n \n if field[i][j] == r:\n j+=1\n elif field[i][j] == l:\n j-=1\n elif field[i][j] == u:\n i-=1\n elif field[i][j] == d:\n i+=1\n copy_field[i][j] = \"X\"\n if verbose == True:\n print(copy_field, \"#\"*48, sep = \"\\n\") #printing step by step position of a player\n if (i, j) in coordinates:\n # in case of infitine loop break\n print(\"Player is stucked inside of a box\")\n break\n\n else:\n print(\"Player came out of the box\")\n print(\"Coordinates of a breaking point\", \"(\", prev_i, prev_j, \")\")",
"def __init__(self, surface: pygame.Surface, pos: pygame.Vector2, direction: pygame.Vector2, size: int, color: tuple):\n self.surface = surface\n self.pos = pos # initial position\n self.direction = direction # move direction\n self.size = size\n self.color = color\n # garivty is 9.81 m/s\n self.gravity = pygame.Vector2(0, 9.81 / FPS) # force to the ground aka bottom side\n self.drag = pygame.Vector2(1.0, 0.999) # decrease in speed per FPS",
"def __init__(self, hit_y, room, wall, wall_direction):\n # Call the parent's constructor\n pygame.sprite.Sprite.__init__(self)\n\n self.wall = wall\n self.room = room\n self.direction = wall_direction\n\n if self.wall.rect.height <= 50:\n height = self.wall.rect.height\n else:\n height = 50\n \n # Make a blue wall, of the size specified in the parameters\n if self.direction == 'right':\n self.image = pygame.image.load('png/ledge_attach_right.png').convert_alpha()\n else:\n self.image = pygame.image.load('png/ledge_attach_left.png').convert_alpha()\n self.image = pygame.transform.scale(self.image, (50, height))\n \n # Make our top-left corner the passed-in location.\n self.rect = self.image.get_rect()\n self.rect.centery = hit_y\n if wall_direction == 'right':\n self.rect.x = self.wall.rect.left - 50\n else:\n self.rect.x = self.wall.rect.right\n self.spread_per_update = 1\n self.spread_up = self.rect.top\n self.spread_down = self.rect.bottom\n self.climb_okay = True\n # Keep track of the most recent fungi grown on the wall, looking at the top of the ones growing\n # upward and the bottom of the ones growing down\n self.grow_above = self.rect.top\n self.grow_below = self.rect.bottom\n\n self.timer = 660",
"def __make_slide(self):\n # Create base rectangle for slide\n length = self.parameters['slide_base_length'] + self.parameters['bearing_slide_travel']\n width = self.parameters['slide_width']\n height = self.parameters['slide_height']\n slide = fso.Box(x=length,y=width,z=height)\n # Create the mounting holes\n radius = 0.5*self.parameters['slide_screw_size']\n base_hole = fso.Cylinder(r=radius, l=2*height)\n hole_list = []\n for i in (-1,1):\n for j in (-1,1):\n xpos = i*(0.5*length - self.parameters['slide_screw_inset'])\n ypos = j*(0.5*self.parameters['slide_screw_dW'])\n hole = base_hole.copy()\n hole.translate([xpos,ypos,0])\n hole_list.append(hole)\n # Remove hole material\n slide -= hole_list\n slide.set_color(self.slide_color,recursive=True)\n self.slide = slide",
"def __init__(self, surface_size):\n random_x = random.uniform(Molecule.radius, surface_size[0] - Molecule.radius)\n random_y = random.uniform(Molecule.radius, surface_size[1] - Molecule.radius)\n self.position = pygame.Vector2(random_x, random_y)\n max_speed = 3\n random_sx = random.uniform(-max_speed, max_speed)\n random_sy = random.uniform(-max_speed, max_speed)\n self.speed = pygame.Vector2(random_sx, random_sy)",
"def move(self):\n if self._z >= 75:\n a = random.random()\n print(str(a))\n if a < 0.2:\n self._z += 1\n if a > 0.2 and a < 0.9:\n self._z -= 1\n if a > 0.9:\n self._z = self._z\n else: \n self._z -= 1\n \n b = random.random()\n print(str(b))\n if b < 0.1:\n self._y += 1\n if b > 0.1 and b < 0.2:\n self._y -= 1\n if b > 0.2 and b < 0.25:\n self._x -= 1\n if b > 0.25:\n self._x += 1",
"def __init__(self):\n super().__init__()\n self.waypoint_vector = [-1, 10]",
"def __init__(self, pos):\r\n self.pos = pos\r\n self.vel = [0, 0]\r\n self.acc = [0, 0]\r\n self.heading = math.pi\r\n self.screen = [0, 0]",
"def make_move(self, move):\n self.board[int(move) - 1] = self.nplayer",
"def move(self):\n if random.random() < 0.5:\n self.y = (self.y + 1) % 100\n else:\n self.y = (self.y - 1) % 100\n if random.random() < 0.5:\n self.x = (self.x + 1) % 100\n else:\n self.x = (self.x - 1) % 100"
] |
[
"0.57895917",
"0.56245697",
"0.56215173",
"0.55915743",
"0.5580983",
"0.5520519",
"0.5514952",
"0.54837316",
"0.5476015",
"0.5460184",
"0.5457634",
"0.54353166",
"0.542367",
"0.54190814",
"0.54180855",
"0.54020005",
"0.5399414",
"0.5391632",
"0.5374892",
"0.5359748",
"0.5354885",
"0.5352538",
"0.53423244",
"0.534091",
"0.5340178",
"0.53396934",
"0.5339226",
"0.53031117",
"0.5291628",
"0.52909446"
] |
0.74914
|
0
|
Implements Python's choice using the random() function.
|
def choice(L):
LEN = len(L) # Get the length
randomindex = int(LEN*random()) # Get a random index
return L[randomindex] # Return that element
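Because random() returns a value in [0.0, 1.0), the index int(LEN*random()) always falls between 0 and LEN-1, so the lookup never runs past the end of L. A quick sanity check, assuming random here is Python's random.random and that the choice above is in scope:

from random import random   # assumption: the random() the snippet relies on

colours = ['red', 'green', 'blue']
picks = [choice(colours) for _ in range(1000)]
print(set(picks))            # only members of colours ever appear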
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def choice(seq):\r\n i = int(random() * len(seq))\r\n return seq[i]",
"def my_random_choice(choices):\n def getKey(x):\n return x[0] + 0.001 * x[1]\n\n return min(choices, key=getKey)\n\n # for actual random selection, we may replace the above with this:\n #return random.choice(choices)",
"def test_choice(self):\r\n # numpy.random.choice is only available for numpy versions >= 1.7\r\n major, minor, _ = numpy.version.short_version.split('.')\r\n if (int(major), int(minor)) < (1, 7):\r\n raise utt.SkipTest('choice requires at NumPy version >= 1.7 '\r\n '(%s)' % numpy.__version__)\r\n \r\n # Check over two calls to see if the random state is correctly updated.\r\n rng_R = random_state_type()\r\n # Use non-default parameters, and larger dimensions because of\r\n # the integer nature of the result\r\n post_r, out = choice(rng_R, (11, 8), 10, 1, 0)\r\n\r\n f = compile.function(\r\n [compile.In(rng_R,\r\n value=numpy.random.RandomState(utt.fetch_seed()),\r\n update=post_r, mutable=True)],\r\n [out], accept_inplace=True)\r\n\r\n numpy_rng = numpy.random.RandomState(utt.fetch_seed())\r\n val0 = f()\r\n val1 = f()\r\n numpy_val0 = numpy_rng.choice(10, (11, 8), True, None)\r\n numpy_val1 = numpy_rng.choice(10, (11, 8), True, None)\r\n print val0\r\n print numpy_val0\r\n print val1\r\n print numpy_val1\r\n self.assertTrue(numpy.allclose(val0, numpy_val0))\r\n self.assertTrue(numpy.allclose(val1, numpy_val1))",
"def computer_choice():\n random_number= random.randrange(50) # choose a random number between 0 and 49\n return random_number",
"def choice(seq) -> Union[Any, None]:\n if not seq:\n return None\n return random.choice(seq)",
"def pick(self, mess, args):\n return random.choice(args)",
"def computer_generate(self):\n return choice[random.randrange(3)]",
"def touching_choice(self,p):\n choose = random.sample(part,2)\n\n return choose",
"async def choose(ctx, *choices: str):\n await ctx.send(random.choice(choices))",
"def test_choice(self):\r\n # numpy.random.choice is only available for numpy versions >= 1.7\r\n major, minor, _ = numpy.version.short_version.split('.')\r\n if (int(major), int(minor)) < (1, 7):\r\n raise utt.SkipTest('choice requires at NumPy version >= 1.7 '\r\n '(%s)' % numpy.__version__)\r\n \r\n # Check over two calls to see if the random state is correctly updated.\r\n random = RandomStreams(utt.fetch_seed())\r\n fn = function([], random.choice((11, 8), 10, 1, 0))\r\n fn_val0 = fn()\r\n fn_val1 = fn()\r\n\r\n rng_seed = numpy.random.RandomState(utt.fetch_seed()).randint(2**30)\r\n rng = numpy.random.RandomState(int(rng_seed)) #int() is for 32bit\r\n numpy_val0 = rng.choice(10, (11, 8), True, None)\r\n numpy_val1 = rng.choice(10, (11, 8), True, None)\r\n\r\n assert numpy.all(fn_val0 == numpy_val0)\r\n assert numpy.all(fn_val1 == numpy_val1)",
"def rs():\n return random.choice([-1,1])",
"def rs():\n return random.choice([-1,1])",
"def decision():\n return random.choice(['GoToNormal','GoToSleep'])",
"def random_choice(var_name):\r\n return random.choice(var_name)",
"async def choose(self, ctx, *args):\n choicelist = []\n for choice in args:\n choicelist.append(choice)\n result = random.choice(choicelist)\n await ctx.send(\"Like it or not, I choose {}!\".format(result))",
"def choice(random_state, size=None, a=2, replace=True, p=None, ndim=None,\r\n dtype='int64'):\r\n # numpy.random.choice is only available for numpy versions >= 1.7\r\n major, minor, _ = numpy.version.short_version.split('.')\r\n if (int(major), int(minor)) < (1, 7):\r\n raise ImportError('choice requires at NumPy version >= 1.7 '\r\n '(%s)' % numpy.__version__)\r\n a = tensor.as_tensor_variable(a)\r\n if isinstance(replace, bool):\r\n replace = tensor.constant(replace, dtype='int8')\r\n else:\r\n replace = tensor.as_tensor_variable(replace)\r\n # encode p=None as an empty vector\r\n p = tensor.as_tensor_variable(p or [])\r\n ndim, size, bcast = _infer_ndim_bcast(ndim, size)\r\n op = RandomFunction(choice_helper, tensor.TensorType(dtype=dtype,\r\n broadcastable=bcast))\r\n return op(random_state, size, a, replace, p)",
"def _random_pick(lst):\n\n choice = random.randint(0, len(lst) - 1)\n return lst[choice]",
"def get_computers_choice():\n choices = ['Rock', 'Paper', 'Scissors']\n choice_index = randint(0, 2)\n choice = choices[choice_index]\n return choice",
"async def choose(*choices : str):\n await bot.say(random.choice(choices))",
"def getRandom(self) -> int:\n return choice(self.list)",
"def __call__(self):\n return random.choice(self.fakers)",
"def __test_choice(L, S, A, R, Y):\n return choice(Y)",
"async def randomChoice(self, ctx: commands.Context, *choices: str):\n if not choices:\n await ctx.reply(f\"Command failed - no arguments given.\\nEnter a sequence of arguments to choose from (you can use quotes for grouping).\", mention_author=False)\n elif len(choices)==1:\n await ctx.reply(f\"After some extremely randomized choosing from the one singular option that was given to choose from, the surprising result is:\\n{choices[0]}\", mention_author=False)\n else:\n await ctx.reply(f\"Randomly chosen result:\\n{random.choice(choices)}\", mention_author=False)",
"def getRandom(self) -> int:\n return choice(self.arr)",
"def getRandom(self) -> int:\n return choice(self.arr)",
"def getRandom(self) -> int:\n return choice(self.arr)",
"async def choose(self, ctx, *choices : str):\n await ctx.send(random.choice(choices))",
"def getRandom(self) -> int:\n return choice(self.array)",
"def spinit(list):\n return (random.choice(list))",
"def Chose_rand():\r\n total_list=list(range(1,467681))\r\n select=13788\r\n random_selected= random.sample(total_list,select)\r\n return (random_selected)"
] |
[
"0.80179393",
"0.73609155",
"0.73578393",
"0.7326621",
"0.72457415",
"0.7187231",
"0.71675736",
"0.7123166",
"0.7090522",
"0.70814466",
"0.7080028",
"0.7080028",
"0.7074489",
"0.706705",
"0.70160604",
"0.70116484",
"0.70015615",
"0.698032",
"0.69781715",
"0.69609964",
"0.695883",
"0.69525194",
"0.69408745",
"0.69380975",
"0.69380975",
"0.69380975",
"0.6902397",
"0.6893875",
"0.6864746",
"0.6837853"
] |
0.7396507
|
1
|
Implements Python's randint using the random() function. Returns an int from low to hi _inclusive_ (so, it's not 100% Pythonic)
|
def randint(low, hi):
if hi < low:
low, hi = hi, low # Swap if out of order!
LEN = int(hi) - int(low) + 1. # Get the span and add 1
randvalue = LEN*random() + int(low) # Get a random value
return int(randvalue) # Return the integer part of it
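A short sanity check along the same lines, assuming the randint above is in scope and random() is Python's random.random; it exercises both the argument swap and the inclusive upper bound:

values = {randint(7, 3) for _ in range(10_000)}   # arguments deliberately out of order
print(sorted(values))                             # expected to settle on [3, 4, 5, 6, 7]: both endpoints included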
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def mt_rand (low = 0, high = sys.maxint):\n return random.randint (low, high)",
"def random_randint(lower_bound, upper_bound):\r\n return random_randrange(lower_bound, upper_bound+1)",
"def random_int(low: int, high: int, seed=None):\n random.seed(seed)\n return random.randint(low, high)",
"def rint(lo, hi):\n return round(0.5 + rand(lo, hi))",
"def _rand_int(self, low, high):\n\n return self.np_random.randint(low, high)",
"def random_int(max=1000):\r\n return randint(0, max)",
"def get_random_integer():\n return random.randint(-MAX_GENERATED_NUMBER_RANGE, MAX_GENERATED_NUMBER_RANGE)",
"def randInt(max):\n return int(max * random.random())",
"def random_int(start: int = 0, end: int = 100) -> int:\n return random.randint(start, end)",
"def rand(lo=0, hi=1):\n global Seed\n Seed = (16807 * Seed) % 2147483647\n return lo + (hi - lo) * Seed / 2147483647",
"def random_num(range_start,range_end):\r\n return random.randint(range_start,range_end)",
"def random_int(self, min_int = 1, max_int = 10240):\n return random.randint(min_int, max_int)",
"def rand(self, lo, hi):\n lo, hi = lo or 0, hi or 1\n\n self.seed = (16807 * self.seed) % 2147483647\n return lo + (hi - lo) * self.seed / 2147483647",
"def rand(self, lo, hi):\n lo, hi = lo or 0, hi or 1\n\n self.seed = (16807 * self.seed) % 2147483647\n return lo + (hi - lo) * self.seed / 2147483647",
"def rand_int(min=0, max=100):\n\n num = random.random() * (max - min) + min\n return round(num)",
"def myrandint(begin, end):\n if begin == end:\n return begin\n else:\n return randint(begin, end)",
"def _randint(*args, **kwargs):\n return random.randint(*args, **kwargs)",
"def choose_in(low, high):\n return random.randint(low, high)",
"def generate_random(limit_lo, limit_hi):\n\n return RAND.randint(limit_lo, limit_hi)",
"def mt_rand(min = 0, max = sys.maxint):\n return random.randint(min, max)",
"def get_random_int_op(minimum: int, maximum: int) -> int:\n import random\n result = random.randint(minimum, maximum)\n print(result)\n return result",
"def randint(a: int, b: int) -> int:\n ...",
"def get_number(maxValue):\r\n return random.randint(1, maxValue)",
"def generate() -> int:\n return randint(0, 1000000000)",
"def seed_random(max_integer):\n return random.randrange(0,max_integer);",
"def random_int(self, top):\n return random.randint(0, top) % self.prime",
"def random_range():\n rnd = int(random.randrange(1,8))\n print \"Random number generated: %s\" %(rnd)\n return rnd",
"def sample_randint(a, b):\n return a + sample(b - a + 1)",
"def getRandom(self) -> int:",
"def getRandom(self) -> int:"
] |
[
"0.8181219",
"0.7950442",
"0.7925715",
"0.7910221",
"0.7900935",
"0.7869978",
"0.7862936",
"0.78217626",
"0.7807758",
"0.7801138",
"0.7768885",
"0.77118933",
"0.76688373",
"0.76688373",
"0.7663735",
"0.76537967",
"0.7641981",
"0.75275236",
"0.7524197",
"0.747597",
"0.7453642",
"0.7411059",
"0.73788244",
"0.7348018",
"0.73470765",
"0.72545004",
"0.72194517",
"0.72145337",
"0.719279",
"0.719279"
] |
0.8425401
|
0
|
Returns a vec of (r, g, b) components, each chosen at random from 0.0 to 1.0.
|
def randcolor():
    r = random(0.0, 1.0)
    g = random(0.0, 1.0)
    b = random(0.0, 1.0)
    return vec(r, g, b)   # A color is a three-element vec
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def random_color_gen():\n r = randint(0, 255)\n g = randint(0, 255)\n b = randint(0, 255)\n return [r, g, b]",
"def get_random_color():\n def get_random_value():\n \"\"\" Return a random value between 0.0 and 1.0 \"\"\"\n return randint(0, 255) / 256.0\n return Vector(get_random_value(), get_random_value(), get_random_value())",
"def get_random_color():\n\n def get_random_value():\n \"\"\" Return a random value between 0.0 and 1.0 \"\"\"\n return randint(0, 255) / 256.0\n\n return Vector(get_random_value(), get_random_value(), get_random_value())",
"def _random_color() -> List[float]:\n return [np.random.uniform(), np.random.uniform(), np.random.uniform()]",
"def _genRandomColor():\n b = random.randint(0, 255)\n g = random.randint(0, 255)\n r = random.randint(0, 255)\n return (b, g, r)",
"def random_color():\n\n rgbl=[255,0,0]\n random.shuffle(rgbl)\n return tuple(rgbl)",
"def random():\r\n return R.NextDouble()",
"def get_random_color():\n r=random.randint(0,255)\n g=random.randint(0,255)\n b=random.randint(0,255)\n return(r,g,b)",
"def randColor():\r\n return np.array([random.random(), random.random(), random.random()]).reshape((1, 1, 3))",
"def getRandColor():\n\treturn (randrange(0,256), randrange(0,256), randrange(0,256))",
"def random_color() -> Tuple[int, int, int]:\n return randrange(0, 255), randrange(0, 255), randrange(0, 255)",
"def random_color():\n colormode(255)\n return randint(0, 255), randint(0, 255), randint(0, 255)",
"def get_random_vec():\n return [random.gauss(GENERATING_MEAN, GENERATING_DEVIATION) for _ in range(VECTOR_SIZE)]",
"def get_random_rgb(seed):\n random.seed(seed)\n r = random.randint(0, 255)\n g = random.randint(0, 255)\n b = random.randint(0, 255)\n return [r, g, b]",
"def randcolor():\n return (randint(0,255), randint(0,255), randint(0,255))",
"def random_vec(self, rand):\n return array([rand.uniform(*c) for c in self.constraints])",
"def random_colour(rng: random.Random) -> TupleInt3:\n r = rng.randint(0, 255)\n g = rng.randint(0, 255)\n b = rng.randint(0, 255)\n return r, g, b",
"def generate_rgb(S, R_possible_values, R_probabilities):\n R, = rd.choices(population=R_possible_values, weights=R_probabilities)\n S -= R\n G = rd.randint(max(S - 255, 0), min(S, 255))\n S -= G\n B = S\n return (R, G, B)",
"def getRandomColor():\n r = randint(0, 255)\n g = randint(0, 255)\n b = randint(0, 255)\n return \"rgb(\" + str(r) + \", \" + str(g) + \", \" + str(b) +\")\"",
"def _random_color(self):\n levels = range(0, 256)\n return tuple(random.choice(levels) for _ in range(3))",
"def random_color() -> Tuple[int, ...]:\n red = random.randrange(0, 255)\n blue = random.randrange(0, 255)\n green = random.randrange(0, 255)\n return (red, blue, green)",
"def randomRGBValue(self):\n return random.randrange(0, 256)",
"def rngnext():\n out = []\n # random\n state = random.getstate()\n out.append(f\"r={random.random():0.4f}\")\n random.setstate(state)\n\n # numpy\n state = np.random.get_state()\n out.append(f\"n={np.random.random():0.4f}\")\n np.random.set_state(state)\n\n # torch\n state = torch.random.get_rng_state()\n out.append(f\"t={torch.rand(1)[0]:0.4f}\")\n torch.random.set_rng_state(state)\n\n # cuda\n if torch.cuda.is_available():\n state = torch.cuda.get_rng_state()\n # note there is no function for generating a random in cuda but this may work?\n out.append(f\"c={state.float().std()%1:0.4f} {torch.backends.cudnn.deterministic}\")\n\n return out",
"def _random_vector(self):\n index = random.randrange(len(self.data))\n return (index, self.data[index])",
"def rand_branch_color():\n red = random.randint(0, 100)\n green = random.randint(175, 255)\n blue = random.randint(0, 100)\n return (red, green, blue)",
"def get_random_value():\n return randint(0, 255) / 256.0",
"def generate_colour():\n red = random.randrange(0, 256)\n green = random.randrange(0, 256)\n blue = random.randrange(0, 256)\n alpha = random.randrange(0, 256)\n return (red, green, blue, alpha)",
"def getRandom(self):\n return random.choice(self.vec)",
"def uniform_random_value(l_boundary: float, r_boundary: float) -> float:\n return uniform(l_boundary, r_boundary)",
"def initial_vector(self):\n\n return asarray([np.random.uniform(l, u) for l, u in self.bounds])"
] |
[
"0.78245014",
"0.7490951",
"0.74682033",
"0.7444328",
"0.7442808",
"0.73355055",
"0.7088772",
"0.7078341",
"0.7064664",
"0.7050868",
"0.7014214",
"0.6991338",
"0.6988401",
"0.69807917",
"0.68873507",
"0.686652",
"0.6838748",
"0.67905116",
"0.6724731",
"0.6675243",
"0.6661542",
"0.666063",
"0.6632879",
"0.65502036",
"0.65261185",
"0.65065295",
"0.6478332",
"0.64680904",
"0.6444599",
"0.6430794"
] |
0.7952764
|
0
|
Corral collisions! Ball must have a .vel field and a .pos field.
|
def corral_collide(ball):
    # If the ball hits wallA
    if ball.pos.z < wallA.pos.z:     # Hit -- check for z
        ball.pos.z = wallA.pos.z     # Bring back into bounds
        ball.vel.z *= -1.0           # Reverse the z velocity
    # If the ball hits wallB
    if ball.pos.x < wallB.pos.x:     # Hit -- check for x
        ball.pos.x = wallB.pos.x     # Bring back into bounds
        ball.vel.x *= -1.0           # Reverse the x velocity
    # If the ball hits wallC
    if ball.pos.z > wallC.pos.z:     # Hit -- check for z
        ball.pos.z = wallC.pos.z     # Bring back into bounds
        ball.vel.z *= -1.0           # Reverse the z velocity
    # If the ball hits wallD
    if ball.pos.x > wallD.pos.x:     # Hit -- check for x
        ball.pos.x = wallD.pos.x     # Bring back into bounds
        ball.vel.x *= -1.0           # Reverse the x velocity
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def checkBallCollision(self, ball):\n\t\tres, cornx, corny = ball.collWithRect(self.bbox)\n\t\t# print res,cornx, corny\n\t\tif res==0:\n\t\t\treturn False\n\n\t\tleft = self.x-self.length/2\n\t\tif ball.x >= left and ball.x <= self.x+self.length/2:\n\t\t\t#bounce normally\n\t\t\tmagnitude = math.sqrt(ball.vx**2+ball.vy**2)\n\t\t\tfor i in range(1, 7):\n\t\t\t\tif ball.x < left+self.length/7*i:\n\t\t\t\t\tangle = math.radians(120.-10.*(i-1))\n\t\t\t\t\tball.vx = magnitude*math.cos(angle)\n\t\t\t\t\tball.vy = -magnitude*math.sin(angle)\n\t\t\t\t\treturn True\n\t\t\t#maximum right\n\t\t\tangle = math.radians(60.)\n\t\t\tball.vx = magnitude*math.cos(angle)\n\t\t\tball.vy = -magnitude*math.sin(angle)\n\t\t\treturn True\n\t\t\treturn True\n\n\t\t#bounce with the caps\n\t\tif ball.collWithCircle(self.x-self.length/2, self.y, self.height/2):\n\t\t\tball.bouncePoint(self.x-self.length/2, self.y)\n\t\t\treturn True\n\t\telif ball.collWithCircle(self.x+self.length/2, self.y, self.height/2):\n\t\t\tball.bouncePoint(self.x+self.length/2, self.y)\n\t\t\treturn True\n\t\treturn False",
"def detect_collision():\n # with the top and bottom of screen\n if ball.ycor() > GAME_Y_BARRIER or ball.ycor() < -GAME_Y_BARRIER:\n ball.bounce_y()\n # with the paddles\n if ball.distance(paddle_right) < 50 and ball.xcor() > GAME_X_BARRIER \\\n or ball.distance(paddle_left) < 50 and ball.xcor() < -GAME_X_BARRIER:\n ball.bounce_x()",
"def bounce_collision(self, otherball):\r\n # relative positions\r\n dx = self.unif[0] - otherball.unif[0]\r\n dy = self.unif[1] - otherball.unif[1]\r\n rd = self.radius + otherball.radius\r\n # check sign of a.b to see if converging\r\n dotP = dot([dx, dy, 0.0],\r\n [self.vx - otherball.vx, self.vy - otherball.vy, 0.0])\r\n if dx * dx + dy * dy <= rd * rd and dotP < 0:\r\n R = otherball.mass / self.mass #ratio of masses\r\n \"\"\"Glancing angle for equating angular momentum before and after collision.\r\n Three more simultaneous equations for x and y components of momentum and\r\n kinetic energy give:\r\n \"\"\"\r\n if dy:\r\n D = dx / dy\r\n delta2y = 2 * (D * self.vx + self.vy -\r\n D * otherball.vx - otherball.vy) / (\r\n (1 + D * D) * (R + 1))\r\n delta2x = D * delta2y\r\n delta1y = -1 * R * delta2y\r\n delta1x = -1 * R * D * delta2y\r\n elif dx:\r\n # Same code as above with x and y reversed.\r\n D = dy / dx\r\n delta2x = 2 * (D * self.vy + self.vx -\r\n D * otherball.vy - otherball.vx) / (\r\n (1 + D * D) * (R + 1))\r\n delta2y = D * delta2x\r\n delta1x = -1 * R * delta2x\r\n delta1y = -1 * R * D * delta2x\r\n else:\r\n delta1x = delta1y = delta2x = delta2y = 0\r\n\r\n self.vx += delta1x\r\n self.vy += delta1y\r\n otherball.vx += delta2x\r\n otherball.vy += delta2y",
"def moveBall(self):\n \n #move ball one step\n vx = self._ball.get_vx()\n vy = self._ball.get_vy()\n self._ball.x = self._ball.x + vx\n self._ball.y = self._ball.y + vy\n \n #COLLISIONS\n if vy > 0:\n balltop = self._ball.y + BALL_DIAMETER\n if balltop >= GAME_HEIGHT:\n self._ball.set_vy(-vy)\n if (self._getCollidingObject() != None and\n self._getCollidingObject() != self._paddle):\n self._ball.set_vy(-vy)\n self._wall.removebrick(self._getCollidingObject())\n if vy < 0:\n ballbottom = self._ball.y\n if ballbottom <= 0:\n self._lostlife = True\n if self._getCollidingObject() == self._paddle:\n self._ball.set_vy(-vy)\n if (self._getCollidingObject() != None and\n self._getCollidingObject() != self._paddle):\n self._ball.set_vy(-vy)\n self._wall.removebrick(self._getCollidingObject())\n if vx > 0:\n ballright = self._ball.x + BALL_DIAMETER\n if ballright >= GAME_WIDTH:\n self._ball.set_vx(-vx)\n if vx < 0:\n ballleft = self._ball.x\n if ballleft <= 0:\n self._ball.set_vx(-vx)",
"def _wall_bounce(self):\n\n if (self.pos[0] < self.rad):\n self.vel[0] = abs(self.vel[0])\n elif (self.pos[0] > self.disp_size[0]-self.rad):\n self.vel[0] = -abs(self.vel[0])\n if (self.pos[1] < self.rad):\n self.vel[1] = abs(self.vel[1])\n elif (self.pos[1] > self.disp_size[1]-self.rad):\n self.vel[1] = -abs(self.vel[1])",
"def check_collision(self):\n if self.window.get_object_at(self.ball.x,self.ball.y+self.radius*2) is self.paddle:\n self.bounce()\n if self.window.get_object_at(self.ball.x+self.radius*2,self.ball.y+self.radius*2) is self.paddle:\n self.bounce()",
"def __init__(self, posn_x, posn_y, velocity_x, velocity_y, kula): \n self.posn_x = posn_x # x position of box containing the ball (bottom). \n self.posn_y = posn_y # x position of box containing the ball (left edge). \n self.velocity_x = velocity_x # amount of x-movement each cycle of the 'for' loop. \n self.velocity_y = 100.0 # amount of y-movement each cycle of the 'for' loop. \n self.color = kula # color of the ball \n\n self.ball_width = 20.0 # size of ball - width (x-dimension). \n self.ball_height = 20.0 # size of ball - height (y-dimension). \n self.coef_restitution = 0.90",
"def collision_3():\r\n tu.reset()\r\n print(\"collision_3\")\r\n r = 100\r\n b1 = Ball2D(r=r, x=0, y=tbl.y_min, vy=tbl.v_max, color=\"blue\")\r\n b2 = Ball2D(r=r, x=0, y=tbl.y_max, vy=-tbl.v_max, color=\"red\")\r\n bc = BallCollision2D(balls=[b1, b2])\r\n max_r_sq = tbl.x_max**2 + tbl.y_max**2\r\n while (b1.x**2 + b1.y**2 < max_r_sq\r\n or b2.x**2 + b2.y**2 < max_r_sq):\r\n bc.ball_display()\r\n bc.ball_collision_update()\r\n time.sleep(t_update)\r\n bc.ball_update()\r\n if clear_at_end:\r\n bc.reset()",
"def updateBall(self):\n if self._ball.getLeft() <= 0 or self._ball.getRight() >= GAME_WIDTH:\n self._ball.bounceSides()\n if self._ball.getTop() >= GAME_HEIGHT:\n self._ball.bounceTopBottom()\n self._ball.moveBall()\n \n self._updateCollisions()",
"def ball_bounce(wall):\n if wall: # top & bottom walls\n ball_vel[1] = -ball_vel[1]\n else: # left or right walls\n ball_vel[0] = -ball_vel[0]",
"def collision_4():\r\n tu.reset()\r\n print(\"collision_4\")\r\n r = 100\r\n b1 = Ball2D(r=r, x=r, y=tbl.y_min, vy=tbl.v_max, color=\"blue\")\r\n b2 = Ball2D(r=r, x=0, y=tbl.y_max, vy=-tbl.v_max, color=\"red\")\r\n bc = BallCollision2D(balls=[b1, b2])\r\n max_r_sq = tbl.x_max**2 + tbl.y_max**2\r\n while (b1.x**2 + b1.y**2 < max_r_sq\r\n or b1.x**2 + b1.y**2 < max_r_sq):\r\n bc.ball_display()\r\n bc.ball_collision_update()\r\n time.sleep(t_update)\r\n bc.ball_update()\r\n if clear_at_end:\r\n bc.reset()",
"def updateBall(self):\n \n self._ball.moveBall()\n self._ball.collideBallPaddle(self._paddle)\n for a in self._bricks:\n self._ball.collideBallBrick(a)",
"def collision_1():\r\n tu.reset()\r\n print(\"collision_1\")\r\n r = 100\r\n b1 = Ball2D(r=r, x=0, y=tbl.y_min, vy=tbl.v_max, color=\"blue\")\r\n b2 = Ball2D(r=r, x=0, y=0, color=\"red\")\r\n bc = BallCollision2D(balls=[b1,b2])\r\n while (b2.x**2 + b2.y**2 < max_r_sq):\r\n bc.ball_display()\r\n bc.ball_collision_update()\r\n time.sleep(t_update)\r\n bc.ball_update()\r\n if clear_at_end:\r\n bc.reset()",
"def collision_5():\r\n tu.reset()\r\n print(\"collision_5\")\r\n r = 100\r\n maxby = tbl.y_max - 3*r\r\n b1 = Ball2D(r=r, x=0, y=tbl.y_min, vy=tbl.v_max, color=\"blue\")\r\n b2 = Ball2D(r=r, x=-r, y=maxby, vy=0, color=\"red\")\r\n b3 = Ball2D(r=r, x=+r, y=maxby, vy=0, color=\"orange\")\r\n b4 = Ball2D(r=r, x=0, y=maxby-r*sqrt(3), vy=0, color=\"green\")\r\n bc = BallCollision2D(balls=[b1, b2, b3, b4])\r\n max_r_sq = tbl.x_max**2 + tbl.y_max**2\r\n while (b2.x**2 + b2.y**2 < max_r_sq\r\n or b3.x**2 + b3.y**2 < max_r_sq):\r\n bc.ball_display()\r\n bc.ball_collision_update()\r\n #time.sleep(int(.01))\r\n time.sleep(t_update)\r\n bc.ball_update()\r\n if clear_at_end:\r\n bc.reset()",
"def collision( ball1, ball2 ):\n\t\t\n\t#equation from wikipedia\n\ta1 = 2 * float(ball2.mass / (ball1.mass + ball2.mass))\t\t\t\t\t\t\t\t# 2 * m2 / ( m1 + m2 ) \n\ta2 = 2 - a1\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t# 2 * m1 / ( m1 + m2 ) = 2 - m2 / ( m1 + m2 ) \n\tb = (ball1.velocity - ball2.velocity) * (ball1.position - ball2.position)\t\t\t# < v1 - v2, x1 - x2 > = < v2 - v1, x2 - x1 >\n\tc = (ball1.position - ball2.position).norm() \t\t\t\t\t\t\t\t\t\t# || x1 - x2 || ^ 2\t= || x2 - x1 || ^ 2\t\n\tif c == 0:\n\t\tc = 0.01\t\t\t\t\t\t\n\td = b / c\n\n\t#enter new velocites\n\tball1.velocity = ball1.velocity - (ball1.position - ball2.position) * a1 * d\n\tball2.velocity = ball2.velocity - (ball2.position - ball1.position) * a2 * d\n\n\t#changing color \n\tball1.color = ball2.color = ( \t(ball1.color[0] + ball2.color[0]) * 0.5, \n\t\t\t\t\t\t\t\t\t(ball1.color[1] + ball2.color[1]) * 0.5, \n\t\t\t\t\t\t\t\t\t(ball1.color[2] + ball2.color[2]) * 0.5\t\t)",
"def checkball(self):\r\n for ball in self.overlapping_sprites:\r\n ball.bottom=self.top\r\n if math.fabs(ball.x-self.x)<math.fabs(ball.x-self.left) and math.fabs(ball.x-self.x)<math.fabs(ball.x-self.right):\r\n ball.vertbounce()\r\n if math.fabs(ball.x-self.left)<math.fabs(ball.x-self.x) and math.fabs(ball.x-self.left)<math.fabs(ball.x-self.right):\r\n ball.leftbounce()\r\n if math.fabs(ball.x-self.right)<math.fabs(ball.x-self.left) and math.fabs(ball.x-self.right)<math.fabs(ball.x-self.x):\r\n ball.rightbounce()\r\n self.points.value+=10\r\n if self.points.value==500:\r\n ball.ballchange()\r\n elif self.points.value==2000:\r\n ball.ballchange()\r\n elif self.points.value==4500:\r\n ball.ballchange()\r\n elif self.points.value==10000:\r\n ball.ballchange()",
"def __handle_wall_collision(self):\n if self.__ball.x <= 0 or self.__ball.x + self.__ball.width >= self.__window.width:\n self.__dx = - self.__dx\n\n next_target_top = self.__window.get_object_at(self.__ball.x + self.__dx*1.5, self.__ball.y + self.__dy*1.5)\n next_target_bot = self.__window.get_object_at(self.__ball.x + self.__ball.width + self.__dx*1.5,\n self.__ball.y + self.__ball.height + self.__dy*1.5)\n\n if self.__hit_paddle(next_target_top) or self.__hit_paddle(next_target_bot):\n self.__dy = - abs(self.__dy)\n if self.__ball.x <= self.__paddle.x + 20:\n # The ball will fly left if hit the left of the paddle\n self.__dx = - abs(self.__dx)\n elif self.__ball.x > self.__paddle.x + self.__paddle.width - 20:\n # The ball will fly right if hit the right of the paddle\n self.__dx = abs(self.__dx)\n elif self.__hit_bricks(next_target_top) or self.__hit_bricks(next_target_bot):\n target_brick = next_target_top if next_target_top else next_target_bot\n self.__remove_brick(target_brick)\n self.__dy = - self.__dy\n elif self.__ball.y <= 0:\n self.__dy = - self.__dy\n elif self.__ball.y + self.__ball.height >= self.__window.height:\n self.__num_lives -= 1\n self.__playing = False\n self.__set_ball_position()\n self.__set_paddle_position()\n self.__set_ball_velocity()\n self.__set_record_board()",
"def collision_2():\r\n tu.reset()\r\n print(\"collision_2\")\r\n r = 100\r\n b1 = Ball2D(r=r, x=r, y=tbl.y_min, vy=tbl.v_max, color=\"blue\")\r\n b2 = Ball2D(r=r, x=0, y=0, color=\"red\")\r\n bc = BallCollision2D(balls=[b1, b2])\r\n while (b1.x**2 + b1.y**2 < max_r_sq\r\n or b2.x**2 + b2.y**2 < max_r_sq):\r\n bc.ball_display()\r\n bc.ball_collision_update()\r\n time.sleep(t_update)\r\n bc.ball_update()\r\n if clear_at_end:\r\n bc.reset()",
"def collide(b1,b2):\n if mag(b1.pos-b2.pos) < (b1.radius + b2.radius - .05):\n return True",
"def collide(self, pos):\n\t\tpass",
"def collision_6():\r\n tu.reset()\r\n print(\"collision_6\")\r\n r = 100\r\n sep = r*.0\r\n maxby = tbl.y_min + 7*r\r\n b1 = Ball2D(r=r, x=0, y=tbl.y_min, vy=tbl.v_max, color=\"blue\")\r\n b2 = Ball2D(r=r, x=0, y=maxby, vy=0, color=\"red\")\r\n b3 = Ball2D(r=r, x=0, y=maxby-2*(r+sep), vy=0, color=\"orange\")\r\n b4 = Ball2D(r=r, x=0, y=maxby-4*(r+sep), vy=0, color=\"green\")\r\n bc = BallCollision2D(balls=[b1, b2, b3, b4])\r\n while (b1.x**2 + b1.y**2 < max_r_sq\r\n and b2.x**2 + b2.y**2 < max_r_sq\r\n and b3.x**2 + b3.y**2 < max_r_sq\r\n and b4.x**2 + b4.y**2 < max_r_sq):\r\n bc.ball_display()\r\n bc.ball_collision_update()\r\n time.sleep(t_update)\r\n bc.ball_update()\r\n if clear_at_end:\r\n bc.reset()",
"def bounce(self):\n \n if self.x > width - self.size:\n self.x = 2*(width - self.size) - self.x\n self.angle = self.angle * -1\n self.speed *= elasticity # Added to all to account for elasticity\n elif self.x < self.size:\n self.x = 2*self.size - self.x\n self.angle = self.angle * -1\n self.speed *= elasticity\n\n if self.y > height - self.size:\n self.y = 2*(height - self.size) - self.y\n self.angle = math.pi - self.angle\n self.speed *= elasticity\n elif self.y < self.size:\n self.y = 2*self.size - self.y\n self.angle = math.pi - self.angle\n self.speed *= elasticity",
"def check_bounce(self):\n if self.ball.center.x < 0 and self.ball.velocity.dx < 0:\n self.ball.bounce_horizontal()\n\n if self.ball.center.y < 0 and self.ball.velocity.dy < 0:\n self.ball.bounce_vertical()\n\n if self.ball.center.y > SCREEN_HEIGHT and self.ball.velocity.dy > 0:\n self.ball.bounce_vertical()",
"def run(self):\n MAX_ANGULAR_VELOCITY = 3.14/2 * 0.5\n\n # After 1.5 meters, we don't care about how far the ball is. It doesn't make us\n # approach it any faster.\n DISTANCE_THRESHOLD = 1.5\n \n # Factor to multiply thresholded distance by to get a maximum value equal to one\n DISTANCE_CONSTANT = 2/3.\n \n # Ball pursing thresholds\n MAX_FORWARD_VELOCITY = .75\n MIN_FORWARD_VELOCITY = 0.50\n \n if self.getTime() > 2.0:\n self.postSignal(\"restart\")\n \n ball = memory.world_objects.getObjPtr(core.WO_BALL)\n if not ball.seen:\n return\n \n # Reset the timer to act as a failsafe against losing the ball\n self.reset()\n \n # Ball in the bottom frame?\n if not ball.fromTopCamera:\n self.finish()\n \n # Ball coordinates\n ball_x, ball_y = ball.imageCenterX, ball.imageCenterY\n \n # Calculate forward velocity\n ball_distance = ball.visionDistance / 1000\n# print('Ball distance: {}'.format(ball_distance))\n ball_distance = min(ball_distance, DISTANCE_THRESHOLD)\n \n # Cache the ball distances\n PursueBall.ball_distances = (PursueBall.ball_distances + [ball_distance])[-30:]\n# print('Ball distances: {}'.format(PursueBall.ball_distances))\n slope = sum(PursueBall.ball_distances[-10:])/10 - sum(PursueBall.ball_distances[:10])/10\n# print('Slope: {} - {} = {}'.format(sum(PursueBall.ball_distances[-10:]) / 10,\n# sum(PursueBall.ball_distances[:10]) / 10,\n# slope))\n# print('Input: {}'.format(1 / slope if slope else 1))\n \n \n # Get the maximum velocity to be 1\n forward_vel = ball_distance * DISTANCE_CONSTANT\n forward_vel *= MAX_FORWARD_VELOCITY\n forward_vel = max(MIN_FORWARD_VELOCITY, forward_vel)\n# print('forward velocity: {}'.format(forward_vel))\n \n # Calculate sideways velocity\n angular_vel = -(ball_x-160.0) / 160.0 * MAX_ANGULAR_VELOCITY\n# print('Sideways Amount: {}'.format(angular_vel))\n \n commands.setWalkVelocity(forward_vel, 0, angular_vel)",
"def update_ball(self):\n\t\tself.ball_x += self.velocity_x\n\t\tself.ball_y += self.velocity_y\n\t\tif self.ball_y < 0:\n\t\t\tself.ball_y = -self.ball_y\n\t\t\tself.velocity_y = -self.velocity_y\n\t\tif self.ball_y > 1:\n\t\t\tself.ball_y = 2 - self.ball_y\n\t\t\tself.velocity_y = -self.velocity_y\n\t\tif self.ball_x < 0:\n\t\t\tself.ball_x = -self.ball_x\n\t\t\tself.velocity_x = -self.velocity_x\n\t\tif self.ball_x < 1:\n\t\t\treturn 0\n\t\tif self.ball_y > self.paddle_y + State.paddle_height or self.ball_y < self.paddle_y:\n\t\t\treturn -1\n\t\tself.ball_x = 2 - self.ball_x\n\t\tself.velocity_x = random.uniform(-0.015, 0.015) - self.velocity_x\n\t\tif abs(self.velocity_x) < 0.03:\n\t\t\tself.velocity_x = 0.03 if self.velocity_x > 0 else -0.03\n\t\tself.velocity_y = random.uniform(-0.03, 0.03) - self.velocity_y\n\t\tself.velocity_x = max(min(self.velocity_x, 1.0), -1.0)\n\t\tself.velocity_y = max(min(self.velocity_y, 1.0), -1.0)\n\t\treturn 1",
"def compute_coll(self, ball, step):\n m1 = self.mass\n m2 = ball.mass\n r1 = self.radius\n r2 = ball.radius\n v1 = self.velocity\n v2 = ball.velocity\n x1 = self.position\n x2 = ball.position\n di = x2-x1\n norm = np.linalg.norm(di)\n\n if norm-r1-r2 < step*abs(np.dot(v1-v2,di))/norm:\n self.vafter = v1 - 2.*m2/(m1+m2) * np.dot(v1-v2,di)/(np.linalg.norm(di)**2.) * di",
"def ball_collision_update(self):\r\n ball_pairs = self.balls_colliding()\r\n for ball_pair in ball_pairs:\r\n b1,b2 = ball_pair\r\n self.ball_pair_collision_update(b1,b2)",
"def __init__(self, max_vel=5., drag_coeff=0.05, ball_radius=0.2, bounce_restitution=0.9):\n self.max_vel = max_vel\n\n self.drag_coeff = drag_coeff\n self.ball_radius = ball_radius\n self.bounce_restitution = bounce_restitution\n\n super().__init__(\n state_dim=3,\n control_dim=1,\n control_limits=[np.array([-self.max_vel]), np.array([self.max_vel])])",
"def bouncing(self):\n x = random.randint(-250, 250) # where the ball will bounce on the X axis\n left_x = -850\n right_x = 850\n rand_y = random.randint(-350, 350) # random height where the ball goes\n floor = -350 # bouncing floor\n\n if self.xcor() > 300:\n self.goto(x, floor)\n self.goto(left_x, rand_y)\n elif self.xcor() < -300:\n self.goto(x, floor)\n self.goto(right_x, rand_y)",
"def wall_collision(self, table_length, table_breadth):\n\t\t\n\t\tR = self.radius\n\t\t#For 90degree shots\n\t\tif( self.x_vec == 0 and self.y_vec>0 ):\n\t\t\treturn self.x_pos,table_breadth-R\n\t\t\n\t\t#For 270degree shots\n\t\telif( self.x_vec == 0 and self.y_vec<0 ):\n\t\t\treturn self.x_pos,R\n\t\t\n\t\t#For 0degree shots\n\t\telif( self.x_vec > 0 and self.y_vec == 0 ):\n\t\t\treturn table_length-R,self.y_pos\n\t\t\n\t\t#For 180degree shots\n\t\telif( self.x_vec < 0 and self.y_vec == 0 ):\n\t\t\treturn R,self.y_pos\n\t\t#Other angles\n\t\telse:\n\t\t\tslope = (self.y_vec)/(self.x_vec)\n\t\t\t# y- y_ball = slop* (x - x_ball)\n\t\t\t#Equations where x,y correspond to a particular wall boundary where collision takes place based on https://bit.ly/2Ab1NdR \n\t\t\t#Ball moves towards top right\n\t\t\tif( self.x_vec>=0 and self.y_vec>=0 ):\n\t\t\t\t#Check if it collides with the top or right side\n\t\t\t\t\n\t\t\t\t#Collides with right side at co-ordinates (table_length-R,y)\n\t\t\t\ty = self.y_pos + (slope)*(table_length-R - self.x_pos)\n\t\t\t\t#Collides with top side at co-ordinates (x,table_breadth-R)\n\t\t\t\tx = self.x_pos + (1/slope)*(table_breadth - R - self.y_pos)\n\t\t\t\t\n\t\t\t\tif ( y<= table_breadth-R):\n\t\t\t\t\treturn table_length-R,y\n\t\t\t\t\n\t\t\t\telif ( x<= table_length-R):\n\t\t\t\t\treturn x,table_breadth-R\n\n\t\t\telif( self.x_vec>=0 and self.y_vec<=0 ):\t\t\t\t\n\t\t\t\t#Check if it collides with the bottom or right side\n\t\t\t\t\n\t\t\t\t#Collides with right side at co-ordinates (table_length-R,y)\n\t\t\t\ty = self.y_pos + (slope)*(table_length-R - self.x_pos)\n\t\t\t\t#Collides with bottom side at co-ordinates (x,R)\n\t\t\t\tx = self.x_pos + (1/slope)*(R - self.y_pos)\n\t\t\t\t\n\t\t\t\tif ( y<= table_breadth):\n\t\t\t\t\treturn table_length-R,y\n\t\t\t\t\n\t\t\t\telif ( x>= R):\n\t\t\t\t\treturn x,R\t\t\t\t\n\n\t\t\telif( self.x_vec<=0 and self.y_vec<=0 ):\n\t\t\t\t\n\t\t\t\t#Check if it collides with the bottom or left side\n\t\t\t\t\n\t\t\t\t#Collides with left side at co-ordinates (R,y)\n\t\t\t\ty = self.y_pos + (slope)*(R - self.x_pos)\n\t\t\t\t#Collides with bottom side at co-ordinates (x,R)\n\t\t\t\tx = self.x_pos + (1/slope)*(R - self.y_pos)\n\t\t\t\t\n\t\t\t\tif ( y>= R):\n\t\t\t\t\treturn R,y\n\t\t\t\t\n\t\t\t\telif ( x>= R):\n\t\t\t\t\treturn x,R\t\t\t\t\n\n\t\t\telse:\n\t\t\t\t#Check if it collides with the top or left side\n\t\t\t\t\n\t\t\t\t#Collides with left side at co-ordinates (R,y)\n\t\t\t\ty = self.y_pos + (slope)*(R - self.x_pos)\n\t\t\t\t#Collides with top side at co-ordinates (x,table_breadth - R)\n\t\t\t\tx = self.x_pos + (1/slope)*(table_breadth - R - self.y_pos)\n\t\t\t\t\n\t\t\t\tif ( y>= R):\n\t\t\t\t\treturn R,y\n\t\t\t\t\n\t\t\t\telif (x<= table_length-R ):\n\t\t\t\t\treturn x,table_breadth - R"
] |
[
"0.6752811",
"0.673313",
"0.67180336",
"0.66739434",
"0.6579822",
"0.6557391",
"0.64670503",
"0.64647895",
"0.6444945",
"0.64384305",
"0.6435668",
"0.6405922",
"0.639538",
"0.6363032",
"0.63621885",
"0.63595736",
"0.6239505",
"0.6206427",
"0.6186919",
"0.6175984",
"0.6161149",
"0.61547625",
"0.6142594",
"0.61018384",
"0.60883814",
"0.60628265",
"0.6050268",
"0.6033083",
"0.60313463",
"0.600752"
] |
0.7226013
|
0
|
Paints a square on the grid at a particular (int, int) position. Color is given as an RGB triple (of floats between 0 and 1); cr is the Cairo context. Used only in the expose methods of Board and NextPieceDisplay.
|
def paint_square(self, pos, color, cr):
    cr.set_source_rgb(*color)
    i, j = pos
    cr.rectangle(i*DOT_SIZE+1, j*DOT_SIZE-1, DOT_SIZE-2, DOT_SIZE-2)
    cr.fill()
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def draw_square(self, surface, color, position):\n rect = pygame.Rect(position, (50, 50))\n pygame.draw.rect(surface, color, rect)",
"def draw_square(display, coord, box_size, color, bg_color):\n left, top = coord\n half = int(box_size * 0.5)\n quarter = int(box_size * 0.25)\n pygame.draw.rect(\n display, color, (left + quarter, top + quarter, half, half))\n return",
"def draw_multicolor_square(t,sz):\r\n for i in [\"red\", \"purple\", \"hotpink\", \"blue\"]:\r\n t.color(i)\r\n t.forward(sz)\r\n t.left(90)",
"def render_grid(self, surface, color, pos, size):\n\t\tax, ay = pos\n\t\tsx, sy = size\n\t\tbx = ax + sx\n\t\tby = ay + sy\n\n\t\ttsx = sx / self.w\n\t\ttsy = sy / self.h\n\n\t\t# Draw vertical lines.\n\t\tfor x in range(ax, bx, tsx):\n\t\t\tpygame.draw.aaline(\n\t\t\t\t\tsurface, color, \n\t\t\t\t\t(x, ay), (x, by), 1)\n\t\t# Draw horizontal lines.\n\t\tfor y in range(ay, by, tsy):\n\t\t\tpygame.draw.aaline(\n\t\t\t\t\tsurface, color, \n\t\t\t\t\t(ax, y), (bx, y), 1)\n\t\t# Draw a rect around it.\n\t\tpygame.draw.rect(surface, color, (ax, ay, sx, sy), 1)",
"def squareColour(square):\n row, col = position(square)\n return positionColour(row, col)",
"def draw_board(screen):\n colors = [p.Color(\"white\"), p.Color(\"dark gray\")]\n\n for row in range(DIMENSION):\n for col in range(DIMENSION):\n # For all light squares: row + col => even\n # dark squares: row + col => odd\n color = colors[(row + col) % 2]\n p.draw.rect(screen, color, p.Rect(col * SQ_SIZE, row * SQ_SIZE, SQ_SIZE, SQ_SIZE))",
"def _draw_square(self, left_x, top_y, side, color, fill):\n self.pen.up()\n self.pen.color(color)\n self.pen.goto(left_x, top_y)\n self.pen.down()\n self.pen.begin_fill()\n for _ in range(4):\n self.pen.forward(side)\n self.pen.right(90)\n self.pen.end_fill()",
"def drawPiece(pos,color):\n\tif color == 0:\n\t\tcolor_piece = BGCOLOR\n\telif color == 1:\n\t\tcolor_piece = BLACK\n\telif color == 2:\n\t\tcolor_piece = WHITE\n\telif color == 3:\n\t\tcolor_piece = LIGHTGREEN\n\t#draws a circle of the right color on the board\n\tpygame.draw.ellipse(DISPLAYSURF, color_piece, [MARGINH + (pos[0]-1)*CASESIZE+4, MARGINV + (pos[1]-1)*CASESIZE+4, CASESIZE-8, CASESIZE-8])",
"def draw(self, window, color):\n rect = (self.row*self.size, self.col*self.size, self.size, self.size)\n pygame.draw.rect(window, color, rect)",
"def positionColour(row, col):\n if (row + col) % 2 == 0:\n return BLACK\n else:\n return WHITE",
"def draw_square(self, x, y, color):\n return self.canvas.create_rectangle(x * self.scale, y * self.scale, \\\n (x + 1) * self.scale, (y + 1) * self.scale, fill = color)",
"def drawRect (self, x, y, w, h, colour):\r\n for i in range (y,y+h):\r\n row = self.image [i]\r\n\r\n for j in range (x,x+w):\r\n row [j] = colour",
"def draw(self, cr, width, height):\n cr.set_source_rgb(0, 0, 0)\n cr.rectangle(0, 0, width, height)\n cr.fill()",
"def drawBoard(self):\r\n \r\n for i in range(8):\r\n for j in range(8):\r\n if (i %2 == 0 and j % 2 == 0) or (i % 2 !=0 and j % 2 != 0):\r\n COLOR = COLOR1\r\n else: COLOR = COLOR2\r\n pygame.draw.rect(screen, COLOR, Rect(i*50, j*50, 50, 50))\r\n\r\n self.drawLabels()\r\n \r\n if not self.piecesDrawn:\r\n self.drawPieces()\r\n self.piecesDrawn = True",
"def square(square_x, square_y, square_width, square_height, square_color):\n arcade.draw_rectangle_filled(square_x, square_y, square_width, square_height, square_color)",
"def __draw(self, display, color, size):\n\t\tif self.walls[0]: # up\n\t\t\tpygame.draw.line(display, color, (self.col * size , self.row * size) , (self.col * size + size, self.row * size))\n\t\tif self.walls[3]: # down\n\t\t\tpygame.draw.line(display, color, (self.col * size + size, self.row * size + size), (self.col * size , self.row * size + size))\n\t\tif self.walls[1]: #left\n\t\t\tpygame.draw.line(display, color, (self.col * size + size, self.row * size) , (self.col * size + size, self.row * size + size))\n\t\tif self.walls[2]: #right\n\t\t\tpygame.draw.line(display, color, (self.col * size , self.row * size + size), (self.col * size , self.row * size))\n\n\t\tif self.current:\n\t\t\tdraw_rect_with_alpha(display, self.CURRENT_COLOR, Vector((self.col, self.row)) * size, (size, size))\n\n\t\telif self.backtracked and self.SHOW_BACKTRACK:\n\t\t\tdraw_rect_with_alpha(display, self.BACKTRACKED_COLOR, Vector((self.col, self.row)) * size, (size, size))\n\n\t\telif self.visited:\n\t\t\tdraw_rect_with_alpha(display, self.VISITED_COLOR, Vector((self.col, self.row)) * size, (size, size))",
"def paint_cell(self, col, row, color):\r\n if isinstance(color, Number):\r\n self.A[row, col] = color\r\n else:\r\n self.A[row, col] = self.cdict[color]\r\n self.plot()",
"def green_cell(self, x, y):\n r = self.rect_area(x, y) # gets rect area for cell\n pygame.draw.rect(self.screen, (0, 255, 0), r, 3)\n pygame.display.update(r) # updates screen to showcase green rect",
"def draw_block(position, color):\n x = position.col*DX+DX+2\n y = position.row*DY+DY+2\n width = DX-4\n height = DY-4\n pygame.draw.rect(screen, color, (x,y,width,height), 0)",
"def overwrite_board_square(self, row, col):\n x = self.board_lft_x + col * self.next_square\n y = self.board_top_y - row * self.next_square\n color = self.square_light if (row + col) % 2 == 0 else self.square_dark\n self._draw_square(x, y, self.square_side_size, color, True)",
"def draw_cell(self, board, x, y, color):\n r = self.rect_area(x, y) # gets rect area for given cell\n pygame.draw.rect(self.screen, color, r, 3)\n e = self.font.render(str(board[y][x]), 1, (0, 0, 0)) # creates number\n self.screen.blit(e, (self.x_pos + x * 80, self.y_pos + y * 80)) # draws number\n pygame.display.update(r) # updates screen to showcase rect",
"def square(self, char, left, top, length, filled=False, thickness=1):\n pointsIterable = pybresenham.rectangle(left, top, length, length, filled, thickness)\n self.points(char, pointsIterable)",
"def drawSquare(t, sz):\n\n t.shape(\"turtle\")\n while 1:\n\t if sz > 200:\n\t \tbreak\n\t for j in range (36):\n\t \tt.left(10)\n\t \tsz = sz + 1 \n\n\t \tif j%2 == 1:\n\t \t\tt.color(\"red\")\n\t \telse:\n\t \t\tt.color(\"blue\")\n\t \tfor i in range(4):\n\t \t\tt.forward(sz)\n\t \t\tt.left(90)\n\t sz = sz + 1",
"def solid(t, coord, ii, n_pixels, random_values):\n\n\n return (100,100,100)",
"def draw_grid(self):\n\n screen.fill(GREY)\n\n for row in self.grid:\n for cell in row:\n if cell.root:\n color = GREEN\n elif cell.goal:\n color = RED\n elif cell.value:\n color = DARK_BLUE\n elif cell.visited:\n color = LIGHT_BLUE\n elif cell.f:\n color = LIGHT_GREEN\n elif cell.wall:\n color = GRAY\n else:\n color = WHITE\n\n pygame.draw.rect(screen, color, cell.rect)\n\n x, y = cell.rect.x, cell.rect.y\n\n if cell.g:\n self.draw_score(x + 2, y + 2, cell.g)\n if cell.h:\n self.draw_score(x + 18, y + 2, cell.h)\n if cell.f:\n self.draw_score(x + 2, y + self.cell_size - 10, cell.f)",
"def actually_draw_square(canvas_mode, canvas_size_in_pixels, canvas_background_color_rgb, rectangle_position, rectangle_fill, rectangle_outline):\n im = Image.new(canvas_mode, canvas_size_in_pixels, canvas_background_color_rgb)\n dr = ImageDraw.Draw(im)\n\n dr.rectangle(rectangle_position, fill=rectangle_fill, outline=rectangle_outline)\n\n im.save(\"square.png\")",
"def draw_grid(self) -> None:\n grid = self.life.curr_generation\n for row in range(self.cell_height):\n for column in range(self.cell_width):\n if grid[row][column] == 1:\n color = \"green\"\n else:\n color = \"white\"\n pygame.draw.rect(\n self.screen,\n pygame.Color(color),\n (column * self.cell_size, row * self.cell_size, self.cell_size, self.cell_size),\n )",
"def coordinates_of_square(crd):\n col = ord(crd[0]) - ord('a')\n row = int(crd[1]) - 1\n return (col * SQUARE_EDGE + BOARD_MARGIN, (7 - row) * SQUARE_EDGE + BOARD_MARGIN)",
"def draw_rect(self, i, j, col, d=0):\n pygame.draw.rect(self.screen, col, self.get_rect(i, j), d)",
"def set(self, row: int, col: int, color: Color) -> None:\n super(ColorGrid, self).set(row, col, color)"
] |
[
"0.7065544",
"0.67776066",
"0.637372",
"0.6360873",
"0.6332401",
"0.63036317",
"0.6284072",
"0.6204069",
"0.61639875",
"0.60808086",
"0.6073086",
"0.6061461",
"0.60506696",
"0.6016593",
"0.5891857",
"0.5889505",
"0.5847315",
"0.5842801",
"0.58213603",
"0.5807137",
"0.58049685",
"0.579154",
"0.57892376",
"0.57579535",
"0.5755905",
"0.5737735",
"0.56382525",
"0.56303453",
"0.56270874",
"0.5626358"
] |
0.7957317
|
0
|
Drop (and lock) curr_piece as far as possible, granting points equal to the distance of the drop.
|
def drop_curr_piece(self):
    if self.over: return
    delta = (0, 0)  # now make this as big as possible
    while True:
        new_delta = tuple_add(delta, (0, 1))
        if self.can_move_curr_piece(new_delta):
            delta = new_delta
        else:
            break
    self.increment_score(delta[1])
    self.move_curr_piece(delta)
    self.lock_curr_piece()
    self.queue_draw()
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def move_curr_piece(self, delta, point=False):\n if self.over: return\n elif self.can_move_curr_piece(delta):\n self.curr_piece.confirm_move(delta)\n if point: self.increment_score(1)\n elif delta == (0,1): # \"illegal\" down move\n self.lock_curr_piece()\n self.queue_draw()",
"def drop_piece(self, piece, x):\n if piece == Piece.NONE:\n raise ValueError('Invalid piece')\n\n y = self.get_drop_row(x)\n if y < 0:\n raise RuntimeError('Cannot drop piece at column {} because it is full.'.format(x))\n\n self._set_piece_at_opening(piece, x, y)\n self.drop_history.append((piece, x, y))\n\n winning_piece_positions = self._check_for_win(piece, x, y)\n if winning_piece_positions:\n self._on_player_won(piece, winning_piece_positions)\n\n if self._is_tie():\n self._on_tie()",
"def static_drop(self):\n if self.any_in_buffer(self.active_piece):\n return\n for cell in TransformPiece.sort_cells(self.grid.keys(), self.current_direction):\n self.drop([cell])",
"def drop(self):\n init_x = self.x\n init_y = self.y\n init_z = self.z\n drop_z = self.drop_height\n \n #drop to given height\n self.move_to(init_x, init_y, drop_z)\n \n #open gripper\n self.gripper.command_position(100)\n \n #return to initial position\n self.move_to(init_x, init_y, init_z)",
"def move(self, piece):\n\n if list(piece) in self.find_moves():\n self.block[tuple( self.find_free() )] = self.block[tuple(piece)]\n self.block[tuple(piece)] = 0\n return \"success\"\n else:\n return \"error\"",
"def spawn_new_piece(self):\n\n del self.active_piece\n\n new_x = self.WIDTH // 2 - 1\n self.active_piece = Figure(random.choice(PIECE_TYPES), new_x, 0)",
"def remove_piece(self) -> None:\r\n if self.has_piece():\r\n self.piece.square = None\r\n self.piece = None",
"def mouse_release(self,event):\n global drag_sq\n if drag_sq != -1:\n# dst_sq = (event.y // sq_size) * 8+ (event.x // sq_size)\n dst_sq = self.coord_to_sq((event.x, event.y))\n \n m = Move(drag_sq, dst_sq)\n m.set_from_user() # this is input from user (not file)\n \n if not self.on_move_piece(m):\n # Withdraw the piece to original spot\n obj = self.piece_objs[drag_sq]\n \n self.canvas.coords(obj, \n self.sq_to_coord(drag_sq))\n# ((drag_sq%8)*sq_size, (drag_sq//8)*sq_size))\n drag_sq = -1\n return",
"def lock_curr_piece(self):\n for pos in self.curr_piece.occupying():\n self.locked_squares[pos] = self.curr_piece.color\n self.clear_rows()\n self.curr_piece = self.next_piece_display.get_piece()\n if any(pos in self.locked_squares\n for pos in self.curr_piece.occupying()):\n self.game_over()",
"def legal_move_on(draw, board):\n start, _ = draw(strategies.sampled_from(sorted(board.pieces)))\n end = draw(strategies.sampled_from(sorted(board.movable_from(start))))\n return start, end",
"def rotate_curr_piece(self):\n if self.over: return\n hypothetical = self.curr_piece.test_rotate()\n if all(pos not in self.locked_squares and self.on_board(pos)\n for pos in hypothetical):\n self.curr_piece.confirm_rotate()\n self.queue_draw()",
"def get_possible_next_coords(self, selected_piece_coords, player):\n next_coords = []\n special_coords = [(0, 0), (0, 8), (8, 0), (8, 8), (4, 4)]\n if player == 1 or player == 2:\n if selected_piece_coords[0] < 9:\n for i in range(selected_piece_coords[0]+1, 9):\n temp_coords = (i, selected_piece_coords[1])\n if self.is_piece(temp_coords):\n break\n if temp_coords not in special_coords:\n next_coords.append(temp_coords)\n \n if selected_piece_coords[0] > 0:\n for i in range(selected_piece_coords[0]-1, -1, -1):\n temp_coords = (i, selected_piece_coords[1])\n if self.is_piece(temp_coords):\n break\n if temp_coords not in special_coords:\n next_coords.append(temp_coords)\n \n if selected_piece_coords[1] < 9:\n for i in range(selected_piece_coords[1]+1, 9):\n temp_coords = (selected_piece_coords[0], i)\n if self.is_piece(temp_coords):\n break\n if temp_coords not in special_coords:\n next_coords.append(temp_coords)\n \n if selected_piece_coords[1] > 0:\n for i in range(selected_piece_coords[1]-1, -1, -1):\n temp_coords = (selected_piece_coords[0], i)\n if self.is_piece(temp_coords):\n break\n if temp_coords not in special_coords:\n next_coords.append(temp_coords)\n return next_coords\n # KING movement\n else:\n if selected_piece_coords[0] < 9:\n for i in range(selected_piece_coords[0]+1, 9):\n temp_coords = (i, selected_piece_coords[1])\n if self.is_piece(temp_coords):\n break\n next_coords.append(temp_coords)\n \n if selected_piece_coords[0] > 0:\n for i in range(selected_piece_coords[0]-1, -1, -1):\n temp_coords = (i, selected_piece_coords[1])\n if self.is_piece(temp_coords):\n break\n next_coords.append(temp_coords)\n \n if selected_piece_coords[1] < 9:\n for i in range(selected_piece_coords[1]+1, 9):\n temp_coords = (selected_piece_coords[0], i)\n if self.is_piece(temp_coords):\n break\n next_coords.append(temp_coords)\n \n if selected_piece_coords[1] > 0:\n for i in range(selected_piece_coords[1]-1, -1, -1):\n temp_coords = (selected_piece_coords[0], i)\n if self.is_piece(temp_coords):\n break\n next_coords.append(temp_coords)\n return next_coords",
"def drop(self):\n if (pyxel.frame_count % self.vy) == 0:\n mapDel(self, theFallen)\n self.y = (self.y + 1)\n mapAdd(self, theFallen)",
"def get_piece(self):\n old = self.next_piece\n new = self.create_piece()\n self.next_piece = new\n self.queue_draw()\n return old",
"def drop(self, oldpos, fieldgroup, currentplayer):\n\t\ttry:\n\t\t\tdropped_on = pygame.sprite.spritecollide(self,\n\t\t\t\t\t\t\t\t\t fieldgroup,\n\t\t\t\t\t\t\t\t\t False)[0]\n\t\t\tif dropped_on.type == \"home\":\n\t\t\t\tcurrentplayer[\"meeples_home\"] += 1\n\t\t\telif dropped_on.id == currentplayer.name*10:\n\t\t\t\tcurrentplayer.meeples_out -= 1\n\t\t\tself.grabbed = False\n\n\t\t\treturn True\n\t\texcept:\n\t\t\tself.rect = oldpos\n\t\t\tself.grabbed = False\n\t\t\treturn False",
"def move_piece(self, selected_piece_coords, destination_coords):\n \n if selected_piece_coords[0] < 0 or destination_coords[0] < 0:\n return False\n if selected_piece_coords[1] >= 9 or destination_coords[1] >= 9:\n return False\n \n if self.is_piece(destination_coords):\n return False\n \n for piece in self.game_pieces:\n if selected_piece_coords[0] == piece.x and selected_piece_coords[1] == piece.y:\n piece.x = destination_coords[0]\n piece.y = destination_coords[1]\n capture_coords = self.check_for_captures(destination_coords,piece.player)\n for coords in capture_coords:\n self.remove_piece(coords)\n break\n return True",
"def eaten(self): # called when this piece has been 'eaten'\r\n \r\n self.board.removePiece((self.x, self.y)) # remove the 'Piece' object\r\n addr = self.x-25, self.y-25\r\n empty = Empty(addr)\r\n self.board.addPiece(empty) # replace it with the 'Empty' object\r",
"def moveChecker(self, point):\r\n \r\n clickedPiece = self.clickedPiece\r\n \r\n # Checks whether the new point should be hit and calls the the pointHit\r\n # method if necessary\r\n if point.isBlot() and \\\r\n point.getTeam() != self.points[clickedPiece].getTeam():\r\n self.pointHit(point)\r\n \r\n # Adds checker to new point and updates the new point correspondingly\r\n point.addChecker(self.points[self.clickedPiece].returnChecker())\r\n point.organize()\r\n point.update()\r\n point.setActiveTurn()\r\n \r\n # Removes checker from the clicked point and updates the point\r\n self.points[clickedPiece].removeChecker()\r\n self.points[clickedPiece].organize()\r\n self.points[clickedPiece].update()\r\n self.points[clickedPiece].setActiveTurn()\r\n self.points[clickedPiece].undoClick()\r\n \r\n # Updates available dice numbers\r\n self.diceNumbers.remove((point.getNumber() - clickedPiece)\r\n * (-1) ** self.currentPlayer)\r\n \r\n # Changes turn if necessary\r\n if not len(self.diceNumbers):\r\n self.changeTurn()\r\n\r\n self.drawBoard()",
"def cut(self, piece):\n self.substrates = self.substrates.difference(piece)",
"def undoPossibleMoves(self, startingPiece):\r\n self.clickedPiece = None #Indicates that the board has no clicked piece\r\n for num in self.diceNumbers:\r\n nextPoint = startingPiece + num * ((-1) ** self.currentPlayer)\r\n if nextPoint < len(self.points) and nextPoint >= 0:\r\n self.points[nextPoint].setValidMove(False)\r\n self.points[nextPoint].setBorder(BLACK, 1)",
"def drag(self, mouse_occupied, surf ):\n\n if pygame.mouse.get_pressed()[0]:\n if self.is_undermouse(surf):\n if not self.pickup and not mouse_occupied:\n self.pickup = True\n mouse_occupied = True\n self.mouse_anchor = ((pygame.mouse.get_pos()[0]-self.pos[0]),(pygame.mouse.get_pos()[1]-self.pos[1]))\n else:\n if self.pickup:\n self.pickup = False\n mouse_occupied = False\n\n\n if self.pickup:\n self.pos = ((pygame.mouse.get_pos()[0]-self.mouse_anchor[0]),(pygame.mouse.get_pos()[1]-self.mouse_anchor[1]))\n\n\n return mouse_occupied",
"def capture_piece(self, captured_piece):\n self.row = self.get_piece_jumping_position(captured_piece)['opp_row']\n self.col = self.get_piece_jumping_position(captured_piece)['opp_col']\n captured_piece.alive = False",
"def line_moved(self):\n\n # The line is supposed to be moved by hand to the beginning of first wrinkle.\n # The optimal spot is local maximum (not always visible)\n ext_index = self.index_of_drop + int(self.line.value() * 10000)\n ext_value = self.data[ext_index]\n\n p_i, p_f = toolbox_2.get_pressure_change(self.measurement)\n smallest_growing_particle = toolbox_2.minimum_particle_diameter(p_i, p_f, self.saturation_percentage / 100)\n\n n = toolbox_2.particle_count_2(ext_value)\n\n # measurement series 1\n if self.selected_data == 3 and 7 <= self.meas_selected_number <= 17 and self.meas_selected_series == 1:\n index = self.meas_selected_number - 7 # Assumes that first measurement is number 7\n self.smallest_particles[index] = smallest_growing_particle\n self.number_counts[index] = n\n\n self.update_distribution()\n # Update plot\n self.curve_distribution.setData(self.particle_distribution_x, self.particle_distribution_y*1e-10)\n self.curve_distribution_cumulative.setData(self.smallest_particles, self.number_counts*1e-10)\n\n # measurement series 2\n elif self.selected_data == 3 and self.meas_selected_series == 2:\n index = self.meas_selected_number - 1 # begins from 1, 0th measurement is just copy of 8th\n self.number_counts_2[index] = n\n\n self.curve_rotatometer.setData(np.array([4, 6, 8, 10, 12, 14, 16, 18]), self.number_counts_2*1e-10)\n x = np.linspace(3.5, 20, 100)\n self.curve_rotatometer_fit.setData(x, self.number_counts_2[0] * 4 * (1 / x) *1e-10)\n\n #print(\"N\", \"%.2e\"%n, \"dpres\", round(p_i - p_f))",
"def possibleMoves(self, startingPiece):\r\n self.clickedPiece = startingPiece\r\n for num in self.diceNumbers:\r\n nextPoint = startingPiece + num * ((-1) ** self.currentPlayer)\r\n \r\n #sets points as valid moves if a move can be made to them from the\r\n #clicked piece\r\n if nextPoint < len(self.points) and nextPoint >= 0:\r\n if self.points[nextPoint].isOpen()\\\r\n or self.points[nextPoint].isBlot()\\\r\n or self.points[nextPoint].getTeam() == self.getTurn():\r\n self.points[nextPoint].setValidMove(True)\r\n self.points[nextPoint].setBorder(RED, 3)",
"def simulate_place_disc(self, board, col_nr, curr_player):\n if board[0, col_nr] != 0:\n return board\n new_board = np.copy(board)\n for row_nr in reversed(range(self.rows())):\n if new_board[row_nr, col_nr] == 0:\n new_board[row_nr, col_nr] = curr_player\n return new_board",
"def take_remove_tile_turn(self, remove_tile_fxn):\n tilesAroundOpponents = []\n for player in self.board.players:\n if not player == self.player:\n x, y = player.x, player.y\n nearbyTiles = self.board.get_removable_tiles_around(x, y)\n tilesAroundOpponents.extend(nearbyTiles)\n tilesAroundOpponents = set(tilesAroundOpponents)\n x, y = self.player.x, self.player.y\n tilesAroundMe = set(self.board.get_removable_tiles_around(x, y)) # tiles around controlled player (me)\n safelyAroundOpponents = list(tilesAroundOpponents - tilesAroundMe) # tiles around opponents but not around me\n removableTiles = set(self.board.get_all_open_removable_tiles()) # all removable tiles\n safelyRemovable = list(removableTiles - tilesAroundMe) # all removable tiles except those around me\n try:\n if safelyAroundOpponents:\n target = random.choice(safelyAroundOpponents)\n elif tilesAroundOpponents: # likely that I'm next to other player. I'll have to remove a tile available for both of us\n target = random.choice(list(tilesAroundOpponents))\n else: # no open spots to remove around players can only happen if solid unremovable tiles exist\n target = random.choice(safelyRemovable)\n except IndexError: # this error will catch if last else statement possibly triggered it\n super(TileRemoveBot, self).take_remove_tile_turn(remove_tile_fxn)\n return\n remove_tile_fxn(target.x, target.y)",
"def possibleMove(self, dist, blockList):\r\n \r\n if self.orientation == \"v\":\r\n for block in blockList:\r\n if dist >= 0:\r\n for n in range(dist):\r\n for coords in self.getCoords():\r\n if ((coords[0], coords[1] + n) in\r\n block.getCoords()) and (block.getNum() !=\r\n self.getNum()):\r\n self.collidedPieces = 1\r\n else:\r\n for n in range(0, dist, -1):\r\n for coords in self.getCoords():\r\n if ((coords[0], coords[1] +n) in\r\n block.getCoords()) and (block.getNum() !=\r\n self.getNum()):\r\n self.collidedPieces = 1\r\n \r\n self.y += dist\r\n self.setCoords()\r\n \r\n elif self.orientation == \"h\":\r\n for block in blockList:\r\n if dist >= 0:\r\n for n in range(dist):\r\n for coords in self.getCoords():\r\n if ((coords[0] + n, coords[1]) in\r\n block.getCoords()) and (block.getNum() !=\r\n self.getNum()):\r\n self.collidedPieces = 1\r\n else:\r\n for n in range(0, dist, -1):\r\n for coords in self.getCoords():\r\n if ((coords[0] + n, coords[1]) in\r\n block.getCoords()) and(block.getNum() !=\r\n self.getNum()):\r\n self.collidedPieces = 1\r\n \r\n self.x += dist\r\n self.setCoords()",
"def _array_with_piece_down(self, piece: \"CurrentPiece\") -> Optional[np.ndarray]:\n\n if not piece.piece or not piece.position:\n return None\n\n baseline = np.sum(self.array_with_piece(piece))\n\n while (\n piece.position.y < FIELD_SHAPE[0] - 1\n and np.sum(self.array_with_piece(piece)) >= baseline\n ):\n piece = CurrentPiece(\n piece.piece, position=Point(piece.position.x, piece.position.y + 1)\n )\n\n if not piece.position:\n return None\n\n piece = CurrentPiece(\n piece.piece, position=Point(piece.position.x, piece.position.y - 1)\n )\n\n return self.array_with_piece(piece)",
"def pick(self, inv, pl, group, sc):\r\n if self.rect.colliderect(pl) and not self.used:\r\n group.remove(self)\r\n inv += ['score {}'.format(id(self))]\r\n sc += [sc[len(sc) - 1] + 100]\r\n self.used = True",
"def take(self, desired_amount):\n if self.amount >= desired_amount:\n grab = desired_amount\n else:\n grab = min(desired_amount, self.amount)\n self.amount -= grab\n print(f\"{self} {self.amount} of supplies left\")\n return grab"
] |
[
"0.65945",
"0.64016134",
"0.59951985",
"0.5804648",
"0.57469374",
"0.57464397",
"0.573787",
"0.5701909",
"0.5693444",
"0.566303",
"0.5634835",
"0.5607702",
"0.5607433",
"0.5579537",
"0.5576645",
"0.5542439",
"0.55122036",
"0.54979587",
"0.54958576",
"0.548495",
"0.5484019",
"0.5478004",
"0.5449981",
"0.54476106",
"0.54267675",
"0.54051423",
"0.53557074",
"0.5350717",
"0.53361183",
"0.5334463"
] |
0.8361154
|
0
|
Add squares of current piece to the collection of locked squares. Make calls to clear full rows, generate another piece, and check whether the game should end.
|
def lock_curr_piece(self):
    for pos in self.curr_piece.occupying():
        self.locked_squares[pos] = self.curr_piece.color
    self.clear_rows()
    self.curr_piece = self.next_piece_display.get_piece()
    if any(pos in self.locked_squares
           for pos in self.curr_piece.occupying()):
        self.game_over()
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def add_piece(self, piece: Model):\n new_piece_coordinates = piece.get_block_positions()\n for coordinates in new_piece_coordinates:\n if not self.piece_encompasses_coordinates(coordinates):\n continue\n else:\n print('GAME OVER')\n return False\n self.pieces.append(piece)\n piece.parent_board = self\n\n return True",
"def dirty_squares(self) -> None:\n row = ran(0, self.__squares.__len__() - 1)\n column = ran(0, self.__squares[0].__len__() - 1)\n self.__squares[row][column] = Floor._dirty\n print(\"Ensuciamos el piso y quedo así: \", self.__str__())",
"def update_grid(self):\n if self.game_over:\n return\n if self.active_piece is None:\n self.place_new_piece()\n if self.piece_collision_exists(self.active_piece):\n self.handle_active_piece_collision()\n self.place_new_piece()\n self.shift_cells(self.active_piece, self.current_direction)\n self.active_piece = TransformPiece.shift_coordinates(self.active_piece, self.current_direction)\n self.merge_with_completed_rows()\n if self.is_game_won():\n self.game_over = True",
"def add_to_board(board: list, piece: dict) -> None:\n for x in range(TEMPLATEWIDTH):\n for y in range(TEMPLATEHEIGHT):\n if PIECES[piece['shape']][piece['rotation']][y][x] != BLANK:\n board[y + piece['y']][x + piece['x']] = piece['color']",
"def add_square(self, row, col):\n square = []\n r, c = row, col\n while r < row + self.r_size:\n while c < col + self.c_size:\n square.append((r, c))\n c += 1\n r += 1\n c = col\n return square",
"def dirty_square(self, row: int, column: int) -> None:\n self.__squares[row][column] = Floor._dirty",
"def add_to_board(self, piece):\n for x in range(pieces.Piece.TEMPLATE_WIDTH):\n for y in range(pieces.Piece.TEMPLATE_HEIGHT):\n if piece.get_template()[y][x]: \n if piece.get_pos_x() + x < game_config.BOARD_BOX_COUNT_X and piece.get_pos_y() + y < game_config.BOARD_BOX_COUNT_Y:\n self.board.set_cell(x + piece.get_pos_x(), y + piece.get_pos_y(), piece.get_piece_color())",
"def op_add_piece_postconditions(self,newPieceCoords):\n\n # Start of new state constrution\n next_gs_board = Board.from_binary_matrix(self.board)\n next_gs_board.set_element(newPieceCoords[0], newPieceCoords[1], self.curr_player)\n next_gs_next_player = self.get_enemy(self.curr_player)\n next_gs_next_pieces = set()\n next_gs_next_move = self.FREE\n\n new_gs = Eximo(next_gs_next_player,next_gs_next_move,next_gs_next_pieces,next_gs_board)\n\n # Reduce the number of pieces that can be added. If the added piece was the last one rever to FREE flag\n if self.next_move == self.ADDPIECE_2:\n new_gs.next_move = self.ADDPIECE_1\n new_gs.curr_player = self.curr_player\n new_gs.next_pieces = { piece for piece in self.next_pieces }\n new_gs.next_pieces.remove(newPieceCoords)\n elif self.next_move == self.ADDPIECE_1:\n new_gs.next_move = self.FREE\n\n # Perform checkup to set correct flags (for mandatory captures)\n new_gs.perform_checkup()\n new_gs.last_piece = newPieceCoords\n return new_gs",
"def set_pieces(self):\n\n for i in range(len(self._game_board)):\n\n # Row 1\n if i == 0:\n for ii in range(len(self._game_board[i])):\n if ii == 0 or ii == 8:\n self._game_board[i][ii] = Chariot(\"black\", \"BCHA\")\n self._game_board[i][ii].update_location([i, ii])\n if ii == 1 or ii == 7:\n self._game_board[i][ii] = Horse(\"black\", \" BH \")\n self._game_board[i][ii].update_location([i, ii])\n if ii == 2 or ii == 6:\n self._game_board[i][ii] = Elephant(\"black\", \" BE \")\n self._game_board[i][ii].update_location([i, ii])\n if ii == 3 or ii == 5:\n self._game_board[i][ii] = Advisor(\"black\", \" BA \")\n self._game_board[i][ii].update_location([i, ii])\n if ii == 4:\n self._game_board[i][ii] = General(\"black\", \" BG \")\n self._game_board[i][ii].update_location([i, ii])\n\n # Row 3\n if i == 2:\n for ii in range(len(self._game_board[i])):\n if ii == 1 or ii == 7:\n self._game_board[i][ii] = Cannon(\"black\", \"BCAN\")\n self._game_board[i][ii].update_location([i, ii])\n\n # Row 4\n if i == 3:\n for ii in range(len(self._game_board[i])):\n if ii % 2 == 0:\n self._game_board[i][ii] = Soldier(\"black\", \"BSOL\")\n self._game_board[i][ii].update_location([i, ii])\n\n # Row 7\n if i == 6:\n for ii in range(len(self._game_board[i])):\n if ii % 2 == 0:\n self._game_board[i][ii] = Soldier(\"red\", \"RSOL\")\n self._game_board[i][ii].update_location([i, ii])\n\n # Row 8\n if i == 7:\n for ii in range(len(self._game_board[i])):\n if ii == 1 or ii == 7:\n self._game_board[i][ii] = Cannon(\"red\", \"RCAN\")\n self._game_board[i][ii].update_location([i, ii])\n\n # Row 10\n if i == 9:\n for ii in range(len(self._game_board[i])):\n if ii == 0 or ii == 8:\n self._game_board[i][ii] = Chariot(\"red\", \"RCHA\")\n self._game_board[i][ii].update_location([i, ii])\n if ii == 1 or ii == 7:\n self._game_board[i][ii] = Horse(\"red\", \" RH \")\n self._game_board[i][ii].update_location([i, ii])\n if ii == 2 or ii == 6:\n self._game_board[i][ii] = Elephant(\"red\", \" RE \")\n self._game_board[i][ii].update_location([i, ii])\n if ii == 3 or ii == 5:\n self._game_board[i][ii] = Advisor(\"red\", \" RA \")\n self._game_board[i][ii].update_location([i, ii])\n if ii == 4:\n self._game_board[i][ii] = General(\"red\", \" RG \")\n self._game_board[i][ii].update_location([i, ii])",
"def add_piece(self):\n self.active_piece = None\n piece_type = random.randint(0, len(TetrisPiece.PIECE_TYPES) - 1)\n max_row = 10 - TetrisPiece.get_piece_width(piece_type)\n origin = (0, random.randint(0, max_row))\n self.active_piece = TetrisPiece(piece_type, origin)\n if self.will_collide(direction='origin'):\n return False\n else:\n self.points += 1\n return True",
"def put_piece(self, piece, row, col):\n self.squares[row][col] = piece\n self._put_chr_at(piece, row, col, self.not_select_color)",
"def clear_rows(self):\n ### Previous version had a bug, in that it assumed the set of ###\n ### indices of full rows had to be a contiguous sequence! ###\n full_rows = [j for j in range(ROWS) if all(\n (i, j) in self.locked_squares for i in range(COLS))]\n if not full_rows: return\n ### Calculate how for to drop each other row, and do it ###\n drop = {j: len([k for k in full_rows if k > j]) for j in range(ROWS)}\n self.locked_squares = {(i, j+drop[j]): color for (i, j), color in\n self.locked_squares.items() if j not in full_rows}\n ### Now just update score, etc. ###\n d = len(full_rows)\n self.increment_lines(d)\n self.increment_score(self.level*{1: 40, 2: 100, 3: 300, 4: 1200}[d])\n if self.level < self.lines // 10 + 1:\n self.increment_level()",
"def update_board_and_check_for_eog(self):\n self.display_piece(clear=True)\n if self.will_collide():\n self.display_piece() # put the piece back\n over = self.check_and_clear_rows() # clear rows, check to see if top has been reached\n if over:\n return over\n else:\n added_piece = self.add_piece()\n if added_piece:\n self.check_points_and_level_up()\n else:\n return \"Game Over! Couldn't add another piece.\"\n else:\n self.move_piece()",
"def update_pieces(self, /, pieces_set: list=[],\r\n pieces_removed: list=[]) -> None:\r\n if not self.pieces:\r\n # If the board hasn't had its pieces attribute set, iterate through\r\n # all of the squares and add the pieces to their relevant lists.\r\n for square in self.squares.flat:\r\n if square.has_piece():\r\n piece = square.get_piece()\r\n name = piece.get_name()\r\n color = piece.get_color()\r\n self.pieces.append(piece)\r\n if name in self.piece_lists.keys():\r\n self.piece_lists[name][color].append(piece)\r\n \r\n else:\r\n # If pieces are removed or added to the board, \r\n # remove/add them to their relevant lists.\r\n if pieces_set:\r\n for piece in reversed(pieces_set):\r\n name = piece.get_name()\r\n color = piece.get_color()\r\n self.pieces.append(piece)\r\n if name in self.piece_lists.keys():\r\n self.piece_lists[name][color].append(piece)\r\n self.piece_lists[name][color] = list(set(\r\n self.piece_lists[name][color]))\r\n if pieces_removed:\r\n for piece in reversed(pieces_removed):\r\n name = piece.get_name()\r\n color = piece.get_color()\r\n self.pieces.remove(piece)\r\n if name in self.piece_lists.keys():\r\n self.piece_lists[name][color].remove(piece)\r\n self.piece_lists[name][color] = list(set(\r\n self.piece_lists[name][color]))\r\n \r\n self.pieces = list(set(self.pieces))",
"def chessboardGame(x, y):\n xin = x\n yin = y\n\n # These squares have no possible move, therefore, are losing;\n # we chose these squares by sight; while loop below expands these sets\n # until we encompass whole board\n # it was not clear to me in the beginning that every square has a unique\n # determinant ending under optimal play\n losing_start = set([(1, 1), (2, 1), (1, 2), (2, 2)])\n\n # These squares can jump to losing_start in one move, so are winning\n winning_start = set([(1, 3), (1, 4), (2, 3), (2, 4),\n (3, 1), (3, 2), (3, 3), (3, 4),\n (4, 1), (4, 2), (4, 3)])\n\n def nextset(x, y):\n def isvalid(coord):\n return True if coord[0] >= 1 and coord[1] >= 1 \\\n and coord[0] <= 15 and coord[1] <= 15 else False\n\n nextsquares = [(x - 2, y + 1), (x - 2, y - 1), (x + 1, y - 2),\n (x - 1, y - 2)]\n nextsquares = set([*filter(isvalid, nextsquares)])\n # print(nextsquares)\n return nextsquares\n\n # run a few times through whole board;\n # it takes 5 times to find a definitive win path for all 225 squares\n # 161 squares are winning for first player\n # 64 squares are losing starting for first player\n test_set = [(i, j) for i in range(1, 16) for j in range(1, 16)]\n times = 1\n while (len(winning_start) + len(losing_start)) < 225:\n for coords in test_set:\n x_ = coords[0]\n y_ = coords[1]\n thenextset = nextset(x_, y_)\n # print('testing', x_, y_, thenextset)\n\n if (x_, y_) in losing_start:\n # print('No Path, Second wins')\n pass\n elif (x_, y_) in winning_start:\n # print('One jump to terminal square, First wins')\n pass\n elif (len(winning_start.intersection(thenextset))\n == len(thenextset)):\n # if next set ONLY includes winning_starts, First loses because\n # he has no choice but give win to opponent\n # need to add x,y to losing_start\n losing_start.add((x_, y_))\n # print('we lose, Second wins')\n elif len(losing_start.intersection(thenextset)) > 0:\n # if next set includes ANY losing_start, we win by choosing it\n # need to add x,y to winning_start\n winning_start.add((x_, y_))\n # print('First wins')\n else:\n # print('do not know')\n pass\n\n print('Run', times, len(winning_start) + len(losing_start))\n times += 1\n\n print(len(winning_start))\n print(len(losing_start))\n\n # prints schematic of Winor Loss of each of 15x15 squares\n\n print(' '.join(map(str, [i for i in range(1, 16)])))\n for i in range(15):\n row = ''\n for j in range(15):\n if test_set[i * 15 + j] in winning_start:\n row = row + 'W '\n else:\n row = row + 'L '\n print(row + str(i))\n\n if (xin, yin) in winning_start:\n print('First wins with', xin, yin)\n return 'First'\n else:\n print('Second wins with', xin, yin)\n return 'Second'",
"def add_gear_piece(self):\n self.__num_gear_collected += 1",
"def put_piece(self, x: int, y: int, piece: int):\n self.board_values[x, y] = piece\n self.tiles_taken[x, y] = True",
"def _relief_square_outer_refresh(self, add_instruction: Callable,\n top_color: ColorRGB, bottom_color: ColorRGB,\n wid_x: float, wid_y: float, wid_width: float, wid_height: float):\n lines = int(self.relief_square_outer_lines)\n for line in range(1, lines + 1):\n alpha = 0.9 - (line / lines) * 0.81\n line2 = 2 * line\n\n out_x1 = wid_x - line\n out_x2 = out_x1 + wid_width + line2\n out_y1 = wid_y - line\n out_y2 = out_y1 + wid_height + line2\n\n add_instruction(Color(*top_color, alpha)) # outside upper left\n add_instruction(Line(points=[out_x1, out_y1, out_x1, out_y2, out_x2, out_y2]))\n add_instruction(Color(*bottom_color, alpha)) # outside bottom right\n add_instruction(Line(points=[out_x1, out_y1, out_x2, out_y1, out_x2, out_y2]))",
"def random_insertion(self, puzzle, num_squares, avail, deleted):\n rv = 0\n for _ in range(min(num_squares, len(avail))):\n # Initialize possible values for random square, and choose row and column of square\n vals = [i for i in range(self.sl+1) if i != 0]\n ind = random.choice(avail)\n row = ind // self.sl\n col = ind % self.sl\n\n # Attempt to put random value into random square\n while len(vals):\n val = random.choice(vals)\n if puzzle.valid_square(row, col, val):\n puzzle.insert(row, col, val)\n avail.remove(ind)\n deleted.append(ind)\n rv += 1\n break\n vals.remove(val)\n \n # Return the amount of successful insertions\n return rv",
"def draw_pieces(screen, board):\n for row in range(DIMENSION):\n for col in range(DIMENSION):\n piece = board[row][col]\n # Check for empty square\n if piece != \"--\":\n screen.blit(IMAGES[piece], p.Rect(col * SQ_SIZE, row * SQ_SIZE, SQ_SIZE, SQ_SIZE))",
"def update_board(self, symbol, modified_squares):\n\t\tfor coord in modified_squares:\n\t\t\tself.board[coord] = symbol",
"def move(self, row, col):\r\n # TODO: add check for valid row, col\r\n self.pebbles = self.squares[row][col]\r\n self.squares[row][col] = 0\r\n\r\n while self.pebbles > 0:\r\n # If end of row 0 has been reached\r\n if col == self._squareCount - 1 and row == 0:\r\n row = 1\r\n # If beginning of row 1 has been reached\r\n elif col == 0 and row == 1:\r\n row = 0\r\n elif row == 0:\r\n col += 1\r\n elif row == 1:\r\n col -= 1\r\n\r\n self.squares[row][col] += 1\r\n self.pebbles -= 1",
"def can_add_to_square(self, tile, value):\n start_row = tile.row // self.board_squared * self.board_squared\n start_col = tile.column // self.board_squared * self.board_squared\n\n for row in range(start_row, start_row + self.board_squared):\n for col in range(start_col, start_col + self.board_squared):\n if self.puzzle[row][col].value == value:\n return False\n\n return True",
"def _relief_square_inner_refresh(self, add_instruction: Callable,\n top_color: ColorRGB, bottom_color: ColorRGB,\n wid_x: float, wid_y: float, wid_width: float, wid_height: float):\n lines = int(self.relief_square_inner_lines)\n offset = int(self.relief_square_inner_offset)\n for line in range(1, lines + 1):\n alpha = 0.9 - (line / lines) * 0.81\n line += offset\n line2 = 2 * line\n\n in_x1 = wid_x + line\n in_x2 = in_x1 + wid_width - line2\n in_y1 = wid_y + line\n in_y2 = in_y1 + wid_height - line2\n\n add_instruction(Color(*top_color, alpha)) # inside top left\n add_instruction(Line(points=[in_x1, in_y1, in_x1, in_y2, in_x2, in_y2]))\n add_instruction(Color(*bottom_color, alpha)) # inside bottom right\n add_instruction(Line(points=[in_x1, in_y1, in_x2, in_y1, in_x2, in_y2]))",
"def generatePiece(self):\n\n empty_tiles = []\n for y in range(BOARD_SIZE):\n for x in range(BOARD_SIZE):\n if self.grid[x][y].isEmpty():\n empty_tiles.append(self.grid[x][y])\n\n two_or_four = random.choice([2, 4])\n random.choice(empty_tiles).set(two_or_four)",
"def add_mines(self):\n for x, y in sample(list(itertools.product(range(self.width), range(self.height))), self.num_mines):\n self.grid[y][x] = self.mine",
"def add(self, mp):\n \n self.tile_contents.append(mp)\n if(self.tile_contents[-1].raised == False):\n self.paint_blocks += 1.00",
"def new_tile(self):\r\n # replace with your code\r\n empty_square_lists = []\r\n for row in range(self._grid_height):\r\n for col in range(self._grid_width):\r\n if(self.get_tile(row, col) == 0):\r\n empty_square_lists.append((row, col))\r\n \r\n if len(empty_square_lists) == 0:\r\n return \"game over!\"\r\n \r\n random_cell = random.choice(empty_square_lists)\r\n random_cell_row = random_cell[0]\r\n random_cell_col = random_cell[1]\r\n \r\n values = [2] * 90 + [4] * 10\r\n value = random.choice(values)\r\n \r\n self.set_tile(random_cell_row, random_cell_col, value)",
"def mate_pinned(self, turn):\n \n pinned_list = []\n opposite_colour = next_turn(turn)\n opp_king_index = (piece_class.KING_LOCATION[opposite_colour][0] + piece_class.KING_LOCATION[opposite_colour][1] * 8)\n opp_poss_moves = {tuple(i) for i in self.poss_dict[opposite_colour]}\n check_path = {tuple(i) for i in self.check_dict[turn]}\n \n if self.board[opp_king_index].possible_moves == []:\n if opp_poss_moves.intersection(check_path) != set():\n for i in self.board:\n if i != self.empty:\n if i.colour == opposite_colour:\n if i.graphic != piece_class.PIECEDICT[opposite_colour][piece_class.King]:\n if check_path.intersection({tuple(y) for y in i.possible_moves}):\n intersec = check_path.intersection({tuple(y) for y in i.possible_moves})\n intersec = [list(i) for i in intersec]\n for y in intersec:\n# self.display_board(self.board)\n# \n# print(y)\n \n select = self.coords.index(i.location)\n move = self.coords.index(y)\n move_square = self.board[move]\n selected_piece = self.board[select]\n \n \n if i.graphic == piece_class.PIECEDICT[opposite_colour][piece_class.Pawn] and i.location in piece_class.PAWN_START_DICT[turn]:\n self.board[move] = piece_class.Queen(opposite_colour, piece_class.PIECEDICT[opposite_colour][piece_class.Queen])\n else:\n self.board[move] = self.board[select]\n \n self.board[select] = self.empty\n self.loads_pathways(opposite_colour)\n \n# self.display_board(self.board) \n if piece_class.KING_LOCATION[opposite_colour] in self.path_dict[turn]:\n# print(\"1\")\n pinned_list.append(1)\n else:\n# print(\"0\")\n pinned_list.append(0)\n \n self.board[select] = selected_piece\n self.board[move] = move_square\n \n self.loads_pathways(opposite_colour)\n \n# input()\n# intersec = [item for sublist in intersec for item in sublist]\n# print(\"pinned list is: \", pinned_list)\n \n if pinned_list != [] and 0 not in pinned_list:\n# input(\"CHECKMATE\")\n COUNT[turn] += 1\n print(sum(COUNT.values()))\n print(\"Checkmate.\", turn, \"wins.\")\n\n self.endgame = True",
"def we_move(self):\n if self.player_squares.__len__() == 0:\n print \"This is the first move!\"\n self.record_move(self.our_squares, self.our_symbol, 5)\n self.finish_move(self.our_symbol, self.our_squares)\n else:\n print \"This is not the first move.\"\n # See where we should move next\n # Take square 5 if it's open\n if self.is_square_free(5):\n print \"Taking square 5.\"\n self.record_move(self.our_squares, self.our_symbol, 5)\n self.finish_move(self.our_symbol, self.our_squares)\n else:\n # See if the player is about to win\n print \"Square 5 is gone. Picking another.\"\n for win in TicTacToe.wins:\n print \"Testing winning combos for player.\"\n win_count = 0\n win_matches = []\n win_misses = []\n for i in win:\n if i in self.player_squares:\n print \"square %d is in win\" % i\n win_count += 1\n win_matches.append(i)\n elif i not in self.our_squares:\n win_misses.append(i)\n print \"win_count is %s\" % win_count\n if win_count == 2 and win_misses.__len__() > 0:\n print \"Uh-oh! Looks like the player might win soon.\"\n print \"win is %s\" % win\n print \"win_matches is %s\" % win_matches\n print \"win_misses is %s\" % win_misses[0]\n self.record_move(self.our_squares, self.our_symbol, win_misses[0])\n self.finish_move(self.our_symbol, self.our_squares)\n return\n # Try to block based on the player's last move\n if self.players_last_move == 1:\n if self.is_square_free(2):\n self.record_move(self.our_squares, self.our_symbol, 2)\n self.finish_move(self.our_symbol, self.our_squares)\n elif self.is_square_free(4):\n self.record_move(self.our_squares, self.our_symbol, 4)\n self.finish_move(self.our_symbol, self.our_squares)\n elif self.is_square_free(3):\n self.record_move(self.our_squares, self.our_symbol, 3)\n self.finish_move(self.our_symbol, self.our_squares)\n elif self.is_square_free(7):\n self.record_move(self.our_squares, self.our_symbol, 7)\n self.finish_move(self.our_symbol, self.our_squares)\n elif self.players_last_move == 3:\n if self.is_square_free(2):\n self.record_move(self.our_squares, self.our_symbol, 2)\n self.finish_move(self.our_symbol, self.our_squares)\n elif self.is_square_free(6):\n self.record_move(self.our_squares, self.our_symbol, 6)\n self.finish_move(self.our_symbol, self.our_squares)\n elif self.is_square_free(9):\n self.record_move(self.our_squares, self.our_symbol, 9)\n self.finish_move(self.our_symbol, self.our_squares)\n elif self.is_square_free(1):\n self.record_move(self.our_squares, self.our_symbol, 1)\n self.finish_move(self.our_symbol, self.our_squares)\n elif self.players_last_move == 9:\n if self.is_square_free(6):\n self.record_move(self.our_squares, self.our_symbol, 6)\n self.finish_move(self.our_symbol, self.our_squares)\n elif self.is_square_free(8):\n self.record_move(self.our_squares, self.our_symbol, 8)\n self.finish_move(self.our_symbol, self.our_squares)\n elif self.is_square_free(3):\n self.record_move(self.our_squares, self.our_symbol, 3)\n self.finish_move(self.our_symbol, self.our_squares)\n elif self.is_square_free(7):\n self.record_move(self.our_squares, self.our_symbol, 7)\n self.finish_move(self.our_symbol, self.our_squares)\n elif self.players_last_move == 7:\n if self.is_square_free(8):\n self.record_move(self.our_squares, self.our_symbol, 8)\n self.finish_move(self.our_symbol, self.our_squares)\n elif self.is_square_free(4):\n self.record_move(self.our_squares, self.our_symbol, 4)\n self.finish_move(self.our_symbol, self.our_squares)\n elif self.is_square_free(9):\n self.record_move(self.our_squares, self.our_symbol, 9)\n 
self.finish_move(self.our_symbol, self.our_squares)\n elif self.is_square_free(1):\n self.record_move(self.our_squares, self.our_symbol, 1)\n self.finish_move(self.our_symbol, self.our_squares)\n # No fancy logic here!\n elif self.is_square_free(1):\n self.record_move(self.our_squares, self.our_symbol, 1)\n self.finish_move(self.our_symbol, self.our_squares)\n elif self.is_square_free(3):\n self.record_move(self.our_squares, self.our_symbol, 3)\n self.finish_move(self.our_symbol, self.our_squares)\n elif self.is_square_free(9):\n self.record_move(self.our_squares, self.our_symbol, 9)\n self.finish_move(self.our_symbol, self.our_squares)\n elif self.is_square_free(7):\n self.record_move(self.our_squares, self.our_symbol, 7)\n self.finish_move(self.our_symbol, self.our_squares)\n elif self.is_square_free(2):\n self.record_move(self.our_squares, self.our_symbol, 2)\n self.finish_move(self.our_symbol, self.our_squares)\n elif self.is_square_free(6):\n self.record_move(self.our_squares, self.our_symbol, 6)\n self.finish_move(self.our_symbol, self.our_squares)\n elif self.is_square_free(8):\n self.record_move(self.our_squares, self.our_symbol, 8)\n self.finish_move(self.our_symbol, self.our_squares)\n elif self.is_square_free(4):\n self.record_move(self.our_squares, self.our_symbol, 4)\n self.finish_move(self.our_symbol, self.our_squares)"
] |
[
"0.6908374",
"0.6561864",
"0.62816846",
"0.6172833",
"0.61137444",
"0.6108231",
"0.60665935",
"0.5999639",
"0.59820265",
"0.5963833",
"0.5893313",
"0.588994",
"0.57835466",
"0.57342786",
"0.5715649",
"0.5707915",
"0.56976265",
"0.56739694",
"0.56672215",
"0.56570166",
"0.5649787",
"0.5637296",
"0.5632954",
"0.5602707",
"0.55671436",
"0.55506855",
"0.55457467",
"0.5541916",
"0.55246484",
"0.5504364"
] |
0.70793056
|
0
|
Clear any full rows, modifying the variables locked_squares, level, lines, and score as appropriate.
|
def clear_rows(self):
### Previous version had a bug, in that it assumed the set of ###
### indices of full rows had to be a contiguous sequence! ###
full_rows = [j for j in range(ROWS) if all(
(i, j) in self.locked_squares for i in range(COLS))]
if not full_rows: return
        ### Calculate how far to drop each remaining row, and do it ###
drop = {j: len([k for k in full_rows if k > j]) for j in range(ROWS)}
self.locked_squares = {(i, j+drop[j]): color for (i, j), color in
self.locked_squares.items() if j not in full_rows}
### Now just update score, etc. ###
d = len(full_rows)
self.increment_lines(d)
self.increment_score(self.level*{1: 40, 2: 100, 3: 300, 4: 1200}[d])
if self.level < self.lines // 10 + 1:
self.increment_level()
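
The drop map is the subtle step: each surviving row falls by the number of full rows strictly below it, so non-contiguous clears are handled correctly. A standalone sketch with assumed toy values (not part of the class):

ROWS, COLS = 6, 4
locked = {(i, j): "grey" for j in (2, 4) for i in range(COLS)}  # rows 2 and 4 are full
locked[(0, 0)] = "red"                                          # one lone square at the top
full_rows = [j for j in range(ROWS) if all((i, j) in locked for i in range(COLS))]
drop = {j: len([k for k in full_rows if k > j]) for j in range(ROWS)}
locked = {(i, j + drop[j]): c for (i, j), c in locked.items() if j not in full_rows}
print(sorted(locked))  # [(0, 2)] -- the red square drops past both cleared rows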
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def clear_rows(self):\n ...",
"def erase_scores(self):\n self.database.erase_scores(self.difficulty)",
"def reset(self):\n self.rows = deepcopy(self.empty_rows)\n self._update_max_row_info()",
"def dirty_squares(self) -> None:\n row = ran(0, self.__squares.__len__() - 1)\n column = ran(0, self.__squares[0].__len__() - 1)\n self.__squares[row][column] = Floor._dirty\n print(\"Ensuciamos el piso y quedo así: \", self.__str__())",
"def reset(self) -> None:\n self.logger.info(\"Reset\")\n\n self._has_bob = False\n self._has_single = False\n self._index = 0\n self._row = self.rounds()",
"def clearRows(self):\n self.data['rows'] = []",
"def check_and_clear_rows(self):\n # if board is full, then there will be a '#' in the first row\n if '#' in self.board[0]:\n return 'Game Over! Top has been reached.'\n for row in xrange(self.height):\n # if any given row is full, then that row won't have any blank spaces\n if not ' ' in self.board[row]:\n del self.board[row]\n self.board.insert(0, [' '] * self.width)",
"def check_clear_lines(self):\n num_lines = 0\n\n i = self.HEIGHT - 1\n\n level = (self.cleared_lines // 10) + 1\n\n while i >= 0:\n if 0 not in self.board[i]:\n self.board[i] = [0 for i in range(self.WIDTH)]\n num_lines += 1\n\n for j in range(i - 1, -1, -1):\n self.board[j + 1] = self.board[j]\n self.board[0] = [0 for i in range(self.WIDTH)]\n else:\n i -= 1\n\n self.cleared_lines += num_lines\n\n self.score += LINE_SCORES[num_lines] * level",
"def clearLayers(self):\n for _ in range(self.jobRow.rowCount()):\n self.jobRow.removeRow(0)",
"def reset_row(self):\n\n self.line = [0] * LINE_LENGTH\n self.current_index = 0",
"def clear(self):\n row, col = self.selected\n if self.cubes[row][col].value == 0:\n self.cubes[row][col].set_temp(0)",
"def clean(self):\n self.board_values = np.zeros((self.size, self.size))\n self.tiles_taken[:, :] = False",
"def clear(self):\n board.change_grid(self.x, self.y, 0)",
"def clear_for_new_board(self):\r\n self.game_board = []\r\n self.good_contours = []\r\n self.game_board_contours = []",
"def reset(self):\r\n store = get_store()\r\n nbval = store.get('Nbtimecompound')[\"value\"]\r\n for i in range(1, nbval):\r\n self.del_line(1)",
"def reset(self):\r\n # replace with your code\r\n for row in range(0, self._grid_height):\r\n for col in range(0, self._grid_width):\r\n self._grid_tile[row][col] = 0\r\n # at this step, all cells should be available\r\n self.new_tile()\r\n self.new_tile()",
"def clear(self):\n for y in range(len(self.matrix)):\n for x in range(len(self.matrix[0])):\n self.matrix[y-1][x-1] = (0,0,0)",
"def reset_hl_stats(self):\n\n self.ships_left = self.settings.ship_limit\n self.score = 0\n self.level = 1",
"def clear(self):\n for row in range(self.rows):\n for col in range(self.cols):\n self.data[row][col] = '.'",
"def clear(self):\n for row in range(self.rows):\n for col in range(self.cols):\n self.data[row][col] = '.'",
"def clear_data(self):\n self.game_list.clear()\n self.game_scores.clear()",
"def _clear_matrix(self):\n\t\tself._w2i_matrix = self._i2w_matrix = None",
"def clear(self):\n self._grid = [[None]]",
"def clean_square(self, row: int, column: int) -> None:\n self.__squares[row][column] = Floor._clean",
"def reset(self):\n # self.grid = [[0] * self.grid_width] * self.grid_height\n self.grid = []\n for dummy_row in range(self.grid_height):\n new_row = []\n for dummy_col in range(self.grid_width):\n new_row.append(0)\n self.grid.append(new_row)\n self.new_tile()\n self.new_tile()",
"def clear(self):\n self.__hasTABLE = False\n self.__hasGRAPHS = False\n self.__ndoubledollar = 0\n buffer.clear(self)",
"def clear_row_proportions(self):\n\n self._proportions[1].clear()",
"def clear_blockages(self):\n debug.info(3,\"Clearing all blockages\")\n self.rg.clear_blockages()",
"def clear(self):\n self._baseline = 0\n self._sensitivity_im = 0\n self._is_update = False",
"def dirty_square(self, row: int, column: int) -> None:\n self.__squares[row][column] = Floor._dirty"
] |
[
"0.6815103",
"0.6737937",
"0.6626234",
"0.6478328",
"0.63999295",
"0.62693805",
"0.625021",
"0.6215124",
"0.62023664",
"0.61826193",
"0.61183906",
"0.6101011",
"0.6049277",
"0.60386187",
"0.6013733",
"0.5997911",
"0.599455",
"0.59881616",
"0.5986746",
"0.5986746",
"0.5968666",
"0.5956183",
"0.5926352",
"0.5908606",
"0.59068745",
"0.5906193",
"0.5866519",
"0.5842289",
"0.5838395",
"0.5835732"
] |
0.82794744
|
0
|
Increment lines by d, and change the label.
|
def increment_lines(self, d):
self.lines += d
styled_set_label_text(self.lines_display, "Lines: "+str(self.lines))
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def line_counter(self, event=None):\n try:\n text_area = self.get_current()\n self.canvas.delete('all')\n i = text_area.index(\"@0,0\")\n while True:\n dline = text_area.dlineinfo(i)\n if dline is None: break\n y = dline[1]\n linenum = str(i).split(\".\")[0]\n self.canvas.create_text(10, y + 28, anchor=\"w\", text=linenum,\n font=self.lineFont, width=0)\n text_length = self.canvas.bbox('all') # returns a tuple in the form of (x1, y1, x2, y2)\n width = text_length[2] - text_length[0] # x2-x1\n self.canvas.config(width=width + 15)\n i = text_area.index(\"%s+1line\" % i)\n # print(self.cursor_pos.cget('pady'), self.statusbar_frame.cget('pady'), )\n except:\n self.canvas.delete('all')",
"def advance(self):\n self._current_inst += 1\n self._line = self._lines[self._current_inst].strip()",
"def _dig_line_count_changed(self, text):\n self._setup_table_digital()",
"def put_label(i):\n i = min(i, len(x)-2)\n dx = sx[i+1] - sx[i]\n dy = sy[i+1] - sy[i]\n rotation = np.rad2deg(math.atan2(dy, dx)) + rotation_offset\n pos = [(x[i] + x[i+1])/2. + offset[0], (y[i] + y[i+1])/2 + offset[1]]\n plt.text(pos[0], pos[1], label_text, size=9, rotation=rotation, color = line.get_color(),\n ha=\"center\", va=\"center\", bbox = dict(ec='1',fc='1',alpha=0.8))",
"def inc(self, labels: dict[str, str]):\n\n val = self.get(labels)\n\n if val is None:\n val = 0\n\n val += 1\n\n self.set(labels, val)",
"def increase_counter(self):\n self.values = self.values + 1",
"def put_label(i):\n i = min(i, len(x) - 2)\n dx = sx[i + 1] - sx[i]\n dy = sy[i + 1] - sy[i]\n rotation = np.rad2deg(math.atan2(dy, dx)) + rotation_offset\n pos = [(x[i] + x[i + 1]) / 2. + offset[0],\n (y[i] + y[i + 1]) / 2 + offset[1]]\n plt.text(pos[0],\n pos[1],\n label_text,\n size=9,\n rotation=rotation,\n color=line.get_color(),\n ha=\"center\",\n va=\"center\",\n bbox=dict(ec='1', fc='1', alpha=0.8))",
"def update_progress(i, n):\n global _current_line\n prog = progress_string(i, n)\n if _current_line is not None:\n _current_line.update(prog)",
"def label_consecutive_lines():\n offset = 0.1\n\n def get_points():\n \"\"\"Prompts for a point triple. Returns a list of the points:\n [<iter>, ...]\n \"\"\"\n points = rs.GetPoints(\n draw_lines=False, in_plane=False, \n message1='Select first tail', message2='Select heads', \n max_points=None, base_point=None)\n return points\n\n def draw_lpoint_triple(text, tail, head):\n \"\"\"Receives label text and a list of point triples:\n str\n [<iter>, ...]\n Draws text dots with <text>-a, -b, -c\n \"\"\"\n line_vector = rs.PointSubtract(head, tail)\n offset_vector = line_vector * offset\n offset_tail = rs.VectorAdd(tail, offset_vector)\n offset_head = rs.VectorSubtract(head, offset_vector)\n axis = [0, 0, 1]\n angle = 90\n rotated_offset_vector = rs.VectorRotate(offset_vector, angle, axis)\n offset_side = rs.VectorAdd(offset_tail, rotated_offset_vector)\n rs.AddTextDot(('%s-a' % text), offset_tail)\n rs.AddTextDot(('%s-b' % text), offset_head)\n rs.AddTextDot(('%s-c' % text), offset_side)\n\n def side_is_same_as_rule(point):\n \"\"\"Receives a point (i.e., a list):\n [num, num, num]\n Returns whether the point is on the same side as the side label in the\n rule\n \"\"\"\n return False\n \n points = get_points()\n text = rs.StringBox('Enter label text')\n for i in range(len(points) - 1):\n # for point in points:\n tail = points[i]\n head = points[i + 1]\n draw_lpoint_triple(text, tail, head)",
"def newLine(self) :\n if not self.hpgl2 :\n dic = self.pages.get(self.pagecount, None)\n if dic is None :\n self.setPageDict(\"linescount\", 1) \n dic = self.pages.get(self.pagecount)\n nblines = dic[\"linescount\"] \n self.setPageDict(\"linescount\", nblines + 1) \n if (self.linesperpage is not None) \\\n and (dic[\"linescount\"] > self.linesperpage) :\n self.pagecount += 1",
"def inc( self ):\n self.count += 1",
"def linenumber(self, pad, linepad):\n linepad.config(state=GUI.NORMAL)\n coordinate_pad = map(int, pad.index(GUI.END).split('.'))\n linepad.delete('1.0', GUI.END)\n for i in range(coordinate_pad[0] - 1):\n linepad.insert(GUI.END, str(i + 1) + '.\\n')\n linepad.config(state=GUI.DISABLED)\n linepad.see(GUI.END)",
"def update(self, line):",
"def inc(self):\n \n self.count += 1",
"def increment_steps(self):\n self.num_steps += 1",
"def UpdateLabel(self) -> _n_6_t_0:",
"def draw_label(self, text, event_name, num_items = 1, item = 0):\n width = self.XCOLUMNSKIP//num_items\n self.guiElements[event_name] = Draw.Label(\n text,\n self.xPos + item*width, self.yPos, width, self.YLINESKIP)\n if item + 1 == num_items:\n self.yPos -= self.YLINESKIP",
"def increment(self):\n self._deltas += 1",
"def advance(self, i):\n sys.stdout.write('\\r')\n sys.stdout.write(\"[%-30s] %d%%\" % ('=' * int(\n ceil(i / self._n * self._length)),\n (i + 1) / self._n * 100))\n sys.stdout.flush()",
"def increment(self):\n self.pos += 1\n if self.pos == len(self.progress) - 1:\n self.pos = 0",
"def draw_increasing(i, j):\n return \"\\\\draw[line width = \" + str(latex_options[\"line_width\"]) + \", color=\" + latex_options[\"color_increasing\"] + \"] (T\" + str(i) + \") -- (T\" + str(j) + \");\\n\"",
"def ad_step_to_progress_bar(self, n):\r\n self.progress_step += n\r\n self.progress[\"value\"] = self.progress_step\r\n self.progress.update_idletasks()",
"def cb_update_line_numbers(data, signal, signal_data):\n weechat.hook_timer(10, 0, 1, \"cb_timer_update_line_numbers\", \"\")\n return weechat.WEECHAT_RC_OK",
"def add_line(self, src_state: int, dst_state: int, color: str):\n src_y = src_state // self.width\n src_x = src_state % self.width\n dst_y = dst_state // self.width\n dst_x = dst_state % self.width\n\n self.lines[(src_x, src_y, dst_x, dst_y, color)] += 1",
"def update(self):\n self.__token += self.__lines[self.__i]\n self.__i += 1",
"def __add__(self, i):\n self.n += i\n plt.subplot(self.nx, self.ny, self.n)\n return True",
"def add_to_plot(self, line_name, points):\n points = [x * 100 for x in points]\n plt.plot(points, label=line_name)",
"def increment_value(self, d_value: float):\n self.set_value(self.get_value() + d_value)\n return self",
"def next_line():\r\n set_point(point().next_line())",
"def advance(self):\n if self.has_more_commands():\n self.counter += 1\n # if self._is_label():\n # self.counter += 1"
] |
[
"0.64690274",
"0.6017067",
"0.5998562",
"0.587672",
"0.5860527",
"0.5846188",
"0.5840155",
"0.5809756",
"0.57616186",
"0.57600725",
"0.57464296",
"0.5722578",
"0.5712588",
"0.5707973",
"0.5702293",
"0.57016605",
"0.5681828",
"0.5676111",
"0.5673482",
"0.56705654",
"0.56421065",
"0.5635648",
"0.56274945",
"0.56219053",
"0.5610206",
"0.5595161",
"0.55621773",
"0.5547686",
"0.5534935",
"0.5519696"
] |
0.87534755
|
0
|
Increment score by x, and change the label.
|
def increment_score(self, x=1):
self.score += x
styled_set_label_text(self.score_display, "Score: "+str(self.score))
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def increase_score(self):\n self.score += 1",
"def updateScore(score):\n return score + 1",
"def augmenter_score():\n\n global label_score\n global score\n\n score += 1\n label_score.config(text= \"score : \" + str(score))",
"def updateScore(self, score):\n self.__score += score",
"def update_score(self, score: int) -> int:\n self.score += score\n return self.score",
"def increase_score(self, increase):\n if increase > 0:\n self.__score += increase",
"def l_point(self):\n self.l_score += 1\n self.update_scoreboard()",
"def update_score(self, mark):\n if mark == 'X':\n self.model.game_score[self.model.player_1] += 1\n else:\n self.model.game_score[self.model.player_2] += 1",
"def increase(self, points):\n self.score += points",
"def l_point(self):\n self.l_score += 1\n self.update()",
"def update_score(self, board):\n self._score += 1",
"def increase_score(self):\n\n old_score = self.get_score()\n new_score = old_score + 1\n sql = \"UPDATE Users SET score = ? WHERE username = ?\"\n self.conn.execute(sql, (new_score, self.username))\n self.conn.commit()",
"def r_point(self):\n self.r_score += 1\n self.update_scoreboard()",
"def update_turn_score(self, score):\n\n # Increment the attribute by the passed value\n self._current_score += score",
"def update_score(self):\n score_text = ' ' + str(self.x_score) + ' - ' + str(self.o_score) + ' '\n self.Score_Label.configure(text=score_text, foreground='#FFFFFF')",
"def add_score(self, score):\n self._score += score",
"def win(self):\n self.score += 1\n self.ids['score'].text = 'SCORE: ' + str(self.score)",
"def r_point(self):\n self.r_score += 1\n self.update()",
"def hit(self, label=None):\n self.labels[label] += 1",
"def set_score(self, change):\n self._score = self._score + change",
"def update_score(self, engine, *args):\n #pdb.set_trace()\n self.score_label.text = \"Gold: {}/{}\".format(str(engine.score),\n str(engine.win_score))",
"def update_score():\n pass",
"def inc(self, labels: dict[str, str]):\n\n val = self.get(labels)\n\n if val is None:\n val = 0\n\n val += 1\n\n self.set(labels, val)",
"def set_score(self, points):\n self.score += points",
"def update(self):\n self.clear()\n self.score += 1\n self.write(f\"Score : {self.score}\",\n align=\"center\", font=(\"Arial Black\", 20))",
"def change_score(self, change: float=1):\n self._score += change",
"def change_score(self, change: float = 1):\n self._score += change",
"def add_score(score):\n global SCORE\n SCORE = SCORE + score\n # update the display\n mvaddstr(1, 2, \"Score:\", color_pair(HEADING_COLOUR) | A_BOLD)\n mvaddstr(1, 9, \"%d\" % SCORE, color_pair(TEXT_COLOUR) | A_BOLD)",
"def _tally(self, score):\n self._score[self._turn] += score",
"def add_point(self):\n self.total_score = self.total_score + 1\n if self.total_score // 10 == 0:\n self.level = self.total_score / 10"
] |
[
"0.7557804",
"0.73200655",
"0.7297284",
"0.72214425",
"0.70572585",
"0.70543677",
"0.69891006",
"0.694322",
"0.6930679",
"0.6875506",
"0.68736583",
"0.68078065",
"0.68063426",
"0.6762595",
"0.67082787",
"0.6703826",
"0.6693392",
"0.66802347",
"0.6653046",
"0.6616634",
"0.66058207",
"0.6580904",
"0.65803415",
"0.65597284",
"0.65553284",
"0.6519006",
"0.65189373",
"0.6448361",
"0.64476085",
"0.64347744"
] |
0.9048463
|
0
|
Increment level by 1, and change the label. Also call make_timer and hook up the resulting function with glib.timeout_add, to be called every 2.0/(level+3) seconds.
|
def increment_level(self):
self.level += 1
styled_set_label_text(self.level_display, "Level: "+str(self.level))
glib.timeout_add(2000//(self.level+3), self.make_timer(self.level))
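
glib.timeout_add expects an interval in milliseconds, so 2000 // (self.level + 3) is the integer-millisecond form of 2.0 / (level + 3) seconds. A quick standalone sanity check of the resulting intervals (assumed sketch):

for level in (1, 2, 5, 10):
    print(level, 2000 // (level + 3), "ms")  # 500 ms, 400 ms, 250 ms, 153 ms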
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def change_state(self, level: int, timed: int = 0) -> None:\n self._pilothouse.queue_change(level, timed)",
"def timer_callback(self):\n # There're 5 logger-level in ROS 2 get_logger() System.\n # Try out and watch whats difference.\n self.get_logger().debug(f'==== Hello ROS 2 : {self.count}====')\n self.get_logger().info(f'==== Hello ROS 2 : {self.count}====')\n self.get_logger().warn(f'==== Hello ROS 2 : {self.count}====')\n self.get_logger().error(f'==== Hello ROS 2 : {self.count}====')\n self.get_logger().fatal(f'==== Hello ROS 2 : {self.count}====')\n\n self.count += 1",
"def _tally(self, user_gpio, level, tick):\n self.count += 1",
"def main():\n running = True\n sense.show_message(\"Select the level\",\n text_colour=WHITE, scroll_speed=0.05)\n sleep(0.5)\n lvl = 0 #(0 = level 1, 1 = level 2, etc.)\n sense.show_letter(lvl_name[lvl],\n text_colour=WHITE)\n while running:\n for event in sense.stick.get_events():\n if event.action == 'pressed':\n if event.direction == 'left': #select a lower level.\n if lvl >= 1:\n lvl = lvl-1\n sense.show_letter(lvl_name[lvl],\n text_colour=WHITE)\n else:\n pass\n elif event.direction == 'right': #select a higher level.\n if lvl <= len(lvl_name)-2:\n lvl = lvl+1\n sense.show_letter(lvl_name[lvl],\n text_colour=WHITE)\n else:\n pass\n elif event.direction == 'down':#turn off the game\n running = False\n sense.clear()\n elif event.direction == 'up':#turn off the game\n running = False\n sense.clear()\n elif event.direction == 'middle':#start the selected level\n running = False\n start_level(levels[lvl])",
"def tick():\n global counter\n counter += 1",
"def tick():\n\n global time1\n # get the current local time from the PC\n time2 = time.strftime(\"%H:%M:%S\")\n # if time string has changed, update it\n if time2 != time1:\n time1 = time2\n timeLabel.config(text=time2)\n # calls itself every 200 milliseconds\n # to update the time display as needed\n # could use >200 ms, but display gets jerky\n timeLabel.after(200, tick)",
"def use_level(self, level, grid=None):\n\n if grid is not None:\n self.grid = grid\n self.level = level\n self._gevent_seq = []\n self._gevent_queue = Queue.Queue()\n level.add_event_listener(self._new_event)\n self._new_map()",
"def update_label(\r\n self,\r\n root,\r\n label_var,\r\n text = \"\",\r\n delay = 2 #seconds\r\n ):\r\n label_var.set(text)\r\n root.update()\r\n time.sleep(delay)",
"def printStatus(level,text):\n\tglobal t0\n\tpre = \"[{0:>7.2f}] \".format(time.time()-t0)\n\tfor x in range(0,level):\n\t\tpre += \"-\"\n\tpre += \"> \"\n\tprint(pre+text)",
"def timer_handler():\r\n \r\n global elapsed_time\r\n elapsed_time += 1",
"def update(self, func):\n if self.current_time == 0:\n func()\n return\n self.current_time -= 1\n hours = self.current_time // 3600\n minutes = self.current_time % 3600 // 60\n seconds = self.current_time % 60\n try:\n self.timer_label.setText('%02d:%02d:%02d' % (hours, minutes, seconds))\n if self.current_time <= 10:\n self.timer_label.setStyleSheet('color: red')\n Qt.QTimer().singleShot(1000, lambda: self.update(func))\n except RuntimeError:\n return",
"def UpdateLabel(self) -> _n_6_t_0:",
"def timer_change(self):\n if self.time < 999:\n self.time += 1\n self.time_lcd.display(self.time)\n else:\n self.timer.stop()",
"def run(self, start_level):\n self.world.set_level(self.world.levels[start_level])\n self.goal = self.world.level.end_time()\n\n time_of_death = None\n level_start = pygame.time.get_ticks() / 1000\n\n while True:\n\n time = (pygame.time.get_ticks() / 1000) - level_start\n\n # TODO: Remove some day\n self.stats['fps'] = self.clock.get_fps()\n\n if time > 1 and time < 3 and not self.world.stage_start:\n self.world.stage_start = True\n self.assets.sounds['incoming-alarm'].play()\n elif time > 3:\n self.world.stage_start = False\n\n if time > self.goal + 3:\n self.world.set_level(self.world.levels[self.world.level.number])\n self.goal = self.world.level.end_time()\n level_start = pygame.time.get_ticks() / 1000 # Reset timer\n continue\n\n if time > self.goal and not self.world.stage_clear:\n if self.world.level.number == len(self.world.levels):\n return 'victory' # Beat the final level\n self.world.stage_clear = True\n self.assets.sounds['level-success'].play()\n\n for event in pygame.event.get():\n if event.type == QUIT:\n return 'quit'\n elif event.type in (KEYUP, KEYDOWN):\n self.world.hero.receive_message(event.type, event.key)\n if event.key in (K_q, K_ESCAPE):\n return 'quit'\n elif event.key == K_p:\n if self.pause_game() == 'quit':\n return 'quit'\n\n self.world.update(time)\n\n if self.world.infection >= 100:\n return 'infected'\n\n self.collider.update()\n\n if self.world.hero.dead:\n if time_of_death is None:\n time_of_death = time\n else:\n if time - time_of_death > 2:\n return 'died'\n self.renderer.render()\n\n self.clock.tick(self.renderer.fps)",
"def draw_level(self, DISP, level:int):\r\n windowsize = DISP.get_size()\r\n Level_Text_Obj = self.FontObj.render(\"LEVEL: \" + str(level), True, Colors.colors['WHITE'])\r\n Level_Text_rec = Level_Text_Obj.get_rect()\r\n Level_Text_rec.top = windowsize[1] - Level_Text_rec.height\r\n Level_Text_rec.left = windowsize[0] - Level_Text_rec.width\r\n DISP.blit(Level_Text_Obj, Level_Text_rec)",
"def _set_label_level(self, prc=50.0):\n xpos, ypos = self.position_line(prc)\n\n percentg_lb = \"0.{}\".format(int(prc))\n label = pg.TextItem(text=' {} ({})'.format(percentg_lb, round(ypos[1], 2)),\n anchor=(0, 0.5),\n )\n\n # Lock Label to the Right of ROI\n if xpos[0] < ypos[0]:\n position = ypos[0]\n else:\n position = xpos[0]\n\n label.setPos(position, ypos[1])\n return label",
"def upgrage_level(self):\n print('level is upgraded on one point')\n self.level += 1",
"def tick():\n global n, message\n n += 1\n message = format(n)",
"def update_timer(self):\r\n frmt_time = \"%d:%02d\" % (self.time_minutes, self.time_seconds)\r\n self.time_seconds += 1\r\n if self.time_seconds == 60:\r\n self.time_seconds = 0\r\n self.time_minutes += 1\r\n\r\n self.mainWidget.statusLabel.setText(\"{} {} --- {} {} --- {}\".format(self.elapsedTimeString,\r\n frmt_time,\r\n self.freeSpaceString,\r\n get_free_space(self.config.videodir),\r\n self.recordingString))",
"def my_settings_function():\n global level\n level += 1",
"def __change_level(self, level):\n self.level = level",
"def change_level(self):\n new_level = GameLevel[self.scoreboard.current_level]\n self.greeterboard.reset(level=new_level, msg='')\n self.end_game(i18n.OUT_MSG_NEW_GAME)\n self.init_game_metrics()",
"def node_timer(self, mode, interval):\n self.node_total += interval\n print '{:s}: {:.3f}'.format(mode, self.node_total / self.node_count)\n self.node_count += 1",
"def _update_power_label(self):\n\n #Checks if > 0.5s has elapsed since the last change to the power reading label\n #I do this since otherwise the text label updates too quickly and it's annoying\n #to read.\n currTime = time.time()\n if currTime - self._last_power_text_update > 0.5:\n #If it updates, reads in the power and updates\n #TODO: Read the power in one function only and then all of the places that use it (updating feedback, updating power label, and plotting)\n #access that member variable. Not a huge deal will slightly speed it up I guess and is a bit cleaner.\n power = self.gain*np.array(self._ai_client.get_ai_voltage(self._ai_channel, max_range=self.max_input_voltage))\n self.widgets['label_power'].setText(str(power[-1]))\n self._last_power = power[-1]/self.gain\n self._last_power_text_update = currTime",
"def _updateLevel(self, level, lastUpdate, time, timeout):\n\t\ttimeoutsPassed = (time - lastUpdate) / timeout\n\t\treturn max(0, level - timeoutsPassed)",
"def update():\n global current_level\n # Initialization (only runs on start/restart)\n player = Player()\n\n walls, goals, start = parse_level(levels[current_level])\n player.centerx = start[0]\n player.centery = start[1]\n\n # Main update loop\n while True:\n update_player(player, delta())\n draw_player(player)\n\n for wall in walls:\n window = pg.display.get_surface()\n pg.draw.rect(window, pg.Color(100, 100, 100), wall)\n\n player_vel, wall_vel, overlap = solve_rect_overlap(player,\n wall,\n player.velocity,\n mass_b=0,\n bounce=0.1)\n player.velocity = player_vel\n\n for goal in goals:\n window = pg.display.get_surface()\n pg.draw.rect(window, pg.Color(20, 100, 20), goal)\n\n normal, depth = overlap_data(player, goal)\n if depth > 0:\n current_level = (current_level + 1) % len(levels)\n restart()\n\n draw_text(f\"Level: {current_level + 1}\", (0, 0))\n\n # Main loop ends here, put your code above this line\n yield",
"def start_countdown(self):\n top2=tk.Toplevel(self.root, bg=\"lightyellow\")\n top2.geometry(\"485x753+900+300\")\n ''' BOOSTERS '''\n # Wealth Clock\n self.label_wealth = tk.IntVar()\n self.label_wealth.set(self.ctr)\n tk.Label(top2, text=\"Wealth Clock\", bg=\"lightblue\", width=20, font=(\"Verdans\", 15)).grid(column=0, row=0)\n tk.Label(top2, textvariable=self.label_wealth, bg=\"lightblue\", width=15, font=(\"Verdans\", 15)).grid(column=1, row=0)\n tk.Button(top2, text=\"Restart\", bg=\"lightblue\", width=10, font=(\"Verdans\", 10), command=self.resetWealth).grid(column=2, row=0)\n # Field Boost\n self.label_field = tk.IntVar()\n self.label_field.set(self.ctr)\n tk.Label(top2, text=\"Field Boost\", bg=\"lightblue\", width=20, font=(\"Verdans\", 15)).grid(column=0, row=1)\n tk.Label(top2, textvariable=self.label_field, bg=\"lightblue\", width=15, font=(\"Verdans\", 15)).grid(column=1, row=1)\n tk.Button(top2, text=\"Restart\", bg=\"lightblue\", width=10, font=(\"Verdans\", 10), command=self.resetField).grid(column=2, row=1)\n # Blue Field Boost\n self.label_bluefield = tk.IntVar()\n self.label_bluefield.set(self.ctr)\n tk.Label(top2, text=\"Blue Field Boost\", bg=\"lightblue\", width=20, font=(\"Verdans\", 15)).grid(column=0, row=2)\n tk.Label(top2, textvariable=self.label_bluefield, bg=\"lightblue\", width=15, font=(\"Verdans\", 15)).grid(column=1, row=2)\n tk.Button(top2, text=\"Restart\", bg=\"lightblue\", width=10, font=(\"Verdans\", 10), command=self.resetBlueField).grid(column=2, row=2)\n # Red Field Boost\n self.label_redfield = tk.IntVar()\n self.label_redfield.set(self.ctr)\n tk.Label(top2, text=\"Red Field Boost\", bg=\"lightblue\", width=20, font=(\"Verdans\", 15)).grid(column=0, row=3)\n tk.Label(top2, textvariable=self.label_redfield, bg=\"lightblue\", width=15, font=(\"Verdans\", 15)).grid(column=1, row=3)\n tk.Button(top2, text=\"Restart\", bg=\"lightblue\", width=10, font=(\"Verdans\", 10), command=self.resetRedField).grid(column=2, row=3)\n ''' DISPENSERS '''\n # Star Area Jelly Dispenser\n self.label_jelly = tk.IntVar()\n self.label_jelly.set(self.ctr)\n tk.Label(top2, text=\"Royal Jelly\", bg=\"lightgreen\", width=20, font=(\"Verdans\", 15)).grid(column=0, row=10)\n tk.Label(top2, textvariable=self.label_jelly, bg=\"lightgreen\", width=15, font=(\"Verdans\", 15)).grid(column=1, row=10)\n tk.Button(top2, text=\"Restart\", bg=\"lightgreen\", width=10, font=(\"Verdans\", 10), command=self.resetJelly).grid(column=2, row=10)\n # Ant Pass\n self.label_ant = tk.IntVar()\n self.label_ant.set(self.ctr)\n tk.Label(top2, text=\"Ant Pass\", bg=\"lightgreen\", width=20, font=(\"Verdans\", 15)).grid(column=0, row=11)\n tk.Label(top2, textvariable=self.label_ant, bg=\"lightgreen\", width=15, font=(\"Verdans\", 15)).grid(column=1, row=11)\n tk.Button(top2, text=\"Restart\", bg=\"lightgreen\", width=10, font=(\"Verdans\", 10), command=self.resetAnt).grid(column=2, row=11)\n # Blueberry Dispenser\n self.label_blueberry = tk.IntVar()\n self.label_blueberry.set(self.ctr)\n tk.Label(top2, text=\"Blueberry Dispenser\", bg=\"lightgreen\", width=20, font=(\"Verdans\", 15)).grid(column=0, row=12)\n tk.Label(top2, textvariable=self.label_blueberry, bg=\"lightgreen\", width=15, font=(\"Verdans\", 15)).grid(column=1, row=12)\n tk.Button(top2, text=\"Restart\", bg=\"lightgreen\", width=10, font=(\"Verdans\", 10), command=self.resetBlueberry).grid(column=2, row=12)\n # Strawberry Dispenser\n self.label_strawberry = tk.IntVar()\n self.label_strawberry.set(self.ctr)\n tk.Label(top2, 
text=\"Strawberry Dispenser\", bg=\"lightgreen\", width=20, font=(\"Verdans\", 15)).grid(column=0, row=13)\n tk.Label(top2, textvariable=self.label_strawberry, bg=\"lightgreen\", width=15, font=(\"Verdans\", 15)).grid(column=1, row=13)\n tk.Button(top2, text=\"Restart\", bg=\"lightgreen\", width=10, font=(\"Verdans\", 10), command=self.resetStrawberry).grid(column=2, row=13)\n # Honey Dispenser\n self.label_honey = tk.IntVar()\n self.label_honey.set(self.ctr)\n tk.Label(top2, text=\"Honey Dispenser\", bg=\"lightgreen\", width=20, font=(\"Verdans\", 15)).grid(column=0, row=14)\n tk.Label(top2, textvariable=self.label_honey, bg=\"lightgreen\", width=15, font=(\"Verdans\", 15)).grid(column=1, row=14)\n tk.Button(top2, text=\"Restart\", bg=\"lightgreen\", width=10, font=(\"Verdans\", 10), command=self.resetHoney).grid(column=2, row=14)\n # Treat Dispenser\n self.label_treat = tk.IntVar()\n self.label_treat.set(self.ctr)\n tk.Label(top2, text=\"Treat Dispenser\", bg=\"lightgreen\", width=20, font=(\"Verdans\", 15)).grid(column=0, row=15)\n tk.Label(top2, textvariable=self.label_treat, bg=\"lightgreen\", width=15, font=(\"Verdans\", 15)).grid(column=1, row=15)\n tk.Button(top2, text=\"Restart\", bg=\"lightgreen\", width=10, font=(\"Verdans\", 10), command=self.resetTreat).grid(column=2, row=15)\n # Glue Dispenser\n self.label_glue = tk.IntVar()\n self.label_glue.set(self.ctr)\n tk.Label(top2, text=\"Glue Dispenser\", bg=\"lightgreen\", width=20, font=(\"Verdans\", 15)).grid(column=0, row=16)\n tk.Label(top2, textvariable=self.label_glue, bg=\"lightgreen\", width=15, font=(\"Verdans\", 15)).grid(column=1, row=16)\n tk.Button(top2, text=\"Restart\", bg=\"lightgreen\", width=10, font=(\"Verdans\", 10), command=self.resetGlue).grid(column=2, row=16)\n ''' MOBS '''\n self.label_ladybug= tk.IntVar()\n self.label_ladybug.set(self.ctr)\n tk.Label(top2, text=\"Lady Bug\", bg=\"red\", width=20, font=(\"Verdans\", 15)).grid(column=0, row=20)\n tk.Label(top2, textvariable=self.label_ladybug, bg=\"red\", width=15, font=(\"Verdans\", 15)).grid(column=1, row=20)\n tk.Button(top2, text=\"Restart\", bg=\"red\", width=10, font=(\"Verdans\", 10), command=self.resetLadybug).grid(column=2, row=20)\n self.label_rhino= tk.IntVar()\n self.label_rhino.set(self.ctr)\n tk.Label(top2, text=\"Rhino Beetle\", bg=\"red\", width=20, font=(\"Verdans\", 15)).grid(column=0, row=21)\n tk.Label(top2, textvariable=self.label_rhino, bg=\"red\", width=15, font=(\"Verdans\", 15)).grid(column=1, row=21)\n tk.Button(top2, text=\"Restart\", bg=\"red\", width=10, font=(\"Verdans\", 10), command=self.resetRhino).grid(column=2, row=21)\n self.label_spider= tk.IntVar()\n self.label_spider.set(self.ctr)\n tk.Label(top2, text=\"Spider\", bg=\"red\", width=20, font=(\"Verdans\", 15)).grid(column=0, row=22)\n tk.Label(top2, textvariable=self.label_spider, bg=\"red\", width=15, font=(\"Verdans\", 15)).grid(column=1, row=22)\n tk.Button(top2, text=\"Restart\", bg=\"red\", width=10, font=(\"Verdans\", 10), command=self.resetSpider).grid(column=2, row=22)\n self.label_mantis= tk.IntVar()\n self.label_mantis.set(self.ctr)\n tk.Label(top2, text=\"Mantis\", bg=\"red\", width=20, font=(\"Verdans\", 15)).grid(column=0, row=23)\n tk.Label(top2, textvariable=self.label_mantis, bg=\"red\", width=15, font=(\"Verdans\", 15)).grid(column=1, row=23)\n tk.Button(top2, text=\"Restart\", bg=\"red\", width=10, font=(\"Verdans\", 10), command=self.resetMantis).grid(column=2, row=23)\n self.label_scorpion= tk.IntVar()\n self.label_scorpion.set(self.ctr)\n 
tk.Label(top2, text=\"Scorpion\", bg=\"red\", width=20, font=(\"Verdans\", 15)).grid(column=0, row=24)\n tk.Label(top2, textvariable=self.label_scorpion, bg=\"red\", width=15, font=(\"Verdans\", 15)).grid(column=1, row=24)\n tk.Button(top2, text=\"Restart\", bg=\"red\", width=10, font=(\"Verdans\", 10), command=self.resetScorpion).grid(column=2, row=24)\n self.label_werewolf= tk.IntVar()\n self.label_werewolf.set(self.ctr)\n tk.Label(top2, text=\"Werewolf\", bg=\"red\", width=20, font=(\"Verdans\", 15)).grid(column=0, row=25)\n tk.Label(top2, textvariable=self.label_werewolf, bg=\"red\", width=15, font=(\"Verdans\", 15)).grid(column=1, row=25)\n tk.Button(top2, text=\"Restart\", bg=\"red\", width=10, font=(\"Verdans\", 10), command=self.resetWerewolf).grid(column=2, row=25)\n self.label_snail= tk.IntVar()\n self.label_snail.set(self.ctr)\n tk.Label(top2, text=\"Stump Snail\", bg=\"red\", width=20, font=(\"Verdans\", 15)).grid(column=0, row=26)\n tk.Label(top2, textvariable=self.label_snail, bg=\"red\", width=15, font=(\"Verdans\", 15)).grid(column=1, row=26)\n tk.Button(top2, text=\"Restart\", bg=\"red\", width=10, font=(\"Verdans\", 10), command=self.resetSnail).grid(column=2, row=26)\n self.label_cavemonster= tk.IntVar()\n self.label_cavemonster.set(self.ctr)\n tk.Label(top2, text=\"Cave Monster\", bg=\"red\", width=20, font=(\"Verdans\", 15)).grid(column=0, row=27)\n tk.Label(top2, textvariable=self.label_cavemonster, bg=\"red\", width=15, font=(\"Verdans\", 15)).grid(column=1, row=27)\n tk.Button(top2, text=\"Restart\", bg=\"red\", width=10, font=(\"Verdans\", 10), command=self.resetCavemonster).grid(column=2, row=27)\n ''' BOSSES '''\n self.label_king = tk.IntVar()\n self.label_king.set(self.ctr)\n tk.Label(top2, text=\"King Beetle\", bg=\"maroon\", width=20, font=(\"Verdans\", 15)).grid(column=0, row=30)\n tk.Label(top2, textvariable=self.label_king, bg=\"maroon\", width=15, font=(\"Verdans\", 15)).grid(column=1, row=30)\n tk.Button(top2, text=\"Restart\", bg=\"maroon\", width=10, font=(\"Verdans\", 10), command=self.resetKing).grid(column=2, row=30)\n self.label_tunnel = tk.IntVar()\n self.label_tunnel.set(self.ctr)\n tk.Label(top2, text=\"Tunnel Bear\", bg=\"maroon\", width=20, font=(\"Verdans\", 15)).grid(column=0, row=31)\n tk.Label(top2, textvariable=self.label_tunnel, bg=\"maroon\", width=15, font=(\"Verdans\", 15)).grid(column=1, row=31)\n tk.Button(top2, text=\"Restart\", bg=\"maroon\", width=10, font=(\"Verdans\", 10), command=self.resetTunnel).grid(column=2, row=31)\n self.label_stick = tk.IntVar()\n self.label_stick.set(self.ctr)\n tk.Label(top2, text=\"Stick Bug\", bg=\"maroon\", width=20, font=(\"Verdans\", 15)).grid(column=0, row=32)\n tk.Label(top2, textvariable=self.label_stick, bg=\"maroon\", width=15, font=(\"Verdans\", 15)).grid(column=1, row=32)\n tk.Button(top2, text=\"Restart\", bg=\"maroon\", width=10, font=(\"Verdans\", 10), command=self.resetStick).grid(column=2, row=32)\n ''' QUESTS '''\n self.label_brownbear = tk.IntVar()\n self.label_brownbear.set(self.ctr)\n tk.Label(top2, text=\"Brown Bear Quest\", bg=\"teal\", width=20, font=(\"Verdans\", 15)).grid(column=0, row=40)\n tk.Label(top2, textvariable=self.label_brownbear, bg=\"teal\", width=15, font=(\"Verdans\", 15)).grid(column=1, row=40)\n tk.Button(top2, text=\"Restart\", bg=\"teal\", width=10, font=(\"Verdans\", 10), command=self.resetBrownbear).grid(column=2, row=40)\n self.label_blackbear = tk.IntVar()\n self.label_blackbear.set(self.ctr)\n tk.Label(top2, text=\"Black Bear Quest\", bg=\"teal\", 
width=20, font=(\"Verdans\", 15)).grid(column=0, row=41)\n tk.Label(top2, textvariable=self.label_blackbear, bg=\"teal\", width=15, font=(\"Verdans\", 15)).grid(column=1, row=41)\n tk.Button(top2, text=\"Restart\", bg=\"teal\", width=10, font=(\"Verdans\", 10), command=self.resetBlackbear).grid(column=2, row=41)\n ''' QUESTS '''\n self.label_honeystorm = tk.IntVar()\n self.label_honeystorm.set(self.ctr)\n tk.Label(top2, text=\"Honey Storm\", bg=\"yellow\", width=20, font=(\"Verdans\", 15)).grid(column=0, row=50)\n tk.Label(top2, textvariable=self.label_honeystorm, bg=\"yellow\", width=15, font=(\"Verdans\", 15)).grid(column=1, row=50)\n tk.Button(top2, text=\"Restart\", bg=\"yellow\", width=10, font=(\"Verdans\", 10), command=self.resetHoneystorm).grid(column=2, row=50)\n self.label_sproutsummoner = tk.IntVar()\n self.label_sproutsummoner.set(self.ctr)\n tk.Label(top2, text=\"Sprout Summoner\", bg=\"yellow\", width=20, font=(\"Verdans\", 15)).grid(column=0, row=51)\n tk.Label(top2, textvariable=self.label_sproutsummoner, bg=\"yellow\", width=15, font=(\"Verdans\", 15)).grid(column=1, row=51)\n tk.Button(top2, text=\"Restart\", bg=\"yellow\", width=10, font=(\"Verdans\", 10), command=self.resetSproutsummoner).grid(column=2, row=51)\n\n if self.ctr > 0:\n self.update()\n else:\n self.top2.destroy()",
"def tick(self):\n self.current_count += 1\n self.progress(self.current_count)",
"def level_screen(screen,wof_settings, current_level):\n \n title_image = 'images/bitcoin.bmp'\n level_text = 'Level %i' % (current_level + 1)\n text = [level_text,\n '',\n 'Press any key to play or Esc to quit.']\n \n text_font = 'fonts/Future TimeSplitters.otf'\n text_font_size = 26\n \n displayTextToScreen(wof_settings,screen,title_image,text,text_font,text_font_size)\n sleep(1.)",
"def add_level(self, level):\n return"
] |
[
"0.58169186",
"0.5782551",
"0.57647955",
"0.57608676",
"0.5739205",
"0.5662492",
"0.5624032",
"0.5617376",
"0.5608305",
"0.5559562",
"0.5549147",
"0.55384463",
"0.5514774",
"0.5487441",
"0.54303175",
"0.54259276",
"0.5405828",
"0.53985447",
"0.5396091",
"0.53815854",
"0.5379012",
"0.5375969",
"0.53526783",
"0.5344738",
"0.5307973",
"0.52830005",
"0.5259897",
"0.52387303",
"0.523033",
"0.5220021"
] |
0.8231671
|
0
|
Creates a callback function on_timer, which moves the current piece down (without granting a point). If the current level moves beyond lev, on_timer stops working and will need to be replaced.
|
def make_timer(self, lev):
def on_timer():
if (lev == self.level) and not self.over: # finds lev in scope
self.move_curr_piece((0, 1))
return True
else:
return False # kills on_timer
return on_timer
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def timed_callback(self, dx, dy):\n def _timed_callback():\n # self.move_it(dx, dy)\n if self.timed_tick_counter == 3:\n GEvent.post_event(\n GEvent.ENGINE,\n GEvent.LOGGER,\n self,\n GEvent.SCENE,\n \"GObject is being deleted\")\n GEvent.post_event(\n self.timed_event_type,\n GEvent.DELETE,\n self,\n self.timed_event_destination,\n {\n \"callback\": None,\n \"validation\": None,\n })\n return _timed_callback",
"def _update_cb(self, timeout, offset=0):\n if timeout <= 0:\n timeout = float(\"inf\")\n start = time.time()\n is_done = [False]\n def cb(e=None):\n if e is None and not is_done[0]:\n now = time.time()\n if now-start > timeout:\n is_done[0] = True\n blutil.notice(\"motor position: {1:{0}}\".format(self._prec(), self.wm() - offset))\n return cb",
"def timer_handler():\r\n \r\n global elapsed_time\r\n elapsed_time += 1",
"def cb_move(self, event):\n if not self.move_timer.IsRunning():\n self.move_timer.StartOnce(2000)",
"async def _cb(self, gpio, level, tick):\n\n if level < asyncpio.TIMEOUT:\n\n if self.in_code == False:\n self.bits = 1\n self.num = 0\n\n self.in_code = True\n self.code_timeout = 0\n await self.pi.set_watchdog(self.gpio_0, self.bit_timeout)\n await self.pi.set_watchdog(self.gpio_1, self.bit_timeout)\n else:\n self.bits += 1\n self.num = self.num << 1\n\n if gpio == self.gpio_0:\n self.code_timeout = self.code_timeout & 2 # clear gpio 0 timeout\n else:\n self.code_timeout = self.code_timeout & 1 # clear gpio 1 timeout\n self.num = self.num | 1\n\n else:\n\n if self.in_code:\n\n if gpio == self.gpio_0:\n self.code_timeout = self.code_timeout | 1 # timeout gpio 0\n else:\n self.code_timeout = self.code_timeout | 2 # timeout gpio 1\n\n if self.code_timeout == 3: # both gpios timed out\n await self.pi.set_watchdog(self.gpio_0, 0)\n await self.pi.set_watchdog(self.gpio_1, 0)\n self.in_code = False\n self.callback(self.bits, self.num)",
"def update():\n\twith locked_level(create=False) as level:\n\t\tif level is None:\n\t\t\t# There is no timer\n\t\t\tlogging.info('There is currently no timer')\n\t\t\treturn\n\t\tdelay = pickle.load(level)\n\t\tif delay.source_position <= DIM_FLOOR:\n\t\t\tos.remove(LEVEL_FILE)\n\t\t\treturn\n\t\tnow = datetime.datetime.utcnow()\n\t\tremaining = delay.when - now\n\t\tif remaining >= datetime.timedelta(minutes=1):\n\t\t\tlogging.info('Aborting because the timer still has: %s', remaining)\n\t\t\treturn\n\t\tif remaining.total_seconds() > 0:\n\t\t\tlogging.info('Sleeping because the timer still has: %s', remaining)\n\t\t\tfcntl.lockf(level, fcntl.LOCK_UN)\n\t\t\ttime.sleep(remaining.total_seconds())\n\t\t\tfcntl.lockf(level, fcntl.LOCK_EX)\n\n\t\tif delay.expected_positions:\n\t\t\t# There shouldn't be any expected positions left, so something has interrupted the dimmer\n\t\t\tlogging.info('Expected positions were not consumed, so reverting from %d to %d',\n\t\t\t\tdelay.target_position, delay.source_position)\n\t\t\tposition = delay.source_position\n\n\t\tposition_increment = min(\n\t\t\tdelay.target_position - DIM_FLOOR,\n\t\t\tmax(\n\t\t\t\t1,\n\t\t\t\tint((100 - DIM_FLOOR) * DIM_DURATION_MAX_INCREMENT.total_seconds() / DIM_DURATION_TOTAL.total_seconds())\n\t\t\t)\n\t\t)\n\t\tif position_increment <= 0:\n\t\t\treturn\n\t\tposition = delay.target_position - position_increment\n\t\t# This will be near DIM_DURATION_MAX_INCREMENT but accounts for rounding\n\t\tramp_time = datetime.timedelta(seconds=int(position_increment / (100 - DIM_FLOOR) * DIM_DURATION_TOTAL.total_seconds()))\n\n\t\tif delay.target_position > DIM_FLOOR:\n\t\t\t# The switch reports the old and then the new position when it dims\n\t\t\tnext_delay = delay_record.Delay(now + ramp_time, delay.target_position, position, [delay.target_position, position])\n\t\t\texpect(level, next_delay)\n\t\telse:\n\t\t\tos.remove(LEVEL_FILE)\n\n\tlogging.info('Dimming to %d over %s', position, ramp_time)\n\twith ozwd_util.get_thrift_client() as thrift_client, (\n\t\t\tozwd_util.get_stompy_client()) as stompy_client:\n\t\tozwd_set_value.set_value_connected(DIMMER_RAMP_TIME_VALUE.value, ramp_time.total_seconds(), thrift_client)\n\t\ttry:\n\t\t\tozwd_set_value.set_value_connected(DIMMER_VALUE.value, position, thrift_client)\n\t\tfinally:\n\t\t\tozwd_set_value.set_value_connected(DIMMER_RAMP_TIME_VALUE.value, 2, thrift_client)",
"async def on_timer_update(self, secs: int):\n pass",
"def minus_time(cur_button):\r\n\r\n def func():\r\n global time, game_on\r\n time -= 1\r\n if game_on:\r\n change_stopwatch(time)\r\n change_timer_color(time)\r\n if time == 0:\r\n game_on = True\r\n new_game(cur_button)\r\n else:\r\n window.after(1000, minus_time(cur_button))\r\n return func",
"def _ontimer(self, fun, t):\n if t == 0:\n self.cv.after_idle(fun)\n else:\n self.cv.after(t, fun)",
"def _updateLevel(self, level, lastUpdate, time, timeout):\n\t\ttimeoutsPassed = (time - lastUpdate) / timeout\n\t\treturn max(0, level - timeoutsPassed)",
"def callback(self, fun: Callable[[Timer], None] | None, /) -> None:",
"def callback(self, fun: Callable[[Timer], None] | None, /) -> None:",
"def caller(self,press,timer):\n self.set_numkey(press)\n self.alien_fire()\n self.moveBolts(timer)\n self.deleteBolts()\n self.change_PU(timer)\n self.col_detectorPU()\n self.col_detector()\n self.GameState()",
"def run(self, start_level):\n self.world.set_level(self.world.levels[start_level])\n self.goal = self.world.level.end_time()\n\n time_of_death = None\n level_start = pygame.time.get_ticks() / 1000\n\n while True:\n\n time = (pygame.time.get_ticks() / 1000) - level_start\n\n # TODO: Remove some day\n self.stats['fps'] = self.clock.get_fps()\n\n if time > 1 and time < 3 and not self.world.stage_start:\n self.world.stage_start = True\n self.assets.sounds['incoming-alarm'].play()\n elif time > 3:\n self.world.stage_start = False\n\n if time > self.goal + 3:\n self.world.set_level(self.world.levels[self.world.level.number])\n self.goal = self.world.level.end_time()\n level_start = pygame.time.get_ticks() / 1000 # Reset timer\n continue\n\n if time > self.goal and not self.world.stage_clear:\n if self.world.level.number == len(self.world.levels):\n return 'victory' # Beat the final level\n self.world.stage_clear = True\n self.assets.sounds['level-success'].play()\n\n for event in pygame.event.get():\n if event.type == QUIT:\n return 'quit'\n elif event.type in (KEYUP, KEYDOWN):\n self.world.hero.receive_message(event.type, event.key)\n if event.key in (K_q, K_ESCAPE):\n return 'quit'\n elif event.key == K_p:\n if self.pause_game() == 'quit':\n return 'quit'\n\n self.world.update(time)\n\n if self.world.infection >= 100:\n return 'infected'\n\n self.collider.update()\n\n if self.world.hero.dead:\n if time_of_death is None:\n time_of_death = time\n else:\n if time - time_of_death > 2:\n return 'died'\n self.renderer.render()\n\n self.clock.tick(self.renderer.fps)",
"def on_timer(context, data_type, data):\n pass",
"def onTimer(self, tid, userArg):\n\t\tDEBUG_MSG(\"%s::onTimer: %i, tid:%i, arg:%i\" % (self.getScriptName(), self.id, tid, userArg))\n\t\tNPCObject.onTimer(self, tid, userArg)\n\t\tAbility.onTimer(self, tid, userArg)\n\t\tAI.onTimer(self, tid, userArg)\n\t\t#AnimationState.onTimer(self, tid, userArg)\n\t\t#AnimationState.onMotionChanged(self, self.isMoving)",
"def countdown(timer, client):\r\n global gameover\r\n while timer :\r\n time.sleep(1)\r\n timer -= 1\r\n gameover = True",
"def downElevator(building, lift, improved_algorithm):\n\n start_position = lift.current_position\n while lift.current_position >= 0:\n total = 0\n # fill lift with people at current floor\n if improved_algorithm == True:\n fillLiftImproved(lift, building, lift.current_position, 'down')\n else:\n fillLift(lift, building, lift.current_position)\n # sort passengers in lift by destination\n quickSort(lift.current_passengers, 0, len(lift.current_passengers)-1)\n continue_down = False\n if (len(lift.current_passengers) == 0 and improved_algorithm == True):\n i = lift.current_position\n while i >= 0:\n passengers = building.down_dictionary[i]\n if passengers:\n continue_down = True\n passengers = building.up_dictionary[i]\n if passengers:\n continue_down = True\n i -= 1\n else:\n # first element in list will always be lowest\n if len(lift.current_passengers) != 0:\n if (lift.current_passengers[0] < lift.current_position):\n continue_down = True\n while True:\n index = binarySearch(lift.current_passengers, lift.current_position, len(lift.current_passengers)-1, 0)\n if index != 'not found':\n del lift.current_passengers[index]\n # counter for how many people removed at floor\n total += 1\n else:\n break\n\n if improved_algorithm == True:\n fillLiftImproved(lift, building, lift.current_position, 'down')\n else:\n fillLift(lift, building, lift.current_position)\n\n moveLift(total, building, lift, improved_algorithm)\n sleep(0.1)\n # increment counters\n lift.current_position -= 1\n lift.total_moves += 1\n # check to terminate lift\n continue_lift = continueCheck(building, lift)\n if continue_lift == False:\n printMoves(lift, improved_algorithm)\n break\n if continue_down == False and improved_algorithm == True:\n # begins going up from floor above, prevents double delivery\n upElevator((lift.current_position+1),building, lift, improved_algorithm)\n break\n\n # once bottom floor is reached\n continue_lift = continueCheck(building, lift)\n if continue_lift == True:\n upElevator((lift.current_position+1), building, lift, improved_algorithm)",
"def timerUp(self):\n self.setDown(False)",
"def timer_callback(*args):\n logging.debug(\"timer callback at %s\" % datetime.now())",
"def timer_callback(self):\n self.get_logger().debug(f\"Timer heartbeat {self.timer_count}\")\n self.timer_count += 1",
"def tmpDown(self, mSec):\n timer = QtCore.QTimer(self)\n timer.setSingleShot(True)\n self.connect(timer, QtCore.SIGNAL('timeout()'), self.timerUp)\n timer.start(mSec)\n self.setDown(True)",
"def timer_change(self):\n if self.time < 999:\n self.time += 1\n self.time_lcd.display(self.time)\n else:\n self.timer.stop()",
"def stop_handler():\r\n \r\n global rounds\r\n global score\r\n \r\n if timer.is_running():\r\n timer.stop()\r\n # Increases the number of rounds\r\n rounds += 1\r\n # If the timmer stoped in a whole second\r\n if elapsed_time % 10 == 0:\r\n # Increases the score\r\n score += 1",
"def floor_reached_callback(self, floor):\n\t\tself.currentFloor = floor\n\t\tself.set_floor_indicator_light()\n\t\tself.should_stop()",
"def animation_victoire():\n voiture = liste_vehicules[0].rectangle\n coords = jeu.coords(voiture)\n if coords[0] <= 600:\n jeu.move(voiture, 5, 0)\n jeu.after(5, animation_victoire) #On rappelle la fonction pour animer",
"def start_opponents_timer(self, timer, p):\n if self.running_timer is not None:\n self.after_cancel(self.running_timer)\n self.running_timer = None\n self.running_timer = self.after(1000, self.show_time, timer, p)\n else:\n # First move\n self.running_timer = self.after(1000, self.show_time, timer, p)",
"def ontimer(self, fun, t=0):\n self._ontimer(fun, t)",
"def timerCallback(self,evprent):\n self._odom_list.waitForTransform('map', 'base_footprint', rospy.Time(0), rospy.Duration(1.0))\n (position, orientation) = self._odom_list.lookupTransform('map','base_footprint', rospy.Time(0)) #finds the position and oriention of two objects relative to each other (hint: this returns arrays, while Pose uses lists)\n self._current.position.x = position[0]\n self._current.position.y = position[1]\n\n self._current.orientation.x = orientation[0]\n self._current.orientation.y = orientation[1]\n self._current.orientation.z = orientation[2]\n self._current.orientation.w = orientation[3]\n q = [self._current.orientation.x,\n self._current.orientation.y,\n self._current.orientation.z,\n self._current.orientation.w] # quaternion nonsense\n\n (roll, pitch, yaw) = euler_from_quaternion(q)",
"def rwsched_timer_callback(self, timer, user_data):\n logger.info(\"**** Voila1 Python TIMER callback ****\")\n logger.debug(timer)\n logger.debug(user_data)\n logger.debug(\"\\n\\n\")"
] |
[
"0.547525",
"0.54469144",
"0.5425048",
"0.5416489",
"0.54151726",
"0.54000765",
"0.53114885",
"0.53095424",
"0.5265004",
"0.5257605",
"0.51930314",
"0.51930314",
"0.51743674",
"0.5133805",
"0.51006556",
"0.5099005",
"0.5094788",
"0.508105",
"0.5074436",
"0.50398487",
"0.49785668",
"0.4969261",
"0.49603862",
"0.49403733",
"0.49385196",
"0.49363297",
"0.4910376",
"0.48754627",
"0.48743275",
"0.48638678"
] |
0.75402504
|
0
|
Generates a new piece and shows it; returns the old piece. Analogous to the next() operation for iterators.
|
def get_piece(self):
old = self.next_piece
new = self.create_piece()
self.next_piece = new
self.queue_draw()
return old
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def spawn_new_piece(self):\n\n del self.active_piece\n\n new_x = self.WIDTH // 2 - 1\n self.active_piece = Figure(random.choice(PIECE_TYPES), new_x, 0)",
"def update_next_piece(self, board):\n # next piece\n if board.next_shape:\n for preview_row_offset in range(4):\n self.stdscr.addstr(\n PREVIEW_ROW+preview_row_offset+BORDER_WIDTH,\n (PREVIEW_COLUMN-1)*BLOCK_WIDTH+BORDER_WIDTH*2,\n ' '*BLOCK_WIDTH,\n curses.color_pair(0)\n )\n for block in board.next_shape.blocks:\n self.stdscr.addstr(\n block.row_position+BORDER_WIDTH,\n block.column_position*BLOCK_WIDTH+BORDER_WIDTH*2,\n ' '*BLOCK_WIDTH,\n curses.color_pair(block.color)\n )",
"def expose(self, widget, event):\n cr = widget.window.cairo_create()\n cr.set_source_rgb(0.05, 0.05, 0.05)\n cr.paint()\n for pos in self.next_piece.occupying():\n self.paint_square(tuple_add(pos, (-1, 1)),\n self.next_piece.color, cr)",
"def new_piece(self):\n piece_type = pieces.PieceSelector.select_random_piece()\n return piece_type(game_config.NEXT_PIECE_POSX, game_config.NEXT_PIECE_POSY)",
"def place_new_piece(self):\n new_piece = PieceFactory.get_piece()\n new_piece = TransformPiece.transform(new_piece, PieceFactory.get_start_point(self.size, self.current_direction))\n self.active_piece = new_piece\n value = PieceFactory.get_value()\n for cell in self.active_piece:\n self.set_cell_value(cell, value)\n if self.piece_collision_exists(self.active_piece):\n self.handle_active_piece_collision()",
"def move_1_piece(context: GUI, old_coordinate, new_coordinate):\n\n old_tile = context.board.board_dict[old_coordinate]\n new_tile = context.board.board_dict[new_coordinate]\n\n new_tile.piece = old_tile.piece\n old_tile.piece = None\n\n context.update_move_printer(old_coordinate + \" \" + new_coordinate)",
"def copy(self):\n piece_type = type(self)\n new_piece = piece_type(self.player_name, self.coords, True)\n new_piece.id = self.id\n return new_piece",
"def change_piece(self, new_piece: Piece) -> None:\n self.piece = new_piece",
"def new_piece() -> dict:\n shape = random.choice(list(PIECES.keys()))\n # start the new piece above the board (i.e. y < 0)\n return {\n 'shape': shape,\n 'rotation': random.randint(0, len(PIECES[shape]) - 1),\n 'x': int(BOARDWIDTH / 2) - int(TEMPLATEWIDTH / 2),\n 'y': -2,\n 'color': PALETTE[shape]\n }",
"def compute_next(self):\n tpoints = []\n for i in range(len(self.body)):\n x = self.height - self.body[i].y - 1;\n y = self.body[i].x;\n tpoint = Tpoint(x,y)\n tpoints.append(tpoint)\n return Piece(tpoints);",
"def nextState(self, piece, pos):\n # Copy current pieceList to new state obj\n nextState = copy.deepcopy(self)\n\n nextState.movePiece(nextState.player.getCurrentPieceList(), piece, pos)\n\n return nextState",
"def start(self, piece):\n self._piece = piece",
"def draw(self):\n c = self.cards[0]\n self.cards = self.cards[1:]\n self.discards.append(c)\n return(c)",
"def eaten(self): # called when this piece has been 'eaten'\r\n \r\n self.board.removePiece((self.x, self.y)) # remove the 'Piece' object\r\n addr = self.x-25, self.y-25\r\n empty = Empty(addr)\r\n self.board.addPiece(empty) # replace it with the 'Empty' object\r",
"def get_piece(self, at):\n return self.nodes[at].piece",
"def new_tile(self):\r\n rand_x = random.randrange(self.width)\r\n rand_y = random.randrange(self.height)\r\n while self.get_tile(rand_y, rand_x) != 0:\r\n rand_x = random.randrange(self.width)\r\n rand_y = random.randrange(self.height)\r\n value = random.choice([2,2,2,2,2,2,2,2,2,4])\r\n del self.board[rand_y][rand_x]\r\n self.board[rand_y].insert(rand_x,value)\r\n return self.board",
"def display_piece(self, clear=False):\n for row, col in self.active_piece.positions:\n y, x = self.active_piece.origin[0] + row, self.active_piece.origin[1] + col\n self.board[y][x] = '#' if not clear else ' '",
"def addPiece(self, piece):\r\n \r\n self.pieces[(piece.x, piece.y)] = piece",
"def draw(self):\n\n return self.deck.popleft()",
"def generate_new_puzzle():\n new_puzzle = pb() \n\n # only generate solvable puzzles\n while not new_puzzle.is_solvable():\n new_puzzle = pb()\n\n return new_puzzle",
"def advance(self):\n self._current_inst += 1\n self._line = self._lines[self._current_inst].strip()",
"def draw(self):\n return self.cards.pop()",
"def draw(self, count=1):\n try:\n for i in range(count):\n yield self.deck.pop()\n except IndexError:\n yield None",
"def add_piece(self, piece: Model):\n new_piece_coordinates = piece.get_block_positions()\n for coordinates in new_piece_coordinates:\n if not self.piece_encompasses_coordinates(coordinates):\n continue\n else:\n print('GAME OVER')\n return False\n self.pieces.append(piece)\n piece.parent_board = self\n\n return True",
"def add_piece(self):\n self.active_piece = None\n piece_type = random.randint(0, len(TetrisPiece.PIECE_TYPES) - 1)\n max_row = 10 - TetrisPiece.get_piece_width(piece_type)\n origin = (0, random.randint(0, max_row))\n self.active_piece = TetrisPiece(piece_type, origin)\n if self.will_collide(direction='origin'):\n return False\n else:\n self.points += 1\n return True",
"def add_piece(self, piece, to):\n if self.nodes[to].piece != None:\n raise ValueError('A piece is already on %d' % to)\n piece.position = to\n self.nodes[to].piece = piece\n self.nodes[to].piece.set_active(to)",
"def draw(self):\n if len(self.cards) > 0:\n return self.cards.pop(0)\n else:\n self.reshuffle()\n return self.cards.pop(0)",
"def next_line():\r\n set_point(point().next_line())",
"def piecesGenerator(self,player):\n for row in range(8):\n for col in range(8):\n if self.board[row][col] != None:\n piece,pos = self.pieceAt((row,col)) ,((row,col))\n if piece['player'] == player:\n yield piece,pos",
"def get_piece(self, index):\n return self.squares[index]"
] |
[
"0.6434271",
"0.6405382",
"0.58710295",
"0.56589395",
"0.5634714",
"0.5630031",
"0.5568397",
"0.5532417",
"0.551113",
"0.54950935",
"0.54757816",
"0.5345696",
"0.5345064",
"0.5302938",
"0.5273212",
"0.52669513",
"0.5223759",
"0.52072686",
"0.5190134",
"0.51814973",
"0.51715875",
"0.51312995",
"0.5117496",
"0.51126343",
"0.5095467",
"0.50699705",
"0.5068423",
"0.5064628",
"0.50567526",
"0.50422776"
] |
0.7066668
|
0
|
Set the text of a gtk.Label with the preferred markup scheme. (Simple enough not to be worth extending gtk.Label just for this method.)
|
def styled_set_label_text(label, text):
front = "<b><span foreground='#AAAAAA' size='large'>"
end = "</span></b>"
label.set_markup(front+text+end)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def set_label_text (self, text):\n if text: self.label.set_label(text)",
"def set_text(self, text: str) -> Label:\n self.setText(text)\n return self",
"def set_label_text(self, text: str) -> None:\n self._label_text.set(text)\n self.update_label_text()",
"def set_label(self, text):\n self._label = text",
"def set_text(self, text):\n self._label.setText(text)",
"def set_text_format(\n self, text_format: constants.TextFormatStr | constants.TextFormat\n ) -> Label:\n self.setTextFormat(constants.TEXT_FORMAT.get_enum_value(text_format))\n return self",
"def setText(self, text):\n assert self.isLabel(text)\n self._text = text",
"def set_temporary_label(self, label):\n self._label.SetForegroundColour((100, 100, 100))\n self._label.SetLabel(label)\n self._label.SetFont(wx.NORMAL_FONT.Italic())\n # Don't seem to get automatic layout calls inside toolbars\n self.Layout()",
"def create_label(self, on, text: str):\n return tk.Label(on, font=self.FONT, bg=self.BG_COLOR, text=text)",
"def set_label(self, label):\n self._label.SetForegroundColour((0, 0, 0))\n self._label.SetLabel(label)\n self._label.SetFont(wx.NORMAL_FONT)\n # Don't seem to get automatic layout calls inside toolbars\n self.Layout()",
"def set_text_f(self, format, *args):\n self._text.set(format % args)\n self.change_bg(\"green\")\n self._label.update_idletasks()",
"def set_label_text(self, text, warn=False):\n self.text = text\n self.warn = warn\n self.opacity = .8\n if self.warn:\n self.color = 'darksalmon'\n else:\n self.color = 'lightgreen'\n self.on_size()",
"def createLabel(self, text, font):\n label = QLabel(self)\n label.setFont(font)\n label.setText(text)\n label.setFixedHeight(40)\n label.setAlignment(Qt.AlignCenter)\n label.setStyleSheet('background-color: rgba(0,0,0,0);color: white; border: 0px solid black; ')\n return label",
"def set_text(self, T):\n self.text = T",
"def bold_label(text, size=13):\n label = QtWidgets.QLabel(text)\n label.setStyleSheet(\"font-weight: bold; font-size: {}px\".format(size))\n label.update()\n return label",
"def set_text(self, text):\n self.set_text_f(\"%s\", text)",
"def setText(self, text):\n self.label.blockSignals(True)\n self.label.setText(text)\n self.label.blockSignals(False)",
"def renderLabel(self):\n self.render = self.font.render(self.text, True, self.color)\n self.rect = self.render.get_rect()",
"def label_plain_text(self, label_plain_text):\n\n self._label_plain_text = label_plain_text",
"def label_plain_text(self, label_plain_text):\n\n self._label_plain_text = label_plain_text",
"def SetLabel(self, s):\r\n\r\n self.label = s",
"def _create_label(self, x, y, text, width=50, **config):\n\n self.main_canvas.create_text(x, y, text='%6s' % text, width=width, **config)",
"def setLabel(self, label):\r\n\t\tself.label = label",
"def setLabel(self, label):\r\n\t\tself.label = label",
"def setLabel(self, label):\r\n\t\tself.label = label",
"def setText(self, *args):\n return _libsbml.TextGlyph_setText(self, *args)",
"def set_text(self):\n pass",
"def label_maker(string, size, font='Courier'):\n label = GLabel(string)\n label.font = str(font) + '-' + str(size)\n return label",
"def text_changed(self, text):\n self.lbl.setText(text)",
"def set_label(self, value):\n self._set_one_attribute(self.AttributeNames.LABEL, value)\n return self"
] |
[
"0.6827273",
"0.67186964",
"0.6487902",
"0.64215046",
"0.6359941",
"0.61352456",
"0.61226773",
"0.60732174",
"0.60162574",
"0.6002159",
"0.5993437",
"0.5968697",
"0.59637225",
"0.59441745",
"0.59131277",
"0.58569264",
"0.5849551",
"0.58246344",
"0.5805955",
"0.5805955",
"0.57830817",
"0.5760847",
"0.5690618",
"0.5690618",
"0.5690618",
"0.5690201",
"0.568829",
"0.5677446",
"0.56636983",
"0.5659608"
] |
0.8151647
|
0
|